Dataset columns:

| column | type | values / range |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 4 to 721 |
| content_id | string | length 40 |
| detected_licenses | list | 0 to 57 items |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 91 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 321 classes |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 to 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| github_id | int64 | 426 to 681M |
| star_events_count | int64 | 101 to 243k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[ns] | 2012-06-28 18:51:49 to 2023-09-14 21:59:16 (nullable) |
| gha_created_at | timestamp[ns] | 2008-02-11 22:55:26 to 2023-08-10 11:14:58 (nullable) |
| gha_language | string | 147 classes |
| src_encoding | string | 26 classes |
| language | string | 2 classes |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 6 to 10.2M |
| extension | string | 115 classes |
| filename | string | length 3 to 113 |
| content | string | length 6 to 10.2M |
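For orientation, here is a minimal sketch of how rows with this schema could be streamed and filtered with the Hugging Face `datasets` library; the dataset path is a placeholder, since the preview does not name the dataset.

```python
from datasets import load_dataset

# "org/dataset-name" is a placeholder path; substitute the actual dataset.
ds = load_dataset("org/dataset-name", split="train", streaming=True)

for row in ds:
    # column names follow the schema above
    if row["license_type"] == "permissive" and row["language"] == "Python":
        print(row["repo_name"], row["path"], row["length_bytes"])
        print(row["content"][:200])  # first 200 characters of the file
        break
```

The rows below are individual samples from such a table; each pairs repository metadata with a single source file in its `content` field.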

blob_id: 93cf90e40434c01078c4fc0ac5a1aaf0200efabc | directory_id: b8441dc1987be9e64fa3081d456b2a3060ec44d1 | content_id: d87f85616ad2ba30f8450fee5869cc7734f3d351
path: /mars/services/task/analyzer/fusion.py | filename: fusion.py | extension: py | length_bytes: 8,864
repo_name: mars-project/mars | branch_name: refs/heads/master | snapshot_id: f99fefbce999d58a9249bc72046787a9731c9c73 | revision_id: c36c53fa22e10ef9477d9c454401a2f281375f31
detected_licenses: [BSD-3-Clause, MIT, ISC, Apache-2.0, CC0-1.0, BSD-2-Clause, LicenseRef-scancode-unknown-license-reference] | license_type: permissive
visit_date: 2023-07-23T00:23:55.133015 | revision_date: 2023-07-03T11:44:54 | committer_date: 2023-07-03T11:44:54
github_id: 160,543,708 | star_events_count: 2,704 | fork_events_count: 362
gha_license_id: Apache-2.0 | gha_event_created_at: 2023-09-11T07:57:35 | gha_created_at: 2018-12-05T16:04:03 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from collections import defaultdict
from typing import Dict, List
from ....config import options
from ....core import ChunkGraph
from ....core.operand import VirtualOperand
from ....typing import BandType, ChunkType, OperandType
class Coloring:
"""
Coloring a chunk graph according to an algorithm
described in https://github.com/mars-project/mars/issues/2435
"""
def __init__(
self,
chunk_graph: ChunkGraph,
all_bands: List[BandType],
chunk_to_bands: Dict[ChunkType, BandType],
initial_same_color_num: int = None,
as_broadcaster_successor_num: int = None,
):
self.chunk_graph = chunk_graph
self.all_bands = all_bands
self.chunk_to_bands = chunk_to_bands
if initial_same_color_num is None:
has_gpu = any(c.op.gpu for c in chunk_graph)
if not has_gpu:
initial_same_color_num = max(options.combine_size // 2, 1)
else:
                # if gpu exists, we try to fuse more nodes to reduce cost
initial_same_color_num = max(options.combine_size * 2, 1)
self.initial_same_color_num = initial_same_color_num
if as_broadcaster_successor_num is None:
as_broadcaster_successor_num = options.combine_size * 2
self.successor_same_color_num = as_broadcaster_successor_num
self._coloring_iter = itertools.count()
def next_color(self) -> int:
return next(self._coloring_iter)
@classmethod
def _can_color_same(cls, chunk: ChunkType, predecessors: List[ChunkType]) -> bool:
if (
# VirtualOperand cannot be fused
any(isinstance(n.op, VirtualOperand) for n in [chunk] + predecessors)
# allocated on different bands
or len({n.op.gpu for n in [chunk] + predecessors}) > 1
# expect worker changed
or len({n.op.expect_worker for n in [chunk] + predecessors}) > 1
# scheduling hint tells that cannot be fused
or (
chunk.op.scheduling_hint is not None
and not chunk.op.scheduling_hint.can_be_fused()
)
):
return False
return True
def _color_init_nodes(self) -> Dict[OperandType, int]:
        # For initial ops with the same band but different priorities,
        # we color them with different colors to prevent wrong fusion.
        # e.g. md.read_csv ensures an incremental index by generating
        # chunks with ascending priorities (a smaller value means a higher priority):
        # chunk 0 has a higher priority than chunk 1,
        # so that when chunk 1 is executing, it already knows chunk 0's shape.
        # TODO: make it general instead of handling priority as a special case
band_priority_to_colors = dict()
for chunk, band in self.chunk_to_bands.items():
band_priority = (band, chunk.op.priority)
if band_priority not in band_priority_to_colors:
band_priority_to_colors[band_priority] = self.next_color()
band_priority_to_color_list = defaultdict(list)
for (band, priority), color in band_priority_to_colors.items():
band_priority_to_color_list[band, priority].append(color)
color_to_size = defaultdict(lambda: 0)
op_to_colors = dict()
for chunk, band in self.chunk_to_bands.items():
priority = chunk.op.priority
color = band_priority_to_color_list[band, priority][-1]
size = color_to_size[color]
if size >= self.initial_same_color_num:
color = self.next_color()
band_priority_to_color_list[band, priority].append(color)
color_to_size[color] += 1
op_to_colors[chunk.op] = color
return op_to_colors
def color(self) -> Dict[ChunkType, int]:
chunk_to_colors = dict()
        # step 1: color the initial nodes according to the bands assigned by the assigner
        op_to_colors = self._color_init_nodes()
        # step 2: propagate colors in topological order:
        # if the input nodes all have the same color, reuse that color;
        # otherwise, assign a new color.
broadcaster_chunk_set = set()
for chunk in self.chunk_graph.topological_iter():
if self.chunk_graph.count_successors(chunk) > self.successor_same_color_num:
# is broadcaster
broadcaster_chunk_set.add(chunk)
if chunk.op in op_to_colors:
# colored
chunk_to_colors[chunk] = op_to_colors[chunk.op]
continue
predecessors = self.chunk_graph.predecessors(chunk)
pred_colors = {op_to_colors[pred.op] for pred in predecessors}
if len(predecessors) == 1 and predecessors[0] in broadcaster_chunk_set:
# TODO: handle situation that chunks which specify reassign_workers
# predecessor is broadcaster, just allocate a new color
color = self.next_color()
elif len(pred_colors) == 1:
if self._can_color_same(chunk, predecessors):
# predecessors have only 1 color, will color with same one
color = next(iter(pred_colors))
else:
color = self.next_color()
else:
# has more than 1 color, color a new one
assert len(pred_colors) > 1
color = self.next_color()
op_to_colors[chunk.op] = chunk_to_colors[chunk] = color
# step 3: Propagate with reversed topological order,
# check a node with its inputs, if all inputs have different color with itself, skip;
# otherwise, if some of inputs have the same color, but some others have different color,
# color the input nodes with same one with a new color, and propagate to its inputs and so on.
for chunk in self.chunk_graph.topological_iter(reverse=True):
pred_colors = {
op_to_colors[pred.op]
for pred in self.chunk_graph.iter_successors(chunk)
}
chunk_color = chunk_to_colors[chunk]
if chunk_color in pred_colors and len(pred_colors) > 1:
# conflict
# color the successors with new colors
stack = []
for succ in self.chunk_graph.iter_successors(chunk):
if chunk_to_colors[succ] == chunk_color:
new_color = op_to_colors[succ.op] = self.next_color()
for c in succ.op.outputs:
if c not in self.chunk_graph: # pragma: no cover
continue
chunk_to_colors[c] = new_color
stack.extend(self.chunk_graph.successors(c))
# color the descendants with same color to the new one
# the descendants will not be visited more than 2 times
while len(stack) > 0:
node = stack.pop()
node_color = chunk_to_colors[node]
if node_color == chunk_color:
# same color, recolor to the new one
node_pred_colors = list(
{
op_to_colors[inp.op]
for inp in self.chunk_graph.iter_predecessors(node)
}
)
node_input_same_color = len(node_pred_colors) == 1
if node_input_same_color:
node_new_color = node_pred_colors[0]
else:
node_new_color = self.next_color()
op_to_colors[node.op] = node_new_color
for c in node.op.outputs:
if c not in self.chunk_graph: # pragma: no cover
continue
chunk_to_colors[c] = node_new_color
stack.extend(self.chunk_graph.successors(c))
return chunk_to_colors
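To make the step-2 rule described in `Coloring.color()` above concrete, here is a self-contained toy sketch (not the Mars implementation, and the DAG is made up): an initial node gets a fresh color, a node whose predecessors all share one color reuses that color, and conflicting predecessor colors force a new color. It skips the `_can_color_same` checks and the step-3 conflict pass; chunks that end up with the same color are candidates to be fused into one subtask.

```python
import itertools

# Hypothetical DAG, listed in topological order: node -> list of predecessors.
dag = {"a": [], "b": [], "c": ["a"], "d": ["a", "b"], "e": ["c"]}

color_counter = itertools.count()
colors = {}
for node, preds in dag.items():
    pred_colors = {colors[p] for p in preds}
    if not preds:                    # initial node: fresh color
        colors[node] = next(color_counter)
    elif len(pred_colors) == 1:      # single predecessor color: reuse (fuse)
        colors[node] = pred_colors.pop()
    else:                            # conflicting colors: fresh color
        colors[node] = next(color_counter)

print(colors)  # {'a': 0, 'b': 1, 'c': 0, 'd': 2, 'e': 0}
```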

blob_id: 96c6caa8ea0008494172125e3c90727d271c45a1 | directory_id: eb9f655206c43c12b497c667ba56a0d358b6bc3a | content_id: b7f692737e817f8541a7806409332e030ee26a06
path: /python/helpers/typeshed/stubs/oauthlib/oauthlib/oauth2/rfc6749/clients/legacy_application.pyi | filename: legacy_application.pyi | extension: pyi | length_bytes: 359
repo_name: JetBrains/intellij-community | branch_name: refs/heads/master | snapshot_id: 2ed226e200ecc17c037dcddd4a006de56cd43941 | revision_id: 05dbd4575d01a213f3f4d69aa4968473f2536142
detected_licenses: [Apache-2.0, MIT] | license_type: permissive
visit_date: 2023-09-03T17:06:37.560889 | revision_date: 2023-09-03T11:51:00 | committer_date: 2023-09-03T12:12:27
github_id: 2,489,216 | star_events_count: 16,288 | fork_events_count: 6,635
gha_license_id: Apache-2.0 | gha_event_created_at: 2023-09-12T07:41:58 | gha_created_at: 2011-09-30T13:33:05 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
from typing import Any
from .base import Client as Client
class LegacyApplicationClient(Client):
grant_type: str
def __init__(self, client_id, **kwargs) -> None: ...
def prepare_request_body( # type: ignore[override]
self, username, password, body: str = ..., scope: Any | None = ..., include_client_id: bool = ..., **kwargs
): ...
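The stub above only declares signatures; as a quick, hedged reminder of what the real oauthlib class does at runtime (placeholder credentials):

```python
from oauthlib.oauth2 import LegacyApplicationClient

client = LegacyApplicationClient(client_id="my-client-id")  # placeholder id
body = client.prepare_request_body(
    username="alice", password="wonderland", scope="read"
)
# body is a urlencoded string, roughly:
# grant_type=password&username=alice&password=wonderland&scope=read
print(body)
```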

blob_id: 30b2a25e7db2673403161c2ad9e5e2f134f51a4d | directory_id: a0447b03ad89a41a5c2e2073e32aeaf4d6279340 | content_id: 6d753a8a7da524faeea5cc5a7259d1998e035a07
path: /ironic/tests/unit/drivers/modules/storage/test_cinder.py | filename: test_cinder.py | extension: py | length_bytes: 27,869
repo_name: openstack/ironic | branch_name: refs/heads/master | snapshot_id: 2ae87e36d7a62d44b7ed62cad4e2e294d48e061b | revision_id: ab76ff12e1c3c2208455e917f1a40d4000b4e990
detected_licenses: [Apache-2.0] | license_type: permissive
visit_date: 2023-08-31T11:08:34.486456 | revision_date: 2023-08-31T04:45:05 | committer_date: 2023-08-31T04:45:05
github_id: 10,066,301 | star_events_count: 411 | fork_events_count: 365
gha_license_id: Apache-2.0 | gha_event_created_at: 2023-07-25T02:05:53 | gha_created_at: 2013-05-14T22:28:24 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
# Copyright 2016 Hewlett Packard Enterprise Development Company LP.
# Copyright 2016 IBM Corp
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from oslo_utils import uuidutils
from ironic.common import cinder as cinder_common
from ironic.common import exception
from ironic.common import states
from ironic.conductor import task_manager
from ironic.drivers.modules.storage import cinder
from ironic.drivers import utils as driver_utils
from ironic import objects
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.objects import utils as object_utils
class CinderInterfaceTestCase(db_base.DbTestCase):
def setUp(self):
super(CinderInterfaceTestCase, self).setUp()
self.config(action_retries=3,
action_retry_interval=0,
group='cinder')
self.config(enabled_boot_interfaces=['fake', 'pxe'],
enabled_storage_interfaces=['noop', 'cinder'])
self.interface = cinder.CinderStorage()
self.node = object_utils.create_test_node(self.context,
boot_interface='fake',
storage_interface='cinder')
@mock.patch.object(cinder, 'LOG', autospec=True)
def test__fail_validation(self, mock_log):
"""Ensure the validate helper logs and raises exceptions."""
fake_error = 'an error!'
expected = ("Failed to validate cinder storage interface for node "
"%s. an error!" % self.node.uuid)
with task_manager.acquire(self.context, self.node.id) as task:
self.assertRaises(exception.InvalidParameterValue,
self.interface._fail_validation,
task,
fake_error)
mock_log.error.assert_called_with(expected)
@mock.patch.object(cinder, 'LOG', autospec=True)
def test__generate_connector_raises_with_insufficient_data(self, mock_log):
with task_manager.acquire(self.context, self.node.id) as task:
self.assertRaises(exception.StorageError,
self.interface._generate_connector,
task)
self.assertTrue(mock_log.error.called)
def test__generate_connector_iscsi(self):
expected = {
'initiator': 'iqn.address',
'ip': 'ip.address',
'host': self.node.uuid,
'multipath': True}
object_utils.create_test_volume_connector(
self.context, node_id=self.node.id, type='iqn',
connector_id='iqn.address')
object_utils.create_test_volume_connector(
self.context, node_id=self.node.id, type='ip',
connector_id='ip.address', uuid=uuidutils.generate_uuid())
with task_manager.acquire(self.context, self.node.id) as task:
return_value = self.interface._generate_connector(task)
self.assertDictEqual(expected, return_value)
@mock.patch.object(cinder, 'LOG', autospec=True)
def test__generate_connector_iscsi_and_unknown(self, mock_log):
"""Validate we return and log with valid and invalid connectors."""
expected = {
'initiator': 'iqn.address',
'host': self.node.uuid,
'multipath': True}
object_utils.create_test_volume_connector(
self.context, node_id=self.node.id, type='iqn',
connector_id='iqn.address')
object_utils.create_test_volume_connector(
self.context, node_id=self.node.id, type='foo',
connector_id='bar', uuid=uuidutils.generate_uuid())
with task_manager.acquire(self.context, self.node.id) as task:
return_value = self.interface._generate_connector(task)
self.assertDictEqual(expected, return_value)
self.assertEqual(1, mock_log.warning.call_count)
@mock.patch.object(cinder, 'LOG', autospec=True)
def test__generate_connector_unknown_raises_excption(self, mock_log):
"""Validate an exception is raised with only an invalid connector."""
object_utils.create_test_volume_connector(
self.context, node_id=self.node.id, type='foo',
connector_id='bar')
with task_manager.acquire(self.context, self.node.id) as task:
self.assertRaises(
exception.StorageError,
self.interface._generate_connector,
task)
self.assertEqual(1, mock_log.warning.call_count)
self.assertEqual(1, mock_log.error.call_count)
def test__generate_connector_single_path(self):
"""Validate an exception is raised with only an invalid connector."""
expected = {
'initiator': 'iqn.address',
'host': self.node.uuid}
object_utils.create_test_volume_connector(
self.context, node_id=self.node.id, type='iqn',
connector_id='iqn.address')
with task_manager.acquire(self.context, self.node.id) as task:
return_value = self.interface._generate_connector(task)
self.assertDictEqual(expected, return_value)
def test__generate_connector_multiple_fc_wwns(self):
"""Validate handling of WWPNs and WWNNs."""
expected = {
'wwpns': ['wwpn1', 'wwpn2'],
'wwnns': ['wwnn3', 'wwnn4'],
'host': self.node.uuid,
'multipath': True}
object_utils.create_test_volume_connector(
self.context,
node_id=self.node.id,
type='wwpn',
connector_id='wwpn1',
uuid=uuidutils.generate_uuid())
object_utils.create_test_volume_connector(
self.context,
node_id=self.node.id,
type='wwpn',
connector_id='wwpn2',
uuid=uuidutils.generate_uuid())
object_utils.create_test_volume_connector(
self.context,
node_id=self.node.id,
type='wwnn',
connector_id='wwnn3',
uuid=uuidutils.generate_uuid())
object_utils.create_test_volume_connector(
self.context,
node_id=self.node.id,
type='wwnn',
connector_id='wwnn4',
uuid=uuidutils.generate_uuid())
with task_manager.acquire(self.context, self.node.id) as task:
return_value = self.interface._generate_connector(task)
self.assertDictEqual(expected, return_value)
@mock.patch.object(cinder.CinderStorage, '_fail_validation', autospec=True)
@mock.patch.object(cinder, 'LOG', autospec=True)
def test_validate_success_no_settings(self, mock_log, mock_fail):
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.validate(task)
self.assertFalse(mock_fail.called)
self.assertFalse(mock_log.called)
@mock.patch.object(cinder, 'LOG', autospec=True)
def test_validate_failure_if_iscsi_boot_no_connectors(self, mock_log):
valid_types = ', '.join(cinder.VALID_ISCSI_TYPES)
expected_msg = ("Failed to validate cinder storage interface for node "
"%(id)s. In order to enable the 'iscsi_boot' "
"capability for the node, an associated "
"volume_connector type must be valid for "
"iSCSI (%(options)s)." %
{'id': self.node.uuid, 'options': valid_types})
with task_manager.acquire(self.context, self.node.id) as task:
driver_utils.add_node_capability(task, 'iscsi_boot', 'True')
self.assertRaises(exception.InvalidParameterValue,
self.interface.validate,
task)
mock_log.error.assert_called_once_with(expected_msg)
@mock.patch.object(cinder, 'LOG', autospec=True)
def test_validate_failure_if_fc_boot_no_connectors(self, mock_log):
valid_types = ', '.join(cinder.VALID_FC_TYPES)
expected_msg = ("Failed to validate cinder storage interface for node "
"%(id)s. In order to enable the 'fibre_channel_boot' "
"capability for the node, an associated "
"volume_connector type must be valid for "
"Fibre Channel (%(options)s)." %
{'id': self.node.uuid, 'options': valid_types})
with task_manager.acquire(self.context, self.node.id) as task:
driver_utils.add_node_capability(task,
'fibre_channel_boot',
'True')
self.assertRaises(exception.InvalidParameterValue,
self.interface.validate,
task)
mock_log.error.assert_called_once_with(expected_msg)
@mock.patch.object(cinder.CinderStorage, '_fail_validation', autospec=True)
@mock.patch.object(cinder, 'LOG', autospec=True)
def test_validate_success_iscsi_connector(self, mock_log, mock_fail):
"""Perform validate with only an iSCSI connector in place."""
object_utils.create_test_volume_connector(
self.context, node_id=self.node.id, type='iqn',
connector_id='iqn.address')
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.validate(task)
self.assertFalse(mock_log.called)
self.assertFalse(mock_fail.called)
@mock.patch.object(cinder.CinderStorage, '_fail_validation', autospec=True)
@mock.patch.object(cinder, 'LOG', autospec=True)
def test_validate_success_fc_connectors(self, mock_log, mock_fail):
"""Perform validate with only FC connectors in place"""
object_utils.create_test_volume_connector(
self.context, node_id=self.node.id, type='wwpn',
connector_id='wwpn.address', uuid=uuidutils.generate_uuid())
object_utils.create_test_volume_connector(
self.context, node_id=self.node.id, type='wwnn',
connector_id='wwnn.address', uuid=uuidutils.generate_uuid())
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.validate(task)
self.assertFalse(mock_log.called)
self.assertFalse(mock_fail.called)
@mock.patch.object(cinder.CinderStorage, '_fail_validation', autospec=True)
@mock.patch.object(cinder, 'LOG', autospec=True)
def test_validate_success_connectors_and_boot(self, mock_log, mock_fail):
"""Perform validate with volume connectors and boot capabilities."""
object_utils.create_test_volume_connector(
self.context, node_id=self.node.id, type='iqn',
connector_id='iqn.address', uuid=uuidutils.generate_uuid())
object_utils.create_test_volume_connector(
self.context, node_id=self.node.id, type='wwpn',
connector_id='wwpn.address', uuid=uuidutils.generate_uuid())
object_utils.create_test_volume_connector(
self.context, node_id=self.node.id, type='wwnn',
connector_id='wwnn.address', uuid=uuidutils.generate_uuid())
with task_manager.acquire(self.context, self.node.id) as task:
driver_utils.add_node_capability(task,
'fibre_channel_boot',
'True')
driver_utils.add_node_capability(task, 'iscsi_boot', 'True')
self.interface.validate(task)
self.assertFalse(mock_log.called)
self.assertFalse(mock_fail.called)
@mock.patch.object(cinder.CinderStorage, '_fail_validation', autospec=True)
@mock.patch.object(cinder, 'LOG', autospec=True)
def test_validate_success_iscsi_targets(self, mock_log, mock_fail):
"""Validate success with full iscsi scenario."""
object_utils.create_test_volume_connector(
self.context, node_id=self.node.id, type='iqn',
connector_id='iqn.address', uuid=uuidutils.generate_uuid())
object_utils.create_test_volume_target(
self.context, node_id=self.node.id, volume_type='iscsi',
boot_index=0, volume_id='1234')
with task_manager.acquire(self.context, self.node.id) as task:
driver_utils.add_node_capability(task, 'iscsi_boot', 'True')
self.interface.validate(task)
self.assertFalse(mock_log.called)
self.assertFalse(mock_fail.called)
@mock.patch.object(cinder.CinderStorage, '_fail_validation', autospec=True)
@mock.patch.object(cinder, 'LOG', autospec=True)
def test_validate_success_fc_targets(self, mock_log, mock_fail):
"""Validate success with full fc scenario."""
object_utils.create_test_volume_connector(
self.context, node_id=self.node.id, type='wwpn',
connector_id='fc.address', uuid=uuidutils.generate_uuid())
object_utils.create_test_volume_connector(
self.context, node_id=self.node.id, type='wwnn',
connector_id='fc.address', uuid=uuidutils.generate_uuid())
object_utils.create_test_volume_target(
self.context, node_id=self.node.id, volume_type='fibre_channel',
boot_index=0, volume_id='1234')
with task_manager.acquire(self.context, self.node.id) as task:
driver_utils.add_node_capability(task,
'fibre_channel_boot',
'True')
self.interface.validate(task)
self.assertFalse(mock_log.called)
self.assertFalse(mock_fail.called)
@mock.patch.object(cinder, 'LOG', autospec=True)
def test_validate_fails_with_ipxe_not_enabled(self, mock_log):
"""Ensure a validation failure is raised when iPXE not enabled."""
self.node.boot_interface = 'pxe'
self.node.save()
object_utils.create_test_volume_connector(
self.context, node_id=self.node.id, type='iqn',
connector_id='foo.address')
object_utils.create_test_volume_target(
self.context, node_id=self.node.id, volume_type='iscsi',
boot_index=0, volume_id='2345')
with task_manager.acquire(self.context, self.node.id) as task:
driver_utils.add_node_capability(task, 'iscsi_boot', 'True')
self.assertRaises(exception.InvalidParameterValue,
self.interface.validate,
task)
self.assertTrue(mock_log.error.called)
@mock.patch.object(cinder, 'LOG', autospec=True)
def test_validate_fails_when_fc_connectors_unequal(self, mock_log):
"""Validate should fail with only wwnn FC connector in place"""
object_utils.create_test_volume_connector(
self.context, node_id=self.node.id, type='wwnn',
connector_id='wwnn.address')
with task_manager.acquire(self.context, self.node.id) as task:
self.assertRaises(exception.StorageError,
self.interface.validate,
task)
self.assertTrue(mock_log.error.called)
@mock.patch.object(cinder, 'LOG', autospec=True)
def test_validate_fail_on_unknown_volume_types(self, mock_log):
"""Ensure exception is raised when connector/target do not match."""
object_utils.create_test_volume_connector(
self.context, node_id=self.node.id, type='iqn',
connector_id='foo.address')
object_utils.create_test_volume_target(
self.context, node_id=self.node.id, volume_type='wetcat',
boot_index=0, volume_id='1234')
with task_manager.acquire(self.context, self.node.id) as task:
driver_utils.add_node_capability(task, 'iscsi_boot', 'True')
self.assertRaises(exception.InvalidParameterValue,
self.interface.validate,
task)
self.assertTrue(mock_log.error.called)
@mock.patch.object(cinder, 'LOG', autospec=True)
def test_validate_fails_iscsi_conn_fc_target(self, mock_log):
"""Validate failure of iSCSI connectors with FC target."""
object_utils.create_test_volume_connector(
self.context, node_id=self.node.id, type='iqn',
connector_id='foo.address')
object_utils.create_test_volume_target(
self.context, node_id=self.node.id, volume_type='fibre_channel',
boot_index=0, volume_id='1234')
with task_manager.acquire(self.context, self.node.id) as task:
driver_utils.add_node_capability(task, 'iscsi_boot', 'True')
self.assertRaises(exception.InvalidParameterValue,
self.interface.validate,
task)
self.assertTrue(mock_log.error.called)
@mock.patch.object(cinder, 'LOG', autospec=True)
def test_validate_fails_fc_conn_iscsi_target(self, mock_log):
"""Validate failure of FC connectors with iSCSI target."""
object_utils.create_test_volume_connector(
self.context, node_id=self.node.id, type='fibre_channel',
connector_id='foo.address')
object_utils.create_test_volume_target(
self.context, node_id=self.node.id, volume_type='iscsi',
boot_index=0, volume_id='1234')
with task_manager.acquire(self.context, self.node.id) as task:
driver_utils.add_node_capability(task,
'fibre_channel_boot',
'True')
self.assertRaises(exception.InvalidParameterValue,
self.interface.validate,
task)
self.assertTrue(mock_log.error.called)
@mock.patch.object(cinder_common, 'detach_volumes', autospec=True)
@mock.patch.object(cinder_common, 'attach_volumes', autospec=True)
@mock.patch.object(cinder, 'LOG', autospec=True)
def test_attach_detach_volumes_no_volumes(self, mock_log,
mock_attach, mock_detach):
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.attach_volumes(task)
self.interface.detach_volumes(task)
self.assertFalse(mock_attach.called)
self.assertFalse(mock_detach.called)
self.assertFalse(mock_log.called)
@mock.patch.object(cinder_common, 'detach_volumes', autospec=True)
@mock.patch.object(cinder_common, 'attach_volumes', autospec=True)
def test_attach_detach_volumes_fails_without_connectors(self,
mock_attach,
mock_detach):
"""Without connectors, attach and detach should fail."""
object_utils.create_test_volume_target(
self.context, node_id=self.node.id, volume_type='iscsi',
boot_index=0, volume_id='1234')
with task_manager.acquire(self.context, self.node.id) as task:
self.assertRaises(exception.StorageError,
self.interface.attach_volumes, task)
self.assertFalse(mock_attach.called)
self.assertRaises(exception.StorageError,
self.interface.detach_volumes, task)
self.assertFalse(mock_detach.called)
@mock.patch.object(cinder_common, 'detach_volumes', autospec=True)
@mock.patch.object(cinder_common, 'attach_volumes', autospec=True)
@mock.patch.object(cinder, 'LOG', autospec=True)
@mock.patch.object(objects.volume_target.VolumeTarget, 'list_by_volume_id',
autospec=True)
def test_attach_detach_called_with_target_and_connector(self,
mock_target_list,
mock_log,
mock_attach,
mock_detach):
target_uuid = uuidutils.generate_uuid()
test_volume_target = object_utils.create_test_volume_target(
self.context, node_id=self.node.id, volume_type='iscsi',
boot_index=0, volume_id='1234', uuid=target_uuid)
object_utils.create_test_volume_connector(
self.context, node_id=self.node.id, type='iqn',
connector_id='iqn.address')
expected_target_properties = {
'volume_id': '1234',
'ironic_volume_uuid': target_uuid,
'new_property': 'foo'}
mock_attach.return_value = [{
'driver_volume_type': 'iscsi',
'data': expected_target_properties}]
mock_target_list.return_value = [test_volume_target]
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.attach_volumes(task)
self.assertFalse(mock_log.called)
self.assertTrue(mock_attach.called)
task.volume_targets[0].refresh()
self.assertEqual(expected_target_properties,
task.volume_targets[0]['properties'])
self.interface.detach_volumes(task)
self.assertFalse(mock_log.called)
self.assertTrue(mock_detach.called)
@mock.patch.object(cinder_common, 'detach_volumes', autospec=True)
@mock.patch.object(cinder_common, 'attach_volumes', autospec=True)
@mock.patch.object(cinder, 'LOG', autospec=True)
def test_attach_volumes_failure(self, mock_log, mock_attach, mock_detach):
"""Verify detach is called upon attachment failing."""
object_utils.create_test_volume_target(
self.context, node_id=self.node.id, volume_type='iscsi',
boot_index=0, volume_id='1234')
object_utils.create_test_volume_target(
self.context, node_id=self.node.id, volume_type='iscsi',
boot_index=1, volume_id='5678', uuid=uuidutils.generate_uuid())
object_utils.create_test_volume_connector(
self.context, node_id=self.node.id, type='iqn',
connector_id='iqn.address')
mock_attach.side_effect = exception.StorageError('foo')
with task_manager.acquire(self.context, self.node.id) as task:
self.assertRaises(exception.StorageError,
self.interface.attach_volumes, task)
self.assertTrue(mock_attach.called)
self.assertTrue(mock_detach.called)
# Replacing the mock to not return an error, should still raise an
# exception.
mock_attach.reset_mock()
mock_detach.reset_mock()
@mock.patch.object(cinder_common, 'detach_volumes', autospec=True)
@mock.patch.object(cinder_common, 'attach_volumes', autospec=True)
@mock.patch.object(cinder, 'LOG', autospec=True)
def test_attach_volumes_failure_no_attach_error(self, mock_log,
mock_attach, mock_detach):
"""Verify that detach is called on volume/connector mismatch.
Volume attachment fails if the number of attachments completed
does not match the number of configured targets.
"""
object_utils.create_test_volume_target(
self.context, node_id=self.node.id, volume_type='iscsi',
boot_index=0, volume_id='1234')
object_utils.create_test_volume_target(
self.context, node_id=self.node.id, volume_type='iscsi',
boot_index=1, volume_id='5678', uuid=uuidutils.generate_uuid())
object_utils.create_test_volume_connector(
self.context, node_id=self.node.id, type='iqn',
connector_id='iqn.address')
mock_attach.return_value = {'mock_return'}
with task_manager.acquire(self.context, self.node.id) as task:
self.assertRaises(exception.StorageError,
self.interface.attach_volumes, task)
self.assertTrue(mock_attach.called)
self.assertTrue(mock_detach.called)
@mock.patch.object(cinder_common, 'detach_volumes', autospec=True)
@mock.patch.object(cinder, 'LOG', autospec=True)
def test_detach_volumes_failure(self, mock_log, mock_detach):
object_utils.create_test_volume_target(
self.context, node_id=self.node.id, volume_type='iscsi',
boot_index=0, volume_id='1234')
object_utils.create_test_volume_connector(
self.context, node_id=self.node.id, type='iqn',
connector_id='iqn.address')
with task_manager.acquire(self.context, self.node.id) as task:
# The first attempt should succeed.
# The second attempt should throw StorageError
# Third attempt, should log errors but not raise an exception.
mock_detach.side_effect = [None,
exception.StorageError('bar'),
None]
# This should generate 1 mock_detach call and succeed
self.interface.detach_volumes(task)
task.node.provision_state = states.DELETED
            # This should generate the other 2 mock_detach calls and warn
self.interface.detach_volumes(task)
self.assertEqual(3, mock_detach.call_count)
self.assertEqual(1, mock_log.warning.call_count)
@mock.patch.object(cinder_common, 'detach_volumes', autospec=True)
@mock.patch.object(cinder, 'LOG', autospec=True)
def test_detach_volumes_failure_raises_exception(self,
mock_log,
mock_detach):
object_utils.create_test_volume_target(
self.context, node_id=self.node.id, volume_type='iscsi',
boot_index=0, volume_id='1234')
object_utils.create_test_volume_connector(
self.context, node_id=self.node.id, type='iqn',
connector_id='iqn.address')
with task_manager.acquire(self.context, self.node.id) as task:
mock_detach.side_effect = exception.StorageError('bar')
self.assertRaises(exception.StorageError,
self.interface.detach_volumes,
task)
# Check that we warn every retry except the last one.
self.assertEqual(3, mock_log.warning.call_count)
self.assertEqual(1, mock_log.error.call_count)
# CONF.cinder.action_retries + 1, number of retries is set to 3.
self.assertEqual(4, mock_detach.call_count)
def test_should_write_image(self):
object_utils.create_test_volume_target(
self.context, node_id=self.node.id, volume_type='iscsi',
boot_index=0, volume_id='1234')
with task_manager.acquire(self.context, self.node.id) as task:
self.assertFalse(self.interface.should_write_image(task))
self.node.instance_info = {'image_source': 'fake-value'}
self.node.save()
with task_manager.acquire(self.context, self.node.id) as task:
self.assertTrue(self.interface.should_write_image(task))

blob_id: 5e22661def30bc9e35cfea47c056e68e8411fcb5 | directory_id: 8c0a92d54ea8b8c07648d454529cba588081ce12 | content_id: d188587f67f71398114b92c664cd38833f9c7055
path: /state_representation/models.py | filename: models.py | extension: py | length_bytes: 8,472
repo_name: araffin/robotics-rl-srl | branch_name: refs/heads/master | snapshot_id: 79c1e7f34b5a28367fabbe80e7cfe81e7693cd4c | revision_id: eae7c1ab310c79662f6e68c0d255e08641037ffa
detected_licenses: [MIT] | license_type: permissive
visit_date: 2023-08-25T17:09:16.050197 | revision_date: 2021-04-05T18:43:17 | committer_date: 2021-04-05T18:43:17
github_id: 118,007,580 | star_events_count: 590 | fork_events_count: 102
gha_license_id: MIT | gha_event_created_at: 2019-08-09T09:30:36 | gha_created_at: 2018-01-18T16:23:17 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
import json
import pickle as pkl
from collections import OrderedDict
import numpy as np
import torch as th
import srl_zoo.preprocessing as preprocessing
from srl_zoo.models import CustomCNN, ConvolutionalNetwork, SRLModules, SRLModulesSplit
from srl_zoo.preprocessing import preprocessImage, getNChannels
from srl_zoo.utils import printGreen, printYellow
NOISE_STD = 1e-6 # To avoid NaN for SRL
def getSRLDim(path=None, env_object=None):
"""
Get the dim of SRL model
:param path: (str) Path to a srl model
:param env_object: (gym env object)
:return: (int)
"""
if path is not None:
# Get path to the log folder
log_folder = '/'.join(path.split('/')[:-1]) + '/'
with open(log_folder + 'exp_config.json', 'r') as f:
exp_config = json.load(f, object_pairs_hook=OrderedDict)
try:
return exp_config['state-dim']
except KeyError:
# Old format
return exp_config['state_dim']
else:
return env_object.getGroundTruthDim()
def loadSRLModel(path=None, cuda=False, state_dim=None, env_object=None):
"""
Load a trained SRL model, it will try to guess the model type from the path
:param path: (str) Path to a srl model
:param cuda: (bool)
:param state_dim: (int)
:param env_object: (gym env object)
:return: (srl model)
"""
model_type, losses, n_actions, model = None, None, None, None
if path is not None:
# Get path to the log folder
log_folder = '/'.join(path.split('/')[:-1]) + '/'
with open(log_folder + 'exp_config.json', 'r') as f:
# IMPORTANT: keep the order for the losses
# so the json is loaded as an OrderedDict
exp_config = json.load(f, object_pairs_hook=OrderedDict)
state_dim = exp_config.get('state-dim', None)
losses = exp_config.get('losses', None) # None in the case of baseline models (pca, supervised)
n_actions = exp_config.get('n_actions', None) # None in the case of baseline models (pca, supervised)
model_type = exp_config.get('model-type', None)
use_multi_view = exp_config.get('multi-view', False)
inverse_model_type = exp_config.get('inverse-model-type', 'linear')
assert state_dim is not None, \
"Please make sure you are loading an up to date model with a conform exp_config file."
split_dimensions = exp_config.get('split-dimensions')
if isinstance(split_dimensions, OrderedDict):
n_dims = sum(split_dimensions.values())
# Combine losses instead of splitting
if n_dims == 0:
split_dimensions = None
else:
assert env_object is not None or state_dim > 0, \
"When learning states, state_dim must be > 0. Otherwise, set SRL_MODEL_PATH \
to a srl_model.pth file with learned states."
if path is not None:
if 'baselines' in path:
if 'pca' in path:
model_type = 'pca'
model = SRLPCA(state_dim)
assert model_type is not None or model is not None, \
"Model type not supported. In order to use loadSRLModel, a path to an SRL model must be given."
assert not (losses is None and not model_type == 'pca'), \
"Please make sure you are loading an up to date model with a conform exp_config file."
assert not (n_actions is None and not (model_type == 'pca' or 'supervised' in losses)), \
"Please make sure you are loading an up to date model with a conform exp_config file."
if model is None:
if use_multi_view:
preprocessing.preprocess.N_CHANNELS = 6
model = SRLNeuralNetwork(state_dim, cuda, model_type, n_actions=n_actions, losses=losses,
split_dimensions=split_dimensions, inverse_model_type=inverse_model_type)
model_name = model_type
if 'baselines' not in path:
model_name += " with " + ", ".join(losses)
printGreen("\nSRL: Using {} \n".format(model_name))
if path is not None:
printYellow("Loading trained model...{}".format(path))
model.load(path)
return model
class SRLBaseClass(object):
"""Base class for state representation learning models"""
def __init__(self, state_dim, cuda=False):
"""
:param state_dim: (int)
:param cuda: (bool)
"""
super(SRLBaseClass, self).__init__()
self.state_dim = state_dim
self.cuda = cuda
self.model = None
def load(self, path):
"""
Load a trained SRL model
:param path: (str)
"""
raise NotImplementedError("load() not implemented")
def getState(self, observation, env_id=0):
"""
Predict the state for a given observation
:param observation: (numpy Number) the input observation
:param env_id: (int) the environment ID for multi env systems (default=0)
:return: (numpy Number)
"""
raise NotImplementedError("getState() not implemented")
class SRLNeuralNetwork(SRLBaseClass):
"""SRL using a neural network as a state representation model"""
def __init__(self, state_dim, cuda, model_type="custom_cnn", n_actions=None, losses=None, split_dimensions=None,
inverse_model_type="linear"):
"""
:param state_dim: (int)
:param cuda: (bool)
:param model_type: (string)
:param n_actions: action space dimensions (int)
:param losses: list of optimized losses defining the model (list of string)
:param split_dimensions: (OrderedDict) Number of dimensions for the different losses
:param inverse_model_type: (string)
"""
super(SRLNeuralNetwork, self).__init__(state_dim, cuda)
self.model_type = model_type
if "supervised" in losses:
if "cnn" in model_type:
self.model = CustomCNN(state_dim)
elif model_type == "resnet":
self.model = ConvolutionalNetwork(state_dim)
elif isinstance(split_dimensions, OrderedDict):
self.model = SRLModulesSplit(state_dim=state_dim, action_dim=n_actions, model_type=model_type,
cuda=self.cuda, losses=losses, split_dimensions=split_dimensions,
inverse_model_type=inverse_model_type)
else:
self.model = SRLModules(state_dim=state_dim, action_dim=n_actions, model_type=model_type,
cuda=self.cuda, losses=losses, inverse_model_type=inverse_model_type)
self.model.eval()
self.device = th.device("cuda" if th.cuda.is_available() and cuda else "cpu")
self.model = self.model.to(self.device)
def load(self, path):
self.model.load_state_dict(th.load(path))
def getState(self, observation, env_id=0):
if getNChannels() > 3:
observation = np.dstack((preprocessImage(observation[:, :, :3], convert_to_rgb=False),
preprocessImage(observation[:, :, 3:], convert_to_rgb=False)))
else:
observation = preprocessImage(observation, convert_to_rgb=False)
# Create 4D Tensor
observation = observation.reshape(1, *observation.shape)
# Channel first
observation = np.transpose(observation, (0, 3, 2, 1))
observation = th.from_numpy(observation).float().to(self.device)
with th.no_grad():
state = self.model.getStates(observation)[0]
return state.to(th.device("cpu")).detach().numpy()
class SRLPCA(SRLBaseClass):
"""PCA as a state representation"""
def __init__(self, state_dim):
super(SRLPCA, self).__init__(state_dim)
def load(self, path):
try:
with open(path, "rb") as f:
self.model = pkl.load(f)
except UnicodeDecodeError:
# Load pickle files saved with python 2
with open(path, "rb") as f:
self.model = pkl.load(f, encoding='latin1')
def getState(self, observation, env_id=0):
observation = observation[None] # Add a dimension
# n_features = width * height * n_channels
n_features = np.prod(observation.shape[1:])
# Convert to a 1D array
observation = observation.reshape(-1, n_features)
return self.model.transform(observation)[0]
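A hypothetical usage sketch for the loader above; the checkpoint path is a placeholder, and the folder next to `srl_model.pth` is assumed to contain the `exp_config.json` that `loadSRLModel` parses.

```python
import numpy as np
# from state_representation.models import loadSRLModel  # assumed import path

# Placeholder path; loadSRLModel reads <log_folder>/exp_config.json to recover
# state-dim, losses and model-type before loading the weights.
model = loadSRLModel(path="logs/my_experiment/srl_model.pth", cuda=False)

observation = np.zeros((224, 224, 3), dtype=np.uint8)  # one raw RGB frame
state = model.getState(observation)                    # low-dimensional state
print(state.shape)
```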

blob_id: dc1121bacbbc5c3bd9172e0d0ebef3cc7a3f2889 | directory_id: 982a904a83e2caa7acd8b2ac19cfc5a4fb75bde1 | content_id: b01654d9bd316b830da46d4cfa55ea43bb3e1a26
path: /examples/ch13_TwitterV2/snippets_ipynb/tweetlistener.py | filename: tweetlistener.py | extension: py | length_bytes: 2,857
repo_name: pdeitel/IntroToPython | branch_name: refs/heads/master | snapshot_id: 73bc349fe40701b51f49d17d7fbc5b9985885e48 | revision_id: 978093febf2ed849a2049e0b0860d2c4998306f7
detected_licenses: [LicenseRef-scancode-warranty-disclaimer] | license_type: no_license
visit_date: 2023-02-09T08:04:15.313698 | revision_date: 2023-02-03T23:23:42 | committer_date: 2023-02-03T23:23:42
github_id: 173,331,130 | star_events_count: 249 | fork_events_count: 371
gha_license_id: null | gha_event_created_at: 2022-12-04T06:52:26 | gha_created_at: 2019-03-01T16:08:37 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
# tweetlistener.py
"""StreamListener subclass that processes tweets as they arrive."""
from deep_translator import GoogleTranslator
import tweepy
class TweetListener(tweepy.StreamingClient):
"""Handles incoming Tweet stream."""
def __init__(self, bearer_token, limit=10):
"""Create instance variables for tracking number of tweets."""
self.tweet_count = 0
self.TWEET_LIMIT = limit
# GoogleTranslator object for translating tweets to English
self.translator = GoogleTranslator(source='auto', target='en')
super().__init__(bearer_token, wait_on_rate_limit=True)
def on_connect(self):
"""Called when your connection attempt is successful, enabling
you to perform appropriate application tasks at that point."""
print('Connection successful\n')
def on_response(self, response):
"""Called when Twitter pushes a new tweet to you."""
try:
# get username of user who sent the tweet
username = response.includes['users'][0].username
print(f'Screen name: {username}')
print(f' Language: {response.data.lang}')
print(f' Tweet text: {response.data.text}')
if response.data.lang != 'en' and response.data.lang != 'und':
english = self.translator.translate(response.data.text)
print(f' Translated: {english}')
print()
self.tweet_count += 1
except Exception as e:
            print(f'Exception occurred: {e}')
self.disconnect()
# if TWEET_LIMIT is reached, terminate streaming
if self.tweet_count == self.TWEET_LIMIT:
self.disconnect()
##########################################################################
# (C) Copyright 2022 by Deitel & Associates, Inc. and #
# Pearson Education, Inc. All Rights Reserved. #
# #
# DISCLAIMER: The authors and publisher of this book have used their #
# best efforts in preparing the book. These efforts include the #
# development, research, and testing of the theories and programs #
# to determine their effectiveness. The authors and publisher make #
# no warranty of any kind, expressed or implied, with regard to these #
# programs or to the documentation contained in these books. The authors #
# and publisher shall not be liable in any event for incidental or #
# consequential damages in connection with, or arising out of, the #
# furnishing, performance, or use of these programs. #
##########################################################################
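A hedged driver sketch for the class above, assuming tweepy 4.x (the `StreamingClient` API) and a valid bearer token; the stream rule is arbitrary:

```python
import tweepy
# from tweetlistener import TweetListener  # assumed import of the class above

bearer_token = "YOUR_BEARER_TOKEN"  # placeholder
listener = TweetListener(bearer_token, limit=5)

# Register a filter rule, then stream tweets with the fields on_response() uses.
listener.add_rules(tweepy.StreamRule("python"))
listener.filter(expansions=["author_id"], tweet_fields=["lang"])
```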

blob_id: 239860f31588adbfe2a0522241de74e1ff070bcc | directory_id: 0820f480ec00bbe3e7e75c38ee2c13adf1ace6ed | content_id: 34734b63e49b392dbf6b0492eac425acdb115dc8
path: /zmq/backend/cffi/_poll.py | filename: _poll.py | extension: py | length_bytes: 2,886
repo_name: zeromq/pyzmq | branch_name: refs/heads/main | snapshot_id: 9f2258d3bf1211cec9b12b4b0272e9ccd85b2ac5 | revision_id: 9bee18aa4112bb6351c226c2000c7a858db386ab
detected_licenses: [BSD-3-Clause, LGPL-3.0-only, LicenseRef-scancode-zeromq-exception-lgpl-3.0] | license_type: permissive
visit_date: 2023-08-31T08:20:21.445949 | revision_date: 2023-08-10T09:28:41 | committer_date: 2023-08-10T09:28:41
github_id: 788,264 | star_events_count: 2,944 | fork_events_count: 656
gha_license_id: BSD-3-Clause | gha_event_created_at: 2023-09-05T06:32:54 | gha_created_at: 2010-07-21T07:20:37 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
"""zmq poll function"""
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
try:
from time import monotonic
except ImportError:
from time import clock as monotonic
import warnings
from zmq.error import InterruptedSystemCall, _check_rc
from ._cffi import ffi
from ._cffi import lib as C
def _make_zmq_pollitem(socket, flags):
zmq_socket = socket._zmq_socket
zmq_pollitem = ffi.new('zmq_pollitem_t*')
zmq_pollitem.socket = zmq_socket
zmq_pollitem.fd = 0
zmq_pollitem.events = flags
zmq_pollitem.revents = 0
return zmq_pollitem[0]
def _make_zmq_pollitem_fromfd(socket_fd, flags):
zmq_pollitem = ffi.new('zmq_pollitem_t*')
zmq_pollitem.socket = ffi.NULL
zmq_pollitem.fd = socket_fd
zmq_pollitem.events = flags
zmq_pollitem.revents = 0
return zmq_pollitem[0]
def zmq_poll(sockets, timeout):
cffi_pollitem_list = []
low_level_to_socket_obj = {}
from zmq import Socket
for item in sockets:
if isinstance(item[0], Socket):
low_level_to_socket_obj[item[0]._zmq_socket] = item
cffi_pollitem_list.append(_make_zmq_pollitem(item[0], item[1]))
else:
if not isinstance(item[0], int):
# not an FD, get it from fileno()
item = (item[0].fileno(), item[1])
low_level_to_socket_obj[item[0]] = item
cffi_pollitem_list.append(_make_zmq_pollitem_fromfd(item[0], item[1]))
items = ffi.new('zmq_pollitem_t[]', cffi_pollitem_list)
list_length = ffi.cast('int', len(cffi_pollitem_list))
while True:
c_timeout = ffi.cast('long', timeout)
start = monotonic()
rc = C.zmq_poll(items, list_length, c_timeout)
try:
_check_rc(rc)
except InterruptedSystemCall:
if timeout > 0:
ms_passed = int(1000 * (monotonic() - start))
if ms_passed < 0:
# don't allow negative ms_passed,
# which can happen on old Python versions without time.monotonic.
warnings.warn(
"Negative elapsed time for interrupted poll: %s."
" Did the clock change?" % ms_passed,
RuntimeWarning,
)
ms_passed = 0
timeout = max(0, timeout - ms_passed)
continue
else:
break
result = []
for item in items:
if item.revents > 0:
if item.socket != ffi.NULL:
result.append(
(
low_level_to_socket_obj[item.socket][0],
item.revents,
)
)
else:
result.append((item.fd, item.revents))
return result
__all__ = ['zmq_poll']
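This backend function implements the same (socket, flags) list and millisecond-timeout contract that pyzmq's public `zmq.Poller` exposes; a small sketch against the public API (a local TCP pair, assumed acceptable for illustration):

```python
import zmq

ctx = zmq.Context.instance()
receiver = ctx.socket(zmq.PULL)
port = receiver.bind_to_random_port("tcp://127.0.0.1")
sender = ctx.socket(zmq.PUSH)
sender.connect(f"tcp://127.0.0.1:{port}")
sender.send(b"ping")

poller = zmq.Poller()
poller.register(receiver, zmq.POLLIN)
events = dict(poller.poll(timeout=1000))   # timeout in ms, as in zmq_poll above
if receiver in events and events[receiver] & zmq.POLLIN:
    print(receiver.recv())                 # b'ping'
```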

blob_id: 6b8e123300a3c9a0abfd27ef1021b34a23628193 | directory_id: 568a2667a1b6ec33a0dec9ac01844ef74e11ab2b | content_id: b5daec846193534fda6bc1bb71320ad66722d5e7
path: /landlab/components/depth_dependent_diffusion/__init__.py | filename: __init__.py | extension: py | length_bytes: 112
repo_name: landlab/landlab | branch_name: refs/heads/master | snapshot_id: 0bcc9b7b1d8c4d7f79bad687e1526b80ebc83728 | revision_id: 1cd72e5832ece1aa922cd1b239e2e94ed0f11f8b
detected_licenses: [MIT] | license_type: permissive
visit_date: 2023-08-31T07:24:21.545523 | revision_date: 2023-08-29T18:51:06 | committer_date: 2023-08-29T18:51:06
github_id: 19,599,383 | star_events_count: 326 | fork_events_count: 313
gha_license_id: MIT | gha_event_created_at: 2023-09-14T19:12:23 | gha_created_at: 2014-05-09T04:52:50 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
from .hillslope_depth_dependent_linear_flux import DepthDependentDiffuser
__all__ = ["DepthDependentDiffuser"]
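A hedged usage sketch of the re-exported component, assuming the grid fields that `DepthDependentDiffuser` conventionally requires (`topographic__elevation`, `soil__depth`, `soil_production__rate`); parameter values are arbitrary:

```python
from landlab import RasterModelGrid
from landlab.components import DepthDependentDiffuser

grid = RasterModelGrid((5, 5), xy_spacing=10.0)
grid.add_zeros("topographic__elevation", at="node")
grid.add_zeros("soil__depth", at="node")
grid.add_zeros("soil_production__rate", at="node")

diffuser = DepthDependentDiffuser(
    grid, linear_diffusivity=0.01, soil_transport_decay_depth=0.5
)
diffuser.run_one_step(1000.0)  # advance hillslope soil transport by one step
```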

blob_id: 5c12add8e92dc11eaf4165441f4cb7441a95b4a4 | directory_id: dd91ea0a9b143371cfb186eaa74333da9488510d | content_id: 6a35e55e2d95a544c3ff33f7c7bed6ba1cae2d94
path: /python/interpret-core/interpret/visual/_interactive.py | filename: _interactive.py | extension: py | length_bytes: 7,582
repo_name: interpretml/interpret | branch_name: refs/heads/develop | snapshot_id: 6c6ef2f2e6a6bb9c43633251089385cc44affe16 | revision_id: e6f38ea195aecbbd9d28c7183a83c65ada16e1ae
detected_licenses: [MIT] | license_type: permissive
visit_date: 2023-09-03T17:42:50.611413 | revision_date: 2023-08-28T18:16:10 | committer_date: 2023-08-28T18:16:10
github_id: 184,704,903 | star_events_count: 3,731 | fork_events_count: 472
gha_license_id: MIT | gha_event_created_at: 2023-08-15T04:31:34 | gha_created_at: 2019-05-03T05:47:52 | gha_language: C++
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
# Copyright (c) 2023 The InterpretML Contributors
# Distributed under the MIT software license
import sys
import logging
from ..provider import AutoVisualizeProvider, PreserveProvider, DashProvider
_log = logging.getLogger(__name__)
_current_module = sys.modules[__name__]
_current_module._preserve_provider = None
_current_module.visualize_provider = None
def get_visualize_provider():
"""Gets visualization provider for show() related calls.
Returns:
Visualization provider.
"""
return _current_module.visualize_provider
def set_visualize_provider(provider):
"""Sets visualization provider for show() related calls.
Args:
provider: Visualization provider found in "interpret.provider.visualize".
"""
has_render_method = hasattr(provider, "render")
if provider is None or has_render_method:
_current_module.visualize_provider = provider
else: # pragma: no cover
raise ValueError(
"Object of type {} is not a visualize provider.".format(type(provider))
)
def set_show_addr(addr):
"""Set a (ip, port) for inline visualizations and dashboard. Has side effects stated below.
Side effect: restarts the app runner for 'show' method.
Args:
addr: (ip, port) tuple as address to assign show method to.
Returns:
None.
"""
addr = (addr[0], int(addr[1]))
init_show_server(addr)
def get_show_addr():
"""Returns (ip, port) used for show method.
Returns:
Address tuple (ip, port).
"""
if isinstance(_current_module.visualize_provider, DashProvider):
addr = (
_current_module.visualize_provider.app_runner.ip,
_current_module.visualize_provider.app_runner.port,
)
return addr
else:
return None
def shutdown_show_server():
"""This is a hard shutdown method for the show method's backing server.
Returns:
True if show server has stopped.
"""
if isinstance(_current_module.visualize_provider, DashProvider):
return _current_module.visualize_provider.app_runner.stop()
return True # pragma: no cover
def status_show_server():
"""Returns status and associated information of show method's backing server.
Returns:
Status and associated information as a dictionary.
"""
status_dict = {}
if isinstance(_current_module.visualize_provider, DashProvider):
status_dict["app_runner_exists"] = True
status_dict.update(_current_module.visualize_provider.app_runner.status())
else:
status_dict["app_runner_exists"] = False
return status_dict
def init_show_server(addr=None, base_url=None, use_relative_links=False):
"""Initializes show method's backing server.
Args:
addr: (ip, port) tuple as address to assign show method to.
base_url: Base url path as string. Used mostly when server is running behind a proxy.
use_relative_links: Use relative links for what's returned to client. Otherwise have absolute links.
Returns:
None.
"""
# If the user uses old methods such as init_show_server, we do an immediate override to the visualization provider.
if isinstance(_current_module.visualize_provider, DashProvider):
_log.info("Stopping previous dash provider")
shutdown_show_server()
_log.info(
"Replacing visualize provider: {} with {}".format(
type(_current_module.visualize_provider), type(DashProvider)
)
)
set_visualize_provider(
DashProvider.from_address(
addr=addr, base_url=base_url, use_relative_links=use_relative_links
)
)
_current_module.visualize_provider.idempotent_start()
addr = (
_current_module.visualize_provider.app_runner.ip,
_current_module.visualize_provider.app_runner.port,
)
_log.info("Running dash provider at {}".format(addr))
return None
def _get_integer_key(key, explanation):
if key is not None and not isinstance(key, int):
series = explanation.selector[explanation.selector.columns[0]]
if key not in series.values: # pragma: no cover
raise ValueError("Key {} not in explanation's selector".format(key))
key = series[series == key].index[0]
return key
def show(explanation, key=-1, **kwargs):
"""Provides an interactive visualization for a given explanation(s).
By default, visualization provided is not preserved when the notebook exits.
Args:
explanation: Either a scalar Explanation or list of Explanations to render as visualization.
key: Specific index of explanation to visualize.
**kwargs: Kwargs passed down to provider's render() call.
Returns:
None.
"""
try:
# Get explanation key
key = _get_integer_key(key, explanation)
# Set default render if needed
if _current_module.visualize_provider is None:
_current_module.visualize_provider = AutoVisualizeProvider()
# Render
_current_module.visualize_provider.render(explanation, key=key, **kwargs)
except Exception as e: # pragma: no cover
_log.error(e, exc_info=True)
raise e
return None
def show_link(explanation, share_tables=None):
"""Provides the backing URL link behind the associated 'show' call for explanation.
Args:
explanation: Either a scalar Explanation or list of Explanations
that would be provided to 'show'.
share_tables: Boolean or dictionary that dictates if Explanations
should all use the same selector as provided to 'show'.
(table used for selecting in the Dashboard).
Returns:
URL as a string.
"""
# Initialize server if needed
if not isinstance(
_current_module.visualize_provider, DashProvider
): # pragma: no cover
init_show_server()
# Register
_current_module.visualize_provider.app_runner.register(
explanation, share_tables=share_tables
)
try:
url = _current_module.visualize_provider.app_runner.display_link(explanation)
return url
except Exception as e: # pragma: no cover
_log.error(e, exc_info=True)
raise e
def preserve(explanation, selector_key=None, file_name=None, **kwargs):
"""Preserves an explanation's visualization for Jupyter cell, or file.
If file_name is not None the following occurs:
- For Plotly figures, saves to HTML using `plot`.
- For dataframes, saves to HTML using `to_html`.
- For strings (html), saves to HTML.
- For Dash components, fails with exception. This is currently not supported.
Args:
explanation: An explanation.
selector_key: If integer, treat as index for explanation. Otherwise, looks up value in first column, gets index.
file_name: If assigned, will save the visualization to this filename.
**kwargs: Kwargs which are passed to the underlying render/export call.
Returns:
None.
"""
if _current_module._preserve_provider is None:
_current_module._preserve_provider = PreserveProvider()
try:
# Get explanation key
key = _get_integer_key(selector_key, explanation)
_current_module._preserve_provider.render(
explanation, key=key, file_name=file_name, **kwargs
)
return None
except Exception as e: # pragma: no cover
_log.error(e, exc_info=True)
raise e
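A hedged end-to-end sketch of the machinery above, assuming interpret's glassbox EBM is installed; `show()` falls back to `AutoVisualizeProvider` when no provider has been set, and `set_visualize_provider` (defined above) can swap that out:

```python
import numpy as np
from interpret import show
from interpret.glassbox import ExplainableBoostingClassifier

X = np.random.rand(200, 3)
y = (X[:, 0] > 0.5).astype(int)
ebm = ExplainableBoostingClassifier().fit(X, y)

# show() resolves a visualize provider (AutoVisualizeProvider by default)
# and dispatches to its render() for the explanation object.
show(ebm.explain_global())
```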

blob_id: 231df3285e1e697268436aceeea9ba34379cd859 | directory_id: 09c87fe780df6d1f9eb33799ed516a0bbd7ab1e3 | content_id: 1f0c5ca2b3aa8211725d1dcd7bf28fccf76e7626
path: /src/layout/line_crosses_shape.py | filename: line_crosses_shape.py | extension: py | length_bytes: 1,643
repo_name: abulka/pynsource | branch_name: refs/heads/master | snapshot_id: 8ad412b85dc1acaeb83d7d34af8cc033c6baba91 | revision_id: 979436525c57fdaeaa832e960985e0406e123587
detected_licenses: [] | license_type: no_license
visit_date: 2023-04-13T12:58:02.911318 | revision_date: 2023-04-11T09:56:32 | committer_date: 2023-04-11T09:56:32
github_id: 32,249,425 | star_events_count: 271 | fork_events_count: 46
gha_license_id: null | gha_event_created_at: 2022-10-10T04:36:57 | gha_created_at: 2015-03-15T07:21:43 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
# line intersecting with a shape?
# testing only
from graph import GraphNode
from .line_intersection import FindLineIntersection
# This function was migrated into GraphNode
def CalcLineIntersectionsWithNode(line_start_point, line_end_point, node):
result = []
for nodeline in node.lines:
result.append(
FindLineIntersection(line_start_point, line_end_point, nodeline[0], nodeline[1])
)
# trim out duplicated and Nones
def remove_duplicates(lzt):
d = {}
for x in lzt:
d[tuple(x)] = x
return list(d.values())
result = [r for r in result if r != None]
result = remove_duplicates(result)
return result
if __name__ == "__main__":
res = FindLineIntersection((0, 0), (200, 200), (10, 10), (10, 50))
assert res == [10.0, 10.0]
res = FindLineIntersection((0, 30), (200, 30), (10, 10), (10, 50))
assert res == [10.0, 30.0]
node = GraphNode("A", 10, 10, 30, 40)
assert len(node.lines) == 4
assert (10, 10) in node.lines[0]
assert (40, 10) in node.lines[0]
assert (40, 10) in node.lines[1]
assert (40, 50) in node.lines[1]
assert (40, 50) in node.lines[2]
assert (10, 50) in node.lines[2]
assert (10, 50) in node.lines[3]
assert (10, 10) in node.lines[3]
res = CalcLineIntersectionsWithNode((0, 0), (200, 200), node)
assert len(res) == 2
assert [10.0, 10.0] in res
assert [40.0, 40.0] in res
res = CalcLineIntersectionsWithNode((20, 0), (20, 1000), node)
assert len(res) == 2
assert [20.0, 10.0] in res
assert [20.0, 50.0] in res
print("Done, tests passed")

blob_id: 5595c450e876f9123fd37677387e54d799d65c21 | directory_id: 76fb0a3cfc9d9362ab29174bd1d55e888ea4d7f6 | content_id: be7a2a2c4459c55a932fdca60893a35308e2bd0a
path: /tfx/examples/custom_components/slack/slack_component/executor.py | filename: executor.py | extension: py | length_bytes: 9,202
repo_name: tensorflow/tfx | branch_name: refs/heads/master | snapshot_id: 0cfc9c55171352ecc98c9dfa8ffe976c689d7073 | revision_id: 1b328504fa08a70388691e4072df76f143631325
detected_licenses: [Apache-2.0] | license_type: permissive
visit_date: 2023-08-30T11:56:50.894497 | revision_date: 2023-08-29T22:47:19 | committer_date: 2023-08-29T22:48:26
github_id: 169,116,405 | star_events_count: 2,116 | fork_events_count: 899
gha_license_id: Apache-2.0 | gha_event_created_at: 2023-09-14T21:51:42 | gha_created_at: 2019-02-04T17:14:36 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of a TFX custom executor integrating with slack.
This executor along with other custom component related code will only serve as
an example and will not be supported by TFX team.
"""
import os
import signal
from typing import Any, Dict, List
import absl
import attr
import slack
from tfx import types
from tfx.components.util import model_utils
from tfx.dsl.components.base import base_executor
from tfx.types import artifact_utils
from tfx.utils import io_utils
# Case-insensitive text messages that are accepted as signal for approving a
# model.
_APPROVE_TEXT = ['lgtm', 'approve']
# Case-insensitive text messages that are accepted as signal for rejecting a
# model.
_DECLINE_TEXT = ['decline', 'reject']
class Timeout:
"""Helper class for handle function timeout."""
def __init__(self, seconds):
self.seconds = seconds
def handle_timeout(self, unused_signum, unused_frame):
msg = 'Did not get model evaluation result in %d seconds' % self.seconds
absl.logging.warning(msg)
raise TimeoutError(msg) # pylint: disable=undefined-variable
def __enter__(self):
signal.signal(signal.SIGALRM, self.handle_timeout)
signal.alarm(self.seconds)
def __exit__(self, unused_type, unused_value, unused_traceback):
signal.alarm(0)
@attr.s(auto_attribs=True, kw_only=True, frozen=True)
class _SlackResponse:
"""User slack response for the approval."""
# Whether the model is approved.
approved: bool
# The user who made that decision.
user_id: str
# The decision message.
message: str
# The slack channel that the decision is made on.
slack_channel_id: str
# The slack thread that the decision is made on.
thread_ts: str
class Executor(base_executor.BaseExecutor):
"""Executor for Slack component."""
def _fetch_slack_blessing(self, slack_token: str, slack_channel_id: str,
model_uri: str) -> _SlackResponse:
"""Send message via Slack channel and wait for response.
    When the bot sends a message to the channel, the user should reply in the
    thread with "approve" or "lgtm" for approval, or "decline" / "reject" to
    decline.
    This example uses the Slack Real Time Messaging (RTM) API, which is only
    available for **classic Slack bots** (https://api.slack.com/rtm). (The
    Events API requires a listening server endpoint, which is not easy to
    integrate with TFX pipelines.)
Args:
      slack_token: Token used to set up the Slack connection for sending and
        receiving messages.
slack_channel_id: The id of the Slack channel to send and receive
messages.
model_uri: The URI of the model waiting for human review.
Returns:
A _SlackResponse instance.
Raises:
ConnectionError:
When connection to slack server cannot be established.
"""
# pylint: disable=unused-argument, unused-variable
rtm_client = slack.RTMClient(token=slack_token)
thread_ts = None
result = None
@slack.RTMClient.run_on(event='hello')
def on_hello(web_client, **payload):
nonlocal thread_ts
resp = web_client.chat_postMessage(
channel=slack_channel_id,
text=(f'Please review the model in the following URI: {model_uri}\n'
f'Reply in thread by `{_APPROVE_TEXT}` for approval, '
f'or `{_DECLINE_TEXT}` for decline.'))
thread_ts = resp.data['ts']
@slack.RTMClient.run_on(event='message')
def on_message(data, rtm_client, web_client, **payload):
nonlocal result
if (data.get('channel') != slack_channel_id
or data.get('thread_ts') != thread_ts
or data.get('user') is None
or data.get('subtype') == 'bot_message'):
        # Not a relevant user message.
return
user_reply = data['text'].lower()
if user_reply in _APPROVE_TEXT:
absl.logging.info('User %s approved the model at %s',
data['user'], model_uri)
rtm_client.stop()
result = _SlackResponse(
approved=True,
user_id=data['user'],
message=data['text'],
slack_channel_id=slack_channel_id,
thread_ts=thread_ts)
elif user_reply in _DECLINE_TEXT:
absl.logging.info('User %s declined the model at %s',
data['user'], model_uri)
rtm_client.stop()
result = _SlackResponse(
approved=False,
user_id=data['user'],
message=data['text'],
slack_channel_id=slack_channel_id,
thread_ts=thread_ts)
else:
web_client.chat_postMessage(
channel=slack_channel_id,
thread_ts=thread_ts,
text=(f'Unrecognized text "{data["text"]}".\n'
f'Please reply in thread by `{_APPROVE_TEXT}` for approval, '
f'or `{_DECLINE_TEXT}` for decline.'))
    absl.logging.info('Will start listening for user Slack response.')
rtm_client.start()
absl.logging.info('User reply: %s', result)
return result
def Do(self, input_dict: Dict[str, List[types.Artifact]],
output_dict: Dict[str, List[types.Artifact]],
exec_properties: Dict[str, Any]) -> None:
"""Get human review result on a model through Slack channel.
Args:
input_dict: Input dict from input key to a list of artifacts, including:
- model_export: exported model from trainer.
- model_blessing: model blessing path from evaluator.
output_dict: Output dict from key to a list of artifacts, including:
- slack_blessing: model blessing result.
exec_properties: A dict of execution properties, including:
- slack_token: Token used to setup connection with slack server.
- slack_channel_id: The id of the Slack channel to send and receive
messages.
        - timeout_sec: How long to wait for a response, in seconds.
Returns:
None
Raises:
TimeoutError:
When there is no decision made within timeout_sec.
ConnectionError:
When connection to slack server cannot be established.
"""
self._log_startup(input_dict, output_dict, exec_properties)
# Fetch execution properties from exec_properties dict.
slack_token = exec_properties['slack_token']
slack_channel_id = exec_properties['slack_channel_id']
timeout_sec = exec_properties['timeout_sec']
# Fetch input URIs from input_dict.
model_export_uri = artifact_utils.get_single_uri(input_dict['model'])
model_blessing = artifact_utils.get_single_instance(
input_dict['model_blessing'])
# Fetch output artifact from output_dict.
slack_blessing = artifact_utils.get_single_instance(
output_dict['slack_blessing'])
# We only consider a model as blessed if both of the following conditions
# are met:
# - The model is blessed by evaluator. This is determined by looking
# for file named 'BLESSED' from the output from Evaluator.
# - The model is blessed by a human reviewer. This logic is in
# _fetch_slack_blessing().
slack_response = None
with Timeout(timeout_sec):
if model_utils.is_model_blessed(model_blessing):
slack_response = self._fetch_slack_blessing(slack_token,
slack_channel_id,
model_export_uri)
# If model is blessed, write an empty file named 'BLESSED' in the assigned
# output path. Otherwise, write an empty file named 'NOT_BLESSED' instead.
if slack_response and slack_response.approved:
io_utils.write_string_file(
os.path.join(slack_blessing.uri, 'BLESSED'), '')
slack_blessing.set_int_custom_property('blessed', 1)
else:
io_utils.write_string_file(
os.path.join(slack_blessing.uri, 'NOT_BLESSED'), '')
slack_blessing.set_int_custom_property('blessed', 0)
if slack_response:
slack_blessing.set_string_custom_property('slack_decision_maker',
slack_response.user_id)
slack_blessing.set_string_custom_property('slack_decision_message',
slack_response.message)
slack_blessing.set_string_custom_property('slack_decision_channel',
slack_response.slack_channel_id)
slack_blessing.set_string_custom_property('slack_decision_thread',
slack_response.thread_ts)
absl.logging.info('Blessing result written to %s.', slack_blessing.uri)
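# A minimal sketch (not part of the original executor; the helper name is
# hypothetical) of the reply-classification convention used in on_message()
# above, assuming the same _APPROVE_TEXT / _DECLINE_TEXT lists. It can be
# handy for unit-testing the matching logic without a live Slack connection.
def _classify_reply(text: str):
  """Returns True for approval, False for decline, None if unrecognized."""
  reply = text.lower()
  if reply in _APPROVE_TEXT:
    return True
  if reply in _DECLINE_TEXT:
    return False
  return None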
|
8dc96b519573d9326f171028d0ec5d69b0019e3a
|
0c239c1de121e89281fe5cb3cdb473437acbbb85
|
/sahara/tests/unit/base.py
|
7954d0d2712af20ea1c08fd593a0314c5874b7cd
|
[
"Apache-2.0"
] |
permissive
|
openstack/sahara
|
6fdee5d633cf53219e77d6b7bc7f92712f158b3e
|
a806a536b623b4ce1a345d5718505a5f04c987f4
|
refs/heads/master
| 2023-08-30T02:09:07.178932
| 2023-08-09T05:48:53
| 2023-08-09T05:48:53
| 13,348,302
| 165
| 120
|
Apache-2.0
| 2021-01-28T06:06:41
| 2013-10-05T16:26:15
|
Python
|
UTF-8
|
Python
| false
| false
| 2,331
|
py
|
base.py
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from oslotest import base
from sahara import context
from sahara.db import api as db_api
from sahara import main
from sahara.utils import rpc
class SaharaTestCase(base.BaseTestCase):
def setUp(self):
super(SaharaTestCase, self).setUp()
self.setup_context()
rpc.setup('all-in-one')
def setup_context(self, username="test_user", tenant_id="tenant_1",
auth_token="test_auth_token", tenant_name='test_tenant',
service_catalog=None, **kwargs):
self.addCleanup(context.set_ctx,
context.ctx() if context.has_ctx() else None)
context.set_ctx(context.Context(
username=username, tenant_id=tenant_id,
auth_token=auth_token, service_catalog=service_catalog or {},
tenant_name=tenant_name, **kwargs))
def override_config(self, name, override, group=None):
main.CONF.set_override(name, override, group)
self.addCleanup(main.CONF.clear_override, name, group)
class SaharaWithDbTestCase(SaharaTestCase):
def setUp(self):
super(SaharaWithDbTestCase, self).setUp()
self.override_config('connection', "sqlite://", group='database')
db_api.setup_db()
self.addCleanup(db_api.drop_db)
class _ConsecutiveThreadGroup(context.ThreadGroup):
def __init__(self, _thread_pool_size=1000):
pass
def spawn(self, thread_description, func, *args, **kwargs):
func(*args, **kwargs)
def __enter__(self):
return self
def __exit__(self, *ex):
pass
def mock_thread_group(func):
return mock.patch('sahara.context.ThreadGroup',
new=_ConsecutiveThreadGroup)(func)
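# A minimal usage sketch (hypothetical test case, for illustration only):
# tests that subclass SaharaTestCase get an isolated request context per
# test, and overrides made through override_config() are rolled back via
# addCleanup() when the test finishes.
class ExampleContextTestCase(SaharaTestCase):
    def test_default_context_is_set_up(self):
        ctx = context.ctx()
        self.assertEqual("test_user", ctx.username)
        self.assertEqual("tenant_1", ctx.tenant_id)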
|
9525cfcdc8743a41ec8d35df963b2792e50a4917
|
6c066611b11a8de5e2c22c30cfcc578a4c49edce
|
/GLSL/Channel/Shuffle_AtoRGB_GL/Shuffle_AtoRGB_GL.py
|
674bf7abb2f0ebd32f32bc3ab86944046e2c99a9
|
[] |
no_license
|
NatronGitHub/natron-plugins
|
ad2d9227637b4b86b45f92856fa54d327872a0a6
|
b0c499fb6391024f54be9f26ed41b5cf7475d574
|
refs/heads/master
| 2022-12-12T10:02:20.252222
| 2022-11-30T02:29:04
| 2022-11-30T02:29:04
| 130,576,224
| 332
| 67
| null | 2022-11-30T02:29:05
| 2018-04-22T14:39:29
|
Python
|
UTF-8
|
Python
| false
| false
| 10,605
|
py
|
Shuffle_AtoRGB_GL.py
|
# -*- coding: utf-8 -*-
# DO NOT EDIT THIS FILE
# This file was automatically generated by Natron PyPlug exporter version 10.
# Hand-written code should be added in a separate file named Shuffle_AtoRGB_GLExt.py
# See http://natron.readthedocs.org/en/master/devel/groups.html#adding-hand-written-code-callbacks-etc
# Note that Viewers are never exported
import NatronEngine
import sys
# Try to import the extensions file where callbacks and hand-written code should be located.
try:
from Shuffle_AtoRGB_GLExt import *
except ImportError:
pass
def getPluginID():
return "natron.community.plugins.Shuffle_AtoRGB_GL"
def getLabel():
return "Shuffle_AtoRGB_GL"
def getVersion():
return 1
def getIconPath():
return "Shuffle_AtoRGB_GL.png"
def getGrouping():
return "Community/GLSL/Channel"
def getPluginDescription():
return "Copy alpha to RGB."
def createInstance(app,group):
# Create all nodes in the group
# Create the parameters of the group node the same way we did for all internal nodes
lastNode = group
lastNode.setColor(0.5882, 0.1255, 0.3255)
# Create the user parameters
lastNode.Credits = lastNode.createPageParam("Credits", "Credits")
param = lastNode.createStringParam("separator19", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.separator19 = param
del param
param = lastNode.createStringParam("separator20", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.separator20 = param
del param
param = lastNode.createSeparatorParam("line02", "Shuffle_AtoRGB_GL v1.0")
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setPersistent(False)
param.setEvaluateOnChange(False)
lastNode.line02 = param
del param
param = lastNode.createStringParam("separator21", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.separator21 = param
del param
param = lastNode.createStringParam("separator22", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.separator22 = param
del param
param = lastNode.createSeparatorParam("line03", "")
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setPersistent(False)
param.setEvaluateOnChange(False)
lastNode.line03 = param
del param
param = lastNode.createStringParam("separator23", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.separator23 = param
del param
param = lastNode.createStringParam("separator24", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.separator24 = param
del param
param = lastNode.createSeparatorParam("FR", "ShaderToy 0.8.8")
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setPersistent(False)
param.setEvaluateOnChange(False)
lastNode.FR = param
del param
param = lastNode.createStringParam("separator25", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.separator25 = param
del param
param = lastNode.createStringParam("separator26", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.separator26 = param
del param
param = lastNode.createSeparatorParam("conversion", "(Fabrice Fernandez - 2018)")
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setPersistent(False)
param.setEvaluateOnChange(False)
lastNode.conversion = param
del param
param = lastNode.createStringParam("separator27", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.separator27 = param
del param
param = lastNode.createStringParam("separator28", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.separator28 = param
del param
# Refresh the GUI with the newly created parameters
lastNode.setPagesOrder(['Credits', 'Node', 'Settings'])
lastNode.refreshUserParamsGUI()
del lastNode
# Start of node "Output2"
lastNode = app.createNode("fr.inria.built-in.Output", 1, group)
lastNode.setLabel("Output2")
lastNode.setPosition(4139, 3958)
lastNode.setSize(90, 36)
lastNode.setColor(0.7, 0.7, 0.7)
groupOutput2 = lastNode
del lastNode
# End of node "Output2"
# Start of node "Source"
lastNode = app.createNode("fr.inria.built-in.Input", 1, group)
lastNode.setScriptName("Source")
lastNode.setLabel("Source")
lastNode.setPosition(4139, 3646)
lastNode.setSize(90, 36)
lastNode.setColor(0.3, 0.5, 0.2)
groupSource = lastNode
del lastNode
# End of node "Source"
# Start of node "Shadertoy1"
lastNode = app.createNode("net.sf.openfx.Shadertoy", 1, group)
lastNode.setScriptName("Shadertoy1")
lastNode.setLabel("Shadertoy1_2")
lastNode.setPosition(4139, 3798)
lastNode.setSize(90, 36)
lastNode.setColor(0.3, 0.5, 0.2)
groupShadertoy1 = lastNode
param = lastNode.getParam("paramValueFloat0")
if param is not None:
param.setValue(0.44, 0)
del param
param = lastNode.getParam("imageShaderSource")
if param is not None:
param.setValue("\r\n\r\n// iChannel0: B, filter = nearest\r\n// BBox: iChannel0\r\n\r\nvoid mainImage( out vec4 fragColor, in vec2 fragCoord )\r\n{\r\n\r\n\tvec2 uv = fragCoord.xy / iResolution.xy;\r\n\tvec4 source = texture2D(iChannel0, uv);\r\n\r\n\tsource.r = source.a;\r\n\tsource.g = source.a;\r\n\tsource.b = source.a;\r\n\r\n\tfragColor = source;\r\n}\t")
del param
param = lastNode.getParam("mipmap0")
if param is not None:
param.set("linear")
del param
param = lastNode.getParam("wrap0")
if param is not None:
param.set("clamp")
del param
param = lastNode.getParam("inputEnable1")
if param is not None:
param.setValue(False)
del param
param = lastNode.getParam("inputEnable2")
if param is not None:
param.setValue(False)
del param
param = lastNode.getParam("inputEnable3")
if param is not None:
param.setValue(False)
del param
param = lastNode.getParam("bbox")
if param is not None:
param.set("iChannel0")
del param
param = lastNode.getParam("NatronParamFormatChoice")
if param is not None:
param.set("PC_Video")
del param
param = lastNode.getParam("mouseParams")
if param is not None:
param.setValue(False)
del param
param = lastNode.getParam("paramCount")
if param is not None:
param.setValue(1, 0)
del param
param = lastNode.getParam("paramType0")
if param is not None:
param.set("float")
del param
param = lastNode.getParam("paramName0")
if param is not None:
param.setValue("distortion")
del param
param = lastNode.getParam("paramLabel0")
if param is not None:
param.setValue("distorsion :")
del param
param = lastNode.getParam("paramHint0")
if param is not None:
param.setValue("distorsion")
del param
param = lastNode.getParam("paramMinFloat0")
if param is not None:
param.setValue(-10, 0)
del param
param = lastNode.getParam("paramMaxFloat0")
if param is not None:
param.setValue(10, 0)
del param
del lastNode
# End of node "Shadertoy1"
# Now that all nodes are created we can connect them together, restore expressions
groupOutput2.connectInput(0, groupShadertoy1)
groupShadertoy1.connectInput(0, groupSource)
try:
extModule = sys.modules["Shuffle_AtoRGB_GLExt"]
except KeyError:
extModule = None
if extModule is not None and hasattr(extModule ,"createInstanceExt") and hasattr(extModule.createInstanceExt,"__call__"):
extModule.createInstanceExt(app,group)
|
3c071479a8b582263b84655e39192945a1c9dec2
|
64e2c522afd6a87a56e1ce6279299821b251243b
|
/App/Distribute/build_dmg.py
|
7d171c135a78545394f7a7a2ebc3b7f01c0efedc
|
[
"Apache-2.0"
] |
permissive
|
justvanrossum/fontgoggles
|
57a151048e4be41defc63a761808abbdd8e8053e
|
98c56975e81202ca0bdb73716f503a3172915c31
|
refs/heads/master
| 2023-09-02T02:36:02.915460
| 2023-05-23T06:54:29
| 2023-05-23T06:54:29
| 225,787,392
| 367
| 53
|
Apache-2.0
| 2023-09-11T17:27:16
| 2019-12-04T05:34:43
|
Python
|
UTF-8
|
Python
| false
| false
| 1,460
|
py
|
build_dmg.py
|
import os
import subprocess
import sys
import tempfile
appPath = os.path.abspath(sys.argv[1])
appFileName = os.path.basename(appPath)
appName, _ = os.path.splitext(appFileName)
dmgPath = os.path.abspath(sys.argv[2])
if os.path.exists(dmgPath):
os.remove(dmgPath)
with tempfile.TemporaryDirectory() as imgPath:
appOnImagePath = os.path.join(imgPath, appFileName)
# We temporarily _move_ the app, as shutil.copytree() apparently
# invalidates a couple of the app's code signatures :(
os.rename(appPath, appOnImagePath)
try:
os.symlink("/Applications", os.path.join(imgPath, "Applications"))
tmpImagePath = tempfile.mktemp(suffix=".dmg")
try:
createCommand = [
"hdiutil", "create", "-fs", "HFS+",
"-size", "200m",
"-srcfolder", imgPath,
"-volname", appName,
"-format", "UDZO",
"-quiet",
tmpImagePath,
]
subprocess.run(createCommand, check=True)
convertCommand = [
"hdiutil", "convert", "-format", "UDZO", "-imagekey", "zlib-level=9",
"-quiet",
"-o", dmgPath, tmpImagePath,
]
subprocess.run(convertCommand, check=True)
finally:
if os.path.exists(tmpImagePath):
os.remove(tmpImagePath)
finally:
os.rename(appOnImagePath, appPath)
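# Usage sketch (hypothetical paths): the script expects the built .app bundle
# and the destination .dmg as its two positional arguments, e.g.
#
#   python build_dmg.py dist/FontGoggles.app dist/FontGoggles.dmg
#
# Note that the app bundle is moved (not copied) into the temporary image
# folder and moved back in the finally block, so a hard-interrupted run can
# leave it under the temporary directory.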
|
db38bb57faf018e512a3383c7bf46860701521bc
|
88d555a009f9075e59177fac70036892f397b439
|
/bin/borzoi_bench_trip_folds.py
|
2c24e638b274422763763d1ed8d555a58c7d09e0
|
[
"Apache-2.0"
] |
permissive
|
calico/basenji
|
f9f406971d355dda81821dcf274696a7d27e332d
|
615b9eec8a591783b16d959029ddad08edae853d
|
refs/heads/master
| 2023-09-04T11:14:15.620786
| 2023-07-27T00:05:13
| 2023-07-27T00:05:13
| 96,346,574
| 326
| 143
|
Apache-2.0
| 2023-08-16T00:36:32
| 2017-07-05T17:54:18
|
Python
|
UTF-8
|
Python
| false
| false
| 7,202
|
py
|
borzoi_bench_trip_folds.py
|
#!/usr/bin/env python
# Copyright 2019 Calico LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from optparse import OptionParser, OptionGroup
import glob
import h5py
import json
import pdb
import os
import shutil
import sys
import numpy as np
import pandas as pd
import slurm
#import util
from basenji_test_folds import stat_tests
"""
borzoi_bench_trip_folds.py
Benchmark Basenji model replicates on the TRIP prediction task.
"""
################################################################################
# main
################################################################################
def main():
usage = 'usage: %prog [options] <params_file> <data_dir> <promoter_file> <insertions_file>'
parser = OptionParser(usage)
# trip
trip_options = OptionGroup(parser, 'borzoi_trip.py options')
trip_options.add_option('-f', dest='genome_fasta',
default='%s/data/hg38.fa' % os.environ['BASENJIDIR'],
help='Genome FASTA for sequences [Default: %default]')
trip_options.add_option('-o',dest='out_dir',
default='trip',
help='Output directory for tables and plots [Default: %default]')
trip_options.add_option('--site', dest='site',
default=False, action='store_true',
help='Return the insertion site without the promoter [Default: %default]')
trip_options.add_option('--reporter', dest='reporter',
default=False, action='store_true',
help='Insert the flanking piggyback reporter with the promoter [Default: %default]')
trip_options.add_option('--reporter_bare', dest='reporter_bare',
default=False, action='store_true',
help='Insert the flanking piggyback reporter with the promoter (no terminal repeats) [Default: %default]')
trip_options.add_option('--rc', dest='rc',
default=False, action='store_true',
help='Average forward and reverse complement predictions [Default: %default]')
trip_options.add_option('--shifts', dest='shifts',
default='0', type='str',
help='Ensemble prediction shifts [Default: %default]')
trip_options.add_option('-t', dest='targets_file',
default=None, type='str',
help='File specifying target indexes and labels in table format')
parser.add_option_group(trip_options)
# cross-fold
fold_options = OptionGroup(parser, 'cross-fold options')
fold_options.add_option('-c', dest='crosses',
default=1, type='int',
help='Number of cross-fold rounds [Default:%default]')
fold_options.add_option('-d', dest='data_head',
default=None, type='int',
help='Index for dataset/head [Default: %default]')
fold_options.add_option('-e', dest='conda_env',
default='tf210',
help='Anaconda environment [Default: %default]')
fold_options.add_option('--name', dest='name',
default='trip', help='SLURM name prefix [Default: %default]')
fold_options.add_option('--max_proc', dest='max_proc',
default=None, type='int',
help='Maximum concurrent processes [Default: %default]')
fold_options.add_option('-q', dest='queue',
default='geforce',
help='SLURM queue on which to run the jobs [Default: %default]')
fold_options.add_option('-r', dest='restart',
default=False, action='store_true',
help='Restart a partially completed job [Default: %default]')
parser.add_option_group(fold_options)
(options, args) = parser.parse_args()
if len(args) != 4:
print(len(args))
print(args)
parser.error('Must provide parameters file, cross-fold directory, TRIP promoter sequences, and TRIP insertion sites')
else:
params_file = args[0]
exp_dir = args[1]
promoters_file = args[2]
insertions_file = args[3]
#######################################################
# prep work
# count folds
num_folds = 0
fold0_dir = '%s/f%dc0' % (exp_dir, num_folds)
model_file = '%s/train/model_best.h5' % fold0_dir
if options.data_head is not None:
model_file = '%s/train/model%d_best.h5' % (fold0_dir, options.data_head)
while os.path.isfile(model_file):
num_folds += 1
fold0_dir = '%s/f%dc0' % (exp_dir, num_folds)
model_file = '%s/train/model_best.h5' % fold0_dir
if options.data_head is not None:
model_file = '%s/train/model%d_best.h5' % (fold0_dir, options.data_head)
print('Found %d folds' % num_folds)
if num_folds == 0:
exit(1)
################################################################
# TRIP prediction jobs
# command base
cmd_base = '. /home/drk/anaconda3/etc/profile.d/conda.sh;'
cmd_base += ' conda activate %s;' % options.conda_env
cmd_base += ' echo $HOSTNAME;'
jobs = []
for ci in range(options.crosses):
for fi in range(num_folds):
it_dir = '%s/f%dc%d' % (exp_dir, fi, ci)
name = '%s-f%dc%d' % (options.name, fi, ci)
# update output directory
it_out_dir = '%s/%s' % (it_dir, options.out_dir)
os.makedirs(it_out_dir, exist_ok=True)
model_file = '%s/train/model_best.h5' % it_dir
if options.data_head is not None:
model_file = '%s/train/model%d_best.h5' % (it_dir, options.data_head)
cmd_fold = '%s time borzoi_trip.py %s %s %s %s' % (cmd_base, params_file, model_file, promoters_file, insertions_file)
# TRIP job
job_out_dir = it_out_dir
if not options.restart or not os.path.isfile('%s/preds.h5'%job_out_dir):
cmd_job = cmd_fold
cmd_job += ' %s' % options_string(options, trip_options, job_out_dir)
j = slurm.Job(cmd_job, name,
'%s.out'%job_out_dir, '%s.err'%job_out_dir,
queue=options.queue, gpu=1,
mem=60000, time='7-0:0:0')
jobs.append(j)
slurm.multi_run(jobs, max_proc=options.max_proc, verbose=True,
launch_sleep=10, update_sleep=60)
def options_string(options, group_options, rep_dir):
options_str = ''
for opt in group_options.option_list:
opt_str = opt.get_opt_string()
opt_value = options.__dict__[opt.dest]
    # wrap asterisks in ""
if type(opt_value) == str and opt_value.find('*') != -1:
opt_value = '"%s"' % opt_value
# no value for bools
elif type(opt_value) == bool:
if not opt_value:
opt_str = ''
opt_value = ''
# skip Nones
elif opt_value is None:
opt_str = ''
opt_value = ''
# modify
elif opt.dest == 'out_dir':
opt_value = rep_dir
options_str += ' %s %s' % (opt_str, opt_value)
return options_str
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
main()
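# A small illustration (hypothetical option values) of what options_string()
# produces for each per-fold job: every option in the group is appended as
# ' <flag> <value>', values containing '*' are quoted, None values and False
# booleans are dropped, and out_dir is rewritten to the per-fold directory.
# For example, a call like
#   options_string(options, trip_options, 'exp/f0c0/trip')
# might yield something along the lines of
#   ' -f /data/hg38.fa -o exp/f0c0/trip --shifts 0 -t targets.txt'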
|
ea0328e7a848cc7b946fa1bb127e78c1e9c1401a
|
6c2dbc8d4e536220fb3b1cc72aa8104aea8b0698
|
/tests/test_issues/test_bot_context_is_usable.py
|
f3db6b9976f325a63fa89af81c95e34af158754a
|
[
"MIT"
] |
permissive
|
aiogram/aiogram
|
f8f98a0beb63bd4d93ea810638d5792569bf354b
|
04bd0c9e7c5421c060183b90d515050f41377bc1
|
refs/heads/dev-3.x
| 2023-08-30T21:20:13.018174
| 2023-08-28T23:01:54
| 2023-08-28T23:01:54
| 111,210,856
| 4,287
| 1,250
|
MIT
| 2023-09-10T21:34:03
| 2017-11-18T14:11:13
|
Python
|
UTF-8
|
Python
| false
| false
| 880
|
py
|
test_bot_context_is_usable.py
|
from datetime import datetime
from aiogram import Dispatcher, Router
from aiogram.enums import ChatType
from aiogram.filters import Command
from aiogram.methods import SendMessage
from aiogram.types import Chat, Message, Update, User
from tests.mocked_bot import MockedBot
issue_router = Router()
@issue_router.message(Command("test"))
async def my_handler(message: Message):
await message.answer("PASS")
return True
async def test_something(bot: MockedBot):
dp = Dispatcher()
dp.include_router(issue_router)
bot.add_result_for(method=SendMessage, ok=True)
chat = Chat(id=666, type=ChatType.PRIVATE)
user = User(id=666, is_bot=False, first_name="User")
msg = Message(message_id=1, date=datetime.now(), from_user=user, chat=chat, text="/test")
result = await dp.feed_update(bot, Update(message=msg, update_id=1))
assert result is True
|
2c91652cf5c20dae2c0c6523cac1b98e24ba190a
|
92ae735d5dc6f6a094daedbd32614e714d0b8c4a
|
/newsletter/tests/__init__.py
|
ff7f59c2c71a65cae5e7b39bf99d8303758f0c71
|
[
"MIT"
] |
permissive
|
Williano/Final-Senior-Year-Project-
|
3b01ac9fd85753720b01c2245cf9b71648aad35d
|
4bd988575537b37b5cf852b616d3db5666c95e7f
|
refs/heads/master
| 2023-08-07T16:11:42.778492
| 2023-06-05T04:59:06
| 2023-06-05T04:59:06
| 121,346,340
| 173
| 60
|
MIT
| 2023-06-05T04:59:07
| 2018-02-13T06:17:16
|
Python
|
UTF-8
|
Python
| false
| false
| 588
|
py
|
__init__.py
|
from .test_web import (
AnonymousNewsletterListTestCase, UserNewsletterListTestCase,
SubscribeTestCase, UserSubscribeTestCase,
InvisibleAnonymousSubscribeTestCase, InvisibleUserSubscribeTestCase,
AnonymousSubscribeTestCase, ArchiveTestcase,
ActivationEmailSentUrlTestCase, ActionActivatedUrlTestCase
)
from .test_mailing import (
MailingTestCase, ArticleTestCase, CreateSubmissionTestCase,
SubmitSubmissionTestCase, SubscriptionTestCase, HtmlEmailsTestCase,
TextOnlyEmailsTestCase, TemplateOverridesTestCase
)
from .test_settings import SettingsTestCase
|
215ea68623c635403d7cebec280f23726f26f3d6
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/homeassistant/components/tibber/__init__.py
|
6bd68e17c4d2722bff3fdd7cfbffd88636e478f7
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 2,719
|
py
|
__init__.py
|
"""Support for Tibber."""
import asyncio
import logging
import aiohttp
import tibber
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_ACCESS_TOKEN,
CONF_NAME,
EVENT_HOMEASSISTANT_STOP,
Platform,
)
from homeassistant.core import Event, HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import discovery
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import ConfigType
from homeassistant.util import dt as dt_util
from .const import DATA_HASS_CONFIG, DOMAIN
PLATFORMS = [Platform.SENSOR]
CONFIG_SCHEMA = cv.removed(DOMAIN, raise_if_present=False)
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the Tibber component."""
hass.data[DATA_HASS_CONFIG] = config
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up a config entry."""
tibber_connection = tibber.Tibber(
access_token=entry.data[CONF_ACCESS_TOKEN],
websession=async_get_clientsession(hass),
time_zone=dt_util.DEFAULT_TIME_ZONE,
)
hass.data[DOMAIN] = tibber_connection
async def _close(event: Event) -> None:
await tibber_connection.rt_disconnect()
entry.async_on_unload(hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _close))
try:
await tibber_connection.update_info()
except (
asyncio.TimeoutError,
aiohttp.ClientError,
tibber.RetryableHttpException,
) as err:
raise ConfigEntryNotReady("Unable to connect") from err
except tibber.InvalidLogin as exp:
_LOGGER.error("Failed to login. %s", exp)
return False
except tibber.FatalHttpException:
return False
await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)
    # Set up the notify platform; the notify component has no config entry
    # support yet, so discovery has to be used to load the platform.
hass.async_create_task(
discovery.async_load_platform(
hass,
Platform.NOTIFY,
DOMAIN,
{CONF_NAME: DOMAIN},
hass.data[DATA_HASS_CONFIG],
)
)
return True
async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(
config_entry, PLATFORMS
)
if unload_ok:
tibber_connection = hass.data[DOMAIN]
await tibber_connection.rt_disconnect()
return unload_ok
|
2976e04faf5bca8b5b452e4b2c15a8540eddc9dc
|
a85c048a4ae820beb2bc265d1845e23842fc8c2a
|
/learning/pytorch/models/train.py
|
4b8d5368334e756cd1f563376e52f19032f85a0d
|
[
"MIT"
] |
permissive
|
ithemal/Ithemal
|
e549856538c7b1f2c50d0f40b51b9bb97baf6379
|
b3c39a8942b8b3d92c0fa81815b34fa9b6cbe683
|
refs/heads/master
| 2021-12-10T11:50:00.058462
| 2021-11-30T16:18:30
| 2021-11-30T16:18:30
| 151,625,735
| 124
| 33
|
MIT
| 2020-06-26T15:11:01
| 2018-10-04T19:33:52
|
Python
|
UTF-8
|
Python
| false
| false
| 11,676
|
py
|
train.py
|
import sys
import os
sys.path.append(os.path.join(os.environ['ITHEMAL_HOME'], 'learning', 'pytorch'))
import torch
import torch.nn as nn
from enum import Enum
import common_libs.utilities as ut
import data.data_cost as dt
import torch.autograd as autograd
import torch.optim as optim
import math
import numpy as np
import os
import gc
import psutil
from tqdm import tqdm
import time
import torch
from torch import nn
import utils.messages as messages
import random
from typing import Any, Callable, Dict, IO, List, Optional, Tuple
from . import model_utils
def memReport():
# type: () -> None
num_obj = 0
for obj in gc.get_objects():
if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):
num_obj += 1
    print('num_obj ' + str(num_obj))
def cpuStats():
# type: () -> None
print(sys.version)
print(psutil.cpu_percent())
print(psutil.virtual_memory()) # physical memory usage
pid = os.getpid()
py = psutil.Process(pid)
memoryUse = py.memory_info()[0] / 2. ** 30 # memory use in GB...I think
print('memory GB:', memoryUse)
class PredictionType(Enum):
CLASSIFICATION = 1
REGRESSION = 2
class OptimizerType(Enum):
ADAM_PRIVATE = 1
ADAM_SHARED = 2
SGD = 3
class Train():
"""
Performs training and validation for the models listed above
"""
def __init__(self,
model,
data,
typ,
loss_fn,
num_losses,
batch_size = 1000,
tolerance = 25.,
lr = 0.001,
momentum = 0.9,
nesterov=False,
clip = 2.,
opt = OptimizerType.SGD,
weight_decay = 0.,
predict_log = False,
):
# type: (nn.Module, dt.Data, PredictionType, Callable[[torch.tensor, torch.tensor], torch.tensor], int, int, float, float, float, bool, Optional[float], OptimizerType, float, bool) -> None
self.model = model
self.typ = typ
self.data = data
self.lr = lr
self.clip = clip
self.predict_log = predict_log
self.opt_type = opt
if opt == OptimizerType.SGD:
self.optimizer = optim.SGD(self.model.parameters(), lr=lr, momentum=momentum, weight_decay=weight_decay, nesterov=nesterov)
elif opt == OptimizerType.ADAM_PRIVATE or opt == OptimizerType.ADAM_SHARED:
self.optimizer = optim.Adam(self.model.parameters(), weight_decay=weight_decay, lr=lr)
if opt == OptimizerType.ADAM_SHARED:
for param in self.optimizer.param_groups[0]['params']:
param.share_memory_()
else:
raise ValueError('unknown optimizer...')
#training parameters
self.partition = (0, len(self.data.train))
self.batch_size = batch_size
#correctness
self.tolerance = tolerance
#for classification
self.correct = 0
#functions
self.loss_fn = loss_fn
self.num_losses = num_losses
self.rank = 0
self.last_save_time = 0
def dump_shared_params(self):
# type: () -> Dict[str, object]
if self.opt_type == OptimizerType.ADAM_SHARED:
return model_utils.dump_shared_params(self.optimizer)
else:
return {}
def load_shared_params(self, params):
# type: (Dict[str, object]) -> None
if self.opt_type == OptimizerType.ADAM_SHARED:
model_utils.load_shared_params(self.optimizer, params)
"""
Print routines for predicted and target values.
"""
def print_final(self,f,x,y):
# type: (IO[str], np.array, np.array) -> None
if x.shape != ():
size = x.shape[0]
for i in range(size):
f.write('%f,%f ' % (x[i],y[i]))
f.write('\n')
else:
f.write('%f,%f\n' % (x,y))
def print_max(self,f,x,y):
# type: (IO[str], np.array, np.array) -> None
x = torch.argmax(x)
y = torch.argmax(y)
f.write('%d,%d\n' % (x.item(),y.item()))
"""
correct example counting functions
"""
def correct_classification(self,x,y):
# type: (torch.tensor, torch.tensor) -> None
x = torch.argmax(x) + 1
y = torch.argmax(y) + 1
percentage = torch.abs(x - y) * 100.0 / y
if percentage < self.tolerance:
self.correct += 1
def correct_regression(self,x,y):
# type: (torch.tensor, torch.tensor) -> None
if x.shape != ():
x = x[-1]
y = y[-1]
percentage = torch.abs(x - y) * 100.0 / (y + 1e-3)
if percentage < self.tolerance:
self.correct += 1
def save_checkpoint(self, epoch, batch_num, filename, **rest):
# type: (int, int, str, **Any) -> None
state_dict = {
'epoch': epoch,
'batch_num': batch_num,
'model': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
}
for (k, v) in rest.items():
state_dict[k] = v
# ensure directory exists
try:
os.makedirs(os.path.dirname(filename))
except OSError:
pass
torch.save(state_dict, filename)
def load_checkpoint(self, filename):
# type: (str) -> Dict[str, Any]
state_dict = torch.load(filename)
self.model.load_state_dict(state_dict['model'])
try:
self.optimizer.load_state_dict(state_dict['optimizer'])
except ValueError:
print('Couldnt load optimizer!')
return state_dict
def __call__(self, rank, partition, report_loss_fn=None):
# type: (int, Tuple[int, int], Optional[Callable[[messages.Message], None]]) -> None
self.rank = rank
self.partition = partition
self.train(report_loss_fn=report_loss_fn)
def get_target(self, datum):
# type: (dt.DataItem) -> torch.tensor
target = torch.FloatTensor([datum.y]).squeeze()
if self.predict_log:
target.log_()
return target
"""
    Training loop - TODO: make the average loss computation general
"""
def train(self, report_loss_fn=None):
# type: (Optional[Callable[[messages.Message], None]]) -> None
(partition_start, partition_end) = self.partition
def report_trainer_death(idx):
# type: (int) -> None
if report_loss_fn is not None:
report_loss_fn(messages.TrainerDeathMessage(
(idx + self.batch_size, partition_end),
))
for idx in range(partition_start, partition_end, self.batch_size):
batch_loss_sum = np.zeros(self.num_losses)
self.correct = 0
self.optimizer.zero_grad()
loss_tensor = torch.FloatTensor([0]).squeeze()
batch = self.data.train[idx:idx+self.batch_size]
if not batch:
continue
for datum in batch:
output = self.model(datum)
if torch.isnan(output).any():
report_trainer_death(idx)
return
#target as a tensor
target = self.get_target(datum)
#get the loss value
if self.loss_fn:
losses_opt = self.loss_fn(output, target)
if self.predict_log and self.loss_fn:
losses_rep = self.loss_fn(output.exp(), target.exp())
else:
losses_rep = losses_opt
#check how many are correct
if self.typ == PredictionType.CLASSIFICATION:
self.correct_classification(output, target)
elif self.typ == PredictionType.REGRESSION:
self.correct_regression(output, target)
#accumulate the losses
for class_idx, (loss_opt, loss_rep) in enumerate(zip(losses_opt, losses_rep)):
loss_tensor += loss_opt
l = loss_rep.item()
batch_loss_sum[class_idx] += l
batch_loss_avg = batch_loss_sum / len(batch)
#propagate gradients
loss_tensor.backward()
#clip the gradients
if self.clip is not None:
torch.nn.utils.clip_grad_norm(self.model.parameters(), self.clip)
for param in self.model.parameters():
if param.grad is None:
continue
if torch.isnan(param.grad).any():
report_trainer_death(idx)
return
#optimizer step to update parameters
self.optimizer.step()
# get those tensors out of here!
for datum in batch:
self.model.remove_refs(datum)
if report_loss_fn is not None:
report_loss_fn(messages.LossReportMessage(
self.rank,
batch_loss_avg[0],
len(batch),
))
def set_lr(self, lr):
# type: (float) -> None
self.lr = lr
for param_group in self.optimizer.param_groups:
param_group['lr'] = self.lr
"""
Validation with a test set
"""
def validate(self, resultfile, loadfile=None):
# type: (str, Optional[str]) -> Tuple[List[List[float]], List[List[float]]]
if loadfile is not None:
            print('loaded from checkpoint for validation...')
self.load_checkpoint(loadfile)
f = open(resultfile,'w')
self.correct = 0
average_loss = [0] * self.num_losses
actual = []
predicted = []
for j, item in enumerate(tqdm(self.data.test)):
#print len(item.x)
output = self.model(item)
target = self.get_target(item)
if self.predict_log:
output.exp_()
target.exp_()
#get the target and predicted values into a list
if self.typ == PredictionType.CLASSIFICATION:
actual.append((torch.argmax(target) + 1).data.numpy().tolist())
predicted.append((torch.argmax(output) + 1).data.numpy().tolist())
else:
actual.append(target.data.numpy().tolist())
predicted.append(output.data.numpy().tolist())
self.print_final(f, output, target)
losses = self.loss_fn(output, target)
if self.typ == PredictionType.CLASSIFICATION:
self.correct_classification(output, target)
else:
self.correct_regression(output, target)
#accumulate the losses
loss = torch.zeros(1)
for c,l in enumerate(losses):
loss += l
average_loss[c] = (average_loss[c] * j + l.item()) / (j + 1)
if j % (len(self.data.test) / 100) == 0:
p_str = str(j) + ' '
for av in average_loss:
p_str += str(av) + ' '
p_str += str(self.correct) + ' '
                print(p_str)
            # remove refs so the gc can free unwanted tensors
self.model.remove_refs(item)
for loss in average_loss:
f.write('loss - %f\n' % (loss))
f.write('%f,%f\n' % (self.correct, len(self.data.test)))
        print(average_loss, self.correct, len(self.data.test))
f.close()
return (actual, predicted)
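# A minimal standalone sketch (illustrative name, not part of the original
# module) of the tolerance test used by Train.correct_regression() above: a
# prediction counts as "correct" when its relative error versus the target is
# below `tolerance` percent. Plain floats are used here instead of tensors.
def within_tolerance(pred, target, tolerance=25.0):
    # type: (float, float, float) -> bool
    percentage = abs(pred - target) * 100.0 / (target + 1e-3)
    return percentage < tolerance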
|
59d1f48a414c5fa3c37eea591fce0f46dfebaf6a
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-workspaceapp/huaweicloudsdkworkspaceapp/v1/model/bandwidth.py
|
e12e5b577b9635985865795ab5ddc0749330f083
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 42,585
|
py
|
bandwidth.py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class Bandwidth:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'intelligent_data_transport_flag': 'str',
'total_bandwidth_control_enable': 'bool',
'options': 'TotalBandwidthControlOptions',
'display_bandwidth_control_enable': 'bool',
'display_bandwidth_control_options': 'DisplayBandwidthControlOptions',
'multimedia_bandwidth_control_enable': 'bool',
'multimedia_bandwidth_control_options': 'MultimediaBandwidthControlOptions',
'usb_bandwidth_control_enable': 'bool',
'usb_bandwidth_control_options': 'UsbBandwidthControlOptions',
'pcsc_bandwidth_control_enable': 'bool',
'pcsc_bandwidth_control_options': 'PcscBandwidthControlOptions',
'twain_bandwidth_control_enable': 'bool',
'twain_bandwidth_control_options': 'TwainBandwidthControlOptions',
'printer_bandwidth_control_enable': 'bool',
'printer_bandwidth_control_options': 'PrinterBandwidthControlOptions',
'com_bandwidth_control_enable': 'bool',
'com_bandwidth_control_options': 'ComBandwidthControlOptions',
'file_redirection_bandwidth_control_enable': 'bool',
'file_redirection_bandwidth_control_options': 'FileRedirectionBandwidthControlOptions',
'clipboard_bandwidth_control_enable': 'bool',
'clipboard_bandwidth_control_options': 'ClipboardBandwidthControlOptions',
'secure_channel_bandwidth_control_enable': 'bool',
'secure_channel_bandwidth_control_options': 'SecureChannelBandwidthControlOptions',
'camera_bandwidth_control_enable': 'bool',
'camera_bandwidth_control_options': 'CameraBandwidthControlOptions',
'virtual_channel_bandwidth_control_enable': 'bool',
'virtual_channel_bandwidth_control_options': 'VirtualChannelBandwidthControlOptions'
}
attribute_map = {
'intelligent_data_transport_flag': 'intelligent_data_transport_flag',
'total_bandwidth_control_enable': 'total_bandwidth_control_enable',
'options': 'options',
'display_bandwidth_control_enable': 'display_bandwidth_control_enable',
'display_bandwidth_control_options': 'display_bandwidth_control_options',
'multimedia_bandwidth_control_enable': 'multimedia_bandwidth_control_enable',
'multimedia_bandwidth_control_options': 'multimedia_bandwidth_control_options',
'usb_bandwidth_control_enable': 'usb_bandwidth_control_enable',
'usb_bandwidth_control_options': 'usb_bandwidth_control_options',
'pcsc_bandwidth_control_enable': 'pcsc_bandwidth_control_enable',
'pcsc_bandwidth_control_options': 'pcsc_bandwidth_control_options',
'twain_bandwidth_control_enable': 'twain_bandwidth_control_enable',
'twain_bandwidth_control_options': 'twain_bandwidth_control_options',
'printer_bandwidth_control_enable': 'printer_bandwidth_control_enable',
'printer_bandwidth_control_options': 'printer_bandwidth_control_options',
'com_bandwidth_control_enable': 'com_bandwidth_control_enable',
'com_bandwidth_control_options': 'com_bandwidth_control_options',
'file_redirection_bandwidth_control_enable': 'file_redirection_bandwidth_control_enable',
'file_redirection_bandwidth_control_options': 'file_redirection_bandwidth_control_options',
'clipboard_bandwidth_control_enable': 'clipboard_bandwidth_control_enable',
'clipboard_bandwidth_control_options': 'clipboard_bandwidth_control_options',
'secure_channel_bandwidth_control_enable': 'secure_channel_bandwidth_control_enable',
'secure_channel_bandwidth_control_options': 'secure_channel_bandwidth_control_options',
'camera_bandwidth_control_enable': 'camera_bandwidth_control_enable',
'camera_bandwidth_control_options': 'camera_bandwidth_control_options',
'virtual_channel_bandwidth_control_enable': 'virtual_channel_bandwidth_control_enable',
'virtual_channel_bandwidth_control_options': 'virtual_channel_bandwidth_control_options'
}
def __init__(self, intelligent_data_transport_flag=None, total_bandwidth_control_enable=None, options=None, display_bandwidth_control_enable=None, display_bandwidth_control_options=None, multimedia_bandwidth_control_enable=None, multimedia_bandwidth_control_options=None, usb_bandwidth_control_enable=None, usb_bandwidth_control_options=None, pcsc_bandwidth_control_enable=None, pcsc_bandwidth_control_options=None, twain_bandwidth_control_enable=None, twain_bandwidth_control_options=None, printer_bandwidth_control_enable=None, printer_bandwidth_control_options=None, com_bandwidth_control_enable=None, com_bandwidth_control_options=None, file_redirection_bandwidth_control_enable=None, file_redirection_bandwidth_control_options=None, clipboard_bandwidth_control_enable=None, clipboard_bandwidth_control_options=None, secure_channel_bandwidth_control_enable=None, secure_channel_bandwidth_control_options=None, camera_bandwidth_control_enable=None, camera_bandwidth_control_options=None, virtual_channel_bandwidth_control_enable=None, virtual_channel_bandwidth_control_options=None):
"""Bandwidth
The model defined in huaweicloud sdk
        :param intelligent_data_transport_flag: Intelligent display transport. Values: DISABLE: disabled. ENABLE: enabled. DIAGNOSTIC: diagnostic mode
:type intelligent_data_transport_flag: str
        :param total_bandwidth_control_enable: Whether total bandwidth control is enabled. Values: false: disabled. true: enabled.
:type total_bandwidth_control_enable: bool
:param options:
:type options: :class:`huaweicloudsdkworkspaceapp.v1.TotalBandwidthControlOptions`
        :param display_bandwidth_control_enable: Whether display bandwidth control is enabled. Values: false: disabled. true: enabled.
:type display_bandwidth_control_enable: bool
:param display_bandwidth_control_options:
:type display_bandwidth_control_options: :class:`huaweicloudsdkworkspaceapp.v1.DisplayBandwidthControlOptions`
        :param multimedia_bandwidth_control_enable: Whether multimedia bandwidth control is enabled. Values: false: disabled. true: enabled.
:type multimedia_bandwidth_control_enable: bool
:param multimedia_bandwidth_control_options:
:type multimedia_bandwidth_control_options: :class:`huaweicloudsdkworkspaceapp.v1.MultimediaBandwidthControlOptions`
        :param usb_bandwidth_control_enable: Whether USB bandwidth control is enabled. Values: false: disabled. true: enabled.
:type usb_bandwidth_control_enable: bool
:param usb_bandwidth_control_options:
:type usb_bandwidth_control_options: :class:`huaweicloudsdkworkspaceapp.v1.UsbBandwidthControlOptions`
        :param pcsc_bandwidth_control_enable: Whether PCSC control is enabled. Values: false: disabled. true: enabled.
:type pcsc_bandwidth_control_enable: bool
:param pcsc_bandwidth_control_options:
:type pcsc_bandwidth_control_options: :class:`huaweicloudsdkworkspaceapp.v1.PcscBandwidthControlOptions`
        :param twain_bandwidth_control_enable: Whether TWAIN bandwidth control is enabled. Values: false: disabled. true: enabled.
:type twain_bandwidth_control_enable: bool
:param twain_bandwidth_control_options:
:type twain_bandwidth_control_options: :class:`huaweicloudsdkworkspaceapp.v1.TwainBandwidthControlOptions`
        :param printer_bandwidth_control_enable: Whether printer bandwidth control is enabled. Values: false: disabled. true: enabled.
:type printer_bandwidth_control_enable: bool
:param printer_bandwidth_control_options:
:type printer_bandwidth_control_options: :class:`huaweicloudsdkworkspaceapp.v1.PrinterBandwidthControlOptions`
        :param com_bandwidth_control_enable: Whether serial port (COM) bandwidth control is enabled. Values: false: disabled. true: enabled.
:type com_bandwidth_control_enable: bool
:param com_bandwidth_control_options:
:type com_bandwidth_control_options: :class:`huaweicloudsdkworkspaceapp.v1.ComBandwidthControlOptions`
        :param file_redirection_bandwidth_control_enable: Whether file redirection bandwidth control is enabled. Values: false: disabled. true: enabled.
:type file_redirection_bandwidth_control_enable: bool
:param file_redirection_bandwidth_control_options:
:type file_redirection_bandwidth_control_options: :class:`huaweicloudsdkworkspaceapp.v1.FileRedirectionBandwidthControlOptions`
        :param clipboard_bandwidth_control_enable: Whether clipboard bandwidth control is enabled. Values: false: disabled. true: enabled.
:type clipboard_bandwidth_control_enable: bool
:param clipboard_bandwidth_control_options:
:type clipboard_bandwidth_control_options: :class:`huaweicloudsdkworkspaceapp.v1.ClipboardBandwidthControlOptions`
        :param secure_channel_bandwidth_control_enable: Whether secure channel bandwidth control is enabled. Values: false: disabled. true: enabled.
:type secure_channel_bandwidth_control_enable: bool
:param secure_channel_bandwidth_control_options:
:type secure_channel_bandwidth_control_options: :class:`huaweicloudsdkworkspaceapp.v1.SecureChannelBandwidthControlOptions`
        :param camera_bandwidth_control_enable: Whether camera bandwidth control is enabled. Values: false: disabled. true: enabled.
:type camera_bandwidth_control_enable: bool
:param camera_bandwidth_control_options:
:type camera_bandwidth_control_options: :class:`huaweicloudsdkworkspaceapp.v1.CameraBandwidthControlOptions`
        :param virtual_channel_bandwidth_control_enable: Whether virtual channel bandwidth control is enabled. Values: false: disabled. true: enabled.
:type virtual_channel_bandwidth_control_enable: bool
:param virtual_channel_bandwidth_control_options:
:type virtual_channel_bandwidth_control_options: :class:`huaweicloudsdkworkspaceapp.v1.VirtualChannelBandwidthControlOptions`
"""
self._intelligent_data_transport_flag = None
self._total_bandwidth_control_enable = None
self._options = None
self._display_bandwidth_control_enable = None
self._display_bandwidth_control_options = None
self._multimedia_bandwidth_control_enable = None
self._multimedia_bandwidth_control_options = None
self._usb_bandwidth_control_enable = None
self._usb_bandwidth_control_options = None
self._pcsc_bandwidth_control_enable = None
self._pcsc_bandwidth_control_options = None
self._twain_bandwidth_control_enable = None
self._twain_bandwidth_control_options = None
self._printer_bandwidth_control_enable = None
self._printer_bandwidth_control_options = None
self._com_bandwidth_control_enable = None
self._com_bandwidth_control_options = None
self._file_redirection_bandwidth_control_enable = None
self._file_redirection_bandwidth_control_options = None
self._clipboard_bandwidth_control_enable = None
self._clipboard_bandwidth_control_options = None
self._secure_channel_bandwidth_control_enable = None
self._secure_channel_bandwidth_control_options = None
self._camera_bandwidth_control_enable = None
self._camera_bandwidth_control_options = None
self._virtual_channel_bandwidth_control_enable = None
self._virtual_channel_bandwidth_control_options = None
self.discriminator = None
if intelligent_data_transport_flag is not None:
self.intelligent_data_transport_flag = intelligent_data_transport_flag
if total_bandwidth_control_enable is not None:
self.total_bandwidth_control_enable = total_bandwidth_control_enable
if options is not None:
self.options = options
if display_bandwidth_control_enable is not None:
self.display_bandwidth_control_enable = display_bandwidth_control_enable
if display_bandwidth_control_options is not None:
self.display_bandwidth_control_options = display_bandwidth_control_options
if multimedia_bandwidth_control_enable is not None:
self.multimedia_bandwidth_control_enable = multimedia_bandwidth_control_enable
if multimedia_bandwidth_control_options is not None:
self.multimedia_bandwidth_control_options = multimedia_bandwidth_control_options
if usb_bandwidth_control_enable is not None:
self.usb_bandwidth_control_enable = usb_bandwidth_control_enable
if usb_bandwidth_control_options is not None:
self.usb_bandwidth_control_options = usb_bandwidth_control_options
if pcsc_bandwidth_control_enable is not None:
self.pcsc_bandwidth_control_enable = pcsc_bandwidth_control_enable
if pcsc_bandwidth_control_options is not None:
self.pcsc_bandwidth_control_options = pcsc_bandwidth_control_options
if twain_bandwidth_control_enable is not None:
self.twain_bandwidth_control_enable = twain_bandwidth_control_enable
if twain_bandwidth_control_options is not None:
self.twain_bandwidth_control_options = twain_bandwidth_control_options
if printer_bandwidth_control_enable is not None:
self.printer_bandwidth_control_enable = printer_bandwidth_control_enable
if printer_bandwidth_control_options is not None:
self.printer_bandwidth_control_options = printer_bandwidth_control_options
if com_bandwidth_control_enable is not None:
self.com_bandwidth_control_enable = com_bandwidth_control_enable
if com_bandwidth_control_options is not None:
self.com_bandwidth_control_options = com_bandwidth_control_options
if file_redirection_bandwidth_control_enable is not None:
self.file_redirection_bandwidth_control_enable = file_redirection_bandwidth_control_enable
if file_redirection_bandwidth_control_options is not None:
self.file_redirection_bandwidth_control_options = file_redirection_bandwidth_control_options
if clipboard_bandwidth_control_enable is not None:
self.clipboard_bandwidth_control_enable = clipboard_bandwidth_control_enable
if clipboard_bandwidth_control_options is not None:
self.clipboard_bandwidth_control_options = clipboard_bandwidth_control_options
if secure_channel_bandwidth_control_enable is not None:
self.secure_channel_bandwidth_control_enable = secure_channel_bandwidth_control_enable
if secure_channel_bandwidth_control_options is not None:
self.secure_channel_bandwidth_control_options = secure_channel_bandwidth_control_options
if camera_bandwidth_control_enable is not None:
self.camera_bandwidth_control_enable = camera_bandwidth_control_enable
if camera_bandwidth_control_options is not None:
self.camera_bandwidth_control_options = camera_bandwidth_control_options
if virtual_channel_bandwidth_control_enable is not None:
self.virtual_channel_bandwidth_control_enable = virtual_channel_bandwidth_control_enable
if virtual_channel_bandwidth_control_options is not None:
self.virtual_channel_bandwidth_control_options = virtual_channel_bandwidth_control_options
@property
def intelligent_data_transport_flag(self):
"""Gets the intelligent_data_transport_flag of this Bandwidth.
        Intelligent display transport. Values: DISABLE: disabled. ENABLE: enabled. DIAGNOSTIC: diagnostic mode
:return: The intelligent_data_transport_flag of this Bandwidth.
:rtype: str
"""
return self._intelligent_data_transport_flag
@intelligent_data_transport_flag.setter
def intelligent_data_transport_flag(self, intelligent_data_transport_flag):
"""Sets the intelligent_data_transport_flag of this Bandwidth.
        Intelligent display transport. Values: DISABLE: disabled. ENABLE: enabled. DIAGNOSTIC: diagnostic mode
:param intelligent_data_transport_flag: The intelligent_data_transport_flag of this Bandwidth.
:type intelligent_data_transport_flag: str
"""
self._intelligent_data_transport_flag = intelligent_data_transport_flag
@property
def total_bandwidth_control_enable(self):
"""Gets the total_bandwidth_control_enable of this Bandwidth.
        Whether total bandwidth control is enabled. Valid values: false: disabled. true: enabled.
:return: The total_bandwidth_control_enable of this Bandwidth.
:rtype: bool
"""
return self._total_bandwidth_control_enable
@total_bandwidth_control_enable.setter
def total_bandwidth_control_enable(self, total_bandwidth_control_enable):
"""Sets the total_bandwidth_control_enable of this Bandwidth.
        Whether total bandwidth control is enabled. Valid values: false: disabled. true: enabled.
:param total_bandwidth_control_enable: The total_bandwidth_control_enable of this Bandwidth.
:type total_bandwidth_control_enable: bool
"""
self._total_bandwidth_control_enable = total_bandwidth_control_enable
@property
def options(self):
"""Gets the options of this Bandwidth.
:return: The options of this Bandwidth.
:rtype: :class:`huaweicloudsdkworkspaceapp.v1.TotalBandwidthControlOptions`
"""
return self._options
@options.setter
def options(self, options):
"""Sets the options of this Bandwidth.
:param options: The options of this Bandwidth.
:type options: :class:`huaweicloudsdkworkspaceapp.v1.TotalBandwidthControlOptions`
"""
self._options = options
@property
def display_bandwidth_control_enable(self):
"""Gets the display_bandwidth_control_enable of this Bandwidth.
        Whether display bandwidth control is enabled. Valid values: false: disabled. true: enabled.
:return: The display_bandwidth_control_enable of this Bandwidth.
:rtype: bool
"""
return self._display_bandwidth_control_enable
@display_bandwidth_control_enable.setter
def display_bandwidth_control_enable(self, display_bandwidth_control_enable):
"""Sets the display_bandwidth_control_enable of this Bandwidth.
        Whether display bandwidth control is enabled. Valid values: false: disabled. true: enabled.
:param display_bandwidth_control_enable: The display_bandwidth_control_enable of this Bandwidth.
:type display_bandwidth_control_enable: bool
"""
self._display_bandwidth_control_enable = display_bandwidth_control_enable
@property
def display_bandwidth_control_options(self):
"""Gets the display_bandwidth_control_options of this Bandwidth.
:return: The display_bandwidth_control_options of this Bandwidth.
:rtype: :class:`huaweicloudsdkworkspaceapp.v1.DisplayBandwidthControlOptions`
"""
return self._display_bandwidth_control_options
@display_bandwidth_control_options.setter
def display_bandwidth_control_options(self, display_bandwidth_control_options):
"""Sets the display_bandwidth_control_options of this Bandwidth.
:param display_bandwidth_control_options: The display_bandwidth_control_options of this Bandwidth.
:type display_bandwidth_control_options: :class:`huaweicloudsdkworkspaceapp.v1.DisplayBandwidthControlOptions`
"""
self._display_bandwidth_control_options = display_bandwidth_control_options
@property
def multimedia_bandwidth_control_enable(self):
"""Gets the multimedia_bandwidth_control_enable of this Bandwidth.
        Whether multimedia bandwidth control is enabled. Valid values: false: disabled. true: enabled.
:return: The multimedia_bandwidth_control_enable of this Bandwidth.
:rtype: bool
"""
return self._multimedia_bandwidth_control_enable
@multimedia_bandwidth_control_enable.setter
def multimedia_bandwidth_control_enable(self, multimedia_bandwidth_control_enable):
"""Sets the multimedia_bandwidth_control_enable of this Bandwidth.
        Whether multimedia bandwidth control is enabled. Valid values: false: disabled. true: enabled.
:param multimedia_bandwidth_control_enable: The multimedia_bandwidth_control_enable of this Bandwidth.
:type multimedia_bandwidth_control_enable: bool
"""
self._multimedia_bandwidth_control_enable = multimedia_bandwidth_control_enable
@property
def multimedia_bandwidth_control_options(self):
"""Gets the multimedia_bandwidth_control_options of this Bandwidth.
:return: The multimedia_bandwidth_control_options of this Bandwidth.
:rtype: :class:`huaweicloudsdkworkspaceapp.v1.MultimediaBandwidthControlOptions`
"""
return self._multimedia_bandwidth_control_options
@multimedia_bandwidth_control_options.setter
def multimedia_bandwidth_control_options(self, multimedia_bandwidth_control_options):
"""Sets the multimedia_bandwidth_control_options of this Bandwidth.
:param multimedia_bandwidth_control_options: The multimedia_bandwidth_control_options of this Bandwidth.
:type multimedia_bandwidth_control_options: :class:`huaweicloudsdkworkspaceapp.v1.MultimediaBandwidthControlOptions`
"""
self._multimedia_bandwidth_control_options = multimedia_bandwidth_control_options
@property
def usb_bandwidth_control_enable(self):
"""Gets the usb_bandwidth_control_enable of this Bandwidth.
        Whether USB bandwidth control is enabled. Valid values: false: disabled. true: enabled.
:return: The usb_bandwidth_control_enable of this Bandwidth.
:rtype: bool
"""
return self._usb_bandwidth_control_enable
@usb_bandwidth_control_enable.setter
def usb_bandwidth_control_enable(self, usb_bandwidth_control_enable):
"""Sets the usb_bandwidth_control_enable of this Bandwidth.
        Whether USB bandwidth control is enabled. Valid values: false: disabled. true: enabled.
:param usb_bandwidth_control_enable: The usb_bandwidth_control_enable of this Bandwidth.
:type usb_bandwidth_control_enable: bool
"""
self._usb_bandwidth_control_enable = usb_bandwidth_control_enable
@property
def usb_bandwidth_control_options(self):
"""Gets the usb_bandwidth_control_options of this Bandwidth.
:return: The usb_bandwidth_control_options of this Bandwidth.
:rtype: :class:`huaweicloudsdkworkspaceapp.v1.UsbBandwidthControlOptions`
"""
return self._usb_bandwidth_control_options
@usb_bandwidth_control_options.setter
def usb_bandwidth_control_options(self, usb_bandwidth_control_options):
"""Sets the usb_bandwidth_control_options of this Bandwidth.
:param usb_bandwidth_control_options: The usb_bandwidth_control_options of this Bandwidth.
:type usb_bandwidth_control_options: :class:`huaweicloudsdkworkspaceapp.v1.UsbBandwidthControlOptions`
"""
self._usb_bandwidth_control_options = usb_bandwidth_control_options
@property
def pcsc_bandwidth_control_enable(self):
"""Gets the pcsc_bandwidth_control_enable of this Bandwidth.
        Whether PCSC control is enabled. Valid values: false: disabled. true: enabled.
:return: The pcsc_bandwidth_control_enable of this Bandwidth.
:rtype: bool
"""
return self._pcsc_bandwidth_control_enable
@pcsc_bandwidth_control_enable.setter
def pcsc_bandwidth_control_enable(self, pcsc_bandwidth_control_enable):
"""Sets the pcsc_bandwidth_control_enable of this Bandwidth.
        Whether PCSC control is enabled. Valid values: false: disabled. true: enabled.
:param pcsc_bandwidth_control_enable: The pcsc_bandwidth_control_enable of this Bandwidth.
:type pcsc_bandwidth_control_enable: bool
"""
self._pcsc_bandwidth_control_enable = pcsc_bandwidth_control_enable
@property
def pcsc_bandwidth_control_options(self):
"""Gets the pcsc_bandwidth_control_options of this Bandwidth.
:return: The pcsc_bandwidth_control_options of this Bandwidth.
:rtype: :class:`huaweicloudsdkworkspaceapp.v1.PcscBandwidthControlOptions`
"""
return self._pcsc_bandwidth_control_options
@pcsc_bandwidth_control_options.setter
def pcsc_bandwidth_control_options(self, pcsc_bandwidth_control_options):
"""Sets the pcsc_bandwidth_control_options of this Bandwidth.
:param pcsc_bandwidth_control_options: The pcsc_bandwidth_control_options of this Bandwidth.
:type pcsc_bandwidth_control_options: :class:`huaweicloudsdkworkspaceapp.v1.PcscBandwidthControlOptions`
"""
self._pcsc_bandwidth_control_options = pcsc_bandwidth_control_options
@property
def twain_bandwidth_control_enable(self):
"""Gets the twain_bandwidth_control_enable of this Bandwidth.
        Whether TWAIN bandwidth control is enabled. Valid values: false: disabled. true: enabled.
:return: The twain_bandwidth_control_enable of this Bandwidth.
:rtype: bool
"""
return self._twain_bandwidth_control_enable
@twain_bandwidth_control_enable.setter
def twain_bandwidth_control_enable(self, twain_bandwidth_control_enable):
"""Sets the twain_bandwidth_control_enable of this Bandwidth.
        Whether TWAIN bandwidth control is enabled. Valid values: false: disabled. true: enabled.
:param twain_bandwidth_control_enable: The twain_bandwidth_control_enable of this Bandwidth.
:type twain_bandwidth_control_enable: bool
"""
self._twain_bandwidth_control_enable = twain_bandwidth_control_enable
@property
def twain_bandwidth_control_options(self):
"""Gets the twain_bandwidth_control_options of this Bandwidth.
:return: The twain_bandwidth_control_options of this Bandwidth.
:rtype: :class:`huaweicloudsdkworkspaceapp.v1.TwainBandwidthControlOptions`
"""
return self._twain_bandwidth_control_options
@twain_bandwidth_control_options.setter
def twain_bandwidth_control_options(self, twain_bandwidth_control_options):
"""Sets the twain_bandwidth_control_options of this Bandwidth.
:param twain_bandwidth_control_options: The twain_bandwidth_control_options of this Bandwidth.
:type twain_bandwidth_control_options: :class:`huaweicloudsdkworkspaceapp.v1.TwainBandwidthControlOptions`
"""
self._twain_bandwidth_control_options = twain_bandwidth_control_options
@property
def printer_bandwidth_control_enable(self):
"""Gets the printer_bandwidth_control_enable of this Bandwidth.
        Whether printer bandwidth control is enabled. Valid values: false: disabled. true: enabled.
:return: The printer_bandwidth_control_enable of this Bandwidth.
:rtype: bool
"""
return self._printer_bandwidth_control_enable
@printer_bandwidth_control_enable.setter
def printer_bandwidth_control_enable(self, printer_bandwidth_control_enable):
"""Sets the printer_bandwidth_control_enable of this Bandwidth.
        Whether printer bandwidth control is enabled. Valid values: false: disabled. true: enabled.
:param printer_bandwidth_control_enable: The printer_bandwidth_control_enable of this Bandwidth.
:type printer_bandwidth_control_enable: bool
"""
self._printer_bandwidth_control_enable = printer_bandwidth_control_enable
@property
def printer_bandwidth_control_options(self):
"""Gets the printer_bandwidth_control_options of this Bandwidth.
:return: The printer_bandwidth_control_options of this Bandwidth.
:rtype: :class:`huaweicloudsdkworkspaceapp.v1.PrinterBandwidthControlOptions`
"""
return self._printer_bandwidth_control_options
@printer_bandwidth_control_options.setter
def printer_bandwidth_control_options(self, printer_bandwidth_control_options):
"""Sets the printer_bandwidth_control_options of this Bandwidth.
:param printer_bandwidth_control_options: The printer_bandwidth_control_options of this Bandwidth.
:type printer_bandwidth_control_options: :class:`huaweicloudsdkworkspaceapp.v1.PrinterBandwidthControlOptions`
"""
self._printer_bandwidth_control_options = printer_bandwidth_control_options
@property
def com_bandwidth_control_enable(self):
"""Gets the com_bandwidth_control_enable of this Bandwidth.
        Whether serial port (COM) bandwidth control is enabled. Valid values: false: disabled. true: enabled.
:return: The com_bandwidth_control_enable of this Bandwidth.
:rtype: bool
"""
return self._com_bandwidth_control_enable
@com_bandwidth_control_enable.setter
def com_bandwidth_control_enable(self, com_bandwidth_control_enable):
"""Sets the com_bandwidth_control_enable of this Bandwidth.
        Whether serial port (COM) bandwidth control is enabled. Valid values: false: disabled. true: enabled.
:param com_bandwidth_control_enable: The com_bandwidth_control_enable of this Bandwidth.
:type com_bandwidth_control_enable: bool
"""
self._com_bandwidth_control_enable = com_bandwidth_control_enable
@property
def com_bandwidth_control_options(self):
"""Gets the com_bandwidth_control_options of this Bandwidth.
:return: The com_bandwidth_control_options of this Bandwidth.
:rtype: :class:`huaweicloudsdkworkspaceapp.v1.ComBandwidthControlOptions`
"""
return self._com_bandwidth_control_options
@com_bandwidth_control_options.setter
def com_bandwidth_control_options(self, com_bandwidth_control_options):
"""Sets the com_bandwidth_control_options of this Bandwidth.
:param com_bandwidth_control_options: The com_bandwidth_control_options of this Bandwidth.
:type com_bandwidth_control_options: :class:`huaweicloudsdkworkspaceapp.v1.ComBandwidthControlOptions`
"""
self._com_bandwidth_control_options = com_bandwidth_control_options
@property
def file_redirection_bandwidth_control_enable(self):
"""Gets the file_redirection_bandwidth_control_enable of this Bandwidth.
        Whether file redirection bandwidth control is enabled. Valid values: false: disabled. true: enabled.
:return: The file_redirection_bandwidth_control_enable of this Bandwidth.
:rtype: bool
"""
return self._file_redirection_bandwidth_control_enable
@file_redirection_bandwidth_control_enable.setter
def file_redirection_bandwidth_control_enable(self, file_redirection_bandwidth_control_enable):
"""Sets the file_redirection_bandwidth_control_enable of this Bandwidth.
        Whether file redirection bandwidth control is enabled. Valid values: false: disabled. true: enabled.
:param file_redirection_bandwidth_control_enable: The file_redirection_bandwidth_control_enable of this Bandwidth.
:type file_redirection_bandwidth_control_enable: bool
"""
self._file_redirection_bandwidth_control_enable = file_redirection_bandwidth_control_enable
@property
def file_redirection_bandwidth_control_options(self):
"""Gets the file_redirection_bandwidth_control_options of this Bandwidth.
:return: The file_redirection_bandwidth_control_options of this Bandwidth.
:rtype: :class:`huaweicloudsdkworkspaceapp.v1.FileRedirectionBandwidthControlOptions`
"""
return self._file_redirection_bandwidth_control_options
@file_redirection_bandwidth_control_options.setter
def file_redirection_bandwidth_control_options(self, file_redirection_bandwidth_control_options):
"""Sets the file_redirection_bandwidth_control_options of this Bandwidth.
:param file_redirection_bandwidth_control_options: The file_redirection_bandwidth_control_options of this Bandwidth.
:type file_redirection_bandwidth_control_options: :class:`huaweicloudsdkworkspaceapp.v1.FileRedirectionBandwidthControlOptions`
"""
self._file_redirection_bandwidth_control_options = file_redirection_bandwidth_control_options
@property
def clipboard_bandwidth_control_enable(self):
"""Gets the clipboard_bandwidth_control_enable of this Bandwidth.
        Whether clipboard bandwidth control is enabled. Valid values: false: disabled. true: enabled.
:return: The clipboard_bandwidth_control_enable of this Bandwidth.
:rtype: bool
"""
return self._clipboard_bandwidth_control_enable
@clipboard_bandwidth_control_enable.setter
def clipboard_bandwidth_control_enable(self, clipboard_bandwidth_control_enable):
"""Sets the clipboard_bandwidth_control_enable of this Bandwidth.
        Whether clipboard bandwidth control is enabled. Valid values: false: disabled. true: enabled.
:param clipboard_bandwidth_control_enable: The clipboard_bandwidth_control_enable of this Bandwidth.
:type clipboard_bandwidth_control_enable: bool
"""
self._clipboard_bandwidth_control_enable = clipboard_bandwidth_control_enable
@property
def clipboard_bandwidth_control_options(self):
"""Gets the clipboard_bandwidth_control_options of this Bandwidth.
:return: The clipboard_bandwidth_control_options of this Bandwidth.
:rtype: :class:`huaweicloudsdkworkspaceapp.v1.ClipboardBandwidthControlOptions`
"""
return self._clipboard_bandwidth_control_options
@clipboard_bandwidth_control_options.setter
def clipboard_bandwidth_control_options(self, clipboard_bandwidth_control_options):
"""Sets the clipboard_bandwidth_control_options of this Bandwidth.
:param clipboard_bandwidth_control_options: The clipboard_bandwidth_control_options of this Bandwidth.
:type clipboard_bandwidth_control_options: :class:`huaweicloudsdkworkspaceapp.v1.ClipboardBandwidthControlOptions`
"""
self._clipboard_bandwidth_control_options = clipboard_bandwidth_control_options
@property
def secure_channel_bandwidth_control_enable(self):
"""Gets the secure_channel_bandwidth_control_enable of this Bandwidth.
        Whether secure channel bandwidth control is enabled. Valid values: false: disabled. true: enabled.
:return: The secure_channel_bandwidth_control_enable of this Bandwidth.
:rtype: bool
"""
return self._secure_channel_bandwidth_control_enable
@secure_channel_bandwidth_control_enable.setter
def secure_channel_bandwidth_control_enable(self, secure_channel_bandwidth_control_enable):
"""Sets the secure_channel_bandwidth_control_enable of this Bandwidth.
        Whether secure channel bandwidth control is enabled. Valid values: false: disabled. true: enabled.
:param secure_channel_bandwidth_control_enable: The secure_channel_bandwidth_control_enable of this Bandwidth.
:type secure_channel_bandwidth_control_enable: bool
"""
self._secure_channel_bandwidth_control_enable = secure_channel_bandwidth_control_enable
@property
def secure_channel_bandwidth_control_options(self):
"""Gets the secure_channel_bandwidth_control_options of this Bandwidth.
:return: The secure_channel_bandwidth_control_options of this Bandwidth.
:rtype: :class:`huaweicloudsdkworkspaceapp.v1.SecureChannelBandwidthControlOptions`
"""
return self._secure_channel_bandwidth_control_options
@secure_channel_bandwidth_control_options.setter
def secure_channel_bandwidth_control_options(self, secure_channel_bandwidth_control_options):
"""Sets the secure_channel_bandwidth_control_options of this Bandwidth.
:param secure_channel_bandwidth_control_options: The secure_channel_bandwidth_control_options of this Bandwidth.
:type secure_channel_bandwidth_control_options: :class:`huaweicloudsdkworkspaceapp.v1.SecureChannelBandwidthControlOptions`
"""
self._secure_channel_bandwidth_control_options = secure_channel_bandwidth_control_options
@property
def camera_bandwidth_control_enable(self):
"""Gets the camera_bandwidth_control_enable of this Bandwidth.
        Whether camera bandwidth control is enabled. Valid values: false: disabled. true: enabled.
:return: The camera_bandwidth_control_enable of this Bandwidth.
:rtype: bool
"""
return self._camera_bandwidth_control_enable
@camera_bandwidth_control_enable.setter
def camera_bandwidth_control_enable(self, camera_bandwidth_control_enable):
"""Sets the camera_bandwidth_control_enable of this Bandwidth.
        Whether camera bandwidth control is enabled. Valid values: false: disabled. true: enabled.
:param camera_bandwidth_control_enable: The camera_bandwidth_control_enable of this Bandwidth.
:type camera_bandwidth_control_enable: bool
"""
self._camera_bandwidth_control_enable = camera_bandwidth_control_enable
@property
def camera_bandwidth_control_options(self):
"""Gets the camera_bandwidth_control_options of this Bandwidth.
:return: The camera_bandwidth_control_options of this Bandwidth.
:rtype: :class:`huaweicloudsdkworkspaceapp.v1.CameraBandwidthControlOptions`
"""
return self._camera_bandwidth_control_options
@camera_bandwidth_control_options.setter
def camera_bandwidth_control_options(self, camera_bandwidth_control_options):
"""Sets the camera_bandwidth_control_options of this Bandwidth.
:param camera_bandwidth_control_options: The camera_bandwidth_control_options of this Bandwidth.
:type camera_bandwidth_control_options: :class:`huaweicloudsdkworkspaceapp.v1.CameraBandwidthControlOptions`
"""
self._camera_bandwidth_control_options = camera_bandwidth_control_options
@property
def virtual_channel_bandwidth_control_enable(self):
"""Gets the virtual_channel_bandwidth_control_enable of this Bandwidth.
        Whether virtual channel bandwidth control is enabled. Valid values: false: disabled. true: enabled.
:return: The virtual_channel_bandwidth_control_enable of this Bandwidth.
:rtype: bool
"""
return self._virtual_channel_bandwidth_control_enable
@virtual_channel_bandwidth_control_enable.setter
def virtual_channel_bandwidth_control_enable(self, virtual_channel_bandwidth_control_enable):
"""Sets the virtual_channel_bandwidth_control_enable of this Bandwidth.
        Whether virtual channel bandwidth control is enabled. Valid values: false: disabled. true: enabled.
:param virtual_channel_bandwidth_control_enable: The virtual_channel_bandwidth_control_enable of this Bandwidth.
:type virtual_channel_bandwidth_control_enable: bool
"""
self._virtual_channel_bandwidth_control_enable = virtual_channel_bandwidth_control_enable
@property
def virtual_channel_bandwidth_control_options(self):
"""Gets the virtual_channel_bandwidth_control_options of this Bandwidth.
:return: The virtual_channel_bandwidth_control_options of this Bandwidth.
:rtype: :class:`huaweicloudsdkworkspaceapp.v1.VirtualChannelBandwidthControlOptions`
"""
return self._virtual_channel_bandwidth_control_options
@virtual_channel_bandwidth_control_options.setter
def virtual_channel_bandwidth_control_options(self, virtual_channel_bandwidth_control_options):
"""Sets the virtual_channel_bandwidth_control_options of this Bandwidth.
:param virtual_channel_bandwidth_control_options: The virtual_channel_bandwidth_control_options of this Bandwidth.
:type virtual_channel_bandwidth_control_options: :class:`huaweicloudsdkworkspaceapp.v1.VirtualChannelBandwidthControlOptions`
"""
self._virtual_channel_bandwidth_control_options = virtual_channel_bandwidth_control_options
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Bandwidth):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
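# Illustrative usage sketch, not part of the generated SDK module: it assumes the
# constructor arguments shown above are all optional keyword arguments, as the
# `if ... is not None` guards in __init__ suggest.
if __name__ == "__main__":
    bandwidth = Bandwidth(
        usb_bandwidth_control_enable=True,
        printer_bandwidth_control_enable=False,
    )
    # to_dict() recursively serializes nested option objects and masks any
    # attribute listed in sensitive_list.
    print(bandwidth.to_dict())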
|
5568ae7c2aacfeb53f3eaaf7f8725885b2b5b2c8
|
ebac806d8289e6268ebd4b8075e682bdaeff860b
|
/tests/draw/svg/test_patterns.py
|
28c8b7bd1dd1a16e51a833d43516b41330bfafe1
|
[
"BSD-3-Clause"
] |
permissive
|
Kozea/WeasyPrint
|
af1613ea28ba54bf14ac2cbba37c4832393d71e7
|
6977e79ce94ef605d39276702e3484d2a9570b35
|
refs/heads/master
| 2023-08-11T07:08:47.828375
| 2023-08-09T18:14:07
| 2023-08-09T18:23:15
| 2,179,572
| 5,685
| 739
|
BSD-3-Clause
| 2023-09-04T20:52:57
| 2011-08-09T14:14:24
|
Python
|
UTF-8
|
Python
| false
| false
| 7,333
|
py
|
test_patterns.py
|
"""Test how SVG simple patterns are drawn."""
from ...testing_utils import assert_no_logs
@assert_no_logs
def test_pattern(assert_pixels):
assert_pixels('''
BBrrBBrr
BBrrBBrr
rrBBrrBB
rrBBrrBB
BBrrBBrr
BBrrBBrr
rrBBrrBB
rrBBrrBB
''', '''
<style>
@page { size: 8px }
svg { display: block }
</style>
<svg width="8px" height="8px" xmlns="http://www.w3.org/2000/svg">
<defs>
<pattern id="pat" x="0" y="0" width="4" height="4"
patternUnits="userSpaceOnUse"
patternContentUnits="userSpaceOnUse">
<rect x="0" y="0" width="2" height="2" fill="blue" />
<rect x="0" y="2" width="2" height="2" fill="red" />
<rect x="2" y="0" width="2" height="2" fill="red" />
<rect x="2" y="2" width="2" height="2" fill="blue" />
</pattern>
</defs>
<rect x="0" y="0" width="8" height="8" fill="url(#pat)" />
</svg>
''')
@assert_no_logs
def test_pattern_2(assert_pixels):
assert_pixels('''
BBrrBBrr
BBrrBBrr
rrBBrrBB
rrBBrrBB
BBrrBBrr
BBrrBBrr
rrBBrrBB
rrBBrrBB
''', '''
<style>
@page { size: 8px }
svg { display: block }
</style>
<svg width="8px" height="8px" xmlns="http://www.w3.org/2000/svg">
<defs>
<pattern id="pat" x="0" y="0" width="50%" height="50%"
patternUnits="objectBoundingBox"
patternContentUnits="userSpaceOnUse">
<rect x="0" y="0" width="2" height="2" fill="blue" />
<rect x="0" y="2" width="2" height="2" fill="red" />
<rect x="2" y="0" width="2" height="2" fill="red" />
<rect x="2" y="2" width="2" height="2" fill="blue" />
</pattern>
</defs>
<rect x="0" y="0" width="8" height="8" fill="url(#pat)" />
</svg>
''')
@assert_no_logs
def test_pattern_3(assert_pixels):
assert_pixels('''
BBrrBBrr
BBrrBBrr
rrBBrrBB
rrBBrrBB
BBrrBBrr
BBrrBBrr
rrBBrrBB
rrBBrrBB
''', '''
<style>
@page { size: 8px }
svg { display: block }
</style>
<svg width="8px" height="8px" xmlns="http://www.w3.org/2000/svg">
<defs>
<pattern id="pat" x="0" y="0" width="4" height="4"
patternUnits="userSpaceOnUse"
patternContentUnits="userSpaceOnUse">
<rect x="0" y="0" width="2" height="2" fill="blue" />
<rect x="0" y="2" width="2" height="2" fill="red" />
<rect x="2" y="0" width="2" height="2" fill="red" />
<rect x="2" y="2" width="2" height="2" fill="blue" />
</pattern>
</defs>
<rect x="0" y="0" width="8" height="8" fill="url(#pat)" />
</svg>
''')
@assert_no_logs
def test_pattern_4(assert_pixels):
assert_pixels('''
BBrrBBrr
BBrrBBrr
rrBBrrBB
rrBBrrBB
BBrrBBrr
BBrrBBrr
rrBBrrBB
rrBBrrBB
''', '''
<style>
@page { size: 8px }
svg { display: block }
</style>
<svg width="8px" height="8px" xmlns="http://www.w3.org/2000/svg">
<defs>
<pattern id="pat" x="0" y="0" width="4" height="4"
patternUnits="userSpaceOnUse"
patternContentUnits="objectBoundingBox">
<rect x="0" y="0" width="50%" height="50%" fill="blue" />
<rect x="0" y="50%" width="50%" height="50%" fill="red" />
<rect x="50%" y="0" width="50%" height="50%" fill="red" />
<rect x="50%" y="50%" width="50%" height="50%" fill="blue" />
</pattern>
</defs>
<rect x="0" y="0" width="8" height="8" fill="url(#pat)" />
</svg>
''')
@assert_no_logs
def test_pattern_inherit_attributes(assert_pixels):
assert_pixels('''
BBrrBBrr
BBrrBBrr
rrBBrrBB
rrBBrrBB
BBrrBBrr
BBrrBBrr
rrBBrrBB
rrBBrrBB
''', '''
<style>
@page { size: 8px }
svg { display: block }
</style>
<svg width="8px" height="8px" xmlns="http://www.w3.org/2000/svg">
<defs>
<pattern id="parent" x="0" y="0" width="4" height="4"
patternUnits="userSpaceOnUse"
patternContentUnits="userSpaceOnUse">
</pattern>
<pattern id="pat" href="#parent">
<rect x="0" y="0" width="2" height="2" fill="blue" />
<rect x="0" y="2" width="2" height="2" fill="red" />
<rect x="2" y="0" width="2" height="2" fill="red" />
<rect x="2" y="2" width="2" height="2" fill="blue" />
</pattern>
</defs>
<rect x="0" y="0" width="8" height="8" fill="url(#pat)" />
</svg>
''')
@assert_no_logs
def test_pattern_inherit_children(assert_pixels):
assert_pixels('''
BBrrBBrr
BBrrBBrr
rrBBrrBB
rrBBrrBB
BBrrBBrr
BBrrBBrr
rrBBrrBB
rrBBrrBB
''', '''
<style>
@page { size: 8px }
svg { display: block }
</style>
<svg width="8px" height="8px" xmlns="http://www.w3.org/2000/svg">
<defs>
<pattern id="parent">
<rect x="0" y="0" width="2" height="2" fill="blue" />
<rect x="0" y="2" width="2" height="2" fill="red" />
<rect x="2" y="0" width="2" height="2" fill="red" />
<rect x="2" y="2" width="2" height="2" fill="blue" />
</pattern>
<pattern id="pat" href="#parent" x="0" y="0" width="4" height="4"
patternUnits="userSpaceOnUse" patternContentUnits="userSpaceOnUse">
</pattern>
</defs>
<rect x="0" y="0" width="8" height="8" fill="url(#pat)" />
</svg>
''')
@assert_no_logs
def test_pattern_inherit_no_override(assert_pixels):
assert_pixels('''
BBrrBBrr
BBrrBBrr
rrBBrrBB
rrBBrrBB
BBrrBBrr
BBrrBBrr
rrBBrrBB
rrBBrrBB
''', '''
<style>
@page { size: 8px }
svg { display: block }
</style>
<svg width="8px" height="8px" xmlns="http://www.w3.org/2000/svg">
<defs>
<pattern id="parent" x="1" y="1" width="3" height="3"
patternUnits="objectBoundingBox"
patternContentUnits="objectBoundingBox">
<rect x="0" y="0" width="2" height="2" fill="green" />
<rect x="0" y="2" width="2" height="2" fill="green" />
<rect x="2" y="0" width="2" height="2" fill="yellow" />
<rect x="2" y="2" width="2" height="2" fill="yellow" />
</pattern>
<pattern id="pat" href="#parent" x="0" y="0" width="4" height="4"
patternUnits="userSpaceOnUse" patternContentUnits="userSpaceOnUse">
<rect x="0" y="0" width="2" height="2" fill="blue" />
<rect x="0" y="2" width="2" height="2" fill="red" />
<rect x="2" y="0" width="2" height="2" fill="red" />
<rect x="2" y="2" width="2" height="2" fill="blue" />
</pattern>
</defs>
<rect x="0" y="0" width="8" height="8" fill="url(#pat)" />
</svg>
''')
|
c12fcd57ddd9161c3f09ddffac37777c47cf5992
|
ec85250addb7357dfe7bb3e0680d53fc7b0fd8fb
|
/python_modules/dagster-graphql/dagster_graphql/implementation/utils.py
|
63fa7403b299a3f45d93d311b78325bb06040478
|
[
"Apache-2.0"
] |
permissive
|
dagster-io/dagster
|
6adb5deee8bcf3ea1866a6a64f2ed81e1db5e73a
|
fe21995e0402878437a828c6a4244025eac8c43b
|
refs/heads/master
| 2023-09-05T20:46:08.203794
| 2023-09-05T19:54:52
| 2023-09-05T19:54:52
| 131,619,646
| 8,565
| 1,154
|
Apache-2.0
| 2023-09-14T21:57:37
| 2018-04-30T16:30:04
|
Python
|
UTF-8
|
Python
| false
| false
| 8,074
|
py
|
utils.py
|
import sys
from contextlib import contextmanager
from contextvars import ContextVar
from types import TracebackType
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
Mapping,
NamedTuple,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
cast,
)
import dagster._check as check
from dagster._core.definitions.events import AssetKey
from dagster._core.definitions.selector import GraphSelector, JobSubsetSelector
from dagster._core.workspace.context import BaseWorkspaceRequestContext
from dagster._utils.error import serializable_error_info_from_exc_info
from typing_extensions import ParamSpec, TypeAlias
if TYPE_CHECKING:
from dagster_graphql.schema.errors import GrapheneError, GraphenePythonError
from dagster_graphql.schema.util import ResolveInfo
P = ParamSpec("P")
T = TypeVar("T")
GrapheneResolverFn: TypeAlias = Callable[..., object]
T_Callable = TypeVar("T_Callable", bound=Callable)
def assert_permission_for_location(
graphene_info: "ResolveInfo", permission: str, location_name: str
) -> None:
from dagster_graphql.schema.errors import GrapheneUnauthorizedError
context = cast(BaseWorkspaceRequestContext, graphene_info.context)
if not context.has_permission_for_location(permission, location_name):
raise UserFacingGraphQLError(GrapheneUnauthorizedError())
def require_permission_check(permission: str) -> Callable[[GrapheneResolverFn], GrapheneResolverFn]:
def decorator(fn: GrapheneResolverFn) -> GrapheneResolverFn:
def _fn(self, graphene_info, *args: P.args, **kwargs: P.kwargs):
result = fn(self, graphene_info, *args, **kwargs)
if not graphene_info.context.was_permission_checked(permission):
raise Exception(f"Permission {permission} was never checked during the request")
return result
return _fn
return decorator
def check_permission(permission: str) -> Callable[[GrapheneResolverFn], GrapheneResolverFn]:
def decorator(fn: GrapheneResolverFn) -> GrapheneResolverFn:
def _fn(self, graphene_info, *args: P.args, **kwargs: P.kwargs):
assert_permission(graphene_info, permission)
return fn(self, graphene_info, *args, **kwargs)
return _fn
return decorator
def assert_permission(graphene_info: "ResolveInfo", permission: str) -> None:
from dagster_graphql.schema.errors import GrapheneUnauthorizedError
context = cast(BaseWorkspaceRequestContext, graphene_info.context)
if not context.has_permission(permission):
raise UserFacingGraphQLError(GrapheneUnauthorizedError())
def _noop(_) -> None:
pass
class ErrorCapture:
@staticmethod
def default_on_exception(
exc_info: Tuple[Type[BaseException], BaseException, TracebackType]
) -> "GraphenePythonError":
from dagster_graphql.schema.errors import GraphenePythonError
        # Transform the exception into a GraphenePythonError to present to the user
return GraphenePythonError(serializable_error_info_from_exc_info(exc_info))
# global behavior for how to handle unexpected exceptions
on_exception = default_on_exception
# context var for observing unexpected exceptions
observer: ContextVar[Callable[[Exception], None]] = ContextVar(
"error_capture_observer", default=_noop
)
@staticmethod
@contextmanager
def watch(fn: Callable[[Exception], None]) -> Iterator[None]:
token = ErrorCapture.observer.set(fn)
try:
yield
finally:
ErrorCapture.observer.reset(token)
def capture_error(
fn: Callable[P, T]
) -> Callable[P, Union[T, "GrapheneError", "GraphenePythonError"]]:
def _fn(*args: P.args, **kwargs: P.kwargs) -> T:
try:
return fn(*args, **kwargs)
except UserFacingGraphQLError as de_exception:
return de_exception.error
except Exception as exc:
ErrorCapture.observer.get()(exc)
return ErrorCapture.on_exception(sys.exc_info()) # type: ignore
return _fn
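# Illustrative sketch (the resolver name and permission string are hypothetical,
# not part of this module): capture_error is typically the outermost decorator on
# a Graphene resolver, so both UserFacingGraphQLError and unexpected exceptions
# come back to the client as GraphQL error types:
#
#     @capture_error
#     @check_permission("launch_pipeline_execution")
#     def resolve_launch_run(self, graphene_info, **kwargs):
#         ...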
class UserFacingGraphQLError(Exception):
# The `error` arg here should be a Graphene type implementing the interface `GrapheneError`, but
# this is not trackable by the Python type system.
def __init__(self, error: Any):
self.error = error
message = "[{cls}] {message}".format(
cls=error.__class__.__name__,
message=error.message if hasattr(error, "message") else None,
)
super(UserFacingGraphQLError, self).__init__(message)
def pipeline_selector_from_graphql(data: Mapping[str, Any]) -> JobSubsetSelector:
asset_selection = cast(Optional[Iterable[Dict[str, List[str]]]], data.get("assetSelection"))
return JobSubsetSelector(
location_name=data["repositoryLocationName"],
repository_name=data["repositoryName"],
job_name=data.get("pipelineName") or data.get("jobName"), # type: ignore
op_selection=data.get("solidSelection"),
asset_selection=(
[AssetKey.from_graphql_input(asset_key) for asset_key in asset_selection]
if asset_selection
else None
),
)
def graph_selector_from_graphql(data: Mapping[str, Any]) -> GraphSelector:
return GraphSelector(
location_name=data["repositoryLocationName"],
repository_name=data["repositoryName"],
graph_name=data["graphName"],
)
class ExecutionParams(
NamedTuple(
"_ExecutionParams",
[
("selector", JobSubsetSelector),
("run_config", Mapping[str, object]),
("mode", Optional[str]),
("execution_metadata", "ExecutionMetadata"),
("step_keys", Optional[Sequence[str]]),
],
)
):
def __new__(
cls,
selector: JobSubsetSelector,
run_config: Optional[Mapping[str, object]],
mode: Optional[str],
execution_metadata: "ExecutionMetadata",
step_keys: Optional[Sequence[str]],
):
check.opt_list_param(step_keys, "step_keys", of_type=str)
return super(ExecutionParams, cls).__new__(
cls,
selector=check.inst_param(selector, "selector", JobSubsetSelector),
run_config=check.opt_mapping_param(run_config, "run_config", key_type=str),
mode=check.opt_str_param(mode, "mode"),
execution_metadata=check.inst_param(
execution_metadata, "execution_metadata", ExecutionMetadata
),
step_keys=step_keys,
)
def to_graphql_input(self) -> Mapping[str, Any]:
return {
"selector": self.selector.to_graphql_input(),
"runConfigData": self.run_config,
"mode": self.mode,
"executionMetadata": self.execution_metadata.to_graphql_input(),
"stepKeys": self.step_keys,
}
class ExecutionMetadata(
NamedTuple(
"_ExecutionMetadata",
[
("run_id", Optional[str]),
("tags", Mapping[str, str]),
("root_run_id", Optional[str]),
("parent_run_id", Optional[str]),
],
)
):
def __new__(
cls,
run_id: Optional[str],
tags: Mapping[str, str],
root_run_id: Optional[str] = None,
parent_run_id: Optional[str] = None,
):
return super(ExecutionMetadata, cls).__new__(
cls,
check.opt_str_param(run_id, "run_id"),
check.dict_param(tags, "tags", key_type=str, value_type=str),
check.opt_str_param(root_run_id, "root_run_id"),
check.opt_str_param(parent_run_id, "parent_run_id"),
)
def to_graphql_input(self) -> Mapping[str, Any]:
return {
"runId": self.run_id,
"tags": [{"key": k, "value": v} for k, v in self.tags.items()],
"rootRunId": self.root_run_id,
"parentRunId": self.parent_run_id,
}
BackfillParams: TypeAlias = Mapping[str, Any]
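# Illustrative sketch (tag values are hypothetical): ExecutionMetadata is a plain
# NamedTuple, so converting it to the GraphQL input shape is a direct mapping:
#
#     metadata = ExecutionMetadata(run_id=None, tags={"team": "data"})
#     metadata.to_graphql_input()
#     # -> {"runId": None,
#     #     "tags": [{"key": "team", "value": "data"}],
#     #     "rootRunId": None,
#     #     "parentRunId": None}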
|
57236855824a0b30ad77865139280a3913c4dd65
|
90b358151d9da221b06ef059313ef607b0e66e45
|
/tests/bdd/conftest.py
|
c6a7cd5a6e57b586b9b2d19104f5b6c1df4d847e
|
[] |
no_license
|
truenas/webui
|
7967a33403bb718ee5bb9d72b616a9abb7bb9ba5
|
6fdbd450da688500c92e6b15710237caa5227eb4
|
refs/heads/master
| 2023-09-04T15:00:20.663494
| 2023-09-04T13:28:07
| 2023-09-04T13:28:07
| 90,044,084
| 176
| 173
| null | 2023-09-14T17:21:37
| 2017-05-02T14:42:01
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 8,274
|
py
|
conftest.py
|
#!/usr/bin/env python3
import os
import pytest
import time
import xpaths
from configparser import ConfigParser
from platform import system
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.common.exceptions import (
NoSuchElementException,
TimeoutException,
ElementClickInterceptedException
)
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
# The hostname needs to be unique, so deriving it from the PID avoids collisions
pid = str(os.getpid())
hostname = f'uitest{pid}'
@pytest.fixture
def nas_hostname():
return hostname
@pytest.fixture
def nas_ip():
if os.environ.get("nas_ip"):
return os.environ.get("nas_ip")
elif os.path.exists('config.cfg'):
configs = ConfigParser()
configs.read('config.cfg')
return configs['NAS_CONFIG']['ip']
else:
return 'none'
@pytest.fixture
def root_password():
if os.environ.get("nas_password"):
return os.environ.get("nas_password")
elif os.path.exists('config.cfg'):
configs = ConfigParser()
configs.read('config.cfg')
return configs['NAS_CONFIG']['password']
else:
return 'none'
@pytest.fixture
def iso_version():
if os.environ.get("nas_version"):
return os.environ.get("nas_version")
elif os.path.exists('config.cfg'):
configs = ConfigParser()
configs.read('config.cfg')
return configs['NAS_CONFIG']['version']
else:
return 'none'
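# Illustrative config.cfg layout assumed by the fixtures above (all values are
# hypothetical):
#
#     [NAS_CONFIG]
#     ip = 10.0.0.2
#     password = testing
#     version = TrueNAS-SCALE-22.12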
def browser():
profile = webdriver.FirefoxProfile()
profile.set_preference("browser.download.folderList", 2)
profile.set_preference("browser.download.dir", "/tmp")
    # This is the place to add MIME types that should auto-save.
    # application/x-tar is used for .tar
    # application/gzip is used for .tgz
profile.set_preference("browser.helperApps.neverAsk.saveToDisk", "application/x-tar,application/gzip")
profile.set_preference("browser.download.manager.showWhenStarting", False)
profile.set_preference("browser.link.open_newwindow", 3)
binary = '/usr/bin/firefox' if system() == "Linux" else '/usr/local/bin/firefox'
firefox_capabilities = DesiredCapabilities.FIREFOX
firefox_capabilities['marionette'] = True
firefox_capabilities['firefox_profile'] = profile.encoded
firefox_capabilities['binary'] = binary
web_driver = webdriver.Firefox(capabilities=firefox_capabilities)
web_driver.set_window_size(1920, 1080)
web_driver.implicitly_wait(2)
return web_driver
web_driver = browser()
@pytest.fixture
def driver():
return web_driver
# Close Firefox after all tests are completed
# def pytest_sessionfinish(session, exitstatus):
# web_driver.quit()
@pytest.mark.hookwrapper
def pytest_runtest_makereport(item):
"""
    Extends the pytest plugin to take and embed a screenshot whenever a test fails.
"""
outcome = yield
report = outcome.get_result()
if report.when == 'call' or report.when == "setup":
xfail = hasattr(report, 'wasxfail')
if (report.skipped and xfail) or (report.failed and not xfail):
screenshot_name = f'screenshot/{report.nodeid.partition("[")[0].replace("::", "_")}.png'
# look if there is a Error window
if element_exist('//h1[contains(.,"Error")]') or element_exist('//h1[contains(.,"FAILED")]'):
web_driver.find_element_by_xpath('//ix-icon[@fonticon="add_circle_outline"]').click()
time.sleep(2)
traceback_name = f'screenshot/{report.nodeid.partition("[")[0].replace("::", "_")}_error.txt'
screenshot_error = f'screenshot/{report.nodeid.partition("[")[0].replace("::", "_")}_error.png'
save_traceback(traceback_name)
# take a screenshot of the error
save_screenshot(screenshot_error)
# Press CLOSE if exist
if element_exist(xpaths.button.close):
web_driver.find_element_by_xpath(xpaths.button.close).click()
# take screenshot after looking for error
save_screenshot(screenshot_name)
if element_exist('//h1[contains(text(),"Installing")]') and element_exist('//mat-dialog-content[contains(.,"Error:")]'):
web_driver.find_element_by_xpath(xpaths.button.close).click()
if wait_on_element(1, '//ix-icon[@id="ix-close-icon"]', 'clickable'):
try:
web_driver.find_element_by_xpath('//ix-icon[@id="ix-close-icon"]').click()
except ElementClickInterceptedException:
try:
# Press Tab in case a dropdown is in the way
actions = ActionChains(web_driver)
actions.send_keys(Keys.TAB)
actions.perform()
web_driver.find_element_by_xpath('//ix-icon[@id="ix-close-icon"]').click()
except ElementClickInterceptedException:
pass
def save_screenshot(name):
web_driver.save_screenshot(name)
def save_traceback(name):
traceback_file = open(name, 'w')
traceback_file.writelines(web_driver.find_element_by_xpath('//div[@id="err-bt-text"]').text)
traceback_file.close()
def element_exist(xpath):
try:
web_driver.find_element_by_xpath(xpath)
return True
except NoSuchElementException:
return False
def wait_on_element(wait, xpath, condition=None):
if condition == 'clickable':
try:
WebDriverWait(web_driver, wait).until(ec.element_to_be_clickable((By.XPATH, xpath)))
return True
except TimeoutException:
return False
elif condition == 'inputable':
time.sleep(1)
try:
WebDriverWait(web_driver, wait).until(ec.element_to_be_clickable((By.XPATH, xpath)))
return True
except TimeoutException:
return False
elif condition == 'presence':
try:
WebDriverWait(web_driver, wait).until(ec.presence_of_element_located((By.XPATH, xpath)))
return True
except TimeoutException:
return False
else:
try:
WebDriverWait(web_driver, wait).until(ec.visibility_of_element_located((By.XPATH, xpath)))
return True
except TimeoutException:
return False
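# Illustrative usage (the xpath is hypothetical): wait up to 5 seconds for the
# Dashboard heading to become visible, then up to 5 more for it to be clickable.
#
#     wait_on_element(5, '//h1[contains(.,"Dashboard")]')
#     wait_on_element(5, '//h1[contains(.,"Dashboard")]', 'clickable')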
def enable_failover():
web_driver.find_element_by_xpath('//mat-list-item[@ix-auto="option__Dashboard"]').click()
    wait_on_element(7, '//mat-list-item[@ix-auto="option__System Settings"]')
web_driver.find_element_by_xpath('//mat-list-item[@ix-auto="option__System Settings"]').click()
    wait_on_element(7, '//mat-list-item[@ix-auto="option__Misc"]')
web_driver.find_element_by_xpath('//mat-list-item[@ix-auto="option__Misc"]').click()
    assert wait_on_element(7, '//h1[contains(.,"Miscellaneous")]')
    assert wait_on_element(7, '//li[contains(.,"Failover")]')
web_driver.find_element_by_xpath('//li[contains(.,"Failover")]').click()
    assert wait_on_element(7, '//h1[contains(.,"Failover")]')
element = web_driver.find_element_by_xpath('//mat-checkbox[@ix-auto="checkbox__Disable Failover"]')
class_attribute = element.get_attribute('class')
if 'mat-checkbox-checked' in class_attribute:
web_driver.find_element_by_xpath('//mat-checkbox[@ix-auto="checkbox__Disable Failover"]').click()
        wait_on_element(5, '//button[@ix-auto="button__SAVE"]')
web_driver.find_element_by_xpath('//button[@ix-auto="button__SAVE"]').click()
        wait_on_element(4, '//h1[contains(.,"Settings saved")]')
if element_exist('//button[@ix-auto="button__CLOSE"]'):
web_driver.find_element_by_xpath('//button[@ix-auto="button__CLOSE"]').click()
time.sleep(1)
web_driver.find_element_by_xpath('//mat-list-item[@ix-auto="option__Dashboard"]').click()
|
139293647710f8d180195adeb189c85bca9c0816
|
9a46907c545a3d2e09924d1431ade1cd448e1de6
|
/DisplayUtils/TileDisplay.py
|
d4840cb32697422738e21013e1924c7cacff2ea4
|
[] |
no_license
|
kazmiekr/GasPumpOCR
|
126cf1bce517c53b1bfe5fcda4b8a3a23adf90d4
|
3ef1386f853176b514708cc6b30d332b3b8d5e9f
|
refs/heads/master
| 2022-08-19T21:40:11.048720
| 2022-07-20T11:20:54
| 2022-07-20T11:20:54
| 86,585,608
| 154
| 57
| null | 2022-07-20T11:20:55
| 2017-03-29T13:30:13
|
Python
|
UTF-8
|
Python
| false
| false
| 825
|
py
|
TileDisplay.py
|
import cv2
start_y = 20
current_stack_y = start_y
current_stack_x = 0
current_stack_width = 0
min_width = 200
max_stack_y = 450
y_buffer = 25
x_buffer = 10
def show_img(name, img):
global current_stack_y, current_stack_x, current_stack_width
height, width = img.shape[:2]
if width < min_width:
width = min_width
if width > current_stack_width:
current_stack_width = width
cv2.imshow(name, img)
cv2.moveWindow(name, current_stack_x, current_stack_y)
current_stack_y += height + y_buffer
if current_stack_y > max_stack_y:
current_stack_y = start_y
current_stack_x += current_stack_width + x_buffer
def reset_tiles():
global current_stack_x, current_stack_y, current_stack_width
current_stack_x = 0
    current_stack_y = start_y
current_stack_width = 0
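# Illustrative usage sketch (the image path is hypothetical): tile an original
# frame next to a grayscale copy, then reset the layout for the next frame.
if __name__ == '__main__':
    frame = cv2.imread('sample_frame.png')
    if frame is not None:
        show_img('original', frame)
        show_img('gray', cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    reset_tiles()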
|
4db0d817bf89b851c4b898bffb228ccc6558d045
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/helpers/typeshed/stubs/passlib/passlib/handlers/bcrypt.pyi
|
1823586f19369581461eaf898874f23bd7f79341
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 1,816
|
pyi
|
bcrypt.pyi
|
from typing import Any, ClassVar
import passlib.utils.handlers as uh
class _BcryptCommon(uh.SubclassBackendMixin, uh.TruncateMixin, uh.HasManyIdents, uh.HasRounds, uh.HasSalt, uh.GenericHandler): # type: ignore[misc]
name: ClassVar[str]
checksum_size: ClassVar[int]
checksum_chars: ClassVar[str]
default_ident: ClassVar[str]
ident_values: ClassVar[tuple[str, ...]]
ident_aliases: ClassVar[dict[str, str]]
min_salt_size: ClassVar[int]
max_salt_size: ClassVar[int]
salt_chars: ClassVar[str]
final_salt_chars: ClassVar[str]
default_rounds: ClassVar[int]
min_rounds: ClassVar[int]
max_rounds: ClassVar[int]
rounds_cost: ClassVar[str]
truncate_size: ClassVar[int | None]
@classmethod
def from_string(cls, hash): ...
@classmethod
def needs_update(cls, hash, **kwds): ...
@classmethod
def normhash(cls, hash): ...
class _NoBackend(_BcryptCommon): ...
class _BcryptBackend(_BcryptCommon): ...
class _BcryptorBackend(_BcryptCommon): ...
class _PyBcryptBackend(_BcryptCommon): ...
class _OsCryptBackend(_BcryptCommon): ...
class _BuiltinBackend(_BcryptCommon): ...
class bcrypt(_NoBackend, _BcryptCommon): # type: ignore[misc]
backends: ClassVar[tuple[str, ...]]
class _wrapped_bcrypt(bcrypt):
truncate_size: ClassVar[None]
class bcrypt_sha256(_wrapped_bcrypt):
name: ClassVar[str]
ident_values: ClassVar[tuple[str, ...]]
ident_aliases: ClassVar[dict[str, str]]
default_ident: ClassVar[str]
version: ClassVar[int]
@classmethod
def using(cls, version: Any | None = ..., **kwds): ... # type: ignore[override]
prefix: Any
@classmethod
def identify(cls, hash): ...
@classmethod
def from_string(cls, hash): ...
def __init__(self, version: Any | None = ..., **kwds) -> None: ...
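# Illustrative sketch, not part of the stub: at runtime these handlers are used
# through the usual passlib handler API, e.g.
#
#     from passlib.handlers.bcrypt import bcrypt_sha256
#     hashed = bcrypt_sha256.using(rounds=13).hash("secret")
#     bcrypt_sha256.verify("secret", hashed)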
|
bcf2f76f5f2af3ac43e1095adc5bbda4febe7913
|
cadb6dceb7bb67ce47ef48b2c83f480a65d6b01a
|
/s3prl/problem/diarization/__init__.py
|
2eb4ad13b195e3f942b1c4d1733f328333352bed
|
[
"Apache-2.0",
"CC-BY-NC-4.0"
] |
permissive
|
s3prl/s3prl
|
52ec2ae4df5a61c786c122085603aa9c5e8c2681
|
76a9432b824f6ae3eae09a35a67782c4ed582832
|
refs/heads/main
| 2023-08-17T02:26:57.524087
| 2023-06-10T17:12:27
| 2023-06-10T17:12:27
| 196,905,457
| 1,549
| 398
|
Apache-2.0
| 2023-09-14T13:07:05
| 2019-07-15T01:54:52
|
Python
|
UTF-8
|
Python
| false
| false
| 100
|
py
|
__init__.py
|
"""
Speaker Diarization recipes
"""
from .superb_sd import SuperbSD
__all__ = [
"SuperbSD",
]
|
1dd6a89044ac4500fd2c016fbe0970373b4c5774
|
a38bf459ae380f67e0de22f7106a8df4385a7076
|
/gapic/configurable_snippetgen/configured_snippet.py
|
6f13043b8fe695cc6cf7a738a18caf644f7ae03b
|
[
"Apache-2.0"
] |
permissive
|
googleapis/gapic-generator-python
|
73ce9d52f6f5bb2652d49b237b24263d6637b1da
|
4eee26181e8db9fb5144eef5a76f178c1594e48a
|
refs/heads/main
| 2023-09-04T11:12:14.728757
| 2023-09-02T10:34:44
| 2023-09-02T10:34:44
| 129,809,857
| 116
| 65
|
Apache-2.0
| 2023-09-12T18:57:01
| 2018-04-16T21:47:04
|
Python
|
UTF-8
|
Python
| false
| false
| 9,620
|
py
|
configured_snippet.py
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
from typing import List, Optional
import inflection
import libcst
from gapic.configurable_snippetgen import libcst_utils
from gapic.configurable_snippetgen import snippet_config_language_pb2
from gapic.schema import api
class _AppendToSampleFunctionBody(libcst.CSTTransformer):
def __init__(self, statement: libcst.BaseStatement):
self.statement = statement
def visit_IndentedBlock(self, node: libcst.IndentedBlock) -> bool:
# Do not visit any nested indented blocks.
return False
def leave_IndentedBlock(
self, original_node: libcst.IndentedBlock, updated_node: libcst.IndentedBlock
) -> libcst.IndentedBlock:
del original_node
# FunctionDef.body is an IndentedBlock, and IndentedBlock.body
# is the actual sequence of statements.
new_body = list(updated_node.body) + [self.statement]
return updated_node.with_changes(body=new_body)
@dataclasses.dataclass
class ConfiguredSnippet:
api_schema: api.API
config: snippet_config_language_pb2.SnippetConfig
api_version: str
is_sync: bool
def __post_init__(self) -> None:
self._module: libcst.Module = libcst_utils.empty_module()
self._sample_function_def: libcst.FunctionDef = libcst_utils.base_function_def(
function_name=self.sample_function_name, is_sync=self.is_sync
)
@property
def code(self) -> str:
"""The code of the configured snippet."""
return self._module.code
@property
def gapic_module_name(self) -> str:
"""The GAPIC module name.
For example:
"speech_v1"
"""
module_name = self.config.rpc.proto_package.split(".")[-1]
return f"{module_name}_{self.api_version}"
@property
def region_tag(self) -> str:
"""The region tag of the snippet.
For example:
"speech_v1_config_Adaptation_CreateCustomClass_Basic_async"
"""
service_name = self.config.rpc.service_name
rpc_name = self.config.rpc.rpc_name
config_id = self.config.metadata.config_id
sync_or_async = "sync" if self.is_sync else "async"
return f"{self.gapic_module_name}_config_{service_name}_{rpc_name}_{config_id}_{sync_or_async}"
@property
def sample_function_name(self) -> str:
"""The sample function's name.
For example:
"sample_create_custom_class_Basic"
"""
snippet_method_name = self.config.signature.snippet_method_name
config_id = self.config.metadata.config_id
return f"sample_{snippet_method_name}_{config_id}"
@property
def client_class_name(self) -> str:
"""The service client's class name.
For example:
"AdaptationClient"
"AdaptationAsyncClient"
"""
if self.is_sync:
client_class_name = f"{self.config.rpc.service_name}Client"
else:
client_class_name = f"{self.config.rpc.service_name}AsyncClient"
return client_class_name
@property
def filename(self) -> str:
"""The snippet's file name.
For example:
"speech_v1_generated_Adaptation_create_custom_class_Basic_async.py"
"""
service_name = self.config.rpc.service_name
snake_case_rpc_name = inflection.underscore(self.config.rpc.rpc_name)
config_id = self.config.metadata.config_id
sync_or_async = "sync" if self.is_sync else "async"
return f"{self.gapic_module_name}_generated_{service_name}_{snake_case_rpc_name}_{config_id}_{sync_or_async}.py"
@property
def api_endpoint(self) -> Optional[str]:
"""The api_endpoint in client_options."""
service_endpoint = (
self.config.snippet.service_client_initialization.custom_service_endpoint
)
if not service_endpoint.host:
return None
schema = service_endpoint.schema
host = service_endpoint.host
region = service_endpoint.region
port = service_endpoint.port
if port:
host_maybe_with_port = f"{host}:{port}"
else:
host_maybe_with_port = host
if region:
host_maybe_with_port_and_region = f"{region}-{host_maybe_with_port}"
else:
host_maybe_with_port_and_region = host_maybe_with_port
if (
schema
== snippet_config_language_pb2.Snippet.ClientInitialization.ServiceEndpoint.HTTP
):
return f"http://{host_maybe_with_port_and_region}"
else:
# Either the default or HTTPS, in which case the schema is not needed.
return host_maybe_with_port_and_region
def _extend_sample_function_def_body(
self, statements: List[libcst.BaseStatement]
) -> None:
"""Appends the statements to the current sample function def."""
for statement in statements:
transformer = _AppendToSampleFunctionBody(statement)
# The result of applying a transformer could be of a different type
# in general, but we will only update the sample function def here.
self._sample_function_def = self._sample_function_def.visit(
transformer
) # type: ignore
def _add_sample_function_parameters(self) -> None:
"""Adds sample function parameters.
Before:
def sample_create_custom_class_Basic():
...
After:
def sample_create_custom_class_Basic(parent = "projects/..."):
...
"""
# TODO: https://github.com/googleapis/gapic-generator-python/issues/1537, add typing annotation in sample function parameters.
params = []
for config_parameter in self.config.signature.parameters:
params.append(libcst_utils.convert_parameter(config_parameter))
parameters = libcst.Parameters(params=params)
self._sample_function_def = self._sample_function_def.with_changes(
params=parameters
)
def _get_service_client_initialization(self) -> List[libcst.BaseStatement]:
"""Returns the service client initialization statements.
Examples:
client = speech_v1.AdaptationClient()
client = speech_v1.AdaptationClient(client_options = {"api_endpoint": "us-speech.googleapis.com"})
"""
if self.api_endpoint is not None:
client_options_arg = libcst.Arg(
keyword=libcst.Name("client_options"),
value=libcst_utils.convert_py_dict(
[("api_endpoint", self.api_endpoint)]
),
)
service_client_initialization = libcst.helpers.parse_template_statement(
f"client = {self.gapic_module_name}.{self.client_class_name}({{arg}})",
arg=client_options_arg,
)
else:
service_client_initialization = libcst.parse_statement(
f"client = {self.gapic_module_name}.{self.client_class_name}()"
)
# TODO: https://github.com/googleapis/gapic-generator-python/issues/1539, support pre_client_initialization statements.
return [service_client_initialization]
def _get_standard_call(self) -> List[libcst.BaseStatement]:
"""Returns the standard call statements."""
# TODO: https://github.com/googleapis/gapic-generator-python/issues/1539, support standard call statements.
return []
def _get_call(self) -> List[libcst.BaseStatement]:
"""Returns the snippet call statements."""
call_type = self.config.snippet.WhichOneof("call")
if call_type == "standard":
return self._get_standard_call()
else:
raise ValueError(f"Snippet call type {call_type} not supported.")
def _build_sample_function(self) -> None:
# TODO: https://github.com/googleapis/gapic-generator-python/issues/1536, add return type.
# TODO: https://github.com/googleapis/gapic-generator-python/issues/1538, add docstring.
self._add_sample_function_parameters()
self._extend_sample_function_def_body(
self._get_service_client_initialization())
self._extend_sample_function_def_body(self._get_call())
def _add_sample_function(self) -> None:
self._module = self._module.with_changes(
body=[self._sample_function_def])
def generate(self) -> None:
"""Generates the snippet.
This is the main entrypoint of a ConfiguredSnippet instance, calling
other methods to update self._module.
"""
self._build_sample_function()
self._add_sample_function()
# TODO: https://github.com/googleapis/gapic-generator-python/issues/1535, add imports.
# TODO: https://github.com/googleapis/gapic-generator-python/issues/1534, add region tag.
# TODO: https://github.com/googleapis/gapic-generator-python/issues/1533, add header.
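# Illustrative sketch (variable names are hypothetical): a caller builds a
# ConfiguredSnippet from an API schema and a SnippetConfig proto, calls
# generate(), then reads `filename` and `code`:
#
#     snippet = ConfiguredSnippet(api_schema, config, api_version="v1", is_sync=True)
#     snippet.generate()
#     path, source = snippet.filename, snippet.code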
|
096f658bdc54726feea445556a51312c1d06efe5
|
9ffe3f09f0195e2f7e54eedec02b40374bffd5f9
|
/tests/test_errors.py
|
dd38245d743bf049471295c159b3eef7d567b2a5
|
[
"BSD-3-Clause"
] |
permissive
|
aromanovich/jsl
|
63d36d42cd93e63a0b319318ec8a88cf5854142c
|
1b0cb1d4b0c28993dd29a7156fd0aa63a25c5030
|
refs/heads/master
| 2020-05-31T01:47:15.307376
| 2016-11-14T16:31:57
| 2016-11-14T16:31:57
| 30,669,230
| 244
| 30
| null | 2017-07-24T18:25:35
| 2015-02-11T20:55:02
|
Python
|
UTF-8
|
Python
| false
| false
| 9,856
|
py
|
test_errors.py
|
import pytest
from jsl.document import Document
from jsl.fields import (ArrayField, StringField, IntField, BaseSchemaField,
DictField, OneOfField, AnyOfField, AllOfField, NotField,
DocumentField)
from jsl.roles import DEFAULT_ROLE, Var
from jsl.exceptions import (SchemaGenerationException, FieldStep, AttributeStep,
ItemStep, DocumentStep)
from jsl.resolutionscope import EMPTY_SCOPE
class FieldStub(BaseSchemaField):
ERROR_MESSAGE = 'FieldStub error'
def get_definitions_and_schema(self, role=DEFAULT_ROLE, res_scope=EMPTY_SCOPE,
ordered=False, ref_documents=None):
raise SchemaGenerationException(self.ERROR_MESSAGE)
def test_exceptions():
f_1 = StringField()
f_2 = StringField()
# test __eq__ and __ne__
assert FieldStep(f_1) == FieldStep(f_1)
assert FieldStep(f_1, role='role_1') != FieldStep(f_1)
assert FieldStep(f_1) != FieldStep(f_2)
assert FieldStep(f_1) != AttributeStep('fields')
assert not (FieldStep(f_1) == AttributeStep('fields'))
# test __repr__
r = repr(FieldStep(f_1, role='role_1'))
assert repr(f_1) in r
assert 'role_1' in r
message = 'Something went wrong'
e = SchemaGenerationException(message)
assert str(e) == message
step = FieldStep(f_1)
e.steps.appendleft(step)
assert str(e) == '{0}\nSteps: {1}'.format(message, step)
def test_error():
db_role_friends_field = ArrayField((StringField(), None))
request_role_friends_field = ArrayField(StringField())
class User(Document):
login = StringField()
friends = ArrayField(Var({
'db_role': db_role_friends_field,
'request_role': request_role_friends_field,
}))
class Users(Document):
users = ArrayField(DocumentField(User))
Users.get_schema()
role = 'db_role'
with pytest.raises(SchemaGenerationException) as e:
Users.get_schema(role=role)
e = e.value
assert list(e.steps) == [
DocumentStep(Users, role=role),
FieldStep(Users._backend, role=role),
AttributeStep('properties', role=role),
ItemStep('users', role=role),
FieldStep(Users.users, role=role),
AttributeStep('items', role=role),
FieldStep(Users.users.items, role=role),
DocumentStep(User, role=role),
FieldStep(User._backend, role=role),
AttributeStep('properties', role=role),
ItemStep('friends', role=role),
FieldStep(User.friends, role=role),
AttributeStep('items', role=role),
FieldStep(db_role_friends_field, role=role),
AttributeStep('items', role=role),
ItemStep(1, role=role)
]
assert e.message == 'None is not resolvable'
assert ("Steps: Users -> DocumentBackend.properties['users'] -> "
"ArrayField.items -> DocumentField -> User -> "
"DocumentBackend.properties['friends'] -> ArrayField.items -> "
"ArrayField.items[1]") in str(e)
def test_array_field():
f = ArrayField(items=())
with pytest.raises(SchemaGenerationException) as e:
f.get_schema()
assert list(e.value.steps) == [FieldStep(f), AttributeStep('items')]
f = ArrayField(items=(
Var({'role_x': StringField()}),
Var({'role_x': IntField()}),
))
role = 'role_y'
with pytest.raises(SchemaGenerationException) as e:
f.get_schema(role='role_y')
assert list(e.value.steps) == [FieldStep(f, role=role), AttributeStep('items', role=role)]
f = ArrayField(items=(None, None))
with pytest.raises(SchemaGenerationException) as e:
f.get_schema()
assert list(e.value.steps) == [FieldStep(f), AttributeStep('items'), ItemStep(0)]
f = ArrayField(items=object())
with pytest.raises(SchemaGenerationException) as e:
f.get_schema()
assert list(e.value.steps) == [FieldStep(f), AttributeStep('items')]
f = ArrayField(additional_items=object())
with pytest.raises(SchemaGenerationException) as e:
f.get_schema()
assert list(e.value.steps) == [FieldStep(f), AttributeStep('additional_items')]
f = ArrayField(items=FieldStub())
with pytest.raises(SchemaGenerationException) as e:
f.get_schema()
e = e.value
assert e.message == FieldStub.ERROR_MESSAGE
assert list(e.steps) == [FieldStep(f), AttributeStep('items')]
f = ArrayField(items=(FieldStub(),))
with pytest.raises(SchemaGenerationException) as e:
f.get_schema()
e = e.value
assert e.message == FieldStub.ERROR_MESSAGE
assert list(e.steps) == [FieldStep(f), AttributeStep('items'), ItemStep(0)]
f = ArrayField(additional_items=FieldStub())
with pytest.raises(SchemaGenerationException) as e:
f.get_schema()
e = e.value
assert e.message == FieldStub.ERROR_MESSAGE
assert list(e.steps) == [FieldStep(f), AttributeStep('additional_items')]
def test_dict_field():
f = DictField(properties={'a': object()})
with pytest.raises(SchemaGenerationException) as e:
f.get_schema()
e = e.value
assert 'not resolvable' in e.message
assert list(e.steps) == [FieldStep(f), AttributeStep('properties'), ItemStep('a')]
f = DictField(pattern_properties={'a.*': object()})
with pytest.raises(SchemaGenerationException) as e:
f.get_schema()
e = e.value
assert 'not resolvable' in e.message
assert list(e.steps) == [FieldStep(f), AttributeStep('pattern_properties'), ItemStep('a.*')]
f = DictField(additional_properties=object())
with pytest.raises(SchemaGenerationException) as e:
f.get_schema()
e = e.value
assert 'not a BaseField or a bool' in e.message
assert list(e.steps) == [FieldStep(f), AttributeStep('additional_properties')]
f = DictField(properties={'a': FieldStub()})
with pytest.raises(SchemaGenerationException) as e:
f.get_schema()
e = e.value
assert e.message == FieldStub.ERROR_MESSAGE
assert list(e.steps) == [FieldStep(f), AttributeStep('properties'), ItemStep('a')]
f = DictField(pattern_properties={'a.*': FieldStub()})
with pytest.raises(SchemaGenerationException) as e:
f.get_schema()
e = e.value
assert e.message == FieldStub.ERROR_MESSAGE
assert list(e.steps) == [FieldStep(f), AttributeStep('pattern_properties'), ItemStep('a.*')]
f = DictField(additional_properties=FieldStub())
with pytest.raises(SchemaGenerationException) as e:
f.get_schema()
e = e.value
assert e.message == FieldStub.ERROR_MESSAGE
assert list(e.steps) == [FieldStep(f), AttributeStep('additional_properties')]
for kwarg_value in (object(), Var({'role_x': object()})):
for kwarg in ('properties', 'pattern_properties'):
f = DictField(**{kwarg: kwarg_value})
with pytest.raises(SchemaGenerationException) as e:
f.get_schema(role='role_x')
e = e.value
assert 'not a dict' in e.message
assert list(e.steps) == [FieldStep(f, role='role_x'),
AttributeStep(kwarg, role='role_x')]
f = DictField(additional_properties=kwarg_value)
with pytest.raises(SchemaGenerationException) as e:
f.get_schema(role='role_x')
e = e.value
assert 'not a BaseField or a bool' in e.message
assert list(e.steps) == [FieldStep(f, role='role_x'),
AttributeStep('additional_properties', role='role_x')]
f = DictField(pattern_properties={'((((': StringField()})
with pytest.raises(SchemaGenerationException) as e:
f.get_schema()
e = e.value
assert 'unbalanced parenthesis' in e.message
assert list(e.steps) == [FieldStep(f), AttributeStep('pattern_properties')]
@pytest.mark.parametrize('field_cls', [OneOfField, AnyOfField, AllOfField])
def test_keyword_of_fields(field_cls):
f = field_cls(object())
with pytest.raises(SchemaGenerationException) as e:
f.get_schema()
e = e.value
assert 'not a list or a tuple' in e.message
assert list(e.steps) == [FieldStep(f), AttributeStep('fields')]
f = field_cls([])
with pytest.raises(SchemaGenerationException) as e:
f.get_schema()
e = e.value
assert 'empty' in e.message
assert list(e.steps) == [FieldStep(f), AttributeStep('fields')]
f = field_cls([object()])
with pytest.raises(SchemaGenerationException) as e:
f.get_schema()
e = e.value
assert 'not resolvable' in e.message
assert list(e.steps) == [FieldStep(f), AttributeStep('fields'), ItemStep(0)]
role = 'role_x'
f = field_cls([Var({role: object()})])
with pytest.raises(SchemaGenerationException) as e:
f.get_schema(role=role)
e = e.value
assert 'not a BaseField' in e.message
assert list(e.steps) == [FieldStep(f, role),
AttributeStep('fields', role),
ItemStep(0, role)]
with pytest.raises(SchemaGenerationException) as e:
f.get_schema()
e = e.value
assert 'empty' in e.message
assert list(e.steps) == [FieldStep(f), AttributeStep('fields')]
# test nested field errors
f = field_cls([FieldStub()])
with pytest.raises(SchemaGenerationException) as e:
f.get_schema()
e = e.value
assert e.message == FieldStub.ERROR_MESSAGE
assert list(e.steps) == [FieldStep(f), AttributeStep('fields'), ItemStep(0)]
def test_not_field():
for f in [NotField(object()), NotField(Var({'role_x': object()}))]:
with pytest.raises(SchemaGenerationException) as e:
f.get_schema()
e = e.value
assert 'not a BaseField' in e.message
assert list(e.steps) == [FieldStep(f), AttributeStep('field')]
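# Editor's illustrative sketch (not part of the original test suite): the tests
# above all exercise failure paths; for contrast, a well-formed document should
# generate its schema without raising SchemaGenerationException. The Point
# document below is hypothetical.
def test_wellformed_document_does_not_raise():
    class Point(Document):
        x = IntField()
        y = IntField()
    schema = Point.get_schema()
    assert isinstance(schema, dict)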
|
6dd9897eac8e1c887a8d97e42a06b39249883ebc
|
0760fb4901a75766921a205b55686d6d6f049b30
|
/python/ray/_private/thirdparty/dacite/data.py
|
560cdf8beddb082122f26bb548329ec95f23581d
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
ray-project/ray
|
a4bb6940b08b59a61ef0b8e755a52d8563a2f867
|
edba68c3e7cf255d1d6479329f305adb7fa4c3ed
|
refs/heads/master
| 2023-08-31T03:36:48.164405
| 2023-08-31T03:20:38
| 2023-08-31T03:20:38
| 71,932,349
| 29,482
| 5,669
|
Apache-2.0
| 2023-09-14T21:48:14
| 2016-10-25T19:38:30
|
Python
|
UTF-8
|
Python
| false
| false
| 52
|
py
|
data.py
|
from typing import Dict, Any
Data = Dict[str, Any]
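# Editor's illustrative sketch (not part of the original module): Data is just a
# name for an untyped, string-keyed mapping. The payload below is hypothetical.
if __name__ == "__main__":
    example: Data = {"name": "widget", "count": 3}
    print(sorted(example))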
|
55bc350ad05e6f66b2abfbdab159f44896e9806c
|
df7b40e95718ac0f6071a0ba571b42efc81cf6de
|
/configs/_base_/models/cd_vit.py
|
c1513db0484f790549fbc12919b5b42b4a6892bc
|
[
"Apache-2.0"
] |
permissive
|
shinianzhihou/ChangeDetection
|
87fa2c498248e6124aeefb8f0ee8154bda36deee
|
354e71234bef38b6e142b6ba02f23db958582844
|
refs/heads/master
| 2023-01-23T20:42:31.017006
| 2023-01-09T11:37:24
| 2023-01-09T11:37:24
| 218,001,748
| 162
| 29
|
Apache-2.0
| 2022-11-03T04:11:00
| 2019-10-28T08:41:54
|
Python
|
UTF-8
|
Python
| false
| false
| 934
|
py
|
cd_vit.py
|
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
backbone=dict(
type='CDVit',
backbone_choice='resnet18',
num_images=2,
image_size=224,
feature_size=28,
patch_size=4,
in_channels=128,
out_channels=32,
encoder_dim=512,
encoder_heads=8,
encoder_dim_heads=64,
encoder_depth=4,
attn_dropout=0.0,
ff_dropout=0.0),
decode_head=dict(
type='CDVitHead',
in_channels=64,
in_index=0,
channels=32,
num_convs=2,
concat_input=False,
dropout_ratio=0.1,
num_classes=2,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='whole'))
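# Editor's illustrative note (not part of the original config): in the
# mmcv/mmseg-style config system this file follows, a top-level experiment
# config would usually inherit this model definition via _base_, e.g.
# (hypothetical path):
# _base_ = ['../_base_/models/cd_vit.py']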
|
167e69a0be7d29e02306f233ec68378bdb0faa44
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/refactoring/inlineFunction/simple/main.after.py
|
6427fa328e8f754f6c81e520ea3b26cfb5094f40
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 81
|
py
|
main.after.py
|
def foo(arg):
local = arg + arg
return local
local = 42 + 42
x = local
|
6f4133446d32f954cabcdcbfd9546a690a378d09
|
975b2d421d3661e6770b601929d5f11d981d8985
|
/msgraph/generated/models/notification_template_branding_options.py
|
6813530914d5dad4525c1ebf947de9810efb3c97
|
[
"MIT"
] |
permissive
|
microsoftgraph/msgraph-sdk-python
|
a7c551b85daadeebf76ec4ae12668664ea639b42
|
27de7ccbe688d7614b2f6bde0fdbcda4bc5cc949
|
refs/heads/main
| 2023-09-03T21:45:27.989672
| 2023-08-31T06:22:18
| 2023-08-31T06:22:18
| 534,665,999
| 135
| 18
|
MIT
| 2023-09-14T11:04:11
| 2022-09-09T14:00:17
|
Python
|
UTF-8
|
Python
| false
| false
| 889
|
py
|
notification_template_branding_options.py
|
from enum import Enum
class NotificationTemplateBrandingOptions(str, Enum):
# Indicates that no branding options are set in the message template.
None_ = "none",
# Indicates to include company logo in the message template.
IncludeCompanyLogo = "includeCompanyLogo",
# Indicates to include company name in the message template.
IncludeCompanyName = "includeCompanyName",
# Indicates to include contact information in the message template.
IncludeContactInformation = "includeContactInformation",
# Indicates to include company portal website link in the message template.
IncludeCompanyPortalLink = "includeCompanyPortalLink",
# Indicates to include device details in the message template.
IncludeDeviceDetails = "includeDeviceDetails",
# Evolvable enumeration sentinel value. Do not use.
UnknownFutureValue = "unknownFutureValue",
|
815136b98459d0a622518645be06bf0fc81f3703
|
de0c523399eaf24544f7c0cbd61fe8d3c69c84f8
|
/custom_mutators/examples/wrapper_afl_min.py
|
5cd6003188e714f864629bf64060aed0b16eb369
|
[
"Apache-2.0"
] |
permissive
|
AFLplusplus/AFLplusplus
|
d45b697b8f2f767d29f4d886a2f9c5bd9ad6c032
|
11a622c63d70c9ca16c9847418b88992139aa892
|
refs/heads/stable
| 2023-09-01T16:24:45.928708
| 2023-08-31T12:51:42
| 2023-08-31T12:51:42
| 189,044,704
| 3,470
| 872
|
Apache-2.0
| 2023-09-13T13:56:14
| 2019-05-28T14:29:06
|
C
|
UTF-8
|
Python
| false
| false
| 3,073
|
py
|
wrapper_afl_min.py
|
#!/usr/bin/env python
from XmlMutatorMin import XmlMutatorMin
# Default settings (production mode)
__mutator__ = None
__seed__ = "RANDOM"
__log__ = False
__log_file__ = "wrapper.log"
# AFL functions
def log(text):
"""
Logger
"""
global __seed__
global __log__
global __log_file__
if __log__:
with open(__log_file__, "a") as logf:
logf.write("[%s] %s\n" % (__seed__, text))
def init(seed):
"""
Called once when AFL starts up. Seed is used to identify the AFL instance in log files
"""
global __mutator__
global __seed__
# Get the seed
__seed__ = seed
# Create a global mutation class
try:
__mutator__ = XmlMutatorMin(__seed__, verbose=__log__)
log("init(): Mutator created")
except RuntimeError as e:
log("init(): Can't create mutator: %s" % e.message)
def fuzz(buf, add_buf, max_size):
"""
Called for each fuzzing iteration.
"""
global __mutator__
# Do we have a working mutator object?
if __mutator__ is None:
log("fuzz(): Can't fuzz, no mutator available")
return buf
# Try to use the AFL buffer
via_buffer = True
# Interpret the AFL buffer (an array of bytes) as a string
if via_buffer:
try:
buf_str = str(buf)
log("fuzz(): AFL buffer converted to a string")
except Exception:
via_buffer = False
log("fuzz(): Can't convert AFL buffer to a string")
# Load XML from the AFL string
if via_buffer:
try:
__mutator__.init_from_string(buf_str)
log(
"fuzz(): Mutator successfully initialized with AFL buffer (%d bytes)"
% len(buf_str)
)
except Exception:
via_buffer = False
log("fuzz(): Can't initialize mutator with AFL buffer")
    # If init from AFL buffer wasn't successful
if not via_buffer:
log("fuzz(): Returning unmodified AFL buffer")
return buf
    # Successful initialization -> mutate
try:
__mutator__.mutate(max=5)
log("fuzz(): Input mutated")
except Exception:
log("fuzz(): Can't mutate input => returning buf")
return buf
    # Convert mutated data to an array of bytes
try:
data = bytearray(__mutator__.save_to_string())
log("fuzz(): Mutated data converted as bytes")
except Exception:
log("fuzz(): Can't convert mutated data to bytes => returning buf")
return buf
# Everything went fine, returning mutated content
log("fuzz(): Returning %d bytes" % len(data))
return data
# Main (for debug)
if __name__ == "__main__":
__log__ = True
__log_file__ = "/dev/stdout"
__seed__ = "RANDOM"
init(__seed__)
in_1 = bytearray(
"<foo ddd='eeee'>ffff<a b='c' d='456' eee='ffffff'>zzzzzzzzzzzz</a><b yyy='YYY' zzz='ZZZ'></b></foo>"
)
in_2 = bytearray("<abc abc123='456' abcCBA='ppppppppppppppppppppppppppppp'/>")
    out = fuzz(in_1, in_2, 4096)  # max_size value is arbitrary for this debug run
print(out)
|
49d0ec0d7de01a857191c8e5a51651bf60ec7d32
|
20f125a17856c1251727314c571091a59bc770f0
|
/Chapter 09/9.09_chat_server.py
|
c8ef4ea7ab2b40f5c98598d071c3c577d22a79ea
|
[
"MIT"
] |
permissive
|
PacktPublishing/Tkinter-GUI-Application-Development-Blueprints-Second-Edition
|
310983285d54c59bdd02e69b9a913aa9372c869a
|
1e160c0575028e446295c121a84142164ee5ced2
|
refs/heads/master
| 2023-07-10T05:34:39.159752
| 2023-01-30T09:20:16
| 2023-01-30T09:20:16
| 123,231,531
| 142
| 94
|
MIT
| 2023-07-03T23:09:32
| 2018-02-28T04:59:53
|
Python
|
UTF-8
|
Python
| false
| false
| 1,777
|
py
|
9.09_chat_server.py
|
"""
Code illustration: 9.09
Chat server
Tkinter GUI Application Development Blueprints
"""
import socket
import threading
class ChatServer:
clients_list = []
last_received_message = ""
def __init__(self):
self.create_listening_server()
def create_listening_server(self):
self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
local_ip = '127.0.0.1'
local_port = 10319
self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.server_socket.bind((local_ip, local_port))
print("Listening for incoming messages..")
self.server_socket.listen(5)
self.receive_messages_in_a_new_thread()
def receive_messages(self, so):
while True:
incoming_buffer = so.recv(256)
if not incoming_buffer:
break
self.last_received_message = incoming_buffer.decode('utf-8')
self.broadcast_to_all_clients(so)
so.close()
    def broadcast_to_all_clients(self, senders_socket):
        for client in self.clients_list:
            client_socket, (ip, port) = client
            if client_socket is not senders_socket:
                client_socket.sendall(self.last_received_message.encode('utf-8'))
def receive_messages_in_a_new_thread(self):
while 1:
client = so, (ip, port) = self.server_socket.accept()
self.add_to_clients_list(client)
print ('Connected to ', ip, ':', str(port))
t = threading.Thread(target=self.receive_messages, args=(so,))
t.start()
def add_to_clients_list(self, client):
if client not in self.clients_list:
self.clients_list.append(client)
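# Editor's illustrative sketch (not part of the original example): a minimal
# client for manually exercising the server above. Host and port mirror the
# values hard-coded in create_listening_server(); run it from another process.
def example_client(message="hello from a test client"):
    client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client_socket.connect(('127.0.0.1', 10319))
    client_socket.sendall(message.encode('utf-8'))
    client_socket.close()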
if __name__ == "__main__":
ChatServer()
|
f42190bc172ba7887797e613eacd2baaace5d765
|
7f24023d365e013ec0924844c1a872edfb0c75b4
|
/tests/trac/test-issue-0043.py
|
b849824ca9672211dae999f73ea1be87d74349ed
|
[
"Python-2.0",
"MIT",
"Apache-2.0"
] |
permissive
|
pabigot/pyxb
|
cd42c024607572c6363682d389e9296caf3f2857
|
5ee5ba54c9f702dc9c9efc2731ee547ecd4dae4a
|
refs/heads/next
| 2023-05-11T03:23:19.599756
| 2023-04-29T20:38:15
| 2023-04-29T20:45:13
| 20,547,850
| 130
| 63
|
Apache-2.0
| 2021-08-19T16:52:18
| 2014-06-06T01:49:03
|
Python
|
UTF-8
|
Python
| false
| false
| 877
|
py
|
test-issue-0043.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
import pyxb.binding.generate
import pyxb.binding.datatypes as xs
import pyxb.binding.basis
import pyxb.utils.domutils
from pyxb.exceptions_ import *
import unittest
class TestIssue0043 (unittest.TestCase):
def testFloat (self):
self.assertEqual('INF', xs.float('Infinity').xsdLiteral())
self.assertEqual('-INF', xs.float('-Infinity').xsdLiteral())
self.assertEqual('NaN', xs.float('nan').xsdLiteral())
def testDouble (self):
self.assertEqual('INF', xs.double('Infinity').xsdLiteral())
self.assertEqual('-INF', xs.double('-Infinity').xsdLiteral())
self.assertEqual('NaN', xs.double('nan').xsdLiteral())
if __name__ == '__main__':
unittest.main()
|
1219520caeaad38b6288b62d2a4b7ffd503594e7
|
171cff4f64d99807199c830aed3863d0ac191de4
|
/examples/django_nyc_demo/noodles/views.py
|
7c4f398eab14638338c4274d068c7f48eea0a148
|
[
"MIT"
] |
permissive
|
hendrix/hendrix
|
5e75bd61da2636659788c16e47b1172fe4d71f9c
|
abab0984cfd82b4513bebb05252e28c7df4cccd7
|
refs/heads/main
| 2023-08-30T03:58:34.636196
| 2022-11-17T15:48:57
| 2022-11-17T15:48:57
| 12,119,499
| 311
| 42
|
MIT
| 2023-02-16T07:00:13
| 2013-08-14T21:06:32
|
Python
|
UTF-8
|
Python
| false
| false
| 805
|
py
|
views.py
|
import time
from django.shortcuts import render
from hendrix.experience import crosstown_traffic, hey_joe
def my_noodles(request):
llama = "Another noodle"
@crosstown_traffic()
def my_long_thing():
for i in range(5):
print("another noodle on the python console")
time.sleep(1)
hey_joe.send(llama, topic="noodly_messages")
hey_joe.broadcast("Notice to everybody: finished noodling.")
if request.META.get("wsgi.url_scheme") == "https":
websocket_prefix = "wss"
websocket_port = 9443
else:
websocket_prefix = "ws"
websocket_port = 9000
return render(request, 'noodles.html', {"websocket_prefix": websocket_prefix,
"websocket_port": websocket_port})
|
6513a614abf5678c971b66e78f1f7a44df52740d
|
6c29f457a5e787309b344fec53c133845d8985e8
|
/fairscale/experimental/nn/distributed_pipeline/partition_handler.py
|
9ef0d919a306a852290dd13ff998d2ac6e675d0a
|
[
"Apache-2.0",
"MIT",
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
facebookresearch/fairscale
|
eb378e44cca951e242fb58f82522d9ba8e87d732
|
164cc0f3170b4a3951dd84dda29c3e1504ac4d6e
|
refs/heads/main
| 2023-09-04T12:48:14.924836
| 2023-04-20T03:41:53
| 2023-04-20T03:41:53
| 277,899,703
| 2,553
| 257
|
NOASSERTION
| 2023-08-28T19:02:48
| 2020-07-07T19:02:01
|
Python
|
UTF-8
|
Python
| false
| false
| 12,228
|
py
|
partition_handler.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from threading import Condition
from types import TracebackType
from typing import Dict, List, Optional, Tuple, Type, Union, cast
import torch
from torch import Tensor
from torch.autograd.profiler import record_function
from torch.distributed import rpc
from fairscale.nn.pipe import microbatch
from fairscale.nn.pipe.checkpoint import Checkpointing, TensorOrTensors
from fairscale.nn.pipe.dependency import fork, join
from fairscale.nn.pipe.microbatch import Batch
from fairscale.nn.pipe.stream import as_cuda, current_stream, is_cuda, use_device, use_stream
from fairscale.nn.pipe.worker import Task, create_workers
from .data import DataConsumer
ExcInfo = Tuple[Type[BaseException], BaseException, TracebackType]
class DistributedPipelineRecord:
"""A class for storing a single mini-batch (consisting of multiple micro-batches) as input to
a single partition.
Args:
device: the local device that runs the partition.
rank: the rank of the partition in the pipeline.
chunks: number of micro-batches in a mini-batch
num_inputs: number of inputs to the partition.
consumers: list of consumers of outputs of the partition. Each consumer in the list is a tuple
(remote_partition_rref, input_idx, output_idx) where remote_partition_rref points to a
            remote DistributedPipelineRecord for the consumer partition for this mini-batch. The output number
output_idx of this partition will be used as the input number input_idx of that partition.
"""
# Need to use Union due to https://github.com/python/mypy/issues/7866
DataConsumer = Union[DataConsumer[rpc.RRef]]
def __init__(
self,
device: torch.device,
rank: int,
chunks: int,
num_inputs: int,
num_outputs: Optional[int],
consumers: List[DataConsumer],
) -> None:
self.ready_cv = Condition()
# Each chunk consists of num_inputs tensors. self.tensors stores these individual tensors.
self.tensors: List[List[Optional[Tensor]]] = [[None] * num_inputs for _ in range(chunks)]
        # For each tensor in self.tensors, we record a cuda event in the corresponding tensorpipe stream in self.recv_events,
        # and later the stream that processes that tensor will wait on that event.
        self.recv_events = [[None] * num_inputs for _ in range(chunks)]
        # Once all num_inputs tensors of a given chunk are received, they are assembled as a batch and stored in
# self.batches
self.batches: List[Optional[Batch]] = [None] * chunks
# For each tensor of each chunk, we fork a phony tensor, which will be used for injecting dependency between
# different chunks in backward path.
if num_outputs is None:
num_outputs = 1
self.forwarded_phony: List[List[List[rpc.RRef]]] = [[[] for j in range(num_outputs)] for i in range(chunks)]
self.consumers = consumers
self.rank = rank
self.device = device
def __getstate__(self) -> Dict:
# avoid pickling failure.
return {}
def feed(self, chunk: int, input_idx: int, input: Tensor) -> Tensor:
"""This function is called remotely to provide individual tensors of a given chunk."""
if input.device.type == "cpu":
input = input.to(self.device)
cuda_stream = torch.cuda.current_stream(input.device) if input.device.type == "cuda" else None
with self.ready_cv:
assert self.tensors[chunk][input_idx] is None
input, phony = fork(input)
self.recv_events[chunk][input_idx] = (
cuda_stream.record_event() if cuda_stream is not None else None # type: ignore
)
self.tensors[chunk][input_idx] = input
self.ready_cv.notify_all()
return phony
def wait_for(self, chunk: int) -> None:
"""Waits until all elements of given chunk is populated in self.tensors.
Then it constructs self.batches[chunk] if it is not constructed yet.
"""
with self.ready_cv:
while self.batches[chunk] is None and any(b is None for b in self.tensors[chunk]):
self.ready_cv.wait()
if self.batches[chunk] is None:
tensors = cast(List[Tensor], self.tensors[chunk])
self.batches[chunk] = Batch(tuple(tensors), chunk)
def fence(self, chunk: int) -> None:
"""Prepares micro-batches for computation."""
# Ensure that batches[chunk-1] is executed after batches[chunk] in
# backpropagation by an explicit dependency.
# TODO: This dependency injection causes deadlock if this partition
# gets its input from model input. 1) Figure out why 2) If we need to live
# with this constraint, replace the condition 'self.rank > 0' below with
# a more accurate one.
if chunk != 0 and self.consumers and self.rank > 0:
batch = self.batches[chunk]
assert batch is not None
dependant_tensors = list(batch.tensors)
for remote_ph_list in self.forwarded_phony[chunk - 1]:
for remote_ph in remote_ph_list:
phony = remote_ph.to_here()
dependant_tensors[0] = join(dependant_tensors[0], phony)
self.batches[chunk] = Batch(tuple(dependant_tensors), chunk)
def sync_stream(self, chunk: int, stream: torch.cuda.Stream) -> None:
"""syncs the stream with cuda events associated with transmission of the chunck to the cuda device."""
for e in self.recv_events[chunk]:
if e is not None:
stream.wait_event(e)
def forward_results(self, chunk: int) -> None:
"""Forward outputs of processing the chunk in this parition for processing by next partition."""
for consumer in self.consumers:
v = self.get_batch(chunk).value[consumer.output_idx]
self.forwarded_phony[chunk][consumer.output_idx].append(
consumer.consumer.remote().feed(chunk, consumer.consumer_input_idx, v)
)
def get_batch(self, chunk: int) -> Batch:
batch = self.batches[chunk]
assert batch is not None
return batch
class PartitionHandler:
"""This class processes a single partition of the pipeline.
Args:
module_rref: RRef to the nn.Module for this partition. It should be on the local rpc worker.
device: The device that holds the module.
        num_inputs: Number of inputs to the module
        num_outputs: Number of outputs of the module. If the module output is not a tuple (and it is a
            single tensor), num_outputs should be None.
        rank: The rank of the partition
        chunks: Number of micro-batches in a mini-batch
        checkpoint_stop: Checkpointing is done only for the first checkpoint_stop chunks of a mini-batch.
"""
def __init__(
self,
module_rref: rpc.RRef,
device: str,
num_inputs: int,
num_outputs: Optional[int],
rank: int,
chunks: int,
checkpoint_stop: int,
) -> None:
self.module = module_rref.local_value()
self.chunks = chunks
self.device = torch.device(device)
self.checkpoint_stop = checkpoint_stop
self.rank = rank
self.num_inputs = num_inputs
self.num_outputs = num_outputs
(self.in_queue,), (self.out_queue,) = create_workers([self.device])
def __getstate__(self) -> Dict:
# avoid pickling failure.
return {}
def local_parameter_rrefs(self) -> List[rpc.RRef]:
r"""
Create one RRef for each parameter in the given local module, and return a
list of RRefs.
"""
return [rpc.RRef(p) for p in self.module.parameters()]
def make_pipeline_record(self, consumers: List[DataConsumer]) -> DistributedPipelineRecord:
return DistributedPipelineRecord(
self.device, self.rank, self.chunks, self.num_inputs, self.num_outputs, consumers
)
def run(self, pipeline_record: DistributedPipelineRecord) -> None:
"""Runs pipeline parallelism. It modifies the given batches in place."""
m = len(pipeline_record.batches)
self.stream = current_stream(self.device)
for chunk in range(m):
with record_function("feed"):
pipeline_record.wait_for(chunk)
pipeline_record.fence(chunk)
self.compute(pipeline_record, chunk)
with use_stream(self.stream):
pipeline_record.forward_results(chunk)
def compute(self, pipeline_record: DistributedPipelineRecord, chunk: int) -> None:
"""Runs tasks with synchronization to tensor-pipe streams."""
checkpoint_stop = self.checkpoint_stop
# Disable checkpointing if in eval mode.
if not self.module.training:
checkpoint_stop = 0
exc_info: Optional[ExcInfo] = None
batch = pipeline_record.get_batch(chunk)
if is_cuda(self.stream):
pipeline_record.sync_stream(chunk, as_cuda(self.stream))
# Determine whether checkpointing or not.
checkpoint = chunk < checkpoint_stop
if checkpoint:
def function(input: TensorOrTensors, chunk_id: int = chunk) -> TensorOrTensors:
with record_function("chunk%d-rank%d" % (chunk_id, pipeline_record.rank)):
result = self.module(*input)
if self.num_outputs is None:
result = (result,)
return tuple(result)
chk = Checkpointing(function, batch)
task = Task(self.stream, compute=chk.checkpoint, finalize=chk.recompute)
del function, chk
else:
def compute(
batch: Batch = batch,
chunk_id: int = chunk,
rank: int = pipeline_record.rank if pipeline_record is not None else -1,
) -> Batch:
with record_function("chunk%d-rank%d" % (chunk_id, pipeline_record.rank)):
result = self.module(*batch.tensors)
if self.num_outputs is None:
result = (result,)
return Batch(result, chunk_id)
task = Task(self.stream, compute=compute, finalize=None)
del compute
self.in_queue.put(task)
ok, payload = self.out_queue.get()
# Hold the first exception.
if exc_info is not None:
pass
elif not ok:
exc_info = cast(ExcInfo, payload)
else:
task, batch = cast(Tuple[Task, Batch], payload)
with use_device(self.device):
task.finalize(batch)
pipeline_record.batches[chunk] = batch
if exc_info is not None:
raise exc_info[0].with_traceback(exc_info[1], exc_info[2])
def run_pipeline(self, pipeline_record_rref: rpc.RRef) -> Optional[Tensor]:
"""Processes a min-batch on this partition.
If this is the last partition (pipeline_record has no consumer), concatenates results of processing
all chunks and returns the result as the output of the model on the whole mini-batch.
"""
pipeline_record = pipeline_record_rref.local_value()
self.run(pipeline_record)
result: Optional[Tensor] = None
if not pipeline_record.consumers:
gather_result = microbatch.gather(pipeline_record.batches)
assert len(gather_result) == 1
result = gather_result[0]
s0 = current_stream(result.device)
if is_cuda(s0):
# TODO. Investigate why this is needed and remove it if possible.
as_cuda(s0).synchronize()
        # TODO: There seems to be a memory leak that is solved by the following line.
        # Investigate why it is needed.
del pipeline_record.batches
return result
|
cab3f9d5b7db6354f1fe9b897fd5f000cd96225b
|
1154075d79d5d24aa4093daf3f886b5a7e446a30
|
/dnachisel/builtin_specifications/AvoidBlastMatches.py
|
3069d018acdd28b4231703721ede5dfa5cd4942f
|
[
"MIT"
] |
permissive
|
Edinburgh-Genome-Foundry/DnaChisel
|
8548f48aec9d2f57b25bb301dda476e48696e27f
|
4be08c2696710387ae6d5b0346e6680a69d88c64
|
refs/heads/master
| 2023-08-28T12:50:05.308622
| 2023-06-06T15:30:28
| 2023-06-06T15:30:28
| 102,761,506
| 172
| 44
|
MIT
| 2023-05-30T02:20:23
| 2017-09-07T16:37:36
|
Python
|
UTF-8
|
Python
| false
| false
| 4,665
|
py
|
AvoidBlastMatches.py
|
"""Implementation of AvoidBlastMatches."""
from ..Specification import Specification, SpecEvaluation
# from .VoidSpecification import VoidSpecification
from ..biotools import blast_sequence
from ..Location import Location
class AvoidBlastMatches(Specification):
"""Enforce that the sequence has no BLAST matches with a given database.
WARNING: try using AvoidMatches instead, it is much better!!
    Uses NCBI Blast+. Only local BLAST is supported/tested for now.
Parameters
----------
blast_db
Path to a local BLAST database. These databases can be obtained with
NCBI's `makeblastdb`. Omit the extension, e.g. `ecoli_db/ecoli_db`.
word_size
Word size used by the BLAST algorithm
perc_identity
Minimal percentage of identity for BLAST matches. 100 means that only
perfect matches are considered.
num_alignments
      Number of alignments.
num_threads
Number of threads/CPU cores to use for the BLAST algorithm.
min_align_length
Minimal length that an alignment should have to be considered.
"""
priority = -2
best_possible_score = 0
blasts_paths = {}
def __init__(
self,
blast_db=None,
sequences=None,
word_size=4,
perc_identity=100,
num_alignments=100000,
num_threads=3,
min_align_length=20,
ungapped=True,
e_value=1e80,
culling_limit=1,
location=None,
):
"""Initialize."""
self.blast_db = blast_db
self.sequences = sequences
self.word_size = word_size
self.perc_identity = perc_identity
self.num_alignments = num_alignments
self.num_threads = num_threads
self.min_align_length = min_align_length
self.location = Location.from_data(location)
self.e_value = e_value
self.ungapped = ungapped
self.culling_limit = culling_limit
def initialized_on_problem(self, problem, role=None):
return self._copy_with_full_span_if_no_location(problem)
def evaluate(self, problem):
"""Score as (-total number of blast identities in matches)."""
location = self.location
if location is None:
location = Location(0, len(problem.sequence))
sequence = location.extract_sequence(problem.sequence)
blast_record = blast_sequence(
sequence,
blast_db=self.blast_db,
subject_sequences=self.sequences,
word_size=self.word_size,
perc_identity=self.perc_identity,
num_alignments=self.num_alignments,
num_threads=self.num_threads,
ungapped=self.ungapped,
e_value=self.e_value,
culling_limit=self.culling_limit,
task="megablast"
)
if isinstance(blast_record, list):
alignments = [
alignment
for rec in blast_record
for alignment in rec.alignments
]
else:
alignments = blast_record.alignments
query_hits = [
(
min(hit.query_start, hit.query_end) + location.start - 1,
max(hit.query_start, hit.query_end) + location.start,
1 - 2 * (hit.query_start > hit.query_end),
hit.identities,
)
for alignment in alignments
for hit in alignment.hsps
]
locations = sorted(
[
(start, end, ids)
for (start, end, strand, ids) in query_hits
if (end - start) >= self.min_align_length
]
)
score = -sum([ids for start, end, ids in locations])
locations = [Location(start, end) for start, end, ids in locations]
if locations == []:
return SpecEvaluation(
self, problem, score=1, message="Passed: no BLAST match found"
)
return SpecEvaluation(
self,
problem,
score=score,
locations=locations,
message="Failed - %s matches at %s" % (len(locations), locations),
)
def localized(self, location, problem=None, with_righthand=True):
"""Localize the evaluation."""
new_location = self.location.overlap_region(location)
if new_location is None:
return None
new_location = location.extended(
self.min_align_length - 1, right=with_righthand
)
return self.copy_with_changes(location=new_location)
def feature_label_parameters(self):
return [self.blast_db]
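# Editor's illustrative sketch (not part of the original module), assuming the
# public DnaChisel API (DnaOptimizationProblem / resolve_constraints) and a
# hypothetical local BLAST database path; adjust both before running.
def _example_usage():
    from dnachisel import DnaOptimizationProblem
    problem = DnaOptimizationProblem(
        sequence="ATGC" * 250,
        constraints=[AvoidBlastMatches(blast_db="ecoli_db/ecoli_db", min_align_length=15)],
    )
    problem.resolve_constraints()
    return problem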
|
60e3af325026fe247206f62eb1f0e7913914d895
|
5ef6c8d47864f471e26b9902d61f8c687e941f05
|
/src/genie/libs/parser/iosxe/tests/Traceroute/cli/equal/golden_output1_expected.py
|
dbc23cbf9d109a1a1524861e8543f5d12fdd7f40
|
[
"Apache-2.0"
] |
permissive
|
CiscoTestAutomation/genieparser
|
169c196558f1c1a0f0d10650876096f993224917
|
b531eff760b2e44cd69d7a2716db6f866907c239
|
refs/heads/master
| 2023-09-03T08:56:18.831340
| 2023-08-29T22:32:02
| 2023-08-29T22:32:02
| 131,621,824
| 247
| 409
|
Apache-2.0
| 2023-08-29T22:32:04
| 2018-04-30T16:51:50
|
Python
|
UTF-8
|
Python
| false
| false
| 3,332
|
py
|
golden_output1_expected.py
|
expected_output = {
"traceroute": {
"172.16.166.253": {
"address": "172.16.166.253",
"hops": {
"9": {
"paths": {
1: {
"address": "10.2.1.2",
"vrf_out_id": "2001",
"vrf_out_name": "blue",
"vrf_in_id": "1001",
"vrf_in_name": "red",
}
}
},
"4": {
"paths": {
1: {
"address": "192.168.15.1",
"label_info": {"MPLS": {"label": "24133", "exp": 0}},
"probe_msec": ["6", "7", "64"],
}
}
},
"5": {
"paths": {
1: {
"address": "10.80.241.86",
"label_info": {"MPLS": {"label": "24147", "exp": 0}},
"probe_msec": ["69", "65", "111"],
}
}
},
"8": {
"paths": {
1: {
"address": "10.1.1.2",
"vrf_out_id": "2001",
"vrf_out_name": "blue",
"vrf_in_id": "1001",
"vrf_in_name": "red",
}
}
},
"2": {
"paths": {
1: {
"address": "10.0.9.1",
"label_info": {"MPLS": {"label": "300678", "exp": 0}},
"probe_msec": ["177", "150", "9"],
}
}
},
"1": {
"paths": {
1: {
"address": "172.31.255.125",
"label_info": {"MPLS": {"label": "624", "exp": 0}},
"probe_msec": ["70", "200", "19"],
}
}
},
"6": {
"paths": {
1: {
"address": "10.90.135.110",
"label_info": {"MPLS": {"label": "24140", "exp": 0}},
"probe_msec": ["21", "4", "104"],
}
}
},
"3": {
"paths": {
1: {
"address": "192.168.14.61",
"label_info": {"MPLS": {"label": "302537", "exp": 0}},
"probe_msec": ["134", "1", "55"],
}
}
},
"7": {
"paths": {
1: {
"address": "172.31.166.10",
"probe_msec": ["92", "51", "148"],
}
}
},
},
}
}
}
|
ab1e53f342c28ddf924556db1bce28453817af4c
|
8ce4f36e0d1ea49ca21c648f660eb6cc7839e565
|
/testing/components/distributions/categorical_test.py
|
9c99267c302a325ea02744c1735195d7047f67fc
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
amzn/MXFusion
|
9f76783609303b555148be0dd7c5c48550e2b497
|
af6223e9636b055d029d136dd7ae023b210b4560
|
refs/heads/master
| 2023-04-13T18:35:57.454425
| 2019-05-30T06:43:07
| 2019-05-30T06:43:07
| 143,047,851
| 109
| 38
|
Apache-2.0
| 2023-04-10T09:52:55
| 2018-07-31T17:47:44
|
Python
|
UTF-8
|
Python
| false
| false
| 5,676
|
py
|
categorical_test.py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# ==============================================================================
import pytest
import mxnet as mx
import numpy as np
from mxfusion.components.variables.runtime_variable import add_sample_dimension, array_has_samples, get_num_samples
from mxfusion.components.distributions import Categorical
from mxfusion.util.testutils import numpy_array_reshape
from mxfusion.util.testutils import MockMXNetRandomGenerator
@pytest.mark.usefixtures("set_seed")
class TestCategoricalDistribution(object):
@pytest.mark.parametrize("dtype, log_prob, log_prob_isSamples, rv, rv_isSamples, num_samples, one_hot_encoding, normalization", [
(np.float64, np.random.rand(5,4,3)+1e-2, True, np.random.randint(0,3,size=(5,4,1)), True, 5, False, True),
(np.float64, np.random.rand(4,3)+1e-2, False, np.random.randint(0,3,size=(4,1)), False, 1, False, False),
(np.float64, np.random.rand(5,4,3)+1e-2, True, np.random.randint(0,3,size=(4,1)), False, 5, False, True),
(np.float64, np.random.rand(4,3)+1e-2, False, np.random.randint(0,3,size=(5,4,1)), True, 5, False, False),
(np.float64, np.random.rand(5,4,3)+1e-2, True, np.identity(3)[np.random.randint(0,3,size=(4,))], False, 5, True, True),
(np.float64, np.random.rand(4,3)+1e-2, False, np.identity(3)[np.random.randint(0,3,size=(5,4))], True, 5, True, False),
])
def test_log_pdf(self, dtype, log_prob, log_prob_isSamples, rv, rv_isSamples, num_samples, one_hot_encoding, normalization):
rv_shape = rv.shape[1:] if rv_isSamples else rv.shape
n_dim = 1 + len(rv.shape) if not rv_isSamples else len(rv.shape)
log_prob_np = numpy_array_reshape(log_prob, log_prob_isSamples, n_dim)
rv_np = numpy_array_reshape(rv, rv_isSamples, n_dim)
rv_full_shape = (num_samples,)+rv_shape
rv_np = np.broadcast_to(rv_np, rv_full_shape)
log_prob_np = np.broadcast_to(log_prob_np, rv_full_shape[:-1]+(3,))
if normalization:
log_pdf_np = np.log(np.exp(log_prob_np)/np.exp(log_prob_np).sum(-1, keepdims=True)).reshape(-1, 3)
else:
log_pdf_np = log_prob_np.reshape(-1, 3)
if one_hot_encoding:
log_pdf_np = (rv_np.reshape(-1, 3)*log_pdf_np).sum(-1).reshape(rv_np.shape[:-1])
else:
bool_idx = np.arange(3)[None,:] == rv_np.reshape(-1,1)
log_pdf_np = log_pdf_np[bool_idx].reshape(rv_np.shape[:-1])
cat = Categorical.define_variable(0, num_classes=3, one_hot_encoding=one_hot_encoding, normalization=normalization, shape=rv_shape, dtype=dtype).factor
log_prob_mx = mx.nd.array(log_prob, dtype=dtype)
if not log_prob_isSamples:
log_prob_mx = add_sample_dimension(mx.nd, log_prob_mx)
rv_mx = mx.nd.array(rv, dtype=dtype)
if not rv_isSamples:
rv_mx = add_sample_dimension(mx.nd, rv_mx)
variables = {cat.log_prob.uuid: log_prob_mx, cat.random_variable.uuid: rv_mx}
log_pdf_rt = cat.log_pdf(F=mx.nd, variables=variables)
assert np.issubdtype(log_pdf_rt.dtype, dtype)
assert get_num_samples(mx.nd, log_pdf_rt) == num_samples
assert np.allclose(log_pdf_np, log_pdf_rt.asnumpy())
@pytest.mark.parametrize(
"dtype, log_prob, log_prob_isSamples, rv_shape, num_samples, one_hot_encoding, normalization",[
(np.float64, np.random.rand(5,4,3)+1e-2, True, (4,1), 5, False, True),
(np.float64, np.random.rand(4,3)+1e-2, False, (4,1), 5, False, True),
(np.float64, np.random.rand(5,4,3)+1e-2, True, (4,3), 5, True, True),
(np.float64, np.random.rand(4,3)+1e-2, False, (4,3), 5, True, True),
])
def test_draw_samples(self, dtype, log_prob, log_prob_isSamples, rv_shape, num_samples, one_hot_encoding, normalization):
n_dim = 1 + len(rv_shape)
log_prob_np = numpy_array_reshape(log_prob, log_prob_isSamples, n_dim)
rv_full_shape = (num_samples,) + rv_shape
log_prob_np = np.broadcast_to(log_prob_np, rv_full_shape[:-1] + (3,))
rand_np = np.random.randint(0, 3, size=rv_full_shape[:-1])
rand_gen = MockMXNetRandomGenerator(mx.nd.array(rand_np.flatten(), dtype=dtype))
if one_hot_encoding:
rand_np = np.identity(3)[rand_np].reshape(*rv_full_shape)
else:
rand_np = np.expand_dims(rand_np, axis=-1)
rv_samples_np = rand_np
cat = Categorical.define_variable(0, num_classes=3, one_hot_encoding=one_hot_encoding, normalization=normalization, shape=rv_shape, rand_gen=rand_gen, dtype=dtype).factor
log_prob_mx = mx.nd.array(log_prob, dtype=dtype)
if not log_prob_isSamples:
log_prob_mx = add_sample_dimension(mx.nd, log_prob_mx)
variables = {cat.log_prob.uuid: log_prob_mx}
rv_samples_rt = cat.draw_samples(
F=mx.nd, variables=variables, num_samples=num_samples)
assert array_has_samples(mx.nd, rv_samples_rt)
assert get_num_samples(mx.nd, rv_samples_rt) == num_samples
assert np.allclose(rv_samples_np, rv_samples_rt.asnumpy())
|
bcc24c7c988cf60428b96f562c7634a6b8ab7832
|
ad635776bcd66a23e8047c292c3a30034034eb30
|
/datafiles/tests/test_converters.py
|
2aa91cbdfe337f6193236cf064debe6314bf849a
|
[
"MIT"
] |
permissive
|
jacebrowning/datafiles
|
535456db853157b514faf23a235099ada567292a
|
cb9de0f59bd437e78d867d77ba22a4020057af2d
|
refs/heads/main
| 2023-09-01T04:22:55.766516
| 2023-08-21T22:43:33
| 2023-08-21T22:43:34
| 137,429,206
| 180
| 21
|
MIT
| 2023-07-25T22:03:26
| 2018-06-15T01:58:39
|
Python
|
UTF-8
|
Python
| false
| false
| 16,339
|
py
|
test_converters.py
|
# pylint: disable=unused-variable
from dataclasses import dataclass
from enum import Enum
from typing import ByteString, Dict, List, Mapping, Optional, Set, TypedDict, Union
import pytest
from ruamel.yaml.scalarstring import LiteralScalarString
from datafiles import converters, settings
from . import xfail_without_pep_604, xfail_without_type_names
@dataclass
class MyDataclass:
foobar: int
flag: bool = False
@dataclass
class MyNestedDataclass:
name: str
dc: MyDataclass
class MyTypedDict(TypedDict):
title: str
salary: int
class MyNonDataclass:
pass
class MyCustomString:
pass
class Color(Enum):
RED = 1
GREEN = 2
BLUE = 3
IntegerList = converters.List.of_type(converters.Integer)
StringList = converters.List.of_type(converters.String)
MyDict = converters.Dictionary.of_mapping(converters.String, converters.Integer)
IntegerSet = converters.Set.of_type(converters.Integer)
StringSet = converters.Set.of_type(converters.String)
MyDataclassConverter = converters.map_type(MyDataclass)
MyDataclassConverterList = converters.map_type(List[MyDataclass])
MyTypedDictConverter = converters.map_type(MyTypedDict)
def describe_map_type():
def it_handles_extended_types(expect):
converter = converters.map_type(converters.Number)
expect(converter.__name__) == "Number"
def it_handles_list_annotations(expect):
converter = converters.map_type(List[str])
expect(converter.__name__) == "StringList"
expect(converter.CONVERTER) == converters.String
def it_handles_list_annotations_of_dataclasses(expect):
converter = converters.map_type(List[MyDataclass])
expect(converter.__name__) == "MyDataclassConverterList"
expect(converter.CONVERTER.__name__) == "MyDataclassConverter"
def it_requires_list_annotations_to_have_a_type(expect):
with expect.raises(TypeError, "Type is required with 'List' annotation"):
converters.map_type(List)
def it_handles_set_annotations(expect):
converter = converters.map_type(Set[str])
expect(converter.__name__) == "StringSet"
expect(converter.CONVERTER) == converters.String
def it_handles_set_annotations_of_dataclasses(expect):
converter = converters.map_type(Set[MyDataclass])
expect(converter.__name__) == "MyDataclassConverterSet"
expect(converter.CONVERTER.__name__) == "MyDataclassConverter"
def it_requires_set_annotations_to_have_a_type(expect):
with expect.raises(TypeError, "Type is required with 'Set' annotation"):
converters.map_type(Set)
def it_handles_dict_annotations(expect):
converter = converters.map_type(Dict[str, int])
expect(converter.__name__) == "StringIntegerDict"
def it_requires_dict_annotations_to_have_types(expect):
with expect.raises(TypeError, "Types are required with 'Dict' annotation"):
converters.map_type(Dict)
@xfail_without_type_names
def it_handles_typed_dict_annotations(expect):
converter = converters.map_type(MyTypedDict)
expect(converter.__name__) == "StringAnyDict"
def it_handles_abstract_mapping_types(expect):
converter = converters.map_type(Mapping[str, int])
expect(converter.__name__) == "StringIntegerDict"
def it_handles_dataclasses(expect):
converter = converters.map_type(MyDataclass)
expect(converter.__name__) == "MyDataclassConverter"
expect(converter.CONVERTERS) == {
"foobar": converters.Integer,
"flag": converters.Boolean,
}
@pytest.mark.parametrize(
("cls", "converter"),
[
(list, converters.List),
(dict, converters.Dictionary),
],
)
def it_handles_container_literals(expect, cls, converter):
expect(converters.map_type(cls)) == converter
def it_handles_enums(expect):
converter = converters.map_type(Color)
expect(converter.__name__) == "ColorConverter"
@xfail_without_pep_604
def it_handles_optionals(expect):
converter = converters.map_type(str | None) # type: ignore
expect(converter.__name__) == "OptionalString"
expect(converter.TYPE) == str
expect(converter.DEFAULT).is_(None)
def it_handles_optionals_with_legacy_syntax(expect):
converter = converters.map_type(Optional[str])
expect(converter.__name__) == "OptionalString"
expect(converter.TYPE) == str
expect(converter.DEFAULT).is_(None)
def it_handles_unions_with_strings(expect):
converter = converters.map_type(Union[int, str])
expect(converter.TYPE) == str
def it_handles_unions_with_numbers(expect):
converter = converters.map_type(Union[int, float])
expect(converter.TYPE) == float
def it_handles_string_type_annotations(expect):
converter = converters.map_type("float")
expect(converter.TYPE) == float
def it_handles_string_type_annotations_for_extensions(expect):
converter = converters.map_type("Number")
expect(converter.TYPE) == float
expect(converter.__name__) == "Number"
def it_rejects_unknown_types(expect):
with expect.raises(
TypeError,
"Could not map type: <class 'datafiles.tests.test_converters.MyNonDataclass'>",
):
converters.map_type(MyNonDataclass)
def it_rejects_non_types(expect):
with expect.raises(TypeError, "Annotation is not a type: 'foobar'"):
converters.map_type("foobar")
def it_rejects_unhandled_type_annotations(expect):
with expect.raises(
TypeError,
"Unsupported container type: <class 'collections.abc.ByteString'>",
):
converters.map_type(ByteString)
def describe_converter():
def describe_to_python_value():
@pytest.mark.parametrize(
"converter, data, value",
[
(converters.Boolean, "1", True),
(converters.Boolean, "0", False),
(converters.Boolean, "enabled", True),
(converters.Boolean, "disabled", False),
(converters.Boolean, "T", True),
(converters.Boolean, "F", False),
(converters.Boolean, "true", True),
(converters.Boolean, "false", False),
(converters.Boolean, "Y", True),
(converters.Boolean, "N", False),
(converters.Boolean, "yes", True),
(converters.Boolean, "no", False),
(converters.Boolean, "on", True),
(converters.Boolean, "off", False),
(converters.Boolean, 0, False),
(converters.Boolean, 1, True),
(converters.Float, 4, 4.0),
(converters.Integer, 4.2, 4),
(converters.String, 4.2, "4.2"),
(converters.String, 42, "42"),
(converters.String, True, "True"),
(converters.String, False, "False"),
],
)
def when_immutable(expect, converter, data, value):
expect(converter.to_python_value(data)) == value
@pytest.mark.parametrize(
"converter, data, value",
[
(IntegerList, [], []),
(IntegerList, "1, 2.3", [1, 2]),
(IntegerList, "42", [42]),
(IntegerList, 42, [42]),
(IntegerList, None, []),
(IntegerList, [42], [42]),
(IntegerList, [None], []),
(IntegerList, [None, None], []),
(MyDict, None, {}),
(MyDict, {}, {}),
(MyDict, {"a": 1}, {"a": 1}),
(IntegerSet, set(), set()),
(IntegerSet, "1, 1.1, 1.9, 2.3, 3", {1, 2, 3}),
(IntegerSet, "42", {42}),
(IntegerSet, 42, {42}),
(IntegerSet, None, set()),
(IntegerSet, [None], set()),
(MyDataclassConverter, None, MyDataclass(foobar=0)),
(MyDataclassConverter, MyDataclass(42), MyDataclass(foobar=42)),
(MyDataclassConverterList, None, []),
(MyDataclassConverterList, 42, [MyDataclass(foobar=0)]),
(MyTypedDictConverter, None, {}),
(MyTypedDictConverter, {}, {}),
(MyTypedDictConverter, {"a": 1}, {"a": 1}),
],
)
def when_mutable(expect, converter, data, value):
expect(converter.to_python_value(data, target_object=None)) == value
def when_number(expect):
convert = converters.Number.to_python_value
expect(convert(1.23)).isinstance(float)
expect(convert(42)).isinstance(int)
def when_text(expect):
convert = converters.Text.to_python_value
expect(convert("")) == ""
expect(convert("Hello, world!")) == "Hello, world!"
expect(convert("Line 1\nLine 2\n")) == "Line 1\nLine 2\n"
def when_enum(expect):
convert = converters.map_type(Color).to_python_value
expect(convert(1)).is_(Color.RED)
with expect.raises(ValueError):
convert(42)
def when_optional_enum(expect):
convert = converters.map_type(Optional[Color]).to_python_value
expect(convert(None)).is_(None)
def when_invalid(expect):
message = "invalid literal for int() with base 10: 'a'"
with expect.raises(ValueError, message):
converters.Integer.to_python_value("a")
def when_list_of_dataclasses(expect):
converter = converters.map_type(List[MyDataclass])
data = [{"foobar": 1}, {"foobar": 2}, {"foobar": 3}]
value = [MyDataclass(1), MyDataclass(2), MyDataclass(3)]
expect(converter.to_python_value(data, target_object=None)) == value
def with_existing_list(expect):
original = [1, 2]
value = IntegerList.to_python_value("3, 4", target_object=original)
expect(value) == [3, 4]
expect(id(value)) == id(original)
def when_existing_dict(expect):
original = {"a": 1}
value = MyDict.to_python_value({"b": 2}, target_object=original)
expect(value) == {"b": 2}
expect(id(value)) == id(original)
def with_existing_dataclass(expect):
original = MyDataclass(foobar=1)
value = MyDataclassConverter.to_python_value(
{"foobar": 2}, target_object=original
)
expect(value) == MyDataclass(foobar=2)
expect(id(value)) == id(original)
def describe_to_preserialization_data():
@pytest.mark.parametrize(
"converter, value, data",
[
# Builtins
(converters.Boolean, None, False),
(converters.Float, None, 0.0),
(converters.Integer, None, 0),
(converters.String, None, ""),
# Lists
(StringList, "ab", ["ab"]),
(StringList, ("b", 1, "A"), ["b", "1", "A"]),
(StringList, {"b", 1, "A"}, ["1", "A", "b"]),
(StringList, 42, ["42"]),
(StringList, [123, True, False], ["123", "True", "False"]),
(StringList, [], [None]),
(StringList, None, [None]),
# Dicts
(MyTypedDictConverter, None, {}),
(MyTypedDictConverter, {}, {}),
(MyTypedDictConverter, {"a": 1}, {"a": 1}),
# Sets
(StringSet, "ab", ["ab"]),
(StringSet, ("b", 1, "A"), ["b", "1", "A"]),
(StringSet, {"b", 1, "A"}, ["1", "A", "b"]),
(StringSet, 42, ["42"]),
(StringSet, [123, True, False], ["123", "True", "False"]),
(StringSet, [], [None]),
(StringSet, None, [None]),
# Dataclasses
(MyDataclassConverter, None, {"foobar": 0, "flag": False}),
(MyDataclassConverter, {"foobar": 42}, {"foobar": 42, "flag": False}),
(MyDataclassConverterList, None, [None]),
(MyDataclassConverterList, 42, [{"foobar": 0, "flag": False}]),
],
)
def when_nominal(expect, converter, value, data):
expect(converter.to_preserialization_data(value)) == data
def when_number(expect):
convert = converters.Number.to_preserialization_data
expect(convert(1.23)).isinstance(float)
expect(convert(42)).isinstance(int)
def when_text(expect):
convert = converters.Text.to_preserialization_data
expect(convert("")) == ""
expect(convert("Hello, world!")) == "Hello, world!"
expect(convert("Line 1\nLine 2")) == "Line 1\nLine 2\n"
expect(convert("Line 1\nLine 2")).isinstance(LiteralScalarString)
def when_enum(expect):
convert = converters.map_type(Color).to_preserialization_data
expect(convert(Color.RED)) == 1
def when_optional_enum(expect):
convert = converters.map_type(Optional[Color]).to_preserialization_data
expect(convert(None)).is_(None)
def when_invalid(expect):
message = "invalid literal for int() with base 10: 'a'"
with expect.raises(ValueError, message):
converters.Integer.to_preserialization_data("a")
def when_list_of_dataclasses(expect):
converter = converters.map_type(List[MyDataclass])
value = [MyDataclass(1), MyDataclass(2), MyDataclass(3)]
data = [
{"foobar": 1, "flag": False},
{"foobar": 2, "flag": False},
{"foobar": 3, "flag": False},
]
expect(converter.to_preserialization_data(value)) == data
expect(converter.to_preserialization_data(data)) == data
def when_list_with_default(expect):
data = IntegerList.to_preserialization_data([1], default_to_skip=[1])
expect(data) == [None]
data = IntegerList.to_preserialization_data([2], default_to_skip=[1])
expect(data) == [2]
        def when_set_with_default(expect):
data = IntegerSet.to_preserialization_data([1], default_to_skip=[1])
expect(data) == [None]
data = IntegerSet.to_preserialization_data([2], default_to_skip=[1])
expect(data) == [2]
def when_dict_with_default(expect):
data = MyDict.to_preserialization_data({"a": 1}, default_to_skip={"a": 1})
expect(data) == {}
data = MyDict.to_preserialization_data({"b": 2}, default_to_skip={"a": 1})
expect(data) == {"b": 2}
def when_dataclass_with_default(expect):
data = MyDataclassConverter.to_preserialization_data(
MyDataclass(1), default_to_skip=MyDataclass(1)
)
expect(data) == {}
data = MyDataclassConverter.to_preserialization_data(
MyDataclass(2), default_to_skip=MyDataclass(1)
)
expect(data) == {"foobar": 2}
data = MyDataclassConverter.to_preserialization_data(
MyDataclass(1, flag=True), default_to_skip=MyDataclass(1)
)
expect(data) == {"flag": True}
def when_empty_list_and_diff_minimization_disabled(expect, monkeypatch):
monkeypatch.setattr(settings, "MINIMAL_DIFFS", False)
data = StringList.to_preserialization_data([])
expect(data) == []
def when_empty_set_and_diff_minimization_disabled(expect, monkeypatch):
monkeypatch.setattr(settings, "MINIMAL_DIFFS", False)
data = StringSet.to_preserialization_data([])
expect(data) == []
def describe_register():
def with_new_type(expect):
converters.register(MyCustomString, converters.String)
converter = converters.map_type(MyCustomString)
expect(converter) == converters.String
|
8698e62c73596cd7e7db2d00ca4dd29d37125ca0
|
da1721d2783ea4d67ff4e73cee6eee71292f2ef7
|
/toontown/minigame/VoteResultsTrolleyPanel.py
|
8877dc1d97900573893d91ea47c8ab670e1d87e0
|
[
"BSD-3-Clause"
] |
permissive
|
open-toontown/open-toontown
|
bbdeb1b7bf0fb2861eba2df5483738c0112090ca
|
464c2d45f60551c31397bd03561582804e760b4a
|
refs/heads/develop
| 2023-07-07T01:34:31.959657
| 2023-05-30T23:49:10
| 2023-05-30T23:49:10
| 219,221,570
| 143
| 104
|
BSD-3-Clause
| 2023-09-11T09:52:34
| 2019-11-02T22:24:38
|
Python
|
UTF-8
|
Python
| false
| false
| 12,121
|
py
|
VoteResultsTrolleyPanel.py
|
from direct.gui.DirectGui import DirectFrame, DGG, DirectLabel
from direct.directnotify import DirectNotifyGlobal
from toontown.toonbase import ToontownGlobals
from panda3d.core import Point3, TextNode, Vec4
from toontown.minigame import TravelGameGlobals
from toontown.toonbase import TTLocalizer
from direct.interval.IntervalGlobal import Parallel, Sequence, LerpFunc, Func, Wait, SoundInterval
from otp.otpbase.PythonUtil import pdir
class VoteResultsTrolleyPanel(DirectFrame):
notify = DirectNotifyGlobal.directNotify.newCategory('VoteResultsTrolleyPanel')
def __init__(self, numPlayers, avIdList, votes, directions, namesList, disconnectedList, directionToGo, directionReason, directionTotals, *args, **kwargs):
opts = {'relief': None,
'geom': DGG.getDefaultDialogGeom(),
'geom_color': ToontownGlobals.GlobalDialogColor[:3] + (0.8,),
'geom_scale': (1.75, 1, 0.25),
'pos': (0, 0, 0.825)}
opts.update(kwargs)
DirectFrame.__init__(self, *args, **opts)
self.initialiseoptions(VoteResultsTrolleyPanel)
listMultiplier = 1
if TravelGameGlobals.SpoofFour:
listMultiplier = 4
self.avIdList = avIdList * listMultiplier
self.numPlayers = numPlayers * listMultiplier
self.votes = votes * listMultiplier
self.directions = directions * listMultiplier
self.namesList = namesList * listMultiplier
self.disconnectedList = disconnectedList * listMultiplier
self.directionToGo = directionToGo
self.directionReason = directionReason
self.directionTotals = directionTotals
self.entryList = []
self.rowFrame = []
self.upDownFrame = DirectFrame(parent=self, relief=None, pos=self.getRowPos(-1))
self.upLabel = DirectLabel(parent=self, relief=None, pos=(-0.5, 0, 0.06), text=TTLocalizer.TravelGameDirections[0] + ':', text_fg=(0.0, 0.0, 1.0, 1.0), text_scale=0.05, text_align=TextNode.ARight)
self.downLabel = DirectLabel(parent=self, relief=None, pos=(0.5, 0, 0.06), text=TTLocalizer.TravelGameDirections[1] + ':', text_fg=(1.0, 0.0, 0.0, 1.0), text_scale=0.05, text_align=TextNode.ARight)
self.totalVotesUpLabel = DirectLabel(parent=self.upLabel, relief=None, pos=(0.2, 0, 0.0), text='0', text_fg=(0.0, 0.0, 1.0, 1.0), text_scale=0.05, text_align=TextNode.ARight)
self.totalVotesDownLabel = DirectLabel(parent=self.downLabel, relief=None, pos=(0.2, 0, 0.0), text='0', text_fg=(1.0, 0.0, 0.0, 1.0), text_scale=0.05, text_align=TextNode.ARight)
self.totalFrame = DirectFrame(parent=self, relief=None, pos=self.getRowPos(self.numPlayers))
self.totalVotesLabels = [self.totalVotesUpLabel, self.totalVotesDownLabel]
self.resultFrame = DirectFrame(parent=self, relief=None, pos=self.getRowPos(0.5))
self.resultLabel = DirectLabel(parent=self.resultFrame, text='', text_scale=0.06, pos=(0.7, 0, 0.0), text_align=TextNode.ACenter)
self.setupResultLabel()
for index in range(self.numPlayers):
frame = DirectFrame(parent=self, relief=None, pos=self.getRowPos(index))
self.rowFrame.append(frame)
nameLabel = DirectFrame(parent=frame, relief=None, pos=(0.46, 0.0, 0.0), text=self.namesList[index], text_fg=(0.0, 0.0, 0.0, 1.0), text_scale=0.05, text_align=TextNode.ACenter, text_font=DGG.getDefaultFont())
votesUpLabel = DirectLabel(parent=frame, relief=None, pos=(1.2, 0.0, 0.0), text='', text_fg=(0, 0, 1, 1), text_scale=0.05, text_align=TextNode.ARight, text_font=DGG.getDefaultFont())
votesDownLabel = DirectLabel(parent=frame, relief=None, pos=(1.43, 0.0, 0.0), text='', text_fg=(1, 0, 0, 1), text_scale=0.05, text_align=TextNode.ARight, text_font=DGG.getDefaultFont())
nameLabel.hide()
self.entryList.append((nameLabel, votesUpLabel, votesDownLabel))
self.avVotesLabel = {}
self.avArrows = {}
matchingGameGui = loader.loadModel('phase_3.5/models/gui/matching_game_gui')
minnieArrow = matchingGameGui.find('**/minnieArrow')
from toontown.minigame.DistributedTravelGame import map3dToAspect2d
for index in range(self.numPlayers):
avId = self.avIdList[index]
av = base.cr.doId2do.get(avId)
if av:
height = av.getHeight()
avPos = av.getPos(render)
avPos.setZ(av.getZ() + 5)
labelPos = map3dToAspect2d(render, avPos)
if not labelPos:
continue
labelPos.setZ(labelPos.getZ() + 0.3)
arrow = None
if self.votes[index] > 0:
arrow = aspect2d.attachNewNode('avArrow')
minnieArrow.copyTo(arrow)
arrow.setScale(1.1, 1, 1.15)
arrow.setPos(labelPos)
if self.directions[index] == 0:
arrow.setR(-90)
arrow.setColorScale(0, 0, 1, 1)
else:
arrow.setR(90)
arrow.setColorScale(1, 0, 0, 1)
arrow.wrtReparentTo(self.resultFrame)
arrow.hide()
self.avArrows[index] = arrow
fgColor = Vec4(0, 0, 0, 1)
if self.votes[index] > 0:
if self.directions[index] == 0:
fgColor = Vec4(0, 0, 1, 1)
else:
fgColor = Vec4(1, 0, 0, 1)
if self.votes[index] > 0:
newLabel = DirectLabel(parent=aspect2d, relief=None, pos=labelPos, text='test', text_fg=(1, 1, 1, 1), text_scale=0.1, text_align=TextNode.ACenter, text_font=ToontownGlobals.getSignFont(), text_pos=(0, -0.01, 0))
else:
newLabel = DirectLabel(parent=aspect2d, geom=DGG.getDefaultDialogGeom(), geom_scale=(0.2, 1, 0.2), relief=None, pos=labelPos, text='test', text_fg=(0.5, 0.5, 0.5, 1), text_scale=0.1, text_align=TextNode.ACenter, text_font=ToontownGlobals.getSignFont(), text_pos=(0, -0.035, 0))
newLabel.wrtReparentTo(self.resultFrame)
newLabel.hide()
self.avVotesLabel[index] = newLabel
matchingGameGui.removeNode()
self.curArrowSfxIndex = 0
self.upArrowSfx = []
self.downArrowSfx = []
for i in range(5):
self.upArrowSfx.append(base.loader.loadSfx('phase_4/audio/sfx/MG_sfx_travel_game_blue_arrow.ogg'))
self.downArrowSfx.append(base.loader.loadSfx('phase_4/audio/sfx/MG_sfx_travel_game_red_arrow.ogg'))
self.winVoteSfx = base.loader.loadSfx('phase_4/audio/sfx/MG_sfx_travel_game_win_vote.ogg')
self.noVoteSfx = base.loader.loadSfx('phase_4/audio/sfx/MG_sfx_travel_game_square_no_vote_1.ogg')
self.loseVoteSfx = base.loader.loadSfx('phase_4/audio/sfx/MG_sfx_travel_game_lose_vote.ogg')
self.localAvatarWon = False
self.localAvatarLost = False
localIndex = self.avIdList.index(base.localAvatar.doId)
localDirection = self.directions[localIndex]
localVotes = self.votes[localIndex]
if localVotes:
if localDirection == self.directionToGo:
if not TravelGameGlobals.ReverseWin:
self.localAvatarWon = True
else:
self.localAvatarLost = True
elif not TravelGameGlobals.ReverseWin:
self.localAvatarLost = True
else:
self.localAvatarWon = True
return
def getRowPos(self, place):
return Point3(-0.72, -0.01, 0.0 - place * 0.1)
def setupResultLabel(self):
reasonStr = ''
if self.directionReason == TravelGameGlobals.ReasonVote:
if self.directionToGo == 0:
losingDirection = 1
else:
losingDirection = 0
diffVotes = self.directionTotals[self.directionToGo] - self.directionTotals[losingDirection]
if diffVotes > 1:
reasonStr = TTLocalizer.TravelGameReasonVotesPlural % {'dir': TTLocalizer.TravelGameDirections[self.directionToGo],
'numVotes': diffVotes}
else:
reasonStr = TTLocalizer.TravelGameReasonVotesSingular % {'dir': TTLocalizer.TravelGameDirections[self.directionToGo],
'numVotes': diffVotes}
elif self.directionReason == TravelGameGlobals.ReasonRandom:
reasonStr = TTLocalizer.TravelGameReasonRandom % {'dir': TTLocalizer.TravelGameDirections[self.directionToGo],
'numVotes': self.directionTotals[self.directionToGo]}
elif self.directionReason == TravelGameGlobals.ReasonPlaceDecider:
            reasonStr = TTLocalizer.TravelGameReasonPlace % {'name': 'TODO NAME',
'dir': TTLocalizer.TravelGameDirections[self.directionToGo]}
self.resultLabel['text'] = reasonStr
self.resultLabel.hide()
def createOnePlayerSequence(self, index, duration):
numVotes = self.votes[index]
direction = self.directions[index]
def ticketTicker(t, label = self.entryList[index][direction + 1], startVotes = 0, endVotes = numVotes):
label['text'] = str(int(t * endVotes + startVotes))
track = Parallel()
startVotes = 0
for prev in range(index):
if self.directions[prev] == direction:
startVotes += self.votes[prev]
def totalTicker(t, label = self.totalVotesLabels[direction], startVotes = startVotes, additionalVotes = numVotes):
label['text'] = str(int(t * additionalVotes + startVotes))
track.append(LerpFunc(totalTicker, duration=duration, name='countTotal %d' % index))
if index in self.avVotesLabel:
def avVotesTicker(t, label = self.avVotesLabel[index], startVotes = 0, endVotes = numVotes, direction = direction):
oldValue = label['text']
newValue = int(t * endVotes + startVotes)
label['text'] = str(newValue)
if not oldValue == label['text']:
if newValue:
if direction == 0:
self.upArrowSfx[self.curArrowSfxIndex].play()
else:
self.downArrowSfx[self.curArrowSfxIndex].play()
self.curArrowSfxIndex += 1
if self.curArrowSfxIndex >= len(self.upArrowSfx):
self.curArrowSfxIndex = 0
label = self.avVotesLabel[index]
track.append(Func(self.avVotesLabel[index].show, name='showName %d' % index))
if index in self.avArrows:
track.append(Func(self.avArrows[index].show, name='showArrow %d' % index))
if direction == 0 and numVotes:
pass
elif direction == 1 and numVotes:
pass
else:
track.append(SoundInterval(self.noVoteSfx))
track.append(LerpFunc(avVotesTicker, duration=duration, name='countAvVotes %d' % index))
return track
def startMovie(self):
self.movie = Sequence()
for index in range(self.numPlayers):
track = self.createOnePlayerSequence(index, 1.25)
self.movie.append(track)
self.movie.append(Wait(0.75))
self.movie.append(Func(self.resultLabel.show))
soundAndWait = Parallel()
soundAndWait.append(Wait(2.0))
if self.localAvatarWon:
soundAndWait.append(SoundInterval(self.winVoteSfx))
elif self.localAvatarLost:
soundAndWait.append(SoundInterval(self.loseVoteSfx, duration=0.43))
self.movie.append(soundAndWait)
self.movie.start()
def destroy(self):
self.movie.finish()
del self.movie
del self.winVoteSfx
del self.noVoteSfx
del self.upArrowSfx
del self.loseVoteSfx
del self.downArrowSfx
DirectFrame.destroy(self)
|
75edd35370ae1d06927e0153b8b16d4fb17c8676
|
4129d5b10c0ac8288db205f91ed45a40b812ef5c
|
/photutils/isophote/harmonics.py
|
c57568d55a1c33c6919dc986c55fba7410bdcd90
|
[
"BSD-3-Clause"
] |
permissive
|
astropy/photutils
|
163762aa560fd13c8a4a49aff2d6b0a522cedbcc
|
a6d629774c52cc82af18d0444c6e5584e5d0b492
|
refs/heads/main
| 2023-09-01T20:51:05.823954
| 2023-08-28T19:35:54
| 2023-08-28T19:35:54
| 2,640,766
| 204
| 130
|
BSD-3-Clause
| 2023-09-13T22:46:41
| 2011-10-25T02:39:28
|
Python
|
UTF-8
|
Python
| false
| false
| 4,261
|
py
|
harmonics.py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides tools for computing and fitting harmonic functions.
"""
import numpy as np
__all__ = ['first_and_second_harmonic_function',
'fit_first_and_second_harmonics', 'fit_upper_harmonic']
def _least_squares_fit(optimize_func, parameters):
# call the least squares fitting
# function and handle the result.
from scipy.optimize import leastsq
solution = leastsq(optimize_func, parameters, full_output=True)
if solution[4] > 4:
raise RuntimeError("Error in least squares fit: " + solution[3])
# return coefficients and covariance matrix
return (solution[0], solution[1])
def first_and_second_harmonic_function(phi, c):
r"""
Compute the harmonic function value used to calculate the
corrections for ellipse fitting.
This function includes simultaneously both the first and second
order harmonics:
.. math::
f(phi) = c[0] + c[1]*\sin(phi) + c[2]*\cos(phi) +
c[3]*\sin(2*phi) + c[4]*\cos(2*phi)
Parameters
----------
phi : float or `~numpy.ndarray`
The angle(s) along the elliptical path, going towards the positive
y axis, starting coincident with the position angle. That is, the
angles are defined from the semimajor axis that lies in
the positive x quadrant.
c : `~numpy.ndarray` of shape (5,)
Array containing the five harmonic coefficients.
Returns
-------
result : float or `~numpy.ndarray`
The function value(s) at the given input angle(s).
"""
return (c[0] + c[1] * np.sin(phi) + c[2] * np.cos(phi)
+ c[3] * np.sin(2 * phi) + c[4] * np.cos(2 * phi))
def fit_first_and_second_harmonics(phi, intensities):
r"""
Fit the first and second harmonic function values to a set of
(angle, intensity) pairs.
This function is used to compute corrections for ellipse fitting:
.. math::
f(phi) = y0 + a1*\sin(phi) + b1*\cos(phi) + a2*\sin(2*phi) +
b2*\cos(2*phi)
Parameters
----------
phi : float or `~numpy.ndarray`
The angle(s) along the elliptical path, going towards the positive
y axis, starting coincident with the position angle. That is, the
angles are defined from the semimajor axis that lies in
the positive x quadrant.
intensities : `~numpy.ndarray`
The intensities measured along the elliptical path, at the
angles defined by the ``phi`` parameter.
Returns
-------
y0, a1, b1, a2, b2 : float
The fitted harmonic coefficient values.
"""
a1 = b1 = a2 = b2 = 1.0
def optimize_func(x):
return first_and_second_harmonic_function(
phi, np.array([x[0], x[1], x[2], x[3], x[4]])) - intensities
return _least_squares_fit(optimize_func, [np.mean(intensities), a1, b1,
a2, b2])
def fit_upper_harmonic(phi, intensities, order):
r"""
Fit upper harmonic function to a set of (angle, intensity) pairs.
With ``order`` set to 3 or 4, the resulting amplitudes, divided by
the semimajor axis length and local gradient, measure the deviations
from perfect ellipticity.
The harmonic function that is fit is:
.. math::
y(phi, order) = y0 + An*\sin(order*phi) + Bn*\cos(order*phi)
Parameters
----------
phi : float or `~numpy.ndarray`
The angle(s) along the elliptical path, going towards the positive
y axis, starting coincident with the position angle. That is, the
angles are defined from the semimajor axis that lies in
the positive x quadrant.
intensities : `~numpy.ndarray`
The intensities measured along the elliptical path, at the
angles defined by the ``phi`` parameter.
order : int
The order of the harmonic to be fitted.
Returns
-------
y0, An, Bn : float
The fitted harmonic values.
"""
an = bn = 1.0
def optimize_func(x):
return (x[0] + x[1] * np.sin(order * phi)
+ x[2] * np.cos(order * phi) - intensities)
return _least_squares_fit(optimize_func, [np.mean(intensities), an, bn])
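# Editor's usage sketch (not part of the original photutils module): shows how
# the three helpers above fit together. All names come from this module or
# NumPy/SciPy; the synthetic intensity samples below are purely illustrative.
if __name__ == '__main__':
    phi = np.linspace(0.0, 2.0 * np.pi, 200)
    intensities = 10.0 + 0.5 * np.sin(phi) - 0.2 * np.cos(2.0 * phi)
    coeffs, _cov = fit_first_and_second_harmonics(phi, intensities)
    model = first_and_second_harmonic_function(phi, coeffs)
    print('max residual:', np.max(np.abs(model - intensities)))
    upper_coeffs, _cov = fit_upper_harmonic(phi, intensities, order=3)
    print('third-order harmonic (y0, A3, B3):', upper_coeffs)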
|
aeb2e4abc7a8f50ee7f8cca31fddb8df0b485115
|
99fa07ff170c4b5f880013a58f20a6412bd88dbf
|
/tests/test_loky_module.py
|
b9734eac4322110d120b88ec750b3720181e71a3
|
[
"BSD-3-Clause"
] |
permissive
|
joblib/loky
|
72df8afddfc55a6d2575a13730a1973bd71a49bb
|
05da9a84b6bae8dd4370f553ffcd06df99b54f86
|
refs/heads/master
| 2023-08-23T00:35:06.989283
| 2023-06-29T13:07:28
| 2023-06-29T13:07:28
| 48,578,152
| 244
| 32
|
BSD-3-Clause
| 2023-06-29T13:43:09
| 2015-12-25T11:16:10
|
Python
|
UTF-8
|
Python
| false
| false
| 6,512
|
py
|
test_loky_module.py
|
import multiprocessing as mp
import os
import sys
import shutil
import tempfile
import warnings
from subprocess import check_output
import pytest
import loky
from loky import cpu_count
from loky.backend.context import _cpu_count_user, _MAX_WINDOWS_WORKERS
def test_version():
assert hasattr(
loky, "__version__"
), "There are no __version__ argument on the loky module"
def test_cpu_count():
cpus = cpu_count()
assert type(cpus) is int
assert cpus >= 1
cpus_physical = cpu_count(only_physical_cores=True)
assert type(cpus_physical) is int
assert 1 <= cpus_physical <= cpus
# again to check that it's correctly cached
cpus_physical = cpu_count(only_physical_cores=True)
assert type(cpus_physical) is int
assert 1 <= cpus_physical <= cpus
@pytest.mark.skipif(sys.platform != "win32", reason="Windows specific test")
def test_windows_max_cpu_count():
assert cpu_count() <= _MAX_WINDOWS_WORKERS
cpu_count_cmd = (
"from loky.backend.context import cpu_count;" "print(cpu_count({args}))"
)
def test_cpu_count_os_sched_getaffinity():
if not hasattr(os, "sched_getaffinity") or not hasattr(shutil, "which"):
pytest.skip()
taskset_bin = shutil.which("taskset")
python_bin = shutil.which("python")
if taskset_bin is None or python_bin is None:
raise pytest.skip()
try:
os.sched_getaffinity(0)
except NotImplementedError:
pytest.skip()
res = check_output(
[
taskset_bin,
"-c",
"0",
python_bin,
"-c",
cpu_count_cmd.format(args=""),
],
text=True,
)
res_physical = check_output(
[
taskset_bin,
"-c",
"0",
python_bin,
"-c",
cpu_count_cmd.format(args="only_physical_cores=True"),
],
text=True,
)
assert res.strip() == "1"
assert res_physical.strip() == "1"
def test_cpu_count_psutil_affinity():
psutil = pytest.importorskip("psutil")
p = psutil.Process()
if not hasattr(p, "cpu_affinity"):
pytest.skip("psutil does not provide cpu_affinity on this platform")
original_affinity = p.cpu_affinity()
assert cpu_count() <= len(original_affinity)
try:
new_affinity = original_affinity[:1]
p.cpu_affinity(new_affinity)
assert cpu_count() == 1
finally:
p.cpu_affinity(original_affinity)
def test_cpu_count_cgroup_limit():
if sys.platform == "win32":
pytest.skip()
if not hasattr(shutil, "which"):
pytest.skip()
docker_bin = shutil.which("docker")
if docker_bin is None:
raise pytest.skip("docker is required to run this test")
loky_module_path = os.path.abspath(os.path.dirname(loky.__file__))
loky_project_path = os.path.abspath(
os.path.join(loky_module_path, os.pardir)
)
    # The following will always run using the Python 3.10 docker image.
# We mount the loky source as /loky inside the container,
# so it can be imported when running commands under /
# Tell docker to configure the Cgroup quota to use 0.5 CPU, loky will
# always detect 1 CPU because it rounds up to the next integer.
res_500_mCPU = int(
check_output(
f"{docker_bin} run --rm --cpus 0.5 -v {loky_project_path}:/loky python:3.10 "
f"/bin/bash -c 'pip install --quiet -e /loky ; "
f"python -c \"{cpu_count_cmd.format(args='')}\"'",
shell=True,
text=True,
).strip()
)
assert res_500_mCPU == 1
# Limiting to 1.5 CPUs can lead to 1 if there is only 1 CPU on the machine or
# 2 if there are 2 CPUs or more.
res_1500_mCPU = int(
check_output(
f"{docker_bin} run --rm --cpus 1.5 -v {loky_project_path}:/loky python:3.10 "
f"/bin/bash -c 'pip install --quiet -e /loky ; "
f"python -c \"{cpu_count_cmd.format(args='')}\"'",
shell=True,
text=True,
).strip()
)
assert res_1500_mCPU in (1, 2)
# By default there is no limit: use all available CPUs.
res_default = int(
check_output(
f"{docker_bin} run --rm -v {loky_project_path}:/loky python:3.10 "
f"/bin/bash -c 'pip install --quiet -e /loky ; "
f"python -c \"{cpu_count_cmd.format(args='')}\"'",
shell=True,
text=True,
).strip()
)
assert res_default >= res_1500_mCPU
def test_only_physical_cores_error():
# Check the warning issued by cpu_count(only_physical_cores=True) when
# unable to retrieve the number of physical cores.
if sys.platform != "linux":
pytest.skip()
# if number of available cpus is already restricted, cpu_count will return
# that value and no warning is issued even if only_physical_cores == True.
    # (tested in another test: test_only_physical_cores_with_user_limitation)
cpu_count_mp = mp.cpu_count()
if _cpu_count_user(cpu_count_mp) < cpu_count_mp:
pytest.skip()
with tempfile.TemporaryDirectory() as tmp_dir:
# Write bad lscpu program
lscpu_path = f"{tmp_dir}/lscpu"
with open(lscpu_path, "w") as f:
f.write("#!/bin/sh\n" "exit(1)")
os.chmod(lscpu_path, 0o777)
try:
old_path = os.environ["PATH"]
os.environ["PATH"] = f"{tmp_dir}:{old_path}"
# clear the cache otherwise the warning is not triggered
import loky.backend.context
loky.backend.context.physical_cores_cache = None
with pytest.warns(
UserWarning,
match="Could not find the number of" " physical cores",
):
cpu_count(only_physical_cores=True)
# Should not warn the second time
with warnings.catch_warnings():
warnings.simplefilter("error")
cpu_count(only_physical_cores=True)
finally:
os.environ["PATH"] = old_path
def test_only_physical_cores_with_user_limitation():
# Check that user limitation for the available number of cores is
# respected even if only_physical_cores == True
cpu_count_mp = mp.cpu_count()
cpu_count_user = _cpu_count_user(cpu_count_mp)
if cpu_count_user < cpu_count_mp:
assert cpu_count() == cpu_count_user
assert cpu_count(only_physical_cores=True) == cpu_count_user
|
e2f37ce1e8cdccd53b22ff106364e44163bc8808
|
6ff85b80c6fe1b3ad5416a304b93551a5e80de10
|
/Python/String/GetASCIICode.py
|
9ff9b981a1bbcb2388b380770200278576870628
|
[
"MIT"
] |
permissive
|
maniero/SOpt
|
c600cc2333e0a47ce013be3516bbb8080502ff2a
|
5d17e1a9cbf115eaea6d30af2079d0c92ffff7a3
|
refs/heads/master
| 2023-08-10T16:48:46.058739
| 2023-08-10T13:42:17
| 2023-08-10T13:42:17
| 78,631,930
| 1,002
| 136
|
MIT
| 2023-01-28T12:10:01
| 2017-01-11T11:19:24
|
C#
|
UTF-8
|
Python
| false
| false
| 279
|
py
|
GetASCIICode.py
|
def DeslocaASCII(texto):
novoTexto = ''
for letra in texto:
numero = ord(letra)
novoTexto += chr(numero + (-23 if numero > 87 else 3))
return novoTexto
print(DeslocaASCII('ABCDEFGHIJKLMNOPQRSTUVWXYZ'))
#https://pt.stackoverflow.com/q/253730/101
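# Editor's note: for the input above each letter is shifted up by 3, while the
# last three letters (X, Y, Z, whose codes exceed 87) wrap around by -23, so the
# script prints 'DEFGHIJKLMNOPQRSTUVWXYZABC'.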
|
78f5c68ea393218413695b9c061936cb3a9c69f3
|
1fc0cc24dfb0231f35a99ae6ced7b01bb2814830
|
/tests/allure_pytest/acceptance/step/test_step_with_several_step_inside_thread.py
|
88304eeeeb9dafcc67ee944b77e180a393d84f1a
|
[
"Apache-2.0"
] |
permissive
|
allure-framework/allure-python
|
54dd07bbde954c0a75b47443ea5fa892a730da29
|
200b7175218f27fef576d77c2ec791d0ea5ebe90
|
refs/heads/master
| 2023-08-23T09:14:15.292120
| 2023-08-08T16:19:36
| 2023-08-08T16:19:36
| 79,346,720
| 672
| 261
|
Apache-2.0
| 2023-08-25T09:58:13
| 2017-01-18T14:17:16
|
Python
|
UTF-8
|
Python
| false
| false
| 2,546
|
py
|
test_step_with_several_step_inside_thread.py
|
from hamcrest import assert_that
from tests.allure_pytest.pytest_runner import AllurePytestRunner
from allure_commons_test.report import has_test_case
from allure_commons_test.result import has_step
def test_step_with_thread(allure_pytest_runner: AllurePytestRunner):
"""
>>> from concurrent.futures import ThreadPoolExecutor
>>> import allure
>>> @allure.step("thread {x}")
... def parallel_step(x=1):
... with allure.step("Sub-step in thread"):
... pass
>>> def test_thread():
... with allure.step("Start in thread"):
... with ThreadPoolExecutor(max_workers=2) as executor:
... executor.map(parallel_step, [1, 2])
"""
allure_results = allure_pytest_runner.run_docstring()
assert_that(
allure_results,
has_test_case(
"test_thread",
has_step(
"Start in thread",
has_step(
"thread 1",
has_step("Sub-step in thread")
),
has_step("thread 2")
)
)
)
def test_step_with_reused_threads(allure_pytest_runner: AllurePytestRunner):
"""
>>> from concurrent.futures import ThreadPoolExecutor
>>> from threading import Event
>>> from random import shuffle
>>> from time import sleep
>>> import allure
>>> def parallel_step(data):
... event, index = data
... with allure.step(f"thread {index}"):
... event.wait()
>>> def __execute_randomly(executor):
... events = [Event() for i in range(1, 4)]
... executor.map(parallel_step, zip(events, range(1, 4)))
... shuffle(events)
... for e in events:
... e.set()
>>> def test_thread():
... with ThreadPoolExecutor(max_workers=2) as executor:
... __execute_randomly(executor)
... with allure.step("Reuse previous threads"):
... with ThreadPoolExecutor(max_workers=2) as executor:
... __execute_randomly(executor)
"""
allure_results = allure_pytest_runner.run_docstring()
assert_that(
allure_results,
has_test_case(
"test_thread",
has_step("thread 1"),
has_step("thread 2"),
has_step("thread 3"),
has_step(
"Reuse previous threads",
has_step("thread 1"),
has_step("thread 2"),
has_step("thread 3"),
)
)
)
|
a305f74daeea8d335e60d414ece09ad2fa1f5d5d
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/helpers/typeshed/stubs/braintree/braintree/subscription_gateway.pyi
|
766dc57e6728f98e44895e0649de21ae2646f5ca
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 958
|
pyi
|
subscription_gateway.pyi
|
from typing import Any
from braintree.error_result import ErrorResult as ErrorResult
from braintree.exceptions.not_found_error import NotFoundError as NotFoundError
from braintree.resource import Resource as Resource
from braintree.resource_collection import ResourceCollection as ResourceCollection
from braintree.subscription import Subscription as Subscription
from braintree.successful_result import SuccessfulResult as SuccessfulResult
from braintree.transaction import Transaction as Transaction
class SubscriptionGateway:
gateway: Any
config: Any
def __init__(self, gateway) -> None: ...
def cancel(self, subscription_id): ...
def create(self, params: Any | None = ...): ...
def find(self, subscription_id): ...
def retry_charge(self, subscription_id, amount: Any | None = ..., submit_for_settlement: bool = ...): ...
def search(self, *query): ...
def update(self, subscription_id, params: Any | None = ...): ...
|
5da286def5a0ed836f5e96adec75a4165be06df4
|
f766f1e264361c0518ae18fcc6472bc991482c06
|
/tests/parse_datetime_test.py
|
e40507a8253e4f3c799dfeb25182c0057bd78a0c
|
[
"MIT"
] |
permissive
|
pimutils/khal
|
583c10fe4ac280255bab904f0fe083b150c19a13
|
8fcfc8f34ffa541de4579c6e74da1a3b8cb9905e
|
refs/heads/master
| 2023-08-31T08:53:50.534014
| 2023-08-15T06:46:28
| 2023-08-15T08:50:07
| 12,357,974
| 2,041
| 206
|
MIT
| 2023-09-13T05:41:29
| 2013-08-25T11:22:27
|
Python
|
UTF-8
|
Python
| false
| false
| 21,766
|
py
|
parse_datetime_test.py
|
import datetime as dt
from collections import OrderedDict
import pytest
from freezegun import freeze_time
from khal.exceptions import DateTimeParseError, FatalError
from khal.icalendar import new_vevent
from khal.parse_datetime import (
construct_daynames,
eventinfofstr,
guessdatetimefstr,
guessrangefstr,
guesstimedeltafstr,
timedelta2str,
weekdaypstr,
)
from .utils import (
LOCALE_BERLIN,
LOCALE_FLOATING,
LOCALE_NEW_YORK,
_replace_uid,
normalize_component,
)
def _create_testcases(*cases):
return [(userinput, ('\r\n'.join(output) + '\r\n').encode('utf-8'))
for userinput, output in cases]
def _construct_event(info, locale,
defaulttimelen=60, defaultdatelen=1, description=None,
location=None, categories=None, repeat=None, until=None,
alarm=None, **kwargs):
info = eventinfofstr(' '.join(info), locale,
dt.timedelta(days=1),
dt.timedelta(hours=1),
adjust_reasonably=True,
)
if description is not None:
info["description"] = description
event = new_vevent(
locale=locale, location=location,
categories=categories, repeat=repeat, until=until,
alarms=alarm,
**info)
return event
def _create_vevent(*args):
"""
Adapt and return a default vevent for testing.
Accepts an arbitrary amount of strings like 'DTSTART;VALUE=DATE:2013015'.
Updates the default vevent if the key (the first word) is found and
appends the value otherwise.
"""
def_vevent = OrderedDict(
[('BEGIN', 'BEGIN:VEVENT'),
('SUMMARY', 'SUMMARY:Äwesöme Event'),
('DTSTART', 'DTSTART;VALUE=DATE:20131025'),
('DTEND', 'DTEND;VALUE=DATE:20131026'),
('DTSTAMP', 'DTSTAMP:20140216T120000Z'),
('UID', 'UID:E41JRQX2DB4P1AQZI86BAT7NHPBHPRIIHQKA')])
for row in args:
key = row.replace(':', ';').split(';')[0]
def_vevent[key] = row
def_vevent['END'] = 'END:VEVENT'
return list(def_vevent.values())
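# Editor's sketch (not part of the original test suite): how the override logic
# of _create_vevent is used by the test cases below -- a line whose key matches
# one of the defaults replaces it, any other line is added before END:VEVENT.
#
#     _create_vevent('DTSTART;VALUE=DATE:20140615', 'DESCRIPTION:and again')
#     # -> the default DTSTART is replaced, DESCRIPTION is appended as a new row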
class TestTimeDelta2Str:
def test_single(self):
assert timedelta2str(dt.timedelta(minutes=10)) == '10m'
def test_negative(self):
assert timedelta2str(dt.timedelta(minutes=-10)) == '-10m'
def test_days(self):
assert timedelta2str(dt.timedelta(days=2)) == '2d'
def test_multi(self):
assert timedelta2str(
dt.timedelta(days=6, hours=-3, minutes=10, seconds=-3)
) == '5d 21h 9m 57s'
def test_weekdaypstr():
for string, weekdayno in [
('monday', 0),
('tue', 1),
('wednesday', 2),
('thursday', 3),
('fri', 4),
('saturday', 5),
('sun', 6),
]:
assert weekdaypstr(string) == weekdayno
def test_weekdaypstr_invalid():
with pytest.raises(ValueError):
weekdaypstr('foobar')
def test_construct_daynames():
with freeze_time('2016-9-19'):
assert construct_daynames(dt.date(2016, 9, 19)) == 'Today'
assert construct_daynames(dt.date(2016, 9, 20)) == 'Tomorrow'
assert construct_daynames(dt.date(2016, 9, 21)) == 'Wednesday'
class TestGuessDatetimefstr:
@freeze_time('2016-9-19T8:00')
def test_today(self):
assert (dt.datetime(2016, 9, 19, 13), False) == \
guessdatetimefstr(['today', '13:00'], LOCALE_BERLIN)
assert dt.date.today() == guessdatetimefstr(['today'], LOCALE_BERLIN)[0].date()
@freeze_time('2016-9-19T8:00')
def test_tomorrow(self):
assert (dt.datetime(2016, 9, 20, 16), False) == \
guessdatetimefstr('tomorrow 16:00 16:00'.split(), locale=LOCALE_BERLIN)
@freeze_time('2016-9-19T8:00')
def test_time_tomorrow(self):
assert (dt.datetime(2016, 9, 20, 16), False) == \
guessdatetimefstr(
'16:00'.split(), locale=LOCALE_BERLIN, default_day=dt.date(2016, 9, 20))
@freeze_time('2016-9-19T8:00')
def test_time_yesterday(self):
assert (dt.datetime(2016, 9, 18, 16), False) == guessdatetimefstr(
'Yesterday 16:00'.split(),
locale=LOCALE_BERLIN,
default_day=dt.datetime.today())
@freeze_time('2016-9-19')
def test_time_weekday(self):
assert (dt.datetime(2016, 9, 23, 16), False) == guessdatetimefstr(
'Friday 16:00'.split(),
locale=LOCALE_BERLIN,
default_day=dt.datetime.today())
@freeze_time('2016-9-19 17:53')
def test_time_now(self):
assert (dt.datetime(2016, 9, 19, 17, 53), False) == guessdatetimefstr(
'now'.split(), locale=LOCALE_BERLIN, default_day=dt.datetime.today())
@freeze_time('2016-12-30 17:53')
def test_long_not_configured(self):
"""long version is not configured, but short contains the year"""
locale = {
'timeformat': '%H:%M',
'dateformat': '%Y-%m-%d',
'longdateformat': '',
'datetimeformat': '%Y-%m-%d %H:%M',
'longdatetimeformat': '',
}
assert (dt.datetime(2017, 1, 1), True) == guessdatetimefstr(
'2017-1-1'.split(), locale=locale, default_day=dt.datetime.today())
assert (dt.datetime(2017, 1, 1, 16, 30), False) == guessdatetimefstr(
'2017-1-1 16:30'.split(), locale=locale, default_day=dt.datetime.today())
@freeze_time('2016-12-30 17:53')
def test_short_format_contains_year(self):
"""if the non long versions of date(time)format contained a year, the
current year would be used instead of the given one, see #545"""
locale = {
'timeformat': '%H:%M',
'dateformat': '%Y-%m-%d',
'longdateformat': '%Y-%m-%d',
'datetimeformat': '%Y-%m-%d %H:%M',
'longdatetimeformat': '%Y-%m-%d %H:%M',
}
assert (dt.datetime(2017, 1, 1), True) == guessdatetimefstr(
'2017-1-1'.split(), locale=locale, default_day=dt.datetime.today())
assert (dt.datetime(2017, 1, 1, 16, 30), False) == guessdatetimefstr(
'2017-1-1 16:30'.split(), locale=locale, default_day=dt.datetime.today())
class TestGuessTimedeltafstr:
def test_single(self):
assert dt.timedelta(minutes=10) == guesstimedeltafstr('10m')
def test_seconds(self):
assert dt.timedelta(seconds=10) == guesstimedeltafstr('10s')
def test_negative(self):
assert dt.timedelta(minutes=-10) == guesstimedeltafstr('-10m')
def test_multi(self):
assert dt.timedelta(days=1, hours=-3, minutes=10) == \
guesstimedeltafstr(' 1d -3H 10min ')
def test_multi_nospace(self):
assert dt.timedelta(days=1, hours=-3, minutes=10) == \
guesstimedeltafstr('1D-3hour10m')
def test_garbage(self):
with pytest.raises(ValueError):
guesstimedeltafstr('10mbar')
def test_moregarbage(self):
with pytest.raises(ValueError):
guesstimedeltafstr('foo10m')
def test_same(self):
assert dt.timedelta(minutes=20) == \
guesstimedeltafstr('10min 10minutes')
class TestGuessRangefstr:
@freeze_time('2016-9-19')
def test_today(self):
assert (dt.datetime(2016, 9, 19, 13), dt.datetime(2016, 9, 19, 14), False) == \
guessrangefstr('13:00 14:00', locale=LOCALE_BERLIN)
assert (dt.datetime(2016, 9, 19), dt.datetime(2016, 9, 21), True) == \
guessrangefstr('today tomorrow', LOCALE_BERLIN)
@freeze_time('2016-9-19 16:34')
def test_tomorrow(self):
        # XXX remove this functionality, we shouldn't support this anyway
assert (dt.datetime(2016, 9, 19), dt.datetime(2016, 9, 21, 16), True) == \
guessrangefstr('today tomorrow 16:00', locale=LOCALE_BERLIN)
@freeze_time('2016-9-19 13:34')
def test_time_tomorrow(self):
assert (dt.datetime(2016, 9, 19, 16), dt.datetime(2016, 9, 19, 17), False) == \
guessrangefstr('16:00', locale=LOCALE_BERLIN)
assert (dt.datetime(2016, 9, 19, 16), dt.datetime(2016, 9, 19, 17), False) == \
guessrangefstr('16:00 17:00', locale=LOCALE_BERLIN)
def test_start_and_end_date(self):
assert (dt.datetime(2016, 1, 1), dt.datetime(2017, 1, 2), True) == \
guessrangefstr('1.1.2016 1.1.2017', locale=LOCALE_BERLIN)
def test_start_and_no_end_date(self):
assert (dt.datetime(2016, 1, 1), dt.datetime(2016, 1, 2), True) == \
guessrangefstr('1.1.2016', locale=LOCALE_BERLIN)
def test_start_and_end_date_time(self):
assert (dt.datetime(2016, 1, 1, 10), dt.datetime(2017, 1, 1, 22), False) == \
guessrangefstr(
'1.1.2016 10:00 1.1.2017 22:00', locale=LOCALE_BERLIN)
def test_start_and_eod(self):
start, end = dt.datetime(2016, 1, 1, 10), dt.datetime(2016, 1, 1, 23, 59, 59, 999999)
assert (start, end, False) == guessrangefstr('1.1.2016 10:00 eod', locale=LOCALE_BERLIN)
def test_start_and_week(self):
assert (dt.datetime(2015, 12, 28), dt.datetime(2016, 1, 5), True) == \
guessrangefstr('1.1.2016 week', locale=LOCALE_BERLIN)
def test_start_and_delta_1d(self):
assert (dt.datetime(2016, 1, 1), dt.datetime(2016, 1, 2), True) == \
guessrangefstr('1.1.2016 1d', locale=LOCALE_BERLIN)
def test_start_and_delta_3d(self):
assert (dt.datetime(2016, 1, 1), dt.datetime(2016, 1, 4), True) == \
guessrangefstr('1.1.2016 3d', locale=LOCALE_BERLIN)
def test_start_dt_and_delta(self):
assert (dt.datetime(2016, 1, 1, 10), dt.datetime(2016, 1, 4, 10), False) == \
guessrangefstr('1.1.2016 10:00 3d', locale=LOCALE_BERLIN)
def test_start_allday_and_delta_datetime(self):
with pytest.raises(FatalError):
guessrangefstr('1.1.2016 3d3m', locale=LOCALE_BERLIN)
def test_start_zero_day_delta(self):
with pytest.raises(FatalError):
guessrangefstr('1.1.2016 0d', locale=LOCALE_BERLIN)
@freeze_time('20160216')
def test_week(self):
assert (dt.datetime(2016, 2, 15), dt.datetime(2016, 2, 23), True) == \
guessrangefstr('week', locale=LOCALE_BERLIN)
def test_invalid(self):
with pytest.raises(DateTimeParseError):
guessrangefstr('3d', locale=LOCALE_BERLIN)
with pytest.raises(DateTimeParseError):
guessrangefstr('35.1.2016', locale=LOCALE_BERLIN)
with pytest.raises(DateTimeParseError):
guessrangefstr('1.1.2016 2x', locale=LOCALE_BERLIN)
with pytest.raises(DateTimeParseError):
guessrangefstr('1.1.2016x', locale=LOCALE_BERLIN)
with pytest.raises(DateTimeParseError):
guessrangefstr('xxx yyy zzz', locale=LOCALE_BERLIN)
@freeze_time('2016-12-30 17:53')
def test_short_format_contains_year(self):
"""if the non long versions of date(time)format contained a year, the
current year would be used instead of the given one, see #545
same as above, but for guessrangefstr
"""
locale = {
'timeformat': '%H:%M',
'dateformat': '%Y-%m-%d',
'longdateformat': '%Y-%m-%d',
'datetimeformat': '%Y-%m-%d %H:%M',
'longdatetimeformat': '%Y-%m-%d %H:%M',
}
assert (dt.datetime(2017, 1, 1), dt.datetime(2017, 1, 2), True) == \
guessrangefstr('2017-1-1 2017-1-1', locale=locale)
test_set_format_de = _create_testcases(
# all-day-events
# one day only
('25.10.2013 Äwesöme Event',
_create_vevent('DTSTART;VALUE=DATE:20131025',
'DTEND;VALUE=DATE:20131026')),
# 2 day
('15.08.2014 16.08. Äwesöme Event',
_create_vevent('DTSTART;VALUE=DATE:20140815',
'DTEND;VALUE=DATE:20140817')), # XXX
# end date in next year and not specified
('29.12.2014 03.01. Äwesöme Event',
_create_vevent('DTSTART;VALUE=DATE:20141229',
'DTEND;VALUE=DATE:20150104')),
# end date in next year
('29.12.2014 03.01.2015 Äwesöme Event',
_create_vevent('DTSTART;VALUE=DATE:20141229',
'DTEND;VALUE=DATE:20150104')),
# datetime events
# start and end date same, no explicit end date given
('25.10.2013 18:00 20:00 Äwesöme Event',
_create_vevent(
'DTSTART;TZID=Europe/Berlin:20131025T180000',
'DTEND;TZID=Europe/Berlin:20131025T200000')),
# start and end date same, ends 24:00 which should be 00:00 (start) of next
# day
('25.10.2013 18:00 24:00 Äwesöme Event',
_create_vevent(
'DTSTART;TZID=Europe/Berlin:20131025T180000',
'DTEND;TZID=Europe/Berlin:20131026T000000')),
# start and end date same, explicit end date (but no year) given
('25.10.2013 18:00 26.10. 20:00 Äwesöme Event',
_create_vevent(
'DTSTART;TZID=Europe/Berlin:20131025T180000',
'DTEND;TZID=Europe/Berlin:20131026T200000')),
('30.12.2013 18:00 2.1. 20:00 Äwesöme Event',
_create_vevent(
'DTSTART;TZID=Europe/Berlin:20131230T180000',
'DTEND;TZID=Europe/Berlin:20140102T200000')),
# only start date given (no year, past day and month)
('25.01. 18:00 20:00 Äwesöme Event',
_create_vevent(
'DTSTART;TZID=Europe/Berlin:20150125T180000',
'DTEND;TZID=Europe/Berlin:20150125T200000')),
# date ends next day, but end date not given
('25.10.2013 23:00 0:30 Äwesöme Event',
_create_vevent(
'DTSTART;TZID=Europe/Berlin:20131025T230000',
'DTEND;TZID=Europe/Berlin:20131026T003000')),
('2.2. 23:00 0:30 Äwesöme Event',
_create_vevent(
'DTSTART;TZID=Europe/Berlin:20150202T230000',
'DTEND;TZID=Europe/Berlin:20150203T003000')),
# only start datetime given
('25.10.2013 06:00 Äwesöme Event',
_create_vevent(
'DTSTART;TZID=Europe/Berlin:20131025T060000',
'DTEND;TZID=Europe/Berlin:20131025T070000')),
# timezone given
('25.10.2013 06:00 America/New_York Äwesöme Event',
_create_vevent(
'DTSTART;TZID=America/New_York:20131025T060000',
'DTEND;TZID=America/New_York:20131025T070000'))
)
@freeze_time('20140216T120000')
def test_construct_event_format_de():
for data_list, vevent_expected in test_set_format_de:
vevent = _construct_event(data_list.split(), locale=LOCALE_BERLIN)
assert _replace_uid(vevent).to_ical() == vevent_expected
test_set_format_us = _create_testcases(
('1999/12/31-06:00 Äwesöme Event',
_create_vevent(
'DTSTART;TZID=America/New_York:19991231T060000',
'DTEND;TZID=America/New_York:19991231T070000')),
('2014/12/18 2014/12/20 Äwesöme Event',
_create_vevent('DTSTART;VALUE=DATE:20141218',
'DTEND;VALUE=DATE:20141221')),
)
@freeze_time('2014-02-16 12:00:00')
def test__construct_event_format_us():
for data_list, vevent in test_set_format_us:
event = _construct_event(data_list.split(), locale=LOCALE_NEW_YORK)
assert _replace_uid(event).to_ical() == vevent
test_set_format_de_complexer = _create_testcases(
# now events where the start date has to be inferred, too
# today
('8:00 Äwesöme Event',
_create_vevent(
'DTSTART;TZID=Europe/Berlin:20140216T080000',
'DTEND;TZID=Europe/Berlin:20140216T090000')),
# today until tomorrow
('22:00 1:00 Äwesöme Event',
_create_vevent(
'DTSTART;TZID=Europe/Berlin:20140216T220000',
'DTEND;TZID=Europe/Berlin:20140217T010000')),
# other timezone
('22:00 1:00 Europe/London Äwesöme Event',
_create_vevent(
'DTSTART;TZID=Europe/London:20140216T220000',
'DTEND;TZID=Europe/London:20140217T010000')),
('15.06. Äwesöme Event',
_create_vevent('DTSTART;VALUE=DATE:20140615',
'DTEND;VALUE=DATE:20140616')),
)
@freeze_time('2014-02-16 12:00:00')
def test__construct_event_format_de_complexer():
for data_list, vevent in test_set_format_de_complexer:
event = _construct_event(data_list.split(), locale=LOCALE_BERLIN)
assert _replace_uid(event).to_ical() == vevent
test_set_leap_year = _create_testcases(
('29.02. Äwesöme Event',
_create_vevent(
'DTSTART;VALUE=DATE:20160229',
'DTEND;VALUE=DATE:20160301',
'DTSTAMP:20160101T202122Z')),
)
def test_leap_year():
for data_list, vevent in test_set_leap_year:
with freeze_time('1999-1-1'):
with pytest.raises(DateTimeParseError):
event = _construct_event(data_list.split(), locale=LOCALE_BERLIN)
with freeze_time('2016-1-1 20:21:22'):
event = _construct_event(data_list.split(), locale=LOCALE_BERLIN)
assert _replace_uid(event).to_ical() == vevent
test_set_description = _create_testcases(
# now events where the start date has to be inferred, too
# today
('8:00 Äwesöme Event :: this is going to be awesome',
_create_vevent(
'DTSTART;TZID=Europe/Berlin:20140216T080000',
'DTEND;TZID=Europe/Berlin:20140216T090000',
'DESCRIPTION:this is going to be awesome')),
# today until tomorrow
('22:00 1:00 Äwesöme Event :: Will be even better',
_create_vevent(
'DTSTART;TZID=Europe/Berlin:20140216T220000',
'DTEND;TZID=Europe/Berlin:20140217T010000',
'DESCRIPTION:Will be even better')),
('15.06. Äwesöme Event :: and again',
_create_vevent('DTSTART;VALUE=DATE:20140615',
'DTEND;VALUE=DATE:20140616',
'DESCRIPTION:and again')),
)
def test_description():
for data_list, vevent in test_set_description:
with freeze_time('2014-02-16 12:00:00'):
event = _construct_event(data_list.split(), locale=LOCALE_BERLIN)
assert _replace_uid(event).to_ical() == vevent
test_set_repeat_floating = _create_testcases(
# now events where the start date has to be inferred, too
# today
('8:00 Äwesöme Event',
_create_vevent(
'DTSTART:20140216T080000',
'DTEND:20140216T090000',
'DESCRIPTION:please describe the event',
'RRULE:FREQ=DAILY;UNTIL=20150604T000000')))
def test_repeat_floating():
for data_list, vevent in test_set_repeat_floating:
with freeze_time('2014-02-16 12:00:00'):
event = _construct_event(data_list.split(),
description='please describe the event',
repeat='daily',
until='04.06.2015',
locale=LOCALE_FLOATING)
assert normalize_component(_replace_uid(event).to_ical()) == \
normalize_component(vevent)
test_set_repeat_localized = _create_testcases(
# now events where the start date has to be inferred, too
# today
('8:00 Äwesöme Event',
_create_vevent(
'DTSTART;TZID=Europe/Berlin:20140216T080000',
'DTEND;TZID=Europe/Berlin:20140216T090000',
'DESCRIPTION:please describe the event',
'RRULE:FREQ=DAILY;UNTIL=20150604T230000Z')))
def test_repeat_localized():
for data_list, vevent in test_set_repeat_localized:
with freeze_time('2014-02-16 12:00:00'):
event = _construct_event(data_list.split(),
description='please describe the event',
repeat='daily',
until='05.06.2015',
locale=LOCALE_BERLIN)
assert normalize_component(_replace_uid(event).to_ical()) == \
normalize_component(vevent)
test_set_alarm = _create_testcases(
('8:00 Äwesöme Event',
['BEGIN:VEVENT',
'SUMMARY:Äwesöme Event',
'DTSTART;TZID=Europe/Berlin:20140216T080000',
'DTEND;TZID=Europe/Berlin:20140216T090000',
'DTSTAMP:20140216T120000Z',
'UID:E41JRQX2DB4P1AQZI86BAT7NHPBHPRIIHQKA',
'DESCRIPTION:please describe the event',
'BEGIN:VALARM',
'ACTION:DISPLAY',
'DESCRIPTION:please describe the event',
'TRIGGER:-PT23M',
'END:VALARM',
'END:VEVENT']))
@freeze_time('2014-02-16 12:00:00')
def test_alarm():
for data_list, vevent in test_set_alarm:
event = _construct_event(data_list.split(),
description='please describe the event',
alarm='23m',
locale=LOCALE_BERLIN)
assert _replace_uid(event).to_ical() == vevent
test_set_description_and_location_and_categories = _create_testcases(
# now events where the start date has to be inferred, too
# today
('8:00 Äwesöme Event',
_create_vevent(
'DTSTART;TZID=Europe/Berlin:20140216T080000',
'DTEND;TZID=Europe/Berlin:20140216T090000',
'CATEGORIES:boring meeting',
'DESCRIPTION:please describe the event',
'LOCATION:in the office')))
@freeze_time('2014-02-16 12:00:00')
def test_description_and_location_and_categories():
for data_list, vevent in test_set_description_and_location_and_categories:
event = _construct_event(data_list.split(),
description='please describe the event',
location='in the office',
categories=['boring meeting'],
locale=LOCALE_BERLIN)
assert _replace_uid(event).to_ical() == vevent
|
4948903651582314a75534da4ac5255ae77d5b20
|
8da41ffa2ccb09e04f95db0f211e0ed69a42a352
|
/courses/machine_learning/deepdive2/end_to_end_ml/labs/serving/application/lib/rsa/randnum.py
|
310acaa62074b42830b020ab3fba76135dcc5a46
|
[
"Apache-2.0"
] |
permissive
|
GoogleCloudPlatform/training-data-analyst
|
808af9b09a0e5f5657c4ca76cdd205f808d76d89
|
975a95032ce5b7012d1772c7f1f5cfe606eae839
|
refs/heads/master
| 2023-09-05T19:50:59.722334
| 2023-09-04T14:25:33
| 2023-09-04T14:25:33
| 56,459,948
| 7,311
| 5,917
|
Apache-2.0
| 2023-09-13T21:45:54
| 2016-04-17T21:39:27
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,643
|
py
|
randnum.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for generating random numbers."""
# Source inspired by code by Yesudeep Mangalapilly <yesudeep@gmail.com>
import os
from rsa import common, transform
from rsa._compat import byte
def read_random_bits(nbits):
"""Reads 'nbits' random bits.
    If nbits isn't a whole number of bytes, an extra byte will be prepended with
only the lower bits set.
"""
nbytes, rbits = divmod(nbits, 8)
# Get the random bytes
randomdata = os.urandom(nbytes)
# Add the remaining random bits
if rbits > 0:
randomvalue = ord(os.urandom(1))
randomvalue >>= (8 - rbits)
randomdata = byte(randomvalue) + randomdata
return randomdata
def read_random_int(nbits):
"""Reads a random integer of approximately nbits bits.
"""
randomdata = read_random_bits(nbits)
value = transform.bytes2int(randomdata)
# Ensure that the number is large enough to just fill out the required
# number of bits.
value |= 1 << (nbits - 1)
return value
def read_random_odd_int(nbits):
"""Reads a random odd integer of approximately nbits bits.
>>> read_random_odd_int(512) & 1
1
"""
value = read_random_int(nbits)
# Make sure it's odd
return value | 1
def randint(maxvalue):
"""Returns a random integer x with 1 <= x <= maxvalue
May take a very long time in specific situations. If maxvalue needs N bits
to store, the closer maxvalue is to (2 ** N) - 1, the faster this function
is.
"""
bit_size = common.bit_size(maxvalue)
tries = 0
while True:
value = read_random_int(bit_size)
if value <= maxvalue:
break
if tries % 10 == 0 and tries:
            # After a lot of tries we still haven't drawn a value that is small
            # enough (<= maxvalue), so decrease the number of bits by 1. That'll
            # dramatically increase the chances of getting a suitable value.
bit_size -= 1
tries += 1
return value
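# Editor's usage sketch (not part of the original rsa module): a quick demo of
# the helpers above. The values drawn are random; the assertions only check the
# documented size and parity guarantees.
if __name__ == '__main__':
    bits = read_random_bits(12)     # 12 bits -> 1 full byte plus 1 partial byte
    assert len(bits) == 2
    odd = read_random_odd_int(64)
    assert odd % 2 == 1
    value = randint(10 ** 6)
    assert 1 <= value <= 10 ** 6
    print(odd, value)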
|
dbc5b9375a91dad64a55bdedeec3e4a1424eabab
|
7bea5adf7d6284fbad0131d665e957d58adfe7c7
|
/allauth/socialaccount/providers/notion/provider.py
|
77e2bdb67b17a36a415ec1b1a16a112da4304618
|
[
"MIT"
] |
permissive
|
pennersr/django-allauth
|
50c9e71c3666785368e92ed9e19ea0f6a5438cd2
|
6b8911a5ebbabda0d446f2743bd4d00d250ed500
|
refs/heads/main
| 2023-09-03T16:48:10.988418
| 2023-09-02T08:00:53
| 2023-09-02T08:00:53
| 976,994
| 7,719
| 3,481
|
MIT
| 2023-09-14T15:06:57
| 2010-10-10T20:10:52
|
Python
|
UTF-8
|
Python
| false
| false
| 1,621
|
py
|
provider.py
|
from allauth.account.models import EmailAddress
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class NotionAccount(ProviderAccount):
def get_user(self):
return self.account.extra_data["owner"]["user"]
def get_name(self):
return self.get_user()["name"]
def get_avatar_url(self):
return self.get_user()["avatar_url"]
def get_workspace_name(self):
return self.account.extra_data["workspace_name"]
def get_workspace_icon(self):
return self.account.extra_data["workspace_icon"]
def to_str(self):
name = self.get_name()
workspace = self.get_workspace_name()
return f"{name} ({workspace})"
class NotionProvider(OAuth2Provider):
id = "notion"
name = "Notion"
account_class = NotionAccount
def extract_uid(self, data):
"""
        The unique identifier for Notion is a combination of the User ID
and the Workspace ID they have authorized the application with.
"""
user_id = data["owner"]["user"]["id"]
workspace_id = data["workspace_id"]
return "user-%s_workspace-%s" % (user_id, workspace_id)
def extract_common_fields(self, data):
user = data["owner"]["user"]
user["email"] = user["person"]["email"]
return user
def extract_email_addresses(self, data):
user = data["owner"]["user"]
email = user["person"]["email"]
return [EmailAddress(email=email, verified=True, primary=True)]
provider_classes = [NotionProvider]
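# Editor's note (not part of the original provider): extract_uid combines both
# ids into a single string of the form "user-<user id>_workspace-<workspace id>".
# For a hypothetical token payload
#     {"owner": {"user": {"id": "abc"}}, "workspace_id": "xyz"}
# it returns "user-abc_workspace-xyz".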
|
9e52814e677ee64a470d6091862ab8f714bf29d4
|
1742b6719b988e5519373002305e31d28b8bd691
|
/sdk/python/pulumi_aws/elasticache/get_cluster.py
|
673f34216f7a80b4bfd8829afa9c6b30899c0de6
|
[
"MPL-2.0",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
pulumi/pulumi-aws
|
4f7fdb4a816c5ea357cff2c2e3b613c006e49f1a
|
42b0a0abdf6c14da248da22f8c4530af06e67b98
|
refs/heads/master
| 2023-08-03T23:08:34.520280
| 2023-08-01T18:09:58
| 2023-08-01T18:09:58
| 97,484,940
| 384
| 171
|
Apache-2.0
| 2023-09-14T14:48:40
| 2017-07-17T14:20:33
|
Java
|
UTF-8
|
Python
| false
| false
| 18,099
|
py
|
get_cluster.py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetClusterResult',
'AwaitableGetClusterResult',
'get_cluster',
'get_cluster_output',
]
@pulumi.output_type
class GetClusterResult:
"""
A collection of values returned by getCluster.
"""
def __init__(__self__, arn=None, availability_zone=None, cache_nodes=None, cluster_address=None, cluster_id=None, configuration_endpoint=None, engine=None, engine_version=None, id=None, ip_discovery=None, log_delivery_configurations=None, maintenance_window=None, network_type=None, node_type=None, notification_topic_arn=None, num_cache_nodes=None, parameter_group_name=None, port=None, preferred_outpost_arn=None, replication_group_id=None, security_group_ids=None, security_group_names=None, snapshot_retention_limit=None, snapshot_window=None, subnet_group_name=None, tags=None):
if arn and not isinstance(arn, str):
raise TypeError("Expected argument 'arn' to be a str")
pulumi.set(__self__, "arn", arn)
if availability_zone and not isinstance(availability_zone, str):
raise TypeError("Expected argument 'availability_zone' to be a str")
pulumi.set(__self__, "availability_zone", availability_zone)
if cache_nodes and not isinstance(cache_nodes, list):
raise TypeError("Expected argument 'cache_nodes' to be a list")
pulumi.set(__self__, "cache_nodes", cache_nodes)
if cluster_address and not isinstance(cluster_address, str):
raise TypeError("Expected argument 'cluster_address' to be a str")
pulumi.set(__self__, "cluster_address", cluster_address)
if cluster_id and not isinstance(cluster_id, str):
raise TypeError("Expected argument 'cluster_id' to be a str")
pulumi.set(__self__, "cluster_id", cluster_id)
if configuration_endpoint and not isinstance(configuration_endpoint, str):
raise TypeError("Expected argument 'configuration_endpoint' to be a str")
pulumi.set(__self__, "configuration_endpoint", configuration_endpoint)
if engine and not isinstance(engine, str):
raise TypeError("Expected argument 'engine' to be a str")
pulumi.set(__self__, "engine", engine)
if engine_version and not isinstance(engine_version, str):
raise TypeError("Expected argument 'engine_version' to be a str")
pulumi.set(__self__, "engine_version", engine_version)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if ip_discovery and not isinstance(ip_discovery, str):
raise TypeError("Expected argument 'ip_discovery' to be a str")
pulumi.set(__self__, "ip_discovery", ip_discovery)
if log_delivery_configurations and not isinstance(log_delivery_configurations, list):
raise TypeError("Expected argument 'log_delivery_configurations' to be a list")
pulumi.set(__self__, "log_delivery_configurations", log_delivery_configurations)
if maintenance_window and not isinstance(maintenance_window, str):
raise TypeError("Expected argument 'maintenance_window' to be a str")
pulumi.set(__self__, "maintenance_window", maintenance_window)
if network_type and not isinstance(network_type, str):
raise TypeError("Expected argument 'network_type' to be a str")
pulumi.set(__self__, "network_type", network_type)
if node_type and not isinstance(node_type, str):
raise TypeError("Expected argument 'node_type' to be a str")
pulumi.set(__self__, "node_type", node_type)
if notification_topic_arn and not isinstance(notification_topic_arn, str):
raise TypeError("Expected argument 'notification_topic_arn' to be a str")
pulumi.set(__self__, "notification_topic_arn", notification_topic_arn)
if num_cache_nodes and not isinstance(num_cache_nodes, int):
raise TypeError("Expected argument 'num_cache_nodes' to be a int")
pulumi.set(__self__, "num_cache_nodes", num_cache_nodes)
if parameter_group_name and not isinstance(parameter_group_name, str):
raise TypeError("Expected argument 'parameter_group_name' to be a str")
pulumi.set(__self__, "parameter_group_name", parameter_group_name)
if port and not isinstance(port, int):
raise TypeError("Expected argument 'port' to be a int")
pulumi.set(__self__, "port", port)
if preferred_outpost_arn and not isinstance(preferred_outpost_arn, str):
raise TypeError("Expected argument 'preferred_outpost_arn' to be a str")
pulumi.set(__self__, "preferred_outpost_arn", preferred_outpost_arn)
if replication_group_id and not isinstance(replication_group_id, str):
raise TypeError("Expected argument 'replication_group_id' to be a str")
pulumi.set(__self__, "replication_group_id", replication_group_id)
if security_group_ids and not isinstance(security_group_ids, list):
raise TypeError("Expected argument 'security_group_ids' to be a list")
pulumi.set(__self__, "security_group_ids", security_group_ids)
if security_group_names and not isinstance(security_group_names, list):
raise TypeError("Expected argument 'security_group_names' to be a list")
pulumi.set(__self__, "security_group_names", security_group_names)
if snapshot_retention_limit and not isinstance(snapshot_retention_limit, int):
raise TypeError("Expected argument 'snapshot_retention_limit' to be a int")
pulumi.set(__self__, "snapshot_retention_limit", snapshot_retention_limit)
if snapshot_window and not isinstance(snapshot_window, str):
raise TypeError("Expected argument 'snapshot_window' to be a str")
pulumi.set(__self__, "snapshot_window", snapshot_window)
if subnet_group_name and not isinstance(subnet_group_name, str):
raise TypeError("Expected argument 'subnet_group_name' to be a str")
pulumi.set(__self__, "subnet_group_name", subnet_group_name)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter
def arn(self) -> str:
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="availabilityZone")
def availability_zone(self) -> str:
"""
Availability Zone for the cache cluster.
"""
return pulumi.get(self, "availability_zone")
@property
@pulumi.getter(name="cacheNodes")
def cache_nodes(self) -> Sequence['outputs.GetClusterCacheNodeResult']:
"""
List of node objects including `id`, `address`, `port`, `availability_zone` and `outpost_arn`.
Referenceable e.g., as `${data.aws_elasticache_cluster.bar.cache_nodes.0.address}`
"""
return pulumi.get(self, "cache_nodes")
@property
@pulumi.getter(name="clusterAddress")
def cluster_address(self) -> str:
"""
(Memcached only) DNS name of the cache cluster without the port appended.
"""
return pulumi.get(self, "cluster_address")
@property
@pulumi.getter(name="clusterId")
def cluster_id(self) -> str:
return pulumi.get(self, "cluster_id")
@property
@pulumi.getter(name="configurationEndpoint")
def configuration_endpoint(self) -> str:
"""
(Memcached only) Configuration endpoint to allow host discovery.
"""
return pulumi.get(self, "configuration_endpoint")
@property
@pulumi.getter
def engine(self) -> str:
"""
Name of the cache engine.
"""
return pulumi.get(self, "engine")
@property
@pulumi.getter(name="engineVersion")
def engine_version(self) -> str:
"""
Version number of the cache engine.
"""
return pulumi.get(self, "engine_version")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="ipDiscovery")
def ip_discovery(self) -> str:
"""
The IP version advertised in the discovery protocol.
"""
return pulumi.get(self, "ip_discovery")
@property
@pulumi.getter(name="logDeliveryConfigurations")
def log_delivery_configurations(self) -> Sequence['outputs.GetClusterLogDeliveryConfigurationResult']:
"""
Redis [SLOWLOG](https://redis.io/commands/slowlog) or Redis [Engine Log](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Log_Delivery.html#Log_contents-engine-log) delivery settings.
"""
return pulumi.get(self, "log_delivery_configurations")
@property
@pulumi.getter(name="maintenanceWindow")
def maintenance_window(self) -> str:
"""
Specifies the weekly time range for when maintenance
on the cache cluster is performed.
"""
return pulumi.get(self, "maintenance_window")
@property
@pulumi.getter(name="networkType")
def network_type(self) -> str:
"""
The IP versions for cache cluster connections.
"""
return pulumi.get(self, "network_type")
@property
@pulumi.getter(name="nodeType")
def node_type(self) -> str:
"""
The cluster node type.
"""
return pulumi.get(self, "node_type")
@property
@pulumi.getter(name="notificationTopicArn")
def notification_topic_arn(self) -> str:
"""
An ARN of an
SNS topic that ElastiCache notifications get sent to.
"""
return pulumi.get(self, "notification_topic_arn")
@property
@pulumi.getter(name="numCacheNodes")
def num_cache_nodes(self) -> int:
"""
The number of cache nodes that the cache cluster has.
"""
return pulumi.get(self, "num_cache_nodes")
@property
@pulumi.getter(name="parameterGroupName")
def parameter_group_name(self) -> str:
"""
Name of the parameter group associated with this cache cluster.
"""
return pulumi.get(self, "parameter_group_name")
@property
@pulumi.getter
def port(self) -> int:
"""
The port number on which each of the cache nodes will
accept connections.
"""
return pulumi.get(self, "port")
@property
@pulumi.getter(name="preferredOutpostArn")
def preferred_outpost_arn(self) -> str:
"""
The outpost ARN in which the cache cluster was created if created in outpost.
"""
return pulumi.get(self, "preferred_outpost_arn")
@property
@pulumi.getter(name="replicationGroupId")
def replication_group_id(self) -> str:
"""
The replication group to which this cache cluster belongs.
"""
return pulumi.get(self, "replication_group_id")
@property
@pulumi.getter(name="securityGroupIds")
def security_group_ids(self) -> Sequence[str]:
"""
List VPC security groups associated with the cache cluster.
"""
return pulumi.get(self, "security_group_ids")
@property
@pulumi.getter(name="securityGroupNames")
def security_group_names(self) -> Sequence[str]:
"""
List of security group names associated with this cache cluster.
"""
warnings.warn("""With the retirement of EC2-Classic the security_group_names attribute has been deprecated and will be removed in a future version.""", DeprecationWarning)
pulumi.log.warn("""security_group_names is deprecated: With the retirement of EC2-Classic the security_group_names attribute has been deprecated and will be removed in a future version.""")
return pulumi.get(self, "security_group_names")
@property
@pulumi.getter(name="snapshotRetentionLimit")
def snapshot_retention_limit(self) -> int:
"""
The number of days for which ElastiCache will
retain automatic cache cluster snapshots before deleting them.
"""
return pulumi.get(self, "snapshot_retention_limit")
@property
@pulumi.getter(name="snapshotWindow")
def snapshot_window(self) -> str:
"""
Daily time range (in UTC) during which ElastiCache will
begin taking a daily snapshot of the cache cluster.
"""
return pulumi.get(self, "snapshot_window")
@property
@pulumi.getter(name="subnetGroupName")
def subnet_group_name(self) -> str:
"""
Name of the subnet group associated to the cache cluster.
"""
return pulumi.get(self, "subnet_group_name")
@property
@pulumi.getter
def tags(self) -> Mapping[str, str]:
"""
Tags assigned to the resource
"""
return pulumi.get(self, "tags")
class AwaitableGetClusterResult(GetClusterResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetClusterResult(
arn=self.arn,
availability_zone=self.availability_zone,
cache_nodes=self.cache_nodes,
cluster_address=self.cluster_address,
cluster_id=self.cluster_id,
configuration_endpoint=self.configuration_endpoint,
engine=self.engine,
engine_version=self.engine_version,
id=self.id,
ip_discovery=self.ip_discovery,
log_delivery_configurations=self.log_delivery_configurations,
maintenance_window=self.maintenance_window,
network_type=self.network_type,
node_type=self.node_type,
notification_topic_arn=self.notification_topic_arn,
num_cache_nodes=self.num_cache_nodes,
parameter_group_name=self.parameter_group_name,
port=self.port,
preferred_outpost_arn=self.preferred_outpost_arn,
replication_group_id=self.replication_group_id,
security_group_ids=self.security_group_ids,
security_group_names=self.security_group_names,
snapshot_retention_limit=self.snapshot_retention_limit,
snapshot_window=self.snapshot_window,
subnet_group_name=self.subnet_group_name,
tags=self.tags)
def get_cluster(cluster_id: Optional[str] = None,
tags: Optional[Mapping[str, str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetClusterResult:
"""
Use this data source to get information about an ElastiCache Cluster
## Example Usage
```python
import pulumi
import pulumi_aws as aws
my_cluster = aws.elasticache.get_cluster(cluster_id="my-cluster-id")
```
:param str cluster_id: Group identifier.
:param Mapping[str, str] tags: Tags assigned to the resource
"""
__args__ = dict()
__args__['clusterId'] = cluster_id
__args__['tags'] = tags
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('aws:elasticache/getCluster:getCluster', __args__, opts=opts, typ=GetClusterResult).value
return AwaitableGetClusterResult(
arn=pulumi.get(__ret__, 'arn'),
availability_zone=pulumi.get(__ret__, 'availability_zone'),
cache_nodes=pulumi.get(__ret__, 'cache_nodes'),
cluster_address=pulumi.get(__ret__, 'cluster_address'),
cluster_id=pulumi.get(__ret__, 'cluster_id'),
configuration_endpoint=pulumi.get(__ret__, 'configuration_endpoint'),
engine=pulumi.get(__ret__, 'engine'),
engine_version=pulumi.get(__ret__, 'engine_version'),
id=pulumi.get(__ret__, 'id'),
ip_discovery=pulumi.get(__ret__, 'ip_discovery'),
log_delivery_configurations=pulumi.get(__ret__, 'log_delivery_configurations'),
maintenance_window=pulumi.get(__ret__, 'maintenance_window'),
network_type=pulumi.get(__ret__, 'network_type'),
node_type=pulumi.get(__ret__, 'node_type'),
notification_topic_arn=pulumi.get(__ret__, 'notification_topic_arn'),
num_cache_nodes=pulumi.get(__ret__, 'num_cache_nodes'),
parameter_group_name=pulumi.get(__ret__, 'parameter_group_name'),
port=pulumi.get(__ret__, 'port'),
preferred_outpost_arn=pulumi.get(__ret__, 'preferred_outpost_arn'),
replication_group_id=pulumi.get(__ret__, 'replication_group_id'),
security_group_ids=pulumi.get(__ret__, 'security_group_ids'),
security_group_names=pulumi.get(__ret__, 'security_group_names'),
snapshot_retention_limit=pulumi.get(__ret__, 'snapshot_retention_limit'),
snapshot_window=pulumi.get(__ret__, 'snapshot_window'),
subnet_group_name=pulumi.get(__ret__, 'subnet_group_name'),
tags=pulumi.get(__ret__, 'tags'))
@_utilities.lift_output_func(get_cluster)
def get_cluster_output(cluster_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Optional[Mapping[str, str]]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetClusterResult]:
"""
Use this data source to get information about an ElastiCache Cluster
## Example Usage
```python
import pulumi
import pulumi_aws as aws
my_cluster = aws.elasticache.get_cluster(cluster_id="my-cluster-id")
```
:param str cluster_id: Group identifier.
:param Mapping[str, str] tags: Tags assigned to the resource
"""
...
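# Illustrative usage sketch (not part of the generated SDK, kept as a comment):
# consuming the Output-returning variant inside a Pulumi program. The cluster id
# below is only an example value.
#
#   cluster = get_cluster_output(cluster_id="my-cluster-id")
#   pulumi.export("cluster_address", cluster.cluster_address)
#   pulumi.export("node_type", cluster.node_type)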
|
09e77eec51dee7dd35a2a8205f5e23285188e30d
|
dd616b888494f2d848447d360d883c9c9b4bdbf9
|
/cantoolz/modules/vircar/ecu_engine.py
|
20221c36c2435a7910de1738295479f9232738fa
|
[
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
CANToolz/CANToolz
|
2b25730770ac6a97c671067bf975a52d58f1066c
|
82d330b835b90598f7289cdfe083f7f66309f915
|
refs/heads/master
| 2023-05-14T14:36:54.135516
| 2021-02-24T07:26:06
| 2021-02-24T07:26:06
| 99,575,229
| 247
| 49
|
NOASSERTION
| 2023-05-07T13:18:09
| 2017-08-07T12:24:35
|
Python
|
UTF-8
|
Python
| false
| false
| 12,965
|
py
|
ecu_engine.py
|
import time
import random
from cantoolz.can import CANMessage
from cantoolz.uds import UDSMessage
from cantoolz.isotp import ISOTPMessage
from cantoolz.module import CANModule
class ecu_engine(CANModule):
name = "Engine emulator for vircar"
help = """
    This module emulates a car engine.
Init params (example):
{
'id_report': 0x79,
'id_uds': 0x701,
'uds_shift': 0x08,
'id_command': 0x71,
'vin_id': 0x811,
'vin':'NLXXX6666CW006666',
'start_uniq_key':'aXdloDoospOidd78%',
'uds_key':'secret_uds_auth',
'commands': {
'rpm_up':'01',
'rpm_down':'02',
'init': '',
'stop': '00'
        },
'reports_delay': 0.6
}
"""
_active = True
def do_init(self, params):
self._status2 = params
self._vin = self._status2.get("vin", "NLXXX6666CW006666")
self._auth = self._status2.get("start_uniq_key", "tGh&ujKnf5$rFgvc1")
self._uds_auth = self._status2.get("uds_key", "secret_uds_auth1")
self._seed = None
self._uds_auth_done = False
self._status2.update({'rpm': 0, 'status': 0})
self.rpm_up = 0
self.rpm_down = 0
self.default = 500
self.current = 0
self.frames = []
self.init_sess = None
self.init_sess2 = None
self._last_sent = time.process_time()
self.vin_gen = [time.process_time(), time.process_time(), 0.7, 2.9]
left = len(self._vin)
count = int(left / 7) + 1
if (left % 7) > 0:
count += 1
curr = 0
self.vin = []
self.count_part = 0
while left > 0:
if curr == 0:
self.vin.append([0, 0, 0, 0, count, 0, 0, left])
curr += 1
else:
part = [ord(byt) for byt in (self._vin[(curr - 1) * 7: (curr - 1) * 7 + 7])]
self.vin.append([curr] + list(part + list(bytes(7 - len(part)))))
left += -7
curr += 1
def generate_rpm(self):
if self._status2['status'] == 1:
self.current += self.rpm_up - self.rpm_down
self._status2['rpm'] = self.current + random.randrange(-20, 20)
if self._status2['rpm'] < 150:
self._status2['status'] = 2 # dead
elif self._status2['rpm'] > 4500:
self._status2['status'] = 2 # also dead
elif self._status2['status'] == 3:
self._status2['rpm'] = 0
elif self._status2['status'] == 2:
self._status2['rpm'] = 0
self.rpm_up = 0
self.rpm_down = 0
if self._status2['status'] in [2, 3, 1]:
if (time.process_time() - self._last_sent) >= self._status2.get('reports_delay', 0.5):
self.frames.append(
CANMessage(
self._status2.get('id_report', 0xffff),
3,
self._status2['status'].to_bytes(1, byteorder='big') + self._status2['rpm'].to_bytes(2, byteorder='big'), False,
CANMessage.DataFrame))
self._last_sent = time.process_time()
def generate_vin(self):
curr_t = time.process_time()
if self.vin_gen[0] == 0 or curr_t - self.vin_gen[0] > self.vin_gen[2]:
if self.vin_gen[1] == 0 or curr_t - self.vin_gen[1] > self.vin_gen[3]:
self.frames.append(CANMessage.init_data(self._status2.get('vin_id', 1), len(self.vin[self.count_part]), self.vin[self.count_part]))
self.count_part += 1
self.vin_gen[1] = curr_t
if self.count_part == len(self.vin):
self.vin_gen[0] = curr_t
self.count_part = 0
# Effect (could be fuzz operation, sniff, filter or whatever)
def do_effect(self, can_msg, args):
if self._status2['status'] > 0:
self.generate_rpm()
self.generate_vin()
if args['action'] == 'read' and can_msg.CANData: # READ
if self._status2['id_command'] == can_msg.CANFrame.frame_id:
for cmd, value in self._status2['commands'].items():
len_cmd = int(len(str(value)) / 2)
if self._status2['status'] != 0 and cmd == "rpm_up":
len_cmd2 = len_cmd + 1
if can_msg.CANFrame.frame_length == len_cmd2 and can_msg.CANFrame.frame_raw_data[0:len_cmd] == bytes.fromhex(value)[0:len_cmd] and self._status2['status'] == 1:
self.rpm_up += ord(can_msg.CANFrame.frame_raw_data[len_cmd:len_cmd2])
elif self._status2['status'] != 0 and cmd == "rpm_down":
len_cmd2 = len_cmd + 1
if can_msg.CANFrame.frame_length == len_cmd2 and can_msg.CANFrame.frame_raw_data[0:len_cmd] == bytes.fromhex(value)[0:len_cmd] and self._status2['status'] == 1:
self.rpm_down += ord(can_msg.CANFrame.frame_raw_data[len_cmd:len_cmd2])
elif self._status2['status'] != 0 and cmd == "stop":
if can_msg.CANFrame.frame_length == len_cmd and can_msg.CANFrame.frame_raw_data[0:len_cmd] == bytes.fromhex(value)[0:len_cmd]:
self._status2['status'] = 3
self.current = 0
elif cmd == "init":
if not self.init_sess:
self.init_sess = ISOTPMessage(can_msg.CANFrame.frame_id)
ret = self.init_sess.add_can(can_msg.CANFrame)
if ret < 0:
self.init_sess = None
elif ret == 1:
if self.init_sess.message_length != 17:
self.init_sess = None
else:
key = self.init_sess.message_data
i = 0
vin_x = ""
for byte_k in key:
vin_x += chr(byte_k ^ ord(self._auth[i])) # XOR with KEY (to get VIN)
i += 1
if vin_x != self._vin: # auth failed, send error back
self.frames.append(
CANMessage(
self._status2.get('id_report', 0xffff),
2,
b'\xff\xff', False,
CANMessage.DataFrame
)
)
                                else:  # Auth complete
self._status2['status'] = 1
self.frames.append(
CANMessage(
self._status2.get('id_report', 0xffff),
2,
b'\xff\x01', False,
CANMessage.DataFrame
)
)
if self.current < 150:
self.current = self.default
self.init_sess = None
elif self._status2['id_uds'] == can_msg.CANFrame.frame_id:
if not self.init_sess2:
self.init_sess2 = ISOTPMessage(can_msg.CANFrame.frame_id)
ret = self.init_sess2.add_can(can_msg.CANFrame)
if ret < 0:
self.init_sess2 = None
elif ret == 1:
uds_msg = UDSMessage(self._status2.get('uds_shift', 8))
uds_msg.add_raw_request(self.init_sess2)
if can_msg.CANFrame.frame_id in uds_msg.sessions:
if 0x27 in uds_msg.sessions[can_msg.CANFrame.frame_id]: # Check service
if 1 in uds_msg.sessions[can_msg.CANFrame.frame_id][0x27]: # Check sub: SEED request
self._seed = [random.randrange(1, 100), random.randrange(1, 100), random.randrange(1, 100), random.randrange(1, 100)]
                                # Generate UDS response
self.frames.extend(uds_msg.add_request(
self._status2['id_uds'] + self._status2['uds_shift'], # ID
0x27 + 0x40, # Service
0x01, # Sub function
self._seed)) # data
elif 2 in uds_msg.sessions[can_msg.CANFrame.frame_id][0x27]: # Check sub: KEY enter
if self._seed is not None:
key_x = ""
key = uds_msg.sessions[can_msg.CANFrame.frame_id][0x27][2]['data'] # Read key
i = 0
for byte_k in key:
key_x += chr(byte_k ^ self._seed[i % 4])
i += 1
if key_x == self._uds_auth:
self._uds_auth_done = True
self.frames.extend(uds_msg.add_request(
self._status2['id_uds'] + self._status2['uds_shift'], # ID
0x27 + 0x40, # Service
0x02, # Sub function
[0xFF])) # data
else:
self._uds_auth_done = False
self.frames.extend(uds_msg.add_request(
self._status2['id_uds'] + self._status2['uds_shift'], # ID
0x27 + 0x40, # Service
0x02, # Sub function
[0x00])) # data
self._seed = None
elif 0x2e in uds_msg.sessions[can_msg.CANFrame.frame_id] and 0x55 in uds_msg.sessions[can_msg.CANFrame.frame_id][0x2e] and uds_msg.sessions[can_msg.CANFrame.frame_id][0x2e][0x55]['data'][0] == 0x55:
if self._uds_auth_done:
new_key = ''.join(chr(x) for x in uds_msg.sessions[can_msg.CANFrame.frame_id][0x2e][0x55]['data'][1:])
if len(new_key) == 17:
self._uds_auth_done = False
self._auth = new_key
self.frames.extend(uds_msg.add_request(
self._status2['id_uds'] + self._status2['uds_shift'], # ID
0x2e + 0x40, # Service
0x55, # Sub function
[0x55])) # data
else:
self._uds_auth_done = False
self.frames.extend(uds_msg.add_request(
self._status2['id_uds'] + self._status2['uds_shift'], # ID
0x2e + 0x40, # Service
0x00, # Sub function
[0x00]))
else:
self._uds_auth_done = False
self.frames.extend(uds_msg.add_request(
self._status2['id_uds'] + self._status2['uds_shift'], # ID
0x2e + 0x40, # Service
0x00, # Sub function
[0x00])) # data
self.init_sess2 = None
elif args['action'] == 'write' and not can_msg.CANData:
if len(self.frames) > 0:
can_msg.CANFrame = self.frames.pop(0)
can_msg.CANData = True
can_msg.bus = self._bus
return can_msg
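# Hypothetical test-bench sketch (not part of the module; comments only): how a
# client script could derive the 17-byte "init" payload this ECU expects. It
# mirrors the XOR check in do_effect above, where the ECU recovers the VIN as
# received_byte XOR start_uniq_key_byte, so the payload is VIN XOR key.
#
#   vin = 'NLXXX6666CW006666'
#   start_uniq_key = 'aXdloDoospOidd78%'
#   init_payload = bytes(ord(v) ^ ord(k) for v, k in zip(vin, start_uniq_key))
#   # Sending init_payload over ISO-TP to id_command should switch status to 1.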
|
5e5685c019634917a9c0bb3c0ed79fe146f7b013
|
cbc33d3a1e59885077bc8ec95a92127afea3d000
|
/test/test_natural.py
|
6335526f0ae10d2328dfa454415701ef8148eb3b
|
[] |
no_license
|
cortesi/scurve
|
4031f03d131f79021cbc18124334629e9a449709
|
a59e8335c48a7cda7043fbd1b28bcae1abc9645d
|
refs/heads/master
| 2020-10-18T15:05:08.377065
| 2015-03-03T08:59:56
| 2015-03-03T08:59:56
| 455,059
| 373
| 57
| null | 2021-09-22T11:27:53
| 2010-01-01T08:25:49
|
Python
|
UTF-8
|
Python
| false
| false
| 1,109
|
py
|
test_natural.py
|
from scurve import natural, utils
import tutils
class TestNatural:
def test_point(self):
tutils.is_complete(natural.Natural(1, 1))
tutils.is_complete(natural.Natural(1, 3))
tutils.is_complete(natural.Natural(2, 3))
tutils.is_complete(natural.Natural(3, 3))
tutils.is_complete(natural.Natural(3, 12))
tutils.is_complete(natural.Natural(4, 3))
tutils.is_complete(natural.Natural(4, 4))
def test_index(self):
tutils.symmetry(natural.Natural(1, 1))
tutils.symmetry(natural.Natural(2, 3))
tutils.symmetry(natural.Natural(2, 4))
tutils.symmetry(natural.Natural(3, 2))
tutils.symmetry(natural.Natural(3, 12))
tutils.symmetry(natural.Natural(4, 4))
def test_fromSize(self):
z = natural.Natural(2, 3)
z2 = natural.Natural.fromSize(2, len(z))
assert z.dimension == z2.dimension
assert z.size == z2.size
z = natural.Natural(3, 256)
z2 = natural.Natural.fromSize(3, len(z))
assert z.dimension == z2.dimension
assert z.size == z2.size
|
173030f097852a80f2fcb67688f5d10b9589a939
|
30c78b7de2d7580916a83507cb01e20432e2ce32
|
/lab_classes/gprs/load_bgplvm_dimension_select.py
|
d1bd9212667829f5218deb739105b4466629310e
|
[
"BSD-3-Clause"
] |
permissive
|
SheffieldML/notebook
|
cc3611117cc7604061758a2bd08f7a5faa96e31e
|
4051844f4b79246c5fd628f6109a551d1c45c863
|
refs/heads/master
| 2023-04-06T07:36:30.839922
| 2023-04-02T20:43:56
| 2023-04-02T20:43:56
| 19,849,949
| 158
| 118
|
BSD-3-Clause
| 2023-04-02T20:43:57
| 2014-05-16T08:38:08
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,199
|
py
|
load_bgplvm_dimension_select.py
|
import urllib2, os, sys
model_path = 'digit_bgplvm_demo.pickle' # local name for model file
status = ""
re = 0
if len(sys.argv) == 2:
re = 1
if re or not os.path.exists(model_path):  # only download the model if it is not cached (or a reload was requested)
    url = 'http://staffwww.dcs.sheffield.ac.uk/people/J.Hensman/digit_bgplvm_demo.pickle'
    u = urllib2.urlopen(url)
    with open(model_path, 'wb') as f:
meta = u.info()
file_size = int(meta.getheaders("Content-Length")[0])
print "Downloading: %s" % (model_path)
file_size_dl = 0
block_sz = 8192
while True:
buff = u.read(block_sz)
if not buff:
break
file_size_dl += len(buff)
f.write(buff)
sys.stdout.write(" "*(len(status)) + "\r")
status = r"{:7.3f}/{:.3f}MB [{: >7.2%}]".format(file_size_dl/(1.*1e6), file_size/(1.*1e6), float(file_size_dl)/file_size)
sys.stdout.write(status)
sys.stdout.flush()
sys.stdout.write(" "*(len(status)) + "\r")
print status
else:
print "Already cached, to reload run with 'reload' as the only argument"
|
2896ed4db7be67ba3eff489d8e54ffb830b53251
|
4594a6e6ad395e253a7d9f77dcd7d2e201ef5519
|
/examples/simulations/regression_sim.py
|
a4126ce95e5b1151b6ee9d4f2fb5bb4d8b3106cb
|
[
"Apache-2.0"
] |
permissive
|
stanfordmlgroup/ngboost
|
eb6e7f0a5c365d037bcbf21e2dcdb1eec419624e
|
00954f31fcc09ee342898fdba5e1e9668739ea60
|
refs/heads/master
| 2023-08-27T17:37:10.749035
| 2023-08-22T23:20:07
| 2023-08-22T23:20:07
| 138,227,534
| 1,543
| 259
|
Apache-2.0
| 2023-09-08T21:29:52
| 2018-06-21T22:22:40
|
Python
|
UTF-8
|
Python
| false
| false
| 1,565
|
py
|
regression_sim.py
|
from argparse import ArgumentParser
import numpy as np
from matplotlib import pyplot as plt
from sklearn.metrics import r2_score
from ngboost.distns import Normal
from ngboost.evaluation import *
from ngboost.learners import default_linear_learner
from ngboost.ngboost import NGBoost
if __name__ == "__main__":
argparser = ArgumentParser()
argparser.add_argument("--lr", type=float, default=0.1)
argparser.add_argument("--dataset", type=str, default="simulated")
argparser.add_argument("--noise-lvl", type=float, default=0.25)
argparser.add_argument("--distn", type=str, default="Normal")
argparser.add_argument("--natural", action="store_true")
argparser.add_argument("--score", type=str, default="CRPS")
args = argparser.parse_args()
np.random.seed(123)
m, n = 1200, 50
noise = np.random.randn(*(m, 1))
beta1 = np.random.randn(n, 1)
X = np.random.randn(m, n) / np.sqrt(n)
Y = (X @ beta1 + args.noise_lvl * noise).squeeze()
print(X.shape, Y.shape)
X_train, X_test = (
X[:1000, :],
X[
1000:,
],
)
Y_train, Y_test = Y[:1000], Y[1000:]
ngb = NGBoost(
n_estimators=400,
learning_rate=args.lr,
Dist=Normal,
Base=default_linear_learner,
natural_gradient=args.natural,
minibatch_frac=1.0,
Score=eval(args.score)(),
verbose=True,
verbose_eval=100,
)
losses = ngb.fit(X_train, Y_train)
forecast = ngb.pred_dist(X_test)
print("R2:", r2_score(Y_test, forecast.loc))
|
15ba9467e3ef87716de4173d7e16fa630e8a5e5d
|
36dfd21c845f37d1b01c093715d6688513aec704
|
/src/clusto/test/drivers/resourcetests.py
|
567cbefc83b97b412a9a0a2e8ee826e5e5bfb271
|
[] |
permissive
|
clusto/clusto
|
f992040ef935cc43a9f967d1412888f56ec82f71
|
7ac64e94482cc71075227dacda48953439f46dab
|
refs/heads/master
| 2021-07-15T21:51:23.247277
| 2021-03-13T13:23:54
| 2021-03-13T13:23:54
| 1,432,240
| 246
| 63
|
BSD-3-Clause
| 2021-03-13T13:12:45
| 2011-03-02T20:17:28
|
Python
|
UTF-8
|
Python
| false
| false
| 2,816
|
py
|
resourcetests.py
|
from clusto.test import testbase
from clusto.drivers import *
class ResourceManagerTests(testbase.ClustoTestBase):
def testAllocate(self):
rm = ResourceManager('test')
d = Driver('d')
rm.allocate(d, 'foo')
self.assertEqual(rm.owners('foo'), [d])
def testResourceCount(self):
rm = ResourceManager('test')
d = Driver('d')
rm.allocate(d, 'foo')
rm.allocate(d, 'bar')
self.assertEqual(rm.count, 2)
def testDeallocate(self):
rm = ResourceManager('test')
d = Driver('d')
rm.allocate(d, 'foo')
rm.allocate(d, 'bar')
self.assertEqual(rm.count, 2)
rm.deallocate(d, 'foo')
self.assertEqual(rm.count, 1)
self.assertEqual(rm.owners('foo'), [])
rm.deallocate(d, 'bar')
self.assertEqual(rm.count, 0)
self.assertEqual(rm.owners('bar'), [])
def testGeneralDeallocate(self):
rm1 = ResourceManager('test1')
rm2 = ResourceManager('test2')
d = Driver('d')
rm1.allocate(d, 'foo1')
rm1.allocate(d, 'bar1')
rm2.allocate(d, 'foo2')
rm2.allocate(d, 'bar2')
self.assertEqual(rm1.count, 2)
self.assertEqual(rm2.count, 2)
self.assertEqual(sorted([x.value for x in rm1.resources(d)]),
sorted(['foo1', 'bar1', 'foo2', 'bar2']))
rm1.deallocate(d)
self.assertEqual(rm1.count, 0)
self.assertEqual(rm2.count, 2)
rm2.deallocate(d)
self.assertEqual(rm2.count, 0)
self.assertEqual(sorted(ResourceManager.resources(d)),
sorted([]))
def testResourceAttrs(self):
rm = ResourceManager('test')
d = Driver('d')
rm.allocate(d, 'foo')
rm.allocate(d, 'bar')
rm.add_resource_attr(d, 'foo', 'attr1', 10)
self.assertEqual(rm.get_resource_attr_values(d, 'foo', 'attr1'), [10])
rm.add_resource_attr(d, 'foo', 'attr1', 20)
self.assertEqual(sorted(rm.get_resource_attr_values(d, 'foo', 'attr1')),
sorted([10, 20]))
rm.del_resource_attr(d, 'foo', 'attr1')
self.assertEqual(rm.get_resource_attr_values(d, 'foo', 'attr1'), [])
rm.set_resource_attr(d,'bar', 'attr2', 1)
self.assertEqual(rm.get_resource_attr_values(d, 'bar', 'attr2'), [1])
rm.set_resource_attr(d,'bar', 'attr2', 2)
self.assertEqual(rm.get_resource_attr_values(d, 'bar', 'attr2'), [2])
def testReserveResource(self):
rm = ResourceManager('test')
d = Driver('d')
rm.allocate(d, 'foo')
rm.allocate(rm, 'bar')
self.assertRaises(ResourceException, rm.allocate, d, 'bar')
|
6121cbddbf7a357306e9c86063f11e5b742dabd8
|
37e88c82b29bb92819ee7f82d6d24d778f78ab99
|
/core/string_utils.py
|
3826365c9c040180635be16dacdd4a8fe37c313a
|
[
"Apache-2.0"
] |
permissive
|
phage-nz/ph0neutria
|
134fc27b074618bd4b6a7685235ec5dd525eebbb
|
865aae37d8503d3f580f6762aa67f65958355ba7
|
refs/heads/master
| 2022-07-04T15:14:29.136172
| 2020-04-24T23:37:23
| 2020-04-24T23:37:23
| 70,286,606
| 281
| 65
|
Apache-2.0
| 2020-04-12T06:46:57
| 2016-10-07T22:12:20
|
Python
|
UTF-8
|
Python
| false
| false
| 6,722
|
py
|
string_utils.py
|
#!/usr/bin/python3
from .config_utils import get_base_config
from fuzzywuzzy import fuzz
from .log_utils import get_module_logger
from tldextract import extract, TLDExtract
from urllib.parse import urlparse
import Levenshtein
import os
import re
import statistics
import string
import sys
CDIR = os.path.dirname(os.path.realpath(__file__))
ROOTDIR = os.path.abspath(os.path.join(CDIR, os.pardir))
BASECONFIG = get_base_config(ROOTDIR)
LOGGING = get_module_logger(__name__)
SCORE_THRESHOLD_NORMAL = 100
SCORE_THRESHOLD_FAST = 90
def truncate_string(input_string, length):
"""Truncate a string.
Params:
    - input_string: (type: string) string to truncate.
- length: (type: int) length of output string.
Returns:
- result: (type: string) truncated string.
"""
    return (input_string[:length] +
            '..') if len(input_string) > length else input_string
def clean_url(url):
"""Remove extraneous characters from URL.
Params:
- url: (type: string) URL to clean.
Returns:
- url: (type: string) clean URL.
"""
if url is None:
return None
if '??' in url:
url = url.split('??')[0]
if url.endswith('?'):
url = url[:-1]
if '`' in url:
url = url.replace('`', '')
return url
def get_host_from_url(url):
"""Extract the host name from a URL.
Params:
- url: (type: string) URL to parse.
Returns:
- host_name: (type: string) host name.
"""
host_name = urlparse(url).hostname
if ':' in host_name:
host_name = host_name.split(':')[0]
return host_name
def remove_tld(domain):
"""Remove the TLD from a domain name.
Params:
- domain: (type: string) FQDN.
Returns:
- domain: (type: string) FQDN without TLD.
"""
try:
tld = extract(domain).suffix
domain = ''.join(domain.rsplit(tld, 1)).strip('.')
except Exception as e:
LOGGING.warning(
'Error stripping TLD ({0}): {1}'.format(
domain, str(e)))
return domain
def extract_address(input_string):
"""Extracts an IP address from a blob of text.
Params:
- input_string: (type: string) string to parse.
Returns:
- result: (type: string) extracted IP address.
"""
if input_string:
addr_search = re.search(
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}(?:/\d{1,2}|)',
input_string)
if bool(addr_search):
return addr_search.group()
return None
def extract_url(input_string):
"""Extracts a URL from a blob of text.
Params:
- input_string: (type: string) string to parse.
Returns:
- result: (type: string) extracted URL.
"""
if input_string:
url_search = re.search(
r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+',
input_string)
if bool(url_search):
return url_search.group()
return None
def fuzzy_score_string(first_string, second_string):
"""Produce a similarity score for two strings (using Levenshtein distance).
Params:
- first_string: (type: string) first string.
- second_string: (type: string) second string.
Returns:
- result: (type: int) score.
"""
score = 0
if len(first_string) < len(second_string):
shorter, longer = (first_string, second_string)
window_length = len(shorter)
num_iterations = len(longer) - len(shorter) + 1
for position in range(0, num_iterations):
window = longer[position:position + window_length]
l_ratio = Levenshtein.ratio(window, shorter) * 100
if l_ratio > 60:
result = statistics.mean(
[100 - Levenshtein.distance(window, shorter) * 15, l_ratio, l_ratio])
else:
result = l_ratio
if result > score:
score = result
else:
l_ratio = Levenshtein.ratio(first_string, second_string) * 100
score = statistics.mean(
[100 - Levenshtein.distance(first_string, second_string) * 15, l_ratio, l_ratio])
simple = fuzz.ratio(first_string, second_string)
partial = fuzz.partial_ratio(first_string, second_string)
sort = fuzz.token_sort_ratio(first_string, second_string)
set_ratio = fuzz.token_set_ratio(first_string, second_string)
score = max([score, simple, partial, sort, set_ratio])
if score < 75:
score = 0
return score * 0.85
def score_match(first_string, second_string, domain_score=False):
"""Produce a similarity score for two strings.
Params:
- first_string: (type: string) first string.
- second_string: (type: string) second string.
- domain_score: (type: bool) whether the comparison is of two domains.
Returns:
- result: (type: int) score.
"""
score = 0
if first_string == second_string:
return SCORE_THRESHOLD_NORMAL
if domain_score:
if remove_tld(first_string) == remove_tld(second_string):
return SCORE_THRESHOLD_NORMAL
if second_string in first_string:
return SCORE_THRESHOLD_NORMAL
if domain_score:
first_string = remove_tld(first_string)
second_string = remove_tld(second_string)
l_distance = Levenshtein.distance(first_string, second_string)
fuzz_ratio = fuzz.token_sort_ratio(first_string, second_string)
if l_distance <= 2:
score = 50 + 25 * (2 - l_distance)
elif fuzz_ratio > 80:
score = fuzz_ratio - 25
first_len = len(first_string)
second_len = len(second_string)
if first_len > second_len / 2 and first_len > 4:
score += fuzzy_score_string(first_string, second_string)
return score
def similar_string(first_string, second_string):
"""Determine if two strings are similar.
Params:
- first_string: (type: string) first string.
- second_string: (type: string) second string.
Returns:
- result: (type: bool) match result.
"""
score = score_match(first_string, second_string)
if score >= SCORE_THRESHOLD_NORMAL:
return True
return False
def similar_string_fast(first_string, second_string):
"""Determine if two strings are similar (using two most effective methods).
Params:
- first_string: (type: string) first string.
- second_string: (type: string) second string.
Returns:
- result: (type: bool) match result.
"""
partial_score = fuzz.ratio(first_string, second_string)
token_score = fuzz.token_set_ratio(first_string, second_string)
if max(partial_score, token_score) >= SCORE_THRESHOLD_FAST:
return True
return False
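if __name__ == '__main__':
    # Minimal usage sketch (illustrative only; assumes the package configuration
    # is present so the module-level imports above succeed). The domains are
    # made-up examples of a lookalike versus a legitimate name.
    print(similar_string_fast('paypa1-login.com', 'paypal.com'))
    print(score_match('paypa1-login.com', 'paypal.com', domain_score=True))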
|
66a27f7cd68f2d7681553ea9fd4f40caae47c8ee
|
d91d19da3589c3f69a834bbb9834386e80f100e0
|
/datashader/data_libraries/dask_cudf.py
|
9e0f7634d46116463cbdd561670b470e96d52b68
|
[] |
permissive
|
holoviz/datashader
|
11d518371e974c02ba3843871e3e0905e0c83956
|
b510594eb771d14cff3b69efca8ddd37ca3a1046
|
refs/heads/main
| 2023-08-18T13:55:24.214980
| 2023-08-17T08:45:48
| 2023-08-17T08:45:48
| 48,504,165
| 1,040
| 133
|
BSD-3-Clause
| 2023-09-11T09:51:30
| 2015-12-23T18:02:20
|
Python
|
UTF-8
|
Python
| false
| false
| 368
|
py
|
dask_cudf.py
|
from __future__ import annotations
from datashader.data_libraries.dask import dask_pipeline
from datashader.core import bypixel
import dask_cudf
@bypixel.pipeline.register(dask_cudf.DataFrame)
def dask_cudf_pipeline(df, schema, canvas, glyph, summary, *, antialias=False):
return dask_pipeline(df, schema, canvas, glyph, summary, antialias=antialias, cuda=True)
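# Illustrative sketch (comments only; assumes a CUDA-capable environment and a
# dask_cudf DataFrame with x/y columns — the file name below is an example):
#
#   import datashader as ds
#   ddf = dask_cudf.read_csv("points.csv")
#   canvas = ds.Canvas(plot_width=400, plot_height=400)
#   agg = canvas.points(ddf, "x", "y")   # dispatches through dask_cudf_pipeline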
|
0360c3bd38473c122f40a00bf5831bde7f468814
|
11cd362cdd78c2fc48042ed203614b201ac94aa6
|
/desktop/core/ext-py3/boto-2.49.0/tests/mturk/test_disable_hit.py
|
2d9bd9bfc00c30086683c94f601a93df3b6bf79b
|
[
"CC-BY-3.0",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"ZPL-2.0",
"Unlicense",
"LGPL-3.0-only",
"CC0-1.0",
"LicenseRef-scancode-other-permissive",
"CNRI-Python",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-2.0-or-later",
"Python-2.0",
"GPL-3.0-only",
"CC-BY-4.0",
"LicenseRef-scancode-jpython-1.1",
"AFL-2.1",
"JSON",
"WTFPL",
"MIT",
"LicenseRef-scancode-generic-exception",
"LicenseRef-scancode-jython",
"GPL-3.0-or-later",
"LicenseRef-scancode-python-cwi",
"BSD-3-Clause",
"LGPL-3.0-or-later",
"Zlib",
"LicenseRef-scancode-free-unknown",
"Classpath-exception-2.0",
"LicenseRef-scancode-proprietary-license",
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"MPL-2.0",
"ISC",
"GPL-2.0-only",
"ZPL-2.1",
"BSL-1.0",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-public-domain",
"Xnet",
"BSD-2-Clause"
] |
permissive
|
cloudera/hue
|
b42343d0e03d2936b5a9a32f8ddb3e9c5c80c908
|
dccb9467675c67b9c3399fc76c5de6d31bfb8255
|
refs/heads/master
| 2023-08-31T06:49:25.724501
| 2023-08-28T20:45:00
| 2023-08-28T20:45:00
| 732,593
| 5,655
| 2,244
|
Apache-2.0
| 2023-09-14T03:05:41
| 2010-06-21T19:46:51
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 324
|
py
|
test_disable_hit.py
|
from tests.mturk.support import unittest
from common import MTurkCommon
from boto.mturk.connection import MTurkRequestError
class TestDisableHITs(MTurkCommon):
def test_disable_invalid_hit(self):
self.assertRaises(MTurkRequestError, self.conn.disable_hit, 'foo')
if __name__ == '__main__':
unittest.main()
|
c9bb9af526a8d131e9be067a97ce5966f5a7775f
|
db0e49a94c2554ec8853133d09afca65d697eb62
|
/ucp/benchmarks/utils.py
|
d52facc21e5af58d6f267b7f17ddd21168fd997c
|
[
"BSD-3-Clause"
] |
permissive
|
rapidsai/ucx-py
|
415c631039c9c6ceb8d90b04e872d5a61a12eb0f
|
9ba056f9f2b1af169c6312b178e9853b066928bd
|
refs/heads/branch-0.34
| 2023-09-02T21:33:25.839513
| 2023-08-28T13:40:30
| 2023-08-28T13:40:30
| 149,822,197
| 103
| 45
|
BSD-3-Clause
| 2023-09-08T18:41:12
| 2018-09-21T21:53:38
|
Python
|
UTF-8
|
Python
| false
| false
| 11,722
|
py
|
utils.py
|
import asyncio
import json
import logging
import multiprocessing as mp
import os
import pickle
import threading
from types import ModuleType
import numpy as np
from ucp._libs.utils import get_address
logger = logging.getLogger("ucx")
def _ensure_cuda_device(devs, rank):
import numba.cuda
dev_id = devs[rank % len(devs)]
os.environ["CUDA_VISIBLE_DEVICES"] = str(dev_id)
logger.debug(f"{dev_id=}, {rank=}")
numba.cuda.current_context()
def get_allocator(
object_type: str, rmm_init_pool_size: int, rmm_managed_memory: bool
) -> ModuleType:
"""
Initialize and return array-allocator based on arguments passed.
Parameters
----------
object_type: str
The type of object the allocator should return. Options are: "numpy", "cupy"
or "rmm".
rmm_init_pool_size: int
If the object type is "rmm" (implies usage of RMM pool), define the initial
pool size.
rmm_managed_memory: bool
If the object type is "rmm", use managed memory if `True`, or default memory
otherwise.
Returns
-------
A handle to a module, one of ``numpy`` or ``cupy`` (if device memory is requested).
If the object type is ``rmm``, then ``cupy`` is configured to use RMM as an
allocator.
"""
if object_type == "numpy":
import numpy as xp
elif object_type == "cupy":
import cupy as xp
else:
import cupy as xp
import rmm
from rmm.allocators.cupy import rmm_cupy_allocator
rmm.reinitialize(
pool_allocator=True,
managed_memory=rmm_managed_memory,
initial_pool_size=rmm_init_pool_size,
)
xp.cuda.set_allocator(rmm_cupy_allocator)
return xp
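# Illustrative only (not used elsewhere in this module): selecting an allocator
# for a device benchmark might look like
#
#   xp = get_allocator("rmm", rmm_init_pool_size=2**30, rmm_managed_memory=False)
#   buf = xp.zeros(1_000_000, dtype="u1")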
async def send_pickled_msg(ep, obj):
msg = pickle.dumps(obj)
await ep.send_obj(msg)
async def recv_pickled_msg(ep):
msg = await ep.recv_obj()
return pickle.loads(msg)
def _server_process(
q,
server_file,
n_workers,
ucx_options_list,
):
import ucp
if ucx_options_list is not None:
ucp.init(ucx_options_list)
import sys
async def run():
lock = threading.Lock()
eps = {}
results = {}
async def server_handler(ep):
worker_rank, worker_ip, worker_port = await recv_pickled_msg(ep)
with lock:
eps[worker_rank] = (worker_ip, worker_port)
while len(eps) != n_workers:
await asyncio.sleep(0.1)
await send_pickled_msg(ep, eps)
worker_results = await recv_pickled_msg(ep)
with lock:
results[worker_rank] = worker_results
lf = ucp.create_listener(server_handler)
if server_file is None:
fp = open(sys.stdout.fileno(), mode="w", closefd=False)
else:
fp = open(server_file, mode="w")
with fp:
json.dump({"address": get_address(), "port": lf.port}, fp)
while len(results) != n_workers:
await asyncio.sleep(0.1)
return results
loop = asyncio.new_event_loop()
ret = loop.run_until_complete(run())
for rank in range(n_workers):
q.put(ret[rank])
def _run_cluster_server(
server_file,
n_workers,
ucx_options_list=None,
):
"""
Create a server that synchronizes workers.
The server will wait for all `n_workers` to connect and communicate their
endpoint information, then send the aggregate information to all workers
so that they will create endpoints to each other, in a fully-connected
network. Each worker will then communicate its result back to the scheduler
which will return that result back to the caller.
Parameters
----------
server_file: str or None
A string containing the path to a file that will be populated to contain
the address and port of the server, or `None` to print that information
to stdout.
num_workers : int
Number of workers in the entire network, required to infer when all
workers have connected and completed.
ucx_options_list: list of dict
Options to pass to UCX when initializing workers, one for each worker.
Returns
-------
return : tuple
A tuple with two elements: the process spawned and a queue where results
will eventually be stored.
"""
q = mp.Queue()
p = mp.Process(
target=_server_process,
args=(
q,
server_file,
n_workers,
ucx_options_list,
),
)
p.start()
return p, q
def run_cluster_server(
server_file,
n_workers,
ucx_options_list=None,
):
"""
Blocking version of `_run_cluster_server()`.
Provides same behavior as `_run_cluster_server()`, except that it will join
processes and thus cause the function to be blocking. It will also combine
the queue as a list with results for each worker in the `[0..n_workers)` range.
"""
p, q = _run_cluster_server(
server_file=server_file,
n_workers=n_workers,
ucx_options_list=ucx_options_list,
)
    # Joining the process while the queue still holds many items (reproducible
    # with more than 32 workers) causes a hang, so we first drain the queue into
    # a list and return that list instead.
ret = [q.get() for i in range(n_workers)]
p.join()
assert not p.exitcode
return ret
def _worker_process(
queue,
server_info,
num_node_workers,
rank,
ucx_options_list,
ensure_cuda_device,
func,
args,
):
if ensure_cuda_device is True:
_ensure_cuda_device(args.devs, rank % num_node_workers)
import ucp
if ucx_options_list is not None:
ucp.init(ucx_options_list[rank])
async def run():
eps = {}
async def server_handler(ep):
peer_rank = np.empty((1,), dtype=np.uint64)
await ep.recv(peer_rank)
assert peer_rank[0] not in eps
eps[peer_rank[0]] = ep
lf = ucp.create_listener(server_handler)
logger.debug(f"Sending message info to {server_info=}, {rank=}")
server_ep = await ucp.create_endpoint(
server_info["address"], server_info["port"]
)
await send_pickled_msg(server_ep, (rank, get_address(), lf.port))
logger.debug(f"Receiving network info from server {rank=}")
workers_info = await recv_pickled_msg(server_ep)
n_workers = len(workers_info)
logger.debug(f"Creating endpoints to network {rank=}")
for i in range(rank + 1, n_workers):
remote_worker_ip, remote_worker_port = workers_info[i]
eps[i] = await ucp.create_endpoint(remote_worker_ip, remote_worker_port)
await eps[i].send(np.array([rank], dtype=np.uint64))
while len(eps) != n_workers - 1:
await asyncio.sleep(0.1)
logger.debug(f"Running worker {rank=}")
if asyncio.iscoroutinefunction(func):
results = await func(rank, eps, args)
else:
results = func(rank, eps, args)
await send_pickled_msg(server_ep, results)
loop = asyncio.new_event_loop()
ret = loop.run_until_complete(run())
queue.put(ret)
def _run_cluster_workers(
server_info,
num_workers,
num_node_workers,
node_idx,
worker_func,
worker_args=None,
ucx_options_list=None,
ensure_cuda_device=False,
):
"""
Create `n_workers` UCX processes that each run `worker_func`.
Each process will first connect to a server spawned with
`run_cluster_server()` which will synchronize workers across the nodes.
This function is non-blocking and the processes created by this function
call are started but not joined, making this function non-blocking. It's the
user's responsibility to join all processes in the returned list to ensure
their completion.
Parameters
----------
server_info: str or dict
A string containing the path to a file created by `run_cluster_server()`
containing the address and port of the server. Alternatively, a
dictionary containing keys `"address"` and `"port"` may be used the same
way.
num_workers : int
Number of workers in the entire network. Every node must run the same
number of workers, and thus this value should be equal to
`node_num_workers * num_cluster_nodes`.
num_node_workers: int
Number of workers that this node will run.
node_idx: int
Index of the node in the entire cluster, within the range
`[0..num_cluster_nodes)`. This value is used to calculate the rank
of each worker. Each node must have a unique index.
worker_func: callable (can be a coroutine)
Function that each worker executes.
Must have signature: `worker(rank, eps, args)` where
- rank is the worker id
- eps is a dict of ranks to ucx endpoints
- args given here as `worker_args`
worker_args: object
The argument to pass to `worker_func`.
ucx_options_list: list of dict
Options to pass to UCX when initializing workers, one for each worker.
ensure_cuda_device: bool
If `True`, sets the `CUDA_VISIBLE_DEVICES` environment variable to match
the proper CUDA device based on the worker's rank and create the CUDA
context on the corresponding device before calling `import ucp` for the
first time on the newly-spawned worker process, otherwise continues
without modifying `CUDA_VISIBLE_DEVICES` and creating a CUDA context.
Please note that having this set to `False` may cause all workers to use
device 0 and will not ensure proper InfiniBand<->GPU mapping on UCX,
potentially leading to low performance as GPUDirectRDMA will not be
active.
Returns
-------
processes : list
The list of processes spawned (one for each worker).
"""
if isinstance(server_info, str):
with open(server_info, mode="r") as fp:
server_info = json.load(fp)
elif not isinstance(server_info, dict):
raise ValueError(
"server_info must be the path to a server file, or a dictionary "
"with the unpacked values."
)
processes = []
for worker_num in range(num_node_workers):
rank = node_idx * num_node_workers + worker_num
q = mp.Queue()
p = mp.Process(
target=_worker_process,
args=(
q,
server_info,
num_node_workers,
rank,
ucx_options_list,
ensure_cuda_device,
worker_func,
worker_args,
),
)
p.start()
processes.append(p)
return processes
def run_cluster_workers(
server_info,
num_workers,
num_node_workers,
node_idx,
worker_func,
worker_args=None,
ucx_options_list=None,
ensure_cuda_device=False,
):
"""
Blocking version of `_run_cluster_workers()`.
Provides same behavior as `_run_cluster_workers()`, except that it will join
processes and thus cause the function to be blocking.
"""
processes = _run_cluster_workers(
server_info=server_info,
num_workers=num_workers,
num_node_workers=num_node_workers,
node_idx=node_idx,
worker_func=worker_func,
worker_args=worker_args,
ucx_options_list=ucx_options_list,
ensure_cuda_device=ensure_cuda_device,
)
for proc in processes:
proc.join()
assert not proc.exitcode
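# Hypothetical single-node sketch (comments only; the worker function and file
# name below are illustrative, not part of the benchmark suite):
#
#   def demo_worker(rank, eps, args):
#       return {"rank": rank, "num_peers": len(eps)}
#
#   server, queue = _run_cluster_server(server_file="server.json", n_workers=2)
#   run_cluster_workers(server_info="server.json", num_workers=2,
#                       num_node_workers=2, node_idx=0, worker_func=demo_worker)
#   results = [queue.get() for _ in range(2)]
#   server.join()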
|
f86976baf2311dd1b10186a1b9a32426e128abe5
|
b26c41926fa3a7c2c061132d80e91a2750f2f468
|
/tensorflow_probability/python/distributions/truncated_normal_test.py
|
cb7019ad5945a33ecdf94a5c1962dbe26e702686
|
[
"Apache-2.0"
] |
permissive
|
tensorflow/probability
|
22e679a4a883e408f8ef237cda56e3e3dfa42b17
|
42a64ba0d9e0973b1707fcd9b8bd8d14b2d4e3e5
|
refs/heads/main
| 2023-09-04T02:06:08.174935
| 2023-08-31T20:30:00
| 2023-08-31T20:31:33
| 108,053,674
| 4,055
| 1,269
|
Apache-2.0
| 2023-09-13T21:49:49
| 2017-10-23T23:50:54
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 24,194
|
py
|
truncated_normal_test.py
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for TruncatedNormal distribution."""
import itertools
import unittest
# Dependency imports
from absl.testing import parameterized
import numpy as np
from scipy import stats as sp_stats
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.distributions import normal
from tensorflow_probability.python.distributions import truncated_normal
from tensorflow_probability.python.internal import test_util
from tensorflow_probability.python.math import gradient
EPSILON = 1e-5
def scipy_trunc_norm_dist(loc, scale, low, high):
"""Construct a scipy.sp_stats.truncnorm for the (scalar) parameters given.
Note: scipy's definition of the parameters is slightly different.
https://github.com/scipy/scipy/issues/7591
Args:
    loc: Location parameter (scalar only; batching is not supported).
    scale: Scale parameter (scalar).
    low: Lower truncation bound (scalar).
    high: Upper truncation bound (scalar).
Returns:
scipy frozen distribution.
"""
a = (low - loc) / scale
b = (high - loc) / scale
return sp_stats.truncnorm(a, b, loc=loc, scale=scale)
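# For example (illustrative only): loc=0., scale=2., low=-1., high=3. maps to
# scipy's standardized bounds a = (-1 - 0) / 2 = -0.5 and b = (3 - 0) / 2 = 1.5,
# i.e. sp_stats.truncnorm(-0.5, 1.5, loc=0., scale=2.).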
class _TruncatedNormalTestCase(test_util.TestCase):
def setUp(self):
super(_TruncatedNormalTestCase, self).setUp()
self._rng = np.random.RandomState(42)
def assertAllGreaterEqual(self, a, b):
comparison = a >= b
all_true = np.ones_like(comparison, dtype=np.bool_)
self.assertAllEqual(comparison, all_true)
def assertAllLessEqual(self, a, b):
comparison = a <= b
all_true = np.ones_like(comparison, dtype=np.bool_)
self.assertAllEqual(comparison, all_true)
def assertEmpiricalDistributionsEqual(self, sample_a, sample_b, rtol=1e-6,
atol=1e-6):
"""Assert the empirical distribution of two set of samples is similar.
Args:
sample_a: Flat numpy array of samples from dist a.
sample_b: Flat numpy array of samples from dist b.
rtol: Relative tolerances in the histogram comparison.
atol: Absolute tolerances in the histogram comparison.
"""
self.assertAllFinite(sample_a)
self.assertAllFinite(sample_b)
lb = np.min([sample_a, sample_b])
ub = np.max([sample_a, sample_b])
hist_a = np.histogram(sample_a, range=(lb, ub), bins=30, density=True)[0]
hist_b = np.histogram(sample_b, range=(lb, ub), bins=30, density=True)[0]
self.assertAllClose(hist_a, hist_b, rtol=rtol, atol=atol)
@test_util.test_all_tf_execution_regimes
class TruncatedNormalStandaloneTestCase(_TruncatedNormalTestCase):
def _testParamShapes(self, desired_shape):
tn_param_shapes = truncated_normal.TruncatedNormal.param_shapes(
desired_shape)
# Check the shapes by comparison with the untruncated Normal.
n_param_shapes = normal.Normal.param_shapes(desired_shape)
self.assertAllEqual(
self.evaluate(tn_param_shapes['loc']),
self.evaluate(n_param_shapes['loc']))
self.assertAllEqual(
self.evaluate(tn_param_shapes['scale']),
self.evaluate(n_param_shapes['scale']))
self.assertAllEqual(
self.evaluate(tn_param_shapes['low']),
self.evaluate(n_param_shapes['loc']))
self.assertAllEqual(
self.evaluate(tn_param_shapes['high']),
self.evaluate(n_param_shapes['loc']))
loc = tf.zeros(tn_param_shapes['loc'])
scale = tf.ones(tn_param_shapes['scale'])
high = tf.ones(tn_param_shapes['high'])
low = tf.zeros(tn_param_shapes['low'])
sample_shape = self.evaluate(
tf.shape(
truncated_normal.TruncatedNormal(
loc=loc, scale=scale, low=low, high=high,
validate_args=True).sample(seed=test_util.test_seed())))
self.assertAllEqual(desired_shape, sample_shape)
def testParamShapes(self):
desired_shape = [10, 3, 4]
self._testParamShapes(desired_shape)
self._testParamShapes(tf.constant(desired_shape))
def testParamStaticShapes(self):
sample_shape = [7]
self._testParamShapes(sample_shape)
self._testParamShapes(tf.TensorShape(sample_shape))
def testShapeWithPlaceholders(self):
if tf.executing_eagerly():
return
loc = tf1.placeholder_with_default(5., shape=None)
scale = tf1.placeholder_with_default([1., 2], shape=None)
ub = tf1.placeholder_with_default([10., 11.], shape=None)
lb = tf1.placeholder_with_default([-1.], shape=None)
dist = truncated_normal.TruncatedNormal(
loc, scale, lb, ub, validate_args=True)
self.assertEqual(dist.batch_shape, tf.TensorShape(None))
self.assertEqual(dist.event_shape, ())
self.assertAllEqual(self.evaluate(dist.event_shape_tensor()), [])
self.assertAllEqual(self.evaluate(dist.batch_shape_tensor()), [2])
self.assertAllEqual(self.evaluate(
dist.sample(5, seed=test_util.test_seed())).shape, [5, 2])
ub = tf1.placeholder_with_default([[5., 11.]], shape=None)
dist = truncated_normal.TruncatedNormal(
loc, scale, lb, ub, validate_args=True)
self.assertAllEqual(self.evaluate(
dist.sample(5, seed=test_util.test_seed())).shape, [5, 1, 2])
def testBatchSampling(self):
"""Check (empirically) the different parameters in a batch are respected.
"""
n = int(1e5)
lb = [[-1.0, 9.0], [0., 8.]]
ub = [[1.0, 11.0], [5., 20.]]
dist = truncated_normal.TruncatedNormal(
loc=[[0., 10.], [0., 10.]],
scale=[[1., 1.], [5., 5.]],
low=lb,
high=ub,
validate_args=True)
x = self.evaluate(dist.sample(n, seed=test_util.test_seed()))
self.assertEqual(x.shape, (n, 2, 2))
means = np.mean(x, axis=0)
var = np.var(x, axis=0)
self.assertAllClose(
means, [[0., 10.], [2.299, 12.48]], rtol=1e-2, atol=1e-2)
self.assertAllClose(var, [[0.29, 0.29], [1.99, 8.74]], rtol=1e-2, atol=1e-2)
empirical_lb = np.min(x, axis=0)
self.assertAllClose(empirical_lb, lb, atol=0.1)
empirical_ub = np.max(x, axis=0)
self.assertAllClose(empirical_ub, ub, atol=0.1)
@parameterized.parameters(
(0., 1., -1., 1.),
(1., 1., 0., 2.),
(-0.5, 0.5, -0.9, -0.4),
(10., 3.0, 9.9, 25.),
(2., 1.5, 0.1, 1.9))
def testMomentsEmpirically(self, loc, scale, low, high):
n = int(2e5)
dist = truncated_normal.TruncatedNormal(
loc=loc, scale=scale, low=low, high=high, validate_args=True)
x = self.evaluate(dist.sample(n, seed=test_util.test_seed()))
empirical_mean = np.mean(x)
empirical_var = np.var(x)
expected_mean = self.evaluate(dist.mean())
expected_var = self.evaluate(dist.variance())
self.assertAlmostEqual(expected_mean, empirical_mean, places=1)
self.assertAlmostEqual(expected_var, empirical_var, places=1)
def testNegativeSigmaFails(self):
with self.assertRaisesOpError('`scale` must be positive'):
dist = truncated_normal.TruncatedNormal(
loc=0., scale=-0.1, low=-1.0, high=1.0, validate_args=True)
self.evaluate(dist.mean())
def testIncorrectBoundsFails(self):
with self.assertRaisesOpError('`low >= high`'):
dist = truncated_normal.TruncatedNormal(
loc=0., scale=0.1, low=1.0, high=-1.0, validate_args=True)
self.evaluate(dist.mean())
with self.assertRaisesOpError('`low >= high`'):
dist = truncated_normal.TruncatedNormal(
loc=0., scale=0.1, low=1.0, high=1.0, validate_args=True)
self.evaluate(dist.mean())
def testAssertValidSample(self):
dist = truncated_normal.TruncatedNormal(
loc=0., scale=2., low=-4., high=3., validate_args=True)
with self.assertRaisesOpError('must be greater than or equal to `low`'):
self.evaluate(dist.cdf([-4.2, 1.7, 2.3]))
with self.assertRaisesOpError('must be less than or equal to `high`'):
self.evaluate(dist.survival_function([2.3, -3.2, 4.]))
def testLogPdfAtBoundary(self):
dist = truncated_normal.TruncatedNormal(
loc=[-2., 3.], scale=1., low=-4., high=2., validate_args=True)
log_pdf_at_boundary = self.evaluate(dist.log_prob([[-4.], [2.]]))
self.assertTrue(np.isfinite(log_pdf_at_boundary).all())
def testTruncatedAtTail(self):
dist = truncated_normal.TruncatedNormal(
loc=0., scale=1., low=13., high=15., validate_args=True)
sp_dist = scipy_trunc_norm_dist(0., 1., 13., 15.)
actual_log_prob = self.evaluate(dist.log_prob(14.))
self.assertTrue(np.isfinite(actual_log_prob))
expected_log_prob = sp_dist.logpdf(14.)
self.assertAlmostEqual(actual_log_prob, expected_log_prob, places=4)
actual_cdf = self.evaluate(dist.cdf(14.))
self.assertAlmostEqual(actual_cdf, 1., places=4)
actual_log_cdf = self.evaluate(dist.log_cdf(14.))
self.assertAlmostEqual(actual_log_cdf, 0., places=4)
def testNegativeSigmaFailsVarAssignment(self):
dist = truncated_normal.TruncatedNormal(
loc=0., scale=tf.Variable(0.1), low=-1.0, high=1.0, validate_args=True)
self.evaluate([v.initializer for v in dist.variables])
self.evaluate(dist.mean())
with tf.control_dependencies([dist.scale.assign(-.1)]):
with self.assertRaisesOpError('`scale` must be positive'):
self.evaluate(dist.mean())
def testIncorrectBoundsFailsVarAssignment(self):
# low is var
dist = truncated_normal.TruncatedNormal(
loc=0., scale=0.1, low=tf.Variable(-1.), high=-.5, validate_args=True)
self.evaluate([v.initializer for v in dist.variables])
self.evaluate(dist.mean())
with tf.control_dependencies([dist.low.assign(-.1)]):
with self.assertRaisesOpError('`low >= high`'):
self.evaluate(dist.mean())
# high is var
dist = truncated_normal.TruncatedNormal(
loc=0., scale=0.1, low=-1., high=tf.Variable(-.5), validate_args=True)
self.evaluate([v.initializer for v in dist.variables])
self.evaluate(dist.mean())
with tf.control_dependencies([dist.high.assign(-1.1)]):
with self.assertRaisesOpError('`low >= high`'):
self.evaluate(dist.mean())
# both are vars
dist = truncated_normal.TruncatedNormal(
loc=0.,
scale=0.1,
low=tf.Variable(-1.),
high=tf.Variable(-.5),
validate_args=True)
self.evaluate([v.initializer for v in dist.variables])
self.evaluate(dist.mean())
with tf.control_dependencies([dist.high.assign(-1.)]):
with self.assertRaisesOpError('`low >= high`'):
self.evaluate(dist.mean())
@parameterized.parameters(
(0., 1., -1., 1.),
(1., 1., 0., 2.),
(-0.5, 0.5, -0.9, -0.4),
(10., 3.0, 9.9, 25.),
(2., 1.5, 0.1, 1.9),
(-2., 0.2, -1.5, -0.5))
def testMode(self, loc, scale, low, high):
dist = truncated_normal.TruncatedNormal(
loc=loc, scale=scale, low=low, high=high, validate_args=True)
mode = self.evaluate(dist.mode()).item()
if loc < low:
expected_mode = low
elif loc > high:
expected_mode = high
else:
expected_mode = loc
self.assertAlmostEqual(mode, expected_mode)
@test_util.numpy_disable_gradient_test
@parameterized.parameters((np.float32), (np.float64))
def testReparametrizable(self, dtype=np.float32):
loc = dtype(0.1)
scale = dtype(1.1)
low = dtype(-10.0)
high = dtype(5.0)
def f(loc, scale, low, high):
dist = truncated_normal.TruncatedNormal(
loc=loc, scale=scale, low=low, high=high, validate_args=True)
n = int(2e5)
return tf.reduce_mean(
tf.abs(dist.sample(n, seed=test_util.test_seed())))
err = self.compute_max_gradient_error(f, [loc, scale, low, high], delta=0.1)
# These gradients are noisy due to sampling.
self.assertLess(err, 0.05)
@test_util.numpy_disable_gradient_test
def testReparametrizableBatch(self):
def samples_sum(loc):
dist = truncated_normal.TruncatedNormal(
loc=loc, scale=1., low=-1., high=1., validate_args=True)
return tf.reduce_sum(dist.sample(100, seed=test_util.test_seed()))
loc = tf.constant([0., 1.])
_, dy_loc = self.evaluate(gradient.value_and_gradient(samples_sum, loc))
self.assertAllGreaterEqual(dy_loc, 0.)
@test_util.numpy_disable_gradient_test
@parameterized.parameters(
itertools.product((np.float32, np.float64),
('prob', 'log_prob', 'cdf', 'log_cdf',
'survival_function', 'log_survival_function'))
)
def testGradientsFx(self, dtype, fn_name):
if not tf.executing_eagerly(): return
loc = dtype(0.1)
scale = dtype(3.0)
low = dtype(-10.0)
high = dtype(5.0)
x = np.array([-1.0, 0.01, 0.1, 1., 4.9]).astype(dtype).reshape((5, 1))
def f(loc, scale):
dist = truncated_normal.TruncatedNormal(
loc=loc, scale=scale, low=low, high=high, validate_args=True)
func = getattr(dist, fn_name)
return tf.reduce_mean(func(x))
err = self.compute_max_gradient_error(f, [loc, scale])
self.assertLess(err, 1e-2)
@test_util.numpy_disable_gradient_test
@parameterized.parameters(
itertools.product((np.float32, np.float64),
('entropy', 'mean', 'variance', 'mode'))
)
def testGradientsNx(self, dtype, fn_name):
loc = dtype(0.1)
scale = dtype(3.0)
low = dtype(-10.0)
high = dtype(5.0)
def f(loc, scale):
dist = truncated_normal.TruncatedNormal(
loc=loc, scale=scale, low=low, high=high, validate_args=True)
func = getattr(dist, fn_name)
return func()
if fn_name not in ['mode']:
err = self.compute_max_gradient_error(f, [loc, scale])
self.assertLess(err, 0.005)
else:
err = self.compute_max_gradient_error(lambda x: f(x, scale), [loc])
self.assertLess(err, 0.005)
def testSupportBijectorOutsideRange(self):
low = np.array([1., 2., 3., -5.]).astype(np.float32)
loc = np.array([4., 4., 4., -2.]).astype(np.float32)
high = np.array([6., 7., 6., 1.]).astype(np.float32)
dist = truncated_normal.TruncatedNormal(
loc, scale=2., low=low, high=high, validate_args=False)
eps = 1e-6
x = np.array([1. - eps, 1.5, 6. + eps, -5. - eps]).astype(np.float32)
bijector_inverse_x = dist.experimental_default_event_space_bijector(
).inverse(x)
self.assertAllNan(self.evaluate(bijector_inverse_x))
def testSampleXLA(self):
self.skip_if_no_xla()
@tf.function(jit_compile=True)
def f(loc):
return truncated_normal.TruncatedNormal(
loc=loc, scale=1., low=-1.,
high=1.).sample([3], seed=test_util.test_seed())
self.evaluate(f(tf.constant(0.2)))
# TODO(b/150161911): reconcile graph- and eager-mode handling of denormal floats
# so that we can re-enable eager mode tests.
@test_util.test_graph_mode_only
class TruncatedNormalTestGraphMode(_TruncatedNormalTestCase):
@test_util.numpy_disable_test_missing_functionality(
'This is a regression test for TF-graph mode.')
@parameterized.named_parameters(
{'testcase_name': '_float32', 'dtype': tf.float32},
{'testcase_name': '_float64', 'dtype': tf.float64})
def testReproduceVmap1(self, dtype):
# Regression test for b/145554459
loc = tf.constant(-200., dtype=dtype)
scale = tf.constant(2.188274e+01, dtype=dtype)
high = tf.constant(113.33857, dtype=dtype)
low = tf.constant(102.94414, dtype=dtype)
# Not validating args b/c the assertions confuse pfor.
dist = truncated_normal.TruncatedNormal(
loc, scale, low, high, validate_args=False)
sample = tf.constant([102.950745, 103.87256, 107.78299], dtype=dtype)
batch_lp = dist.log_prob(sample)
pfor_lp = tf.vectorized_map(dist.log_prob, sample)
batch_lp_, pfor_lp_ = self.evaluate((batch_lp, pfor_lp))
self.assertAllClose(batch_lp_, pfor_lp_, atol=2e-6)
@test_util.numpy_disable_test_missing_functionality(
'This is a regression test for TF-graph mode.')
@parameterized.named_parameters(
{'testcase_name': '_float32', 'dtype': tf.float32},
{'testcase_name': '_float64', 'dtype': tf.float64})
def testReproduceVmap2(self, dtype):
# Regression test for b/150811273
if dtype == np.float32:
raise unittest.SkipTest('b/150811273')
seed = test_util.test_seed()
loc = tf.constant(-12.500191, dtype=dtype)
scale = tf.constant(1e-06, dtype=dtype)
high = tf.constant(-12.502851, dtype=dtype)
low = tf.constant(-187.50009, dtype=dtype)
# Not validating args b/c the assertions confuse pfor.
dist = truncated_normal.TruncatedNormal(
loc, scale, low, high, validate_args=False)
# At the default seed, the sample comes out as [-12.502851 -12.502851
# -12.502851], but that's also weird. At a scale of 1e-6, the samples
# should cluster more tightly around the location, which is -12.500191.
sample = self.evaluate(dist.sample(3, seed=seed))
batch_lp = dist.log_prob(sample)
pfor_lp = tf.vectorized_map(dist.log_prob, tf.convert_to_tensor(sample))
batch_lp_, pfor_lp_ = self.evaluate((batch_lp, pfor_lp))
self.assertAllClose(batch_lp_, pfor_lp_, atol=1e-6)
@test_util.test_all_tf_execution_regimes
@parameterized.parameters(
(0.0, 1.0),
(10.0, 1.0),
(-0.3, 2.0),
(100., 5.0),
)
class TruncatedNormalTestCompareWithNormal(_TruncatedNormalTestCase):
"""Test by comparing TruncatedNormals with wide bounds and unbounded Normal.
"""
def constructDists(self, loc, scale, validate_args=True):
truncated_dist = truncated_normal.TruncatedNormal(
loc=loc,
scale=scale,
low=loc - (10. * scale),
high=loc + (10. * scale),
validate_args=validate_args)
normal_dist = normal.Normal(loc=loc, scale=scale)
return truncated_dist, normal_dist
def testEntropy(self, loc, scale):
truncated_dist, normal_dist = self.constructDists(loc, scale)
self.assertAllClose(
self.evaluate(truncated_dist.entropy()),
self.evaluate(normal_dist.entropy()),
rtol=1e-6, atol=1e-6)
def testSampling(self, loc, scale):
n = 1000000
truncated_dist, normal_dist = self.constructDists(loc, scale)
seed_stream = test_util.test_seed_stream(salt='TruncNormal')
truncated_samples = self.evaluate(
truncated_dist.sample(n, seed=seed_stream())).flatten()
lb = self.evaluate(truncated_dist.low)
ub = self.evaluate(truncated_dist.high)
self.assertAllGreaterEqual(truncated_samples, lb)
self.assertAllLessEqual(truncated_samples, ub)
normal_samples = self.evaluate(normal_dist.sample(
n, seed=seed_stream())).flatten()
# Rejection sample the normal distribution
rejection_samples = normal_samples[normal_samples >= lb]
rejection_samples = rejection_samples[rejection_samples <= ub]
self.assertEmpiricalDistributionsEqual(
truncated_samples, rejection_samples, rtol=1e-2, atol=1e-1)
def testLogProb(self, loc, scale):
truncated_dist, normal_dist = self.constructDists(
loc, scale, validate_args=False)
low = self.evaluate(truncated_dist.low)
high = self.evaluate(truncated_dist.high)
test_x = list(np.float32(np.random.uniform(low, high, 10)))
test_x += [low, high, low + EPSILON, high - EPSILON]
tr_log_prob = self.evaluate(truncated_dist.log_prob(test_x))
n_log_prob = self.evaluate(normal_dist.log_prob(test_x))
self.assertAllClose(tr_log_prob, n_log_prob, rtol=1e-4, atol=1e-4)
no_support_log_prob = self.evaluate(
truncated_dist.log_prob(
np.float32(
[low - EPSILON, high + EPSILON, low - 100., high + 100.]
)))
self.assertAllEqual(no_support_log_prob,
[np.log(0.)] * len(no_support_log_prob))
def testCDF(self, loc, scale):
truncated_dist, normal_dist = self.constructDists(loc, scale)
low = self.evaluate(truncated_dist.low)
high = self.evaluate(truncated_dist.high)
test_x = list(
np.float32(np.random.uniform(low, high, 10)))
test_x += [low, high, low + EPSILON, high - EPSILON]
tr_cdf = self.evaluate(truncated_dist.cdf(test_x))
n_cdf = self.evaluate(normal_dist.cdf(test_x))
self.assertAllClose(tr_cdf, n_cdf, rtol=1e-4, atol=1e-4)
@test_util.test_all_tf_execution_regimes
@parameterized.parameters(
(0., 1., -1., 1.),
(1., 1., 0., 2.),
(-0.5, 0.5, -0.9, -0.4),
(10., 3.0, 9.9, 25.),
(2., 1.5, 0.1, 1.9),
(-2., 0.2, -1.5, -0.5))
class TruncatedNormalTestCompareWithScipy(_TruncatedNormalTestCase):
def constructDists(self, loc, scale, low, high, validate_args=True):
tf_dist = truncated_normal.TruncatedNormal(
loc=loc, scale=scale, low=low, high=high, validate_args=validate_args)
sp_dist = scipy_trunc_norm_dist(loc, scale, low, high)
return tf_dist, sp_dist
@test_util.jax_disable_test_missing_functionality(
'In JAX, truncated_normal samples can fall outside the support.')
def testSampling(self, loc, scale, low, high):
n = int(1000000)
tf_dist, sp_dist = self.constructDists(loc, scale, low, high)
tf_samples = self.evaluate(tf_dist.sample(
n, seed=test_util.test_seed())).flatten()
self.assertAllGreaterEqual(tf_samples, low)
self.assertAllLessEqual(tf_samples, high)
sp_samples = sp_dist.rvs(size=n)
self.assertEmpiricalDistributionsEqual(
tf_samples, sp_samples, atol=0.05, rtol=0.05)
def testEntropy(self, loc, scale, low, high):
tf_dist, sp_dist = self.constructDists(loc, scale, low, high)
self.assertAlmostEqual(
self.evaluate(tf_dist.entropy()), sp_dist.entropy(), places=2)
def testLogProb(self, loc, scale, low, high):
test_x = list(np.float32(np.random.uniform(low, high, 10)))
test_x += [
low, high, low + EPSILON, low - EPSILON, high + EPSILON,
high - EPSILON
]
tf_dist, sp_dist = self.constructDists(
loc, scale, low, high, validate_args=False)
tf_log_prob = self.evaluate(tf_dist.log_prob(test_x))
sp_log_prob = sp_dist.logpdf(test_x)
self.assertAllClose(tf_log_prob, sp_log_prob, rtol=1e-4, atol=1e-4)
def testCDF(self, loc, scale, low, high):
test_x = list(np.float32(np.random.uniform(low, high, 10)))
test_x += [
low, high, low + EPSILON, low - EPSILON, high + EPSILON,
high - EPSILON, low - 100., high + 100.
]
tf_dist, sp_dist = self.constructDists(
loc, scale, low, high, validate_args=False)
tf_cdf = self.evaluate(tf_dist.cdf(test_x))
sp_cdf = sp_dist.cdf(test_x)
self.assertAllClose(tf_cdf, sp_cdf, rtol=1e-4, atol=1e-4)
def testLogCDF(self, loc, scale, low, high):
test_x = list(np.float32(np.random.uniform(low, high, 10)))
test_x += [
low, high, low + 100 * EPSILON, low - EPSILON, high + EPSILON,
high - EPSILON, low - 100., high + 100.
]
tf_dist, sp_dist = self.constructDists(
loc, scale, low, high, validate_args=False)
tf_log_cdf = self.evaluate(tf_dist.log_cdf(test_x))
sp_log_cdf = sp_dist.logcdf(test_x)
self.assertAllClose(tf_log_cdf, sp_log_cdf, rtol=1e-4, atol=1e-4)
def testMoments(self, loc, scale, low, high):
tf_dist, sp_dist = self.constructDists(loc, scale, low, high)
self.assertAlmostEqual(
self.evaluate(tf_dist.mean()), sp_dist.mean(), places=3)
self.assertAlmostEqual(
self.evaluate(tf_dist.variance()), sp_dist.var(), places=3)
def testQuantile(self, loc, scale, low, high):
tf_dist, sp_dist = self.constructDists(loc, scale, low, high)
for q in [0.01, 0.1, 0.5, 0.9, 0.99]:
self.assertAlmostEqual(
self.evaluate(tf_dist.quantile(value=q)), sp_dist.ppf(q=q), places=3)
if __name__ == '__main__':
test_util.main()
|
c7b5441d661c21d320a95c6f2e51875f9176bf18
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/AlipayEcoMycarVehicleInfoQueryModel.py
|
9997e6268855b678d39f3c713e831c429cac872d
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,321
|
py
|
AlipayEcoMycarVehicleInfoQueryModel.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayEcoMycarVehicleInfoQueryModel(object):
def __init__(self):
self._plate_no = None
self._vi_id = None
@property
def plate_no(self):
return self._plate_no
@plate_no.setter
def plate_no(self, value):
self._plate_no = value
@property
def vi_id(self):
return self._vi_id
@vi_id.setter
def vi_id(self, value):
self._vi_id = value
def to_alipay_dict(self):
params = dict()
if self.plate_no:
if hasattr(self.plate_no, 'to_alipay_dict'):
params['plate_no'] = self.plate_no.to_alipay_dict()
else:
params['plate_no'] = self.plate_no
if self.vi_id:
if hasattr(self.vi_id, 'to_alipay_dict'):
params['vi_id'] = self.vi_id.to_alipay_dict()
else:
params['vi_id'] = self.vi_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayEcoMycarVehicleInfoQueryModel()
if 'plate_no' in d:
o.plate_no = d['plate_no']
if 'vi_id' in d:
o.vi_id = d['vi_id']
return o
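# Round-trip illustration of the two converters above (the values are placeholders, not
# real Alipay data):
#   m = AlipayEcoMycarVehicleInfoQueryModel()
#   m.plate_no = "ABC123"
#   m.to_alipay_dict()        -> {"plate_no": "ABC123"}   (unset fields are skipped)
#   AlipayEcoMycarVehicleInfoQueryModel.from_alipay_dict({"vi_id": "42"}).vi_id  -> "42"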
|
40b6efec0cf5906c8278dc10acf47fea507cc60a
|
1ebb431a94bc5afcdde7b3fd50354ae70b4aa556
|
/html5css3/postprocessors.py
|
926a126d2fc51b2e93572ee48633d02900405767
|
[
"MIT"
] |
permissive
|
marianoguerra/rst2html5
|
a2ce9c74b6906639e4f201f4e6aa2d81798c2aca
|
525ed5bade67ca5cf4c7c9e50498896704f439f7
|
refs/heads/master
| 2021-10-26T03:51:03.619384
| 2021-09-28T08:19:09
| 2021-09-28T08:19:09
| 2,613,617
| 107
| 35
|
MIT
| 2021-09-28T08:19:10
| 2011-10-20T14:22:19
|
HTML
|
UTF-8
|
Python
| false
| false
| 13,173
|
py
|
postprocessors.py
|
from __future__ import absolute_import
import os
import sys
import html5css3
import json
from . import html
IS_PY3 = sys.version_info[0] == 3
from docutils import nodes
from docutils.parsers.rst import directives
from docutils.parsers.rst import Directive
BASE_PATH = os.path.dirname(__file__)
join_path = os.path.join
if IS_PY3:
def read_file(path):
return open(path).read()
else:
def read_file(path):
return open(path).read().decode('utf-8')
def as_list(val):
"""return a list with val if val is not already a list, val otherwise"""
if isinstance(val, list):
return val
else:
return [val]
def abspath(path):
return join_path(BASE_PATH, path)
def js_fullpath(path, embed=True):
content = read_file(path)
if embed:
return html.Script(content)
else:
return html.Script(src=path)
def js(path, embed=True):
return js_fullpath(abspath(path), embed)
def css(path, embed=True):
content = read_file(abspath(path))
if embed:
return html.Style(content, type="text/css")
else:
return html.Link(href=path, rel="stylesheet", type="text/css")
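# Illustrative note on the helpers above (the file name is hypothetical): with embed=True
# the asset body is inlined, with embed=False it is only referenced, roughly:
#   css("custom.css", embed=True)   -> <style type="text/css"> ...file contents... </style>
#   css("custom.css", embed=False)  -> <link href="custom.css" rel="stylesheet" type="text/css">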
def pretty_print_code(tree, embed=True, params=None):
head = tree[0]
body = tree[1]
body.append(js(join_path("thirdparty", "prettify.js"), embed))
body.append(html.Script("$(function () { prettyPrint() })"))
langs_str = params.get("langs", "")
langs = [x.strip() for x in langs_str.split(":") if x.strip() != ""]
for lang in langs:
lang_path = join_path("thirdparty", "prettify", "lang-" + lang + ".js")
body.append(js(lang_path, embed))
head.append(css(join_path("thirdparty", "prettify.css")))
def jquery(tree, embed=True, params=None):
body = tree[1]
body.append(js(join_path("thirdparty", "jquery.js"), embed))
def add_class(element, cls_name):
cls = element.get("class", "")
if cls:
cls += " " + cls_name
else:
cls += cls_name
element.set("class", cls)
def deckjs(tree, embed=True, params=None):
head = tree[0]
body = tree[1]
def path(*args):
return join_path("thirdparty", "deckjs", *args)
add_class(body, "deck-container")
for section in tree.findall(".//section"):
add_class(section, "slide")
# Core and extension CSS files
head.append(css(path("core", "deck.core.css"), embed))
head.append(css(path("extensions", "goto", "deck.goto.css"), embed))
head.append(css(path("extensions", "menu", "deck.menu.css"), embed))
head.append(css(path("extensions", "navigation", "deck.navigation.css"), embed))
head.append(css(path("extensions", "status", "deck.status.css"), embed))
# Theme CSS files (menu swaps these out)
head.append(css(path("themes", "style", "web-2.0.css"), embed))
head.append(css(path("themes", "transition", "horizontal-slide.css"), embed))
body.append(js(path("modernizr.custom.js"), embed))
jquery(tree, embed)
# Deck Core and extensions
body.append(js(path("core", "deck.core.js"), embed))
body.append(js(path("extensions", "menu", "deck.menu.js"), embed))
body.append(js(path("extensions", "goto", "deck.goto.js"), embed))
body.append(js(path("extensions", "status", "deck.status.js"), embed))
body.append(js(path("extensions", "navigation", "deck.navigation.js"), embed))
body.append(html.Script("$(function () { $.deck('.slide'); });"))
def add_js(tree, embed=True, params=None):
params = params or {}
paths = as_list(params.get("path", []))
body = tree[1]
for path in paths:
body.append(js_fullpath(path, embed))
def revealjs(tree, embed=True, params=None):
import json
head = tree[0]
body = tree[1]
params = params or {}
theme_name = params.pop("theme", "league") + ".css"
theme_base_dir = params.pop("themepath", None)
printpdf = params.pop("printpdf", False)
def path(*args):
return join_path("thirdparty", "revealjs", *args)
if theme_base_dir:
theme_path = join_path(os.path.expanduser(theme_base_dir), theme_name)
else:
theme_path = path("css", "theme", theme_name)
add_class(body, "reveal")
slides = html.Div(class_="slides")
for item in list(body):
body.remove(item)
slides.append(item)
body.append(slides)
# <link rel="stylesheet" href="css/reveal.css">
# <link rel="stylesheet" href="css/theme/default.css" id="theme">
head.append(css(path("css", "reveal.css"), embed))
head.append(css(theme_path, embed))
if printpdf:
head.append(css(path("css", "print", "pdf.css"), embed))
else:
# Embed print-pdf URL semantics
css_print = read_file(abspath(path("css", "print", "pdf.css")))
# Escape for HTML page
css_print = json.dumps(css_print)
script = html.Script(r"""
(function() {{
if (window.location.search.match( /print-pdf/gi )) {{
var printStyle = document.createElement( 'style' );
printStyle.type = 'text/css';
printStyle.innerHTML = {};
document.getElementsByTagName( 'head' )[0].appendChild( printStyle );
}}
}})();
""".format(css_print))
head.append(script)
# <script src="lib/js/head.min.js"></script>
# <script src="js/reveal.js"></script>
body.append(js(path("lib", "js", "head.min.js"), embed))
body.append(js(path("js", "reveal.js"), embed))
head.append(css("rst2html5-reveal.css", embed))
params['history'] = True
param_s = json.dumps(params)
body.append(
html.Script("$(function () { Reveal.initialize(%s); });" % param_s))
def impressjs(tree, embed=True, params=None):
head = tree[0]
body = tree[1]
def path(*args):
return join_path("thirdparty", "impressjs", *args)
# remove the default style
#head.remove(head.find("./style"))
add_class(body, "impress-not-supported")
    fallback = html.Div('<div class="fallback-message">' +
        '<p>Your browser <b>doesn\'t support the features required</b> by ' +
        'impress.js, so you are presented with a simplified version of this ' +
        'presentation.</p>' +
        '<p>For the best experience please use the latest <b>Chrome</b>, ' +
        '<b>Safari</b> or <b>Firefox</b> browser.</p></div>')
slides = html.Div(id="impress")
for item in list(body):
body.remove(item)
slides.append(item)
body.append(slides)
# <script src="js/impress.js"></script>
body.append(js(path("js", "impress.js"), embed))
body.append(html.Script("impress().init();"))
def bootstrap_css(tree, embed=True, params=None):
head = tree[0]
head.append(css(join_path("thirdparty", "bootstrap.css"), embed))
def embed_images(tree, embed=True, params=None):
import base64
for image in tree.findall(".//img"):
path = image.attrib['src']
lowercase_path = path.lower()
if lowercase_path.endswith(".png"):
content_type = "image/png"
elif lowercase_path.endswith(".jpg"):
content_type = "image/jpg"
elif lowercase_path.endswith(".gif"):
content_type = "image/gif"
else:
continue
encoded = base64.b64encode(open(path, 'rb').read()).decode('utf-8')
content = "data:%s;base64,%s" % (content_type, encoded)
image.set('src', content)
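# Illustrative example (hypothetical file name): after this postprocessor runs, an element
# like <img src="logo.png"> is rewritten in place to carry a data URI, e.g.
# <img src="data:image/png;base64,iVBORw0KGgo...">, so the generated HTML no longer needs
# the external image file.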
def pygmentize(tree, embed=True, params=None):
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import HtmlFormatter
pygments_formatter = HtmlFormatter()
body = tree[1]
def highlight_code(lang, code):
try:
lexer = get_lexer_by_name(lang)
except ValueError:
# no lexer found - use the text one instead of an exception
lexer = get_lexer_by_name('text')
parsed = highlight(code, lexer, pygments_formatter)
return parsed
for block in body.findall(".//pre"):
cls = block.attrib.get('class', '')
classes = cls.split()
if 'code' in classes:
lang_classes = [cls for cls in classes if cls.startswith('lang-')]
if len(lang_classes) > 0:
lang = lang_classes[0][5:]
new_content = highlight_code(lang, block.text)
block.tag = 'div'
block.text = new_content
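# Worked example of the markup this loop expects (assuming the code-block directive
# registered further below): a "code-block:: python" block is emitted with
# class="code lang-python"; the "lang-python" part selects the Python lexer here and the
# <pre> element is replaced by a <div> holding Pygments' highlighted markup.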
def mathjax(tree, embed=True, params=None):
body = tree[1]
params = params or {}
config_path = params.get("config")
url = params.get("url", "http://cdn.mathjax.org/mathjax/latest/MathJax.js")
if config_path is None:
content = """
MathJax.Hub.Config({
extensions: ["tex2jax.js"],
jax: ["input/TeX", "output/HTML-CSS"],
tex2jax: {
inlineMath: [ ['$','$'], ["\\(","\\)"] ],
displayMath: [ ['$$','$$'], ["\\[","\\]"] ],
processEscapes: true
},
"HTML-CSS": { availableFonts: ["TeX"] }
});
"""
else:
with open(config_path) as f_in:
content = f_in.read()
body.append(html.Script(content, type="text/x-mathjax-config"))
body.append(html.Script(src=url))
PROCESSORS = [
("mathjax", {
"name": "add mathjax support",
"processor": mathjax
}),
("jquery", {
"name": "add jquery",
"processor": jquery
}),
("pretty_print_code", {
"name": "pretty print code",
"processor": pretty_print_code
}),
("pygments", {
"name": "pygments",
"processor": pygmentize
}),
("deck_js", {
"name": "deck.js",
"processor": deckjs
}),
("reveal_js", {
"name": "reveal.js",
"processor": revealjs
}),
("impress_js", {
"name": "impress.js",
"processor": impressjs
}),
("bootstrap_css", {
"name": "bootstrap css",
"processor": bootstrap_css
}),
("embed_images", {
"name": "embed images",
"processor": embed_images
}),
("add_js", {
"name": "add js files",
"processor": add_js
})
]
class Code(Directive):
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
has_content = True
def run(self):
language = self.arguments[0]
content = self.content
attrs = {
'class': "code lang-" + language
}
return [nodes.literal_block('', "\n".join(content), **attrs)]
class Slide3D(Directive):
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
option_spec = {
'x': int,
'y': int,
'z': int,
'rotate': int,
'rotate-x': int,
'rotate-y': int,
'scale': int,
'class': directives.unchanged,
'id': directives.unchanged,
'title': directives.unchanged
}
has_content = True
def run(self):
attributes = {}
for key, value in self.options.items():
if key in ('class', 'id', 'title'):
attributes[key] = value
else:
attributes['data-' + key] = value
node = nodes.container(rawsource=self.block_text, **attributes)
self.state.nested_parse(self.content, self.content_offset, node)
return [node]
class Video(Directive):
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
has_content = False
option_spec = {
'autoplay': bool,
'preload': bool,
'poster': str,
'controls': bool,
'height': int,
'width': int,
'loop': bool,
'muted': bool,
'class': directives.unchanged,
'id': directives.unchanged,
'title': directives.unchanged
}
def run(self):
src = self.arguments[0]
opts = self.options
code = '<video src="%s"' % src
if opts.get('controls'):
code += ' controls="true"'
if opts.get('muted'):
code += ' muted="true"'
if opts.get('loop'):
code += ' loop="true"'
if opts.get('autoplay'):
code += ' autoplay="true"'
preload = opts.get('preload')
width = opts.get('width')
if width is not None:
code += ' width="%s"' % width
height = opts.get('height')
if height is not None:
code += ' height="%s"' % height
poster = opts.get('poster')
if poster is not None:
code += ' poster="%s"' % poster
if preload:
if preload in ['none', 'metadata', 'auto']:
code += ' preload="%s"' % preload
code += '></video>'
return [nodes.raw('', code, format='html')]
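# Illustrative output of the directive above (hypothetical source file name): a directive
# such as
#   .. video:: intro.mp4
#      :width: 640
# is rendered by run() as raw HTML along the lines of
#   <video src="intro.mp4" width="640"></video>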
directives.register_directive('slide-3d', Slide3D)
directives.register_directive('code-block', Code)
directives.register_directive('video', Video)
|
ec5174ebd89dea3d46453ddc04d7d96ac4e3ced0
|
0c5b9ebee22450c214576f18929436527b26a1b0
|
/starfish/core/util/try_import.py
|
831856330d572e17b2436fb48cab5937b32f5e12
|
[
"MIT"
] |
permissive
|
spacetx/starfish
|
962b4e4a8c0c193acaa84c016a6edaef76c14769
|
853f56c7c02b15397adb921db5e3bde02fdadb63
|
refs/heads/master
| 2023-03-09T13:51:30.772904
| 2022-09-06T22:16:25
| 2022-09-06T22:16:25
| 92,539,237
| 211
| 75
|
MIT
| 2023-02-11T01:52:25
| 2017-05-26T18:45:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,160
|
py
|
try_import.py
|
import functools
from typing import Callable, Optional, Set
def try_import(allowable_module_names: Optional[Set[str]]=None) -> Callable:
"""
    Decorator to apply to a method. If one of the modules in `allowable_module_names` fails
    to import, raise a friendly error message. If `allowable_module_names` is None, then any
    failed import raises a friendly error message.
Enables large and peripheral dependencies to be excluded from the build.
"""
def _try_import_decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
                return func(*args, **kwargs)
except ImportError as ex:
module_name = ex.name
if allowable_module_names is None or module_name in allowable_module_names:
raise ImportError(
f"{module_name} is an optional dependency of starfish. Please install "
f"{module_name} and its dependencies to use this functionality."
) from ex
else:
raise
return wrapper
return _try_import_decorator
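# Hedged usage sketch (the module name "umap" is purely illustrative and is not a
# dependency declared anywhere in this file):
#
#   @try_import({"umap"})
#   def embed(data):
#       import umap
#       return umap.UMAP().fit_transform(data)
#
# If importing "umap" fails inside embed(), the decorator re-raises it with the friendlier
# "optional dependency" message; import failures of modules outside the allow-set propagate
# unchanged.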
|
2d9f9b96b8a45a9637b3a38f4785dd209bffdc53
|
518bf342bc4138982af3e2724e75f1d9ca3ba56c
|
/solutions/0573. Squirrel Simulation/0573.py
|
7a812eb2569b6ac7988830d4be2e0122994e6ddf
|
[
"MIT"
] |
permissive
|
walkccc/LeetCode
|
dae85af7cc689882a84ee5011f0a13a19ad97f18
|
a27be41c174565d365cbfe785f0633f634a01b2a
|
refs/heads/main
| 2023-08-28T01:32:43.384999
| 2023-08-20T19:00:45
| 2023-08-20T19:00:45
| 172,231,974
| 692
| 302
|
MIT
| 2023-08-13T14:48:42
| 2019-02-23T15:46:23
|
C++
|
UTF-8
|
Python
| false
| false
| 390
|
py
|
0573.py
|
class Solution:
def minDistance(self, height: int, width: int, tree: List[int], squirrel: List[int], nuts: List[List[int]]) -> int:
def dist(a: List[int], b: List[int]) -> int:
return abs(a[0] - b[0]) + abs(a[1] - b[1])
totDist = sum(dist(nut, tree) for nut in nuts) * 2
maxSave = max(dist(nut, tree) - dist(nut, squirrel) for nut in nuts)
return totDist - maxSave
|
3d0fbe1fa165e834eb10a1ab0162a89459ac8c4b
|
753cd066a9bd26b6c37c8d53a86c7a9c659ec18c
|
/vision/cnns/pytorch-lightning/test/test_notebook.py
|
76e2190e4c65478ea6a0e5cde6ea50edbd1ff8c1
|
[
"MIT"
] |
permissive
|
graphcore/examples
|
ac872015808ed2a913d4d7bf0d63202ce15ebbae
|
e2f834dd60e7939672c1795b4ac62e89ad0bca49
|
refs/heads/master
| 2023-08-05T02:08:12.341836
| 2023-07-27T11:13:10
| 2023-07-27T11:13:10
| 143,977,106
| 311
| 80
|
MIT
| 2023-09-11T16:42:56
| 2018-08-08T07:29:17
|
Python
|
UTF-8
|
Python
| false
| false
| 964
|
py
|
test_notebook.py
|
# Copyright (c) 2022 Graphcore Ltd. All rights reserved.
import pathlib
import nbformat
import pytest
from nbconvert.preprocessors import ExecutePreprocessor
EXAMPLE_ROOT_DIR = pathlib.Path(__file__).parent.parent / "code-examples" / "fashion-mnist"
@pytest.mark.category2
@pytest.mark.ipus(4)
def test_notebook_1():
notebook_filename = EXAMPLE_ROOT_DIR / "fashionmnist.ipynb"
with open(notebook_filename) as f:
nb = nbformat.read(f, as_version=4)
ep = ExecutePreprocessor(timeout=600, kernel_name="python3")
ep.preprocess(nb, {"metadata": {"path": f"{EXAMPLE_ROOT_DIR}"}})
@pytest.mark.category2
@pytest.mark.ipus(4)
def test_notebook_2():
notebook_filename = EXAMPLE_ROOT_DIR / "fashionmnist_torchvision.ipynb"
with open(notebook_filename) as f:
nb = nbformat.read(f, as_version=4)
ep = ExecutePreprocessor(timeout=600, kernel_name="python3")
ep.preprocess(nb, {"metadata": {"path": f"{EXAMPLE_ROOT_DIR}"}})
|
df796762481ffa5cdcb3fe4f4cf16ffe6355d8ce
|
767dae79df18f9868855774464d08864a1d8629b
|
/protonfixes/gamefixes/493200.py
|
32ed3ca3f92d058a3fa59c60f26238c087f19435
|
[
"BSD-2-Clause"
] |
permissive
|
simons-public/protonfixes
|
05cd9c2c37c35ce56ec4c3cdcdba375c6eadf530
|
681411ba8ceb5d2d790e674eb7a5b98951d426e6
|
refs/heads/master
| 2022-11-16T04:16:32.764931
| 2022-11-15T00:24:24
| 2022-11-15T00:24:24
| 150,211,569
| 245
| 75
|
NOASSERTION
| 2022-11-15T00:24:25
| 2018-09-25T05:20:02
|
Python
|
UTF-8
|
Python
| false
| false
| 765
|
py
|
493200.py
|
""" Game fix for RiME
"""
# pylint: disable=C0103
from protonfixes import util
def main():
""" Install xact and dinput8, override libraries and disable esync
"""
print('Applying fixes for RiME')
# if Proton version older than 3.16-5
if util.protonversion(True) < 1544476838:
# If not already installed, install xact
util.protontricks('xact')
# To fix audio crackling, set xaudio2_7.dll to native
util.winedll_override('xaudio2_7', 'n')
# Gamepad doesn't work properly without dinput8 installed
util.protontricks('dinput8')
# To fix gamepad set dinput8 to native
util.winedll_override('dinput8', 'n')
# disable esync to prevent game crash after a few minutes
util.disable_esync()
|
c44df6f736cfcc6754e904ba4f283ea6d5c05acf
|
9935c1a1142a19d72dd8ca5b8a8ad2a70e2a7edb
|
/Plugins/Aspose_Words_Java_for_Jython/asposewords/quickstart/LoadAndSaveToStream.py
|
a49fe353dd0630bacc74af08a24b5e1949dc00f7
|
[
"MIT"
] |
permissive
|
aspose-words/Aspose.Words-for-Java
|
705ad9a8047b8d9b7986dd5569859af24632afc0
|
2dceb8acb5547bbc0a62c49587b97fd4f3159b36
|
refs/heads/master
| 2023-08-18T08:43:51.900921
| 2023-05-15T13:48:56
| 2023-05-15T13:48:56
| 2,849,872
| 347
| 194
|
MIT
| 2023-02-01T13:45:06
| 2011-11-25T13:43:55
|
Java
|
UTF-8
|
Python
| false
| false
| 1,194
|
py
|
LoadAndSaveToStream.py
|
from asposewords import Settings
from com.aspose.words import Document
from com.aspose.words import SaveFormat
from java.io import ByteArrayOutputStream
from java.io import FileInputStream
from java.io import FileOutputStream
class LoadAndSaveToStream:
def __init__(self):
dataDir = Settings.dataDir + 'quickstart/'
# Open the stream. Read only access is enough for Aspose.Words to load a document.
stream = FileInputStream(dataDir + 'Document.doc')
# Load the entire document into memory.
doc = Document(stream)
# You can close the stream now, it is no longer needed because the document is in memory.
stream.close()
# ... do something with the document
# Convert the document to a different format and save to stream.
dstStream = ByteArrayOutputStream()
doc.save(dstStream, SaveFormat.RTF)
output = FileOutputStream(dataDir + "Document Out.rtf")
output.write(dstStream.toByteArray())
output.close()
print "Document loaded from stream and then saved successfully."
if __name__ == '__main__':
LoadAndSaveToStream()
|
f5118affac77f01d492bc45e8576d4635c201b8e
|
8a4b17f89a8b24e6e1f8073f4df9d3bee7c546ec
|
/misc/refalt-repr-examples.py
|
6caf7f2f07110cf8c05a94939b5056c22b1618f7
|
[
"Apache-2.0"
] |
permissive
|
biocommons/hgvs
|
f8a600f15657b7f6aaa7c913d55d3acc43c1cb51
|
697b32bba2b191c3e10c44d408030927f031c03e
|
refs/heads/main
| 2023-08-19T12:40:15.503258
| 2023-05-23T21:46:06
| 2023-05-23T21:46:06
| 84,496,560
| 228
| 95
|
Apache-2.0
| 2023-09-14T05:01:26
| 2017-03-09T22:58:27
|
Python
|
UTF-8
|
Python
| false
| false
| 854
|
py
|
refalt-repr-examples.py
|
#!/usr/bin/env python
import hgvs
import hgvs.parser
from tabulate import tabulate
from six.moves import map
hp = hgvs.parser.Parser()
variants = [
"NM_01.2:c.1_2del",
"NM_01.2:c.1_2del2",
"NM_01.2:c.1_2delAA",
"NM_01.2:c.1_2insTTT",
"NM_01.2:c.1_2delinsTTT",
"NM_01.2:c.1_2del2insTTT",
"NM_01.2:c.1_2delAAinsTTT",
"NM_01.2:c.1_2delAAinsAA",
"NM_01.2:c.1_1delAinsA",
"NM_01.2:c.1A>A",
"NM_01.2:c.1_1delAinsT",
"NM_01.2:c.1A>T",
"NM_01.2:c.1=",
"NM_01.2:c.1A=",
"NM_01.2:c.1AA=",
]
headers = "hgvs ref alt etype delta".split()
def gen1(h):
v = hp.parse_hgvs_variant(h)
return [h, v.posedit.edit.ref, v.posedit.edit.alt, v.posedit.edit.type, v.posedit.length_change()]
rows = [list(map(str, gen1(h))) for h in variants]
print(tabulate(rows, headers=headers)) #, tablefmt="pipe"))
|
d34912b90b50893cb9387e8fdc0acc72ff20eade
|
55defa28b5bd395e7ead2f9ca848f378ee2c8b13
|
/tests/python/unittest/test_target_codegen_hexagon.py
|
ef0eb4ff5a7e8f8e0de16b83e2eb15b973673bd1
|
[
"Apache-2.0",
"BSD-3-Clause",
"Zlib",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSD-2-Clause"
] |
permissive
|
neo-ai/tvm
|
456d48c8d80bd7190c91b488b8f9d6cf22918706
|
da529bf421fcfddd914b41bbe9bf9d5863671266
|
refs/heads/dev
| 2023-03-06T03:28:18.303189
| 2022-05-09T04:25:16
| 2022-05-09T04:25:16
| 167,632,700
| 101
| 43
|
Apache-2.0
| 2023-02-17T20:49:09
| 2019-01-26T00:35:54
|
Python
|
UTF-8
|
Python
| false
| false
| 6,944
|
py
|
test_target_codegen_hexagon.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import os
import pytest
import re
import sys
import tvm
import tvm.relay
import tvm.testing
import tvm.contrib.hexagon.hexagon as hexagon
@pytest.fixture(autouse=True)
def register_linker():
original_linker = tvm.contrib.hexagon.hexagon.hexagon_link()
# Register a phony linker, so that we can test codegen without a Hexagon toolchain.
hexagon.register_linker(lambda: "/bin/true")
yield None
# Restore registration.
hexagon.register_linker(original_linker)
@tvm.testing.requires_hexagon
def test_basic():
target = tvm.target.hexagon("v66", hvx=128)
def check_add(offload):
A = tvm.te.placeholder((128,), dtype="uint8", name="A")
        B = tvm.te.placeholder((128,), dtype="uint8", name="B")
C = tvm.te.compute((128,), lambda i: A[i] + B[i], name="C")
s = tvm.te.create_schedule(C.op)
if offload:
xo, xi = s[C].split(s[C].op.axis[0], nparts=1)
s[C].bind(xo, tvm.te.thread_axis("pipeline"))
m = tvm.build(s, [C, A, B], target=target, name="offload_add")
hexm = m.imported_modules[0]
else:
hexm = tvm.build(
s, [C, A, B], target=tvm.target.Target(target, target), name="native_add"
)
asm = hexm.get_source("s")
vadds = re.findall(r"v[0-9]+.b = vadd\(v[0-9]+.b,v[0-9]+.b\)", asm)
assert vadds # Check that it's non-empty
check_add(True)
check_add(False)
@tvm.testing.requires_hexagon
def test_llvm_target_features():
target = tvm.target.hexagon("v66", hvx=128)
# Define some trivial compute
A = tvm.te.placeholder((128,), dtype="uint8", name="A")
C = tvm.te.compute((128,), lambda i: A[i] + 1, name="C")
s = tvm.te.create_schedule(C.op)
m = tvm.build(s, [C, A], target=tvm.target.Target(target, target), name="add_one")
llvm_ir = m.get_source("ll")
# Make sure we find +hvx-length128b in "attributes".
fs = re.findall(r"attributes.*\+hvx-length128b", llvm_ir)
assert fs # Check that it's non-empty
@tvm.testing.requires_hexagon
def test_alloc_vtcm():
target = tvm.target.hexagon("v66")
buf_len = 2048
A = tvm.te.placeholder((buf_len,), name="A", dtype="int8")
B = tvm.te.placeholder((buf_len,), name="B", dtype="int8")
A_buf = tvm.te.compute((buf_len,), lambda *i: A(*i), "A_buf")
B_buf = tvm.te.compute((buf_len,), lambda *i: B(*i), "B_buf")
C = tvm.te.compute((buf_len,), lambda *i: A_buf(*i) + B_buf(*i), name="C")
s = tvm.te.create_schedule(C.op)
# Use VTCM for each buffer.
s[A_buf].set_scope("local.vtcm")
s[B_buf].set_scope("local.vtcm")
config = {"tir.add_lower_pass": hexagon.ir_lower_vtcm_pass()}
with tvm.transform.PassContext(config=config):
irmod = tvm.lower(s, [A, B, C], name="alloc_vtcm")
calls = re.findall("HexagonBackend[A-Za-z]*VTCM", str(irmod["alloc_vtcm"]))
assert "HexagonBackendAllocateVTCM" in calls
assert "HexagonBackendFreeVTCM" in calls
@tvm.testing.requires_hexagon
def test_llvm_options():
target = tvm.target.hexagon("v66", llvm_options="-hexagon-noopt")
Zero = tvm.te.compute((10,), lambda _: tvm.tir.const(0, "int32"))
s = tvm.te.create_schedule(Zero.op)
tvm.build(s, [Zero], target=target, name="zero")
# Check that BuildHexagon hasn't crashed because of target attribute
# type mismatch.
assert re.search("-hexagon-noopt", str(target))
@tvm.testing.requires_hexagon
def test_linked_params_codegen():
# A simple model (a single conv2d) to trigger parameter separation:
mod_lines = [
'#[version = "0.0.5"]',
"def @main(%input: Tensor[(1, 16, 16, 3), uint8], %weights: Tensor[(3, 3, 3, 3), uint8])"
" -> Tensor[(1, 14, 14, 3), uint8] {",
' nn.conv2d(%input, %weights, data_layout="NHWC", kernel_layout="HWIO", '
'kernel_size=[3, 3], out_dtype="uint8")',
"}",
]
mod = tvm.parser.fromtext("\n".join(mod_lines))
# Make the params be 81 x 'T':
params = {"weights": np.full([3, 3, 3, 3], fill_value=ord("T"), dtype=np.uint8)}
target = tvm.target.hexagon("v68", link_params=True)
with tvm.transform.PassContext(opt_level=3):
lib = tvm.relay.build(mod, target=target, params=params)
llvm_ir = lib.get_lib().get_source("ll")
# The definition of the parameter:
p0_def_re = r"@__tvm_param__p0 = internal constant \[81 x i8\] c\"T{81}\", align 128"
assert re.search(p0_def_re, llvm_ir)
# The body of the _lookup_linked_param function:
linked_param_re = r"(define.*@_lookup_linked_param\(.*\).* {[^}]*})"
linked_param_body = re.search(linked_param_re, llvm_ir, flags=re.MULTILINE)
assert linked_param_body and linked_param_body.groups()
# Reference to the parameter:
p0_use_re = r"\[81 x i8\]\* @__tvm_param__p0"
assert re.search(p0_use_re, linked_param_body.groups()[0])
"""
A snippet of actual LLVM IR containing the definition of the linked
    parameter, and the body of the _lookup_linked_param function.
@__tvm_param__p0 = internal constant [81 x i8] c"TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT", align 128
define dllexport i32 @_lookup_linked_param(i8* nocapture readonly %0, i32* nocapture readnone %1, i32 %2, i8* nocapture %3, i32* nocapture %4, i8* nocapture readnone %5) local_unnamed_addr #2 {
entry:
%6 = bitcast i8* %0 to i64*
%7 = load i64, i64* %6, align 8
%cond = icmp eq i64 %7, 1
br i1 %cond, label %case___tvm_param__p0, label %common.ret
common.ret: ; preds = %entry, %case___tvm_param__p0
%storemerge = phi i32 [ 3, %case___tvm_param__p0 ], [ 4, %entry ]
store i32 %storemerge, i32* %4, align 4
ret i32 0
case___tvm_param__p0: ; preds = %entry
%8 = bitcast i8* %3 to i8**
store i8* getelementptr inbounds ([81 x i8], [81 x i8]* @__tvm_param__p0, i32 0, i32 0), i8** %8, align 4
br label %common.ret
}
"""
if __name__ == "__main__":
sys.exit(pytest.main([__file__] + sys.argv[1:]))
|
7f61c845aa7ce5d4f8c356ab083f555c363a33d6
|
8caa926fa767898a0a8fba1d5b3bef73d96be410
|
/sagemaker-pyspark-sdk/tests/sagemakerestimator_test.py
|
17f456ae1672365ece885f5b500a410fd6c5dda9
|
[
"Apache-2.0"
] |
permissive
|
aws/sagemaker-spark
|
72122fd3675487050965c9d35f3b26295c9f174b
|
e27ccff05cd4f062b67712411b3c9ac95308502c
|
refs/heads/master
| 2023-05-11T12:38:13.951884
| 2022-08-26T16:19:57
| 2022-08-26T16:19:57
| 111,010,291
| 297
| 141
|
Apache-2.0
| 2023-05-10T02:39:27
| 2017-11-16T18:58:56
|
Scala
|
UTF-8
|
Python
| false
| false
| 3,973
|
py
|
sagemakerestimator_test.py
|
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import os
import pytest
from pyspark import SparkConf, SparkContext
from sagemaker_pyspark import (classpath_jars, SageMakerEstimator)
from sagemaker_pyspark.transformation.deserializers import KMeansProtobufResponseRowDeserializer
from sagemaker_pyspark.transformation.serializers import ProtobufRequestRowSerializer
@pytest.fixture(autouse=True)
def with_spark_context():
os.environ['SPARK_CLASSPATH'] = ":".join(classpath_jars())
conf = (SparkConf()
.set("spark.driver.extraClassPath", os.environ['SPARK_CLASSPATH']))
if SparkContext._active_spark_context is None:
SparkContext(conf=conf)
yield SparkContext._active_spark_context
# TearDown
SparkContext.stop(SparkContext._active_spark_context)
def test_sagemakerestimator_passes_correct_params_to_scala():
training_image = "train-abc-123"
model_image = "model-abc-123"
training_instance_count = 2
training_instance_type = "train-abc-123"
endpoint_instance_type = "c4.8xlarge"
endpoint_initial_instance_count = 2
estimator = SageMakerEstimator(
trainingImage=training_image,
modelImage=model_image,
trainingInstanceCount=training_instance_count,
trainingInstanceType=training_instance_type,
endpointInstanceType=endpoint_instance_type,
endpointInitialInstanceCount=endpoint_initial_instance_count,
requestRowSerializer=ProtobufRequestRowSerializer(),
responseRowDeserializer=KMeansProtobufResponseRowDeserializer()
)
assert estimator.trainingImage == training_image
assert estimator.modelImage == model_image
assert estimator.trainingInstanceType == training_instance_type
assert estimator.trainingInstanceCount == training_instance_count
assert estimator.endpointInitialInstanceCount == endpoint_initial_instance_count
assert estimator.endpointInstanceType == endpoint_instance_type
def test_sagemakerestimator_default_params():
training_image = "train-abc-123"
model_image = "model-abc-123"
training_instance_count = 2
training_instance_type = "train-abc-123"
endpoint_instance_type = "endpoint-abc-123"
endpoint_initial_instance_count = 2
estimator = SageMakerEstimator(
trainingImage=training_image,
modelImage=model_image,
trainingInstanceCount=training_instance_count,
trainingInstanceType=training_instance_type,
endpointInstanceType=endpoint_instance_type,
endpointInitialInstanceCount=endpoint_initial_instance_count,
requestRowSerializer=ProtobufRequestRowSerializer(),
responseRowDeserializer=KMeansProtobufResponseRowDeserializer()
)
assert estimator.trainingInstanceVolumeSizeInGB == 1024
assert estimator.trainingProjectedColumns is None
assert estimator.trainingChannelName == "train"
assert estimator.trainingContentType is None
assert estimator.trainingS3DataDistribution == "ShardedByS3Key"
assert estimator.trainingSparkDataFormat == "sagemaker"
assert estimator.trainingInputMode == "File"
assert estimator.trainingCompressionCodec is None
assert estimator.trainingMaxRuntimeInSeconds == 24 * 60 * 60
assert estimator.trainingKmsKeyId is None
assert estimator.modelPrependInputRowsToTransformationRows is True
assert estimator.deleteStagingDataAfterTraining is True
assert estimator.latestTrainingJob is None
|
218afdeeefd2e4a1c607bb0c58bfe936c6b03140
|
35b6013c1943f37d1428afd2663c8aba0a02628d
|
/appengine/standard/firebase/firetactoe/firetactoe_test.py
|
95c6cb9523c8ee6957f48e1f5d10295f95ceaedb
|
[
"Apache-2.0"
] |
permissive
|
GoogleCloudPlatform/python-docs-samples
|
d2a251805fbeab15d76ed995cf200727f63f887d
|
44e819e713c3885e38c99c16dc73b7d7478acfe8
|
refs/heads/main
| 2023-08-28T12:52:01.712293
| 2023-08-28T11:18:28
| 2023-08-28T11:18:28
| 35,065,876
| 7,035
| 7,593
|
Apache-2.0
| 2023-09-14T20:20:56
| 2015-05-04T23:26:13
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 6,712
|
py
|
firetactoe_test.py
|
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import re
from google.appengine.api import users
from google.appengine.ext import ndb
from six.moves import http_client
import pytest
import webtest
import firetactoe
class MockResponse:
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
def json(self):
return self.json_data
@pytest.fixture
def app(testbed, monkeypatch, login):
    # Don't let the _get_session function memoize its value
firetactoe._get_session.cache_clear()
# Provide a test firebase config. The following will set the databaseURL
# databaseURL: "http://firebase.com/test-db-url"
monkeypatch.setattr(firetactoe, "_FIREBASE_CONFIG", "../firetactoe_test.py")
login(id="38")
firetactoe.app.debug = True
return webtest.TestApp(firetactoe.app)
def test_index_new_game(app, monkeypatch):
with mock.patch(
"google.auth.transport.requests.AuthorizedSession.request", autospec=True
) as auth_session:
data = {"access_token": "123"}
auth_session.return_value = MockResponse(data, http_client.OK)
response = app.get("/")
assert "g=" in response.body
# Look for the unique game token
assert re.search(
r"initGame[^\n]+\'[\w+/=]+\.[\w+/=]+\.[\w+/=]+\'", response.body
)
assert firetactoe.Game.query().count() == 1
auth_session.assert_called_once_with(
mock.ANY, # AuthorizedSession object
method="PATCH",
url="http://firebase.com/test-db-url/channels/3838.json",
body='{"winner": null, "userX": "38", "moveX": true, "winningBoard": null, "board": " ", "userO": null}',
data=None,
)
def test_index_existing_game(app, monkeypatch):
with mock.patch(
"google.auth.transport.requests.AuthorizedSession.request", autospec=True
) as auth_session:
data = {"access_token": "123"}
auth_session.return_value = MockResponse(data, http_client.OK)
userX = users.User("x@example.com", _user_id="123")
firetactoe.Game(id="razem", userX=userX).put()
response = app.get("/?g=razem")
assert "g=" in response.body
# Look for the unique game token
assert re.search(
r"initGame[^\n]+\'[\w+/=]+\.[\w+/=]+\.[\w+/=]+\'", response.body
)
assert firetactoe.Game.query().count() == 1
game = ndb.Key("Game", "razem").get()
assert game is not None
assert game.userO.user_id() == "38"
auth_session.assert_called_once_with(
mock.ANY, # AuthorizedSession object
method="PATCH",
url="http://firebase.com/test-db-url/channels/38razem.json",
body='{"winner": null, "userX": "123", "moveX": null, "winningBoard": null, "board": null, "userO": "38"}',
data=None,
)
def test_index_nonexisting_game(app, monkeypatch):
with mock.patch(
"google.auth.transport.requests.AuthorizedSession.request", autospec=True
) as auth_session:
data = {"access_token": "123"}
auth_session.return_value = MockResponse(data, http_client.OK)
firetactoe.Game(id="razem", userX=users.get_current_user()).put()
app.get("/?g=razemfrazem", status=404)
assert not auth_session.called
def test_opened(app, monkeypatch):
with mock.patch(
"google.auth.transport.requests.AuthorizedSession.request", autospec=True
) as auth_session:
data = {"access_token": "123"}
auth_session.return_value = MockResponse(data, http_client.OK)
firetactoe.Game(id="razem", userX=users.get_current_user()).put()
app.post("/opened?g=razem", status=200)
auth_session.assert_called_once_with(
mock.ANY, # AuthorizedSession object
method="PATCH",
url="http://firebase.com/test-db-url/channels/38razem.json",
body='{"winner": null, "userX": "38", "moveX": null, "winningBoard": null, "board": null, "userO": null}',
data=None,
)
def test_bad_move(app, monkeypatch):
with mock.patch(
"google.auth.transport.requests.AuthorizedSession.request", autospec=True
) as auth_session:
data = {"access_token": "123"}
auth_session.return_value = MockResponse(data, http_client.OK)
firetactoe.Game(
id="razem", userX=users.get_current_user(), board=9 * " ", moveX=True
).put()
app.post("/move?g=razem", {"i": 10}, status=400)
assert not auth_session.called
def test_move(app, monkeypatch):
with mock.patch(
"google.auth.transport.requests.AuthorizedSession.request", autospec=True
) as auth_session:
data = {"access_token": "123"}
auth_session.return_value = MockResponse(data, http_client.OK)
firetactoe.Game(
id="razem", userX=users.get_current_user(), board=9 * " ", moveX=True
).put()
app.post("/move?g=razem", {"i": 0}, status=200)
game = ndb.Key("Game", "razem").get()
assert game.board == "X" + (8 * " ")
auth_session.assert_called_once_with(
mock.ANY, # AuthorizedSession object
method="PATCH",
url="http://firebase.com/test-db-url/channels/38razem.json",
body='{"winner": null, "userX": "38", "moveX": false, "winningBoard": null, "board": "X ", "userO": null}',
data=None,
)
def test_delete(app, monkeypatch):
with mock.patch(
"google.auth.transport.requests.AuthorizedSession.request", autospec=True
) as auth_session:
data = {"access_token": "123"}
auth_session.return_value = MockResponse(data, http_client.OK)
firetactoe.Game(id="razem", userX=users.get_current_user()).put()
app.post("/delete?g=razem", status=200)
auth_session.assert_called_once_with(
mock.ANY, # AuthorizedSession object
method="DELETE",
url="http://firebase.com/test-db-url/channels/38razem.json",
)
|
4799ce1a9197317b1e840b65a616013d4b6e0655
|
99d5e10013778f3822b3dac404db04156ad99acb
|
/ema_workbench/em_framework/ema_multiprocessing.py
|
606a3396b8d9e6f77ac3e491144c937173acd9ee
|
[
"BSD-3-Clause"
] |
permissive
|
quaquel/EMAworkbench
|
0e25b9caa86fb3c41376b14ad6f70911aec2a594
|
9d13fb6fc8e8e3fc8cc693102f85966c5876f9ac
|
refs/heads/master
| 2023-08-10T08:23:48.007445
| 2023-07-18T10:54:43
| 2023-07-18T10:54:43
| 5,109,457
| 102
| 86
|
BSD-3-Clause
| 2023-09-12T06:51:17
| 2012-07-19T12:18:19
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 7,576
|
py
|
ema_multiprocessing.py
|
"""
support for using the multiprocessing library in combination with the workbench
"""
import logging
import multiprocessing
import os
import queue
import shutil
import sys
import threading
import time
import traceback
from collections import defaultdict
from logging import handlers
from .experiment_runner import ExperimentRunner
from .model import AbstractModel
from .util import NamedObjectMap
from ..util import get_module_logger, ema_logging
# Created on 22 Feb 2017
#
# .. codeauthor::jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl>
__all__ = ["setup_working_directories"]
_logger = get_module_logger(__name__)
def initializer(*args):
"""initializer for a worker process
Parameters
----------
    models : list of AbstractModel instances
    queue : multiprocessing.Queue instance
        used by the workers to ship log records back to the main process
    log_level : int
    root_dir : str
        root directory under which the process specific working directories are created
This function initializes the worker. This entails
* initializing the experiment runner
* setting up the working directory
* setting up the logging
"""
global experiment_runner, current_process
current_process = multiprocessing.current_process()
models, queue, log_level, root_dir = args
# setup the experiment runner
msis = NamedObjectMap(AbstractModel)
msis.extend(models)
experiment_runner = ExperimentRunner(msis)
# setup the logging
setup_logging(queue, log_level)
# setup the working directories
# make a root temp
# copy each model directory
tmpdir = setup_working_directories(models, root_dir)
# register a cleanup finalizer function
# remove the root temp
if tmpdir:
multiprocessing.util.Finalize(
None, finalizer, args=(os.path.abspath(tmpdir),), exitpriority=10
)
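# Hedged sketch of how this initializer is expected to be wired into a pool; the concrete
# values below are illustrative assumptions rather than code taken from this module:
#
#   log_queue = multiprocessing.Manager().Queue()
#   pool = multiprocessing.Pool(
#       processes=4,
#       initializer=initializer,
#       initargs=(models, log_queue, logging.DEBUG, root_dir),
#   )
#
# The initargs tuple has to match the unpacking above: (models, queue, log_level, root_dir).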
def finalizer(tmpdir):
"""cleanup"""
global experiment_runner
_logger.info("finalizing")
experiment_runner.cleanup()
del experiment_runner
time.sleep(1)
if tmpdir:
try:
shutil.rmtree(tmpdir)
except OSError:
pass
def setup_logging(queue, log_level):
"""helper function for enabling logging from the workers to the main
process
Parameters
----------
queue : multiprocessing.Queue instance
log_level : int
"""
# create a logger
logger = logging.getLogger(ema_logging.LOGGER_NAME + ".subprocess")
ema_logging._logger = logger
logger.handlers = []
# add the handler
handler = handlers.QueueHandler(queue)
handler.setFormatter(ema_logging.LOG_FORMAT)
logger.addHandler(handler)
# set the log_level
logger.setLevel(log_level)
def setup_working_directories(models, root_dir):
"""copies the working directory of each model to a process specific
temporary directory and update the working directory of the model
Parameters
----------
models : list
root_dir : str
"""
# group models by working directory to avoid copying the same directory
# multiple times
wd_by_model = defaultdict(list)
for model in models:
try:
wd = model.working_directory
except AttributeError:
pass
else:
wd_by_model[wd].append(model)
# if the dict is not empty
if wd_by_model:
# make a directory with the process id as identifier
tmpdir_name = f"tmp{os.getpid()}"
tmpdir = os.path.join(root_dir, tmpdir_name)
os.mkdir(tmpdir)
_logger.debug(f"setting up working directory: {tmpdir}")
for key, value in wd_by_model.items():
# we need a sub directory in the process working directory
# for each unique model working directory
subdir = os.path.basename(os.path.normpath(key))
new_wd = os.path.join(tmpdir, subdir)
# the copy operation
shutil.copytree(key, new_wd)
for model in value:
model.working_directory = new_wd
return tmpdir
else:
return None
def worker(experiment):
"""the worker function for executing an individual experiment
Parameters
----------
experiment : dict
"""
global experiment_runner
return experiment, experiment_runner.run_experiment(experiment)
class LogQueueReader(threading.Thread):
"""
thread to write subprocesses log records to main process log
This thread reads the records written by subprocesses and writes them to
    the handlers defined in the main process.
found `online <https://stackoverflow.com/questions/641420/how-should-i-log-while-using-multiprocessing-in-python>`_
    TODO: should be generalized with the logwatcher used with ipyparallel
"""
def __init__(self, queue):
threading.Thread.__init__(self, name="log queue reader")
self.queue = queue
self.daemon = True
def run(self):
"""
read from the queue and write to the log handlers
The logging documentation says logging is thread safe, so there
shouldn't be contention between normal logging (from the main
process) and this thread.
Note that we're using the name of the original logger.
"""
while True:
try:
record = self.queue.get()
# get the logger for this record
if record is None:
_logger.debug("none received")
break
logger = logging.getLogger(record.name)
logger.callHandlers(record)
except (KeyboardInterrupt, SystemExit):
raise
except EOFError:
break
except TypeError:
break
except BaseException:
traceback.print_exc(file=sys.stderr)
class ExperimentFeeder(threading.Thread):
def __init__(self, pool, results_queue, experiments):
threading.Thread.__init__(self, name="task feeder")
self.pool = pool
self.experiments = experiments
self.results_queue = results_queue
self.daemon = True
def run(self):
for experiment in self.experiments:
result = self.pool.apply_async(worker, [experiment])
self.results_queue.put(result)
class ResultsReader(threading.Thread):
def __init__(self, queue, callback):
threading.Thread.__init__(self, name="results reader")
self.queue = queue
self.callback = callback
self.daemon = True
def run(self):
while True:
try:
result = self.queue.get()
# get the logger for this record
if result is None:
_logger.debug("none received")
break
self.callback(*result.get())
except (KeyboardInterrupt, SystemExit):
raise
except EOFError:
break
except TypeError:
break
except BaseException:
traceback.print_exc(file=sys.stderr)
def add_tasks(n_processes, pool, experiments, callback):
"""add experiments to pool
Parameters
----------
n_processes : int
pool : Pool instance
experiments : collection
callback : callable
"""
# by limiting task queue, we avoid putting all experiments on queue in
# one go
results_queue = queue.Queue(maxsize=5 * n_processes)
feeder = ExperimentFeeder(pool, results_queue, experiments)
reader = ResultsReader(results_queue, callback)
feeder.start()
reader.start()
feeder.join()
results_queue.put(None)
reader.join()
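# Worked illustration of the callback contract (the function name is a placeholder): each
# worker invocation returns an (experiment, outcome) tuple and ResultsReader calls
# callback(*result.get()), so a compatible callback is simply
#
#   def store_result(experiment, outcome):
#       ...  # e.g. append the outcome to an in-memory results store
#
# used as add_tasks(n_processes, pool, experiments, store_result).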
|
5535895c3ccfa8f9b4931350ac9a658e5e059cf6
|
5770a3fc8bd224d926d4aff5b7d8f1863f145cab
|
/quarkchain/p2p/constants.py
|
80db768a635eed324f42e6238fc2efeab54d07b3
|
[
"MIT"
] |
permissive
|
QuarkChain/pyquarkchain
|
d06a59d630fd0c4a07e1c10548ba044329da95ba
|
2068153c9386a1eacb5eccb8cf93d98f87537203
|
refs/heads/master
| 2023-02-27T14:16:07.419575
| 2022-04-18T20:35:59
| 2022-04-18T20:35:59
| 143,354,339
| 253
| 133
|
MIT
| 2023-02-07T21:54:01
| 2018-08-02T23:28:47
|
Python
|
UTF-8
|
Python
| false
| false
| 4,327
|
py
|
constants.py
|
SUPPORTED_RLPX_VERSION = 4
# Overhead added by ECIES encryption
ENCRYPT_OVERHEAD_LENGTH = 113
# Length of elliptic S256 signatures
SIGNATURE_LEN = 65
# Length of public keys: 512 bit keys in uncompressed form, without format byte
PUBKEY_LEN = 64
# Hash length (for nonce etc)
HASH_LEN = 32
# Length of initial auth handshake message
AUTH_MSG_LEN = SIGNATURE_LEN + HASH_LEN + PUBKEY_LEN + HASH_LEN + 1
# Length of auth ack handshake message
AUTH_ACK_LEN = PUBKEY_LEN + HASH_LEN + 1
# Length of encrypted pre-EIP-8 initiator handshake
ENCRYPTED_AUTH_MSG_LEN = AUTH_MSG_LEN + ENCRYPT_OVERHEAD_LENGTH
# Length of encrypted pre-EIP-8 handshake reply
ENCRYPTED_AUTH_ACK_LEN = AUTH_ACK_LEN + ENCRYPT_OVERHEAD_LENGTH
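# Worked out with the constants above, the handshake message sizes are:
#   AUTH_MSG_LEN           = 65 + 32 + 64 + 32 + 1 = 194 bytes
#   AUTH_ACK_LEN           = 64 + 32 + 1           = 97 bytes
#   ENCRYPTED_AUTH_MSG_LEN = 194 + 113             = 307 bytes
#   ENCRYPTED_AUTH_ACK_LEN = 97 + 113              = 210 bytes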
# Length of an RLPx packet's header
HEADER_LEN = 16
# Length of an RLPx header's/frame's MAC
MAC_LEN = 16
# The number of seconds a connection can be idle.
CONN_IDLE_TIMEOUT = 30
# Timeout used when waiting for a reply from a remote node.
REPLY_TIMEOUT = 3
MAX_REQUEST_ATTEMPTS = 3
# Default timeout before giving up on a caller-initiated interaction
COMPLETION_TIMEOUT = 5
MAINNET_BOOTNODES = (
'enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@52.16.188.185:30303', # noqa: E501
'enode://aa36fdf33dd030378a0168efe6ed7d5cc587fafa3cdd375854fe735a2e11ea3650ba29644e2db48368c46e1f60e716300ba49396cd63778bf8a818c09bded46f@13.93.211.84:30303', # noqa: E501
'enode://78de8a0916848093c73790ead81d1928bec737d565119932b98c6b100d944b7a95e94f847f689fc723399d2e31129d182f7ef3863f2b4c820abbf3ab2722344d@191.235.84.50:30303', # noqa: E501
'enode://158f8aab45f6d19c6cbf4a089c2670541a8da11978a2f90dbf6a502a4a3bab80d288afdbeb7ec0ef6d92de563767f3b1ea9e8e334ca711e9f8e2df5a0385e8e6@13.75.154.138:30303', # noqa: E501
'enode://1118980bf48b0a3640bdba04e0fe78b1add18e1cd99bf22d53daac1fd9972ad650df52176e7c7d89d1114cfef2bc23a2959aa54998a46afcf7d91809f0855082@52.74.57.123:30303', # noqa: E501
)
ROPSTEN_BOOTNODES = (
'enode://30b7ab30a01c124a6cceca36863ece12c4f5fa68e3ba9b0b51407ccc002eeed3b3102d20a88f1c1d3c3154e2449317b8ef95090e77b312d5cc39354f86d5d606@52.176.7.10:30303', # noqa: E501
'enode://865a63255b3bb68023b6bffd5095118fcc13e79dcf014fe4e47e065c350c7cc72af2e53eff895f11ba1bbb6a2b33271c1116ee870f266618eadfc2e78aa7349c@52.176.100.77:30303', # noqa: E501
'enode://6332792c4a00e3e4ee0926ed89e0d27ef985424d97b6a45bf0f23e51f0dcb5e66b875777506458aea7af6f9e4ffb69f43f3778ee73c81ed9d34c51c4b16b0b0f@52.232.243.152:30303', # noqa: E501
'enode://94c15d1b9e2fe7ce56e458b9a3b672ef11894ddedd0c6f247e0f1d3487f52b66208fb4aeb8179fce6e3a749ea93ed147c37976d67af557508d199d9594c35f09@192.81.208.223:30303', # noqa: E501
)
DISCOVERY_V5_BOOTNODES = (
'enode://06051a5573c81934c9554ef2898eb13b33a34b94cf36b202b69fde139ca17a85051979867720d4bdae4323d4943ddf9aeeb6643633aa656e0be843659795007a@35.177.226.168:30303', # noqa: E501
'enode://0cc5f5ffb5d9098c8b8c62325f3797f56509bff942704687b6530992ac706e2cb946b90a34f1f19548cd3c7baccbcaea354531e5983c7d1bc0dee16ce4b6440b@40.118.3.223:30304', # noqa: E501
'enode://1c7a64d76c0334b0418c004af2f67c50e36a3be60b5e4790bdac0439d21603469a85fad36f2473c9a80eb043ae60936df905fa28f1ff614c3e5dc34f15dcd2dc@40.118.3.223:30306', # noqa: E501
'enode://85c85d7143ae8bb96924f2b54f1b3e70d8c4d367af305325d30a61385a432f247d2c75c45c6b4a60335060d072d7f5b35dd1d4c45f76941f62a4f83b6e75daaf@40.118.3.223:30307', # noqa: E501
)
# Maximum number of peers; we'll try to keep open connections up to this number of peers
DEFAULT_MAX_PEERS = 25
# Maximum allowed depth for chain reorgs.
MAX_REORG_DEPTH = 24
# Random sampling rate (i.e. every K-th) for header seal checks during light/fast sync. Apparently
# 100 was the optimal value determined by geth devs
# (https://github.com/ethereum/go-ethereum/pull/1889#issue-47241762), but in order to err on the
# side of caution we sample more often here, checking every 48th header instead of every 100th.
SEAL_CHECK_RANDOM_SAMPLE_RATE = 48
# The amount of time that the BasePeerPool will wait for a peer to boot before
# aborting the connection attempt.
DEFAULT_PEER_BOOT_TIMEOUT = 20
# period between adding a dialout_blacklisted node and removing it
DIALOUT_BLACKLIST_COOLDOWN_SEC = 24 * 3600
DIALIN_BLACKLIST_COOLDOWN_SEC = 8 * 3600
UNBLACKLIST_INTERVAL = 15 * 60
|
726e4f6f85221546eb2931465ba3a4bf6157f074
|
2337351b228818e41be3002bd38f68f77c2aa074
|
/tests/test_hist.py
|
ada91364a1419d4f56cab446f6b4f3bdb4eab72e
|
[
"BSD-3-Clause"
] |
permissive
|
nocproject/noc
|
57d40c680a1499374463e472434f9595ed6d1374
|
6e6d71574e9b9d822bec572cc629a0ea73604a59
|
refs/heads/master
| 2023-08-31T01:11:33.544573
| 2023-08-30T17:31:11
| 2023-08-30T17:31:11
| 107,815,776
| 105
| 33
|
BSD-3-Clause
| 2023-07-31T07:57:45
| 2017-10-21T21:04:33
|
Python
|
UTF-8
|
Python
| false
| false
| 3,839
|
py
|
test_hist.py
|
# ----------------------------------------------------------------------
# Test noc.core.hist module
# ----------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Third-party modules
import pytest
# NOC modules
from noc.core.hist.base import Histogram
@pytest.mark.parametrize(
"config,sample,expected",
[
# Empty config, all counts to +inf
([], [], [0]),
([], [1], [1]),
([], [1, 2, 3, 4, 5], [5]),
# One threshold, 1ms
([0.001], [], [0, 0]),
([0.001], [0], [1, 1]),
([0.001], [0, 999], [2, 2]),
([0.001], [0, 999, 1000], [3, 3]),
([0.001], [0, 999, 1000, 100000], [3, 4]),
# Two thresholds
([0.001, 0.01], [], [0, 0, 0]),
([0.001, 0.01], [0], [1, 1, 1]),
([0.001, 0.01], [0, 999], [2, 2, 2]),
([0.001, 0.01], [0, 999, 1000], [3, 3, 3]),
([0.001, 0.01], [0, 999, 1000, 1001], [3, 4, 4]),
([0.001, 0.01], [0, 999, 1000, 1001, 5000], [3, 5, 5]),
([0.001, 0.01], [0, 999, 1000, 1001, 5000, 9999], [3, 6, 6]),
([0.001, 0.01], [0, 999, 1000, 1001, 5000, 9999, 10000], [3, 7, 7]),
([0.001, 0.01], [0, 999, 1000, 1001, 5000, 9999, 10000, 100000], [3, 7, 8]),
],
)
def test_hist_register(config, sample, expected):
# Create histogram
hist = Histogram(config)
# Fill with samples
for x in sample:
hist.register(x)
# Compare result
assert hist.get_values() == expected
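# The expected values above are cumulative bucket counts: for each configured
# threshold (in seconds) the number of samples (apparently in microseconds,
# judging by the 0.127999 test_sum value further down) at or below it, plus a
# trailing "+Inf" bucket holding the total. A rough reference sketch of that
# semantics -- not the actual noc.core.hist implementation:
def _reference_bucket_counts(config, sample):
    thresholds_us = [t * 1_000_000 for t in config]  # seconds -> microseconds
    return [sum(1 for x in sample if x <= t) for t in thresholds_us] + [len(sample)]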
@pytest.mark.parametrize(
"config,sample,labels,expected",
[
# Empty config
(
[],
[],
{},
"# TYPE test_bucket untyped\n"
'test_bucket{le="+Inf"} 0\n'
"# TYPE test_sum untyped\n"
"test_sum{} 0.0\n"
"# TYPE test_count untyped\n"
"test_count{} 0",
),
(
[],
[],
{"pool": "default"},
"# TYPE test_bucket untyped\n"
'test_bucket{pool="default",le="+Inf"} 0\n'
"# TYPE test_sum untyped\n"
'test_sum{pool="default"} 0.0\n'
"# TYPE test_count untyped\n"
'test_count{pool="default"} 0',
),
# Two thresholds
(
[0.001, 0.01],
[0, 999, 1000, 1001, 5000, 9999, 10000, 100000],
{},
"# TYPE test_bucket untyped\n"
'test_bucket{le="0.001"} 3\n'
"# TYPE test_bucket untyped\n"
'test_bucket{le="0.01"} 7\n'
"# TYPE test_bucket untyped\n"
'test_bucket{le="+Inf"} 8\n'
"# TYPE test_sum untyped\n"
"test_sum{} 0.127999\n"
"# TYPE test_count untyped\n"
"test_count{} 8",
),
(
[0.001, 0.01],
[0, 999, 1000, 1001, 5000, 9999, 10000, 100000],
{"pool": "default"},
"# TYPE test_bucket untyped\n"
'test_bucket{pool="default",le="0.001"} 3\n'
"# TYPE test_bucket untyped\n"
'test_bucket{pool="default",le="0.01"} 7\n'
"# TYPE test_bucket untyped\n"
'test_bucket{pool="default",le="+Inf"} 8\n'
"# TYPE test_sum untyped\n"
'test_sum{pool="default"} 0.127999\n'
"# TYPE test_count untyped\n"
'test_count{pool="default"} 8',
),
],
)
def test_prom_metrics(config, sample, labels, expected):
# Create histogram
hist = Histogram(config)
# Fill with samples
for x in sample:
hist.register(x)
# Compare result
metrics = "\n".join(hist.iter_prom_metrics("test", labels))
assert metrics == expected
|
d797c1c031e05a7d28597723d94db34dbf417a6c
|
10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94
|
/Python/convert-the-temperature.py
|
728fb12b182dcd43c683e736c611cf0913d182ad
|
[
"MIT"
] |
permissive
|
kamyu104/LeetCode-Solutions
|
f54822059405ef4df737d2e9898b024f051fd525
|
4dc4e6642dc92f1983c13564cc0fd99917cab358
|
refs/heads/master
| 2023-09-02T13:48:26.830566
| 2023-08-28T10:11:12
| 2023-08-28T10:11:12
| 152,631,182
| 4,549
| 1,651
|
MIT
| 2023-05-31T06:10:33
| 2018-10-11T17:38:35
|
C++
|
UTF-8
|
Python
| false
| false
| 236
|
py
|
convert-the-temperature.py
|
# Time: O(1)
# Space: O(1)
# math
class Solution(object):
def convertTemperature(self, celsius):
"""
:type celsius: float
:rtype: List[float]
"""
return [celsius+273.15, celsius*1.80+32.00]
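# Illustrative check: Solution().convertTemperature(36.50) -> [309.65, 97.70]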
|
4dd9d153132077ae1cdd39d8163947abb52402f4
|
9a717ace10555004356b963a1836844e5764ba7a
|
/napalm_logs/scripts/__init__.py
|
f8b354a484c36ea63410e4c0c72839fc7ddf1ee5
|
[
"Apache-2.0"
] |
permissive
|
napalm-automation/napalm-logs
|
467e7654f34318e5dca4323e1726a3543065e395
|
1778d79ccd0eaa0d5205d369637e8f557365fa65
|
refs/heads/develop
| 2023-06-26T15:16:33.621929
| 2022-11-08T14:48:00
| 2022-11-08T14:48:00
| 84,958,777
| 142
| 53
|
Apache-2.0
| 2023-06-12T09:22:43
| 2017-03-14T14:30:19
|
Python
|
UTF-8
|
Python
| false
| false
| 52
|
py
|
__init__.py
|
# -*- coding: utf-8 -*-
"""
napalm-logs scripts
"""
|
7f28836fc7938099963b9a7d8639e8dbd0edcce6
|
34305ef03fffd872195fced3d946fcaccbc79ddf
|
/skrf/io/tests/test_io.py
|
2bf52a39b3eb480149d3e305eaa3a26bb49deef4
|
[
"BSD-3-Clause"
] |
permissive
|
scikit-rf/scikit-rf
|
20477c643883b6b46dca50fca31ad1010a9512e9
|
26243ffc45160f17612badc880ad5d022036537a
|
refs/heads/master
| 2023-08-19T03:34:42.208425
| 2023-07-28T13:56:06
| 2023-07-28T13:56:06
| 3,218,028
| 555
| 276
|
BSD-3-Clause
| 2023-09-12T21:56:17
| 2012-01-19T14:25:58
|
Python
|
UTF-8
|
Python
| false
| false
| 6,009
|
py
|
test_io.py
|
import unittest
import os
import numpy as npy
import skrf as rf
from skrf.io import Touchstone
class IOTestCase(unittest.TestCase):
"""
"""
def setUp(self):
"""
"""
self.test_dir = os.path.dirname(os.path.abspath(__file__))+'/'
self.pickle_file = os.path.join(self.test_dir, 'pickled.p')
self.hfss_oneport_file = os.path.join(self.test_dir, 'hfss_oneport.s1p')
self.hfss_twoport_file = os.path.join(self.test_dir, 'hfss_twoport.s2p')
self.ntwk1 = rf.Network(os.path.join(self.test_dir, 'ntwk1.s2p'))
self.ntwk2 = rf.Network(os.path.join(self.test_dir, 'ntwk2.s2p'))
self.ntwk3 = rf.Network(os.path.join(self.test_dir, 'ntwk3.s2p'))
self.short = rf.Network(os.path.join(self.test_dir, 'short.s1p'))
self.match = rf.Network(os.path.join(self.test_dir, 'match.s1p'))
self.open = rf.Network(os.path.join(self.test_dir, 'open.s1p'))
self.ntwk_comments_file = os.path.join(self.test_dir, 'comments.s3p')
self.test_files = [os.path.join(self.test_dir, test_file) for test_file in ['ntwk1.s2p', 'ntwk2.s2p']]
self.embeding_network= rf.Network(os.path.join(self.test_dir, 'embedingNetwork.s2p'))
self.freq = rf.F(75, 110, 101, unit='GHz')
def read_write(self,obj):
"""
function to test write/read equivalence for an obj which has
__eq__ defined
"""
rf.write(self.pickle_file,obj)
self.assertEqual(rf.read(self.pickle_file), obj)
# os.remove(self.pickle_file)
def test_read_all(self):
rf.read_all(self.test_dir)
def test_read_all_files(self):
rf.read_all(files=self.test_files)
def test_save_sesh(self):
a=self.ntwk1
b=self.ntwk2
c=self.ntwk3
rf.save_sesh(locals(),self.pickle_file )
#os.remove(self.pickle_file)
def test_write_all_dict(self):
d = dict(a=self.ntwk1, b=self.ntwk2, c=self.ntwk3)
rf.write_all(d, dir =self.test_dir )
os.remove(os.path.join(self.test_dir, 'a.ntwk'))
os.remove(os.path.join(self.test_dir, 'b.ntwk'))
os.remove(os.path.join(self.test_dir, 'c.ntwk'))
def test_readwrite_network(self):
self.read_write(self.ntwk1)
def test_readwrite_list_of_network(self):
self.read_write([self.ntwk1, self.ntwk2])
def test_readwrite_networkSet(self):
"""
test_readwrite_networkSet
TODO: need __eq__ method for NetworkSet
This doesn't test equality between read/write, because NetworkSet has no
__eq__ method; it only tests for other errors.
"""
rf.write(self.pickle_file,rf.NS([self.ntwk1, self.ntwk2]))
rf.read(self.pickle_file)
#self.assertEqual(rf.read(self.pickle_file), rf.NS([self.ntwk1, self.ntwk2])
#os.remove(self.pickle_file)
def test_readwrite_frequency(self):
freq = rf.Frequency(1,10,10,'ghz')
self.read_write(freq)
def test_readwrite_calibration(self):
ideals, measured = [], []
std_list = [self.short, self.match,self.open]
for ntwk in std_list:
ideals.append(ntwk)
measured.append(self.embeding_network ** ntwk)
cal = rf.Calibration(\
ideals = ideals,\
measured = measured,\
type = 'one port',\
is_reciprocal = True,\
)
original = cal
rf.write(self.pickle_file, original)
unpickled = rf.read(self.pickle_file)
# TODO: this test should be more extensive
self.assertEqual(original.ideals, unpickled.ideals)
self.assertEqual(original.measured, unpickled.measured)
os.remove(self.pickle_file)
def test_readwrite_media(self):
a_media = rf.media.DefinedGammaZ0(
frequency = self.freq,
gamma = 1j*npy.ones(101) ,
z0 = 50*npy.ones(101),
)
self.read_write(a_media)
@unittest.skip
def test_readwrite_media_func_propgamma(self):
a_media = rf.media.Media(
frequency = self.freq,
propagation_constant = lambda :1j ,
characteristic_impedance = lambda :50,
)
self.read_write(a_media)
@unittest.skip
def test_readwrite_RectangularWaveguide(self):
a_media = rf.media.RectangularWaveguide(
frequency = self.freq,
a=100*rf.mil,
z0=50,
)
self.read_write(a_media)
@unittest.skip
def test_readwrite_DistributedCircuit(self):
one = npy.ones(self.freq.npoints)
a_media = rf.media.DistributedCircuit(
frequency = self.freq,
R=1e5*one, G=1*one, I=1e-6*one, C=8e-12*one
)
self.read_write(a_media)
@unittest.skip
def test_readwrite_Freespace(self):
a_media = rf.media.Freespace(
frequency = self.freq,
)
self.read_write(a_media)
def test_snp_json_roundtrip(self):
"""
Tests if snp object saved to json and reloaded is still the same.
"""
given = self.ntwk1
actual = rf.from_json_string(rf.to_json_string(given))
self.assertEqual(actual, given)
self.assertEqual(actual.frequency, given.frequency)
self.assertEqual(actual.name, given.name)
self.assertEqual(actual.comments, given.comments)
self.assertEqual(actual.z0.tolist(), given.z0.tolist())
self.assertEqual(actual.port_names, given.port_names)
self.assertEqual(actual.variables, given.variables)
def test_touchstone_get_comment_variables(self):
"""
Tests if comments are parsed correctly with get_comment_variables() method.
"""
given = {'p1': ('.03', ''), 'p2': ('0.03', ''), 'p3': ('100', ''), 'p4': ('2.5', 'um')}
actual = Touchstone(self.ntwk_comments_file).get_comment_variables()
self.assertEqual(given, actual)
|
ff833d9fffd56279a9bda34386445506d858b75a
|
5ea4a3a0e50d2cee386f497c8449d13cd80450f9
|
/xsdata/formats/dataclass/parsers/mixins.py
|
04a348b2f195d43abbdd43ef27a22ac8e8414269
|
[
"MIT"
] |
permissive
|
tefra/xsdata
|
8df028ff79cd04b29ecf24401810562b8917b7be
|
31f672af84fd040a97996871916a41b1046fe46b
|
refs/heads/main
| 2023-08-17T03:20:06.912750
| 2023-08-12T15:24:40
| 2023-08-12T15:24:40
| 217,130,848
| 243
| 49
|
MIT
| 2023-08-30T15:25:31
| 2019-10-23T18:51:12
|
Python
|
UTF-8
|
Python
| false
| false
| 5,621
|
py
|
mixins.py
|
import abc
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import Type
from xsdata.exceptions import XmlHandlerError
from xsdata.formats.bindings import AbstractParser
from xsdata.formats.dataclass.parsers.config import ParserConfig
from xsdata.models.enums import EventType
NoneStr = Optional[str]
class PushParser(AbstractParser):
"""
A generic interface for event based content handlers like sax.
:param config: Parser configuration.
"""
config: ParserConfig
ns_map: Dict
@abc.abstractmethod
def start(
self,
clazz: Optional[Type],
queue: List,
objects: List,
qname: str,
attrs: Dict,
ns_map: Dict,
):
"""Queue the next xml node for parsing."""
@abc.abstractmethod
def end(
self,
queue: List,
objects: List,
qname: str,
text: NoneStr,
tail: NoneStr,
) -> bool:
"""
Parse the last xml node and bind any intermediate objects.
:return: The result of the binding process.
"""
def register_namespace(self, prefix: NoneStr, uri: str):
"""
Add the given prefix-URI namespaces mapping if the prefix is new.
:param prefix: Namespace prefix
:param uri: Namespace uri
"""
if prefix not in self.ns_map:
self.ns_map[prefix] = uri
class XmlNode(abc.ABC):
"""
The xml node interface.
The nodes are responsible for finding and queuing the child nodes when a
new element starts and for building the resulting object tree when the
element ends. The parser needs to maintain a queue for these nodes
and a list of all the intermediate object trees.
"""
@abc.abstractmethod
def child(self, qname: str, attrs: Dict, ns_map: Dict, position: int) -> "XmlNode":
"""
Initialize the next child node to be queued, when a new xml element
starts.
This entry point is responsible for creating the next node type
with all the necessary information on how to bind the incoming
input data.
:param qname: Qualified name
:param attrs: Attribute key-value map
:param ns_map: Namespace prefix-URI map
:param position: The current objects position, to mark future
objects as children
"""
@abc.abstractmethod
def bind(self, qname: str, text: NoneStr, tail: NoneStr, objects: List) -> bool:
"""
Build the object tree for the ending element and return whether the
result was successful or not.
This entry point is called when an xml element ends and is
responsible for parsing the current element's attributes/text, binding
any child objects and initializing the new object.
:param qname: Qualified name
:param text: Text content
:param tail: Tail content
:param objects: The list of intermediate parsed objects, eg
[(qname, object)]
"""
class XmlHandler:
"""
Abstract content handler.
:param parser: The parser instance to feed with events
:param clazz: The target binding model, auto located if omitted.
"""
__slots__ = ("parser", "clazz", "queue", "objects")
def __init__(self, parser: PushParser, clazz: Optional[Type]):
self.parser = parser
self.clazz = clazz
self.queue: List = []
self.objects: List = []
def parse(self, source: Any) -> Any:
"""Parse an XML document from a system identifier or an InputSource."""
raise NotImplementedError("This method must be implemented!")
def merge_parent_namespaces(self, ns_map: Dict) -> Dict:
"""
Merge and return the given prefix-URI map with the parent node.
Register new prefixes with the parser.
:param ns_map: Namespace prefix-URI map
"""
if self.queue:
parent_ns_map = self.queue[-1].ns_map
if not ns_map:
return parent_ns_map
result = parent_ns_map.copy() if parent_ns_map else {}
else:
result = {}
for prefix, uri in ns_map.items():
self.parser.register_namespace(prefix, uri)
result[prefix] = uri
return result
class EventsHandler(XmlHandler):
"""Sax content handler for pre-recorded events."""
__slots__ = ("data_frames", "flush_next")
def __init__(self, parser: PushParser, clazz: Optional[Type]):
super().__init__(parser, clazz)
self.data_frames: List = []
self.flush_next: Optional[str] = None
def parse(self, source: List[Tuple]) -> Any:
"""Forward the pre-recorded events to the main parser."""
for event, *args in source:
if event == EventType.START:
qname, attrs, ns_map = args
self.parser.start(
self.clazz,
self.queue,
self.objects,
qname,
attrs,
ns_map,
)
elif event == EventType.END:
qname, text, tail = args
self.parser.end(self.queue, self.objects, qname, text, tail)
elif event == EventType.START_NS:
prefix, uri = args
self.parser.register_namespace(prefix or None, uri)
else:
raise XmlHandlerError(f"Unhandled event: `{event}`.")
return self.objects[-1][1] if self.objects else None
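# Illustrative replay of pre-recorded events (a sketch, not from the library's
# documentation; `my_push_parser` and `Root` are placeholders for a concrete
# PushParser implementation and its target binding model):
#
#     events = [
#         (EventType.START, "{urn:x}root", {}, {None: "urn:x"}),
#         (EventType.END, "{urn:x}root", "hello", None),
#     ]
#     result = EventsHandler(parser=my_push_parser, clazz=Root).parse(events)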
|
3cd79d26035d8aa83fe14043a349f8b54f9166bc
|
7a836801cdfefe5cb645e855989a456b43dc883e
|
/src/ops/jinja/__init__.py
|
09b242c5dec4d29925ae2be153b2fabd4a1ee444
|
[
"Apache-2.0"
] |
permissive
|
adobe/ops-cli
|
57fa2925d200e429ca0a028324ee2d25bdcfc0a5
|
02fea7dbb7652b6dd0e53cea6433a632d6a1aaea
|
refs/heads/master
| 2023-08-31T11:23:14.781104
| 2023-08-01T13:55:35
| 2023-08-01T13:55:35
| 168,430,474
| 200
| 54
|
Apache-2.0
| 2023-09-12T08:06:49
| 2019-01-30T23:18:55
|
Python
|
UTF-8
|
Python
| false
| false
| 1,743
|
py
|
__init__.py
|
# Copyright 2019 Adobe. All rights reserved.
# This file is licensed to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
from jinja2 import FileSystemLoader, Environment, StrictUndefined, Undefined, DebugUndefined
from jinja2.loaders import ChoiceLoader
from ansible.plugins.loader import PluginLoader
class Template(object):
def __init__(self, root_dir, ops_config):
loader = ChoiceLoader([
FileSystemLoader(root_dir),
FileSystemLoader("/")
])
mode = ops_config.get('jinja2.undefined')
undefined = Undefined
if mode == 'StrictUndefined':
undefined = StrictUndefined
elif mode == 'DebugUndefined':
undefined = DebugUndefined
self.env = Environment(loader=loader, undefined=undefined)
self.filter_plugin_loader = PluginLoader(
'FilterModule',
'ansible.plugins.filter',
ops_config.ansible_filter_plugins.split(':'),
'filter_plugins'
)
for filter_module in self.filter_plugin_loader.all():
self.env.filters.update(filter_module.filters())
def render(self, source, vars):
jinja_template = self.env.get_template(source)
return jinja_template.render(**vars)
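# Illustrative usage (a sketch; `ops_config` is assumed to expose `get()` and an
# `ansible_filter_plugins` attribute, as used in __init__ above):
#
#     template = Template("/path/to/templates", ops_config)
#     rendered = template.render("cluster.yaml.j2", {"cluster_name": "demo"})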
|
e4daeec172f07fe8fadc267de6ac266cfdbf7d2d
|
9c028c54fcba2d22d48bed063090e35e9af35dc4
|
/ciw/tests/test_individual.py
|
3de2412db79039d4453b5c7a61eaf1b4a0037011
|
[
"MIT"
] |
permissive
|
CiwPython/Ciw
|
b2612edf361c7c6af58fdf5f710b44c81f417d23
|
85d03a4988f34c1ca05d7781649042ea2761a42b
|
refs/heads/master
| 2023-04-19T03:45:53.599066
| 2023-04-06T17:42:08
| 2023-04-06T17:42:08
| 47,995,577
| 143
| 39
|
MIT
| 2023-02-07T13:06:10
| 2015-12-14T19:10:50
|
Python
|
UTF-8
|
Python
| false
| false
| 5,033
|
py
|
test_individual.py
|
import unittest
import ciw
from hypothesis import given
from hypothesis.strategies import integers
class TestIndividual(unittest.TestCase):
def test_init_method_1(self):
i = ciw.Individual(22, 3)
self.assertEqual(i.customer_class, 3)
self.assertEqual(i.previous_class, 3)
self.assertEqual(i.priority_class, 0)
self.assertEqual(i.id_number, 22)
self.assertEqual(i.service_start_date, False)
self.assertEqual(i.service_time, False)
self.assertEqual(i.service_end_date, False)
self.assertEqual(i.arrival_date, False)
self.assertEqual(i.destination, False)
self.assertEqual(i.queue_size_at_arrival, False)
self.assertEqual(i.queue_size_at_departure, False)
self.assertEqual(i.data_records, [])
self.assertEqual(i.simulation, False)
def test_init_method_2(self):
i = ciw.Individual(5)
self.assertEqual(i.customer_class, 0)
self.assertEqual(i.previous_class, 0)
self.assertEqual(i.priority_class, 0)
self.assertEqual(i.id_number, 5)
self.assertEqual(i.service_start_date, False)
self.assertEqual(i.service_time, False)
self.assertEqual(i.service_end_date, False)
self.assertEqual(i.arrival_date, False)
self.assertEqual(i.destination, False)
self.assertEqual(i.queue_size_at_arrival, False)
self.assertEqual(i.queue_size_at_departure, False)
self.assertEqual(i.data_records, [])
self.assertEqual(i.simulation, False)
def test_init_method_3(self):
i = ciw.Individual(5, 0, 2)
self.assertEqual(i.customer_class, 0)
self.assertEqual(i.previous_class, 0)
self.assertEqual(i.priority_class, 2)
self.assertEqual(i.id_number, 5)
self.assertEqual(i.service_start_date, False)
self.assertEqual(i.service_time, False)
self.assertEqual(i.service_end_date, False)
self.assertEqual(i.arrival_date, False)
self.assertEqual(i.destination, False)
self.assertEqual(i.queue_size_at_arrival, False)
self.assertEqual(i.queue_size_at_departure, False)
self.assertEqual(i.data_records, [])
self.assertEqual(i.simulation, False)
def test_init_method_4(self):
Q = ciw.Simulation(ciw.create_network_from_yml(
'ciw/tests/testing_parameters/params_mm1.yml'))
i = ciw.Individual(5, 0, 2, simulation=Q)
self.assertEqual(i.customer_class, 0)
self.assertEqual(i.previous_class, 0)
self.assertEqual(i.priority_class, 2)
self.assertEqual(i.id_number, 5)
self.assertEqual(i.service_start_date, False)
self.assertEqual(i.service_time, False)
self.assertEqual(i.service_end_date, False)
self.assertEqual(i.arrival_date, False)
self.assertEqual(i.destination, False)
self.assertEqual(i.queue_size_at_arrival, False)
self.assertEqual(i.queue_size_at_departure, False)
self.assertEqual(i.data_records, [])
self.assertEqual(i.simulation, Q)
def test_repr_method(self):
i = ciw.Individual(3, 6)
self.assertEqual(str(i), 'Individual 3')
@given(id_num = integers(),
customer_class = integers(),
priority_class=integers())
def test_init_method_1h(self, id_num, customer_class, priority_class):
i = ciw.Individual(id_num, customer_class, priority_class)
self.assertEqual(i.customer_class, customer_class)
self.assertEqual(i.previous_class, customer_class)
self.assertEqual(i.priority_class, priority_class)
self.assertEqual(i.id_number, id_num)
self.assertEqual(i.service_start_date, False)
self.assertEqual(i.service_time, False)
self.assertEqual(i.service_end_date, False)
self.assertEqual(i.arrival_date, False)
self.assertEqual(i.destination, False)
self.assertEqual(i.queue_size_at_arrival, False)
self.assertEqual(i.queue_size_at_departure, False)
self.assertEqual(i.data_records, [])
@given(id_num = integers())
def test_init_method_2h(self, id_num):
i = ciw.Individual(id_num)
self.assertEqual(i.customer_class, 0)
self.assertEqual(i.previous_class, 0)
self.assertEqual(i.priority_class, 0)
self.assertEqual(i.id_number, id_num)
self.assertEqual(i.service_start_date, False)
self.assertEqual(i.service_time, False)
self.assertEqual(i.service_end_date, False)
self.assertEqual(i.arrival_date, False)
self.assertEqual(i.destination, False)
self.assertEqual(i.queue_size_at_arrival, False)
self.assertEqual(i.queue_size_at_departure, False)
self.assertEqual(i.data_records, [])
@given(id_num = integers(),
customer_class = integers())
def test_repr_methodh(self, id_num, customer_class):
i = ciw.Individual(id_num, customer_class)
self.assertEqual(str(i), 'Individual ' + str(id_num))
|
7a3871ef3c456df483508e10f9e80c3987f4ad3f
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/ios/third_party/webkit/copy_webkit_for_clusterfuzz.py
|
3615a0a4a80ae5736e8e192b4a8ed4cad138c421
|
[
"LGPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-1.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 1,431
|
py
|
copy_webkit_for_clusterfuzz.py
|
# Copyright 2020 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import shutil
import subprocess
import sys
def main():
description = 'Packages WebKit build for Clusterfuzz.'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('--output',
help='Name of the output file.')
parser.add_argument('--webkit_build',
help='WebKit build directory to copy.')
parser.add_argument('--clusterfuzz_script',
help='Clusterfuzz script to copy.')
parser.add_argument('--clang_asan_library',
help='Clang ASan library to copy.')
opts = parser.parse_args()
if os.path.exists(opts.output):
shutil.rmtree(opts.output)
shutil.copytree(opts.webkit_build, opts.output, symlinks=True)
shutil.copyfile(
opts.clusterfuzz_script,
os.path.join(opts.output,
os.path.basename(opts.clusterfuzz_script)))
shutil.copyfile(
opts.clang_asan_library,
os.path.join(opts.output,
os.path.basename(opts.clang_asan_library)))
zip_command = ['zip', '--symlinks', '-r', os.extsep.join([opts.output, 'zip']), opts.output]
proc = subprocess.Popen(zip_command)
proc.communicate()
return proc.returncode
if __name__ == '__main__':
sys.exit(main())
|
453eb83130d9e028cf173b53159c5f545dae5197
|
5f69a6549b8d5e417553d910622e6855b2ae679b
|
/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/misc/comment_cmd.py
|
2c1074666125dafe986be8b272c4922dda8f58eb
|
[
"Apache-2.0"
] |
permissive
|
opendr-eu/opendr
|
822219f709613d77c5eb62c5d02808d344239835
|
b3d6ce670cdf63469fc5766630eb295d67b3d788
|
refs/heads/master
| 2023-08-31T07:02:36.375231
| 2023-08-29T06:39:51
| 2023-08-29T06:39:51
| 293,755,225
| 535
| 82
|
Apache-2.0
| 2023-09-13T16:53:34
| 2020-09-08T08:55:04
|
Python
|
UTF-8
|
Python
| false
| false
| 1,921
|
py
|
comment_cmd.py
|
# Copyright 2020-2023 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .misc_cmd import MiscCommand
class CommentCommand(MiscCommand):
"""
Command that allows comments in the simulation JSON files and, optionally, prints them during the simulation run.
"""
def __init__(self, config, callback, last_pose):
"""
Instantiate Comment command object.
:param config: (dict) Configuration dictionary, with parameters:
* "txt": (string) (Optional)[Default: "") Comment
* "print": (bool) (Optional)[Default: True) Print the contents of "txt" during
simulation if True.
:param callback: (callable) Function, lambda or other callable object to be executed
when calling the command object.
:param last_pose: (Pose) Last pose of the robot before this command. Unused.
"""
super(CommentCommand, self).__init__(config, callback, last_pose)
if 'print' in config:
do_print = bool(config['print'])
else:
do_print = True
if 'txt' in config:
self.msg = config['txt']
else:
self.msg = ""
if self.msg and do_print:
self.do_print = True
else:
self.do_print = False
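# An example of a command entry this class could consume from a simulation JSON
# file (illustrative; the surrounding "type" key is an assumption about the
# schema -- only "txt" and "print" are read here):
#
#     {"type": "comment", "txt": "Start of the second corridor", "print": true}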
|
f9cb277343ab73306e9efe699313a914ef8fd659
|
88cbc17731050b425f197ac7766c6156d5823ca8
|
/flake8_stripe/flake8_stripe.py
|
500e0f4ade2838e9cd700cdbbceaf38a0de116c8
|
[
"MIT"
] |
permissive
|
stripe/stripe-python
|
011fe719f4305302013b360b76d2af3a0d5b3bf9
|
85066908ed1e638574351da1a1b260291ea0f8a1
|
refs/heads/master
| 2023-08-31T23:47:53.826885
| 2023-08-31T20:25:09
| 2023-08-31T20:25:09
| 1,953,389
| 1,396
| 465
|
MIT
| 2023-09-14T18:12:54
| 2011-06-25T19:53:04
|
Python
|
UTF-8
|
Python
| false
| false
| 4,256
|
py
|
flake8_stripe.py
|
# Hint: if you're developing this plugin, test changes with:
# venv/bin/tox -e lint -r
# so that tox re-installs the plugin from the local directory
import ast
from typing import Iterator, Tuple
class TypingImportsChecker:
name = __name__
version = "0.1.0"
# Rules:
# * typing_extensions v4.1.1 is the latest that supports Python 3.6
# so don't depend on anything from a more recent version than that.
#
# If we need something newer, maybe we can provide it for users on
# newer versions with a conditional import, but we'll cross that
# bridge when we come to it.
# If a symbol exists in both `typing` and `typing_extensions`, which
# should you use? Prefer `typing_extensions` if the symbol is available there
# in 4.1.1. In typing_extensions 4.7.0, `typing_extensions` started re-exporting
# EVERYTHING from `typing` but this is not the case in v4.1.1.
allowed_typing_extensions_imports = [
"Literal",
"NoReturn",
"Protocol",
"TYPE_CHECKING",
"Type",
"TypedDict",
"Self",
]
allowed_typing_imports = [
"Any",
"ClassVar",
"Optional",
"TypeVar",
"Union",
"cast",
"overload",
"Dict",
"List",
"Generic",
]
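    # Illustrative effect of the rules above (examples, not exhaustive):
    #   from typing import Literal          -> SPY100 (import it from typing_extensions)
    #   from typing import Awaitable        -> SPY101 (on neither allowlist)
    #   from typing_extensions import Dict  -> SPY102 (import it from typing)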
def __init__(self, tree: ast.AST):
self.tree = tree
intersection = set(self.allowed_typing_imports) & set(
self.allowed_typing_extensions_imports
)
if len(intersection) > 0:
raise AssertionError(
"TypingImportsChecker: allowed_typing_imports and allowed_typing_extensions_imports must not overlap. Both entries contained: %s"
% (intersection)
)
def run(self) -> Iterator[Tuple[int, int, str, type]]:
for node in ast.walk(self.tree):
if isinstance(node, ast.ImportFrom):
if node.module == "typing":
for name in node.names:
if name.name not in self.allowed_typing_imports:
msg = None
if (
name.name
in self.allowed_typing_extensions_imports
):
msg = (
"SPY100 Don't import %s from 'typing', instead import from 'typing_extensions'"
% (name.name)
)
else:
msg = (
"SPY101 Importing %s from 'typing' is prohibited. Do you need to add to the allowlist in flake8_stripe.py?"
% (name.name)
)
yield (
name.lineno,
name.col_offset,
msg,
type(self),
)
elif node.module == "typing_extensions":
for name in node.names:
if (
name.name
not in self.allowed_typing_extensions_imports
):
msg = None
if name.name in self.allowed_typing_imports:
msg = (
"SPY102 Don't import '%s' from 'typing_extensions', instead import from 'typing'"
% (name.name)
)
else:
msg = (
"SPY103 Importing '%s' from 'typing_extensions' is prohibited. Do you need to add to the allowlist in flake8_stripe.py?"
% (name.name)
)
yield (
name.lineno,
name.col_offset,
msg,
type(self),
)
|
0f476a903dded4ad308253e5aeb6773ede787de9
|
4c800425b941243c521f0a878c1b12a8f5a50585
|
/deepreg/model/backbone/interface.py
|
36fe4be9511bdf553d8988b0e0061697220dcd36
|
[
"Apache-2.0"
] |
permissive
|
DeepRegNet/DeepReg
|
f7af4554c89a7a40a53bac9f7fc9939402d1110d
|
650a2f1a88ad3c6932be305d6a98a36e26feedc6
|
refs/heads/main
| 2023-04-06T20:40:38.722315
| 2022-05-18T21:52:19
| 2022-05-18T21:52:19
| 269,365,590
| 509
| 78
|
Apache-2.0
| 2023-03-11T12:18:21
| 2020-06-04T13:21:37
|
Python
|
UTF-8
|
Python
| false
| false
| 1,834
|
py
|
interface.py
|
from abc import abstractmethod
import tensorflow as tf
class Backbone(tf.keras.Model):
"""
Interface class for backbones.
"""
def __init__(
self,
image_size: tuple,
num_channel_initial: int,
out_kernel_initializer: str,
out_activation: str,
out_channels: int,
name: str = "Backbone",
**kwargs,
):
"""
Init.
:param image_size: (dim1, dim2, dim3), dims of input image.
:param num_channel_initial: number of initial channels; controls the network size
:param out_kernel_initializer: kernel initializer for the last layer
:param out_activation: activation at the last layer
:param out_channels: number of channels for the output
:param name: name of the backbone.
:param kwargs: additional arguments.
"""
super().__init__(name=name, **kwargs)
self.image_size = image_size
self.num_channel_initial = num_channel_initial
self.out_kernel_initializer = out_kernel_initializer
self.out_activation = out_activation
self.out_channels = out_channels
@abstractmethod
def call(self, inputs: tf.Tensor, training=None, mask=None):
"""
Forward.
:param inputs: shape = (batch, dim1, dim2, dim3, in_channels)
:param training:
:param mask:
:return:
"""
def get_config(self) -> dict:
"""Return the config dictionary for recreating this class."""
return dict(
image_size=self.image_size,
num_channel_initial=self.num_channel_initial,
out_kernel_initializer=self.out_kernel_initializer,
out_activation=self.out_activation,
out_channels=self.out_channels,
name=self.name,
)
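# A minimal subclass sketch (illustrative only; the layer choice is an
# assumption, not one of the actual DeepReg backbones):
#
#     class TinyBackbone(Backbone):
#         def __init__(self, **kwargs):
#             super().__init__(**kwargs)
#             self._conv = tf.keras.layers.Conv3D(self.out_channels, 3, padding="same")
#
#         def call(self, inputs, training=None, mask=None):
#             return self._conv(inputs)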
|
5958b90229f4f643d077c2c41ca7a1c32a6a4023
|
56f9e2a097f827c13ccaee56c7dfeb7a1ed2cc4f
|
/stylegan_human/bg_white.py
|
f9bd13169baf5e000599b5b45d22e6d76726518c
|
[
"CC-BY-NC-4.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
saic-mdal/HiDT
|
ae825455aec994edbc5730f896d39eb27bc5c6e5
|
f023fb99879bb5043650124cc7c14f0a60d3577b
|
refs/heads/main
| 2023-07-25T07:29:48.531637
| 2023-07-24T10:43:10
| 2023-07-24T10:43:10
| 247,065,521
| 669
| 90
|
BSD-3-Clause
| 2022-10-20T19:00:18
| 2020-03-13T12:23:52
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,771
|
py
|
bg_white.py
|
# Copyright (c) SenseTime Research. All rights reserved.
import os
import click
import cv2
import numpy as np
def bg_white(seg, raw, blur_level=3, gaussian=81):
seg = cv2.blur(seg, (blur_level, blur_level))
empty = np.ones_like(seg)
seg_bg = (empty - seg) * 255
seg_bg = cv2.GaussianBlur(seg_bg,(gaussian,gaussian),0)
background_mask = cv2.cvtColor(255 - cv2.cvtColor(seg, cv2.COLOR_BGR2GRAY), cv2.COLOR_GRAY2BGR)
masked_fg = (raw * (1 / 255)) * (seg * (1 / 255))
masked_bg = (seg_bg * (1 / 255)) * (background_mask * (1 / 255))
frame = np.uint8(cv2.add(masked_bg,masked_fg)*255)
return frame
"""
Turn the image background white.
Examples:
\b
python bg_white.py --raw_img_dir=./SHHQ-1.0/no_segment/ --raw_seg_dir=./SHHQ-1.0/segments/ \\
--outdir=./SHHQ-1.0/bg_white/
"""
@click.command()
@click.pass_context
@click.option('--raw_img_dir', default="./SHHQ-1.0/no_segment/", help='folder of raw image', required=True)
@click.option('--raw_seg_dir', default='./SHHQ-1.0/segments/', help='folder of segmentation masks', required=True)
@click.option('--outdir', help='Where to save the output images', default= "./SHHQ-1.0/bg_white/" , type=str, required=True, metavar='DIR')
def main(
ctx: click.Context,
raw_img_dir: str,
raw_seg_dir: str,
outdir: str):
os.makedirs(outdir, exist_ok=True)
files = os.listdir(raw_img_dir)
for file in files:
print(file)
raw = cv2.imread(os.path.join(raw_img_dir, file))
seg = cv2.imread(os.path.join(raw_seg_dir, file))
assert raw is not None
assert seg is not None
white_frame = bg_white(seg, raw)
cv2.imwrite(os.path.join(outdir,file), white_frame)
if __name__ == "__main__":
main()
|
4e0fd7cd901e9dca95311bd4a5910935cc5d481e
|
5aaa9a22478723dbafce0238fd84a2aa6994f050
|
/yabox/algorithms/base.py
|
394f56c4592ed0d223fde062a2cc0a893622b69f
|
[
"Apache-2.0"
] |
permissive
|
pablormier/yabox
|
ec75cc32738e16b0466e0f8ab89e670e62546b3c
|
a3ed449edf4349849c598fb1e5fe8e787ed57131
|
refs/heads/master
| 2022-12-09T18:26:10.983040
| 2022-12-06T09:07:39
| 2022-12-06T09:07:39
| 97,233,963
| 141
| 20
|
Apache-2.0
| 2022-12-06T09:07:40
| 2017-07-14T12:57:43
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,414
|
py
|
base.py
|
# -*- coding: utf-8 -*-
import numpy as np
def binomial_crossover(target, mutant, cr):
n = len(target)
p = np.random.rand(n) < cr
if not np.any(p):
p[np.random.randint(0, n)] = True
return np.where(p, mutant, target)
def random_sample(population, exclude, size=3):
# Optimized version using numpy
idxs = list(range(population.shape[0]))
idxs.remove(exclude)
sample = np.random.choice(idxs, size=size, replace=False)
return population[sample]
def rand1(target_idx, population, f):
a, b, c = random_sample(population, target_idx)
return a + f * (b - c)
def denormalize(min_, diff, matrix):
return min_ + matrix * diff
def random_repair(x):
# Detect the positions where the parameters are out of bounds
loc = np.logical_or(x < 0, x > 1)
# Count the number of invalid params
count = np.sum(loc)
# Replace each out-of-bounds position with a new random number in [0, 1]
if count > 0:
np.place(x, loc, np.random.rand(count))
return x
def bound_repair(x):
return np.clip(x, 0, 1)
def random_init(popsize, dimensions):
return np.random.rand(popsize, dimensions)
def dither_from_interval(interval):
low, up = min(interval), max(interval)
if low == up:
return low
return np.random.uniform(low, up)
def dither(*intervals):
return [dither_from_interval(interval) for interval in intervals]
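# A sketch of how the helpers above compose into one differential-evolution
# step (illustrative only; the actual algorithm loop lives elsewhere in yabox):
def _example_de_step(population, fitness, target_idx, f=0.8, cr=0.9):
    # Mutate with rand/1, repair out-of-bounds genes, then binomially cross over.
    mutant = bound_repair(rand1(target_idx, population, f))
    trial = binomial_crossover(population[target_idx], mutant, cr)
    # Greedy selection: keep the trial vector only if it improves the fitness.
    return trial if fitness(trial) < fitness(population[target_idx]) else population[target_idx]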
|
2e9bbb9c825311bdc63d821d4a6d016c30240a7a
|
b48f14f38a4c95af0fa07c10c181a607a8c29cd7
|
/src/pynlpir/nlpir.py
|
ac223c54352042580a03d87a41e044210c21bddc
|
[
"MIT"
] |
permissive
|
tsroten/pynlpir
|
7fc3cb67966ea0b12489073d63c545f8d1da13f8
|
515c77dbc79e35aad96d81071af9385c213f0758
|
refs/heads/main
| 2023-08-09T18:58:26.009949
| 2023-07-24T12:10:39
| 2023-07-24T12:10:39
| 18,347,833
| 587
| 153
|
MIT
| 2023-07-24T12:10:40
| 2014-04-01T22:55:19
|
Python
|
UTF-8
|
Python
| false
| false
| 6,495
|
py
|
nlpir.py
|
# -*- coding: utf-8 -*-
"""This module uses :mod:`ctypes` to provide a Python API to NLPIR.
Other than argument names used in this documentation, the functions are left
the same as they are in NLPIR.
When this module is imported, the NLPIR library is imported and the functions
listed below are exported by a :class:`ctypes.CDLL` instance.
There is a less extensive, easier-to-use NLPIR interface directly in the
:mod:`pynlpir` module.
:func:`Init` must be called before any other NLPIR functions can be called.
After using the API, you can call :func:`Exit` to exit the API and free up
allocated memory.
"""
from ctypes import (
c_bool,
c_char,
c_char_p,
c_double,
c_int,
c_uint,
c_ulong,
c_void_p,
cdll,
POINTER,
Structure,
)
import logging
import os
import sys
logger = logging.getLogger("pynlpir.nlpir")
#: The absolute path to this package (used by NLPIR to find its ``Data``
#: directory).
PACKAGE_DIR = os.path.abspath(os.path.dirname(__file__))
#: The absolute path to this path's lib directory.
LIB_DIR = os.path.join(PACKAGE_DIR, "lib")
#: NLPIR's GBK encoding constant.
GBK_CODE = 0
#: NLPIR's UTF-8 encoding constant.
UTF8_CODE = 1
#: NLPIR's BIG5 encoding constant.
BIG5_CODE = 2
#: NLPIR's GBK (Traditional Chinese) encoding constant.
GBK_FANTI_CODE = 3
#: ICTCLAS part of speech constant #2.
ICT_POS_MAP_SECOND = 0
#: ICTCLAS part of speech constant #1.
ICT_POS_MAP_FIRST = 1
#: PKU part of speech constant #2.
PKU_POS_MAP_SECOND = 2
#: PKU part of speech constant #1.
PKU_POS_MAP_FIRST = 3
class ResultT(Structure):
"""The NLPIR ``result_t`` structure."""
_fields_ = [
# The start position of the word in the source Chinese text string.
("start", c_int),
# The detected word's length.
("length", c_int),
# A string representing the word's part of speech.
("sPOS", c_char * 40),
("iPOS", c_int),
("word_ID", c_int),
# If the word is found in the user's dictionary.
("word_type", c_int),
# The weight of the detected word.
("weight", c_int),
]
def load_library(platform, is_64bit, lib_dir=LIB_DIR):
"""Loads the NLPIR library appropriate for the user's system.
This function is called automatically when this module is loaded.
:param str platform: The platform identifier for the user's system.
:param bool is_64bit: Whether or not the user's system is 64-bit.
:param str lib_dir: The directory that contains the library files
(defaults to :data:`LIB_DIR`).
:raises RuntimeError: The user's platform is not supported by NLPIR.
"""
logger.debug("Loading NLPIR library file from '{0}'".format(lib_dir))
if platform.startswith("win") and is_64bit:
lib = os.path.join(lib_dir, "NLPIR64")
logger.debug("Using library file for 64-bit Windows.")
elif platform.startswith("win"):
lib = os.path.join(lib_dir, "NLPIR32")
logger.debug("Using library file for 32-bit Windows.")
elif platform.startswith("linux") and is_64bit:
lib = os.path.join(lib_dir, "libNLPIR64.so")
logger.debug("Using library file for 64-bit GNU/Linux.")
elif platform.startswith("linux"):
lib = os.path.join(lib_dir, "libNLPIR32.so")
logger.debug("Using library file for 32-bit GNU/Linux.")
elif platform == "darwin":
lib = os.path.join(lib_dir, "libNLPIRios.so")
logger.debug("Using library file for OSX/iOS.")
else:
raise RuntimeError("Platform '{0}' is not supported by NLPIR.".format(platform))
lib_nlpir = cdll.LoadLibrary(lib)
logger.debug("NLPIR library file '{0}' loaded.".format(lib))
return lib_nlpir
is_64bit = sys.maxsize > 2**32
#: A :class:`ctypes.CDLL` instance for the NLPIR API library.
libNLPIR = load_library(sys.platform, is_64bit) # noqa: N816
def get_func(name, argtypes=None, restype=c_int, lib=libNLPIR):
"""Retrieves the corresponding NLPIR function.
:param str name: The name of the NLPIR function to get.
:param list argtypes: A list of :mod:`ctypes` data types that correspond
to the function's argument types.
:param restype: A :mod:`ctypes` data type that corresponds to the
function's return type (only needed if the return type isn't
:class:`ctypes.c_int`).
:param lib: A :class:`ctypes.CDLL` instance for the NLPIR API library where
the function will be retrieved from (defaults to :data:`libNLPIR`).
:returns: The exported function. It can be called like any other Python
callable.
"""
logger.debug(
"Getting NLPIR API function: 'name': '{0}', 'argtypes': '{1}',"
" 'restype': '{2}'.".format(name, argtypes, restype)
)
func = getattr(lib, name)
if argtypes is not None:
func.argtypes = argtypes
if restype is not c_int:
func.restype = restype
logger.debug("NLPIR API function '{0}' retrieved.".format(name))
return func
# Get the exported NLPIR API functions.
Init = get_func("NLPIR_Init", [c_char_p, c_int, c_char_p], c_bool)
Exit = get_func("NLPIR_Exit", restype=c_bool)
ParagraphProcess = get_func("NLPIR_ParagraphProcess", [c_char_p, c_int], c_char_p)
ParagraphProcessA = get_func(
"NLPIR_ParagraphProcessA", [c_char_p, c_void_p, c_bool], POINTER(ResultT)
)
FileProcess = get_func("NLPIR_FileProcess", [c_char_p, c_char_p, c_int], c_double)
ImportUserDict = get_func("NLPIR_ImportUserDict", [c_char_p], c_uint)
AddUserWord = get_func("NLPIR_AddUserWord", [c_char_p])
SaveTheUsrDic = get_func("NLPIR_SaveTheUsrDic")
DelUsrWord = get_func("NLPIR_DelUsrWord", [c_char_p])
GetKeyWords = get_func("NLPIR_GetKeyWords", [c_char_p, c_int, c_bool], c_char_p)
GetFileKeyWords = get_func("NLPIR_GetFileKeyWords", [c_char_p, c_int, c_bool], c_char_p)
GetNewWords = get_func("NLPIR_GetNewWords", [c_char_p, c_int, c_bool], c_char_p)
GetFileNewWords = get_func("NLPIR_GetFileNewWords", [c_char_p, c_int, c_bool], c_char_p)
FingerPrint = get_func("NLPIR_FingerPrint", [c_char_p], c_ulong)
SetPOSmap = get_func("NLPIR_SetPOSmap", [c_int])
NWI_Start = get_func("NLPIR_NWI_Start", None, c_bool)
NWI_AddFile = get_func("NLPIR_NWI_AddFile", [c_char_p], c_bool)
NWI_AddMem = get_func("NLPIR_NWI_AddMem", [c_char_p], c_bool)
NWI_Complete = get_func("NLPIR_NWI_Complete", None, c_bool)
NWI_GetResult = get_func("NLPIR_NWI_GetResult", [c_bool], c_char_p)
NWI_Result2UserDict = get_func("NLPIR_NWI_Result2UserDict", None, c_bool)
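# Illustrative call sequence (a sketch inferred from the module docstring above,
# not a verbatim example from the package; argument meanings are assumptions):
#
#     if not Init(PACKAGE_DIR.encode("utf-8"), UTF8_CODE, None):
#         raise RuntimeError("NLPIR initialization failed")
#     try:
#         print(ParagraphProcess("这是一个测试".encode("utf-8"), 1))
#     finally:
#         Exit()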
|
4a6a824f35c487043ea0af0825544cbcf5f4e767
|
ae575a6207acf1bae6252aac20f7934c87e55e44
|
/wechat_django/pay/admin/payapp.py
|
ed25397d2cc3ec1bd6b198eaa452a0d338c5cfa2
|
[
"MIT"
] |
permissive
|
Xavier-Lam/wechat-django
|
57c28c189f7e443dc94a7a8393d8597f4610bb34
|
526e2d9d261fde8279314cf30fb70dbeb439d943
|
refs/heads/master
| 2023-08-17T02:25:46.025887
| 2023-08-15T02:05:49
| 2023-08-15T02:05:49
| 171,990,883
| 185
| 62
|
MIT
| 2023-08-15T02:05:50
| 2019-02-22T03:51:44
|
Python
|
UTF-8
|
Python
| false
| false
| 4,378
|
py
|
payapp.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import forms
from django.contrib import admin
from django.utils.translation import gettext_lazy as _
from wechat_django.admin.base import has_wechat_permission
from wechat_django.admin.wechatapp import WeChatAppAdmin
from wechat_django.constants import AppType
from wechat_django.models import WeChatApp
from ..models import WeChatPay
class WeChatPayForm(forms.ModelForm):
clear_certs = forms.BooleanField(
label=_("clear certs"), initial=False, required=False,
help_text=_("Your mch_cert already uploaded"))
_mch_cert = forms.FileField(
label=_("mch_cert"), required=False, help_text=_("商户证书"))
_mch_key = forms.FileField(
label=_("mch_key"), required=False, help_text=_("商户证书私钥"))
class Meta(object):
model = WeChatPay
fields = (
"title", "name", "weight", "mch_id", "api_key", "sub_mch_id",
"mch_app_id", "_mch_cert", "_mch_key", "clear_certs")
widgets = dict(
api_key=forms.PasswordInput(render_value=True),
)
def __init__(self, *args, **kwargs):
super(WeChatPayForm, self).__init__(*args, **kwargs)
# Handle form fields
inst = self.instance
if inst.pk:
self._readonly_field("name")
self._readonly_field("mch_id")
if not inst.mch_app_id:
self._remove_field("sub_mch_id")
self._remove_field("mch_app_id")
if inst.pk and inst.mch_cert and inst.mch_key:
self._remove_field("_mch_cert")
self._remove_field("_mch_key")
else:
self._remove_field("clear_certs")
def _remove_field(self, field):
self.fields[field].widget = forms.widgets.HiddenInput()
self.fields[field].disabled = True
def _readonly_field(self, field):
self.fields[field].disabled = True
def clean__mch_cert(self):
file = self.cleaned_data.get("_mch_cert")
if file:
return file.read()
return None
def clean__mch_key(self):
file = self.cleaned_data.get("_mch_key")
if file:
return file.read()
return None
def clean(self):
rv = super(WeChatPayForm, self).clean()
mch_cert = rv.get("_mch_cert")
mch_key = rv.get("_mch_key")
if (mch_cert or mch_key) and not (mch_cert and mch_key):
self.add_error(
"_mch_cert", _("must upload both mch_cert and mch_key"))
return rv
def _post_clean(self):
super(WeChatPayForm, self)._post_clean()
# Handle the certificate files
if self.cleaned_data.get("clear_certs"):
self.instance.mch_cert = None
self.instance.mch_key = None
if self.cleaned_data.get("_mch_cert"):
self.instance.mch_cert = self.cleaned_data.pop("_mch_cert")
if self.cleaned_data.get("_mch_key"):
self.instance.mch_key = self.cleaned_data.pop("_mch_key")
class WeChatPayInline(admin.StackedInline):
form = WeChatPayForm
model = WeChatPay
def get_extra(self, request, obj=None):
return 0 if obj.pay else 1
admin.site.unregister(WeChatApp)
@admin.register(WeChatApp)
class WeChatAppWithPayAdmin(WeChatAppAdmin):
inlines = (WeChatPayInline,)
def get_inline_instances(self, request, obj=None):
rv = super(WeChatAppWithPayAdmin, self).get_inline_instances(request,
obj)
if not obj\
or not obj.type & (AppType.SERVICEAPP
| AppType.MINIPROGRAM
| AppType.PAYPARTNER)\
or not has_wechat_permission(request, obj, "pay", "manage"):
rv = tuple(filter(lambda o: not isinstance(o, WeChatPayInline),
rv))
return rv
def get_deleted_objects(self, objs, request):
from ..models import UnifiedOrder
deleted_objects, model_count, perms_needed, protected =\
super(WeChatAppWithPayAdmin, self).get_deleted_objects(objs, request)
ignored_models = (UnifiedOrder._meta.verbose_name,)
perms_needed = perms_needed.difference(ignored_models)
return deleted_objects, model_count, perms_needed, protected
|
3c12a7138de6cd348c0eed7157a5428b45e91576
|
6d652aa802d90571a640ac0b538ff3055d0e34c5
|
/i3pystatus/updates/yaourt.py
|
8855f3169970be8e72dd718e8ac4247d10560ecb
|
[
"MIT"
] |
permissive
|
enkore/i3pystatus
|
38eaea8203ed309ff90e1717bd3a9075f12590b0
|
0820dd4e3d479dddec7797b2ea9a83da0f62b7cf
|
refs/heads/current
| 2023-08-18T11:36:18.296269
| 2023-04-25T20:56:08
| 2023-04-25T20:56:08
| 8,130,605
| 438
| 244
|
MIT
| 2023-08-13T12:13:33
| 2013-02-11T01:01:15
|
Python
|
UTF-8
|
Python
| false
| false
| 1,296
|
py
|
yaourt.py
|
from i3pystatus.core.command import run_through_shell
from i3pystatus.updates import Backend
class Yaourt(Backend):
"""
This module counts the available updates using yaourt.
By default it will only count aur packages. Thus it can be used with the
pacman backend like this:
.. code-block:: python
from i3pystatus.updates import pacman, yaourt
status.register("updates", backends = \
[pacman.Pacman(), yaourt.Yaourt()])
To count both pacman and aur packages, pass False in the constructor:
.. code-block:: python
from i3pystatus.updates import yaourt
status.register("updates", backends = [yaourt.Yaourt(False)])
"""
def __init__(self, aur_only=True):
self.aur_only = aur_only
@property
def updates(self):
command = ["yaourt", "-Qua"]
checkupdates = run_through_shell(command)
out = checkupdates.out
if self.aur_only:
out = "".join([line for line in out.splitlines(True)
if line.startswith("aur")])
return out.count("\n"), out
Backend = Yaourt
if __name__ == "__main__":
"""
Call this module directly; Print the update count and notification body.
"""
print("Updates: {}\n\n{}".format(*Backend().updates))
|
9c6a8466a061f4c33141878cde3ff3a82eb03534
|
5dc77586e3e0f9de1f032fd2ca68494d8e58928f
|
/tests/cli/test_init_sqlite.py
|
083d48e1553929b14598efd118d1721ca0fdf20e
|
[
"Apache-2.0"
] |
permissive
|
great-expectations/great_expectations
|
dd7c22e6277d6b08bee3ff38a015e6e8cd434df6
|
b0290e2fd2aa05aec6d7d8871b91cb4478e9501d
|
refs/heads/develop
| 2023-09-04T09:30:26.395518
| 2023-09-02T00:00:13
| 2023-09-02T00:00:13
| 103,071,520
| 8,931
| 1,535
|
Apache-2.0
| 2023-09-14T19:57:16
| 2017-09-11T00:18:46
|
Python
|
UTF-8
|
Python
| false
| false
| 22,782
|
py
|
test_init_sqlite.py
|
import os
import re
import shutil
import pytest
from click.testing import CliRunner
from freezegun import freeze_time
from great_expectations.cli import cli
from great_expectations.data_context.data_context.file_data_context import (
FileDataContext,
)
from great_expectations.data_context.util import file_relative_path
from great_expectations.util import gen_directory_tree_str, get_context
from tests.cli.test_cli import yaml
from tests.cli.test_init_pandas import _delete_and_recreate_dir
from tests.cli.utils import assert_no_logging_messages_or_tracebacks
from unittest import mock
pytestmark = pytest.mark.cli
@pytest.fixture
def titanic_sqlite_db_file(sa, tmp_path_factory):
temp_dir = str(tmp_path_factory.mktemp("foo_path"))
fixture_db_path = file_relative_path(__file__, "../test_sets/titanic.db")
db_path = os.path.join(temp_dir, "titanic.db") # noqa: PTH118
shutil.copy(fixture_db_path, db_path)
engine = sa.create_engine(f"sqlite:///{db_path}", pool_recycle=3600)
with engine.connect() as connection:
assert connection.execute(sa.text("select count(*) from titanic")).fetchall()[
0
] == (1313,)
return db_path
@pytest.mark.xfail(
reason="This command is not yet implemented for the modern API",
run=True,
strict=True,
)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
@freeze_time("09/26/2019 13:42:41")
def test_cli_init_on_new_project(
mock_webbrowser, caplog, monkeypatch, tmp_path_factory, titanic_sqlite_db_file, sa
):
project_dir = str(tmp_path_factory.mktemp("test_cli_init_diff"))
ge_dir = os.path.join(project_dir, FileDataContext.GX_DIR) # noqa: PTH118
database_path = os.path.join(project_dir, "titanic.db") # noqa: PTH118
shutil.copy(titanic_sqlite_db_file, database_path)
engine = sa.create_engine(f"sqlite:///{database_path}", pool_recycle=3600)
inspector = sa.inspect(engine)
# get the default schema and table for testing
schemas = inspector.get_schema_names()
default_schema = schemas[0]
tables = [
table_name for table_name in inspector.get_table_names(schema=default_schema)
]
default_table = tables[0]
runner = CliRunner(mix_stderr=False)
monkeypatch.chdir(project_dir)
result = runner.invoke(
cli,
["init"],
input=f"\n\n2\n6\ntitanic\n{engine.url}\n\n\n1\n{default_schema}\n{default_table}\nwarning\n\n\n\n",
catch_exceptions=False,
)
stdout = result.output
assert len(stdout) < 6000, "CLI output is unreasonably long."
assert "Always know what to expect from your data" in stdout
assert "What data would you like Great Expectations to connect to" in stdout
assert "Which database backend are you using" in stdout
assert "Give your new Datasource a short name" in stdout
assert "What is the url/connection string for the sqlalchemy connection" in stdout
assert "Attempting to connect to your database." in stdout
assert "Great Expectations connected to your database" in stdout
assert (
"You have selected a datasource that is a SQL database. How would you like to specify the data?"
in stdout
)
assert "Name the new Expectation Suite [main.titanic.warning]" in stdout
assert (
"Great Expectations will choose a couple of columns and generate expectations about them"
in stdout
)
assert "Generating example Expectation Suite..." in stdout
assert "Building" in stdout
assert "Data Docs" in stdout
assert "Great Expectations is now set up" in stdout
context = get_context(context_root_dir=ge_dir)
assert len(context.list_datasources()) == 1
assert context.list_datasources()[0]["class_name"] == "SqlAlchemyDatasource"
assert context.list_datasources()[0]["name"] == "titanic"
first_suite = context.list_expectation_suites()[0]
suite = context.get_expectation_suite(first_suite.expectation_suite_name)
assert len(suite.expectations) == 14
assert os.path.isdir(ge_dir) # noqa: PTH112
config_path = os.path.join( # noqa: PTH118
project_dir, "great_expectations/great_expectations.yml"
)
assert os.path.isfile(config_path) # noqa: PTH113
config = yaml.load(open(config_path))
data_source_class = config["datasources"]["titanic"]["data_asset_type"][
"class_name"
]
assert data_source_class == "SqlAlchemyDataset"
obs_tree = gen_directory_tree_str(ge_dir)
# Instead of monkey patching guids, just regex out the guids
guid_safe_obs_tree = re.sub(
r"[a-z0-9]{32}(?=\.(json|html))", "foobarbazguid", obs_tree
)
# print(guid_safe_obs_tree)
assert (
guid_safe_obs_tree
== """\
great_expectations/
.gitignore
great_expectations.yml
checkpoints/
expectations/
.ge_store_backend_id
warning.json
plugins/
custom_data_docs/
renderers/
styles/
data_docs_custom_styles.css
views/
profilers/
uncommitted/
config_variables.yml
data_docs/
local_site/
index.html
expectations/
warning.html
static/
fonts/
HKGrotesk/
HKGrotesk-Bold.otf
HKGrotesk-BoldItalic.otf
HKGrotesk-Italic.otf
HKGrotesk-Light.otf
HKGrotesk-LightItalic.otf
HKGrotesk-Medium.otf
HKGrotesk-MediumItalic.otf
HKGrotesk-Regular.otf
HKGrotesk-SemiBold.otf
HKGrotesk-SemiBoldItalic.otf
images/
favicon.ico
glossary_scroller.gif
iterative-dev-loop.png
logo-long-vector.svg
logo-long.png
short-logo-vector.svg
short-logo.png
validation_failed_unexpected_values.gif
styles/
data_docs_custom_styles_template.css
data_docs_default_styles.css
validations/
warning/
20190926T134241.000000Z/
20190926T134241.000000Z/
foobarbazguid.html
validations/
.ge_store_backend_id
warning/
20190926T134241.000000Z/
20190926T134241.000000Z/
foobarbazguid.json
"""
)
assert_no_logging_messages_or_tracebacks(caplog, result)
assert result.exit_code == 0
assert mock_webbrowser.call_count == 1
assert (
"{}/great_expectations/uncommitted/data_docs/local_site/validations/warning/".format(
project_dir
)
in mock_webbrowser.call_args[0][0]
)
@pytest.mark.xfail(
reason="This command is not yet implemented for the modern API",
run=True,
strict=True,
)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_cli_init_on_new_project_extra_whitespace_in_url(
mock_webbrowser, caplog, monkeypatch, tmp_path_factory, titanic_sqlite_db_file, sa
):
project_dir = str(tmp_path_factory.mktemp("test_cli_init_diff"))
ge_dir = os.path.join(project_dir, FileDataContext.GX_DIR) # noqa: PTH118
database_path = os.path.join(project_dir, "titanic.db") # noqa: PTH118
shutil.copy(titanic_sqlite_db_file, database_path)
engine = sa.create_engine(f"sqlite:///{database_path}", pool_recycle=3600)
engine_url_with_added_whitespace = " " + str(engine.url) + " "
inspector = sa.inspect(engine)
# get the default schema and table for testing
schemas = inspector.get_schema_names()
default_schema = schemas[0]
tables = [
table_name for table_name in inspector.get_table_names(schema=default_schema)
]
default_table = tables[0]
runner = CliRunner(mix_stderr=False)
monkeypatch.chdir(project_dir)
result = runner.invoke(
cli,
["init"],
input=f"\n\n2\n6\ntitanic\n{engine_url_with_added_whitespace}\n\n\n1\n{default_schema}\n{default_table}\nwarning\n\n\n\n",
catch_exceptions=False,
)
stdout = result.output
assert len(stdout) < 6000, "CLI output is unreasonably long."
assert "Always know what to expect from your data" in stdout
assert "What data would you like Great Expectations to connect to" in stdout
assert "Which database backend are you using" in stdout
assert "Give your new Datasource a short name" in stdout
assert "What is the url/connection string for the sqlalchemy connection" in stdout
assert "Attempting to connect to your database." in stdout
assert "Great Expectations connected to your database" in stdout
assert (
"You have selected a datasource that is a SQL database. How would you like to specify the data?"
in stdout
)
assert "Name the new Expectation Suite [main.titanic.warning]" in stdout
assert (
"Great Expectations will choose a couple of columns and generate expectations about them"
in stdout
)
assert "Generating example Expectation Suite..." in stdout
assert "Building" in stdout
assert "Data Docs" in stdout
assert "Great Expectations is now set up" in stdout
context = get_context(context_root_dir=ge_dir)
assert len(context.list_datasources()) == 1
assert context.list_datasources() == [
{
"class_name": "SqlAlchemyDatasource",
"name": "titanic",
"module_name": "great_expectations.datasource",
"credentials": {"url": str(engine.url)},
"data_asset_type": {
"class_name": "SqlAlchemyDataset",
"module_name": "great_expectations.dataset",
},
}
]
first_suite = context.list_expectation_suites()[0]
suite = context.get_expectation_suite(first_suite.expectation_suite_name)
assert len(suite.expectations) == 14
assert os.path.isdir(ge_dir) # noqa: PTH112
config_path = os.path.join( # noqa: PTH118
project_dir, "great_expectations/great_expectations.yml"
)
assert os.path.isfile(config_path) # noqa: PTH113
config = yaml.load(open(config_path))
data_source_class = config["datasources"]["titanic"]["data_asset_type"][
"class_name"
]
assert data_source_class == "SqlAlchemyDataset"
assert_no_logging_messages_or_tracebacks(caplog, result)
assert result.exit_code == 0
assert mock_webbrowser.call_count == 1
assert (
"{}/great_expectations/uncommitted/data_docs/local_site/validations/warning/".format(
project_dir
)
in mock_webbrowser.call_args[0][0]
)
@pytest.mark.xfail(
reason="This command is not yet implemented for the modern API",
run=True,
strict=True,
)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_init_on_existing_project_with_no_datasources_should_continue_init_flow_and_add_one(
mock_webbrowser,
caplog,
monkeypatch,
initialized_sqlite_project,
titanic_sqlite_db_file,
sa,
):
project_dir = initialized_sqlite_project
ge_dir = os.path.join(project_dir, FileDataContext.GX_DIR) # noqa: PTH118
_remove_all_datasources(ge_dir)
os.remove( # noqa: PTH107
os.path.join(ge_dir, "expectations", "warning.json") # noqa: PTH118
)
context = get_context(context_root_dir=ge_dir)
assert not context.list_expectation_suites()
runner = CliRunner(mix_stderr=False)
monkeypatch.chdir(project_dir)
url = f"sqlite:///{titanic_sqlite_db_file}"
inspector = sa.inspect(sa.create_engine(url))
# get the default schema and table for testing
schemas = inspector.get_schema_names()
default_schema = schemas[0]
tables = [
table_name for table_name in inspector.get_table_names(schema=default_schema)
]
default_table = tables[0]
with pytest.warns(
UserWarning, match="Warning. An existing `great_expectations.yml` was found"
):
result = runner.invoke(
cli,
["init"],
input=f"\n\n2\n6\nsqlite\n{url}\n\n\n1\n{default_schema}\n{default_table}\nmy_suite\n\n\n\n",
catch_exceptions=False,
)
stdout = result.stdout
assert result.exit_code == 0
assert mock_webbrowser.call_count == 1
assert (
"{}/great_expectations/uncommitted/data_docs/local_site/validations/my_suite/".format(
project_dir
)
in mock_webbrowser.call_args[0][0]
)
assert "Error: invalid input" not in stdout
assert "Always know what to expect from your data" in stdout
assert "What data would you like Great Expectations to connect to" in stdout
assert (
"Next, we will configure database credentials and store them in the `sqlite` section"
in stdout
)
assert "What is the url/connection string for the sqlalchemy connection?" in stdout
assert (
"You have selected a datasource that is a SQL database. How would you like to specify the data?"
in stdout
)
assert "Great Expectations connected to your database" in stdout
assert "This looks like an existing project that" not in stdout
config = _load_config_file(
os.path.join(ge_dir, FileDataContext.GX_YML) # noqa: PTH118
)
assert "sqlite" in config["datasources"].keys()
context = get_context(context_root_dir=ge_dir)
assert context.list_datasources() == [
{
"class_name": "SqlAlchemyDatasource",
"name": "sqlite",
"module_name": "great_expectations.datasource",
"credentials": {"url": url},
"data_asset_type": {
"class_name": "SqlAlchemyDataset",
"module_name": "great_expectations.dataset",
},
}
]
assert context.list_expectation_suites()[0].expectation_suite_name == "my_suite"
assert len(context.list_expectation_suites()) == 1
assert_no_logging_messages_or_tracebacks(caplog, result)
def _remove_all_datasources(ge_dir):
config_path = os.path.join(ge_dir, FileDataContext.GX_YML) # noqa: PTH118
config = _load_config_file(config_path)
config["datasources"] = {}
with open(config_path, "w") as f:
yaml.dump(config, f)
context = get_context(context_root_dir=ge_dir)
assert context.list_datasources() == []
def _load_config_file(config_path):
assert os.path.isfile( # noqa: PTH113
config_path
), "Config file is missing. Check path"
with open(config_path) as f:
read = f.read()
config = yaml.load(read)
assert isinstance(config, dict)
return config
@pytest.fixture
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def initialized_sqlite_project(
mock_webbrowser, caplog, monkeypatch, tmp_path_factory, titanic_sqlite_db_file, sa
):
"""This is an initialized project through the CLI."""
project_dir = str(tmp_path_factory.mktemp("my_rad_project"))
engine = sa.create_engine(f"sqlite:///{titanic_sqlite_db_file}", pool_recycle=3600)
inspector = sa.inspect(engine)
# get the default schema and table for testing
schemas = inspector.get_schema_names()
default_schema = schemas[0]
tables = [
table_name for table_name in inspector.get_table_names(schema=default_schema)
]
default_table = tables[0]
runner = CliRunner(mix_stderr=False)
monkeypatch.chdir(project_dir)
result = runner.invoke(
cli,
["init"],
input=f"\n\n2\n6\ntitanic\n{engine.url}\n\n\n1\n{default_schema}\n{default_table}\nwarning\n\n\n\n",
catch_exceptions=False,
)
assert result.exit_code == 0
assert mock_webbrowser.call_count == 1
assert (
f"{project_dir}/great_expectations/uncommitted/data_docs/local_site/validations/warning/"
in mock_webbrowser.call_args[0][0]
)
assert_no_logging_messages_or_tracebacks(caplog, result)
context = get_context(
context_root_dir=os.path.join( # noqa: PTH118
project_dir, FileDataContext.GX_DIR
)
)
assert isinstance(context, FileDataContext)
assert len(context.list_datasources()) == 1
assert context.list_datasources() == [
{
"class_name": "SqlAlchemyDatasource",
"name": "titanic",
"module_name": "great_expectations.datasource",
"credentials": {"url": str(engine.url)},
"data_asset_type": {
"class_name": "SqlAlchemyDataset",
"module_name": "great_expectations.dataset",
},
}
]
return project_dir
@pytest.mark.xfail(
reason="This command is not yet implemented for the modern API",
run=True,
strict=True,
)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_init_on_existing_project_with_datasource_with_existing_suite_offer_to_build_docs_answer_no(
mock_webbrowser,
caplog,
monkeypatch,
initialized_sqlite_project,
):
project_dir = initialized_sqlite_project
runner = CliRunner(mix_stderr=False)
monkeypatch.chdir(project_dir)
with pytest.warns(
UserWarning, match="Warning. An existing `great_expectations.yml` was found"
):
result = runner.invoke(
cli,
["init"],
input="n\n",
catch_exceptions=False,
)
stdout = result.stdout
assert result.exit_code == 0
assert mock_webbrowser.call_count == 0
assert "Error: invalid input" not in stdout
assert "Always know what to expect from your data" in stdout
assert "This looks like an existing project that" in stdout
assert "appears complete" in stdout
assert "Would you like to build & view this project's Data Docs" in stdout
assert_no_logging_messages_or_tracebacks(caplog, result)
@pytest.mark.xfail(
reason="This command is not yet implemented for the modern API",
run=True,
strict=True,
)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_init_on_existing_project_with_datasource_with_existing_suite_offer_to_build_docs_answer_yes(
mock_webbrowser,
caplog,
monkeypatch,
initialized_sqlite_project,
):
project_dir = initialized_sqlite_project
runner = CliRunner(mix_stderr=False)
monkeypatch.chdir(project_dir)
with pytest.warns(
UserWarning, match="Warning. An existing `great_expectations.yml` was found"
):
result = runner.invoke(
cli,
["init"],
input="\n\n",
catch_exceptions=False,
)
stdout = result.stdout
assert result.exit_code == 0
assert mock_webbrowser.call_count == 1
assert (
f"{project_dir}/great_expectations/uncommitted/data_docs/local_site/index.html"
in mock_webbrowser.call_args[0][0]
)
assert "Error: invalid input" not in stdout
assert "Always know what to expect from your data" in stdout
assert "This looks like an existing project that" in stdout
assert "appears complete" in stdout
assert "Would you like to build & view this project's Data Docs" in stdout
assert_no_logging_messages_or_tracebacks(caplog, result)
@pytest.mark.xfail(
reason="This command is not yet implemented for the modern API",
run=True,
strict=True,
)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_init_on_existing_project_with_datasource_with_no_suite_create_one(
mock_webbrowser, caplog, monkeypatch, initialized_sqlite_project, sa
):
project_dir = initialized_sqlite_project
ge_dir = os.path.join(project_dir, FileDataContext.GX_DIR) # noqa: PTH118
uncommitted_dir = os.path.join(ge_dir, "uncommitted") # noqa: PTH118
# mangle the setup to remove all traces of any suite
expectations_dir = os.path.join(ge_dir, "expectations") # noqa: PTH118
data_docs_dir = os.path.join(uncommitted_dir, "data_docs") # noqa: PTH118
validations_dir = os.path.join(uncommitted_dir, "validations") # noqa: PTH118
_delete_and_recreate_dir(expectations_dir)
_delete_and_recreate_dir(data_docs_dir)
_delete_and_recreate_dir(validations_dir)
context = get_context(context_root_dir=ge_dir)
# get the datasource from data context
all_datasources = context.list_datasources()
datasource = all_datasources[0] if all_datasources else None
# create a sqlalchemy engine using the URL of existing datasource
engine = sa.create_engine(datasource.get("credentials", {}).get("url"))
inspector = sa.inspect(engine)
# get the default schema and table for testing
schemas = inspector.get_schema_names()
default_schema = schemas[0]
tables = [
table_name for table_name in inspector.get_table_names(schema=default_schema)
]
default_table = tables[0]
assert context.list_expectation_suites() == []
runner = CliRunner(mix_stderr=False)
monkeypatch.chdir(project_dir)
with pytest.warns(
UserWarning, match="Warning. An existing `great_expectations.yml` was found"
):
result = runner.invoke(
cli,
["init"],
input=f"\n1\n{default_schema}\n{default_table}\nsink_me\n\n\n\n",
catch_exceptions=False,
)
stdout = result.stdout
assert result.exit_code == 0
assert mock_webbrowser.call_count == 1
assert (
"{}/great_expectations/uncommitted/data_docs/local_site/validations/sink_me/".format(
project_dir
)
in mock_webbrowser.call_args[0][0]
)
assert "Always know what to expect from your data" in stdout
assert (
"You have selected a datasource that is a SQL database. How would you like to specify the data?"
in stdout
)
assert "Generating example Expectation Suite..." in stdout
assert "The following Data Docs sites will be built" in stdout
assert "Great Expectations is now set up" in stdout
assert "Error: invalid input" not in stdout
assert "This looks like an existing project that" not in stdout
assert_no_logging_messages_or_tracebacks(caplog, result)
context = get_context(context_root_dir=ge_dir)
assert len(context.list_expectation_suites()) == 1
|
bb644856bada2c88ca04f9360f67ecd392ea8f91
|
c1b77c0b1630c2e319e7ba7782a744f4ac867f7d
|
/test/likelihoods/test_beta_likelihood.py
|
52b2135a457e5140598d3b428d2f232a16f92ec4
|
[
"MIT",
"Python-2.0"
] |
permissive
|
cornellius-gp/gpytorch
|
6b9ab969b2888fa7f27f236a1b20041f00cc0253
|
5e93d2c04ac0634a7aeea9fd964be529bb250888
|
refs/heads/master
| 2023-08-31T21:13:02.741585
| 2023-08-25T19:24:53
| 2023-08-25T19:24:53
| 93,868,719
| 3,182
| 578
|
MIT
| 2023-09-13T01:06:00
| 2017-06-09T14:48:20
|
Python
|
UTF-8
|
Python
| false
| false
| 1,822
|
py
|
test_beta_likelihood.py
|
#!/usr/bin/env python3
import unittest
import torch
from gpytorch.likelihoods import _OneDimensionalLikelihood, BetaLikelihood
from gpytorch.test.base_likelihood_test_case import BaseLikelihoodTestCase
class TestBetaLikelihood(BaseLikelihoodTestCase, unittest.TestCase):
seed = 1
def _create_targets(self, batch_shape=torch.Size([])):
res = torch.sigmoid(torch.randn(*batch_shape, 5)).float()
return res
def create_likelihood(self, **kwargs):
return BetaLikelihood(**kwargs)
def _test_log_marginal(self, batch_shape):
likelihood = self.create_likelihood()
input = self._create_marginal_input(batch_shape)
target = self._create_targets(batch_shape)
output = likelihood.log_marginal(target, input)
self.assertTrue(torch.is_tensor(output))
self.assertEqual(output.shape, batch_shape + torch.Size([5]))
default_log_prob = _OneDimensionalLikelihood.log_marginal(likelihood, target, input)
self.assertAllClose(output.sum(-1), default_log_prob.sum(-1), rtol=0.25, atol=0.1)
def _test_log_prob(self, batch_shape):
likelihood = self.create_likelihood()
input = self._create_marginal_input(batch_shape)
target = self._create_targets(batch_shape)
output = likelihood.expected_log_prob(target, input)
self.assertTrue(torch.is_tensor(output))
self.assertEqual(output.shape, batch_shape + torch.Size([5]))
default_log_prob = _OneDimensionalLikelihood.expected_log_prob(likelihood, target, input)
self.assertAllClose(output.sum(-1), default_log_prob.sum(-1), rtol=0.25, atol=0.1)
def _test_marginal(self, batch_shape):
# Likelihood uses the default marginal behavior; there is no point in testing a set of samples against another set of samples.
pass
|
1133c05fb41f10921c102c7fc64f5c7297a67fc9
|
10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94
|
/Python/design-tic-tac-toe.py
|
4e0cb9b3f2adbc36c0d00cae949d7ffb70423574
|
[
"MIT"
] |
permissive
|
kamyu104/LeetCode-Solutions
|
f54822059405ef4df737d2e9898b024f051fd525
|
4dc4e6642dc92f1983c13564cc0fd99917cab358
|
refs/heads/master
| 2023-09-02T13:48:26.830566
| 2023-08-28T10:11:12
| 2023-08-28T10:11:12
| 152,631,182
| 4,549
| 1,651
|
MIT
| 2023-05-31T06:10:33
| 2018-10-11T17:38:35
|
C++
|
UTF-8
|
Python
| false
| false
| 1,385
|
py
|
design-tic-tac-toe.py
|
# Time: O(1), per move.
# Space: O(n^2)
class TicTacToe(object):
def __init__(self, n):
"""
Initialize your data structure here.
:type n: int
"""
self.__size = n
self.__rows = [[0, 0] for _ in xrange(n)]
self.__cols = [[0, 0] for _ in xrange(n)]
self.__diagonal = [0, 0]
self.__anti_diagonal = [0, 0]
def move(self, row, col, player):
"""
Player {player} makes a move at ({row}, {col}).
@param row The row of the board.
@param col The column of the board.
@param player The player, can be either 1 or 2.
@return The current winning condition, can be either:
0: No one wins.
1: Player 1 wins.
2: Player 2 wins.
:type row: int
:type col: int
:type player: int
:rtype: int
"""
i = player - 1
self.__rows[row][i] += 1
self.__cols[col][i] += 1
if row == col:
self.__diagonal[i] += 1
if col == len(self.__rows) - row - 1:
self.__anti_diagonal[i] += 1
if any([self.__rows[row][i] == self.__size,
        self.__cols[col][i] == self.__size,
        self.__diagonal[i] == self.__size,
        self.__anti_diagonal[i] == self.__size]):
return player
return 0
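# A minimal usage sketch added for illustration; the 3x3 move sequence below is
# hypothetical and not part of the original LeetCode solution. Each move is O(1)
# because only the per-player counters touched by that move are updated.
if __name__ == "__main__":
    board = TicTacToe(3)
    assert board.move(0, 0, 1) == 0  # player 1, no winner yet
    assert board.move(1, 1, 2) == 0  # player 2 takes the center
    assert board.move(0, 1, 1) == 0
    assert board.move(2, 2, 2) == 0
    assert board.move(0, 2, 1) == 1  # player 1 completes the top row and wins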
|
959a1defd2f48049db9467dba7d0161c0d766e81
|
40282fc3afc28166ce01cdf2240d445a1930f2b0
|
/docs/sections/section10/solutions/sol_2b.py
|
875dd7e78708f85d6d7f748777d7dce92d33b67a
|
[
"MIT"
] |
permissive
|
Harvard-IACS/2020-CS109A
|
7dac61f88aefe9647fe7e3eabb3dc6ef85cc8d73
|
665100fec24309edb818a51bc8c29db2912d370f
|
refs/heads/master
| 2022-07-31T18:05:47.127653
| 2021-11-17T22:30:00
| 2021-11-17T22:30:00
| 287,811,847
| 114
| 123
|
MIT
| 2022-05-04T06:26:14
| 2020-08-15T19:28:34
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 800
|
py
|
sol_2b.py
|
kernel_weight = 0.03
bias_weight = 0.03
model_iris_l1 = models.Sequential([
layers.Input(shape = (4,)),
layers.Dense(32, activation='relu', kernel_regularizer=regularizers.l1(kernel_weight), bias_regularizer=regularizers.l2(bias_weight)),
layers.Dense(32, activation='relu', kernel_regularizer=regularizers.l1(kernel_weight), bias_regularizer=regularizers.l2(bias_weight)),
layers.Dense(3, activation = 'softmax')
])
model_iris_l1.compile(
loss='sparse_categorical_crossentropy',
optimizer=optimizers.Adam(0.005),
metrics=['accuracy'],
)
iris_trained_l1 = model_iris_l1.fit(
x = X_train_iris.to_numpy(), y = y_train_iris.to_numpy(), verbose=0,
epochs=1000, validation_data= (X_test_iris.to_numpy(), y_test_iris.to_numpy()),
)
plot_accuracy_loss_rolling(iris_trained_l1)
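# Hedged note (added for illustration, not part of the original solution): with
# kernel_weight = 0.03 the L1 term adds 0.03 * sum(|w|) of each regularized
# kernel to the loss, and bias_weight = 0.03 adds 0.03 * sum(b^2) for each
# regularized bias. For example, assuming TensorFlow/Keras is in scope,
# regularizers.l1(0.03)(tf.constant([1.0, -2.0])) evaluates to 0.09.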
|
cddea924b407eedb5e8d88a75b34844796d7cace
|
bd2997cdba05ed3ec73c0c4abc765da7c132fa47
|
/transformer/batch.py
|
931391ad651874469a20ade23d5bf2bc392486a2
|
[] |
no_license
|
scoutbee/pytorch-nlp-notebooks
|
77119b4366c794d779577211282b8dc29063cb49
|
1e1cb88db61fcec1fba2b3ae862cf4cd81138d09
|
refs/heads/develop
| 2020-04-28T04:44:29.561198
| 2019-11-18T07:43:49
| 2019-11-18T07:43:49
| 174,991,743
| 438
| 181
| null | 2019-11-22T08:14:08
| 2019-03-11T12:05:06
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 8,020
|
py
|
batch.py
|
from collections import Counter
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
from nltk.tokenize import wordpunct_tokenize
from torch import optim
from torch.autograd import Variable
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import Dataset, DataLoader, Subset
def tokenize(text):
"""Turn text into discrete tokens.
Remove tokens that are not words.
"""
text = text.lower()
tokens = wordpunct_tokenize(text)
# Only keep words
tokens = [token for token in tokens
if all(char.isalpha() for char in token)]
return tokens
class EnglishFrenchTranslations(Dataset):
def __init__(self, path, max_vocab, max_seq_len):
self.max_vocab = max_vocab
# Extra tokens to add
self.padding_token = '<PAD>'
self.start_of_sequence_token = '<SOS>'
self.end_of_sequence_token = '<EOS>'
self.unknown_word_token = '<UNK>'
self.max_seq_len = max_seq_len
# Helper function
self.flatten = lambda x: [sublst for lst in x for sublst in lst]
# Load the data into a DataFrame
df = pd.read_csv(path, names=['english', 'french'], sep='\t')
# filter out too long sequences
df = self.filter_seq_len(df, max_len=self.max_seq_len)
# Tokenize inputs (English) and targets (French)
self.tokenize_df(df)
# To reduce computational complexity, replace rare words with <UNK>
self.replace_rare_tokens(df)
# Prepare variables with mappings of tokens to indices
self.create_token2idx(df)
# Remove sequences with mostly <UNK>
df = self.remove_mostly_unk(df)
# Every sequence (input and target) should start with <SOS>
# and end with <EOS>
self.add_start_and_end_to_tokens(df)
# Convert tokens to indices
self.tokens_to_indices(df)
def __getitem__(self, idx):
"""Return example at index idx."""
return self.indices_pairs[idx][0], self.indices_pairs[idx][1]
def tokenize_df(self, df):
"""Turn inputs and targets into tokens."""
df['tokens_inputs'] = df.english.apply(tokenize)
df['tokens_targets'] = df.french.apply(tokenize)
def replace_rare_tokens(self, df):
"""Replace rare tokens with <UNK>."""
common_tokens_inputs = self.get_most_common_tokens(
df.tokens_inputs.tolist(),
)
common_tokens_targets = self.get_most_common_tokens(
df.tokens_targets.tolist(),
)
df.loc[:, 'tokens_inputs'] = df.tokens_inputs.apply(
lambda tokens: [token if token in common_tokens_inputs
else self.unknown_word_token for token in tokens]
)
df.loc[:, 'tokens_targets'] = df.tokens_targets.apply(
lambda tokens: [token if token in common_tokens_targets
else self.unknown_word_token for token in tokens]
)
def get_most_common_tokens(self, tokens_series):
"""Return the max_vocab most common tokens."""
all_tokens = self.flatten(tokens_series)
# Subtract 4 for <PAD>, <SOS>, <EOS>, and <UNK>
common_tokens = set(list(zip(*Counter(all_tokens).most_common(
self.max_vocab - 4)))[0])
return common_tokens
def remove_mostly_unk(self, df, threshold=0.99):
"""Remove sequences with mostly <UNK>."""
calculate_ratio = (
lambda tokens: sum(1 for token in tokens if token != '<UNK>')
/ len(tokens) > threshold
)
df = df[df.tokens_inputs.apply(calculate_ratio)]
df = df[df.tokens_targets.apply(calculate_ratio)]
return df
def filter_seq_len(self, df, max_len=100):
mask = (df['english'].str.count(' ') < max_len) & (df['french'].str.count(' ') < max_len)
return df.loc[mask]
def create_token2idx(self, df):
"""Create variables with mappings from tokens to indices."""
unique_tokens_inputs = set(self.flatten(df.tokens_inputs))
unique_tokens_targets = set(self.flatten(df.tokens_targets))
for token in reversed([
self.padding_token,
self.start_of_sequence_token,
self.end_of_sequence_token,
self.unknown_word_token,
]):
if token in unique_tokens_inputs:
unique_tokens_inputs.remove(token)
if token in unique_tokens_targets:
unique_tokens_targets.remove(token)
unique_tokens_inputs = sorted(list(unique_tokens_inputs))
unique_tokens_targets = sorted(list(unique_tokens_targets))
# Add <PAD>, <SOS>, <EOS>, and <UNK> tokens
for token in reversed([
self.padding_token,
self.start_of_sequence_token,
self.end_of_sequence_token,
self.unknown_word_token,
]):
unique_tokens_inputs = [token] + unique_tokens_inputs
unique_tokens_targets = [token] + unique_tokens_targets
self.token2idx_inputs = {token: idx for idx, token
in enumerate(unique_tokens_inputs)}
self.idx2token_inputs = {idx: token for token, idx
in self.token2idx_inputs.items()}
self.token2idx_targets = {token: idx for idx, token
in enumerate(unique_tokens_targets)}
self.idx2token_targets = {idx: token for token, idx
in self.token2idx_targets.items()}
def add_start_and_end_to_tokens(self, df):
"""Add <SOS> and <EOS> tokens to the end of every input and output."""
df.loc[:, 'tokens_inputs'] = (
[self.start_of_sequence_token]
+ df.tokens_inputs
+ [self.end_of_sequence_token]
)
df.loc[:, 'tokens_targets'] = (
[self.start_of_sequence_token]
+ df.tokens_targets
+ [self.end_of_sequence_token]
)
def tokens_to_indices(self, df):
"""Convert tokens to indices."""
df['indices_inputs'] = df.tokens_inputs.apply(
lambda tokens: [self.token2idx_inputs[token] for token in tokens])
df['indices_targets'] = df.tokens_targets.apply(
lambda tokens: [self.token2idx_targets[token] for token in tokens])
self.indices_pairs = list(zip(df.indices_inputs, df.indices_targets))
def __len__(self):
return len(self.indices_pairs)
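# Hedged usage sketch (the file path and sizes below are illustrative, not taken
# from the original notebook): the dataset expects a tab-separated file with an
# English column and a French column and yields pairs of token-index sequences.
#
#     dataset = EnglishFrenchTranslations('data/eng-fra.tsv', max_vocab=10000, max_seq_len=30)
#     input_indices, target_indices = dataset[0]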
def collate(batch, src_pad, trg_pad, device):
inputs = [torch.LongTensor(item[0]) for item in batch]
targets = [torch.LongTensor(item[1]) for item in batch]
# Pad sequences so that they are all the same length (within one minibatch)
padded_inputs = pad_sequence(inputs, padding_value=src_pad, batch_first=True)
padded_targets = pad_sequence(targets, padding_value=trg_pad, batch_first=True)
# Sort by length for CUDA optimizations
lengths = torch.LongTensor([len(x) for x in inputs])
lengths, permutation = lengths.sort(dim=0, descending=True)
return padded_inputs[permutation].to(device), padded_targets[permutation].to(device), lengths.to(device)
def no_peak_mask(size):
mask = np.triu(np.ones((1, size, size)), k=1).astype('uint8')
mask = Variable(torch.from_numpy(mask) == 0)
return mask
def create_masks(src, trg, src_pad_idx, trg_pad_idx):
src_mask = (src != src_pad_idx).unsqueeze(-2)
if trg is not None:
trg_mask = (trg != trg_pad_idx).unsqueeze(-2)
size = trg.size(1) # get seq_len for matrix
np_mask = no_peak_mask(size).to(trg_mask.device)
trg_mask = trg_mask & np_mask
else:
trg_mask = None
return src_mask, trg_mask
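# Illustrative check (added for clarity, not part of the original module):
# no_peak_mask(n) builds a lower-triangular boolean mask so that, during
# decoding, position i may only attend to positions j <= i.
if __name__ == "__main__":
    demo_mask = no_peak_mask(4)
    assert demo_mask.shape == (1, 4, 4)
    assert bool(demo_mask[0, 2, 1])      # position 2 can see position 1
    assert not bool(demo_mask[0, 1, 2])  # position 1 cannot peek at position 2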
|
8954df5be3684c566435343e0c92f8d9d25c1beb
|
7d232f51e2330a4f537c50ede9c6bc023d656fd4
|
/src/python/grpcio_tests/tests_aio/unit/done_callback_test.py
|
b6a4a352147fe257a9f4f906303e1b5560c246a1
|
[
"BSD-3-Clause",
"MPL-2.0",
"Apache-2.0"
] |
permissive
|
grpc/grpc
|
6975af3ba6f07a6fe965b875a0c09abf18999a52
|
e4d598ab64aa54f1da78c6ed6133b741742d11d4
|
refs/heads/master
| 2023-08-31T01:10:22.666618
| 2023-08-30T22:35:17
| 2023-08-30T22:35:17
| 27,729,880
| 42,330
| 13,022
|
Apache-2.0
| 2023-09-14T21:54:19
| 2014-12-08T18:58:53
|
C++
|
UTF-8
|
Python
| false
| false
| 10,325
|
py
|
done_callback_test.py
|
# Copyright 2020 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing the done callbacks mechanism."""
import asyncio
import logging
import unittest
import grpc
from grpc.experimental import aio
from src.proto.grpc.testing import messages_pb2
from src.proto.grpc.testing import test_pb2_grpc
from tests_aio.unit._common import inject_callbacks
from tests_aio.unit._test_base import AioTestBase
from tests_aio.unit._test_server import start_test_server
_NUM_STREAM_RESPONSES = 5
_REQUEST_PAYLOAD_SIZE = 7
_RESPONSE_PAYLOAD_SIZE = 42
_REQUEST = b"\x01\x02\x03"
_RESPONSE = b"\x04\x05\x06"
_TEST_METHOD = "/test/Test"
_FAKE_METHOD = "/test/Fake"
class TestClientSideDoneCallback(AioTestBase):
async def setUp(self):
address, self._server = await start_test_server()
self._channel = aio.insecure_channel(address)
self._stub = test_pb2_grpc.TestServiceStub(self._channel)
async def tearDown(self):
await self._channel.close()
await self._server.stop(None)
async def test_add_after_done(self):
call = self._stub.UnaryCall(messages_pb2.SimpleRequest())
self.assertEqual(grpc.StatusCode.OK, await call.code())
validation = inject_callbacks(call)
await validation
async def test_unary_unary(self):
call = self._stub.UnaryCall(messages_pb2.SimpleRequest())
validation = inject_callbacks(call)
self.assertEqual(grpc.StatusCode.OK, await call.code())
await validation
async def test_unary_stream(self):
request = messages_pb2.StreamingOutputCallRequest()
for _ in range(_NUM_STREAM_RESPONSES):
request.response_parameters.append(
messages_pb2.ResponseParameters(size=_RESPONSE_PAYLOAD_SIZE)
)
call = self._stub.StreamingOutputCall(request)
validation = inject_callbacks(call)
response_cnt = 0
async for response in call:
response_cnt += 1
self.assertIsInstance(
response, messages_pb2.StreamingOutputCallResponse
)
self.assertEqual(_RESPONSE_PAYLOAD_SIZE, len(response.payload.body))
self.assertEqual(_NUM_STREAM_RESPONSES, response_cnt)
self.assertEqual(grpc.StatusCode.OK, await call.code())
await validation
async def test_stream_unary(self):
payload = messages_pb2.Payload(body=b"\0" * _REQUEST_PAYLOAD_SIZE)
request = messages_pb2.StreamingInputCallRequest(payload=payload)
async def gen():
for _ in range(_NUM_STREAM_RESPONSES):
yield request
call = self._stub.StreamingInputCall(gen())
validation = inject_callbacks(call)
response = await call
self.assertIsInstance(response, messages_pb2.StreamingInputCallResponse)
self.assertEqual(
_NUM_STREAM_RESPONSES * _REQUEST_PAYLOAD_SIZE,
response.aggregated_payload_size,
)
self.assertEqual(grpc.StatusCode.OK, await call.code())
await validation
async def test_stream_stream(self):
call = self._stub.FullDuplexCall()
validation = inject_callbacks(call)
request = messages_pb2.StreamingOutputCallRequest()
request.response_parameters.append(
messages_pb2.ResponseParameters(size=_RESPONSE_PAYLOAD_SIZE)
)
for _ in range(_NUM_STREAM_RESPONSES):
await call.write(request)
response = await call.read()
self.assertIsInstance(
response, messages_pb2.StreamingOutputCallResponse
)
self.assertEqual(_RESPONSE_PAYLOAD_SIZE, len(response.payload.body))
await call.done_writing()
self.assertEqual(grpc.StatusCode.OK, await call.code())
await validation
class TestServerSideDoneCallback(AioTestBase):
async def setUp(self):
self._server = aio.server()
port = self._server.add_insecure_port("[::]:0")
self._channel = aio.insecure_channel("localhost:%d" % port)
async def tearDown(self):
await self._channel.close()
await self._server.stop(None)
async def _register_method_handler(self, method_handler):
"""Registers method handler and starts the server"""
generic_handler = grpc.method_handlers_generic_handler(
"test",
dict(Test=method_handler),
)
self._server.add_generic_rpc_handlers((generic_handler,))
await self._server.start()
async def test_unary_unary(self):
validation_future = self.loop.create_future()
async def test_handler(request: bytes, context: aio.ServicerContext):
self.assertEqual(_REQUEST, request)
validation_future.set_result(inject_callbacks(context))
return _RESPONSE
await self._register_method_handler(
grpc.unary_unary_rpc_method_handler(test_handler)
)
response = await self._channel.unary_unary(_TEST_METHOD)(_REQUEST)
self.assertEqual(_RESPONSE, response)
validation = await validation_future
await validation
async def test_unary_stream(self):
validation_future = self.loop.create_future()
async def test_handler(request: bytes, context: aio.ServicerContext):
self.assertEqual(_REQUEST, request)
validation_future.set_result(inject_callbacks(context))
for _ in range(_NUM_STREAM_RESPONSES):
yield _RESPONSE
await self._register_method_handler(
grpc.unary_stream_rpc_method_handler(test_handler)
)
call = self._channel.unary_stream(_TEST_METHOD)(_REQUEST)
async for response in call:
self.assertEqual(_RESPONSE, response)
validation = await validation_future
await validation
async def test_stream_unary(self):
validation_future = self.loop.create_future()
async def test_handler(request_iterator, context: aio.ServicerContext):
validation_future.set_result(inject_callbacks(context))
async for request in request_iterator:
self.assertEqual(_REQUEST, request)
return _RESPONSE
await self._register_method_handler(
grpc.stream_unary_rpc_method_handler(test_handler)
)
call = self._channel.stream_unary(_TEST_METHOD)()
for _ in range(_NUM_STREAM_RESPONSES):
await call.write(_REQUEST)
await call.done_writing()
self.assertEqual(_RESPONSE, await call)
validation = await validation_future
await validation
async def test_stream_stream(self):
validation_future = self.loop.create_future()
async def test_handler(request_iterator, context: aio.ServicerContext):
validation_future.set_result(inject_callbacks(context))
async for request in request_iterator:
self.assertEqual(_REQUEST, request)
return _RESPONSE
await self._register_method_handler(
grpc.stream_stream_rpc_method_handler(test_handler)
)
call = self._channel.stream_stream(_TEST_METHOD)()
for _ in range(_NUM_STREAM_RESPONSES):
await call.write(_REQUEST)
await call.done_writing()
async for response in call:
self.assertEqual(_RESPONSE, response)
validation = await validation_future
await validation
async def test_error_in_handler(self):
"""Errors in the handler still triggers callbacks."""
validation_future = self.loop.create_future()
async def test_handler(request: bytes, context: aio.ServicerContext):
self.assertEqual(_REQUEST, request)
validation_future.set_result(inject_callbacks(context))
raise RuntimeError("A test RuntimeError")
await self._register_method_handler(
grpc.unary_unary_rpc_method_handler(test_handler)
)
with self.assertRaises(aio.AioRpcError) as exception_context:
await self._channel.unary_unary(_TEST_METHOD)(_REQUEST)
rpc_error = exception_context.exception
self.assertEqual(grpc.StatusCode.UNKNOWN, rpc_error.code())
validation = await validation_future
await validation
async def test_error_in_callback(self):
"""Errors in the callback won't be propagated to client."""
validation_future = self.loop.create_future()
async def test_handler(request: bytes, context: aio.ServicerContext):
self.assertEqual(_REQUEST, request)
def exception_raiser(unused_context):
raise RuntimeError("A test RuntimeError")
context.add_done_callback(exception_raiser)
validation_future.set_result(inject_callbacks(context))
return _RESPONSE
await self._register_method_handler(
grpc.unary_unary_rpc_method_handler(test_handler)
)
response = await self._channel.unary_unary(_TEST_METHOD)(_REQUEST)
self.assertEqual(_RESPONSE, response)
# The remaining callbacks won't be invoked if one of the callbacks crashed.
validation = await validation_future
with self.assertRaises(asyncio.TimeoutError):
await validation
# Invoke RPC one more time to ensure the toxic callback won't break the
# server.
with self.assertRaises(aio.AioRpcError) as exception_context:
await self._channel.unary_unary(_FAKE_METHOD)(_REQUEST)
rpc_error = exception_context.exception
self.assertEqual(grpc.StatusCode.UNIMPLEMENTED, rpc_error.code())
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
unittest.main(verbosity=2)
|
dea100149edb147698e98104cbede36c45c5227b
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/refactoring/inlinelocal/multilineStringToTripleQuotedFString.before.py
|
0892870d440dc6ec94eac94c76e761670a48baba
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 47
|
py
|
multilineStringToTripleQuotedFString.before.py
|
s6 = '''
foo
bar
'''
print(f'''{s<caret>6}''')
|
e95d1aa1625f346bf1d68d5cc65ac854cdf11509
|
26a67dfb19c45fc11f655f737fa1061a870a5af1
|
/docs/tutorials/example_code/star_render.py
|
4096e4818a99c9f36e11e2d5fc4c3ce73922646e
|
[] |
no_license
|
pynbody/pynbody
|
e7604befc6de9e78a96b5b5871319b99bf8e2ca8
|
6b0cb61d89b17451f3bf65866e8891f2aefc4453
|
refs/heads/master
| 2023-08-08T04:50:29.914356
| 2023-06-19T17:49:31
| 2023-06-19T17:49:31
| 10,058,773
| 129
| 99
| null | 2023-09-13T11:44:37
| 2013-05-14T15:46:59
|
Python
|
UTF-8
|
Python
| false
| false
| 375
|
py
|
star_render.py
|
import matplotlib.pylab as plt
import pynbody
# load the snapshot and set to physical units
s = pynbody.load('testdata/g15784.lr.01024.gz')
s.physical_units()
# load the halos
h = s.halos()
# center on the largest halo and align the disk
pynbody.analysis.angmom.sideon(h[1])
# create an image using the default bands (i, v, u)
pynbody.plot.stars.render(s,width='20 kpc')
|
dde309be73d391098e9a3a1419b511710f098def
|
4266f2cc3084984f5f2795415d3c22c288e5f9ee
|
/src/formatting/whitespace.py
|
4a6696441dbb40817f713f29a434540e73ad65f5
|
[
"MIT"
] |
permissive
|
jcberquist/sublimetext-cfml
|
5fce38b03be2fa70071a5c55d2830e632bfec49c
|
b91c44a32e251c20c6359a8d9232287e1b408e6c
|
refs/heads/master
| 2022-11-03T20:14:35.918063
| 2022-10-18T20:23:25
| 2022-10-18T20:23:25
| 48,827,528
| 133
| 30
|
MIT
| 2022-10-18T20:23:27
| 2015-12-31T01:48:00
|
Python
|
UTF-8
|
Python
| false
| false
| 7,866
|
py
|
whitespace.py
|
import sublime
import re
from .. import utils
from .delimited_scopes import DELIMITED_SCOPES
OPERATOR_SELECTOR = "source.cfml.script keyword.operator -source.sql"
WHITESPACE_CONTAINER_START = ",".join(
[DELIMITED_SCOPES[k]["start"] for k in DELIMITED_SCOPES]
)
WHITESPACE_CONTAINER_END = ",".join(
[DELIMITED_SCOPES[k]["end"] for k in DELIMITED_SCOPES]
)
WORD_OPERATOR = re.compile("[a-z][a-z ]*[a-z]$", re.IGNORECASE)
def format_whitespace(cfml_format, delimited_scope_command=False):
regions = []
regions.extend(format_operators(cfml_format))
regions.extend(format_parentheses(cfml_format))
regions.extend(format_brackets(cfml_format))
regions.extend(format_struct_key_values(cfml_format))
regions.extend(format_for_semicolons(cfml_format))
return regions
def format_operators(cfml_format):
padding = cfml_format.get_setting("binary_operators.padding")
padding_strip_newlines = cfml_format.get_setting(
"binary_operators.padding_strip_newlines", default=False
)
format_assignment_operator = cfml_format.get_setting(
"binary_operators.format_assignment_operator", default=False
)
substitutions = []
if padding is None or padding not in ["spaced", "compact"]:
return substitutions
operators = cfml_format.find_by_selector(OPERATOR_SELECTOR)
for r in operators:
scope_name = cfml_format.view.scope_name(r.begin())
operator = cfml_format.view.substr(r)
is_word = re.match(WORD_OPERATOR, operator) is not None
if (
"keyword.operator.assignment" in scope_name
and not format_assignment_operator
):
continue
space_str = ""
if is_word or (
padding == "spaced"
and (".binary." in scope_name or ".ternary." in scope_name)
):
space_str = " "
if (
".binary." in scope_name
or ".ternary." in scope_name
or ".postfix." in scope_name
):
prev_pt = utils.get_previous_character(cfml_format.view, r.begin())
if not cfml_format.view.match_selector(prev_pt, WHITESPACE_CONTAINER_START):
replacement_region = sublime.Region(prev_pt + 1, r.begin())
if padding_strip_newlines:
substitutions.append((replacement_region, space_str))
else:
prev_str = cfml_format.view.substr(replacement_region)
if "\n" not in prev_str:
substitutions.append((replacement_region, space_str))
if (
".binary." in scope_name
or ".ternary." in scope_name
or ".prefix." in scope_name
):
next_pt = utils.get_next_character(cfml_format.view, r.end())
if not cfml_format.view.match_selector(next_pt, WHITESPACE_CONTAINER_END):
replacement_region = sublime.Region(r.end(), next_pt)
if padding_strip_newlines:
substitutions.append((replacement_region, space_str))
else:
next_str = cfml_format.view.substr(replacement_region)
if "\n" not in next_str:
substitutions.append((replacement_region, space_str))
return substitutions
def format_parentheses(cfml_format):
padding = cfml_format.get_setting("parentheses.padding")
padding_strip_newlines = cfml_format.get_setting(
"parentheses.padding_strip_newlines", default=False
)
substitutions = []
if padding is None or padding not in ["spaced", "compact"]:
return substitutions
groups = cfml_format.find_by_nested_selector("meta.group.cfml")
for r in groups:
# keyword groups are formatted separately
prev_pt = utils.get_previous_character(cfml_format.view, r.begin())
if cfml_format.view.match_selector(prev_pt, "keyword.control"):
continue
substitutions.extend(
cfml_format.inner_scope_spacing(r, None, padding, padding_strip_newlines)
)
return substitutions
def format_brackets(cfml_format):
padding_strip_newlines = cfml_format.get_setting(
"brackets.padding_strip_newlines", default=False
)
space_str = " " if cfml_format.get_setting("brackets.padding") == "spaced" else ""
bracket_starts = cfml_format.find_by_selector(
"meta.brackets.cfml punctuation.section.brackets.begin.cfml"
)
bracket_ends = cfml_format.find_by_selector(
"meta.brackets.cfml punctuation.section.brackets.end.cfml"
)
substitutions = []
for r in bracket_starts:
first_char = utils.get_next_character(cfml_format.view, r.begin() + 1)
bracket_to_char = sublime.Region(r.begin() + 1, first_char)
if padding_strip_newlines or "\n" not in cfml_format.view.substr(
bracket_to_char
):
substitutions.append((bracket_to_char, space_str))
prev_char = utils.get_previous_character(cfml_format.view, r.begin())
char_to_bracket = sublime.Region(prev_char + 1, r.begin())
if char_to_bracket.size() > 0:
substitutions.append((char_to_bracket, ""))
for r in bracket_ends:
prev_char = utils.get_previous_character(cfml_format.view, r.begin())
char_to_bracket_end = sublime.Region(prev_char + 1, r.end())
if padding_strip_newlines or "\n" not in cfml_format.view.substr(
char_to_bracket_end
):
substitutions.append((char_to_bracket_end, (space_str + "]") * r.size()))
return substitutions
def format_struct_key_values(cfml_format):
substitutions = []
key_value_selector = (
"meta.struct-literal.cfml punctuation.separator.key-value.cfml "
)
key_value_colon = cfml_format.get_setting("struct.key_value_colon")
key_value_equals = cfml_format.get_setting("struct.key_value_equals")
key_values_separators = cfml_format.find_by_selector(key_value_selector)
for r in key_values_separators:
if not cfml_format.view.scope_name(r.begin()).endswith(key_value_selector):
continue
separator = cfml_format.view.substr(r)
separator_setting = key_value_equals if separator == "=" else key_value_colon
if separator_setting is None or separator_setting not in ["spaced", "compact"]:
continue
space_str = " " if separator_setting == "spaced" else ""
prev_pt = utils.get_previous_character(cfml_format.view, r.begin())
if not cfml_format.view.match_selector(prev_pt, WHITESPACE_CONTAINER_START):
prev_space_str = space_str if separator == "=" else ""
substitutions.append(
(sublime.Region(prev_pt + 1, r.begin()), prev_space_str)
)
next_pt = utils.get_next_character(cfml_format.view, r.end())
if not cfml_format.view.match_selector(next_pt, WHITESPACE_CONTAINER_END):
substitutions.append((sublime.Region(r.end(), next_pt), space_str))
return substitutions
def format_for_semicolons(cfml_format):
padding = cfml_format.get_setting("for_loop_semicolons.padding")
substitutions = []
if padding is None or padding not in ["spaced", "compact"]:
return substitutions
selector = "meta.for.cfml meta.group.cfml punctuation.terminator.statement.cfml"
space_str = " " if padding == "spaced" else ""
semicolons = cfml_format.find_by_selector(selector)
for r in semicolons:
for pt in range(r.begin(), r.end()):
next_pt = utils.get_next_character(cfml_format.view, pt + 1)
if not cfml_format.view.match_selector(next_pt, WHITESPACE_CONTAINER_END):
substitutions.append((sublime.Region(pt + 1, next_pt), space_str))
return substitutions
|
f5aadbd4e15adcffb5ffdff467dbdb5ec391203f
|
7d232f51e2330a4f537c50ede9c6bc023d656fd4
|
/test/http2_test/test_ping.py
|
49db39332cdf14fd923487d7ed7415e3b8eb8987
|
[
"BSD-3-Clause",
"MPL-2.0",
"Apache-2.0"
] |
permissive
|
grpc/grpc
|
6975af3ba6f07a6fe965b875a0c09abf18999a52
|
e4d598ab64aa54f1da78c6ed6133b741742d11d4
|
refs/heads/master
| 2023-08-31T01:10:22.666618
| 2023-08-30T22:35:17
| 2023-08-30T22:35:17
| 27,729,880
| 42,330
| 13,022
|
Apache-2.0
| 2023-09-14T21:54:19
| 2014-12-08T18:58:53
|
C++
|
UTF-8
|
Python
| false
| false
| 2,270
|
py
|
test_ping.py
|
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import http2_base_server
class TestcasePing(object):
"""
This test injects PING frames before and after the headers and data. It keeps
count of the outstanding ping responses and asserts that the count is zero at
the end of the test.
"""
def __init__(self):
self._base_server = http2_base_server.H2ProtocolBaseServer()
self._base_server._handlers[
"RequestReceived"
] = self.on_request_received
self._base_server._handlers["DataReceived"] = self.on_data_received
self._base_server._handlers["ConnectionLost"] = self.on_connection_lost
def get_base_server(self):
return self._base_server
def on_request_received(self, event):
self._base_server.default_ping()
self._base_server.on_request_received_default(event)
self._base_server.default_ping()
def on_data_received(self, event):
self._base_server.on_data_received_default(event)
sr = self._base_server.parse_received_data(event.stream_id)
if sr:
logging.info("Creating response size = %s" % sr.response_size)
response_data = self._base_server.default_response_data(
sr.response_size
)
self._base_server.default_ping()
self._base_server.setup_send(response_data, event.stream_id)
self._base_server.default_ping()
def on_connection_lost(self, reason):
logging.info(
"Disconnect received. Ping Count %d"
% self._base_server._outstanding_pings
)
assert self._base_server._outstanding_pings == 0
self._base_server.on_connection_lost(reason)
|
3830b9a7e43b00224b6486a2c3335eee4fc5f8e3
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/refactoring/introduceVariable/dontSuggestBuiltinTypeNames2.py
|
6feada57c99e977ce392a405925db58ca145f1f8
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 66
|
py
|
dontSuggestBuiltinTypeNames2.py
|
choicelist = []
foo = ''
fooval = di<caret>ct(choicelist).get(foo)
|