id int64 0 300k | label stringlengths 1 74 ⌀ | text stringlengths 4k 8k |
|---|---|---|
3,000 | check cache file exists | """RBD Persistent write back cache."""
import datetime
from json import loads
from utility.log import Log
log = Log(__name__)
class PWLException(Exception):
    """Raised when PWL cache validation fails (e.g. cache file missing or mismatched)."""
    pass
class PWLConfigurationError(Exception):
    """Raised when adding or removing an RBD PWL configuration option fails."""
    pass
class PersistentWriteAheadLog:
    """Manage an RBD persistent write-back (PWL) cache on an SSD/PMEM client node.

    Handles preparing the cache drive, applying/removing the RBD config
    options that enable the PWL plugin, and validating the cache file via
    ``rbd status``.
    """

    def __init__(self, rbd, client, drive):
        """Initialize PWL.

        Args:
            rbd: RBD object
            client: SSD/PMEM client node(CephNode)
            drive: Cache drive for persistent Write back cache.
        """
        self.rbd = rbd
        self.client = client
        self.drive = drive
        # Mount point of the cache filesystem; populated by
        # configure_cache_client().
        self.pwl_path = None

    def cleanup(self):
        """cleanup drive.

        Wipes filesystem signatures, unmounts the drive and removes the
        cache mount directory.
        """
        log.info("Starting to cleanup cache drive....")
        # NOTE(review): wipefs runs before umount here -- confirm the
        # ordering is intentional (wipefs on a still-mounted device may fail).
        self.client.exec_command(cmd=f"wipefs -af {self.drive}", sudo=True)
        # The drive may not be mounted at all; ignore the umount exit code.
        self.client.exec_command(
            cmd=f"umount -v {self.drive}", sudo=True, check_ec=False
        )
        self.client.exec_command(cmd=f"rm -rf {self.pwl_path}", sudo=True)

    def configure_cache_client(self):
        """Configure cache device with DAX.

        Configuration involves,
        - wipe drive
        - create mount directory.
        - mkfs.xfs <drive> or with ext4
        - mount drive with DAX(Direct Attached Access) option
        """
        log.info("Configuring SSD/PMEM cache client....")
        # Use a random mount point so repeated runs do not collide.
        self.pwl_path = f"/mnt/{self.rbd.random_string()}"
        cmds = [
            f"mkdir -p {self.pwl_path}",
            f"mkfs.ext4 {self.drive}",
            f"mount -O dax {self.drive} {self.pwl_path}",
        ]
        # Cleanup drive and get ready for mount.
        self.cleanup()
        for cmd in cmds:
            self.client.exec_command(cmd=cmd, sudo=True)

    def configure_pwl_cache(self, mode, level, entity, size="1073741824"):
        """Set PWL cache mode (disabled, rwl, ssd).

        Args:
            level: cache mode applied at client or image or pool
            mode: cache mode ( disabled or rwl or ssd )
            entity: entity level ( client or image-name or pool-name )
            size: cache size ( default: 1073741824 )

        Raises:
            PWLConfigurationError: if any config option fails to apply.
        """
        log.info(f"Configuring RBD PWL cache setting at {level}:{entity}")
        # rbd_cache is disabled globally alongside enabling the pwl_cache
        # plugin and its mode/size/path at the requested level.
        configs = [
            ("global", "client", "rbd_cache", "false"),
            (level, entity, "rbd_plugins", "pwl_cache"),
            (level, entity, "rbd_persistent_cache_mode", mode),
            (level, entity, "rbd_persistent_cache_size", size),
            (level, entity, "rbd_persistent_cache_path", self.pwl_path),
        ]
        for config in configs:
            # A truthy return from set_config is treated as failure.
            if self.rbd.set_config(*config):
                raise PWLConfigurationError(f"{config} - failed to add configuration")

    def remove_pwl_configuration(self, level, entity):
        """Unset PWL cache mode (disabled, rwl, ssd).

        Args:
            level: cache mode applied at client or image or pool
            entity: entity level ( client or image-name or pool-name )

        Raises:
            PWLConfigurationError: if any config option fails to be removed.
        """
        log.info(f"Removing RBD PWL cache setting at {level}:{entity}")
        # Mirror of configure_pwl_cache(): remove the same set of options.
        configs = [
            ("global", "client", "rbd_cache"),
            (level, entity, "rbd_plugins"),
            (level, entity, "rbd_persistent_cache_mode"),
            (level, entity, "rbd_persistent_cache_size"),
            (level, entity, "rbd_persistent_cache_path"),
        ]
        for config in configs:
            # A truthy return from remove_config is treated as failure.
            if self.rbd.remove_config(*config):
                raise PWLConfigurationError(
                    f"{config} - failed to remove configuration"
                )

    def get_image_cache_status(self, image):
        """Get image persistent cache status.

        Args:
            image: image name

        Returns:
            image_status: parsed JSON output of ``rbd status``.
        """
        args = {"format": "json"}
        return loads(self.rbd.image_status(image, cmd_args=args, output=True))

    @staticmethod
    def validate_cache_size(rbd_status, cache_size):
        """Compare cache size.

        Raises:
            PWLException: if the configured size does not match ``cache_size``.
        """
        configured_cache_size = rbd_status["persistent_cache"]["size"]
        if configured_cache_size != cache_size:
            raise PWLException(
                f"Cache size {configured_cache_size} from RBD status did not match to {cache_size}"
            )
        log.info(
            f"Cache size {configured_cache_size} from RBD status matched to {cache_size}"
        )

    def validate_cache_path(self, rbd_status):
        """Compare cache file path.

        Raises:
            PWLException: if the configured path is not under ``self.pwl_path``.
        """
        configured_cache_path = rbd_status["persistent_cache"]["path"]
        # Substring check: the cache file lives inside the mount point.
        if self.pwl_path not in configured_cache_path:
            raise PWLException(
                f"{self.pwl_path} is not been used as cache path as configured {configured_cache_path}"
            )
        log.info(
            f"{self.pwl_path} is used as cache path as configured {configured_cache_path}"
        )

    def METHOD_NAME(self, image, timeout=120, **kw):
        """Validate cache file existence.

        Polls ``rbd status`` until the cache file appears (or timeout), then
        optionally validates its size/path and checks it exists on disk.

        Args:
            image: name of the image.
            timeout: timeout in seconds
            kw: validate arguments
        Raises:
            PWLException
        """
        log.info("Validate RBD PWL cache file existence and size.")
        # Validate cache file
        end_time = datetime.datetime.now() + datetime.timedelta(seconds=timeout)
        while end_time > datetime.datetime.now():
            out = self.get_image_cache_status(image)
            log.debug(f"RBD status of image: {out}")
            cache_file_path = out.get("persistent_cache", {}).get("path")
            if cache_file_path:
                # validate cache from rbd status
                if kw.get("validate_cache_size"):
                    self.validate_cache_size(out, kw["cache_file_size"])
                if kw.get("validate_cache_path"):
                    self.validate_cache_path(out)
                try:
                    # validate cache file existence
                    self.client.exec_command(
                        cmd=f"ls -l {cache_file_path}",
                        check_ec=True,
                    )
                    log.info(
                        f"{self.client.hostname}:{cache_file_path} cache file found..."
                    )
                    break
                except Exception as err:
                    # File not visible yet; keep polling until timeout.
                    log.warning(err)
        else:
            # while/else: reached only when the loop times out without break.
            raise PWLException(
                f"{self.client.hostname}:{self.pwl_path} cache file did not found!!!"
            )

    def flush(self, image_spec):
        """Perform cache flush on the image

        Args:
            image_spec: <pool>/<image> where cache flush needs to be performed.
        """
        log.info(f"Perform cache flush on image {image_spec}....")
        cmd = f"rbd persistent-cache flush {image_spec}"
        return self.client.exec_command(cmd=cmd, sudo=True)

    def invalidate(self, image):
        # TODO: not implemented yet.
        pass
# utils
def get_entity_level(config):
    """Resolve the RBD config level and target entity from a test config.

    Args:
        config: test configuration dict with a "level" key and a
            "rep_pool_config" dict holding "pool" and "image" names.

    Returns:
        tuple: (config_level, entity) -- "client" level maps to the
        ("global", "client") pair; "pool"/"image" levels target the pool
        name or "<pool>/<image>" spec respectively.
    """
    level = config["level"]
    pool_cfg = config["rep_pool_config"]
    pool_name = pool_cfg["pool"]
    image_spec = f"{pool_cfg['pool']}/{pool_cfg['image']}"
    if level == "client":
        # Client-level options are applied at the global scope.
        return "global", "client"
    if level == "pool":
        return level, pool_name
    if level == "image":
        return level, image_spec
    # Any other level keeps the default "client" entity unchanged.
    return level, "client"
def fio_ready(config, client):
    """Prepare FIO workload arguments for a client node.

    Args:
        config: test configuration dict containing a "fio" sub-dict.
        client: client node object to run the FIO workload on.

    Returns:
        dict: the (mutated) ``config["fio"]`` dict with ``client_node``
        and ``long_running`` set.
    """
    fio_args = config["fio"]
    # Annotate the workload in place so the caller's config stays in sync.
    fio_args.update(client_node=client, long_running=True)
    return fio_args
3,001 | test failed trait definition | # (C) Copyright 2005-2023 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
""" Tests for the trait documenter. """
import contextlib
import io
import os
import shutil
import tempfile
import textwrap
import tokenize
import unittest
import unittest.mock as mock
from traits.api import Bool, HasTraits, Int, Property
from traits.testing.optional_dependencies import sphinx, requires_sphinx
if sphinx is not None:
from sphinx.ext.autodoc import ClassDocumenter, INSTANCEATTR, Options
from sphinx.ext.autodoc.directive import DocumenterBridge
from sphinx.testing.path import path
from sphinx.testing.util import SphinxTestApp
from sphinx.util.docutils import LoggingReporter
from traits.util.trait_documenter import (
_get_definition_tokens,
trait_definition,
TraitDocumenter,
)
# Configuration file content for testing.
# Written verbatim into a temporary conf.py by create_directive(); the
# autodoc_mock_imports entry lets autodoc run without the 'dummy' package.
CONF_PY = """\
extensions = ['sphinx.ext.autodoc']
# The suffix of source filenames.
source_suffix = '.rst'
autodoc_mock_imports = [
'dummy'
]
"""
class MyTestClass(HasTraits):
    """
    Class-level docstring.
    """
    # The trait below deliberately spans multiple lines: it exercises the
    # documenter's handling of long trait definitions (see
    # test_abbreviated_annotations).
    #: I'm a troublesome trait with a long definition.
    bar = Int(42, desc=""" First line
The answer to
Life,
the Universe,
and Everything.
""")
class Fake(HasTraits):
    # Fixture class: a single Property trait used by test_add_line and
    # test_successful_trait_definition.
    #: Test attribute
    test_attribute = Property(Bool, label="ミスあり")
class FindTheTraits(HasTraits):
    """
    Class for testing the can_document_member functionality.
    """
    # The '#:' markers below are sphinx attribute-doc comments consumed by
    # the trait documenter; the mix of trait/non-trait members is what
    # test_can_document_member exercises.

    #: A TraitType subclass on the right-hand side.
    an_int = Int

    #: A TraitType instance on the right-hand side.
    another_int = Int()

    #: A non-trait integer
    magic_number = 1729

    @property
    def not_a_trait(self):
        """
        I'm a regular property, not a trait.
        """
@requires_sphinx
class TestTraitDocumenter(unittest.TestCase):
    """ Tests for the trait documenter. """

    def setUp(self):
        # Tokenized multi-line trait definition used as a shared fixture.
        self.source = """
depth_interval = Property(Tuple(Float, Float),
depends_on="_depth_interval")
"""
        string_io = io.StringIO(self.source)
        tokens = tokenize.generate_tokens(string_io.readline)
        self.tokens = tokens

    def test_get_definition_tokens(self):
        src = textwrap.dedent(
            """\
depth_interval = Property(Tuple(Float, Float),
depends_on="_depth_interval")
"""
        )
        string_io = io.StringIO(src)
        tokens = tokenize.generate_tokens(string_io.readline)
        definition_tokens = _get_definition_tokens(tokens)
        # Check if they are correctly untokenized. This should not raise.
        string = tokenize.untokenize(definition_tokens)
        self.assertEqual(src.rstrip(), string)

    def test_add_line(self):
        # add_directive_header should emit exactly one line through the
        # directive's result stream.
        mocked_directive = mock.MagicMock()
        documenter = TraitDocumenter(mocked_directive, "test", " ")
        documenter.object_name = "test_attribute"
        documenter.parent = Fake
        # Patch out the superclass header logic so only the trait-specific
        # part runs.
        with mock.patch(
            (
                "traits.util.trait_documenter.ClassLevelDocumenter"
                ".add_directive_header"
            )
        ):
            documenter.add_directive_header("")
        self.assertEqual(
            len(documenter.directive.result.append.mock_calls), 1)

    def test_abbreviated_annotations(self):
        # Regression test for enthought/traits#493.
        with self.create_directive() as directive:
            documenter = TraitDocumenter(
                directive, __name__ + ".MyTestClass.bar")
            documenter.generate(all_members=True)
            result = directive.result
        # Find annotations line.
        for item in result:
            if item.lstrip().startswith(":annotation:"):
                break
        else:
            self.fail("Didn't find the expected trait :annotation:")
        # Annotation should be a single line.
        self.assertIn("First line", item)
        self.assertNotIn("\n", item)

    def test_successful_trait_definition(self):
        definition = trait_definition(cls=Fake, trait_name="test_attribute")
        self.assertEqual(
            definition, 'Property(Bool, label="ミスあり")',
        )

    def METHOD_NAME(self):
        # Asking for the definition of a non-trait member should fail.
        with self.assertRaises(ValueError):
            trait_definition(cls=Fake, trait_name="not_a_trait")

    def test_can_document_member(self):
        # Regression test for enthought/traits#1238
        with self.create_directive() as directive:
            class_documenter = ClassDocumenter(
                directive, __name__ + ".FindTheTraits"
            )
            class_documenter.parse_name()
            class_documenter.import_object()

            # Trait members are documentable ...
            self.assertTrue(
                TraitDocumenter.can_document_member(
                    INSTANCEATTR, "an_int", True, class_documenter,
                )
            )
            self.assertTrue(
                TraitDocumenter.can_document_member(
                    INSTANCEATTR, "another_int", True, class_documenter,
                )
            )
            # ... plain ints and regular properties are not.
            self.assertFalse(
                TraitDocumenter.can_document_member(
                    INSTANCEATTR, "magic_number", True, class_documenter,
                )
            )
            self.assertFalse(
                TraitDocumenter.can_document_member(
                    INSTANCEATTR, "not_a_trait", True, class_documenter,
                )
            )

    @contextlib.contextmanager
    def create_directive(self):
        """
        Helper function to create a "directive" suitable
        for instantiating the TraitDocumenter with, along with resources
        to support that directive, and clean up the resources afterwards.

        Returns
        -------
        contextmanager
            A context manager that returns a DocumenterBridge instance.
        """
        with self.tmpdir() as tmpdir:
            # Ensure configuration file exists.
            conf_file = os.path.join(tmpdir, "conf.py")
            with open(conf_file, "w", encoding="utf-8") as f:
                f.write(CONF_PY)
            app = SphinxTestApp(srcdir=path(tmpdir))
            app.builder.env.app = app
            app.builder.env.temp_data["docname"] = "dummy"
            kwds = {}
            state = mock.Mock()
            state.document.settings.tab_width = 8
            kwds["state"] = state
            yield DocumenterBridge(
                app.env, LoggingReporter(''), Options(), 1, **kwds)

    @contextlib.contextmanager
    def tmpdir(self):
        """
        Helper function to create a temporary directory.

        Returns
        -------
        contextmanager
            Context manager that returns the path to a temporary directory.
        """
        tmpdir = tempfile.mkdtemp()
        try:
            yield tmpdir
        finally:
            shutil.rmtree(tmpdir)
3,002 | mock thread safe | """
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import warnings
from concurrent.futures import Future
from unittest.mock import MagicMock
from lte.protos.mconfig.mconfigs_pb2 import PipelineD
from lte.protos.pipelined_pb2 import SetupUEMacRequest, UEMacFlowRequest
from magma.pipelined.app.base import global_epoch
from magma.pipelined.bridge_util import BridgeTools
from magma.pipelined.tests.app.start_pipelined import (
PipelinedController,
TestSetup,
)
from magma.pipelined.tests.pipelined_test_util import (
SnapshotVerifier,
create_service_manager,
fake_cwf_setup,
start_ryu_app_thread,
stop_ryu_app_thread,
)
from magma.subscriberdb.sid import SIDUtils
from orc8r.protos.directoryd_pb2 import DirectoryRecord
class CWFRestartResilienceTest(unittest.TestCase):
    """Restart-resilience tests for the CWF pipelined setup.

    Verifies that UE MAC flows are restored after a controller restart,
    with ARP recovery driven from directoryd records.
    """

    BRIDGE = 'testing_br'
    IFACE = 'testing_br'
    MAC_DEST = "5e:cc:cc:b1:49:4b"
    BRIDGE_IP_ADDRESS = '192.168.128.1'
    UE_BLOCK = '192.168.128.0/24'
    DPI_PORT = 'mon1'
    DPI_IP = '1.1.1.1'

    @classmethod
    @unittest.mock.patch(
        'netifaces.ifaddresses',
        return_value={0: [{'addr': '00:11:22:33:44:55'}]},
    )
    @unittest.mock.patch('netifaces.AF_LINK', 0)
    def setUpClass(cls, *_):
        """
        Starts the thread which launches ryu apps

        Create a testing bridge, add a port, setup the port interfaces. Then
        launch the ryu apps for testing pipelined. Gets the references
        to apps launched by using futures, mocks the redis policy_dictionary
        of ue_mac_controller
        """
        super(CWFRestartResilienceTest, cls).setUpClass()
        warnings.simplefilter('ignore')
        cls.service_manager = create_service_manager([], ['ue_mac', 'arpd'])
        ue_mac_controller_reference = Future()
        arp_controller_reference = Future()
        testing_controller_reference = Future()

        def METHOD_NAME(cmd, body):
            # Run scheduled callbacks synchronously so no real event loop
            # is needed in the test.
            cmd(body)

        loop_mock = MagicMock()
        loop_mock.call_soon_threadsafe = METHOD_NAME
        test_setup = TestSetup(
            apps=[
                PipelinedController.UEMac,
                PipelinedController.Arp,
                PipelinedController.Testing,
                PipelinedController.StartupFlows,
            ],
            references={
                PipelinedController.UEMac:
                    ue_mac_controller_reference,
                PipelinedController.Arp:
                    arp_controller_reference,
                PipelinedController.Testing:
                    testing_controller_reference,
                PipelinedController.StartupFlows:
                    Future(),
            },
            config={
                'setup_type': 'CWF',
                'bridge_name': cls.BRIDGE,
                'bridge_ip_address': cls.BRIDGE_IP_ADDRESS,
                'enforcement': {'poll_interval': 5},
                'internal_ip_subnet': '192.168.0.0/16',
                'nat_iface': 'eth2',
                'local_ue_eth_addr': False,
                'allow_unknown_arps': False,
                'enodeb_iface': 'eth1',
                'qos': {'enable': False},
                'clean_restart': False,
                'quota_check_ip': '1.2.3.4',
                'enable_nat': False,
                'dpi': {
                    'enabled': False,
                    'mon_port': 'mon1',
                    'mon_port_number': 32769,
                    'idle_timeout': 42,
                },
            },
            mconfig=PipelineD(
                ue_ip_block=cls.UE_BLOCK,
            ),
            loop=loop_mock,
            service_manager=cls.service_manager,
            integ_test=False,
        )

        BridgeTools.create_bridge(cls.BRIDGE, cls.IFACE)
        BridgeTools.create_internal_iface(
            cls.BRIDGE, cls.DPI_PORT,
            cls.DPI_IP,
        )

        cls.thread = start_ryu_app_thread(test_setup)
        # Block until the ryu apps publish their controller references.
        cls.ue_mac_controller = ue_mac_controller_reference.result()
        cls.testing_controller = testing_controller_reference.result()
        cls.arp_controller = arp_controller_reference.result()
        cls.arp_controller.add_arp_response_flow = MagicMock()

    @classmethod
    def tearDownClass(cls):
        # Stop the ryu thread and remove the test bridge.
        stop_ryu_app_thread(cls.thread)
        BridgeTools.destroy_bridge(cls.BRIDGE)

    @unittest.mock.patch('magma.pipelined.app.arp.get_all_records')
    def test_ue_mac_restart(self, directoryd_mock):
        """
        Verify that default flows are properly installed with empty setup
        Verify that ue mac flows are properly restored, with arp recovery from
        directoryd
        """
        imsi1 = 'IMSI111111111111111'
        imsi2 = 'IMSI222222222222222'
        ip1 = '152.81.12.41'
        mac1 = '5e:cc:cc:b1:aa:aa'
        mac2 = 'b2:6a:f3:b3:2f:4c'
        ap_mac_addr1 = '11:22:33:44:55:66'
        ap_mac_addr2 = '12:12:13:24:25:26'
        # directoryd provides the ARP record used for recovery.
        directoryd_mock.return_value = [
            DirectoryRecord(
                id=imsi1, fields={
                    'ipv4_addr': ip1,
                    'mac_addr': mac1,
                },
            ),
        ]
        # First: empty setup should install only the default flows.
        fake_cwf_setup(
            ue_mac_controller=self.ue_mac_controller,
        )
        snapshot_verifier = SnapshotVerifier(
            self, self.BRIDGE,
            self.service_manager,
            'default_flows',
            include_stats=False,
        )
        with snapshot_verifier:
            pass

        # Then: re-setup with UE MAC requests and verify recovery flows.
        setup_ue_mac_request = SetupUEMacRequest(
            requests=[
                UEMacFlowRequest(
                    sid=SIDUtils.to_pb(imsi1),
                    mac_addr=mac1,
                    msisdn='123456',
                    ap_mac_addr=ap_mac_addr1,
                    ap_name='magma',
                    pdp_start_time=1,
                ),
                UEMacFlowRequest(
                    sid=SIDUtils.to_pb(imsi2),
                    mac_addr=mac2,
                    msisdn='654321',
                    ap_mac_addr=ap_mac_addr2,
                    ap_name='amgam',
                    pdp_start_time=9,
                ),
            ],
            epoch=global_epoch,
        )
        fake_cwf_setup(
            ue_mac_controller=self.ue_mac_controller,
            setup_ue_mac_request=setup_ue_mac_request,
        )
        snapshot_verifier = SnapshotVerifier(
            self, self.BRIDGE,
            self.service_manager,
            'recovery_flows',
            include_stats=False,
        )
        with snapshot_verifier:
            pass
3,003 | id | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetAzureTrafficCollectorResult',
'AwaitableGetAzureTrafficCollectorResult',
'get_azure_traffic_collector',
'get_azure_traffic_collector_output',
]
@pulumi.output_type
class GetAzureTrafficCollectorResult:
    """
    Azure Traffic Collector resource.
    """
    def __init__(__self__, collector_policies=None, etag=None, METHOD_NAME=None, location=None, name=None, provisioning_state=None, system_data=None, tags=None, type=None, virtual_hub=None):
        # Each argument is runtime-type-checked before being stored on the
        # output type via pulumi.set.
        if collector_policies and not isinstance(collector_policies, list):
            raise TypeError("Expected argument 'collector_policies' to be a list")
        pulumi.set(__self__, "collector_policies", collector_policies)
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", METHOD_NAME)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
        if virtual_hub and not isinstance(virtual_hub, dict):
            raise TypeError("Expected argument 'virtual_hub' to be a dict")
        pulumi.set(__self__, "virtual_hub", virtual_hub)

    @property
    @pulumi.getter(name="collectorPolicies")
    def collector_policies(self) -> Sequence['outputs.ResourceReferenceResponse']:
        """
        Collector Policies for Azure Traffic Collector.
        """
        return pulumi.get(self, "collector_policies")

    @property
    @pulumi.getter
    def etag(self) -> str:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def METHOD_NAME(self) -> str:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def location(self) -> str:
        """
        Resource location.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the application rule collection resource.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> 'outputs.TrackedResourceResponseSystemData':
        """
        Metadata pertaining to creation and last modification of the resource.
        """
        return pulumi.get(self, "system_data")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="virtualHub")
    def virtual_hub(self) -> Optional['outputs.ResourceReferenceResponse']:
        """
        The virtualHub to which the Azure Traffic Collector belongs.
        """
        return pulumi.get(self, "virtual_hub")
class AwaitableGetAzureTrafficCollectorResult(GetAzureTrafficCollectorResult):
    """Awaitable wrapper: allows ``await`` on an already-resolved result."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The value is already resolved, so __await__ never actually
        # suspends; the dead `yield` just marks this as a generator.
        if False:
            yield self
        return GetAzureTrafficCollectorResult(
            collector_policies=self.collector_policies,
            etag=self.etag,
            METHOD_NAME=self.METHOD_NAME,
            location=self.location,
            name=self.name,
            provisioning_state=self.provisioning_state,
            system_data=self.system_data,
            tags=self.tags,
            type=self.type,
            virtual_hub=self.virtual_hub)
def get_azure_traffic_collector(azure_traffic_collector_name: Optional[str] = None,
                                resource_group_name: Optional[str] = None,
                                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAzureTrafficCollectorResult:
    """
    Gets the specified Azure Traffic Collector in a specified resource group


    :param str azure_traffic_collector_name: Azure Traffic Collector name
    :param str resource_group_name: The name of the resource group.
    """
    __args__ = dict()
    __args__['azureTrafficCollectorName'] = azure_traffic_collector_name
    __args__['resourceGroupName'] = resource_group_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # Synchronous invoke against the azure-native provider.
    __ret__ = pulumi.runtime.invoke('azure-native:networkfunction/v20221101:getAzureTrafficCollector', __args__, opts=opts, typ=GetAzureTrafficCollectorResult).value

    return AwaitableGetAzureTrafficCollectorResult(
        collector_policies=pulumi.get(__ret__, 'collector_policies'),
        etag=pulumi.get(__ret__, 'etag'),
        METHOD_NAME=pulumi.get(__ret__, 'id'),
        location=pulumi.get(__ret__, 'location'),
        name=pulumi.get(__ret__, 'name'),
        provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
        system_data=pulumi.get(__ret__, 'system_data'),
        tags=pulumi.get(__ret__, 'tags'),
        type=pulumi.get(__ret__, 'type'),
        virtual_hub=pulumi.get(__ret__, 'virtual_hub'))
@_utilities.lift_output_func(get_azure_traffic_collector)
def get_azure_traffic_collector_output(azure_traffic_collector_name: Optional[pulumi.Input[str]] = None,
                                       resource_group_name: Optional[pulumi.Input[str]] = None,
                                       opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetAzureTrafficCollectorResult]:
    """
    Gets the specified Azure Traffic Collector in a specified resource group


    :param str azure_traffic_collector_name: Azure Traffic Collector name
    :param str resource_group_name: The name of the resource group.
    """
    # Body intentionally empty: lift_output_func wraps the plain invoke
    # above into an Output-returning variant.
    ...
3,004 | test fc3d | import siconos.numerics as sn
import os.path
import scipy.sparse
import scipy.sparse.linalg
import numpy as np
# this could be changed at install time
data_dir = "data/"

# Solver selections used by the tests below: one set for the global
# (gfc3d) formulation and several for the reduced (fc3d) formulation.
# solvers = [sn.SICONOS_GLOBAL_FRICTION_3D_NSGS, sn.SICONOS_GLOBAL_FRICTION_3D_NSN_AC]
solvers = (sn.SICONOS_GLOBAL_FRICTION_3D_NSGS,)
solvers_reduced1 = (sn.SICONOS_FRICTION_3D_NSGS, sn.SICONOS_FRICTION_3D_NSN_AC)
solvers_reduced2 = (sn.SICONOS_FRICTION_3D_NSN_AC,)  # sn.SICONOS_FRICTION_3D_NSN_FB)
solvers_reduced3 = (sn.SICONOS_FRICTION_3D_NSGS,)
def condensed_from_global(fcp):
    """Condense a global friction-contact problem into reduced form.

    Builds W = H^T M^{-1} H and q' = H^T M^{-1} q from the global problem
    and returns a FrictionContactProblem over the contact variables only.
    """
    # spsolve expect the indices to be cint aka 32 bits int
    # Hence, we do some magic to circumvent those issues.
    Mcoo = scipy.sparse.coo_matrix(fcp.M)
    Mcsc = scipy.sparse.csc_matrix(Mcoo)
    Hcoo = scipy.sparse.coo_matrix(fcp.H)
    Hcsc = scipy.sparse.csc_matrix(Hcoo)
    WW = Hcsc.T.dot(scipy.sparse.linalg.spsolve(Mcsc, Hcsc))
    qprime = fcp.H.T.dot(scipy.sparse.linalg.spsolve(Mcsc, fcp.q))
    fcp_reduced = sn.FrictionContactProblem()
    fcp_reduced.dimension = fcp.dimension
    fcp_reduced.numberOfContacts = fcp.numberOfContacts
    # this is a hack to deal with the inability of fc3d solvers to work with
    # sparse matrices
    _, Wsbm = sn.SBM_from_csparse(3, WW)
    fcp_reduced.M = Wsbm
    fcp_reduced.mu = fcp.mu
    fcp_reduced.q = fcp.b + qprime
    return fcp_reduced
def solve_reduced(fcp, solver_reduced):
    """Run a reduced-form fc3d solver on the problem; return the driver info code."""
    SO_reduced = sn.SolverOptions(solver_reduced)
    SO_reduced.iparam[0] = 100000
    # Tolerance scaled with sqrt of the number of contacts.
    SO_reduced.dparam[0] = np.sqrt(fcp.numberOfContacts) * 1e-9
    size_reaction = fcp.numberOfContacts * 3
    reaction_reduced = np.zeros((size_reaction,))
    velocities_reduced = np.zeros((size_reaction,))
    return sn.fc3d_driver(fcp, reaction_reduced, velocities_reduced, SO_reduced)
def solve_global(fcp, solver):
    """Run a global gfc3d solver on the problem; return the driver info code."""
    SO = sn.SolverOptions(solver)
    # Tolerance scaled with sqrt of the number of contacts.
    SO.dparam[0] = np.sqrt(fcp.numberOfContacts) * 1e-9
    SO.iparam[0] = 10000
    n, m = fcp.H.shape
    size_reaction = m
    reaction = np.zeros((size_reaction,))
    velocities = np.zeros((size_reaction,))
    global_velocities = np.zeros((n,))
    return sn.gfc3d_driver(fcp, reaction, velocities, global_velocities, SO)
def test_gfc3d():
    """Solve fclib global problems with each solver, then again in reduced form."""
    data_files = (
        "LMGC_GFC3D_CubeH8.hdf5",
        "LMGC_GlobalFrictionContactProblem00046.hdf5",
    )
    mark_as_failed = False
    sn.numerics_set_verbose(1)
    for d in data_files:
        full_path = data_dir + d
        # NOTE(review): if the data file is absent the test silently passes;
        # confirm that skipping (rather than failing) is intended.
        if os.path.isfile(full_path):
            fcp = sn.globalFrictionContact_fclib_read(full_path)
            for s in solvers:
                res = solve_global(fcp, s)
                if res:
                    print(
                        "Solver {:} on problem {:} failed with info = {:}".format(
                            sn.solver_options_id_to_name(s), d, res
                        )
                    )
                    mark_as_failed = True
                else:
                    print(
                        "Solver {:} on problem {:} is ok".format(
                            sn.solver_options_id_to_name(s), d
                        )
                    )
            # Solve the same problem again in condensed (reduced) form.
            fcp_reduced = condensed_from_global(fcp)
            for s in solvers_reduced1:
                res = solve_reduced(fcp_reduced, s)
                if res:
                    print(
                        "Solver {:} on problem {:} in reduced form failed with info = {:}".format(
                            sn.solver_options_id_to_name(s), d, res
                        )
                    )
                    mark_as_failed = True
                else:
                    print(
                        "Solver {:} on problem {:} is ok".format(
                            sn.solver_options_id_to_name(s), d
                        )
                    )
    assert mark_as_failed is False
def METHOD_NAME():
    """Solve fclib reduced-form (fc3d) problems with each reduced solver."""
    data_files = ["Capsules-i125-1213.hdf5"]
    mark_as_failed = False
    for d in data_files:
        full_path = data_dir + d
        # NOTE(review): a missing data file silently skips the solve;
        # confirm that skipping (rather than failing) is intended.
        if os.path.isfile(full_path):
            sn.numerics_set_verbose(1)
            fcp = sn.frictionContact_fclib_read(full_path)
            for s in solvers_reduced3:
                res = solve_reduced(fcp, s)
                if res:
                    print(
                        "Solver {:} on problem {:} failed with info = {:}".format(
                            sn.solver_options_id_to_name(s), d, res
                        )
                    )
                    mark_as_failed = True
                else:
                    print(
                        "Solver {:} on problem {:} is ok".format(
                            sn.solver_options_id_to_name(s), d
                        )
                    )
    assert mark_as_failed is False
if __name__ == "__main__":
    # Run both the global and reduced-form drivers when executed directly.
    test_gfc3d()
    METHOD_NAME()
3,005 | test thread env | # Copyright (c) 2020 Horizon Robotics. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import torch
import alf
from alf.environments import suite_safety_gym, alf_environment
from alf.environments import thread_environment, parallel_environment
import alf.nest as nest
class SuiteSafetyGymTest(alf.test.TestCase):
    """Smoke tests for the safety-gym suite in unwrapped, threaded and parallel forms."""

    def setUp(self):
        super().setUp()
        if not suite_safety_gym.is_available():
            self.skipTest('suite_safety_gym is not available.')

    def tearDown(self):
        super().tearDown()
        # Each test stores its env on self._env so it can be closed here.
        self._env.close()

    def test_unwrapped_env(self):
        self._env = suite_safety_gym.load(
            environment_name='Safexp-PointGoal1-v0')
        self.assertIsInstance(self._env, alf_environment.AlfEnvironment)
        self.assertEqual(torch.float32, self._env.observation_spec().dtype)
        self.assertEqual((suite_safety_gym.VectorReward.REWARD_DIMENSION, ),
                         self._env.reward_spec().shape)

        actions = self._env.action_spec().sample()
        for _ in range(10):
            # unwrapped env (not thread_env or parallel_env) needs to convert
            # from tensor to array
            time_step = self._env.step(actions.cpu().numpy())

    def METHOD_NAME(self):
        # Thread-wrapped env accepts tensor actions directly.
        self._env = thread_environment.ThreadEnvironment(
            lambda: suite_safety_gym.load(environment_name=
                                          'Safexp-PointGoal1-v0'))
        self.assertIsInstance(self._env, alf_environment.AlfEnvironment)
        self.assertEqual(torch.float32, self._env.observation_spec().dtype)
        self.assertEqual((suite_safety_gym.VectorReward.REWARD_DIMENSION, ),
                         self._env.reward_spec().shape)

        actions = self._env.action_spec().sample()
        for _ in range(10):
            time_step = self._env.step(actions)

    def test_parallel_env(self):
        env_num = 8

        def ctor(env_id=None):
            return suite_safety_gym.load(
                environment_name='Safexp-PointGoal1-v0')

        constructor = functools.partial(ctor)

        self._env = parallel_environment.ParallelAlfEnvironment(
            [constructor] * env_num)
        self.assertTrue(self._env.batched)
        self.assertEqual(self._env.batch_size, env_num)
        self.assertEqual(torch.float32, self._env.observation_spec().dtype)
        self.assertEqual((suite_safety_gym.VectorReward.REWARD_DIMENSION, ),
                         self._env.reward_spec().shape)

        actions = self._env.action_spec().sample(outer_dims=(env_num, ))
        for _ in range(10):
            time_step = self._env.step(actions)

    def test_env_info(self):
        # test creating multiple envs in the same process
        l0_env = suite_safety_gym.load(environment_name="Safexp-PointGoal0-v0")
        l1_env = suite_safety_gym.load(environment_name='Safexp-PointGoal1-v0')

        # level 0 envs don't have costs
        actions = l0_env.action_spec().sample()
        time_step = l0_env.step(actions.cpu().numpy())
        # ['cost_exception', 'goal_met', 'cost']
        self.assertEqual(len(time_step.env_info.keys()), 3)
        self.assertFalse('cost_hazards' in time_step.env_info.keys())
        l0_env.close()

        actions = l1_env.action_spec().sample()
        time_step = l1_env.step(actions.cpu().numpy())
        self.assertGreater(len(time_step.env_info.keys()), 3)
        self.assertTrue('cost_hazards' in time_step.env_info.keys())
        # Leave l1_env for tearDown to close.
        self._env = l1_env
# Allow running this test module directly.
if __name__ == '__main__':
    alf.test.main()
3,006 | load personas | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
from parlai.core.params import ParlaiParser
from parlai.core.opt import Opt
from parlai.core.worlds import create_task
from parlai.agents.fixed_response.fixed_response import FixedResponseAgent
from parlai.tasks.self_chat.worlds import SelfChatWorld as SelfChatBaseWorld
from parlai.tasks.interactive.worlds import InteractiveWorld as InteractiveBaseWorld
import random
def get_personas(opt, shared=None):
    """Fetch the persona pool, preferring the list cached in ``shared``.

    Args:
        opt: ParlAI options used to load ConvAI2 data on a cache miss.
        shared: optional shared-world dict that may carry 'personas_list'.

    Returns:
        list of persona strings.
    """
    if shared:
        try:
            # Reuse personas already loaded by a parent world.
            return shared['personas_list']
        except KeyError:
            pass
    return METHOD_NAME(opt=opt)
def METHOD_NAME(opt):
    """Build the persona pool by iterating the ConvAI2 'both' teacher."""
    print('[ loading personas.. ]')
    # Create ConvAI2 data so we can assign personas.
    convai2_opt = opt.copy()
    convai2_opt['task'] = 'convai2:both'
    if convai2_opt['datatype'].startswith('train'):
        convai2_opt['datatype'] = 'train:evalmode'
    convai2_opt['interactive_task'] = False
    convai2_opt['selfchat_task'] = False
    # A fixed None-response agent just drives the teacher forward.
    convai2_agent = FixedResponseAgent({'fixed_response': None})
    convai2_world = create_task(convai2_opt, convai2_agent)
    personas = set()
    while not convai2_world.epoch_done():
        convai2_world.parley()
        msg = convai2_world.get_acts()[0]
        # Find a new episode
        if msg.get('episode_done', False) and not convai2_world.epoch_done():
            convai2_world.parley()
            msg = convai2_world.get_acts()[0]
            # The first turn of a 'both' episode lists both speakers'
            # persona lines in its text.
            txt = msg.get('text', '').split('\n')
            a1_persona = []
            a2_persona = []
            for t in txt:
                if t.startswith("partner's persona:"):
                    # Normalize the partner's lines to 'your persona:' form
                    # so both entries in the pool share one format.
                    a1_persona.append(t.replace("partner's persona:", 'your persona:'))
                if t.startswith('your persona:'):
                    a2_persona.append(t)
            personas.add('\n'.join(a1_persona))
            personas.add('\n'.join(a2_persona))
    print('[ loaded ' + str(len(personas)) + ' personas ]')
    return list(personas)
class InteractiveWorld(InteractiveBaseWorld):
    """Interactive chat world that assigns ConvAI2 personas to both speakers."""

    @classmethod
    def add_cmdline_args(
        cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
    ) -> ParlaiParser:
        super().add_cmdline_args(parser, partial_opt)
        parser = parser.add_argument_group('ConvAI2 Interactive World')
        parser.add_argument(
            '--display-partner-persona',
            type='bool',
            default=True,
            help='Display your partner persona at the end of the chat',
        )
        return parser

    def __init__(self, opt, agents, shared=None):
        super().__init__(opt, agents, shared)
        self.display_partner_persona = self.opt['display_partner_persona']

    def init_contexts(self, shared=None):
        # Load (or reuse via `shared`) the ConvAI2 persona pool.
        self.personas_list = get_personas(opt=self.opt, shared=shared)

    def get_contexts(self):
        # Reseed from system entropy so repeated chats vary.
        random.seed()
        personas_1 = random.choice(self.personas_list)
        personas_2 = random.choice(self.personas_list)
        return personas_1, personas_2

    def finalize_episode(self):
        print("\nCHAT DONE.\n")
        if self.display_partner_persona:
            partner_persona = self.p2.replace('your persona:', 'partner\'s persona:')
            print(f"Your partner was playing the following persona:\n{partner_persona}")
        if not self.epoch_done():
            print("[ Preparing new chat ... ]\n")

    def share(self):
        shared_data = super().share()
        # Pass the loaded personas to child worlds to avoid reloading.
        shared_data['personas_list'] = self.personas_list
        return shared_data
class SelfChatWorld(SelfChatBaseWorld):
    """Self-chat ConvAI2 world: both bot speakers get a randomly drawn persona."""

    def init_contexts(self, shared=None):
        self.personas_list = get_personas(self.opt, shared=shared)

    def get_contexts(self):
        random.seed()
        pair = [random.choice(self.personas_list) for _ in range(2)]
        return pair
3,007 | test attentive email schema | import pytest
from fides.api.schemas.connection_configuration import SovrnSchema
from fides.api.schemas.connection_configuration.connection_secrets_attentive import (
AttentiveSchema,
)
from fides.api.schemas.connection_configuration.connection_secrets_email import (
AdvancedSettings,
AdvancedSettingsWithExtendedIdentityTypes,
EmailSchema,
ExtendedEmailSchema,
ExtendedIdentityTypes,
IdentityTypes,
)
from fides.api.service.connectors.email.sovrn_connector import SOVRN_REQUIRED_IDENTITY
class TestEmailSchema:
    """Validation tests for ``EmailSchema``."""

    def test_email_schema(self):
        """A fully specified schema validates successfully."""
        schema = EmailSchema(
            third_party_vendor_name="Dawn's Bakery",
            recipient_email_address="test@example.com",
            advanced_settings=AdvancedSettings(
                identity_types=IdentityTypes(email=True, phone_number=False)
            ),
        )
        assert schema

    def test_email_schema_default_identity_types(self):
        """Email-only identity types are the default."""
        schema = EmailSchema(
            recipient_email_address="test@example.com",
            third_party_vendor_name="Dawn's Bakery",
        )
        expected = AdvancedSettings(
            identity_types=IdentityTypes(email=True, phone_number=False)
        )
        assert schema.advanced_settings == expected

    def test_email_schema_invalid_email(self) -> None:
        """A malformed recipient address is rejected."""
        with pytest.raises(ValueError) as exc_info:
            EmailSchema(
                third_party_vendor_name="Dawn's Bakery",
                recipient_email_address="to_email",
                advanced_settings=AdvancedSettings(
                    identity_types=IdentityTypes(email=True, phone_number=False)
                ),
            )
        assert exc_info.value.errors()[0]["msg"] == "value is not a valid email address"

    def test_no_identities_supplied(self):
        """At least one identity type must be enabled."""
        with pytest.raises(ValueError) as exc_info:
            EmailSchema(
                third_party_vendor_name="Dawn's Bakery",
                recipient_email_address="test@example.com",
                advanced_settings=AdvancedSettings(
                    identity_types=IdentityTypes(email=False, phone_number=False)
                ),
            )
        assert exc_info.value.errors()[0]["msg"] == "Must supply at least one identity_type."

    def test_missing_third_party_vendor_name(self):
        """The vendor name is a required field."""
        with pytest.raises(ValueError) as exc_info:
            EmailSchema(
                recipient_email_address="test@example.com",
                advanced_settings=AdvancedSettings(
                    identity_types=IdentityTypes(email=True, phone_number=False)
                ),
            )
        first_error = exc_info.value.errors()[0]
        assert first_error["msg"] == "field required"
        assert first_error["loc"][0] == "third_party_vendor_name"

    def test_missing_recipient(self):
        """The recipient address is a required field."""
        with pytest.raises(ValueError) as exc_info:
            EmailSchema(
                third_party_vendor_name="Dawn's Bakery",
                advanced_settings=AdvancedSettings(
                    identity_types=IdentityTypes(email=True, phone_number=False)
                ),
            )
        first_error = exc_info.value.errors()[0]
        assert first_error["msg"] == "field required"
        assert first_error["loc"][0] == "recipient_email_address"

    def test_extra_field(self):
        """Unknown fields are rejected."""
        with pytest.raises(ValueError) as exc_info:
            EmailSchema(
                third_party_vendor_name="Dawn's Bakery",
                recipient_email_address="test@example.com",
                advanced_settings=AdvancedSettings(
                    identity_types=IdentityTypes(email=True, phone_number=False)
                ),
                extra_field="extra_value",
            )
        assert exc_info.value.errors()[0]["msg"] == "extra fields not permitted"
class TestExtendedEmailSchema:
    """Validation tests for ``ExtendedEmailSchema`` (adds cookie identities)."""

    def test_extended_email_schema(self):
        """All fields round-trip, including extended cookie identities."""
        schema = ExtendedEmailSchema(
            third_party_vendor_name="Test Vendor Name",
            test_email_address="my_email@example.com",
            recipient_email_address="vendor@example.com",
            advanced_settings=AdvancedSettingsWithExtendedIdentityTypes(
                identity_types=ExtendedIdentityTypes(
                    email=False, phone_number=False, cookie_ids=["new_cookie_id"]
                )
            ),
        )
        expected = {
            "third_party_vendor_name": "Test Vendor Name",
            "test_email_address": "my_email@example.com",
            "recipient_email_address": "vendor@example.com",
        }
        for attr, value in expected.items():
            assert getattr(schema, attr) == value
        assert schema.advanced_settings.identity_types.cookie_ids == ["new_cookie_id"]

    def test_extended_email_schema_default_identity_types(self):
        """Email-only (no cookies) is the default identity configuration."""
        schema = ExtendedEmailSchema(
            third_party_vendor_name="Test Vendor Name",
            test_email_address="my_email@example.com",
            recipient_email_address="vendor@example.com",
        )
        default_types = ExtendedIdentityTypes(
            email=True, phone_number=False, cookie_ids=[]
        )
        assert schema.advanced_settings.identity_types == default_types

    def test_extended_consent_email_schema_no_identities(self):
        """At least one identity type (or cookie id) must be supplied."""
        with pytest.raises(ValueError):
            ExtendedEmailSchema(
                third_party_vendor_name="Test Vendor Name",
                test_email_address="my_email@example.com",
                recipient_email_address="vendor@example.com",
                advanced_settings=AdvancedSettingsWithExtendedIdentityTypes(
                    identity_types=ExtendedIdentityTypes(
                        email=False, phone_number=False, cookie_ids=[]
                    )
                ),
            )
class TestSovrnSchema:
    """Defaults baked into the Sovrn connector schema."""

    def test_sovrn_email_schema(self):
        schema = SovrnSchema(recipient_email_address="sovrn@example.com")
        assert schema.third_party_vendor_name == "Sovrn"
        assert schema.test_email_address is None
        assert schema.recipient_email_address == "sovrn@example.com"
        # Sovrn identifies users via its required cookie id, not email/phone.
        expected_settings = AdvancedSettingsWithExtendedIdentityTypes(
            identity_types=ExtendedIdentityTypes(
                email=False,
                phone_number=False,
                cookie_ids=[SOVRN_REQUIRED_IDENTITY],
            )
        )
        assert schema.advanced_settings == expected_settings
class TestAttentiveSchema:
    """Defaults baked into the Attentive connector schema."""

    def METHOD_NAME(self):
        schema = AttentiveSchema(recipient_email_address="attentive@example.com")
        assert schema.third_party_vendor_name == "Attentive"
        assert schema.test_email_address is None
        assert schema.recipient_email_address == "attentive@example.com"
        expected_settings = AdvancedSettings(
            identity_types=IdentityTypes(email=True, phone_number=False)
        )
        assert schema.advanced_settings == expected_settings
3,008 | test compute no instances | from re import search
from unittest import mock
GCP_PROJECT_ID = "123456789012"
class Test_compute_instance_block_project_wide_ssh_keys_disabled:
    """Unit tests for the compute_instance_block_project_wide_ssh_keys_disabled check."""

    # Import target patched in every test so the check sees our mocked client.
    _CLIENT_PATH = (
        "prowler.providers.gcp.services.compute."
        "compute_instance_block_project_wide_ssh_keys_disabled."
        "compute_instance_block_project_wide_ssh_keys_disabled.compute_client"
    )

    @staticmethod
    def _mock_compute_client(instances):
        """Return a fresh mocked compute client exposing *instances*.

        Bug fix: the original assigned attributes on the ``mock.MagicMock``
        *class* itself (missing parentheses), which mutates a shared object
        and leaks state between tests. A new instance is built per test.
        """
        client = mock.MagicMock()
        client.project_ids = [GCP_PROJECT_ID]
        client.instances = instances
        return client

    @staticmethod
    def _make_instance(metadata):
        """Build a throwaway Instance fixture with the given metadata dict."""
        from prowler.providers.gcp.services.compute.compute_service import Instance

        return Instance(
            name="test",
            id="1234567890",
            zone="us-central1-a",
            public_ip=True,
            metadata=metadata,
            shielded_enabled_vtpm=True,
            shielded_enabled_integrity_monitoring=True,
            confidential_computing=True,
            service_accounts=[],
            ip_forward=False,
            disks_encryption=[("disk1", False), ("disk2", False)],
            project_id=GCP_PROJECT_ID,
        )

    def METHOD_NAME(self):
        """With no instances the check produces no findings."""
        with mock.patch(self._CLIENT_PATH, new=self._mock_compute_client([])):
            from prowler.providers.gcp.services.compute.compute_instance_block_project_wide_ssh_keys_disabled.compute_instance_block_project_wide_ssh_keys_disabled import (
                compute_instance_block_project_wide_ssh_keys_disabled,
            )

            result = compute_instance_block_project_wide_ssh_keys_disabled().execute()
            assert len(result) == 0

    def test_one_compliant_instance_with_block_project_ssh_keys_true(self):
        """Metadata explicitly blocking project-wide SSH keys -> PASS."""
        instance = self._make_instance(
            {"items": [{"key": "block-project-ssh-keys", "value": "true"}]}
        )
        with mock.patch(self._CLIENT_PATH, new=self._mock_compute_client([instance])):
            from prowler.providers.gcp.services.compute.compute_instance_block_project_wide_ssh_keys_disabled.compute_instance_block_project_wide_ssh_keys_disabled import (
                compute_instance_block_project_wide_ssh_keys_disabled,
            )

            result = compute_instance_block_project_wide_ssh_keys_disabled().execute()
            assert len(result) == 1
            assert result[0].status == "PASS"
            assert search(
                f"The VM Instance {instance.name} is not making use of common/shared project-wide SSH key",
                result[0].status_extended,
            )
            assert result[0].resource_id == instance.id

    def test_one_instance_without_metadata(self):
        """No metadata at all -> FAIL (project-wide keys are not blocked)."""
        instance = self._make_instance({})
        with mock.patch(self._CLIENT_PATH, new=self._mock_compute_client([instance])):
            from prowler.providers.gcp.services.compute.compute_instance_block_project_wide_ssh_keys_disabled.compute_instance_block_project_wide_ssh_keys_disabled import (
                compute_instance_block_project_wide_ssh_keys_disabled,
            )

            result = compute_instance_block_project_wide_ssh_keys_disabled().execute()
            assert len(result) == 1
            assert result[0].status == "FAIL"
            assert search(
                f"The VM Instance {instance.name} is making use of common/shared project-wide SSH key",
                result[0].status_extended,
            )
            assert result[0].resource_id == instance.id

    def test_one_instance_with_block_project_ssh_keys_false(self):
        """Metadata explicitly allowing project-wide SSH keys -> FAIL."""
        instance = self._make_instance(
            {"items": [{"key": "block-project-ssh-keys", "value": "false"}]}
        )
        with mock.patch(self._CLIENT_PATH, new=self._mock_compute_client([instance])):
            from prowler.providers.gcp.services.compute.compute_instance_block_project_wide_ssh_keys_disabled.compute_instance_block_project_wide_ssh_keys_disabled import (
                compute_instance_block_project_wide_ssh_keys_disabled,
            )

            result = compute_instance_block_project_wide_ssh_keys_disabled().execute()
            assert len(result) == 1
            assert result[0].status == "FAIL"
            assert search(
                f"The VM Instance {instance.name} is making use of common/shared project-wide SSH key",
                result[0].status_extended,
            )
            assert result[0].resource_id == instance.id
3,009 | make root commitment | from hashlib import blake2b
import struct
from typing import (List, Optional)
from .mininode import (CBlockHeader, block_work_from_compact, ser_compactsize, ser_uint256)
from .util import (
NU5_BRANCH_ID,
assert_equal,
)
def H(msg: bytes, consensusBranchId: int) -> bytes:
    """BLAKE2b-256 of *msg*, personalized with 'ZcashHistory' + LE branch id."""
    personalization = b'ZcashHistory' + struct.pack("<I", consensusBranchId)
    hasher = blake2b(digest_size=32, person=personalization)
    hasher.update(msg)
    return hasher.digest()
class ZcashMMRNode():
    """A node in the Zcash history tree (an MMR over block metadata; see ZIP 221).

    Leaves summarize a single block; internal nodes summarize the contiguous
    block range covered by their children.
    """
    # leaf nodes have no children
    left_child: Optional['ZcashMMRNode']
    right_child: Optional['ZcashMMRNode']

    # commitments
    hashSubtreeCommitment: bytes
    nEarliestTimestamp: int
    nLatestTimestamp: int
    nEarliestTargetBits: int
    nLatestTargetBits: int
    hashEarliestSaplingRoot: bytes  # left child's sapling root
    hashLatestSaplingRoot: bytes  # right child's sapling root
    nSubTreeTotalWork: int  # total difficulty accumulated within each subtree
    nEarliestHeight: int
    nLatestHeight: int
    nSaplingTxCount: int  # number of Sapling transactions in block

    # NU5 only.
    hashEarliestOrchardRoot: Optional[bytes]  # left child's Orchard root
    hashLatestOrchardRoot: Optional[bytes]  # right child's Orchard root
    nOrchardTxCount: Optional[int]  # number of Orchard transactions in block

    consensusBranchId: bytes

    @classmethod
    def from_block(
            Z, block: CBlockHeader, height,
            sapling_root, sapling_tx_count,
            consensusBranchId,
            v2_data=None) -> 'ZcashMMRNode':
        '''Create a leaf node from a block.

        ``v2_data``, when given, is an (orchard_root, orchard_tx_count) pair
        and is only valid for the NU5 consensus branch.
        '''
        if v2_data is not None:
            assert_equal(consensusBranchId, NU5_BRANCH_ID)
            orchard_root = v2_data[0]
            orchard_tx_count = v2_data[1]
        else:
            orchard_root = None
            orchard_tx_count = None
        node = Z()
        node.left_child = None
        node.right_child = None
        # For a leaf, the subtree commitment is the block's own hash.
        node.hashSubtreeCommitment = ser_uint256(block.rehash())
        # The earliest/latest pairs collapse to this single block's values.
        node.nEarliestTimestamp = block.nTime
        node.nLatestTimestamp = block.nTime
        node.nEarliestTargetBits = block.nBits
        node.nLatestTargetBits = block.nBits
        node.hashEarliestSaplingRoot = sapling_root
        node.hashLatestSaplingRoot = sapling_root
        node.nSubTreeTotalWork = block_work_from_compact(block.nBits)
        node.nEarliestHeight = height
        node.nLatestHeight = height
        node.nSaplingTxCount = sapling_tx_count
        node.hashEarliestOrchardRoot = orchard_root
        node.hashLatestOrchardRoot = orchard_root
        node.nOrchardTxCount = orchard_tx_count
        node.consensusBranchId = consensusBranchId
        return node

    def serialize(self) -> bytes:
        '''Serialize this node.

        Field order and encodings mirror the consensus serialization and are
        therefore order-critical: do not reorder. The Orchard fields are
        appended only when present (NU5 nodes).
        '''
        buf = b''
        buf += self.hashSubtreeCommitment
        buf += struct.pack("<I", self.nEarliestTimestamp)
        buf += struct.pack("<I", self.nLatestTimestamp)
        buf += struct.pack("<I", self.nEarliestTargetBits)
        buf += struct.pack("<I", self.nLatestTargetBits)
        buf += self.hashEarliestSaplingRoot
        buf += self.hashLatestSaplingRoot
        buf += ser_uint256(self.nSubTreeTotalWork)
        buf += ser_compactsize(self.nEarliestHeight)
        buf += ser_compactsize(self.nLatestHeight)
        buf += ser_compactsize(self.nSaplingTxCount)
        if self.hashEarliestOrchardRoot is not None:
            buf += self.hashEarliestOrchardRoot
            buf += self.hashLatestOrchardRoot
            buf += ser_compactsize(self.nOrchardTxCount)
        return buf
def make_parent(
        left_child: ZcashMMRNode,
        right_child: ZcashMMRNode) -> ZcashMMRNode:
    """Combine two sibling subtrees into their parent node."""
    parent = ZcashMMRNode()
    parent.left_child = left_child
    parent.right_child = right_child
    # The parent commitment hashes the concatenated child serializations
    # under the (shared) consensus branch id.
    parent.hashSubtreeCommitment = H(
        left_child.serialize() + right_child.serialize(),
        left_child.consensusBranchId,
    )
    # "Earliest" metadata flows up from the left child, "latest" from the
    # right child; work and transaction counts are summed.
    parent.nEarliestTimestamp = left_child.nEarliestTimestamp
    parent.nLatestTimestamp = right_child.nLatestTimestamp
    parent.nEarliestTargetBits = left_child.nEarliestTargetBits
    parent.nLatestTargetBits = right_child.nLatestTargetBits
    parent.hashEarliestSaplingRoot = left_child.hashEarliestSaplingRoot
    parent.hashLatestSaplingRoot = right_child.hashLatestSaplingRoot
    parent.nSubTreeTotalWork = (
        left_child.nSubTreeTotalWork + right_child.nSubTreeTotalWork
    )
    parent.nEarliestHeight = left_child.nEarliestHeight
    parent.nLatestHeight = right_child.nLatestHeight
    parent.nSaplingTxCount = (
        left_child.nSaplingTxCount + right_child.nSaplingTxCount
    )
    parent.hashEarliestOrchardRoot = left_child.hashEarliestOrchardRoot
    parent.hashLatestOrchardRoot = right_child.hashLatestOrchardRoot
    # Orchard counts exist only post-NU5; otherwise propagate None.
    if left_child.nOrchardTxCount is None or right_child.nOrchardTxCount is None:
        parent.nOrchardTxCount = None
    else:
        parent.nOrchardTxCount = (
            left_child.nOrchardTxCount + right_child.nOrchardTxCount
        )
    parent.consensusBranchId = left_child.consensusBranchId
    return parent
def METHOD_NAME(root: ZcashMMRNode) -> bytes:
    """Compute the history-root commitment stored in a block header."""
    serialized_root = root.serialize()
    return H(serialized_root, root.consensusBranchId)
def get_peaks(node: ZcashMMRNode) -> List[ZcashMMRNode]:
    """Return the perfect-subtree peaks under *node*, left to right."""
    leaf_count = node.nLatestHeight - (node.nEarliestHeight - 1)
    assert leaf_count > 0
    # A non-power-of-two leaf count means this subtree is imperfect: its
    # peaks are found among its children. A power of two (including a lone
    # leaf) is itself a peak.
    if leaf_count & (leaf_count - 1):
        return get_peaks(node.left_child) + get_peaks(node.right_child)
    return [node]
def bag_peaks(peaks: List[ZcashMMRNode]) -> ZcashMMRNode:
    """Fold a left-to-right list of peaks into a single root node."""
    root = peaks[0]
    # Repeatedly absorb the next peak to the right into the running root.
    for peak in peaks[1:]:
        root = make_parent(root, peak)
    return root
def append(root: ZcashMMRNode, leaf: ZcashMMRNode) -> ZcashMMRNode:
    '''Append a leaf to an existing tree, return the new tree root'''

    def leaf_count(node):
        # Number of leaves covered by this (sub)tree.
        return node.nLatestHeight - (node.nEarliestHeight - 1)

    # Peaks of the current tree, left to right.
    peaks = get_peaks(root)
    # Merge the new leaf into the peaks from right to left: equal-sized
    # subtrees combine into a parent; unequal ones are kept as-is. This
    # produces the new peak list in reverse order.
    reversed_merged = []
    current = leaf
    for peak in reversed(peaks):
        if leaf_count(current) == leaf_count(peak):
            current = make_parent(peak, current)
        else:
            reversed_merged.append(current)
            current = peak
    reversed_merged.append(current)
    # Restore left-to-right order and bag into a single root.
    return bag_peaks(reversed_merged[::-1])
def delete(root: ZcashMMRNode) -> ZcashMMRNode:
    '''
    Delete the rightmost leaf node from an existing MMR
    Return the new tree root
    '''
    leaf_total = root.nLatestHeight - (root.nEarliestHeight - 1)
    # Odd leaf count: the rightmost "peak" is a bare leaf hanging off the
    # root, so the left child already is the new tree.
    if leaf_total % 2 == 1:
        return root.left_child
    # Even count: walk down the right (unbalanced) spine, keeping the left
    # (perfect) subtree at each level; the final right-most leaf is dropped
    # and the collected peaks are re-bagged into a new root.
    peaks = [root.left_child]
    spine = root.right_child
    while spine.left_child:
        peaks.append(spine.left_child)
        spine = spine.right_child
    return bag_peaks(peaks)
3,010 | open inclusive | # Copyright (C) 2016-2018 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import errno
import hashlib
import logging
import os
import shutil
import tempfile
from pathlib import PureWindowsPath
from lib.cuckoo.common.config import Config
from lib.cuckoo.common.exceptions import CuckooOperationalError
from lib.cuckoo.common.misc import getuser
from lib.cuckoo.common.path_utils import path_exists, path_mkdir
cuckoo_conf = Config()
log = logging.getLogger()
def temppath():
    """Return the true temporary directory."""
    configured = cuckoo_conf.cuckoo.tmppath
    # Backwards compatibility with older configuration: an unset value or a
    # bare "/tmp" falls back to a per-user directory under the system tmp.
    if configured and configured != "/tmp":
        return configured
    return os.path.join(tempfile.gettempdir(), f"cuckoo-tmp-{getuser()}")
def open_exclusive(path, mode="xb"):
    """Atomically create and open *path*, failing if it already exists.

    Uses O_CREAT|O_EXCL (equivalent to Python 3 open(..., "x") modes) so the
    creation cannot race with another process.

    @param path: file path to create (mode 0o644).
    @param mode: fdopen mode; must be a write mode matching O_WRONLY.
    @return: a file object wrapping the new descriptor.
    @raise FileExistsError: if the path already exists.
    """
    fd = os.open(path, os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0o644)
    try:
        return os.fdopen(fd, mode)
    except OSError as e:
        # Bug fix: the original called log.error(e, "...") which passes the
        # hint string as a %-format argument and breaks logging's formatting.
        log.error("%s. You might need to add whitelist folder in resultserver.py", e)
        # fdopen failed, so close the raw descriptor ourselves to avoid a leak.
        os.close(fd)
        raise
def METHOD_NAME(path, mode="ab"):
    """Open *path* for appending, creating it (mode 0o644) if missing.

    @param path: file path to open/create.
    @param mode: fdopen mode; must be a write mode matching O_WRONLY|O_APPEND.
    @return: a file object wrapping the descriptor.
    """
    fd = os.open(path, os.O_CREAT | os.O_APPEND | os.O_WRONLY, 0o644)
    try:
        return os.fdopen(fd, mode)
    except OSError as e:
        # Bug fix: the original called log.error(e, "...") which passes the
        # hint string as a %-format argument and breaks logging's formatting.
        log.error("%s. You might need to add whitelist folder in resultserver.py", e)
        # fdopen failed, so close the raw descriptor ourselves to avoid a leak.
        os.close(fd)
        raise
class Storage:
    """Base namespace for path/file helper utilities."""

    @staticmethod
    def get_filename_from_path(path):
        """Cross-platform filename extraction from path.
        @param path: file path (Windows or POSIX style).
        @return: the final path component (filename).
        """
        # PureWindowsPath understands both "\\" and "/" separators, so this
        # handles Windows-style guest paths on any host platform.
        return PureWindowsPath(path).name
class Folders(Storage):
    """Directory-level helpers: create, copy and delete folder trees."""

    @staticmethod
    def create(root=".", folders=None):
        """Create a directory or multiple directories.
        @param root: root path (string, or tuple/list of path components).
        @param folders: folder name or list of names to create under root;
            if None, root itself is created.
        @raise CuckooOperationalError: if a folder cannot be created.
        """
        if isinstance(root, (tuple, list)):
            root = os.path.join(*root)
        if folders is None:
            folders = [""]
        elif isinstance(folders, str):
            folders = (folders,)
        for folder in folders:
            target = os.path.join(root, folder)
            if os.path.isdir(target):
                continue
            try:
                path_mkdir(target)
            except OSError as e:
                # A concurrent process may have created it first; that's fine.
                if e.errno != errno.EEXIST:
                    raise CuckooOperationalError(
                        f"Unable to create folder: {target}"
                    ) from e

    @staticmethod
    def copy(src, dest):
        """Replace *dest* with a recursive copy of *src*."""
        if path_exists(dest):
            shutil.rmtree(dest)
        shutil.copytree(src, dest)

    @staticmethod
    def create_temp(path=None):
        """Create and return a fresh temporary directory."""
        return tempfile.mkdtemp(dir=path or temppath())

    @staticmethod
    def delete(*folder):
        """Delete a folder and all its subdirectories.
        @param folder: path or components to path to delete.
        @raise CuckooOperationalError: if fails to delete folder.
        """
        target = os.path.join(*folder)
        if not path_exists(target):
            return
        try:
            shutil.rmtree(target)
        except OSError as e:
            raise CuckooOperationalError(f"Unable to delete folder: {target}") from e
class Files(Storage):
    """File-level helpers: temporary files, creation, copying and hashing."""

    @staticmethod
    def temp_put(content, path=None):
        """Store *content* in a new temporary file.
        @param content: bytes, or a file-like object with .read().
        @param path: directory to store the file in (defaults to temppath()).
        @return: path of the created file.
        """
        fd, filepath = tempfile.mkstemp(prefix="upload_", dir=path or temppath())
        try:
            if hasattr(content, "read"):
                chunk = content.read(1024)
                while chunk:
                    os.write(fd, chunk)
                    chunk = content.read(1024)
            else:
                os.write(fd, content)
        finally:
            # Bug fix: always release the descriptor, even if a write fails
            # (the original leaked fd on error).
            os.close(fd)
        return filepath

    @staticmethod
    def temp_named_put(content, filename, path=None):
        """Store a named temporary file.
        @param content: the content of this file
        @param filename: filename that the file should have
        @param path: directory path to store the file
        @return: full path to the temporary file
        """
        # Strip any directory components a caller-supplied name may carry.
        filename = Storage.get_filename_from_path(filename)
        dirpath = tempfile.mkdtemp(dir=path or temppath())
        Files.create(dirpath, filename, content)
        return os.path.join(dirpath, filename)

    @staticmethod
    def create(root, filename, content):
        """Create *filename* under *root* with the given content.
        @param root: directory (string, or tuple/list of path components).
        @param filename: name of the file to create.
        @param content: bytes, or a file-like object with .read().
        @return: full path of the created file.
        """
        if isinstance(root, (tuple, list)):
            root = os.path.join(*root)
        filepath = os.path.join(root, filename)
        with open(filepath, "wb") as f:
            if hasattr(content, "read"):
                chunk = content.read(1024 * 1024)
                while chunk:
                    f.write(chunk)
                    chunk = content.read(1024 * 1024)
            else:
                f.write(content)
        return filepath

    @staticmethod
    def copy(path_target, path_dest):
        """Copy a file. The destination may be a directory.
        @param path_target: source file path.
        @param path_dest: destination file or directory path.
        @return: path of the copied file inside the destination.
        """
        shutil.copy(src=path_target, dst=path_dest)
        # NOTE(review): the return value assumes path_dest is a directory;
        # when path_dest is a file path the joined result is wrong — confirm
        # callers only pass directories.
        return os.path.join(path_dest, os.path.basename(path_target))

    @staticmethod
    def hash_file(method, filepath):
        """Calculate a hash of a file, reading it in 1 MiB chunks.
        @param method: callable hash constructor (e.g. hashlib.sha256).
        @param filepath: file path.
        @return: hex digest string.
        """
        h = method()
        with open(filepath, "rb") as f:
            buf = f.read(1024 * 1024)
            while buf:
                h.update(buf)
                buf = f.read(1024 * 1024)
        return h.hexdigest()

    @staticmethod
    def md5_file(filepath):
        """Hex MD5 digest of a file."""
        return Files.hash_file(hashlib.md5, filepath)

    @staticmethod
    def sha1_file(filepath):
        """Hex SHA-1 digest of a file."""
        return Files.hash_file(hashlib.sha1, filepath)

    @staticmethod
    def sha256_file(filepath):
        """Hex SHA-256 digest of a file."""
        return Files.hash_file(hashlib.sha256, filepath)
3,011 | test logout | from test import test_support as support
# If we end up with a significant number of tests that don't require
# threading, this test module should be split. Right now we skip
# them all if we don't have threading.
threading = support.import_module('threading')
from contextlib import contextmanager
import imaplib
import os.path
import SocketServer
import time
from test_support import reap_threads, verbose, transient_internet
import unittest
try:
import ssl
except ImportError:
ssl = None
CERTFILE = None
class TestImaplib(unittest.TestCase):
    """Local tests that need no server."""

    def test_that_Time2Internaldate_returns_a_result(self):
        # We can only verify that a result is produced, not its exact value,
        # because the output depends on the machine's timezone.
        for value in (2000000000, 2000000000.0, time.localtime(2000000000),
                      '"18-May-2033 05:33:20 +0200"'):
            imaplib.Time2Internaldate(value)
# Provide SSL-capable server/client classes when the ssl module exists;
# otherwise leave placeholders so the SSL test classes can be skipped.
if ssl:
    class SecureTCPServer(SocketServer.TCPServer):
        def get_request(self):
            # Wrap each accepted connection in server-side TLS using the
            # test certificate (CERTFILE is configured in test_main()).
            newsocket, fromaddr = self.socket.accept()
            connstream = ssl.wrap_socket(newsocket,
                                         server_side=True,
                                         certfile=CERTFILE)
            return connstream, fromaddr

    IMAP4_SSL = imaplib.IMAP4_SSL

else:
    class SecureTCPServer:
        pass

    IMAP4_SSL = None
class SimpleIMAPHandler(SocketServer.StreamRequestHandler):
    """Minimal IMAP4 server: sends a greeting, then dispatches each received
    command line to a cmd_<NAME> method if one is defined."""

    timeout = 1

    def _send(self, message):
        # Echo outgoing traffic when the test suite runs verbosely.
        if verbose: print "SENT:", message.strip()
        self.wfile.write(message)

    def handle(self):
        # Send a welcome message.
        self._send('* OK IMAP4rev1\r\n')
        while 1:
            # Gather up input until we receive a line terminator or we timeout.
            # Accumulate read(1) because it's simpler to handle the differences
            # between naked sockets and SSL sockets.
            line = ''
            while 1:
                try:
                    part = self.rfile.read(1)
                    if part == '':
                        # Naked sockets return empty strings..
                        return
                    line += part
                except IOError:
                    # ..but SSLSockets raise exceptions.
                    return
                if line.endswith('\r\n'):
                    break
            if verbose: print 'GOT:', line.strip()
            # An IMAP command line is "<tag> <command> [args...]".
            splitline = line.split()
            tag = splitline[0]
            cmd = splitline[1]
            args = splitline[2:]
            # Dispatch to cmd_<COMMAND> when implemented, else reply BAD.
            if hasattr(self, 'cmd_%s' % (cmd,)):
                getattr(self, 'cmd_%s' % (cmd,))(tag, args)
            else:
                self._send('%s BAD %s unknown\r\n' % (tag, cmd))

    def cmd_CAPABILITY(self, tag, args):
        # Minimal capability advertisement needed by imaplib's handshake.
        self._send('* CAPABILITY IMAP4rev1\r\n')
        self._send('%s OK CAPABILITY completed\r\n' % (tag,))
class BaseThreadedNetworkedTests(unittest.TestCase):
    """Shared machinery for tests against a local server running in a thread.

    Subclasses provide ``server_class`` (plain or SSL TCPServer) and
    ``imap_class`` (the matching imaplib client class).
    """

    def make_server(self, addr, hdlr):
        """Start ``self.server_class`` serving *hdlr* on *addr* in a daemon thread.

        Returns the (server, thread) pair for later shutdown via reap_server().
        """
        class MyServer(self.server_class):
            def handle_error(self, request, client_address):
                # Fail loudly instead of SocketServer's default print-and-continue.
                self.close_request(request)
                self.server_close()
                raise

        if verbose: print "creating server"
        server = MyServer(addr, hdlr)
        self.assertEqual(server.server_address, server.socket.getsockname())
        if verbose:
            print "server created"
            print "ADDR =", addr
            print "CLASS =", self.server_class
            print "HDLR =", server.RequestHandlerClass
        t = threading.Thread(
            name='%s serving' % self.server_class,
            target=server.serve_forever,
            # Short poll interval to make the test finish quickly.
            # Time between requests is short enough that we won't wake
            # up spuriously too many times.
            kwargs={'poll_interval':0.01})
        t.daemon = True  # In case this function raises.
        t.start()
        if verbose: print "server running"
        return server, t

    def reap_server(self, server, thread):
        """Shut the server down and join its serving thread."""
        if verbose: print "waiting for server"
        server.shutdown()
        thread.join()
        if verbose: print "done"

    @contextmanager
    def reaped_server(self, hdlr):
        """Context manager yielding a running server; always reaps it on exit."""
        server, thread = self.make_server((support.HOST, 0), hdlr)
        try:
            yield server
        finally:
            self.reap_server(server, thread)

    @reap_threads
    def test_connect(self):
        # A plain connect/shutdown round trip must succeed.
        with self.reaped_server(SimpleIMAPHandler) as server:
            client = self.imap_class(*server.server_address)
            client.shutdown()

    @reap_threads
    def test_issue5949(self):
        # A truncated greeting must raise IMAP4.abort, not hang (issue #5949).
        class EOFHandler(SocketServer.StreamRequestHandler):
            def handle(self):
                # EOF without sending a complete welcome message.
                self.wfile.write('* OK')
        with self.reaped_server(EOFHandler) as server:
            self.assertRaises(imaplib.IMAP4.abort,
                              self.imap_class, *server.server_address)
class ThreadedNetworkedTests(BaseThreadedNetworkedTests):
    # Plain-TCP variant of the threaded local-server tests.
    server_class = SocketServer.TCPServer
    imap_class = imaplib.IMAP4
@unittest.skipUnless(ssl, "SSL not available")
class ThreadedNetworkedTestsSSL(BaseThreadedNetworkedTests):
    # SSL variant; skipped entirely when the ssl module is missing.
    server_class = SecureTCPServer
    imap_class = IMAP4_SSL
class RemoteIMAPTest(unittest.TestCase):
    """Tests against a public IMAP server; require the 'network' resource."""

    host = 'cyrus.andrew.cmu.edu'
    port = 143
    username = 'anonymous'
    password = 'pass'
    imap_class = imaplib.IMAP4

    def setUp(self):
        with transient_internet(self.host):
            self.server = self.imap_class(self.host, self.port)

    def tearDown(self):
        # The logout test sets self.server to None after a clean logout.
        if self.server is not None:
            self.server.logout()

    def test_logincapa(self):
        # Plaintext logins should be disabled on the unencrypted port.
        self.assertTrue('LOGINDISABLED' in self.server.capabilities)

    def test_anonlogin(self):
        self.assertTrue('AUTH=ANONYMOUS' in self.server.capabilities)
        response = self.server.login(self.username, self.password)
        self.assertEqual(response[0], 'OK')

    def METHOD_NAME(self):
        response = self.server.logout()
        self.server = None
        self.assertEqual(response[0], 'BYE')
@unittest.skipUnless(ssl, "SSL not available")
class RemoteIMAP_SSLTest(RemoteIMAPTest):
    # Same remote checks, but over the IMAPS port.
    port = 993
    imap_class = IMAP4_SSL

    def test_logincapa(self):
        # Over SSL plaintext login is acceptable, so LOGINDISABLED must be
        # absent and plain authentication advertised.
        self.assertFalse('LOGINDISABLED' in self.server.capabilities)
        self.assertTrue('AUTH=PLAIN' in self.server.capabilities)
def test_main():
    """Run the local tests, plus the networked suites when permitted."""
    tests = [TestImaplib]
    if support.is_resource_enabled('network'):
        if ssl:
            # The threaded SSL server needs a certificate beside this file.
            global CERTFILE
            certdir = os.path.dirname(__file__) or os.curdir
            CERTFILE = os.path.join(certdir, "keycert.pem")
            if not os.path.exists(CERTFILE):
                raise support.TestFailed("Can't read certificate files!")
        tests += [ThreadedNetworkedTests, ThreadedNetworkedTestsSSL,
                  RemoteIMAPTest, RemoteIMAP_SSLTest]
    support.run_unittest(*tests)
if __name__ == "__main__":
    # Running this file directly opts in to the networked tests.
    support.use_resources = ['network']
    test_main()
3,012 | markdown2html | #############################################################################
# Copyright (c) 2018, Voilà Contributors #
# Copyright (c) 2018, QuantStack #
# #
# Distributed under the terms of the BSD 3-Clause License. #
# #
# The full license is in the file LICENSE, distributed with this software. #
#############################################################################
import mimetypes
from typing import Optional
import traitlets
from traitlets.config import Config
try:
from jinja2 import pass_context
except ImportError:
from jinja2 import contextfilter as pass_context
from nbconvert.exporters.html import HTMLExporter
from nbconvert.exporters.templateexporter import TemplateExporter
from nbconvert.filters.highlight import Highlight2HTML
from nbconvert.filters.markdown_mistune import (
IPythonRenderer,
MarkdownWithMath,
)
from .static_file_handler import TemplateStaticFileHandler
from .utils import create_include_assets_functions
try:
from nbconvert.filters.markdown_mistune import MISTUNE_V3 # noqa
NB_CONVERT_760 = True
except ImportError:
NB_CONVERT_760 = False
class VoilaMarkdownRenderer(IPythonRenderer):
    """Markdown renderer that inlines notebook-local images as data URIs."""

    def __init__(self, contents_manager, *args, **kwargs):
        self.contents_manager = contents_manager
        super().__init__(*args, **kwargs)

    def image(self, text: str, url: str, title: Optional[str] = None):
        # nbconvert < 7.6.0 passes the URL as the first positional argument.
        src = url if NB_CONVERT_760 else text
        cm = self.contents_manager
        if cm.file_exists(src):
            payload = cm.get(src, format="base64")
            data = payload["content"].replace("\n", "")  # strip the newline
            mime_type, _encoding = mimetypes.guess_type(src)
            src = f"data:{mime_type};base64,{data}"
        if NB_CONVERT_760:
            return super().image(text, src, title)
        return super().image(src, url, title)
class VoilaExporter(HTMLExporter):
"""Custom HTMLExporter that inlines the images using VoilaMarkdownRenderer"""
base_url = traitlets.Unicode(help="Base url for resources").tag(config=True)
markdown_renderer_class = traitlets.Type(VoilaMarkdownRenderer).tag(config=True)
# Can be a ContentsManager from notebook or jupyter_server, so Any will have to do for now
contents_manager = traitlets.Any()
show_margins = traitlets.Bool(
True,
help='show left and right margins for the "lab" template, this gives a "classic" template look',
).tag(config=True)
    @traitlets.validate("template_name")
    def _validate_template_name(self, template_name):
        """Pass the template name through unchanged, warning when the
        deprecated "classic" template is selected."""
        if template_name.value == "classic":
            self.log.warning(
                '"classic" template support will be removed in Voila 1.0.0, '
                'please use the "lab" template instead with the "--show-margins" '
                "option for a similar look"
            )
        return template_name.value
@pass_context
def METHOD_NAME(self, context, source):
# The voila exporter overrides the markdown renderer from the HTMLExporter
# to inline images.
cell = context["cell"]
attachments = cell.get("attachments", {})
cls = self.markdown_renderer_class
renderer = cls(
escape=False,
attachments=attachments,
contents_manager=self.contents_manager,
anchor_link_text=self.anchor_link_text,
)
return MarkdownWithMath(renderer=renderer).render(source)
# The voila exporter disables the CSSHTMLHeaderPreprocessor from the HTMLExporter.
@property
def default_config(self):
c = Config(
{
"VoilaExporter": {
"markdown_renderer_class": "voila.exporter.VoilaMarkdownRenderer"
}
}
)
c.merge(super().default_config)
return c
# Overriding the default template file.
@traitlets.default("template_file")
def default_template_file(self):
return "index.html.j2"
async def generate_from_notebook_node(
self, nb, resources=None, extra_context={}, page_config={}, **kw
):
# this replaces from_notebook_node, but calls template.generate instead of template.render
langinfo = nb.metadata.get("language_info", {})
lexer = langinfo.get("pygments_lexer", langinfo.get("name", None))
highlight_code = self.filters.get(
"highlight_code", Highlight2HTML(pygments_lexer=lexer, parent=self)
)
self.register_filter("highlight_code", highlight_code)
# We need outputs to be marked as trusted for the JupyterLab mimerendering logic
def trusted_output(output):
output["trusted"] = True
return output
self.register_filter("trusted", trusted_output)
# NOTE: we don't call HTML or TemplateExporter' from_notebook_node
nb_copy, resources = super(TemplateExporter, self).from_notebook_node(
nb, resources, **kw
)
resources.setdefault("raw_mimetypes", self.raw_mimetypes)
resources["global_content_filter"] = {
"include_code": not self.exclude_code_cell,
"include_markdown": not self.exclude_markdown,
"include_raw": not self.exclude_raw,
"include_unknown": not self.exclude_unknown,
"include_input": not self.exclude_input,
"include_output": not self.exclude_output,
"include_input_prompt": not self.exclude_input_prompt,
"include_output_prompt": not self.exclude_output_prompt,
"no_prompt": self.exclude_input_prompt and self.exclude_output_prompt,
}
async for output in self.template.generate_async(
nb=nb_copy,
resources=resources,
**extra_context,
static_url=self.static_url,
page_config=page_config,
):
yield (output, resources)
@property
def environment(self):
# enable Jinja async template execution
self.enable_async = True
env = super().environment
if "jinja2.ext.do" not in env.extensions:
env.add_extension("jinja2.ext.do")
return env
def get_template_paths(self):
return self.template_path
def static_url(self, path):
"""Mimics tornado.web.RequestHandler.static_url"""
settings = {
"static_url_prefix": f"{self.base_url}voila/templates/",
"static_path": None, # not used in TemplateStaticFileHandler.get_absolute_path
}
return TemplateStaticFileHandler.make_static_url(
settings, f"{self.template_name}/static/{path}"
)
def _init_resources(self, resources):
resources = super()._init_resources(resources)
include_assets_functions = create_include_assets_functions(
self.template_name, self.base_url
)
resources.update(include_assets_functions)
resources["show_margins"] = self.show_margins
return resources |
3,013 | test flatten module with random data | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from oneflow.test_utils.automated_test_util import *
from oneflow.test_utils.test_util import GenArgList
import oneflow as flow
import oneflow.unittest
def _test_flatten(test_case, device):
    """Check shapes and data of nn.Flatten, flow.flatten and Tensor.flatten."""
    flatten_module = flow.nn.Flatten()
    x = flow.Tensor(32, 2, 5, 5, device=flow.device(device))
    flow.nn.init.uniform_(x)

    def _check(result, expected_shape):
        # Flattening must only reshape: the raveled data stays identical.
        test_case.assertTrue(result.shape == flow.Size(expected_shape))
        test_case.assertTrue(
            np.array_equal(result.numpy().flatten(), x.numpy().flatten())
        )

    _check(flatten_module(x), (32, 50))
    _check(flow.flatten(x, start_dim=2), (32, 2, 25))
    _check(x.flatten(start_dim=1), (32, 50))
    _check(x.flatten(start_dim=1, end_dim=2), (32, 10, 5))
    _check(flow.flatten(x), (1600,))
def _test_flatten_backward(test_case, device):
    """The gradient of sum(flatten(x)) w.r.t. x must be all ones."""
    flatten_module = flow.nn.Flatten().to(flow.device(device))
    x = flow.Tensor(2, 3, 4, 5, device=flow.device(device))
    x.requires_grad = True
    flow.nn.init.uniform_(x)
    flatten_module(x).sum().backward()
    test_case.assertTrue(np.array_equal(np.ones(shape=(2, 3, 4, 5)), x.grad.numpy()))
@flow.unittest.skip_unless_1n1d()
class TestFlattenModule(flow.unittest.TestCase):
    """Flatten tests run on a single node / single device."""
    def test_cast(test_case):
        """Run the manual checks for every (test_fun, device) combination."""
        arg_dict = OrderedDict()
        arg_dict["test_fun"] = [_test_flatten, _test_flatten_backward]
        arg_dict["device"] = ["cpu", "cuda"]
        for arg in GenArgList(arg_dict):
            arg[0](test_case, *arg[1:])
    @autotest(n=5)
    def METHOD_NAME(test_case):
        """Autotest: nn.Flatten module with random start/end dims and inputs."""
        m = torch.nn.Flatten(
            start_dim=random(1, 6) | nothing(), end_dim=random(1, 6) | nothing()
        )
        m.train(random())
        device = random_device()
        m.to(device)
        x = random_tensor().to(device)
        y = m(x)
        return y
    @autotest(n=5)
    def test_flatten_with_random_data(test_case):
        """Autotest: functional flatten with random start/end dims."""
        device = random_device()
        x = random_tensor().to(device)
        y = torch.flatten(
            x,
            start_dim=random(1, 6).to(int) | nothing(),
            end_dim=random(1, 6).to(int) | nothing(),
        )
        return y
    @autotest(n=5, auto_backward=False, check_graph=True)
    def test_flatten_bool_with_random_data(test_case):
        """Autotest: flatten of bool tensors (no backward, graph checked)."""
        device = random_device()
        x = random_tensor().to(device=device, dtype=torch.bool)
        y = torch.flatten(
            x,
            start_dim=random(1, 6).to(int) | nothing(),
            end_dim=random(1, 6).to(int) | nothing(),
        )
        return y
    @autotest(n=5)
    def test_flatten_with_0dim_data(test_case):
        """Autotest: flatten of a 0-dim (scalar) tensor."""
        device = random_device()
        x = random_tensor(ndim=0).to(device)
        y = torch.flatten(
            x,
            start_dim=random(1, 6).to(int) | nothing(),
            end_dim=random(1, 6).to(int) | nothing(),
        )
        return y
    @profile(torch.flatten)
    def profile_flatten(test_case):
        """Profile torch.flatten on a 1000x1000 tensor."""
        torch.flatten(torch.ones(1000, 1000))
# Allow running this test file directly as a script.
if __name__ == "__main__":
    unittest.main()
3,014 | claim period | # flake8: noqa: E402
from dataclasses import dataclass
import ape
import pytest
from beamer.tests.util import alloc_accounts
@dataclass(frozen=True)
class Contracts:
    """Bundle of the deployed contracts used by a single test."""
    resolver: ape.project.Resolver
    fill_manager: ape.project.FillManager
    request_manager: ape.project.RequestManager
    l1_messenger: ape.project.TestL1Messenger
    l2_messenger: ape.project.TestL2Messenger
# ape local account, to be used for fulfilling requests.
# The private key here corresponds to the 10th account ganache creates on
# startup.
@pytest.fixture
def local_account():
    """Local account used as the allowed LP (see contracts fixture)."""
    return ape.accounts.test_accounts[-1]
@pytest.fixture
def deployer():
    """Account that deploys all contracts; also the default sender."""
    return alloc_accounts(1)[0]
@pytest.fixture(autouse=True)
def _add_default_sender(deployer):
    # Make `deployer` the implicit sender for every transaction in a test.
    with ape.accounts.use_sender(deployer):
        yield
# Point XDG_ directories to alternate locations to
# avoid interference with real locations. We're using
# only XDG_STATE_HOME for now.
@pytest.fixture(autouse=True)
def _set_xdg_dirs(tmpdir, monkeypatch):
    monkeypatch.setenv("XDG_STATE_HOME", str(tmpdir))
# Make sure that the chain is reset after each test since ape
# launches ganache only once for the entire test suite run.
@pytest.fixture(autouse=True)
def _reset_chain():
    snap_id = ape.chain.snapshot()
    yield
    ape.chain.restore(snap_id)
@pytest.fixture
def claim_stake():
    """Claim stake value passed to RequestManager (request_manager_params)."""
    return 100
@pytest.fixture
def claim_request_extension():
    """Claim request extension value for request_manager_params."""
    return 100
@pytest.fixture
def METHOD_NAME():
    """Claim period value for request_manager_params."""
    return 100
@pytest.fixture
def challenge_period_extension():
    """Challenge period extension value for request_manager_params."""
    return 50
@pytest.fixture()
def request_manager_params(
    claim_stake, claim_request_extension, METHOD_NAME, challenge_period_extension
):
    """Positional constructor arguments for the RequestManager contract."""
    return (
        claim_stake,
        claim_request_extension,
        METHOD_NAME,
        challenge_period_extension,
    )
@pytest.fixture
def min_fee_ppm():
    """Minimum fee (parts per million) for updateFees."""
    return 300_000
@pytest.fixture
def lp_fee_ppm():
    """LP fee (parts per million) for updateFees."""
    return 0
@pytest.fixture
def protocol_fee_ppm():
    """Protocol fee (parts per million) for updateFees."""
    return 0
@pytest.fixture
def fees_params(min_fee_ppm, lp_fee_ppm, protocol_fee_ppm):
    """Arguments for request_manager.updateFees()."""
    return min_fee_ppm, lp_fee_ppm, protocol_fee_ppm
@pytest.fixture
def finality_period():
    """Finality period value for updateChain."""
    return 200
@pytest.fixture
def transfer_cost():
    """Transfer cost value for updateChain."""
    return 0
@pytest.fixture
def target_weight_ppm():
    """Target weight (parts per million) for updateChain."""
    return 300_000
@pytest.fixture
def chain_params(finality_period, transfer_cost, target_weight_ppm):
    """Arguments for request_manager.updateChain()."""
    return finality_period, transfer_cost, target_weight_ppm
@pytest.fixture
def token_params():
    """Arguments for request_manager.updateToken()."""
    return int(10_000e18), int(1_500e18)
@pytest.fixture
def forward_state():
    """Whether test messengers forward state (passed to setForwardState)."""
    return False
@pytest.fixture
def contracts(
    deployer,
    token,
    forward_state,
    request_manager_params,
    fees_params,
    chain_params,
    token_params,
    local_account,
):
    """Deploy and wire the full contract suite for one test (see Contracts)."""
    # L1 contracts
    l1_messenger = deployer.deploy(ape.project.TestL1Messenger)
    l1_messenger.setForwardState(forward_state)
    resolver = deployer.deploy(ape.project.Resolver)
    # L2b contracts
    l2_messenger = deployer.deploy(ape.project.TestL2Messenger)
    l2_messenger.setForwardState(forward_state)
    fill_manager = deployer.deploy(ape.project.FillManager, l2_messenger.address)
    fill_manager.setResolver(resolver.address)
    # L2a contracts
    request_manager = deployer.deploy(ape.project.RequestManager, *request_manager_params)
    # Add allowed LPs
    fill_manager.addAllowedLp(local_account)
    request_manager.addAllowedLp(local_account)
    # Explicitly allow calls between contracts. The chain of trust:
    #
    # fill_manager -> L2 messenger -> L1 resolver ->
    # L1 messenger -> request_manager
    l1_chain_id = l2_chain_id = ape.chain.chain_id
    l2_messenger.addCaller(fill_manager.address)
    resolver.addCaller(l2_chain_id, l2_messenger.address, l1_messenger.address)
    l1_messenger.addCaller(resolver.address)
    request_manager.addCaller(l1_chain_id, l1_messenger.address, l2_messenger.address)
    resolver.addRequestManager(l2_chain_id, request_manager.address, l1_messenger.address)
    request_manager.updateFees(*fees_params)
    request_manager.updateChain(l2_chain_id, *chain_params)
    request_manager.updateToken(token.address, *token_params)
    return Contracts(
        l1_messenger=l1_messenger,
        l2_messenger=l2_messenger,
        resolver=resolver,
        fill_manager=fill_manager,
        request_manager=request_manager,
    )
@pytest.fixture()
def allowance():
    """Optional allowance; None makes token_list omit the allowance field."""
    return None
@pytest.fixture()
def token_list(token, allowance):
    """Nested token list: one chain, one token, optional allowance entry."""
    entry = [ape.chain.chain_id, token.address]
    if allowance is not None:
        entry.append(allowance)
    return [[entry]]
@pytest.fixture
def token(deployer):
    """Mintable test token with an initial supply of 1e18."""
    return deployer.deploy(ape.project.MintableToken, int(1e18))
@pytest.fixture
def request_manager(contracts):
    """Shortcut to the deployed RequestManager contract."""
    return contracts.request_manager
@pytest.fixture
def l1_messenger(contracts):
    """Shortcut to the deployed TestL1Messenger contract."""
    return contracts.l1_messenger
@pytest.fixture
def resolver(contracts):
    """Shortcut to the deployed Resolver contract."""
    return contracts.resolver
@pytest.fixture
def fill_manager(contracts):
    """Shortcut to the deployed FillManager contract."""
    return contracts.fill_manager
@pytest.fixture
def fee_sub(deployer, request_manager, token):
    """FeeSub contract funded with 100 tokens, minimum amount 2 for *token*."""
    fee_sub = deployer.deploy(ape.project.FeeSub, request_manager.address)
    fee_sub.setMinimumAmount(token.address, 2)
    token.transfer(fee_sub.address, 100)
    return fee_sub
3,015 | add medical folder dataset | import os
import re
from cache import cached
from db import node_database
from flask import request, g
from middlewares import middleware, medical_folder_dataset, common
from schemas import ValidateMedicalFolderReferenceCSV, \
ValidateMedicalFolderRoot, \
ValidateSubjectsHasAllModalities, \
ValidateMedicalFolderAddRequest, \
ValidateDataLoadingPlanAddRequest, \
ValidateDataLoadingPlanDeleteRequest, \
PreviewDatasetRequest
from utils import error, validate_request_data, response
from fedbiomed.common.data import MedicalFolderController, MapperBlock, MedicalFolderLoadingBlockTypes
from fedbiomed.common.exceptions import FedbiomedError
from fedbiomed.node.dataset_manager import DatasetManager
from . import api
from config import config
# Manager used to register/remove datasets and data loading plans on the node.
dataset_manager = DatasetManager()
# Medical Folder Controller
mf_controller = MedicalFolderController()
# Path to write and read the datafiles
DATA_PATH_RW = config['DATA_PATH_RW']
# Database table (default datasets table of TinyDB) and query object
table = node_database.table_datasets()
query = node_database.query()
@api.route('/datasets/medical-folder-dataset/validate-reference-column', methods=['POST'])
@validate_request_data(schema=ValidateMedicalFolderReferenceCSV)
@middleware(middlewares=[medical_folder_dataset.read_medical_folder_reference,
                         medical_folder_dataset.validate_available_subjects])
def validate_reference_csv_column():
    """ Validate selected reference CSV and column shows folder names """
    # g.available_subjects is set by the validate_available_subjects middleware.
    subjects = g.available_subjects
    return response({"valid": True, "subjects": subjects}), 200
@api.route('/datasets/medical-folder-dataset/validate-root', methods=['POST'])
@validate_request_data(schema=ValidateMedicalFolderRoot)
@middleware(middlewares=[medical_folder_dataset.validate_medical_folder_root])
def validate_root_path():
    """Validates MedicalFolder Dataset root path"""
    # g.modalities is set by the validate_medical_folder_root middleware.
    return response(data={"valid": True, "modalities": g.modalities}), 200
@api.route('/datasets/medical-folder-dataset/validate-all-modalities', methods=['POST'])
@validate_request_data(schema=ValidateSubjectsHasAllModalities)
@middleware(middlewares=[medical_folder_dataset.validate_all_modalities])
def validate_subjects_has_all_modalities():
    """Validates MedicalFolder Dataset has subjects with all modalities"""
    # g.subjects is set by the validate_all_modalities middleware.
    return response(data={"valid": True, "subjects": g.subjects}), 200
@api.route('/datasets/medical-folder-dataset/add', methods=['POST'])
@validate_request_data(schema=ValidateMedicalFolderAddRequest)
@middleware(middlewares=[common.check_tags_already_registered,
                         medical_folder_dataset.load_dlp,
                         medical_folder_dataset.validate_medical_folder_root,
                         medical_folder_dataset.read_medical_folder_reference,
                         medical_folder_dataset.validate_available_subjects])
def METHOD_NAME():
    """ Adds MedicalFolder dataset into database of NODE """
    # Request object as JSON
    req = request.json
    # Path under which the node will later access the dataset files.
    data_path_save = os.path.join(config['DATA_PATH_SAVE'], *req['medical_folder_root'])
    if req["reference_csv_path"] is None:
        dataset_parameters = {}
    else:
        reference_csv = os.path.join(config['DATA_PATH_SAVE'], *req["reference_csv_path"])
        dataset_parameters = {"index_col": req["index_col"],
                              "tabular_file": reference_csv}
    try:
        dataset_id = dataset_manager.add_database(
            name=req["name"],
            data_type="medical-folder",
            tags=req['tags'],
            description=req['desc'],
            path=data_path_save,
            dataset_parameters=dataset_parameters,
            data_loading_plan=g.dlp,  # set by the load_dlp middleware
            save_dlp=False)
    except FedbiomedError as e:
        return error(str(e)), 400
    except Exception as e:
        # Boundary handler: surface unexpected failures as a client error.
        return error("Unexpected error: " + str(e)), 400
    # Get saved dataset document
    res = table.get(query.dataset_id == dataset_id)
    if not res:
        return error("Medical Folder Dataset is not properly deployed. "
                     "Please try again."), 400
    return response(data=res), 200
@api.route('/datasets/medical-folder-dataset/add-dlp', methods=['POST'])
@validate_request_data(schema=ValidateDataLoadingPlanAddRequest)
@middleware(middlewares=[medical_folder_dataset.create_dlp])
def add_data_loading_plan():
    """Adds DataLoadingPlan into database of NODE """
    # g.dlp is built by the create_dlp middleware declared above.
    try:
        dlp_id = dataset_manager.save_data_loading_plan(g.dlp)
    except FedbiomedError as e:
        return error(f"Cannot save data loading plan for customizations: {e}"), 400
    if dlp_id is None:
        return error("Cannot save data loading plan for customizations: no DLP id"), 400
    return response(data=dlp_id), 200
@api.route('/datasets/medical-folder-dataset/delete-dlp', methods=['POST'])
@validate_request_data(schema=ValidateDataLoadingPlanDeleteRequest)
def remove_data_loading_plan():
    """Remove a DataLoadingPlan from the node database by its id."""
    dlp_id = request.json['dlp_id']
    try:
        dataset_manager.remove_dlp_by_id(dlp_id, True)
    except FedbiomedError as e:
        return error(f"Cannot remove data loading plan for customizations: {e}"), 400
    return response(data=True), 200
@api.route('/datasets/medical-folder-dataset/preview', methods=['POST'])
@validate_request_data(schema=PreviewDatasetRequest)
@cached(key="dataset_id", prefix="medical_folder_dataset-preview", timeout=600)
def medical_folder_preview():
    """Gets preview of MedicalFolder dataset by providing a table of subject and available modalities"""
    # Request object as JSON
    req = request.json
    dataset = table.get(query.dataset_id == req['dataset_id'])
    # Fix: an unknown dataset_id previously crashed with a TypeError when
    # reading dataset['path'] below; return an explicit client error instead.
    if dataset is None:
        return error("Dataset is not found. Please provide a valid dataset id."), 400
    # Translate the path saved on the node side into the path where the files
    # are reachable from the local GUI repository.
    # NOTE(review): assumes dataset['path'] starts with DATA_PATH_SAVE —
    # re.match would return None otherwise (pre-existing behavior).
    rexp = re.match('^' + config['DATA_PATH_SAVE'], dataset['path'])
    data_path = dataset['path'].replace(rexp.group(0), config['DATA_PATH_RW'])
    mf_controller.root = data_path
    if "index_col" in dataset["dataset_parameters"]:
        # Reuse the prefix match computed above (the previous duplicate
        # re.match call produced the same value).
        reference_path = dataset["dataset_parameters"]["tabular_file"].replace(
            rexp.group(0), config['DATA_PATH_RW'])
        reference_csv = mf_controller.read_demographics(
            path=reference_path,
            index_col=dataset["dataset_parameters"]["index_col"]
        )
        subject_table = mf_controller.subject_modality_status(index=reference_csv.index)
    else:
        subject_table = mf_controller.subject_modality_status()
    modalities, _ = mf_controller.modalities()
    data = {
        "subject_table": subject_table,
        "modalities": modalities,
    }
    return response(data=data), 200
@api.route('/datasets/medical-folder-dataset/default-modalities', methods=['GET'])
def get_default_modalities():
    """Return the default modality names as value/label option objects."""
    options = [
        {'value': name, 'label': name}
        for name in MedicalFolderController.default_modality_names
    ]
    return response(data={'default_modalities': options}), 200
3,016 | mode | """Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fix is no longer needed.
"""
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Fabian Pedregosa <fpedregosa@acm.org>
# Lars Buitinck
#
# License: BSD 3 clause
import sys
from importlib import resources
import numpy as np
import scipy
import scipy.sparse.linalg
import scipy.stats
import threadpoolctl
import sklearn
from ..externals._packaging.version import parse as parse_version
from .deprecation import deprecated
# Parsed versions used throughout this module to gate compatibility shims.
np_version = parse_version(np.__version__)
sp_version = parse_version(scipy.__version__)
# base_version strips pre/dev suffixes (e.g. "1.12.0.dev0" -> "1.12.0").
sp_base_version = parse_version(sp_version.base_version)
# TODO: We can consider removing the containers and importing
# directly from SciPy when sparse matrices will be deprecated.
CSR_CONTAINERS = [scipy.sparse.csr_matrix]
CSC_CONTAINERS = [scipy.sparse.csc_matrix]
COO_CONTAINERS = [scipy.sparse.coo_matrix]
LIL_CONTAINERS = [scipy.sparse.lil_matrix]
DOK_CONTAINERS = [scipy.sparse.dok_matrix]
BSR_CONTAINERS = [scipy.sparse.bsr_matrix]
if parse_version(scipy.__version__) >= parse_version("1.8"):
    # Sparse Arrays have been added in SciPy 1.8
    # TODO: When SciPy 1.8 is the minimum supported version,
    # those list can be created directly without this condition.
    # See: https://github.com/scikit-learn/scikit-learn/issues/27090
    CSR_CONTAINERS.append(scipy.sparse.csr_array)
    CSC_CONTAINERS.append(scipy.sparse.csc_array)
    COO_CONTAINERS.append(scipy.sparse.coo_array)
    LIL_CONTAINERS.append(scipy.sparse.lil_array)
    DOK_CONTAINERS.append(scipy.sparse.dok_array)
    BSR_CONTAINERS.append(scipy.sparse.bsr_array)
# The line-search helpers moved to a private module in SciPy 1.8.
try:
    from scipy.optimize._linesearch import line_search_wolfe1, line_search_wolfe2
except ImportError:  # SciPy < 1.8
    from scipy.optimize.linesearch import line_search_wolfe2, line_search_wolfe1  # type: ignore # noqa
def _object_dtype_isnan(X):
    """Elementwise NaN mask for object-dtype arrays.

    Relies on NaN being unequal to itself, so ``X != X`` is True exactly
    where entries are NaN.
    """
    return X != X
# Rename the `method` kwarg to `interpolation` for NumPy < 1.22, because
# `interpolation` kwarg was deprecated in favor of `method` in NumPy >= 1.22.
def _percentile(a, q, *, method="linear", **kwargs):
    """np.percentile shim accepting the modern ``method`` keyword name."""
    return np.percentile(a, q, interpolation=method, **kwargs)
if np_version < parse_version("1.22"):
    percentile = _percentile
else:  # >= 1.22
    from numpy import percentile  # type: ignore  # noqa
# compatibility fix for threadpoolctl >= 3.0.0
# since version 3 it's possible to setup a global threadpool controller to avoid
# looping through all loaded shared libraries each time.
# the global controller is created during the first call to threadpoolctl.
def _get_threadpool_controller():
    """Return the sklearn-global ThreadpoolController, creating it lazily."""
    if not hasattr(threadpoolctl, "ThreadpoolController"):
        # threadpoolctl < 3.0: no controller class available.
        return None
    try:
        return sklearn._sklearn_threadpool_controller
    except AttributeError:
        controller = threadpoolctl.ThreadpoolController()
        sklearn._sklearn_threadpool_controller = controller
        return controller
def threadpool_limits(limits=None, user_api=None):
    controller = _get_threadpool_controller()
    if controller is None:
        # Old threadpoolctl: fall back to the module-level function.
        return threadpoolctl.threadpool_limits(limits=limits, user_api=user_api)
    return controller.limit(limits=limits, user_api=user_api)


threadpool_limits.__doc__ = threadpoolctl.threadpool_limits.__doc__
def threadpool_info():
    controller = _get_threadpool_controller()
    if controller is None:
        # Old threadpoolctl: fall back to the module-level function.
        return threadpoolctl.threadpool_info()
    return controller.info()


threadpool_info.__doc__ = threadpoolctl.threadpool_info.__doc__
@deprecated(
    "The function `delayed` has been moved from `sklearn.utils.fixes` to "
    "`sklearn.utils.parallel`. This import path will be removed in 1.5."
)
def delayed(function):
    # Imported lazily inside the function; NOTE(review): presumably to avoid
    # a circular import at module load time — confirm.
    from sklearn.utils.parallel import delayed
    return delayed(function)
# TODO: Remove when SciPy 1.11 is the minimum supported version
def METHOD_NAME(a, axis=0):
    """Version-robust wrapper around ``scipy.stats.mode``.

    For SciPy >= 1.9, ``keepdims=True`` is passed explicitly to preserve
    the historical output shape.
    """
    if sp_version >= parse_version("1.9.0"):
        mode = scipy.stats.mode(a, axis=axis, keepdims=True)
        # The ".999" threshold presumably also catches 1.11 pre-releases,
        # which compare greater than "1.10.999".
        if sp_version >= parse_version("1.10.999"):
            # scipy.stats.mode has changed returned array shape with axis=None
            # and keepdims=True, see https://github.com/scipy/scipy/pull/17561
            if axis is None:
                mode = np.ravel(mode)
        return mode
    return scipy.stats.mode(a, axis=axis)
# TODO: Remove when Scipy 1.12 is the minimum supported version
if sp_base_version >= parse_version("1.12.0"):
    _sparse_linalg_cg = scipy.sparse.linalg.cg
else:
    def _sparse_linalg_cg(A, b, **kwargs):
        """cg shim translating the SciPy 1.12 ``rtol`` keyword to old ``tol``."""
        if "rtol" in kwargs:
            kwargs["tol"] = kwargs.pop("rtol")
        if "atol" not in kwargs:
            # "legacy" reproduces the historical atol behavior of older SciPy.
            kwargs["atol"] = "legacy"
        return scipy.sparse.linalg.cg(A, b, **kwargs)
###############################################################################
# Backport of Python 3.9's importlib.resources
# TODO: Remove when Python 3.9 is the minimum supported version
def _open_text(data_module, data_file_name):
if sys.version_info >= (3, 9):
return resources.files(data_module).joinpath(data_file_name).open("r")
else:
return resources.open_text(data_module, data_file_name)
def _open_binary(data_module, data_file_name):
if sys.version_info >= (3, 9):
return resources.files(data_module).joinpath(data_file_name).open("rb")
else:
return resources.open_binary(data_module, data_file_name)
def _read_text(descr_module, descr_file_name):
if sys.version_info >= (3, 9):
return resources.files(descr_module).joinpath(descr_file_name).read_text()
else:
return resources.read_text(descr_module, descr_file_name)
def _path(data_module, data_file_name):
if sys.version_info >= (3, 9):
return resources.as_file(resources.files(data_module).joinpath(data_file_name))
else:
return resources.path(data_module, data_file_name)
def _is_resource(data_module, data_file_name):
if sys.version_info >= (3, 9):
return resources.files(data_module).joinpath(data_file_name).is_file()
else:
return resources.is_resource(data_module, data_file_name)
def _contents(data_module):
if sys.version_info >= (3, 9):
return (
resource.name
for resource in resources.files(data_module).iterdir()
if resource.is_file()
)
else:
return resources.contents(data_module)
# For +1.25 NumPy versions exceptions and warnings are being moved
# to a dedicated submodule.
if np_version >= parse_version("1.25.0"):
from numpy.exceptions import ComplexWarning, VisibleDeprecationWarning
else:
from numpy import ComplexWarning, VisibleDeprecationWarning # type: ignore # noqa
# TODO: Remove when Scipy 1.6 is the minimum supported version
try:
from scipy.integrate import trapezoid # type: ignore # noqa
except ImportError:
from scipy.integrate import trapz as trapezoid # type: ignore # noqa |
3,017 | render | """DMD (hardware device)."""
import struct
from kivy.graphics.instructions import Callback
from kivy.uix.effectwidget import EffectWidget
from kivy.clock import Clock
from kivy.graphics.fbo import Fbo
from kivy.graphics.opengl import glReadPixels, GL_RGB, GL_UNSIGNED_BYTE
from kivy.graphics.texture import Texture
from mpfmc.effects.gain import GainEffect
from mpfmc.effects.flip_vertical import FlipVerticalEffect
from mpfmc.effects.gamma import GammaEffect
MYPY = False
if MYPY: # pragma: no cover
from mpfmc.core.mc import MpfMc # pylint: disable-msg=cyclic-import,unused-import
class DmdBase:
    """Base class for DMD devices."""
    dmd_name_string = 'DMD'
    def __init__(self, mc: "MpfMc", name: str, config: dict) -> None:
        """Initialise DMD.

        Args:
            mc: The MpfMc instance.
            name: Name of this DMD device.
            config: Raw device config, validated via _get_validated_config().
        """
        self.mc = mc
        self.name = name
        self.mc.log.info('Initializing DMD')
        self.config = self._get_validated_config(config)
        # Display whose content is captured and streamed to the DMD.
        self.source = self.mc.displays[self.config['source_display']]
        # Last frame sent; used by render() to skip unchanged frames.
        self.prev_data = None
        # True when the source canvas changed and a new frame must be rendered.
        self._dirty = True
        # put the widget canvas on a Fbo
        texture = Texture.create(size=self.source.size, colorfmt='rgb')
        self.fbo = Fbo(size=self.source.size, texture=texture)
        self.effect_widget = EffectWidget()
        effect_list = list()
        effect_list.append(FlipVerticalEffect())
        if self.config['brightness'] != 1.0:
            if not 0.0 <= self.config['brightness'] <= 1.0:
                raise ValueError("DMD brightness value should be between 0.0 "
                                 "and 1.0. Yours is {}".format(self.config['brightness']))
            effect_list.append(GainEffect(gain=self.config['brightness']))
        if self.config['gamma'] != 1.0:
            effect_list.append(GammaEffect(gamma=self.config['gamma']))
        self.effect_widget.effects = effect_list
        self.effect_widget.size = self.source.size
        self.fbo.add(self.effect_widget.canvas)
        with self.source.canvas:
            self.callback = Callback(self._trigger_rendering)
        self._set_dmd_fps()
    def _trigger_rendering(self, *args):
        # Canvas callback: just mark dirty; the actual render happens in tick().
        del args
        self._dirty = True
    def _get_validated_config(self, config: dict) -> dict:
        """Validate *config*; implemented by subclasses."""
        raise NotImplementedError
    def _set_dmd_fps(self) -> None:
        """Schedule tick() at the lower of the configured fps and kivy's max fps."""
        # fps is the rate that the connected client requested. We'll use the
        # lower of the two
        mc_fps = self.config['fps']
        if mc_fps == 0:
            # fps 0 means "use kivy's maximum frame rate".
            # pylint: disable-msg=protected-access
            mc_fps = Clock._max_fps
        # pylint: disable-msg=protected-access
        if mc_fps > Clock._max_fps:
            self.mc.log.warning("%s fps is higher than mpf-mc fps. "
                                "Will use mpf-mc fps setting for the DMD.",
                                DmdBase.dmd_name_string)
            # pylint: disable-msg=protected-access
            fps = Clock._max_fps
            # update interval 0 schedules tick() every frame.
            update = 0
        # pylint: disable-msg=protected-access
        elif Clock._max_fps > mc_fps > 0:
            fps = mc_fps
            update = 1 / fps
        else:
            # pylint: disable-msg=protected-access
            fps = Clock._max_fps
            update = 0
        Clock.schedule_interval(self.tick, update)
        self.mc.log.info("Setting %s to %sfps",
                         DmdBase.dmd_name_string, fps)
    def tick(self, *args) -> None:
        """Draw image for DMD and send it."""
        del args
        # run this at the end of the tick to make sure all kivy bind callbacks have executed
        if self._dirty:
            Clock.schedule_once(self.METHOD_NAME, -1)
    def METHOD_NAME(self, dt):
        """Render the source display into the FBO, read pixels and send them."""
        del dt
        self._dirty = False
        widget = self.source
        fbo = self.fbo
        # detach the widget from the parent
        parent = widget.parent
        if parent and hasattr(parent, "remove_display_source"):
            parent.remove_display_source(widget)
        # clear the fbo background
        fbo.bind()
        fbo.clear_buffer()
        fbo.release()
        self.effect_widget.add_widget(widget.container)
        fbo.draw()
        fbo.bind()
        data = glReadPixels(0, 0, widget.native_size[0], widget.native_size[1],
                            GL_RGB, GL_UNSIGNED_BYTE)
        fbo.release()
        self.effect_widget.remove_widget(widget.container)
        # reattach to the parent
        if parent and hasattr(parent, "add_display_source"):
            parent.add_display_source(widget)
        # Optionally skip sending when the frame did not change.
        if not self.config['only_send_changes'] or self.prev_data != data:
            self.prev_data = data
            self.send(data)
    def send(self, data: bytes) -> None:
        """Send data to DMD via BCP."""
        raise NotImplementedError
class Dmd(DmdBase):
    """Monochrome DMD."""

    def _get_validated_config(self, config: dict) -> dict:
        """Validate against the 'dmds' config section."""
        return self.mc.config_validator.validate_config('dmds', config)

    @classmethod
    def _convert_to_single_bytes(cls, data, config: dict) -> bytes:
        """Collapse RGB triplets into 4-bit (0-15) luminosity values."""
        config.setdefault('luminosity', (.299, .587, .114))
        luminosity = config['luminosity']
        out = bytearray()
        for index, (r, g, b) in enumerate(struct.iter_unpack('BBB', data), start=1):
            try:
                weight = ((r * luminosity[0]) + (g * luminosity[1]) + (b * luminosity[2])) / 255.
                out.append(int(round(weight * 15)))
            except ValueError:
                # Re-raise with the 1-based pixel index for debugging.
                raise ValueError(index, r, g, b)
        return bytes(out)

    def send(self, data: bytes) -> None:
        """Send data to DMD via BCP."""
        payload = self._convert_to_single_bytes(data, self.config)
        self.mc.bcp_processor.send('dmd_frame', rawbytes=payload, name=self.name)
class RgbDmd(DmdBase):
    """RGB DMD."""

    dmd_name_string = 'RGB DMD'

    def _get_validated_config(self, config: dict) -> dict:
        """Validate against the 'rgb_dmds' config section."""
        return self.mc.config_validator.validate_config('rgb_dmds', config)

    @staticmethod
    def _reorder_channels(data, order):
        """Rearrange each RGB triplet in *data* according to *order*."""
        reordered = bytearray()
        for r, g, b in struct.iter_unpack('BBB', data):
            channels = {"r": r, "g": g, "b": b}
            for name in order:
                if name not in channels:
                    raise ValueError("Unknown channel {}".format(name))
                reordered.append(channels[name])
        return bytes(reordered)

    def send(self, data: bytes) -> None:
        """Send data to RGB DMD via BCP."""
        if self.config['channel_order'] != 'rgb':
            data = self._reorder_channels(data, self.config['channel_order'])
        self.mc.bcp_processor.send('rgb_dmd_frame', rawbytes=data, name=self.name)
3,018 | get resource output | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetResourceResult',
'AwaitableGetResourceResult',
'get_resource',
'get_resource_output',
]
@pulumi.output_type
class GetResourceResult:
    """
    A collection of values returned by getResource.
    """
    def __init__(__self__, id=None, identifier=None, properties=None, role_arn=None, type_name=None, type_version_id=None):
        # Generated constructor: type-check each argument and store it via
        # pulumi.set so the output machinery can retrieve it.
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if identifier and not isinstance(identifier, str):
            raise TypeError("Expected argument 'identifier' to be a str")
        pulumi.set(__self__, "identifier", identifier)
        if properties and not isinstance(properties, str):
            raise TypeError("Expected argument 'properties' to be a str")
        pulumi.set(__self__, "properties", properties)
        if role_arn and not isinstance(role_arn, str):
            raise TypeError("Expected argument 'role_arn' to be a str")
        pulumi.set(__self__, "role_arn", role_arn)
        if type_name and not isinstance(type_name, str):
            raise TypeError("Expected argument 'type_name' to be a str")
        pulumi.set(__self__, "type_name", type_name)
        if type_version_id and not isinstance(type_version_id, str):
            raise TypeError("Expected argument 'type_version_id' to be a str")
        pulumi.set(__self__, "type_version_id", type_version_id)
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def identifier(self) -> str:
        """
        Identifier of the resource, e.g. `vpc-12345678`.
        """
        return pulumi.get(self, "identifier")
    @property
    @pulumi.getter
    def properties(self) -> str:
        """
        JSON string matching the CloudFormation resource type schema with current configuration.
        """
        return pulumi.get(self, "properties")
    @property
    @pulumi.getter(name="roleArn")
    def role_arn(self) -> Optional[str]:
        """
        ARN of the IAM Role assumed for operations, if any.
        """
        return pulumi.get(self, "role_arn")
    @property
    @pulumi.getter(name="typeName")
    def type_name(self) -> str:
        """
        CloudFormation resource type name. For example, `AWS::EC2::VPC`.
        """
        return pulumi.get(self, "type_name")
    @property
    @pulumi.getter(name="typeVersionId")
    def type_version_id(self) -> Optional[str]:
        """
        Identifier of the CloudFormation resource type version, if any.
        """
        return pulumi.get(self, "type_version_id")
class AwaitableGetResourceResult(GetResourceResult):
    """Awaitable view of GetResourceResult; resolves immediately with a copy."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` makes this method a generator, which is what
        # the awaitable protocol requires; it never actually suspends.
        if False:
            yield self
        return GetResourceResult(
            id=self.id,
            identifier=self.identifier,
            properties=self.properties,
            role_arn=self.role_arn,
            type_name=self.type_name,
            type_version_id=self.type_version_id)
def get_resource(identifier: Optional[str] = None,
                 role_arn: Optional[str] = None,
                 type_name: Optional[str] = None,
                 type_version_id: Optional[str] = None,
                 opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetResourceResult:
    """
    Provides details for a Cloud Control API Resource. The reading of these resources is proxied through Cloud Control API handlers to the backend service.

    ## Example Usage

    ```python
    import pulumi
    import pulumi_aws as aws

    example = aws.cloudcontrol.get_resource(identifier="example",
        type_name="AWS::ECS::Cluster")
    ```

    :param str identifier: Identifier of the CloudFormation resource type. For example, `vpc-12345678`.
    :param str role_arn: ARN of the IAM Role to assume for operations.
    :param str type_name: CloudFormation resource type name. For example, `AWS::EC2::VPC`.
           The following arguments are optional:
    :param str type_version_id: Identifier of the CloudFormation resource type version.
    """
    # Map the snake_case Python arguments onto the provider's camelCase
    # invoke-argument names.
    __args__ = dict()
    __args__['identifier'] = identifier
    __args__['roleArn'] = role_arn
    __args__['typeName'] = type_name
    __args__['typeVersionId'] = type_version_id
    # Merge caller options over the package-wide invoke defaults, then perform
    # the synchronous provider invoke.
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('aws:cloudcontrol/getResource:getResource', __args__, opts=opts, typ=GetResourceResult).value

    return AwaitableGetResourceResult(
        id=pulumi.get(__ret__, 'id'),
        identifier=pulumi.get(__ret__, 'identifier'),
        properties=pulumi.get(__ret__, 'properties'),
        role_arn=pulumi.get(__ret__, 'role_arn'),
        type_name=pulumi.get(__ret__, 'type_name'),
        type_version_id=pulumi.get(__ret__, 'type_version_id'))
@_utilities.lift_output_func(get_resource)
def METHOD_NAME(identifier: Optional[pulumi.Input[str]] = None,
                role_arn: Optional[pulumi.Input[Optional[str]]] = None,
                type_name: Optional[pulumi.Input[str]] = None,
                type_version_id: Optional[pulumi.Input[Optional[str]]] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetResourceResult]:
    """
    Provides details for a Cloud Control API Resource. The reading of these resources is proxied through Cloud Control API handlers to the backend service.

    ## Example Usage

    ```python
    import pulumi
    import pulumi_aws as aws

    example = aws.cloudcontrol.get_resource(identifier="example",
        type_name="AWS::ECS::Cluster")
    ```

    :param str identifier: Identifier of the CloudFormation resource type. For example, `vpc-12345678`.
    :param str role_arn: ARN of the IAM Role to assume for operations.
    :param str type_name: CloudFormation resource type name. For example, `AWS::EC2::VPC`.
           The following arguments are optional:
    :param str type_version_id: Identifier of the CloudFormation resource type version.
    """
    # Body is intentionally empty: lift_output_func wraps get_resource so it
    # accepts pulumi.Input values and returns a pulumi.Output.
    ...
3,019 | make test message | # Copyright iris-grib contributors
#
# This file is part of iris-grib and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""Unit tests for the :mod:`iris_grib` package."""
# import iris_grib.tests first so that some things can be initialised
# before importing anything else.
import iris_grib.tests as tests
import eccodes
import numpy as np
from unittest import mock
import iris
import iris_grib
from iris_grib.message import GribMessage
def METHOD_NAME(sections):
    """Return a GribMessage wrapping a fake raw message with *sections*."""
    fake_raw = mock.Mock(sections=sections)
    fake_recreate = mock.Mock(return_value=fake_raw)
    return GribMessage(fake_raw, fake_recreate)
def _mock_eccodes_fetch(message, key):
    """
    Fake the ecCodes key-fetch.

    Fetch key-value from the fake message (dictionary).
    If the key is not present, raise the diagnostic exception.
    """
    try:
        return message[key]
    except KeyError:
        raise _mock_eccodes.CodesInternalError
def _mock_eccodes__codes_is_missing(grib_message, keyname):
"""
Fake the ecCodes key-existence enquiry.
Return whether the key exists in the fake message (dictionary).
"""
return (keyname not in grib_message)
def _mock_eccodes__codes_get_native_type(grib_message, keyname):
"""
Fake the ecCodes type-discovery operation.
Return type of key-value in the fake message (dictionary).
If the key is not present, raise the diagnostic exception.
"""
if keyname in grib_message:
return type(grib_message[keyname])
raise _mock_eccodes.CodesInternalError(keyname)
# Construct a mock object to mimic the eccodes for GribWrapper testing.
# All scalar/array fetch entry points share the same fake fetch, so a plain
# dict can stand in for a real GRIB handle.
_mock_eccodes = mock.Mock(spec=eccodes)
_mock_eccodes.CodesInternalError = Exception
_mock_eccodes.codes_get_long = mock.Mock(side_effect=_mock_eccodes_fetch)
_mock_eccodes.codes_get_string = mock.Mock(side_effect=_mock_eccodes_fetch)
_mock_eccodes.codes_get_double = mock.Mock(side_effect=_mock_eccodes_fetch)
_mock_eccodes.codes_get_double_array = mock.Mock(
    side_effect=_mock_eccodes_fetch)
_mock_eccodes.codes_is_missing = mock.Mock(
    side_effect=_mock_eccodes__codes_is_missing)
_mock_eccodes.codes_get_native_type = mock.Mock(
    side_effect=_mock_eccodes__codes_get_native_type)
class FakeGribMessage(dict):
    """
    A 'fake grib message' object, for testing GribWrapper construction.

    Behaves as a dictionary, containing key-values for message keys.
    """
    def __init__(self, **kwargs):
        """
        Create a fake message object.

        General keys can be set/add as required via **kwargs.
        The 'time_code' key is specially managed.
        """
        # Start with a bare dictionary
        dict.__init__(self)
        # Extract specially-recognised keys.
        time_code = kwargs.pop('time_code', None)
        # Set the minimally required keys.
        self._init_minimal_message()
        # Also set a time-code, if given.
        if time_code is not None:
            self.set_timeunit_code(time_code)
        # Finally, add any remaining passed key-values.
        # NOTE: this happens last, so callers can override any default below.
        self.update(**kwargs)

    def _init_minimal_message(self):
        # Set values for all the required keys.
        # 9999 values are deliberate "no such parameter" placeholders.
        self.update({
            'edition': 1,
            'Ni': 1,
            'Nj': 1,
            'numberOfValues': 1,
            'alternativeRowScanning': 0,
            'centre': 'ecmf',
            'year': 2007,
            'month': 3,
            'day': 23,
            'hour': 12,
            'minute': 0,
            'indicatorOfUnitOfTimeRange': 1,
            'gridType': 'rotated_ll',
            'angleOfRotation': 0.0,
            'resolutionAndComponentFlags': 128,
            'iDirectionIncrementInDegrees': 0.036,
            'jDirectionIncrementInDegrees': 0.036,
            'iScansNegatively': 0,
            'jScansPositively': 1,
            'longitudeOfFirstGridPointInDegrees': -5.70,
            'latitudeOfFirstGridPointInDegrees': -4.452,
            'jPointsAreConsecutive': 0,
            'values': np.array([[1.0]]),
            'indicatorOfParameter': 9999,
            'parameterNumber': 9999,
            'startStep': 24,
            'timeRangeIndicator': 1,
            'P1': 2, 'P2': 0,
            # time unit - needed AS WELL as 'indicatorOfUnitOfTimeRange'
            'unitOfTime': 1,
            'table2Version': 9999,
        })

    def set_timeunit_code(self, timecode):
        """Set both GRIB1 time-unit keys to *timecode*."""
        self['indicatorOfUnitOfTimeRange'] = timecode
        # for some odd reason, GRIB1 code uses *both* of these
        # NOTE kludge -- the 2 keys are really the same thing
        self['unitOfTime'] = timecode
class TestField(tests.IrisGribTest):
    """Shared helpers for checking coordinates produced by field conversion."""

    def _test_for_coord(self, field, convert, coord_predicate, expected_points,
                        expected_bounds):
        """Convert *field* and assert exactly one coord matches the predicate,
        with the given points and bounds (None bounds means no bounds)."""
        (factories, references, standard_name, long_name, units,
         attributes, cell_methods, dim_coords_and_dims,
         aux_coords_and_dims) = convert(field)

        # Check for one and only one matching coordinate.
        coords_and_dims = dim_coords_and_dims + aux_coords_and_dims
        matching_coords = [coord for coord, _ in coords_and_dims if
                           coord_predicate(coord)]
        self.assertEqual(len(matching_coords), 1, str(matching_coords))
        coord = matching_coords[0]

        # Check points and bounds.
        if expected_points is not None:
            self.assertArrayEqual(coord.points, expected_points)

        if expected_bounds is None:
            self.assertIsNone(coord.bounds)
        else:
            self.assertArrayEqual(coord.bounds, expected_bounds)

    def assertCoordsAndDimsListsMatch(self, coords_and_dims_got,
                                      coords_and_dims_expected):
        """
        Check that coords_and_dims lists are equivalent.

        The arguments are lists of pairs of (coordinate, dimensions).
        The elements are compared one-to-one, by coordinate name (so the order
        of the lists is _not_ significant).
        It also checks that the coordinate types (DimCoord/AuxCoord) match.
        """
        def sorted_by_coordname(list):
            return sorted(list, key=lambda item: item[0].name())

        coords_and_dims_got = sorted_by_coordname(coords_and_dims_got)
        coords_and_dims_expected = sorted_by_coordname(
            coords_and_dims_expected)
        self.assertEqual(coords_and_dims_got, coords_and_dims_expected)
        # Also check coordinate type equivalences (as Coord.__eq__ does not).
        self.assertEqual(
            [type(coord) for coord, dims in coords_and_dims_got],
            [type(coord) for coord, dims in coords_and_dims_expected])
class TestGribSimple(tests.IrisGribTest):
    # A testing class that does not need the test data.
    def mock_grib(self):
        """Return a minimal mock GRIB message with plain (non-Mock) values
        for the attributes the load rules read directly."""
        # A mock grib message, with attributes that can't be Mocks themselves.
        grib = mock.Mock()
        grib.startStep = 0
        grib.phenomenon_points = lambda unit: 3
        grib._forecastTimeUnit = "hours"
        grib.productDefinitionTemplateNumber = 0
        # define a level type (NB these 2 are effectively the same)
        grib.levelType = 1
        grib.typeOfFirstFixedSurface = 1
        grib.typeOfSecondFixedSurface = 1
        return grib

    def cube_from_message(self, grib):
        """Convert a mock message into a cube via the GRIB1 load rules."""
        # Parameter translation now uses the GribWrapper, so we must convert
        # the Mock-based fake message to a FakeGribMessage.
        with mock.patch('iris_grib.eccodes', _mock_eccodes):
            grib_message = FakeGribMessage(**grib.__dict__)
            wrapped_msg = iris_grib.GribWrapper(grib_message)
            cube, _, _ = iris.fileformats.rules._make_cube(
                wrapped_msg, iris_grib._grib1_load_rules.grib1_convert)
        return cube
3,020 | additive phylogeny rec | from collections import defaultdict
import numpy as np
import sys
class DistanceMatrix:
    """A symmetric leaf-to-leaf distance matrix with phylogeny algorithms.

    Wraps a 2-D numpy array ``self.D`` and provides limb-length computation,
    additive phylogeny reconstruction (returns an ``UnrootedTree``) and UPGMA
    clustering (returns a rooted ``Tree``).
    """

    def __init__(self, *args):
        # np.array copies its input, so callers' data is never aliased.
        self.D = np.array(*args)

    def __str__(self):
        return str([[float(a) for a in x] for x in self.D])

    def __repr__(self):
        return type(self).__name__ + "(" + str([[float(a) for a in x] for x in self.D]) + ")"

    @staticmethod
    def loadtxt(file_name, dtype=None, comments='#', delimiter=None, converters=None, skiprows=0, usecols=None, unpack=False, ndmin=0):
        """Load a matrix from a text file; parameters mirror np.loadtxt.

        Fixed: the original signature contained a stray ``F`` token between
        ``dtype`` and ``comments`` (a syntax error that made the module
        unimportable).
        """
        D = np.loadtxt(file_name, dtype, comments, delimiter, converters, skiprows, usecols, unpack, ndmin)
        return DistanceMatrix(D)

    def savetxt(self, output_file, fmt='%.18e', delimiter=' ', newline='\n', header='', footer='', comments='# '):
        """Write the matrix to a text file; parameters mirror np.savetxt."""
        np.savetxt(output_file, self.D, fmt, delimiter, newline, header, footer, comments)

    def nr_leaves(self):
        """Return the number of leaves (rows) in the matrix."""
        return len(self.D)

    def limb_length(self, j):
        """Return the limb length of leaf *j*: min over i,k != j of
        (D[i][j] + D[j][k] - D[i][k]) / 2."""
        n = self.nr_leaves()
        assert (j < n)
        minimum = sys.maxsize
        for i in range(n):
            if i != j:
                for k in range(n):
                    if k != j:
                        Dij = self.D[i][j]
                        Djk = self.D[j][k]
                        Dik = self.D[i][k]
                        minimum = min([minimum, (Dij + Djk - Dik) / 2])
        return minimum

    def additive_phylogeny(self):
        """Reconstruct an UnrootedTree fitting this (additive) matrix."""
        # Internal nodes are numbered upwards from the number of leaves.
        self.node_count = self.nr_leaves()
        return self.METHOD_NAME(self, self.nr_leaves())

    def find_i_n_k(self, n):
        """Find leaves (i, k) with D[i][k] == D[i][n-1] + D[n-1][k], i.e. a
        path through the attachment point of leaf n-1.

        Returns the triple (i, n, k), or the sentinel string "nop" if no such
        pair exists (should not happen for an additive matrix).
        """
        for i in range(n - 1):
            for k in range(n - 1):
                if i != k:
                    if self.D[i][k] == self.D[i][n - 1] + self.D[n - 1][k]:
                        return (i, n, k)
        return "nop"

    def METHOD_NAME(self, D, n):
        """Recursive worker for additive_phylogeny, on the first *n* leaves of *D*."""
        if n == 3:
            # Base case: three leaves join at a single internal node whose
            # limb lengths are determined directly.
            ll1 = (D.D[0][1] + D.D[1][2] - D.D[0][2]) / 2
            ll2 = D.D[1][2] - ll1
            ll0 = D.D[0][1] - ll1
            edges = {(0, self.node_count, ll0), (1, self.node_count, ll1), (2, self.node_count, ll2)}
            self.node_count += 1
            return UnrootedTree(*edges)
        # "Bald" the last leaf: subtract its limb length from its row/column.
        ll = D.limb_length(n - 1)
        D_bald = DistanceMatrix(D.D[:])
        for x in range(n - 1):
            D_bald.D[n - 1][x] -= ll
            D_bald.D[x][n - 1] -= ll
        i, n, k = D_bald.find_i_n_k(n)
        x = D_bald.D[i][n - 1]
        # Solve the smaller problem without the last leaf, then re-attach it
        # at distance x from leaf i along the i..k path.
        trimmed_D = DistanceMatrix([[D_bald.D[a][b] for a in range(n - 1)] for b in range(n - 1)])
        T = self.METHOD_NAME(trimmed_D, n - 1)
        path = T.path(i, k)
        i = 1
        while i < len(path) - 1 and T.distance(path[0], path[i]) < x:
            i += 1
        # Fixed: the original used `i is not 0` (identity test on an int
        # literal); `!=` is the correct comparison and behaves identically.
        if i != 0 and T.distance(path[0], path[i]) == x:
            # Attachment point is an existing node.
            T.add_edge(path[i - 1], n - 1, ll)
        else:
            # Attachment point falls inside edge (a, b): split it with a new
            # internal node and hang the leaf off it.
            a, b = path[i - 1], path[i]
            new_d = D.D[path[0]][b] - x if b < len(D.D) else T.distance(path[0], b) - x
            T.add_edge(self.node_count, b, new_d)
            T.add_edge(a, self.node_count, T.distance(a, b) - new_d)
            T.add_edge(n - 1, self.node_count, ll)
            T.remove_edge(a, b)
            self.node_count += 1
        return T

    def UPGMA(self):
        """Cluster the matrix with UPGMA and return the rooted Tree."""
        self.nr_count = self.nr_leaves()
        clusters = [{i} for i in range(self.nr_leaves())]
        trees = [Tree(i) for i in range(self.nr_leaves())]
        ages = [0 for _ in range(self.nr_leaves())]
        while len(clusters) > 1:
            # Find the closest pair of clusters under average linkage.
            min_d = sys.maxsize
            min_C1, min_C2 = None, None
            n = len(clusters)
            for i in range(n):
                for j in range(i + 1, n):
                    C1, C2 = clusters[i], clusters[j]
                    d = self.pairwise_distance(C1, C2)
                    if d < min_d:
                        min_d = d
                        min_C1, min_C2 = C1, C2
            C1_index, C2_index = clusters.index(min_C1), clusters.index(min_C2)
            age = min_d / 2
            # Merge C2 into C1; branch lengths are age differences.
            clusters[C1_index] = min_C1 | min_C2
            clusters.pop(C2_index)
            trees[C1_index] = Tree(self.nr_count, (trees[C1_index], age - ages[C1_index]), (trees[C2_index], age - ages[C2_index]))
            trees.pop(C2_index)
            ages[C1_index] = age
            ages.pop(C2_index)
            self.nr_count += 1
        return trees[0]

    def pairwise_distance(self, C1, C2):
        """Average linkage distance between leaf-index sets *C1* and *C2*."""
        n, m = len(C1), len(C2)
        s = sum([self.D[i][j] for i in C1 for j in C2])
        return s / (n * m)
class UnrootedTree:
    """An undirected, edge-weighted tree over integer node labels.

    Maintains three views of the same edges: ``edges`` (list of triples),
    ``d`` (symmetric (a, b) -> weight lookup) and ``nb`` (adjacency lists).
    """

    def __init__(self, *edges):
        """Build the tree from (node_a, node_b, weight) triples.

        Values are coerced to (int, int, float), so the string fields read by
        loadtxt() are accepted too.
        """
        self.edges = list()
        for edge in edges:
            a, b, c = edge
            a, b, c = int(a), int(b), float(c)
            self.edges.append((a, b, c))
        d = dict()
        for edge in self.edges:
            x, y, w = edge
            d[(x, y)] = w
            d[(y, x)] = w
        self.d = d
        nb = defaultdict(list)
        for edge in self.edges:
            x, y, w = edge
            nb[x].append(y)
            nb[y].append(x)
        self.nb = nb

    def __str__(self):
        return type(self).__name__ + str(tuple(self.edges))

    def __repr__(self):
        return type(self).__name__ + str(tuple(self.edges))

    def add_edge(self, a, b, w):
        """Add edge (a, b) with weight w, updating all three views."""
        self.edges.append((a, b, w))
        self.d[(a, b)] = w
        self.d[(b, a)] = w
        self.nb[a].append(b)
        self.nb[b].append(a)

    def remove_edge(self, a, b):
        """Remove the edge between a and b (either orientation)."""
        for edge in self.edges:
            x, y, w = edge
            if (x == a and b == y) or (x == b and y == a):
                self.edges.remove(edge)
                break
        del self.d[(a, b)]
        del self.d[(b, a)]
        self.nb[a].remove(b)
        self.nb[b].remove(a)

    @staticmethod
    def loadtxt(input_file):
        """Parse lines of the form ``a<->b:w`` into an UnrootedTree.

        Fixed: the file handle is now closed via a context manager (the
        original opened the file and never closed it).
        """
        edges = list()
        with open(input_file) as f:
            for line in f:
                parts = line.rstrip().split(":")
                a, b = parts[0].split("<->")
                edges.append((a, b, parts[1]))
        return UnrootedTree(*edges)

    def path(self, i, j):
        """Return the unique node path from i to j (inclusive)."""
        self.visited = [i]
        p = self.path_dfs(self.nb, i, j, [i])
        # path_dfs may hand the path back reversed; normalise to start at i.
        if p[0] != i:
            p = p[::-1]
        return p

    def distance(self, i, j):
        """Return the tree distance between nodes i and j."""
        if (i, j) in self.d:
            # Adjacent: the edge weight is the distance.
            return self.d[(i, j)]
        else:
            path = self.path(i, j)
            return self.path_weight(path)

    def path_dfs(self, graph, current_i, j, current_path):
        """Depth-first search for j; returns the node path or None.

        Uses self.visited (set up by path()) to avoid walking back.
        """
        nb = graph[current_i]
        for n in nb:
            if n not in self.visited:
                self.visited.append(n)
                if n == j:
                    return current_path + [j]
                else:
                    r = self.path_dfs(graph, n, j, current_path + [n])
                    if r:
                        return r

    def nr_leaf_nodes(self):
        """Return the number of degree-1 (leaf) nodes."""
        s = set()
        for edge in self.edges:
            x, y, w = edge
            if len(self.nb[x]) == 1:
                s.add(x)
            if len(self.nb[y]) == 1:
                s.add(y)
        return len(s)

    def path_weight(self, path):
        """Sum the edge weights along a node path."""
        s = 0
        for i in range(len(path) - 1):
            s += self.d[(path[i], path[i + 1])]
        return s

    def distance_matrix(self):
        """Return the DistanceMatrix of pairwise distances between leaves.

        Assumes the leaves are numbered 0 .. nr_leaf_nodes()-1.
        (A stray, side-effect-free ``self.path_weight(self.path(0, 2))`` call
        was removed here.)
        """
        n = self.nr_leaf_nodes()
        D = [[0 for _ in range(n)] for _ in range(n)]
        for i in range(n):
            for j in range(i + 1, n):
                path = self.path(i, j)
                w = self.path_weight(path)
                D[i][j], D[j][i] = w, w
        return DistanceMatrix(D)
class Tree:
    """A rooted tree: a root label plus (subtree, branch-length) children."""

    def __init__(self, root, *subtrees):
        self.root = root
        self.subtrees = subtrees

    def __str__(self):
        parts = [str(self.root)]
        parts.extend(str(child) for child in self.subtrees)
        return "{}({})".format(type(self).__name__, ", ".join(parts))

    def __repr__(self):
        return str(self)
3,021 | test accum wo reroute | #!/usr/env/python
from collections import deque
import numpy as np
import pytest
from landlab import FieldError, HexModelGrid, RasterModelGrid
from landlab.components import (
FlowAccumulator,
FlowDirectorDINF,
FlowDirectorSteepest,
LakeMapperBarnes,
)
from landlab.utils import StablePriorityQueue
"""
These tests test specific aspects of LakeMapperBarnes not picked up in the
various docstrings.
"""
def test_route_to_multiple_error_raised_init():
    """LakeMapperBarnes must refuse a grid whose flow is routed to multiple receivers (MFD)."""
    mg = RasterModelGrid((10, 10))
    z = mg.add_zeros("topographic__elevation", at="node")
    z += mg.x_of_node + mg.y_of_node
    fa = FlowAccumulator(mg, flow_director="MFD")
    fa.run_one_step()

    with pytest.raises(NotImplementedError):
        LakeMapperBarnes(mg)


def test_bad_init_method1():
    """An unknown `method` name must raise ValueError."""
    rmg = RasterModelGrid((5, 5), xy_spacing=2.0)
    rmg.add_zeros("topographic__elevation", at="node", dtype=float)
    _ = FlowAccumulator(rmg, flow_director="D8")
    with pytest.raises(ValueError):
        LakeMapperBarnes(rmg, method="Nope")


def test_bad_init_method2():
    """`method` is case-sensitive: lowercase "d8" must be rejected."""
    rmg = RasterModelGrid((5, 5), xy_spacing=2.0)
    rmg.add_zeros("topographic__elevation", at="node", dtype=float)
    _ = FlowAccumulator(rmg, flow_director="D8")
    with pytest.raises(ValueError):
        LakeMapperBarnes(rmg, method="d8")


def test_bad_init_gridmethod():
    """D8 is raster-only, so a hex grid with method="D8" must raise."""
    hmg = HexModelGrid((30, 29), spacing=3.0)
    hmg.add_zeros("topographic__elevation", at="node", dtype=float)
    _ = FlowAccumulator(hmg, flow_director="Steepest")
    with pytest.raises(ValueError):
        LakeMapperBarnes(hmg, method="D8")
def test_closed_up_grid():
    """A grid with every boundary closed has no outlet, so init must raise."""
    mg = RasterModelGrid((5, 5))
    for edge in ("left", "right", "top", "bottom"):
        mg.status_at_node[mg.nodes_at_edge(edge)] = mg.BC_NODE_IS_CLOSED
    mg.add_zeros("topographic__elevation", at="node", dtype=float)
    _ = FlowAccumulator(mg, flow_director="D8")
    with pytest.raises(ValueError):
        LakeMapperBarnes(mg)


def test_neighbor_shaping_no_fldir():
    """Requesting flow redirection without any flow director fields must raise."""
    mg = RasterModelGrid((5, 5))
    mg.add_zeros("topographic__elevation", at="node", dtype=float)
    with pytest.raises(FieldError):
        LakeMapperBarnes(mg, method="D8", redirect_flow_steepest_descent=True)


def test_neighbor_shaping_no_creation():
    """Without redirection the neighbor arrays must not be created at all."""
    mg = RasterModelGrid((5, 5))
    mg.add_zeros("topographic__elevation", at="node", dtype=float)
    _ = FlowAccumulator(mg)
    lmb = LakeMapperBarnes(mg, method="D8", redirect_flow_steepest_descent=False)
    with pytest.raises(AttributeError):
        lmb._neighbor_arrays
def test_neighbor_shaping_D8():
    """D8 on a raster builds two (nodes, 4) arrays (orthogonal + diagonal)."""
    mg = RasterModelGrid((5, 5))
    mg.add_zeros("topographic__elevation", at="node", dtype=float)
    _ = FlowAccumulator(mg)
    lmb = LakeMapperBarnes(mg, method="D8", redirect_flow_steepest_descent=True)
    for arr in (lmb._neighbor_arrays, lmb._link_arrays):
        assert len(arr) == 2
        assert arr[0].shape == (25, 4)
        assert arr[1].shape == (25, 4)
    assert len(lmb._neighbor_lengths) == mg.number_of_d8


def test_neighbor_shaping_D4():
    """Steepest (D4) on a raster builds a single (nodes, 4) array."""
    mg = RasterModelGrid((5, 5))
    mg.add_zeros("topographic__elevation", at="node", dtype=float)
    _ = FlowAccumulator(mg)
    lmb = LakeMapperBarnes(mg, method="Steepest", redirect_flow_steepest_descent=True)
    for arr in (lmb._neighbor_arrays, lmb._link_arrays):
        assert len(arr) == 1
        assert arr[0].shape == (25, 4)
    assert len(lmb._neighbor_lengths) == mg.number_of_links


def test_neighbor_shaping_hex():
    """A hex grid builds a single (nodes, 6) neighbor array."""
    hmg = HexModelGrid((6, 5), spacing=1.0)
    hmg.add_zeros("topographic__elevation", at="node", dtype=float)
    _ = FlowAccumulator(hmg)
    lmb = LakeMapperBarnes(hmg, redirect_flow_steepest_descent=True)
    for arr in (lmb._neighbor_arrays, lmb._link_arrays):
        assert len(arr) == 1
        assert arr[0].shape == (hmg.number_of_nodes, 6)
    assert len(lmb._neighbor_lengths) == hmg.number_of_links
def METHOD_NAME():
    """Reaccumulating flow without also redirecting it is inconsistent and must raise."""
    mg = RasterModelGrid((5, 5))
    mg.add_zeros("topographic__elevation", at="node", dtype=float)
    _ = FlowAccumulator(mg)
    with pytest.raises(ValueError):
        LakeMapperBarnes(
            mg,
            method="Steepest",
            redirect_flow_steepest_descent=False,
            reaccumulate_flow=True,
        )


def test_redirect_no_lakes():
    """Redirecting flow requires lake tracking; track_lakes=False must raise."""
    mg = RasterModelGrid((5, 5))
    mg.add_zeros("topographic__elevation", at="node", dtype=float)
    _ = FlowAccumulator(mg)
    with pytest.raises(ValueError):
        LakeMapperBarnes(
            mg, method="D8", track_lakes=False, redirect_flow_steepest_descent=True
        )


def test_route_to_many():
    """DINF routes to two receivers per node; redirection is unsupported there."""
    mg = RasterModelGrid((5, 5))
    mg.add_zeros("topographic__elevation", at="node", dtype=float)
    fd = FlowDirectorDINF(mg, "topographic__elevation")
    _ = FlowAccumulator(mg)
    fd.run_one_step()
    assert mg.at_node["flow__receiver_node"].shape == (mg.number_of_nodes, 2)
    with pytest.raises(NotImplementedError):
        LakeMapperBarnes(mg, method="D8", redirect_flow_steepest_descent=True)
def test_permitted_overfill():
    """Filling to a slant must tolerate float-epsilon overfill without erroring.

    Drives _fill_one_node_to_slant by hand until the queue is exhausted
    (signalled by KeyError).
    """
    mg = RasterModelGrid((3, 7))
    for edge in ("top", "right", "bottom"):
        mg.status_at_node[mg.nodes_at_edge(edge)] = mg.BC_NODE_IS_CLOSED
    z = mg.add_zeros("topographic__elevation", at="node", dtype=float)
    # 1.0000000000000004 sits one ULP above 1.0 to provoke the epsilon case.
    z.reshape(mg.shape)[1, 1:-1] = [1.0, 0.2, 0.1, 1.0000000000000004, 1.5]
    _ = FlowAccumulator(mg)
    lmb = LakeMapperBarnes(mg, method="Steepest")
    lmb._closed = mg.zeros("node", dtype=bool)
    lmb._closed[mg.status_at_node == mg.BC_NODE_IS_CLOSED] = True
    # NOTE(review): `open` shadows the builtin open() for the rest of this test.
    open = StablePriorityQueue()
    edges = np.array([7])
    for edgenode in edges:
        open.add_task(edgenode, priority=z[edgenode])
    lmb._closed[edges] = True
    while True:
        try:
            lmb._fill_one_node_to_slant(
                z, mg.adjacent_nodes_at_node, lmb._pit, open, lmb._closed, True
            )
        except KeyError:
            break
def test_no_reroute():
    """_redirect_flowdirs must point each lake pit at its own outlet node."""
    mg = RasterModelGrid((5, 5), xy_spacing=2.0)
    z = mg.add_zeros("topographic__elevation", at="node", dtype=float)
    # Two depressions: pit 6 draining via 1, and pit 17 draining via 18 -> 19.
    z[1] = -1.0
    z[6] = -2.0
    z[19] = -2.0
    z[18] = -1.0
    z[17] = -3.0
    fd = FlowDirectorSteepest(mg)
    fa = FlowAccumulator(mg)
    lmb = LakeMapperBarnes(
        mg,
        method="Steepest",
        fill_flat=True,
        redirect_flow_steepest_descent=True,
        track_lakes=True,
    )
    openq = StablePriorityQueue()
    # Map each lake outlet node to the deque of its flooded nodes.
    lake_dict = {1: deque([6]), 18: deque([17])}
    fd.run_one_step()  # fill the director fields
    fa.run_one_step()  # get a drainage_area
    orig_surf = lmb._track_original_surface()
    lmb._redirect_flowdirs(orig_surf, lake_dict, openq)

    assert mg.at_node["flow__receiver_node"][6] == 1
    assert mg.at_node["flow__receiver_node"][17] == 18
    assert mg.at_node["flow__receiver_node"][18] == 19
3,022 | get foo | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
"""The tests for decorators_async.py"""
try:
from unittest import mock
except ImportError:
import mock
import sys
import time
import pytest
from azure.core.pipeline import Pipeline, PipelineResponse
from azure.core.pipeline.policies import HTTPPolicy
from azure.core.pipeline.transport import HttpTransport
from azure.core.settings import settings
from azure.core.tracing import SpanKind
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from tracing_common import FakeSpan
from utils import HTTP_REQUESTS
@pytest.fixture(scope="module")
def fake_span():
    """Install FakeSpan as the tracing implementation for the whole module."""
    settings.tracing_implementation.set_value(FakeSpan)
class MockClient:
    """Fake pipeline client whose methods exercise the tracing decorators."""

    @distributed_trace
    def __init__(self, http_request, policies=None, assert_current_span=False):
        # sleep gives each span a non-zero duration.
        time.sleep(0.001)
        self.request = http_request("GET", "http://localhost")
        if policies is None:
            policies = []
        # Terminal policy short-circuits the pipeline into verify_request.
        policies.append(mock.Mock(spec=HTTPPolicy, send=self.verify_request))
        self.policies = policies
        self.transport = mock.Mock(spec=HttpTransport)
        self.pipeline = Pipeline(self.transport, policies=policies)

        self.expected_response = mock.Mock(spec=PipelineResponse)
        self.assert_current_span = assert_current_span

    def verify_request(self, request):
        if self.assert_current_span:
            # NOTE(review): `execution_context` is not imported anywhere in
            # this file — this branch would raise NameError if a test ever
            # passed assert_current_span=True. Confirm intended API.
            assert execution_context.get_current_span() is not None
        return self.expected_response

    @distributed_trace_async
    async def make_request(self, numb_times, **kwargs):
        """Run the pipeline, then recurse numb_times-1 more times merged into this span."""
        time.sleep(0.001)
        if numb_times < 1:
            return None
        response = self.pipeline.run(self.request, **kwargs)
        await self.METHOD_NAME(merge_span=True)
        kwargs["merge_span"] = True
        await self.make_request(numb_times - 1, **kwargs)
        return response

    @distributed_trace_async
    async def merge_span_method(self):
        return await self.METHOD_NAME(merge_span=True)

    @distributed_trace_async
    async def no_merge_span_method(self):
        return await self.METHOD_NAME()

    @distributed_trace_async
    async def METHOD_NAME(self):
        time.sleep(0.001)
        return 5

    @distributed_trace_async(name_of_span="different name")
    async def check_name_is_different(self):
        time.sleep(0.001)

    @distributed_trace_async(tracing_attributes={"foo": "bar"})
    async def tracing_attr(self):
        time.sleep(0.001)

    @distributed_trace_async(kind=SpanKind.PRODUCER)
    async def kind_override(self):
        time.sleep(0.001)

    @distributed_trace_async
    async def raising_exception(self):
        raise ValueError("Something went horribly wrong here")
@pytest.mark.usefixtures("fake_span")
class TestAsyncDecorator(object):
    """Verify the span tree produced by @distributed_trace_async on MockClient.

    Each test opens a FakeSpan "parent" and asserts on parent.children; the
    traced __init__ always contributes the first child span.
    """

    @pytest.mark.asyncio
    @pytest.mark.parametrize("http_request", HTTP_REQUESTS)
    async def test_decorator_tracing_attr(self, http_request):
        with FakeSpan(name="parent") as parent:
            client = MockClient(http_request)
            await client.tracing_attr()

        assert len(parent.children) == 2
        assert parent.children[0].name == "MockClient.__init__"
        assert parent.children[1].name == "MockClient.tracing_attr"
        assert parent.children[1].kind == SpanKind.INTERNAL
        assert parent.children[1].attributes == {"foo": "bar"}

    @pytest.mark.asyncio
    @pytest.mark.parametrize("http_request", HTTP_REQUESTS)
    async def test_decorator_has_different_name(self, http_request):
        with FakeSpan(name="parent") as parent:
            client = MockClient(http_request)
            await client.check_name_is_different()
        assert len(parent.children) == 2
        assert parent.children[0].name == "MockClient.__init__"
        assert parent.children[1].name == "different name"
        assert parent.children[1].kind == SpanKind.INTERNAL

    @pytest.mark.asyncio
    @pytest.mark.parametrize("http_request", HTTP_REQUESTS)
    async def test_kind_override(self, http_request):
        with FakeSpan(name="parent") as parent:
            client = MockClient(http_request)
            await client.kind_override()
        assert len(parent.children) == 2
        assert parent.children[0].name == "MockClient.__init__"
        assert parent.children[1].name == "MockClient.kind_override"
        assert parent.children[1].kind == SpanKind.PRODUCER

    @pytest.mark.asyncio
    @pytest.mark.parametrize("http_request", HTTP_REQUESTS)
    async def test_used(self, http_request):
        with FakeSpan(name="parent") as parent:
            client = MockClient(http_request, policies=[])
            await client.METHOD_NAME(parent_span=parent)
            await client.METHOD_NAME()

        assert len(parent.children) == 3
        assert parent.children[0].name == "MockClient.__init__"
        assert not parent.children[0].children
        assert parent.children[1].name == "MockClient.get_foo"
        assert not parent.children[1].children
        assert parent.children[2].name == "MockClient.get_foo"
        assert not parent.children[2].children

    @pytest.mark.asyncio
    @pytest.mark.parametrize("http_request", HTTP_REQUESTS)
    async def test_span_merge_span(self, http_request):
        # merge_span=True folds the callee into the caller's span; without it
        # the callee appears as a nested child span.
        with FakeSpan(name="parent") as parent:
            client = MockClient(http_request)
            await client.merge_span_method()
            await client.no_merge_span_method()

        assert len(parent.children) == 3
        assert parent.children[0].name == "MockClient.__init__"
        assert not parent.children[0].children
        assert parent.children[1].name == "MockClient.merge_span_method"
        assert not parent.children[1].children
        assert parent.children[2].name == "MockClient.no_merge_span_method"
        assert parent.children[2].children[0].name == "MockClient.get_foo"

    @pytest.mark.asyncio
    @pytest.mark.parametrize("http_request", HTTP_REQUESTS)
    async def test_span_complicated(self, http_request):
        with FakeSpan(name="parent") as parent:
            client = MockClient(http_request)
            await client.make_request(2)
            with parent.span("child") as child:
                time.sleep(0.001)
                await client.make_request(2, parent_span=parent)
                assert FakeSpan.get_current_span() == child
                await client.make_request(2)

        assert len(parent.children) == 4
        assert parent.children[0].name == "MockClient.__init__"
        assert not parent.children[0].children
        assert parent.children[1].name == "MockClient.make_request"
        assert not parent.children[1].children
        assert parent.children[2].name == "child"
        assert parent.children[2].children[0].name == "MockClient.make_request"
        assert parent.children[3].name == "MockClient.make_request"
        assert not parent.children[3].children

    @pytest.mark.asyncio
    @pytest.mark.parametrize("http_request", HTTP_REQUESTS)
    async def test_span_with_exception(self, http_request):
        """Assert that if an exception is raised, the next sibling method is actually a sibling span."""
        with FakeSpan(name="parent") as parent:
            client = MockClient(http_request)
            try:
                await client.raising_exception()
            except:
                pass
            await client.METHOD_NAME()

        assert len(parent.children) == 3
        assert parent.children[0].name == "MockClient.__init__"
        assert parent.children[1].name == "MockClient.raising_exception"
        # Exception should propagate status for Opencensus
        assert parent.children[1].status == "Something went horribly wrong here"
        assert parent.children[2].name == "MockClient.get_foo"
3,023 | has object | from os.path import split, join
from PyQt5.QtWidgets import QWidget
from PyQt5.QtCore import pyqtSignal, pyqtSlot
from PyQt5.uic import loadUiType
class Overview3D(QWidget):
"""
This class is the 3D widget seen in the bottom right corner in ilastik.
It is basically a container for the actual 3D view, a busy progress bar and some buttons.
The buttons are:
toggle_slice_x: QToolButton, to toggle the display of the x slicing plane
toggle_slice_y: QToolButton, to toggle the display of the y slicing plane
toggle_slice_z: QToolButton, to toggle the display of the z slicing plane
dock: QToolButton, to toggle the docking status of the widget
The progress bar:
progress: QProgressBar
It is used to indicate whether a numpy volume is converted into a mesh right now
The 3d view:
view: volumina.view3d.glview.GLView (promoted from QGraphicsView)
It displays the slicing planes and the labeling in 3d
slots:
slice_changed: emitted when the user changes the slicing in the 3d view
reinitialized: probably obsolete, used to indicate to some containers that this view is ready?
dock_status_changed: indicates that the dock button was toggled
"""
slice_changed = pyqtSignal()
reinitialized = pyqtSignal() # TODO: this should not be necessary: remove
dock_status_changed = pyqtSignal(bool)
def __init__(self, is_3d_widget_visible=False, *args, **kwargs):
"""
Creates the 3D widget object and sets its visibility.
:param is_3d_widget_visible: if True, the 3D widget will be visible
"""
super(QWidget, self).__init__(*args, **kwargs)
cls, _ = loadUiType(join(split(__file__)[0], "ui/view3d.ui"))
self._ui = cls()
self._ui.setupUi(self)
self._view = self._ui.view
# Set the visibility of 3D widget
self._view.setVisible(is_3d_widget_visible)
self._ui.show_3D_view.setChecked(is_3d_widget_visible)
self._progress = self._ui.progress
self._progress.setVisible(False) # this can't be set in QDesigner for some reason
self._mesh_generator_thread = None # the thread need to be stored so it doesn't get destroyed when out of scope
self.reinitialized.emit() # TODO: this should not be necessary: remove
self._view.slice_changed.connect(self.slice_changed)
@staticmethod
def _adjust_axes(x, y, z):
"""
The axes in ilastik are flipped so we need to adjust the order here.
"""
return x, y, z
def set_shape(self, shape):
"""
Set the shape for the 3d view.
When changed the slicing planes in the 3d view will be resized.
"""
self._view.shape = self._adjust_axes(*shape)
def get_slice(self):
"""
Get the current slice from the 3d view.
not used right now, as the slice cannot be changed easily in pyqtgraph.
"""
return self._adjust_axes(*self._view.slice)
def set_slice(self, slice_):
"""
Set the current slice for the 3d view.
Setting the slice will move the slice planes in the 3d view.
"""
self._view.slice = self._adjust_axes(*slice_)
def add_object(self, name, object_=None):
    """
    Add an object to the 3d view.

    See glview.GLView.add_mesh for more details.

    :param str name: the name to identify the object
    :param Optional[GLMeshItem] object_: the object to add
    """
    self._view.add_mesh(name, object_)

def remove_object(self, name):
    """
    Remove the object with the given name from the 3d view.

    :param str name: the identifying name
    """
    self._view.remove_mesh(name)

def invalidate_object(self, name):
    """
    Remove the object with the given name from the cache in the 3d view.

    :param str name: the identifying name
    """
    self._view.invalidate_cache(name)

def METHOD_NAME(self, name):
    """
    Check if the object given by the name is cached in the 3d view.

    :param str name: the identifying name
    :rtype: bool
    """
    return self._view.is_cached(name)

def get_visible_objects(self):
    """
    Get the label of all currently visible objects in the 3d view.

    :rtype: Set[int]
    """
    return set(self._view.visible_meshes)
@pyqtSlot(bool, name="on_toggle_slice_x_clicked")
@pyqtSlot(bool, name="on_toggle_slice_y_clicked")
@pyqtSlot(bool, name="on_toggle_slice_z_clicked")
def _on_toggle_slice(self, down):
    """
    The slot for the slice plane toggle button presses.

    When a toggle slice button is pressed the corresponding slice plane is
    toggled in the 3d view.  The axis is taken from the last character of
    the sending button's objectName ("x", "y" or "z").
    """
    sender = self.sender()
    self._view.toggle_slice(str(sender.objectName()[-1]), down)

@pyqtSlot(bool, name="on_dock_clicked")
def _on_dock_status_changed(self, status):
    """
    The slot for the dock status button.

    When the button is toggled the corresponding signal is emitted.
    This simply "forwards" the dock.clicked signal to the containing class's signal.
    """
    self.dock_status_changed.emit(status)

def set_busy(self, busy):
    """
    Set the busy state for this widget.

    Setting it to busy will show the progress bar.

    :param bool busy: True or False for the busy state
    """
    self._progress.setVisible(busy)
@pyqtSlot(int, name="on_show_3D_view_stateChanged")
def _on_toggle_3d_view(self, state):
    """
    Toggles the 3D widget when the "show 3D view" checkbox changes.

    :param state: checkbox state (0 for unchecked, non-zero for checked)
    :return: None
    """
    self._view.setVisible(state != 0)
    self.update()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2018-2022 F4PGA Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
import os
import edalize
import re
import subprocess
from toolchains.toolchain import Toolchain
from utils.utils import Timed, have_exec
class Radiant(Toolchain):
    '''Lattice Radiant based toolchains.

    Drives a Radiant flow through edalize and scrapes resource usage,
    peak memory use and timing results from the generated report files.
    '''
    strategies = ('Timing', 'Area')

    def __init__(self, rootdir):
        Toolchain.__init__(self, rootdir)
        # The RADIANT environment variable overrides the default install dir.
        self.radiantdir = os.getenv(
            "RADIANT",
            os.path.expanduser("~") + "/lscc/radiant/3.0"
        )
        self.files = []
        self.edam = None
        self.backend = None
        # Canonical resource name -> aliases as reported by Radiant.
        # Fix: 'PLL' and 'GLB' were written as ('PLL') / ('GLB'), which are
        # plain strings, so `x in value` degenerated to substring matching
        # (e.g. 'P' in 'PLL' is True); they must be one-element tuples.
        self.resources_map = {
            'LUT':
            (
                'LUT', 'LUTS', 'LUT1', 'LUT2', 'LUT3', 'LUT4', 'LUT5',
                'LUT6'
            ),
            'DFF': ('DFF', 'SB_DFF'),
            'CARRY': ('CARRY', 'SB_CARRY'),
            'IOB': ('SEIO33', 'IOB'),
            'PLL': ('PLL', ),
            'BRAM': ('BRAM', 'LRAM', 'EBR'),
            'DSP': ('PREADD9', 'MULT9', 'MULT18', 'MULT18X36', 'MULT36'),
            'GLB': ('GLB', ),
        }

    def prepare_edam(self):
        """Build and return the edalize EDAM description for this project."""
        os.makedirs(self.out_dir, exist_ok=True)
        part = f'{self.device}-{self.package}'.upper()
        if self.family == "ice40":
            part = "iCE40" + part
        radiant_options = {
            'part': part,
            'synth': self.synth_tool(),
            'strategy': self.strategy
        }
        edam = {
            'files': self.files,
            'name': self.project_name,
            'toplevel': self.top,
            'parameters':
            {
                'RADIANT':
                {
                    'paramtype': 'vlogdefine',
                    'datatype': 'int',
                    'default': 1,
                },
            },
            'tool_options': {
                'radiant': radiant_options
            }
        }
        return edam

    def run(self):
        """Run the complete flow (configure + build) under timing probes."""
        with Timed(self, 'total'):
            with Timed(self, 'prepare'):
                self.edam = self.prepare_edam()
                self.backend = edalize.get_edatool('radiant')(
                    edam=self.edam, work_root=self.out_dir
                )
            self.backend.configure("")
            self.backend.build()
        self.add_maximum_memory_use()

    def add_maximum_memory_use(self):
        """Scrape peak memory usage from the implementation .par report."""
        log_file = os.path.join(
            self.out_dir, "impl", self.project_name + "_impl.par"
        )
        with open(log_file, 'r') as file:
            for line in file:
                line = line.strip()
                if "Peak Memory Usage:" in line:
                    # "Peak Memory Usage: <value> ..." -> take the 4th token.
                    self.maximum_memory_use = line.split()[3]
                    return

    def radiant_ver(self):
        """Return the Radiant ProductType string from the install's ispsys.ini."""
        # Fix: use a context manager so the ini file handle is closed.
        with open(os.path.join(self.radiantdir, 'data', 'ispsys.ini')) as ini:
            for l in ini:
                if l.find('ProductType') == 0:
                    return l.split('=')[1].strip()

    @staticmethod
    def check_env():
        """Report whether the radiantc executable is reachable.

        Fix: this was declared with no parameters and no @staticmethod, so
        calling it on an instance raised TypeError.
        """
        return {
            'Radiant': have_exec('radiantc'),
        }

    @staticmethod
    def seedable():
        return False

    def METHOD_NAME(self):
        """Parse the device-utilization table from the .par report.

        :returns: {"synth": counts, "impl": counts}; the same counts are
            reported for both stages since only the impl report exists.
        """
        res_file = os.path.join(
            self.out_dir, "impl", self.project_name + "_impl.par"
        )
        raw_counts = dict()
        with open(res_file, "r") as file:
            processing = False
            for line in file:
                line = line.strip()
                if "Device utilization" in line:
                    # Enter the table and skip its header separator line.
                    processing = True
                    next(file)
                    continue
                if not processing:
                    continue
                if len(line) == 0:
                    # A blank line terminates the utilization table.
                    break
                res = line.split()
                if len(res) == 3:
                    # Three-column rows are sub-headers, not resources.
                    continue
                res_type = res[0]
                match = re.search(r"(\d+)", line)
                assert match
                raw_counts[res_type] = int(match.groups()[0])
        counts = self.get_resources_count(raw_counts)
        return {"synth": counts, "impl": counts}

    def max_freq(self):
        """Parse per-clock timing results from the .twr/.tws report."""
        freqs = dict()
        res_name = None
        freq_file_exts = ["twr", "tws"]
        path = ""
        for ext in freq_file_exts:
            temp_path = os.path.join(
                self.out_dir, "impl", self.project_name + "_impl." + ext
            )
            if os.path.isfile(temp_path):
                path = temp_path
                break
        assert path, "Path to the timing report file is empty"
        with open(path, "r") as file:
            for line in file:
                line = line.strip()
                if "From" in line:
                    res = line.split()
                    res_name = res[1]
                    freqs[res_name] = dict()
                    freqs[res_name]['requested'] = float(res[8])
                    # The achieved frequency is on the following line.
                    line = next(file)
                    res = line.split()
                    freqs[res_name]['actual'] = float(res[8])
                    freqs[res_name]['met'] = freqs[res_name]['actual'] > freqs[
                        res_name]['requested']
                if "Total N" in line:
                    match = re.match(
                        r"^Total.* *.(\d+\.\d+).* *.(\d+\.\d+).*", line
                    )
                    if match and res_name is not None:
                        setup_viol = float(match.groups()[0])
                        hold_viol = float(match.groups()[1])
                        freqs[res_name]['setup_violation'] = setup_viol
                        freqs[res_name]['hold_violation'] = hold_viol
        return freqs
class RadiantLSE(Radiant):
    '''Lattice Radiant using LSE for synthesis'''

    def __init__(self, rootdir):
        super().__init__(rootdir)
        self.synthtool = 'lse'
        self.toolchain = 'lse-radiant'

    def versions(self):
        return {'Radiant': self.radiant_ver()}

    def synth_tool(self):
        return self.synthtool
class RadiantSynpro(Radiant):
    '''Lattice Radiant using Synplify for synthesis'''

    def __init__(self, rootdir):
        super().__init__(rootdir)
        self.synthtool = 'synplify'
        self.toolchain = 'synpro-radiant'

    def synth_tool(self):
        return self.synthtool

    def versions(self):
        return {'Radiant': self.radiant_ver()}
#!/usr/bin/env python
# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import signal
import subprocess
from typing import Optional
import pytest
CONFIG_FILE_PATH: str
CREDENTIALS_FILE_PATH: str
REPO_ROOT: str
STREAMLIT_RELEASE_VERSION: Optional[str]
class TestCLIRegressions:
    """Suite of CLI regression tests to be run against a release build of the Streamlit library.

    Before running, ensure that you have:
    - An isolated environment with Streamlit installed in production mode (not development) as
      well as pytest. This can include the current version, nightly, or local build/wheel, like
      one of the following:
        pip install streamlit-nightly=[nightly tag]
        pip install lib/dist/<WHEEL_FILE>
        pip install streamlit
    - The STREAMLIT_RELEASE_VERSION environment variable must be set, such as:
        export STREAMLIT_RELEASE_VERSION=1.5.1

    You can then run the tests from the root of the Streamlit repository using one of the following:
        pytest scripts/cli_regression_tests.py
        make cli-regression-tests

    This test suite makes use of Python's built-in assert statement. Note that assertions in the
    form of `assert <expression>` use Pytest's assertion introspection. In some cases, a more clear
    error message is specified manually by using `assert <expression>, <message>`. See
    https://docs.pytest.org/en/7.0.x/how-to/assert.html#assert-details for more details.
    """

    @pytest.fixture(scope="module", autouse=True)
    def setup(self):
        """Initialize module globals, clear credentials, and clean up afterwards."""
        # ---- Initialization
        global CONFIG_FILE_PATH
        CONFIG_FILE_PATH = os.path.expanduser("~/.streamlit/config.toml")
        global CREDENTIALS_FILE_PATH
        CREDENTIALS_FILE_PATH = os.path.expanduser("~/.streamlit/credentials.toml")
        global REPO_ROOT
        REPO_ROOT = os.getcwd()
        global STREAMLIT_RELEASE_VERSION
        STREAMLIT_RELEASE_VERSION = os.environ.get("STREAMLIT_RELEASE_VERSION", None)

        # Ensure that there aren't any previously stored credentials
        if os.path.exists(CREDENTIALS_FILE_PATH):
            os.remove(CREDENTIALS_FILE_PATH)

        yield  # Run tests

        # ---- Tear Down
        # Remove testing credentials and any config written by the tests.
        if os.path.exists(CREDENTIALS_FILE_PATH):
            os.remove(CREDENTIALS_FILE_PATH)
        if os.path.exists(CONFIG_FILE_PATH):
            os.remove(CONFIG_FILE_PATH)
        self.run_command("streamlit cache clear")

    def parameterize(self, params):
        """Split a command string into an argv list for subprocess."""
        return params.split(" ")

    def read_process_output(self, proc, num_lines_to_read):
        """Read and return the first num_lines_to_read lines of proc's stdout."""
        num_lines_read = 0
        output = ""
        while num_lines_read < num_lines_to_read:
            output += proc.stdout.readline().decode("UTF-8")
            num_lines_read += 1
        return output

    def run_command(self, command):
        """Run a command to completion and return its decoded stdout."""
        return subprocess.check_output(self.parameterize(command)).decode("UTF-8")

    def run_single_proc(self, command, num_lines_to_read=4):
        """Start one process, capture its first lines of output, then kill it."""
        proc = subprocess.Popen(
            command,
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            preexec_fn=os.setpgrp,
        )
        output = self.read_process_output(proc, num_lines_to_read)
        try:
            os.kill(os.getpgid(proc.pid), signal.SIGTERM)
        except ProcessLookupError:
            # The process may have exited already. If so, we don't need to do anything
            pass
        return output

    def run_double_proc(
        self, command_one, command_two, wait_in_seconds=2, num_lines_to_read=4
    ):
        """Start two processes in order and return both captured outputs."""
        proc_one = subprocess.Popen(
            command_one,
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            preexec_fn=os.setpgrp,
        )
        # Getting the output from process one ensures the process started first
        output_one = self.read_process_output(proc_one, num_lines_to_read)
        proc_two = subprocess.Popen(
            command_two,
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            preexec_fn=os.setpgrp,
        )
        output_two = self.read_process_output(proc_two, num_lines_to_read)
        try:
            os.killpg(os.getpgid(proc_one.pid), signal.SIGKILL)
            os.killpg(os.getpgid(proc_two.pid), signal.SIGKILL)
        except ProcessLookupError:
            # The process may have exited already. If so, we don't need to do anything
            pass
        return output_one, output_two

    @pytest.mark.skipif(
        # NOTE: any non-empty value (even "false") skips, since bool() of a
        # non-empty string is True -- unchanged behavior, just simplified
        # from the redundant `== True` comparison.
        bool(os.environ.get("SKIP_VERSION_CHECK", False)),
        reason="Skip version verification when `SKIP_VERSION_CHECK` env var is set",
    )
    def test_streamlit_version(self):
        assert (
            STREAMLIT_RELEASE_VERSION is not None and STREAMLIT_RELEASE_VERSION != ""
        ), "You must set the $STREAMLIT_RELEASE_VERSION env variable"
        assert STREAMLIT_RELEASE_VERSION in self.run_command(
            "streamlit version"
        ), f"Package version does not match the desired version of {STREAMLIT_RELEASE_VERSION}"

    def METHOD_NAME(self):
        process = subprocess.Popen(
            "streamlit activate", stdin=subprocess.PIPE, shell=True
        )
        process.stdin.write(b"regressiontest@streamlit.io\n")  # type: ignore
        process.stdin.flush()  # type: ignore
        process.communicate()

        with open(CREDENTIALS_FILE_PATH) as f:
            assert (
                "regressiontest@streamlit.io" in f.read()
            ), "Email address was not found in the credentials file"

    def test_port_reassigned(self):
        """When starting a new Streamlit session, it will run on port 8501 by default. If 8501 is
        not available, it will use the next available port.
        """
        out_one, out_two = self.run_double_proc(
            f"streamlit run --server.headless=true {REPO_ROOT}/examples/file_uploader.py",
            f"streamlit run --server.headless=true {REPO_ROOT}/examples/file_uploader.py",
        )
        assert ":8501" in out_one, f"Incorrect port. See output:\n{out_one}"
        assert ":8502" in out_two, f"Incorrect port. See output:\n{out_two}"

    def test_conflicting_port(self):
        out_one, out_two = self.run_double_proc(
            f"streamlit run --server.headless=true {REPO_ROOT}/examples/file_uploader.py",
            f"streamlit run --server.headless=true --server.port=8501 {REPO_ROOT}/examples/file_uploader.py",
        )
        assert ":8501" in out_one, f"Incorrect port. See output:\n{out_one}"
        # Bug fix: the failure message previously printed out_one, which is
        # the *first* process's output; the conflict appears in out_two.
        assert (
            "Port 8501 is already in use" in out_two
        ), f"Incorrect conflict. See output:\n{out_two}"

    def test_cli_defined_port(self):
        out = self.run_single_proc(
            f"streamlit run --server.headless=true --server.port=9999 {REPO_ROOT}/examples/file_uploader.py",
        )
        assert ":9999" in out, f"Incorrect port. See output:\n{out}"

    def test_config_toml_defined_port(self):
        with open(CONFIG_FILE_PATH, "w") as file:
            file.write("[server]\n port=8888")
        out = self.run_single_proc(
            f"streamlit run --server.headless=true {REPO_ROOT}/examples/file_uploader.py",
        )
        assert ":8888" in out, f"Incorrect port. See output:\n{out}"
import asyncio
from .. import fhem
from .. import fhem_pythonbinding as fhepy
from .. import generic
# imports for dynamical usage, do NOT remove
from .devices.gateway import Gateway # noqa: F401
from .devices.sensor import (
ContactSensor,
HTSensor, # noqa: F401
MotionSensor,
SmokeSensor,
WaterLeakSensor,
)
# Map the model string reported by the gateway to the fhempy device class
# name; the class object is resolved via globals() in
# xiaomi_gateway3_device.initialize().
device_type_mapping = {
    "lumi.sensor_magnet": "ContactSensor",
    "lumi.sensor_magnet.v2": "ContactSensor",
    "lumi.sensor_magnet.aq2": "ContactSensor",
    "lumi.sensor_wleak": "WaterLeakSensor",
    "lumi.sensor_wleak.aq1": "WaterLeakSensor",
    "lumi.sensor_ht": "HTSensor",
    "lumi.sensor_ht.v1": "HTSensor",
    "lumi.sensor_ht.v2": "HTSensor",
    "lumi.weather": "HTSensor",
    "lumi.weather.v1": "HTSensor",
    "lumi.sensor_motion": "MotionSensor",
    "lumi.sensor_motion.v1": "MotionSensor",
    "lumi.sensor_motion.v2": "MotionSensor",
    "lumi.gateway": "Gateway",
    "lumi.gateway.mgl03": "Gateway",
    # Bluetooth sensors here (numeric product ids)
    "1371": "HTSensor",
    "2455": "SmokeSensor",
}
class xiaomi_gateway3_device(generic.FhemModule):
    """FHEM proxy for one Xiaomi Gateway3 sub-device.

    Resolves the gateway fhempy device by name, registers itself for
    updates and delegates FHEM calls to a concrete device class selected
    from device_type_mapping based on the reported model.
    """

    def __init__(self, logger):
        super().__init__(logger)
        self._fhempy_gateway = None  # gateway fhempy device, resolved in connect_gw
        self._fhempy_device = None  # concrete per-model device, created in initialize
        self.loop = asyncio.get_event_loop()

    # FHEM FUNCTION
    async def Define(self, hash, args, argsh):
        """Handle FHEM define: store gateway name + DID, start gateway lookup."""
        await super().Define(hash, args, argsh)
        if len(args) < 5:
            return (
                "Usage: define devname fhempy xiaomi_gateway3_device"
                " <GATEWAY_NAME> <DID>"
            )
        self.gw_name = args[3]
        self.did = args[4]
        hash["GATEWAY"] = self.gw_name
        hash["DID"] = self.did
        # change gateway did to 0, we just needed it for DID internals
        if self.did.find("0x") >= 0:
            self.did = "lumi.0"
        await fhem.readingsSingleUpdateIfChanged(self.hash, "state", "offline", 1)
        self.create_async_task(self.connect_gw())

    async def connect_gw(self):
        """Poll every 10s until the gateway device exists and registration succeeds."""
        while self._fhempy_gateway is None:
            self._fhempy_gateway = fhepy.getFhemPyDeviceByName(self.gw_name)
            if self._fhempy_gateway:
                try:
                    await self._fhempy_gateway.register_device(self, self.update)
                    await fhem.readingsSingleUpdateIfChanged(
                        self.hash, "state", "online", 1
                    )
                except Exception:
                    # Registration failed; reset and retry on the next loop.
                    self._fhempy_gateway = None
                    pass
            else:
                await fhem.readingsSingleUpdateIfChanged(
                    self.hash, "state", f"gateway {self.gw_name} not found", 1
                )
            await asyncio.sleep(10)

    async def initialize(self, device):
        """Create the concrete device class on the first update from the gateway."""
        if self._fhempy_gateway is None:
            return
        # first update, set attributes and device readings like model, sid, ...
        if str(device["model"]) not in device_type_mapping:
            self.logger.error(
                f"{device['model']} not yet supported, please report an issue here: "
                f"https://github.com/fhempy/fhempy/issues"
            )
            await fhem.readingsSingleUpdateIfChanged(
                self.hash, "state", f"unsupported device: {device['model']}", 1
            )
            return
        # create device based on device model
        self._fhempy_device = globals()[device_type_mapping[str(device["model"])]](
            self.logger, self._fhempy_gateway
        )
        self._fhempy_device.set_hash(self.hash)
        await self._fhempy_device.initialize(device)
        self._fhempy_gateway.gateway3.set_entity(self._fhempy_device)
        self._fhempy_gateway.gateway3.set_stats(self._fhempy_device)

    def update(self, data):
        # Forward raw gateway updates to the concrete device, once created.
        if self._fhempy_device is not None:
            self._fhempy_device.update(data)

    # FHEM functions which will be redirected to device type class
    async def FW_detailFn(self, hash, args, argsh):
        if self._fhempy_device is None:
            return await super().FW_detailFn(hash, args, argsh)
        return await self._fhempy_device.FW_detailFn(hash, args, argsh)

    async def METHOD_NAME(self, hash, args, argsh):
        if self._fhempy_device is None:
            return await super().METHOD_NAME(hash, args, argsh)
        return await self._fhempy_device.METHOD_NAME(hash, args, argsh)

    async def Attr(self, hash, args, argsh):
        if self._fhempy_device is None:
            return await super().Attr(hash, args, argsh)
        return await self._fhempy_device.Attr(hash, args, argsh)

    async def Undefine(self, hash):
        await super().Undefine(hash)
        if self._fhempy_device is not None:
            await self._fhempy_device.Undefine(hash)
# coding: utf-8
import sys
import pytest
import logging
import warnings
from sentry_sdk.integrations.logging import LoggingIntegration, ignore_logger
other_logger = logging.getLogger("testfoo")
logger = logging.getLogger(__name__)
# Reset both loggers' levels between tests so level changes don't leak
# from one test into the next.
@pytest.fixture(autouse=True)
def reset_level():
    other_logger.setLevel(logging.DEBUG)
    logger.setLevel(logging.DEBUG)


# Events are captured no matter which logger emitted them.
@pytest.mark.parametrize("logger", [logger, other_logger])
def test_logging_works_with_many_loggers(sentry_init, capture_events, logger):
    sentry_init(integrations=[LoggingIntegration(event_level="ERROR")])
    events = capture_events()

    logger.info("bread")
    logger.critical("LOL")
    (event,) = events
    assert event["level"] == "fatal"
    assert not event["logentry"]["params"]
    assert event["logentry"]["message"] == "LOL"
    assert any(crumb["message"] == "bread" for crumb in event["breadcrumbs"]["values"])


# The default config captures criticals as events and infos as breadcrumbs,
# regardless of how exc_info is (not) passed.
@pytest.mark.parametrize("integrations", [None, [], [LoggingIntegration()]])
@pytest.mark.parametrize(
    "kwargs", [{"exc_info": None}, {}, {"exc_info": 0}, {"exc_info": False}]
)
def test_logging_defaults(integrations, sentry_init, capture_events, kwargs):
    sentry_init(integrations=integrations)
    events = capture_events()

    logger.info("bread")
    logger.critical("LOL", **kwargs)
    (event,) = events
    assert event["level"] == "fatal"
    assert any(crumb["message"] == "bread" for crumb in event["breadcrumbs"]["values"])
    assert not any(
        crumb["message"] == "LOL" for crumb in event["breadcrumbs"]["values"]
    )
    assert "threads" not in event


# `extra` dict entries end up in the event's extra / breadcrumb data.
def test_logging_extra_data(sentry_init, capture_events):
    sentry_init(integrations=[LoggingIntegration()], default_integrations=False)
    events = capture_events()

    logger.info("bread", extra=dict(foo=42))
    logger.critical("lol", extra=dict(bar=69))
    (event,) = events
    assert event["level"] == "fatal"
    assert event["extra"] == {"bar": 69}
    assert any(
        crumb["message"] == "bread" and crumb["data"] == {"foo": 42}
        for crumb in event["breadcrumbs"]["values"]
    )


# Non-string extra keys are stringified.
def test_logging_extra_data_integer_keys(sentry_init, capture_events):
    sentry_init(integrations=[LoggingIntegration()], default_integrations=False)
    events = capture_events()

    logger.critical("integer in extra keys", extra={1: 1})
    (event,) = events
    assert event["extra"] == {"1": 1}


# exc_info=True attaches a stacktrace; without it there is none.
@pytest.mark.xfail(sys.version_info[:2] == (3, 4), reason="buggy logging module")
def test_logging_stack(sentry_init, capture_events):
    sentry_init(integrations=[LoggingIntegration()], default_integrations=False)
    events = capture_events()

    logger.error("first", exc_info=True)
    logger.error("second")
    (
        event_with,
        event_without,
    ) = events
    assert event_with["level"] == "error"
    assert event_with["threads"]["values"][0]["stacktrace"]["frames"]
    assert event_without["level"] == "error"
    assert "threads" not in event_without


# The logger's own level filters what the integration sees.
def METHOD_NAME(sentry_init, capture_events):
    sentry_init(integrations=[LoggingIntegration()], default_integrations=False)
    events = capture_events()

    logger.setLevel(logging.WARNING)
    logger.error("hi")
    (event,) = events
    assert event["level"] == "error"
    assert event["logentry"]["message"] == "hi"
    del events[:]

    logger.setLevel(logging.ERROR)
    logger.warning("hi")
    assert not events


# Renamed log levels must not confuse the level mapping.
def test_custom_log_level_names(sentry_init, capture_events):
    levels = {
        logging.DEBUG: "debug",
        logging.INFO: "info",
        logging.WARN: "warning",
        logging.WARNING: "warning",
        logging.ERROR: "error",
        logging.CRITICAL: "fatal",
        logging.FATAL: "fatal",
    }

    # set custom log level names
    # fmt: off
    logging.addLevelName(logging.DEBUG, u"custom level debüg: ")
    # fmt: on
    logging.addLevelName(logging.INFO, "")
    logging.addLevelName(logging.WARN, "custom level warn: ")
    logging.addLevelName(logging.WARNING, "custom level warning: ")
    logging.addLevelName(logging.ERROR, None)
    logging.addLevelName(logging.CRITICAL, "custom level critical: ")
    logging.addLevelName(logging.FATAL, "custom level 🔥: ")

    for logging_level, sentry_level in levels.items():
        logger.setLevel(logging_level)
        sentry_init(
            integrations=[LoggingIntegration(event_level=logging_level)],
            default_integrations=False,
        )
        events = capture_events()

        logger.log(logging_level, "Trying level %s", logging_level)
        assert events
        assert events[0]["level"] == sentry_level
        assert events[0]["logentry"]["message"] == "Trying level %s"
        assert events[0]["logentry"]["params"] == [logging_level]
        del events[:]


# Standard logging filters suppress records before they reach Sentry.
def test_logging_filters(sentry_init, capture_events):
    sentry_init(integrations=[LoggingIntegration()], default_integrations=False)
    events = capture_events()

    should_log = False

    class MyFilter(logging.Filter):
        def filter(self, record):
            return should_log

    logger.addFilter(MyFilter())
    logger.error("hi")
    assert not events

    should_log = True
    logger.error("hi")
    (event,) = events
    assert event["logentry"]["message"] == "hi"


# warnings routed through logging.captureWarnings become warning events.
def test_logging_captured_warnings(sentry_init, capture_events, recwarn):
    sentry_init(
        integrations=[LoggingIntegration(event_level="WARNING")],
        default_integrations=False,
    )
    events = capture_events()

    logging.captureWarnings(True)
    warnings.warn("first", stacklevel=2)
    warnings.warn("second", stacklevel=2)
    logging.captureWarnings(False)

    warnings.warn("third", stacklevel=2)

    assert len(events) == 2
    assert events[0]["level"] == "warning"
    # Captured warnings start with the path where the warning was raised
    assert "UserWarning: first" in events[0]["logentry"]["message"]
    assert events[0]["logentry"]["params"] == []

    assert events[1]["level"] == "warning"
    assert "UserWarning: second" in events[1]["logentry"]["message"]
    assert events[1]["logentry"]["params"] == []

    # Using recwarn suppresses the "third" warning in the test output
    assert len(recwarn) == 1
    assert str(recwarn[0].message) == "third"


def test_ignore_logger(sentry_init, capture_events):
    sentry_init(integrations=[LoggingIntegration()], default_integrations=False)
    events = capture_events()

    ignore_logger("testfoo")

    other_logger.error("hi")
    assert not events


# Wildcard patterns ignore whole logger subtrees, but not unrelated loggers.
def test_ignore_logger_wildcard(sentry_init, capture_events):
    sentry_init(integrations=[LoggingIntegration()], default_integrations=False)
    events = capture_events()

    ignore_logger("testfoo.*")

    nested_logger = logging.getLogger("testfoo.submodule")

    logger.error("hi")
    nested_logger.error("bye")
    (event,) = events
    assert event["logentry"]["message"] == "hi"
#!/usr/bin/python
import json
from collections import OrderedDict
from sys import argv
def METHOD_NAME(payload):
    """Serialize *payload* as pretty-printed JSON with sorted keys."""
    return json.dumps(payload, sort_keys=True, indent=4)
if argv[1] == 'update_order':
    # Join the FW manifest (sequence/model/group per component) with the
    # versioning report (up-to-date flags) and print the update groups that
    # need updating, in manifest Sequence order.
    fw_manifest = argv[2]
    ver_manifest = argv[3]
    updateItems = {}
    updateOrder = OrderedDict()
    with open(fw_manifest) as f:
        manifest_jsonify = json.load(f)
    with open(ver_manifest) as f:
        version_jsonify = json.load(f)
    # Grab sequence type info from FW Manifest..
    # updateItems[name] = [Sequence, CompModel, group]
    for obj in manifest_jsonify:
        try:
            for component in manifest_jsonify[obj]['Items']:
                updateItems[component['CompName']] = \
                    [
                        component['Sequence'],
                        component['CompModel'],
                        obj
                    ]
        except KeyError as e:
            pass
    # Iterate through FW Versioning, write Update Condition to updateItems
    # NOTE(review): the bare excepts below key on either 'ID' or 'Model';
    # components matching neither are skipped silently.
    for item in version_jsonify:
        for component in version_jsonify[item]['Items']:
            if not component['IsUpToDate']:
                try:
                    updateItems[component['ID']].append({'NeedsUpdate': True})
                except:
                    try:
                        updateItems[component['Model']].append({'NeedsUpdate': True})
                    except:
                        continue
            if component['IsUpToDate']:
                try:
                    updateItems[component['ID']].append({'NeedsUpdate': False})
                except:
                    try:
                        updateItems[component['Model']].append({'NeedsUpdate': False})
                    except:
                        continue
    # Items with no per-component verdict inherit their group's state.
    for i in updateItems:
        try:
            needsUpdate = updateItems[i][3]
            pass
        except IndexError:
            group = updateItems[i][2]
            for item in version_jsonify[group]['Items']:
                if not item['IsUpToDate']:
                    updateItems[i].append({'NeedsUpdate': True})
                    break
                if item['IsUpToDate']:
                    updateItems[i].append({'NeedsUpdate': False})
                    continue
    # De-duplicate each item's entries while preserving order.
    for k in updateItems:
        updateItems[k] = [i for n, i in enumerate(updateItems[k]) if i not in updateItems[k][n + 1:]]
    # Sort by the manifest Sequence number.
    sortedUpdateItems = sorted(updateItems.items(), key=lambda x: x[1][0])
    try:
        if argv[4] == 'order_length':
            # Print the zero-based count of items needing an update.
            count = 0
            for i in sortedUpdateItems:
                if i[1][3]['NeedsUpdate']:
                    count += 1
            print(count - 1)
            exit(0)
    except IndexError:
        pass
    itemsToUpdate = OrderedDict()
    for i in sortedUpdateItems:
        if i[1][3]['NeedsUpdate']:
            # CEC components are printed by name; all others by their group.
            if i[0] == 'MB_CEC':
                itemsToUpdate[(str(i[0]))] = True
            elif i[0] == 'Delta_CEC':
                itemsToUpdate[(str(i[0]))] = True
            else:
                itemsToUpdate[str(i[1][2])] = True
    for item in itemsToUpdate:
        print(item)
    exit(0)

if argv[1] == 'parse_update_json':
    # Reduce a line-oriented firmware-update log to one status JSON object.
    file_path = argv[2]
    fw_update_json = {
        'Error': True,
        'State': 'Unknown',
        'Action': 'Check Output Log'
    }
    with open(file_path) as f:
        fw_update = f.readlines()
    for line in fw_update:
        try:
            lineJson = json.loads(line)
            if 'FirmwareLoadAction' in json.loads(line).keys():  # Detects if chassis-level power cycle is required
                fw_update_json = json.loads(line)
            if 'Reboot required' in lineJson['Message']:  # Detects if host-level reboot is required
                fw_update_json['RebootRequired'] = True
            if lineJson['State'] == 'Failed':
                fw_update_json['State'] = 'Failed'
                fw_update_json['Message'] = lineJson['Message']
                break
            if lineJson['State'] == 'Canceled':
                fw_update_json['State'] = 'Canceled'
                fw_update_json['Message'] = lineJson['Message']
                break
            if lineJson['State'] == 'Done':
                fw_update_json['State'] = 'Done'
                fw_update_json['Message'] = lineJson['Message']
        except Exception as e:
            # Non-JSON lines (and JSON lines missing keys) are ignored.
            continue
    print(METHOD_NAME(fw_update_json))

if argv[1] == 'parse_versioning':
    # Keep the last parseable JSON object found in the raw output.
    file_path = argv[2]
    manifest_json = {
        'ErrorWritingVersioning': True
    }
    with open(file_path) as f:
        output_all = f.readlines()
    # Grab JSON from raw output
    for line in output_all:
        try:
            manifest_json = json.loads(line)
        except ValueError:
            pass
    try:
        if manifest_json['ErrorWritingVersioning']:
            print('No JSON could be loaded, is the container already running?')
            exit(1)
    except KeyError:
        # The sentinel key is gone, so real versioning JSON was found.
        print(json.dumps(manifest_json,
                         sort_keys=True,
                         indent=4
                         )
              )
import qgis.gui
from qgis.core import Qgis
from qgis.PyQt import QtCore
from qgis.PyQt.QtCore import QCoreApplication
from qgis.PyQt.QtCore import QEventLoop
from qgis.PyQt.QtCore import Qt
from qgis.PyQt.QtCore import QThread
from qgis.PyQt.QtWidgets import QProgressBar
from qgis.PyQt.QtWidgets import QPushButton
from qgis.utils import iface
from .logger import log
class tr_worker:
    """Namespace for translating worker UI strings via Qt."""

    def METHOD_NAME(message):
        # NOTE(review): declared without self/@staticmethod; callers invoke
        # it on the class (tr_worker.METHOD_NAME(...)), which works in py3.
        return QCoreApplication.translate("tr_worker", message)


class AbstractWorker(QtCore.QObject):
    """Abstract worker, inherit from this and implement the work method"""

    # available signals to be used in the concrete worker
    finished = QtCore.pyqtSignal(object)
    was_killed = QtCore.pyqtSignal(object)
    error = QtCore.pyqtSignal(Exception)
    progress = QtCore.pyqtSignal(float)
    toggle_show_progress = QtCore.pyqtSignal(bool)
    set_message = QtCore.pyqtSignal(str)
    toggle_show_cancel = QtCore.pyqtSignal(bool)

    # private signal, don't use in concrete workers this is automatically
    # emitted if the result is not None
    successfully_finished = QtCore.pyqtSignal(object)

    def __init__(self):
        QtCore.QObject.__init__(self)
        self.killed = False  # set by kill(); work() should poll this flag

    def run(self):
        """Run work() and translate its outcome into the finished/error signals."""
        try:
            result = self.work()
            self.finished.emit(result)
        except UserAbortedNotification:
            self.finished.emit(None)
        except Exception as e:
            # forward the exception upstream
            self.error.emit(e)
            self.finished.emit(None)

    def work(self):
        """Reimplement this putting your calculation here
        available are:
            self.progress.emit(0-100)
            self.killed
        :returns a python object - use None if killed is true
        """
        raise NotImplementedError

    def kill(self):
        """Request cancellation: flag the worker and update the UI via signals."""
        self.killed = True
        self.set_message.emit("Aborting...")
        self.toggle_show_progress.emit(False)
        self.was_killed.emit(None)


class UserAbortedNotification(Exception):
    """Raised inside work() to signal that the user aborted the job."""
    pass
def start_worker(worker, iface, message, with_progress=True):
    """Start *worker* on a new QThread with a message-bar progress/cancel UI.

    :param worker: an AbstractWorker subclass instance
    :param iface: the QGIS interface (provides the message bar and main window)
    :param message: text shown in the message bar item
    :param with_progress: if False the progress bar runs in busy mode
    :returns: (thread, message_bar_item)
    """
    message_bar_item = qgis.gui.QgsMessageBar.createMessage(message)
    progress_bar = QProgressBar()
    progress_bar.setAlignment(Qt.AlignLeft | Qt.AlignVCenter)
    if not with_progress:
        # min == max == 0 puts the bar into "undefined"/busy mode.
        progress_bar.setMinimum(0)
        progress_bar.setMaximum(0)
    cancel_button = QPushButton()
    cancel_button.setText("Cancel")
    cancel_button.clicked.connect(worker.kill)
    message_bar_item.layout().addWidget(progress_bar)
    message_bar_item.layout().addWidget(cancel_button)
    message_bar = iface.messageBar()
    message_bar.pushWidget(message_bar_item, Qgis.Info)

    # start the worker in a new thread
    # let Qt take ownership of the QThread
    thread = QThread(iface.mainWindow())
    worker.moveToThread(thread)
    worker.set_message.connect(
        lambda message: set_worker_message(message, message_bar_item)
    )
    worker.toggle_show_progress.connect(
        lambda show: toggle_worker_progress(show, progress_bar)
    )
    worker.toggle_show_cancel.connect(
        lambda show: toggle_worker_cancel(show, cancel_button)
    )
    worker.finished.connect(
        lambda result: worker_finished(result, thread, worker, iface, message_bar_item)
    )
    worker.error.connect(lambda e: worker_error(e, message))
    worker.was_killed.connect(
        lambda result: worker_killed(result, thread, worker, iface, message_bar_item)
    )

    def _set_progress_bar_value(value: float):
        # QProgressBar.setValue takes an int; workers emit floats.
        progress_bar.setValue(int(value))

    worker.progress.connect(_set_progress_bar_value)
    thread.started.connect(worker.run)
    thread.start()
    return thread, message_bar_item
def worker_killed(result, thread, worker, iface, message_bar_item):
    """Slot for the was_killed signal; cleanup is handled by worker_finished."""
    pass


def worker_finished(result, thread, worker, iface, message_bar_item):
    """Remove the progress UI, forward a non-None result, and tear down the thread."""
    message_bar = iface.messageBar()
    message_bar.popWidget(message_bar_item)
    if result is not None:
        worker.successfully_finished.emit(result)
    # clean up the worker and thread
    worker.deleteLater()
    thread.quit()
    thread.wait()
    thread.deleteLater()


def worker_error(e, message):
    """Log an exception raised inside a worker thread."""
    log(f"Exception in worker thread ({message}): {e}")


def set_worker_message(message, message_bar_item):
    """Update the text shown in the worker's message-bar item."""
    message_bar_item.setText(message)
def toggle_worker_progress(show_progress, progress_bar):
    """Switch *progress_bar* between determinate (0-100) and busy mode."""
    progress_bar.setMinimum(0)
    # A maximum of 0 shows an undefined ("busy") progress indicator.
    progress_bar.setMaximum(100 if show_progress else 0)
def toggle_worker_cancel(show_cancel, cancel_button):
    """Show or hide the worker's cancel button."""
    cancel_button.setVisible(show_cancel)
class StartWorker:
    """Run a worker synchronously.

    Starts the worker through start_worker() and blocks a local QEventLoop
    until the worker's finished signal fires; any exception emitted by the
    worker is re-raised in the caller.
    """

    def __init__(self, worker_class, process_name, *args):
        self.exception = None
        self.success = None
        self.return_val = None
        self.worker = worker_class(*args)
        # Local event loop: quits when the worker emits finished.
        pause = QEventLoop()
        self.worker.finished.connect(pause.quit)
        self.worker.successfully_finished.connect(self.save_success)
        self.worker.error.connect(self.save_exception)
        start_worker(
            self.worker, iface, tr_worker.METHOD_NAME("Processing: {}".format(process_name))
        )
        pause.exec_()
        if self.exception:
            raise self.exception

    def save_success(self, val=None):
        # Slot for successfully_finished; stores the worker's result.
        self.return_val = val
        self.success = True

    def get_return(self):
        return self.return_val

    def was_killed(self):
        return self.worker.killed

    def save_exception(self, exception):
        # Slot for the error signal; the exception is re-raised in __init__.
        self.exception = exception

    def get_exception(self):
        return self.exception
# Copyright (c) 2013, 2018 National Technology and Engineering Solutions of Sandia, LLC . Under the terms of Contract
# DE-NA0003525 with National Technology and Engineering Solutions of Sandia, LLC, the U.S. Government
# retains certain rights in this software.
import RemoteComputationInterface
import subprocess
import os
import configparser
import threading
import time
class RemoteSlurmComputation(RemoteComputationInterface):
    """Slurm implementation of the remote computation interface.

    Jobs are created, submitted and controlled by shelling out to the Slurm
    command-line tools (sbatch, checkjob, scancel, scontrol).

    NOTE(review): methods are declared without ``self`` (module-style
    callables grouped in a class) and call each other by bare name; that
    original calling convention is preserved unchanged here.
    """
    def connect(host, username=None, password=None, token=None):
        """No-op: local Slurm commands need no remote connection."""
        pass
    def get_session(sid):
        """No-op: sessions are not tracked for local Slurm access."""
        pass
    def disconnect(sid):
        """No-op counterpart of connect()."""
        pass
    def run_command(command):
        """Run *command* (a whitespace-separated string) and return a dict
        with its "output" and "errors" text streams."""
        results = {}
        command = command.split(' ')
        # universal_newlines=True decodes stdout/stderr to str so callers
        # can do text processing ("State" in line, .split(), etc.).
        p = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE, universal_newlines=True)
        # BUG FIX: communicate was referenced but never called, so "output"
        # previously held a bound method instead of the process output.
        results["output"], results["errors"] = p.communicate()
        return results
    def generate_job_file(params):
        """Write a Slurm batch script at params["job_file_path"] from the
        account/partition/size/time settings plus the payload commands."""
        # BUG FIX: the file must be opened for writing; the default read
        # mode made every job_file.write() below raise.
        with open(params["job_file_path"], "w") as job_file:
            job_file.write("#!/bin/bash\n\n")
            job_file.write("#SBATCH --account=%s\n" % params["account_id"])
            job_file.write("#SBATCH --job-name=%s\n" % params["job_name"])
            job_file.write("#SBATCH --partition=%s\n\n" % params["partition"])
            job_file.write("#SBATCH --nodes=%s\n" % params["number_of_nodes"])
            job_file.write("#SBATCH --ntasks-per-node=%s\n" % params["number_of_tasks_per_node"])
            job_file.write("#SBATCH --time=%s:%s:%s\n" % (params["time_hours"], params["time_minutes"], params["time_seconds"]))
            for c in params["commands"]:
                job_file.write("%s\n" % c)
    def submit_job(job_file_path):
        """Submit the batch script and return the numeric job id parsed
        from sbatch's output."""
        results = run_command("sbatch %s" % job_file_path)
        jid = [int(s) for s in results["output"].split() if s.isdigit()][0]
        return jid
    def METHOD_NAME(jid):
        """Return the job's state (e.g. RUNNING, COMPLETED) as reported by
        checkjob, or "UNKNOWN" when it cannot be parsed."""
        results = run_command("checkjob %s" % jid)
        status = "UNKNOWN"
        # BUG FIX: iterate over lines of the output; iterating the string
        # directly yields single characters and never matched "State".
        for line in results["output"].splitlines():
            if "State" in line:
                try:
                    status = line.split(':')[1].strip().upper()
                except Exception:
                    status = "UNKNOWN"
                break
        return status
    def check_job_thread(interval, jid, success, fail, logger, stop_event):
        """Poll the job state every *interval* seconds until it reaches a
        terminal state, invoking success()/fail() accordingly and setting
        *stop_event* before exiting."""
        # BUG FIX: the counter was initialised as `retry_count` but used as
        # `retry_counter` everywhere else, causing a NameError on the first
        # failed check.
        retry_counter = 5
        while True:
            try:
                status = METHOD_NAME(jid)
            except Exception:
                logger("Something went wrong while checking on job %s status, trying again..." % jid)
                retry_counter = retry_counter - 1
                if retry_counter == 0:
                    fail("Something went wrong while checking on job %s status: check for the generated files when the job completes" % jid)
                    stop_event.set()
                    break
                status = "ERROR"
                time.sleep(60)
            logger("Job %s returned with status %s" % (jid, status))
            if status == "RUNNING":
                # Healthy job: reset the retry budget.
                retry_counter = 5
            if status == "CANCELLED" or status == "REMOVED":
                fail("Job %s was cancelled" % jid)
                stop_event.set()
                break
            if status == "VACATED":
                fail("Job %s was vacated due to system failure" % jid)
                stop_event.set()
                break
            if status == "REMOVED":
                fail("Job %s was removed by the scheduler due to exceeding walltime or violating another policy" % jid)
                stop_event.set()
                break
            if status == "COMPLETED":
                success()
                stop_event.set()
                break
            if status == "FAILED" or status == "UNKNOWN" or status == "NOTQUEUED":
                retry_counter = retry_counter - 1
                if retry_counter == 0:
                    fail("Job %s has failed" % jid)
                    stop_event.set()
                    break
                # in case something went wrong and still willing to try, wait
                # for 30 seconds and try another check
                time.sleep(30)
            # interval between each of the checks
            time.sleep(interval)
    def check_job_loop(interval, jid, success, fail, logger):
        """Start check_job_thread() in a daemon-style background thread."""
        stop_event = threading.Event()
        t = threading.Thread(target=check_job_thread, args=(interval, jid, success, fail, logger, stop_event))
        t.start()
    def cancel_job(jid):
        """Cancel the job via scancel."""
        results = run_command("scancel %s" % jid)
        # TODO check results["errors"] for actual errors, if any return False
        return True
    def pause_job(jid):
        """Suspend the job via scontrol."""
        results = run_command("scontrol suspend %s" % jid)
        # TODO check results["errors"] for actual errors, if any return False
        return True
    def resume_job(jid):
        """Resume a suspended job via scontrol."""
        results = run_command("scontrol resume %s" % jid)
        # TODO check results["errors"] for actual errors, if any return False
        return True
    def get_job_output(path, jid):
        """Return the contents of slurm-<jid>.out under *path*, or an
        explanatory message when the file does not exist."""
        f = path + "slurm-%s.out" % jid
        # BUG FIX: os.path has no isFile attribute; the correct name is
        # os.path.isfile, so this branch always raised AttributeError.
        if os.path.isfile(f):
            results = run_command("cat %s" % f)
        else:
            return "The file %s does not exist." % f
        return results["output"]
    def set_slycatrc(config):
        """Persist *config* (a dict of section -> {option: value}) to the
        user's ~/.slycatrc file, skipping empty values."""
        rc = os.path.expanduser('~') + ("/.slycatrc")
        rc_file = open(rc, "w+")
        parser = configparser.RawConfigParser()
        for section_key in config:
            if not parser.has_section(section_key):
                parser.add_section(section_key)
            section = config[section_key]
            for option_key in section:
                if not str(section[option_key]) == "":
                    # Values are quoted so get_slycatrc() can eval them back.
                    parser.set(section_key, option_key, "\"%s\"" % section[option_key])
        parser.write(rc_file)
        rc_file.close()
        # TODO if anything goes wrong return false instead
        return True
    def get_slycatrc():
        """Read ~/.slycatrc back into a nested dict; returns {"ok": bool}
        plus either "config" or "errors"."""
        results = {}
        rc = os.path.expanduser('~') + ("/.slycatrc")
        if os.path.isfile(rc):
            try:
                parser = configparser.RawConfigParser()
                parser.read(rc)
                # SECURITY NOTE: eval() is used to unquote values written by
                # set_slycatrc(); the rc file is user-owned, but a safer
                # parser (e.g. ast.literal_eval) would be preferable.
                config = { section: { key: eval(value) for key, value in parser.items(section) } for section in parser.sections() }
                results["ok"] = True
                results["config"] = config
            except Exception as e:
                results["ok"] = False
                results["errors"] = "%s" % e
        else:
            results["ok"] = False
            results["errors"] = "The user does not have a .slycatrc file under their home directory"
        return results
3,031 | file line helper | from tkinter import *
from idlelib.EditorWindow import EditorWindow
import re
import tkinter.messagebox as tkMessageBox
from idlelib import IOBinding
class OutputWindow(EditorWindow):
    """An editor window that can serve as an output file.

    Also the future base class for the Python shell window.
    This class has no input facilities.
    """
    def __init__(self, *args):
        EditorWindow.__init__(self, *args)
        self.text.bind("<<goto-file-line>>", self.goto_file_line)
    # Customize EditorWindow
    def ispythonsource(self, filename):
        # Treat contents as plain text: no colorization needed.
        return 0
    def short_title(self):
        return "Output"
    def maybesave(self):
        # Override base class method -- don't ask any questions
        return "yes" if self.get_saved() else "no"
    # Act as output file
    def write(self, s, tags=(), mark="insert"):
        """Insert *s* at *mark*, keep it visible, and return the number of
        characters written (file-like protocol)."""
        # BUG FIX: the original tested isinstance(s, (bytes, bytes)) -- the
        # tuple duplicated the same type; a single bytes check is equivalent
        # and clearly the intent.
        if isinstance(s, bytes):
            s = s.decode(IOBinding.encoding, "replace")
        self.text.insert(mark, s, tags)
        self.text.see(mark)
        self.text.update()
        return len(s)
    def writelines(self, lines):
        """File-like protocol: write each item of *lines*."""
        for line in lines:
            self.write(line)
    def flush(self):
        """File-like protocol: nothing is buffered, so this is a no-op."""
        pass
    # Our own right-button menu
    rmenu_specs = [
        ("Cut", "<<cut>>", "rmenu_check_cut"),
        ("Copy", "<<copy>>", "rmenu_check_copy"),
        ("Paste", "<<paste>>", "rmenu_check_paste"),
        (None, None, None),
        ("Go to file/line", "<<goto-file-line>>", None),
    ]
    file_line_pats = [
        # order of patterns matters
        r'file "([^"]*)", line (\d+)',
        r'([^\s]+)\((\d+)\)',
        r'^(\s*\S.*?):\s*(\d+):',  # Win filename, maybe starting with spaces
        r'([^\s]+):\s*(\d+):',  # filename or path, ltrim
        r'^\s*(\S.*?):\s*(\d+):',  # Win abs path with embedded spaces, ltrim
    ]
    # Compiled lazily on first use, shared by all instances via the class.
    file_line_progs = None
    def goto_file_line(self, event=None):
        """Handler for <<goto-file-line>>: parse a file/line reference from
        the line under the cursor (or, failing that, the line above -- handy
        for tracebacks) and open that file at that line."""
        if self.file_line_progs is None:
            self.file_line_progs = [re.compile(pat, re.IGNORECASE)
                                    for pat in self.file_line_pats]
        line = self.text.get("insert linestart", "insert lineend")
        result = self.METHOD_NAME(line)
        if not result:
            # Try the previous line. This is handy e.g. in tracebacks,
            # where you tend to right-click on the displayed source line
            line = self.text.get("insert -1line linestart",
                                 "insert -1line lineend")
            result = self.METHOD_NAME(line)
            if not result:
                tkMessageBox.showerror(
                    "No special line",
                    "The line you point at doesn't look like "
                    "a valid file name followed by a line number.",
                    parent=self.text)
                return
        filename, lineno = result
        edit = self.flist.open(filename)
        edit.gotoline(lineno)
    def METHOD_NAME(self, line):
        """Return (filename, lineno) when *line* matches one of the
        file/line patterns and names an existing file; otherwise None."""
        for prog in self.file_line_progs:
            match = prog.search(line)
            if match:
                filename, lineno = match.group(1, 2)
                try:
                    # Only accept matches that name a readable file.
                    f = open(filename, "r")
                    f.close()
                    break
                except OSError:
                    continue
        else:
            return None
        try:
            return filename, int(lineno)
        except TypeError:
            return None
# These classes are currently not used but might come in handy
class OnDemandOutputWindow:
    """Proxy that creates the real OutputWindow lazily, on first write."""
    tagdefs = {
        # XXX Should use IdlePrefs.ColorPrefs
        "stdout": {"foreground": "blue"},
        "stderr": {"foreground": "#007700"},
    }
    def __init__(self, flist):
        self.flist = flist
        # The real window; created by setup() on first write.
        self.owin = None
    def write(self, s, tags, mark):
        """Create the window if needed, then forward the write to it."""
        if not self.owin:
            self.setup()
        self.owin.write(s, tags, mark)
    def setup(self):
        """Create the OutputWindow and configure its text tags.

        Note: the last line rebinds self.write to the window's own write,
        so the lazy-creation check above runs only once per instance.
        """
        self.owin = owin = OutputWindow(self.flist)
        text = owin.text
        for tag, cnf in self.tagdefs.items():
            if cnf:
                text.tag_configure(tag, **cnf)
        text.tag_raise('sel')
        self.write = self.owin.write
3,032 | test with timeout | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
# http://www.apache.org/licenses/LICENSE-2.0
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
ModelServiceWorker is the worker that is started by the MMS front-end.
"""
import socket
from collections import namedtuple
import mock
import pytest
from mock import Mock
from mms.model_service_worker import MXNetModelServiceWorker
from mms.service import Service
@pytest.fixture()
def socket_patches(mocker):
    """Patch socket.socket and queue canned recv() frames.

    The frames encode a LOAD command: the b"L" opcode followed by
    4-byte big-endian length prefixes and payload fields (presumably
    model name, model path, batch size, handler, gpu id -- matches the
    worker's wire protocol; verify against the frontend encoder).
    """
    Patches = namedtuple('Patches', ['socket'])
    mock_patch = Patches(mocker.patch('socket.socket'))
    mock_patch.socket.recv.side_effect = [
        b"L",
        b"\x00\x00\x00\x0a", b"model_name",
        b"\x00\x00\x00\x0a", b"model_path",
        b"\x00\x00\x00\x01",
        b"\x00\x00\x00\x07", b"handler",
        b"\x00\x00\x00\x01"
    ]
    return mock_patch
@pytest.fixture()
def model_service_worker(socket_patches):
    """Build a worker wired to the patched socket, with a preset Service."""
    model_service_worker = MXNetModelServiceWorker('unix', 'my-socket', None, None)
    model_service_worker.sock = socket_patches.socket
    model_service_worker.service = Service('name', 'mpath', 'testmanifest', None, 0, 1)
    return model_service_worker
# noinspection PyClassHasNoInit
class TestInit:
    """Constructor tests for MXNetModelServiceWorker."""
    socket_name = "sampleSocketName"
    def test_missing_socket_name(self):
        # No socket type/name supplied -> constructor must reject.
        with pytest.raises(ValueError, match="Invalid socket type provided.*"):
            MXNetModelServiceWorker()
    def test_socket_in_use(self, mocker):
        # An existing socket file that cannot be removed means the socket
        # is (or appears) in use; the constructor must fail loudly.
        remove = mocker.patch('os.remove')
        path_exists = mocker.patch('os.path.exists')
        remove.side_effect = OSError()
        path_exists.return_value = True
        with pytest.raises(Exception, match=r".*socket already in use: sampleSocketName.*"):
            MXNetModelServiceWorker('unix', self.socket_name)
    @pytest.fixture()
    def patches(self, mocker):
        """Patch os.remove and socket.socket for the success path."""
        Patches = namedtuple('Patches', ['remove', 'socket'])
        patches = Patches(
            mocker.patch('os.remove'),
            mocker.patch('socket.socket')
        )
        return patches
    def test_success(self, patches):
        # Happy path: stale socket file removed, AF_UNIX stream created.
        MXNetModelServiceWorker('unix', self.socket_name)
        patches.remove.assert_called_once_with(self.socket_name)
        patches.socket.assert_called_once_with(socket.AF_UNIX, socket.SOCK_STREAM)
# noinspection PyClassHasNoInit
class TestRunServer:
    """Tests for MXNetModelServiceWorker.run_server: bind errors, accept
    timeouts, DEBUG-mode behaviour, and the normal accept loop."""
    accept_result = (mock.MagicMock(), None)
    def test_with_socket_bind_error(self, socket_patches, model_service_worker):
        # A bind failure must propagate and stop before listen().
        bind_exception = socket.error("binding error")
        socket_patches.socket.bind.side_effect = bind_exception
        with pytest.raises(Exception):
            model_service_worker.run_server()
        socket_patches.socket.bind.assert_called()
        socket_patches.socket.listen.assert_not_called()
    def METHOD_NAME(self, socket_patches, model_service_worker):
        # accept() timing out must raise after bind/listen succeeded.
        exception = socket.timeout("Some Exception")
        socket_patches.socket.accept.side_effect = exception
        with pytest.raises(socket.timeout):
            model_service_worker.run_server()
        socket_patches.socket.bind.assert_called()
        socket_patches.socket.listen.assert_called()
        socket_patches.socket.accept.assert_called()
    def test_with_run_server_debug(self, socket_patches, model_service_worker, mocker):
        # With DEBUG enabled, a generic accept() error still surfaces.
        exception = Exception("Some Exception")
        socket_patches.socket.accept.side_effect = exception
        mocker.patch('mms.model_service_worker.DEBUG', True)
        model_service_worker.handle_connection = Mock()
        with pytest.raises(Exception):
            model_service_worker.run_server()
        socket_patches.socket.bind.assert_called()
        socket_patches.socket.listen.assert_called()
        socket_patches.socket.accept.assert_called()
    def test_success(self, model_service_worker):
        # SystemExit from accept() terminates the loop after one iteration.
        model_service_worker.sock.accept.return_value = self.accept_result
        model_service_worker.sock.recv.return_value = b""
        exception = SystemExit
        model_service_worker.sock.accept.side_effect = exception
        with pytest.raises(SystemExit):
            model_service_worker.run_server()
        model_service_worker.sock.accept.assert_called_once()
# noinspection PyClassHasNoInit
class TestLoadModel:
    """Tests for MXNetModelServiceWorker.load_model."""
    data = {'modelPath': b'mpath', 'modelName': b'name', 'handler': b'handled'}
    @pytest.fixture()
    def patches(self, mocker):
        """Patch the ModelLoaderFactory used by load_model."""
        Patches = namedtuple('Patches', ['loader'])
        patches = Patches(mocker.patch('mms.model_service_worker.ModelLoaderFactory'))
        return patches
    def test_load_model(self, patches, model_service_worker):
        patches.loader.get_model_loader.return_value = Mock()
        model_service_worker.load_model(self.data)
        patches.loader.get_model_loader.assert_called()
    # noinspection PyUnusedLocal
    @pytest.mark.parametrize('batch_size', [(None, None), ('1', 1)])
    @pytest.mark.parametrize('gpu', [(None, None), ('2', 2)])
    def test_optional_args(self, patches, model_service_worker, batch_size, gpu):
        # batchSize and gpu are optional; exercise all present/absent combos.
        data = self.data.copy()
        if batch_size[0]:
            data['batchSize'] = batch_size[0]
        if gpu[0]:
            data['gpu'] = gpu[0]
        model_service_worker.load_model(data)
# noinspection PyClassHasNoInit
class TestHandleConnection:
    """Tests for MXNetModelServiceWorker.handle_connection."""
    data = {'modelPath': b'mpath', 'modelName': b'name', 'handler': b'handled'}
    @pytest.fixture()
    def patches(self, mocker):
        """Patch the wire-protocol message reader."""
        Patches = namedtuple("Patches", ["retrieve_msg"])
        patches = Patches(
            mocker.patch("mms.model_service_worker.retrieve_msg")
        )
        return patches
    def test_handle_connection(self, patches, model_service_worker):
        # Feed a LOAD, an INFERENCE, then an unknown b"U" command: the
        # first two must be dispatched, the last must raise.
        patches.retrieve_msg.side_effect = [(b"L", ""), (b"I", ""), (b"U", "")]
        model_service_worker.load_model = Mock()
        model_service_worker.service.predict = Mock()
        model_service_worker._remap_io = Mock()
        service = Mock()
        service.context = None
        model_service_worker.load_model.return_value = ("", 200)
        model_service_worker.service.predict.return_value = ("OK")
        model_service_worker._remap_io.return_value = ("")
        cl_socket = Mock()
        with pytest.raises(ValueError, match=r"Received unknown command.*"):
            model_service_worker.handle_connection(cl_socket)
        cl_socket.send.assert_called()
3,033 | test perceval params | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2023 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Valerio Cosentino <valcos@bitergia.com>
#
import logging
import unittest
from base import TestBaseBackend
from grimoire_elk.raw.graal import GraalOcean
HEADER_JSON = {"Content-Type": "application/json"}
class TestCoDepDocker(TestBaseBackend):
    """Test CoDepDocker backend"""
    connector = "dockerdeps"
    ocean_index = "test_" + connector
    enrich_index = "test_" + connector + "_enrich"
    def test_has_identites(self):
        """Test value of has_identities method"""
        enrich_backend = self.connectors[self.connector][2]()
        # Dependency data carries no author/identity information.
        self.assertFalse(enrich_backend.has_identities())
    def test_items_to_raw(self):
        """Test whether JSON items are properly inserted into ES"""
        result = self._test_items_to_raw()
        self.assertGreater(result['items'], 0)
        self.assertGreater(result['raw'], 0)
        self.assertGreaterEqual(result['items'], result['raw'])
    def test_raw_to_enrich(self):
        """Test whether the raw index is properly enriched"""
        result = self._test_raw_to_enrich()
        self.assertGreater(result['raw'], 0)
        self.assertGreater(result['enrich'], 0)
        self.assertGreaterEqual(result['enrich'], result['raw'])
        items = [item for item in self.enrich_backend.fetch()]
        # Every enriched item comes from the test Dockerfile and must carry
        # a non-null dependency field.
        item = items[0]
        self.assertEqual(item['file_path'], 'tests/Dockerfile')
        self.assertEqual(item['origin'], 'https://github.com/chaoss/grimoirelab')
        self.assertIsNotNone(item['dependency'])
        item = items[1]
        self.assertEqual(item['file_path'], 'tests/Dockerfile')
        self.assertEqual(item['origin'], 'https://github.com/chaoss/grimoirelab')
        self.assertIsNotNone(item['dependency'])
        item = items[2]
        self.assertEqual(item['file_path'], 'tests/Dockerfile')
        self.assertEqual(item['origin'], 'https://github.com/chaoss/grimoirelab')
        self.assertIsNotNone(item['dependency'])
    def test_raw_to_enrich_projects(self):
        """Test enrich with Projects"""
        result = self._test_raw_to_enrich(projects=True)
        items = [item for item in self.enrich_backend.fetch()]
        # As test_raw_to_enrich, plus the project mapping fields.
        item = items[0]
        self.assertEqual(item['file_path'], 'tests/Dockerfile')
        self.assertEqual(item['origin'], 'https://github.com/chaoss/grimoirelab')
        self.assertIsNotNone(item['dependency'])
        self.assertEqual(item['project'], 'Main')
        self.assertEqual(item['project_1'], 'Main')
        item = items[1]
        self.assertEqual(item['file_path'], 'tests/Dockerfile')
        self.assertEqual(item['origin'], 'https://github.com/chaoss/grimoirelab')
        self.assertIsNotNone(item['dependency'])
        self.assertEqual(item['project'], 'Main')
        self.assertEqual(item['project_1'], 'Main')
        item = items[2]
        self.assertEqual(item['file_path'], 'tests/Dockerfile')
        self.assertEqual(item['origin'], 'https://github.com/chaoss/grimoirelab')
        self.assertIsNotNone(item['dependency'])
        self.assertEqual(item['project'], 'Main')
        self.assertEqual(item['project_1'], 'Main')
    def METHOD_NAME(self):
        """Test the extraction of perceval params from an URL"""
        url = "https://github.com/grimoirelab/perceval"
        expected_params = [
            'https://github.com/grimoirelab/perceval'
        ]
        self.assertListEqual(GraalOcean.get_perceval_params_from_url(url), expected_params)
        # A trailing local path in the URL spec must be dropped.
        url = "https://github.com/grimoirelab/perceval /tmp/perceval-repo"
        expected_params = [
            'https://github.com/grimoirelab/perceval'
        ]
        self.assertListEqual(GraalOcean.get_perceval_params_from_url(url), expected_params)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("requests").setLevel(logging.WARNING)
unittest.main(warnings='ignore') |
3,034 | h | #!/usr/bin/python2.6
#
# Copyright (C) Christian Thurau, 2010.
# Licensed under the GNU General Public License (GPL).
# http://www.gnu.org/licenses/gpl.txt
"""
PyMF Simplex Volume Maximization [1]
SIVM_SEARCH: class for search-SiVM
[1] C. Thurau, K. Kersting, and C. Bauckhage. Yes We Can - Simplex Volume
Maximization for Descriptive Web-Scale Matrix Factorization. In Proc. Int.
Conf. on Information and Knowledge Management. ACM. 2010.
"""
import scipy.sparse
import numpy as np
from scipy import inf
try:
from scipy.misc import factorial
except:
from scipy.special import factorial # scipy > 1.3
from .dist import *
from .vol import *
from .sivm import SIVM
__all__ = ["SIVM_SEARCH"]
class SIVM_SEARCH(SIVM):
    """
    SIVM_SEARCH(data, num_bases=4, dist_measure='l2')
    Simplex Volume Maximization. Factorize a data matrix into two matrices s.t.
    F = | data - W*H | is minimal. H is restricted to convexity. W is iteratively
    found by maximizing the volume of the resulting simplex (see [1]). A solution
    is found by employing a simple A-star like search strategy.
    Parameters
    ----------
    data : array_like, shape (_data_dimension, _num_samples)
        the input data
    num_bases: int, optional
        Number of bases to compute (column rank of W and row rank of H).
        4 (default)
    dist_measure : one of 'l2' ,'cosine', 'l1', 'kl'
        Standard is 'l2' which maximizes the volume of the simplex. In contrast,
        'cosine' maximizes the volume of a cone (see [1] for details).
    init : string (default: 'fastmap')
        'fastmap' or 'origin'. Sets the method used for finding the very first
        basis vector. 'Origin' assumes the zero vector, 'Fastmap' picks one of
        the two vectors that have the largest pairwise distance.
    Attributes
    ----------
    W : "data_dimension x num_bases" matrix of basis vectors
    H : "num bases x num_samples" matrix of coefficients
    ferr : frobenius norm (after calling .factorize())
    Example
    -------
    Applying SIVM to some rather stupid data set:
    >>> import numpy as np
    >>> data = np.array([[1.0, 0.0, 2.0], [0.0, 1.0, 1.0]])
    >>> sivm_mdl = SIVM_SEARCH(data, num_bases=2)
    >>> sivm_mdl.factorize()
    The basis vectors are now stored in sivm_mdl.W, the coefficients in sivm_mdl.H.
    To compute coefficients for an existing set of basis vectors simply copy W
    to sivm_mdl.W, and set compute_w to False:
    >>> data = np.array([[1.5, 1.3], [1.2, 0.3]])
    >>> W = np.array([[1.0, 0.0], [0.0, 1.0]])
    >>> sivm_mdl = SIVM_SEARCH(data, num_bases=2)
    >>> sivm_mdl.W = W
    >>> sivm_mdl.factorize(compute_w=False)
    The result is a set of coefficients sivm_mdl.H, s.t. data = W * sivm_mdl.H.
    """
    def update_w(self):
        """Select basis columns by an A*-like search over column subsets,
        scoring each subset by its simplex volume (Cayley-Menger det)."""
        def METHOD_NAME(sel, D, k):
            # compute the volume for a selection of sel columns
            # and a k-1 simplex (-> k columns have to be selected)
            mv = np.max(D)
            # fill the remaining distance by the maximal overall found distance
            d = np.zeros((k, k)) + mv
            for i in range(k):
                d[i, i] = 0.0
            for idx_i, i in enumerate(sel):
                for idx_j, j in enumerate(sel):
                    d[idx_i, idx_j] = D[i, j]
            return d
        # compute distance matrix -> required for the volume
        D = pdist(self.data, self.data)
        # Openset maps candidate column-index tuples to their volume.
        Openset = {}
        for i in range(self._num_samples):
            # compute volume for temp selection
            d = METHOD_NAME([i], D, self._num_bases)
            Vtmp = cmdet(d)
            Openset[tuple([i])] = Vtmp
        Closedset = {}
        finished = False
        self._v = []
        self.init_sivm()
        next_sel = np.array([self.select[0]])
        iter = 0
        while not finished:
            # add the current selection to closedset
            Closedset[(tuple(next_sel))] = []
            for i in range(D.shape[0]):
                # create a temp selection
                tmp_sel = np.array(next_sel).flatten()
                tmp_sel = np.concatenate((tmp_sel, [i]), axis=0)
                tmp_sel = np.unique(tmp_sel)
                tmp_sel = list(tmp_sel)
                hkey = tuple(tmp_sel)
                # BUG FIX (py3): dict.has_key() was removed in Python 3; the
                # `in` operator is the equivalent, portable membership test.
                if len(tmp_sel) > len(next_sel) and (
                        hkey not in Closedset) and (
                        hkey not in Openset):
                    # compute volume for temp selection
                    d = METHOD_NAME(tmp_sel, D, self._num_bases)
                    Vtmp = cmdet(d)
                    # add to openset
                    Openset[hkey] = Vtmp
            # get next best tuple
            vmax = 0.0
            # BUG FIX (py3): dict.iteritems() was removed; items() is the
            # Python 3 equivalent.
            for (k, v) in Openset.items():
                if v > vmax:
                    next_sel = k
                    vmax = v
            self._logger.info('Iter:' + str(iter))
            self._logger.info('Current selection:' + str(next_sel))
            self._logger.info('Current volume:' + str(vmax))
            self._v.append(vmax)
            # remove next_sel from openset
            Openset.pop(next_sel)
            if len(list(next_sel)) == self._num_bases:
                finished = True
            iter += 1
        # update some values ...
        self.select = list(next_sel)
        self.W = self.data[:, self.select]
if __name__ == "__main__":
import doctest
doctest.testmod() |
3,035 | placements | """
Placements are representations of a laserjob origin.
A project may contain multiple such placements, for every placement
a copy of the plan will be executed with the placement indicating
the relative position
"""
from meerk40t.core.node.node import Node
from meerk40t.core.units import Angle, Length
from meerk40t.svgelements import Matrix
class PlacePointNode(Node):
    """
    PlacePointNode is the bootstrapped node type for the 'place point' type.

    A placement defines a job origin: position (x, y), a rotation, which
    corner of the job outline is anchored (0-3, or 4 for the center), and
    how many times the job repeats at this spot (loops).
    """
    def __init__(self, x=0, y=0, rotation=0, corner=0, loops=1, **kwargs):
        self.x = x
        self.y = y
        self.rotation = rotation
        self.corner = corner
        self.loops = loops
        self.output = True
        super().__init__(type="place point", **kwargs)
        self._formatter = "{enabled}{loops}{element_type} {corner} {x} {y} {rotation}"
        self.validate()
    def validate(self):
        """Coerce persisted (possibly string) attribute values into their
        runtime types, falling back to safe defaults on bad input."""
        if isinstance(self.output, str):
            # BUG FIX: bool("False") is True -- every non-empty string is
            # truthy, and bool() never raises ValueError (the old except
            # branch was dead code). Parse the text explicitly instead.
            self.output = self.output.lower() not in ("false", "no", "0", "")
        try:
            if isinstance(self.x, str):
                self.x = float(Length(self.x))
            elif isinstance(self.x, Length):
                self.x = float(self.x)
        except ValueError:
            self.x = 0
        try:
            if isinstance(self.y, str):
                self.y = float(Length(self.y))
            elif isinstance(self.y, Length):
                self.y = float(self.y)
        except ValueError:
            self.y = 0
        try:
            if isinstance(self.rotation, str):
                self.rotation = Angle(self.rotation).radians
            elif isinstance(self.rotation, Angle):
                self.rotation = self.rotation.radians
        except ValueError:
            self.rotation = 0
        try:
            # Clamp to the valid corner range (0-3 corners, 4 = center).
            self.corner = min(4, max(0, int(self.corner)))
        except ValueError:
            self.corner = 0
        # repetitions at the same point
        if self.loops is None:
            self.loops = 1
        else:
            try:
                self.loops = int(self.loops)
            except ValueError:
                self.loops = 1
    def __copy__(self):
        nd = self.node_dict
        return PlacePointNode(**nd)
    def METHOD_NAME(self, context, outline, matrix, plan):
        """Yield the transformation matrix for this placement.

        The anchor is the chosen outline corner (or centroid for corner 4);
        the job is shifted so that anchor lands on (x, y), rotated by the
        placement rotation around the anchor.
        """
        if outline is None:
            # This job can't be placed.
            return
        scene_width = context.device.unit_width
        scene_height = context.device.unit_height
        # x/y may be relative lengths (e.g. percentages of the scene).
        unit_x = Length(self.x, relative_length=scene_width).units
        unit_y = Length(self.y, relative_length=scene_height).units
        x, y = matrix.point_in_matrix_space((unit_x, unit_y))
        if 0 <= self.corner <= 3:
            cx, cy = outline[self.corner]
        else:
            # corner == 4: anchor at the outline centroid.
            cx = sum([c[0] for c in outline]) / len(outline)
            cy = sum([c[1] for c in outline]) / len(outline)
        x -= cx
        y -= cy
        shift_matrix = Matrix()
        if self.rotation != 0:
            shift_matrix.post_rotate(self.rotation, cx, cy)
        shift_matrix.post_translate(x, y)
        yield matrix * shift_matrix
    def default_map(self, default_map=None):
        """Fill the display map used by the tree label formatter."""
        default_map = super().default_map(default_map=default_map)
        default_map["element_type"] = "Placement"
        default_map.update(self.__dict__)
        try:
            xlen = Length(self.x, digits=2)
            # print (f"{float(xlen):.2f} = {xlen.length_cm}")
        except ValueError:
            xlen = Length(0, digits=2)
        try:
            ylen = Length(self.y, digits=2)
        except ValueError:
            ylen = Length(0, digits=2)
        default_map["position"] = f"{xlen.length_cm}, {ylen.length_cm}"
        default_map["x"] = f"{xlen.length_cm}"
        default_map["y"] = f"{ylen.length_cm}"
        default_map["rotation"] = f"{Angle(self.rotation, digits=1).degrees}°"
        default_map["loops"] = f"{str(self.loops) + 'X ' if self.loops > 1 else ''}"
        # Small glyph indicating which corner is the anchor.
        if self.corner == 0:
            default_map["corner"] = "`+ "
        elif self.corner == 1:
            default_map["corner"] = " +'"
        elif self.corner == 2:
            default_map["corner"] = " +."
        elif self.corner == 3:
            default_map["corner"] = ".+ "
        else:
            default_map["corner"] = " + "
        default_map["enabled"] = "(Disabled) " if not self.output else ""
        return default_map
    def drop(self, drag_node, modify=True):
        """Placements accept no dropped nodes."""
        # if drag_node.type.startswith("op"):
        #     if modify:
        #         self.insert_sibling(drag_node)
        #     return True
        return False
3,036 | allow updates | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetApplicationResult',
'AwaitableGetApplicationResult',
'get_application',
'get_application_output',
]
@pulumi.output_type
class GetApplicationResult:
    """
    Contains information about an application in a Batch account.
    """
    # NOTE: pulumi-generated code -- each constructor argument is validated
    # and stored via pulumi.set; access goes through the typed properties.
    def __init__(__self__, METHOD_NAME=None, default_version=None, display_name=None, etag=None, id=None, name=None, type=None):
        if METHOD_NAME and not isinstance(METHOD_NAME, bool):
            raise TypeError("Expected argument 'allow_updates' to be a bool")
        pulumi.set(__self__, "allow_updates", METHOD_NAME)
        if default_version and not isinstance(default_version, str):
            raise TypeError("Expected argument 'default_version' to be a str")
        pulumi.set(__self__, "default_version", default_version)
        if display_name and not isinstance(display_name, str):
            raise TypeError("Expected argument 'display_name' to be a str")
        pulumi.set(__self__, "display_name", display_name)
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="allowUpdates")
    def METHOD_NAME(self) -> Optional[bool]:
        """
        A value indicating whether packages within the application may be overwritten using the same version string.
        """
        return pulumi.get(self, "allow_updates")
    @property
    @pulumi.getter(name="defaultVersion")
    def default_version(self) -> Optional[str]:
        """
        The package to use if a client requests the application but does not specify a version. This property can only be set to the name of an existing package.
        """
        return pulumi.get(self, "default_version")
    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> Optional[str]:
        """
        The display name for the application.
        """
        return pulumi.get(self, "display_name")
    @property
    @pulumi.getter
    def etag(self) -> str:
        """
        The ETag of the resource, used for concurrency statements.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The ID of the resource.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource.
        """
        return pulumi.get(self, "type")
class AwaitableGetApplicationResult(GetApplicationResult):
    """Awaitable wrapper: lets the result be used with `await` by yielding
    nothing and returning a plain GetApplicationResult (pulumi-generated)."""
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetApplicationResult(
            METHOD_NAME=self.METHOD_NAME,
            default_version=self.default_version,
            display_name=self.display_name,
            etag=self.etag,
            id=self.id,
            name=self.name,
            type=self.type)
def get_application(account_name: Optional[str] = None,
                    application_name: Optional[str] = None,
                    resource_group_name: Optional[str] = None,
                    opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetApplicationResult:
    """
    Gets information about the specified application.
    Azure REST API version: 2023-05-01.
    :param str account_name: The name of the Batch account.
    :param str application_name: The name of the application. This must be unique within the account.
    :param str resource_group_name: The name of the resource group that contains the Batch account.
    """
    # Invoke the provider synchronously and unpack the typed result.
    __args__ = dict()
    __args__['accountName'] = account_name
    __args__['applicationName'] = application_name
    __args__['resourceGroupName'] = resource_group_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:batch:getApplication', __args__, opts=opts, typ=GetApplicationResult).value
    return AwaitableGetApplicationResult(
        METHOD_NAME=pulumi.get(__ret__, 'allow_updates'),
        default_version=pulumi.get(__ret__, 'default_version'),
        display_name=pulumi.get(__ret__, 'display_name'),
        etag=pulumi.get(__ret__, 'etag'),
        id=pulumi.get(__ret__, 'id'),
        name=pulumi.get(__ret__, 'name'),
        type=pulumi.get(__ret__, 'type'))
# Output-typed variant: the decorator lifts get_application so the inputs
# and the returned value are pulumi Outputs (body intentionally elided).
@_utilities.lift_output_func(get_application)
def get_application_output(account_name: Optional[pulumi.Input[str]] = None,
                           application_name: Optional[pulumi.Input[str]] = None,
                           resource_group_name: Optional[pulumi.Input[str]] = None,
                           opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetApplicationResult]:
    """
    Gets information about the specified application.
    Azure REST API version: 2023-05-01.
    :param str account_name: The name of the Batch account.
    :param str application_name: The name of the application. This must be unique within the account.
    :param str resource_group_name: The name of the resource group that contains the Batch account.
    """
    ...
3,037 | main | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2012 Dag Wieers <dag@wieers.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Ansible module documentation block (parsed as YAML by ansible-doc).
DOCUMENTATION = '''
---
module: hpilo_boot
author: Dag Wieers (@dagwieers)
short_description: Boot system using specific media through HP iLO interface
description:
- "This module boots a system through its HP iLO interface. The boot media
  can be one of: cdrom, floppy, hdd, network or usb."
- This module requires the hpilo python module.
extends_documentation_fragment:
- community.general.attributes
attributes:
  check_mode:
    support: none
  diff_mode:
    support: none
options:
  host:
    description:
    - The HP iLO hostname/address that is linked to the physical system.
    type: str
    required: true
  login:
    description:
    - The login name to authenticate to the HP iLO interface.
    default: Administrator
    type: str
  password:
    description:
    - The password to authenticate to the HP iLO interface.
    default: admin
    type: str
  media:
    description:
    - The boot media to boot the system from
    choices: [ "cdrom", "floppy", "rbsu", "hdd", "network", "normal", "usb" ]
    type: str
  image:
    description:
    - The URL of a cdrom, floppy or usb boot media image.
      protocol://username:password@hostname:port/filename
    - protocol is either 'http' or 'https'
    - username:password is optional
    - port is optional
    type: str
  state:
    description:
    - The state of the boot media.
    - "no_boot: Do not boot from the device"
    - "boot_once: Boot from the device once and then not thereafter"
    - "boot_always: Boot from the device each time the server is rebooted"
    - "connect: Connect the virtual media device and set to boot_always"
    - "disconnect: Disconnects the virtual media device and set to no_boot"
    - "poweroff: Power off the server"
    default: boot_once
    type: str
    choices: [ "boot_always", "boot_once", "connect", "disconnect", "no_boot", "poweroff" ]
  force:
    description:
    - Whether to force a reboot (even when the system is already booted).
    - As a safeguard, without force, hpilo_boot will refuse to reboot a server that is already running.
    default: false
    type: bool
  ssl_version:
    description:
    - Change the ssl_version used.
    default: TLSv1
    type: str
    choices: [ "SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2" ]
requirements:
- python-hpilo
notes:
- To use a USB key image you need to specify floppy as boot media.
- This module ought to be run from a system that can access the HP iLO
  interface directly, either by using C(local_action) or using C(delegate_to).
'''
EXAMPLES = r'''
- name: Task to boot a system using an ISO from an HP iLO interface only if the system is an HP server
community.general.hpilo_boot:
host: YOUR_ILO_ADDRESS
login: YOUR_ILO_LOGIN
password: YOUR_ILO_PASSWORD
media: cdrom
image: http://some-web-server/iso/boot.iso
when: cmdb_hwmodel.startswith('HP ')
delegate_to: localhost
- name: Power off a server
community.general.hpilo_boot:
host: YOUR_ILO_HOST
login: YOUR_ILO_LOGIN
password: YOUR_ILO_PASSWORD
state: poweroff
delegate_to: localhost
'''
RETURN = '''
# Default return values
'''
import time
import traceback
import warnings
HPILO_IMP_ERR = None
try:
import hpilo
HAS_HPILO = True
except ImportError:
HPILO_IMP_ERR = traceback.format_exc()
HAS_HPILO = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
# Suppress warnings from hpilo
warnings.simplefilter('ignore')
def METHOD_NAME():
    """Ansible entry point.

    Configures boot media / virtual media state on an HP iLO, then boots,
    reboots or powers off the attached server depending on ``state``.
    Exits via module.exit_json / module.fail_json.
    """
    module = AnsibleModule(
        argument_spec=dict(
            host=dict(type='str', required=True),
            login=dict(type='str', default='Administrator'),
            password=dict(type='str', default='admin', no_log=True),
            media=dict(type='str', choices=['cdrom', 'floppy', 'rbsu', 'hdd', 'network', 'normal', 'usb']),
            image=dict(type='str'),
            state=dict(type='str', default='boot_once', choices=['boot_always', 'boot_once', 'connect', 'disconnect', 'no_boot', 'poweroff']),
            force=dict(type='bool', default=False),
            ssl_version=dict(type='str', default='TLSv1', choices=['SSLv3', 'SSLv23', 'TLSv1', 'TLSv1_1', 'TLSv1_2']),
        )
    )
    if not HAS_HPILO:
        module.fail_json(msg=missing_required_lib('python-hpilo'), exception=HPILO_IMP_ERR)
    host = module.params['host']
    login = module.params['login']
    password = module.params['password']
    media = module.params['media']
    image = module.params['image']
    state = module.params['state']
    force = module.params['force']
    # Map e.g. 'TLSv1_2' onto ssl.PROTOCOL_TLSv1_2: upper() normalizes user
    # casing, then every 'V' is lowered back to the 'v' the ssl constants use.
    ssl_version = getattr(hpilo.ssl, 'PROTOCOL_' + module.params.get('ssl_version').upper().replace('V', 'v'))
    ilo = hpilo.Ilo(host, login=login, password=password, ssl_version=ssl_version)
    changed = False
    status = {}
    power_status = 'UNKNOWN'
    if media and state in ('boot_always', 'boot_once', 'connect', 'disconnect', 'no_boot'):
        # Workaround for: Error communicating with iLO: Problem manipulating EV
        try:
            ilo.set_one_time_boot(media)
        except hpilo.IloError:
            time.sleep(60)
            ilo.set_one_time_boot(media)
        # TODO: Verify if image URL exists/works
        if image:
            ilo.insert_virtual_media(media, image)
            changed = True
        if media == 'cdrom':
            ilo.set_vm_status('cdrom', state, True)
            status = ilo.get_vm_status()
            changed = True
        elif media in ('floppy', 'usb'):
            ilo.set_vf_status(state, True)
            status = ilo.get_vf_status()
            changed = True
    # Only perform a boot when state is boot_once or boot_always, or in case we want to force a reboot
    if state in ('boot_once', 'boot_always') or force:
        power_status = ilo.get_host_power_status()
        if not force and power_status == 'ON':
            # Safeguard: never reboot a running server unless force=true.
            module.fail_json(msg='HP iLO (%s) reports that the server is already powered on !' % host)
        if power_status == 'ON':
            ilo.warm_boot_server()
            # ilo.cold_boot_server()
            changed = True
        else:
            ilo.press_pwr_btn()
            # ilo.reset_server()
            # ilo.set_host_power(host_power=True)
            changed = True
    elif state == 'poweroff':
        # BUGFIX: was "state in ('poweroff')" -- parentheses without a comma
        # are not a tuple, so this performed a substring test on a plain
        # string instead of an equality/membership check.
        power_status = ilo.get_host_power_status()
        if not power_status == 'OFF':
            ilo.hold_pwr_btn()
            # ilo.set_host_power(host_power=False)
            changed = True
    module.exit_json(changed=changed, power=power_status, **status)
# Standard Ansible module entry point: run only when executed directly.
if __name__ == '__main__':
    METHOD_NAME()
3,038 | get date list | import datetime
from collections.abc import Sequence
from typing import Any, TypeVar
from django.db import models
from django.db.models.query import QuerySet
from django.http import HttpRequest, HttpResponse
from django.utils.datastructures import _IndexableCollection
from django.views.generic.base import View
from django.views.generic.detail import BaseDetailView, SingleObjectTemplateResponseMixin
from django.views.generic.list import MultipleObjectMixin, MultipleObjectTemplateResponseMixin
from typing_extensions import TypeAlias
_M = TypeVar("_M", bound=models.Model)
class YearMixin:
    # Handles the "year" URL component of date-based generic views.
    year_format: str
    year: str | None
    def get_year_format(self) -> str: ...
    def get_year(self) -> str: ...
    def get_next_year(self, date: datetime.date) -> datetime.date | None: ...
    def get_previous_year(self, date: datetime.date) -> datetime.date | None: ...
class MonthMixin:
    # Handles the "month" URL component of date-based generic views.
    month_format: str
    month: str | None
    def get_month_format(self) -> str: ...
    def get_month(self) -> str: ...
    def get_next_month(self, date: datetime.date) -> datetime.date | None: ...
    def get_previous_month(self, date: datetime.date) -> datetime.date | None: ...
class DayMixin:
    # Handles the "day" URL component of date-based generic views.
    day_format: str
    day: str | None
    def get_day_format(self) -> str: ...
    def get_day(self) -> str: ...
    def get_next_day(self, date: datetime.date) -> datetime.date | None: ...
    def get_previous_day(self, date: datetime.date) -> datetime.date | None: ...
class WeekMixin:
    # Handles the "week" URL component of date-based generic views.
    week_format: str
    week: str | None
    def get_week_format(self) -> str: ...
    def get_week(self) -> str: ...
    def get_next_week(self, date: datetime.date) -> datetime.date | None: ...
    def get_previous_week(self, date: datetime.date) -> datetime.date | None: ...
class DateMixin:
    # Date-field configuration shared by all date-based views.
    date_field: str | None
    allow_future: bool
    def get_date_field(self) -> str: ...
    def get_allow_future(self) -> bool: ...
    @property
    def uses_datetime_field(self) -> bool: ...
# (date_list, object_list, extra_context) triple returned by get_dated_items().
DatedItems: TypeAlias = tuple[_IndexableCollection[datetime.date] | None, _IndexableCollection[_M], dict[str, Any]]
class BaseDateListView(MultipleObjectMixin[_M], DateMixin, View):
    # Abstract base for date-based archive listings.
    date_list_period: str
    def get(self, request: HttpRequest, *args: Any, **kwargs: Any) -> HttpResponse: ...
    # Parameterized with _M for consistency with the subclass overrides below.
    def get_dated_items(self) -> DatedItems[_M]: ...
    def get_ordering(self) -> str | Sequence[str]: ...
    def get_dated_queryset(self, **lookup: Any) -> models.query.QuerySet[_M]: ...
    def get_date_list_period(self) -> str: ...
    def METHOD_NAME(
        self, queryset: models.query.QuerySet[_M], date_type: str | None = ..., ordering: str = ...
    ) -> models.query.QuerySet: ...
class BaseArchiveIndexView(BaseDateListView[_M]):
    # Top-level "latest objects" archive index.
    context_object_name: str
    def get_dated_items(self) -> DatedItems[_M]: ...
class ArchiveIndexView(MultipleObjectTemplateResponseMixin, BaseArchiveIndexView):
    template_name_suffix: str
class BaseYearArchiveView(YearMixin, BaseDateListView[_M]):
    date_list_period: str
    make_object_list: bool
    def get_dated_items(self) -> DatedItems[_M]: ...
    def get_make_object_list(self) -> bool: ...
class YearArchiveView(MultipleObjectTemplateResponseMixin, BaseYearArchiveView):
    template_name_suffix: str
class BaseMonthArchiveView(YearMixin, MonthMixin, BaseDateListView[_M]):
    date_list_period: str
    def get_dated_items(self) -> DatedItems[_M]: ...
class MonthArchiveView(MultipleObjectTemplateResponseMixin, BaseMonthArchiveView):
    template_name_suffix: str
class BaseWeekArchiveView(YearMixin, WeekMixin, BaseDateListView[_M]):
    def get_dated_items(self) -> DatedItems[_M]: ...
class WeekArchiveView(MultipleObjectTemplateResponseMixin, BaseWeekArchiveView):
    template_name_suffix: str
class BaseDayArchiveView(YearMixin, MonthMixin, DayMixin, BaseDateListView[_M]):
    def get_dated_items(self) -> DatedItems[_M]: ...
class DayArchiveView(MultipleObjectTemplateResponseMixin, BaseDayArchiveView):
    template_name_suffix: str
class BaseTodayArchiveView(BaseDayArchiveView[_M]):
    def get_dated_items(self) -> DatedItems[_M]: ...
class TodayArchiveView(MultipleObjectTemplateResponseMixin, BaseTodayArchiveView):
    template_name_suffix: str
class BaseDateDetailView(YearMixin, MonthMixin, DayMixin, DateMixin, BaseDetailView[_M]):
    def get_object(self, queryset: QuerySet[_M] | None = ...) -> _M: ...
class DateDetailView(SingleObjectTemplateResponseMixin, BaseDateDetailView):
    template_name_suffix: str
def timezone_today() -> datetime.date: ...
3,039 | probe | from __future__ import print_function
import os
import pygame
import pkg_resources
import yaml
import fnmatch
from MAVProxy.modules.lib import mp_module
from MAVProxy.modules.lib import mp_util
from MAVProxy.modules.lib import mp_settings
from MAVProxy.modules.mavproxy_joystick import controls
class Joystick(mp_module.MPModule):
    '''
    MAVProxy module that maps a pygame joystick onto RC channel overrides.

    Console commands:
    joystick set verbose
    joystick set debug
    joystick status
    joystick probe
    '''
    def __init__(self, mpstate):
        """Initialise module"""
        super(Joystick, self).__init__(mpstate, 'joystick',
                                       'A flexible joystick driver')
        # Active controls.Joystick, or None until probe() finds a match.
        self.joystick = None
        self.init_pygame()
        self.init_settings()
        self.init_commands()
        self.METHOD_NAME()
    def log(self, msg, level=0):
        """Print msg if the global moddebug setting is at least `level`."""
        if self.mpstate.settings.moddebug < level:
            return
        print('{}: {}'.format(__name__, msg))
    def init_pygame(self):
        """Initialise pygame and its joystick subsystem."""
        self.log('Initializing pygame', 2)
        pygame.init()
        pygame.joystick.init()
    def init_settings(self):
        # No module-specific settings yet; kept for init symmetry.
        pass
    def init_commands(self):
        """Register the 'joystick' console command with its completions."""
        self.log('Initializing commands', 2)
        # BUGFIX: help text previously read "drvier".
        self.add_command('joystick', self.cmd_joystick,
                         "A flexible joystick driver",
                         ['status', 'probe'])
    def load_definitions(self):
        """Load joystick definition files (yaml/json) into self.joydefs.

        Searches $MAVPROXY_JOYSTICK_DIR (or ~/.mavproxy/joysticks) first,
        then the definitions bundled with this package.
        """
        self.log('Loading joystick definitions', 1)
        self.joydefs = []
        search = []
        userjoysticks = os.environ.get(
            'MAVPROXY_JOYSTICK_DIR',
            mp_util.dot_mavproxy('joysticks'))
        if userjoysticks is not None and os.path.isdir(userjoysticks):
            search.append(userjoysticks)
        search.append(pkg_resources.resource_filename(__name__, 'joysticks'))
        for path in search:
            self.log('Looking for joystick definitions in {}'.format(path),
                     2)
            path = os.path.expanduser(path)
            for dirpath, dirnames, filenames in os.walk(path):
                for joyfile in filenames:
                    root, ext = os.path.splitext(joyfile)
                    if ext[1:] not in ['yml', 'yaml', 'json']:
                        continue
                    joypath = os.path.join(dirpath, joyfile)
                    self.log('Loading definition from {}'.format(joypath), 2)
                    with open(joypath, 'r') as fd:
                        joydef = yaml.safe_load(fd)
                        # Remember the source path for status/diagnostics.
                        joydef['path'] = joypath
                        self.joydefs.append(joydef)
    def METHOD_NAME(self):
        """Reload definitions and attach the first joystick matching one."""
        self.load_definitions()
        for jid in range(pygame.joystick.get_count()):
            joy = pygame.joystick.Joystick(jid)
            self.log("Found joystick (%s)" % (joy.get_name(),))
            for joydef in self.joydefs:
                if 'match' not in joydef:
                    self.log('{} has no match patterns, ignoring.'.format(
                        joydef['path']), 0)
                    continue
                # Case-insensitive glob match of device name against patterns.
                for pattern in joydef['match']:
                    if fnmatch.fnmatch(joy.get_name().lower(),
                                       pattern.lower()):
                        self.log('Using {} ("{}" matches pattern "{}")'.format(
                            joydef['path'], joy.get_name(), pattern))
                        self.joystick = controls.Joystick(joy, joydef)
                        return
        print('{}: Failed to find matching joystick.'.format(__name__))
    def usage(self):
        '''show help on command line options'''
        return "Usage: joystick <status|set>"
    def cmd_joystick(self, args):
        """Dispatch 'joystick <subcommand>' console input."""
        if not len(args):
            self.log('No subcommand specified.')
        elif args[0] == 'status':
            self.cmd_status()
        elif args[0] == 'probe':
            self.cmd_probe()
        elif args[0] == 'help':
            self.cmd_help()
    def cmd_help(self):
        print('joystick probe -- reload and match joystick definitions')
        print('joystick status -- show currently loaded definition, if any')
    def cmd_probe(self):
        """Console handler: re-run joystick detection."""
        self.log('Re-detecting available joysticks', 0)
        self.METHOD_NAME()
    def cmd_status(self):
        """Console handler: report which definition (if any) is active."""
        if self.joystick is None:
            print('No active joystick')
        else:
            print('Active joystick:')
            print('Path: {path}'.format(**self.joystick.controls))
            print('Description: {description}'.format(
                **self.joystick.controls))
    def idle_task(self):
        """Per-loop hook: push joystick axis values into the RC override."""
        if self.joystick is None:
            return
        for e in pygame.event.get():
            override = self.module('rc').override[:]
            values = self.joystick.read()
            # Joystick channels replace the head of the override list; any
            # remaining channels keep their previous override values.
            override = values + override[len(values):]
            # self.log('channels: {}'.format(override), level=3)
            if override != self.module('rc').override:
                self.module('rc').override = override
                self.module('rc').override_period.force()
def init(mpstate):
    '''initialise module'''
    # Called by MAVProxy's module loader; must return the module instance.
    return Joystick(mpstate)
3,040 | on mouse drag | """
Section Example 1:
In this Section example we divide the screen in two sections and let the user
pick a box depending on the selected Section
Note:
- How View know nothing of what's happening inside the sections.
Each section knows what to do.
- Each event mouse input is handled by each Section even if the class
it's the same (ScreenPart).
- How on_mouse_enter/leave triggers in each Section when the mouse
enter or leaves the section boundaries
If Python and Arcade are installed, this example can be run from the command line with:
python -m arcade.examples.sections_demo_1
"""
from __future__ import annotations
from typing import Optional
import arcade
class Box(arcade.SpriteSolidColor):
    """ This is a Solid Sprite that represents a GREEN Box on the screen """
    def __init__(self, section):
        super().__init__(100, 100, color=arcade.color.APPLE_GREEN)
        # the owning ScreenPart; used by release() to clear its hold_box
        self.section = section
    def on_update(self, delta_time: float = 1 / 60):
        # update the box (this actually moves the Box by changing its position)
        self.update()
        # if we hit the ground then lay on the ground and stop movement
        if self.bottom <= 0:
            self.bottom = 0
            self.stop()
    def release(self):
        # Drop the box: detach it from the section and let it fall.
        self.section.hold_box = None
        self.change_y = -10
class ScreenPart(arcade.Section):
    """
    This represents a part of the View defined by its
    boundaries (left, bottom, etc.)
    """
    def __init__(self, left: int, bottom: int, width: int, height: int,
                 **kwargs):
        super().__init__(left, bottom, width, height, **kwargs)
        self.selected: bool = False  # if this section is selected
        self.box: Box = Box(self)  # the section Box Sprite
        # position the Box inside this section using self.left + self.width
        self.box.position = self.left + (self.width / 2), 50
        # variable that will hold the Box when it's being dragged
        self.hold_box: Optional[Box] = None
    def on_update(self, delta_time: float):
        # call on_update on the owned Box
        self.box.on_update(delta_time)
    def on_draw(self):
        """ Draw this section """
        if self.selected:
            # Section is selected when mouse is within its boundaries
            arcade.draw_lrbt_rectangle_filled(self.left, self.right, self.bottom,
                                              self.top, arcade.color.GRAY)
            # NOTE(review): on-screen text says "You're are" -- confirm wording.
            arcade.draw_text(f'You\'re are on the {self.name}', self.left + 30,
                             self.top - 50, arcade.color.BLACK, 16)
        # draw the box
        self.box.draw()
    def METHOD_NAME(self, x: float, y: float, dx: float, dy: float,
                    _buttons: int, _modifiers: int):
        """Mouse-drag handler: move the held Box with the pointer."""
        # if we hold a box, then whe move it at the same rate the mouse moves
        if self.hold_box:
            self.hold_box.position = x, y
    def on_mouse_press(self, x: float, y: float, button: int, modifiers: int):
        # if we pick a Box with the mouse, "hold" it and stop its movement
        if self.box.collides_with_point((x, y)):
            self.hold_box = self.box
            self.hold_box.stop()
    def on_mouse_release(self, x: float, y: float, button: int, modifiers: int):
        # if hold_box is True because we pick it with on_mouse_press
        # then release the Box
        if self.hold_box:
            self.hold_box.release()
    def on_mouse_enter(self, x: float, y: float):
        # select this section
        self.selected = True
    def on_mouse_leave(self, x: float, y: float):
        # unselect this section
        self.selected = False
        # if we are holding this section box and we leave the section
        # we release the box as if we release the mouse button
        if self.hold_box:
            self.hold_box.release()
class GameView(arcade.View):
    """View split into two ScreenPart sections, one per half of the window."""
    def __init__(self):
        super().__init__()
        # add sections to the view
        # 1) First section holds half of the screen
        self.add_section(ScreenPart(0, 0, self.window.width / 2,
                                    self.window.height, name='Left'))
        # 2) Second section holds the other half of the screen
        self.add_section(ScreenPart(self.window.width / 2, 0,
                                    self.window.width / 2, self.window.height,
                                    name='Right'))
    def on_draw(self):
        # clear the screen
        self.clear(arcade.color.BEAU_BLUE)
        # draw a line separating each Section
        arcade.draw_line(self.window.width / 2, 0, self.window.width / 2,
                         self.window.height, arcade.color.BLACK, 1)
def main():
    """Create the window, install the sectioned GameView and start arcade."""
    win = arcade.Window()
    game_view = GameView()  # sections are created inside GameView.__init__
    win.show_view(game_view)
    win.run()
# Run the demo only when executed as a script.
if __name__ == '__main__':
    main()
3,041 | output | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "adp account show",
    is_experimental=True,
)
class Show(AAZCommand):
    """Get the properties of an ADP account
    """
    # Generated by aaz-dev-tools (see file header); manual edits are normally
    # overwritten on regeneration.
    _aaz_info = {
        "version": "2022-09-01-preview",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.autonomousdevelopmentplatform/accounts/{}", "2022-09-01-preview"],
        ]
    }
    def _handler(self, command_args):
        # Parse args via the base class, run the HTTP operation, shape output.
        super()._handler(command_args)
        self._execute_operations()
        return self.METHOD_NAME()
    _args_schema = None
    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        # Schema is built lazily and cached on the class.
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
        # define Arg Group ""
        _args_schema = cls._args_schema
        _args_schema.account_name = AAZStrArg(
            options=["-n", "--name", "--account-name"],
            help="The name of the ADP account",
            required=True,
            id_part="name",
            fmt=AAZStrArgFormat(
                pattern="^[a-zA-Z0-9]+(-[a-zA-Z0-9]+)*",
                max_length=50,
            ),
        )
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        return cls._args_schema
    def _execute_operations(self):
        self.AccountsGet(ctx=self.ctx)()
    def METHOD_NAME(self, *args, **kwargs):
        # Flatten the deserialized instance stored by AccountsGet.on_200.
        result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
        return result
    class AccountsGet(AAZHttpOperation):
        # GET <account resource URL>; stores the 200 body in ctx.vars.instance.
        CLIENT_TYPE = "MgmtClient"
        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            if session.http_response.status_code in [200]:
                return self.on_200(session)
            # Any non-200 status is translated into a CLI error.
            return self.on_error(session.http_response)
        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AutonomousDevelopmentPlatform/accounts/{accountName}",
                **self.url_parameters
            )
        @property
        def method(self):
            return "GET"
        @property
        def error_format(self):
            return "MgmtErrorFormat"
        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "accountName", self.ctx.args.account_name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
            }
            return parameters
        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2022-09-01-preview",
                    required=True,
                ),
            }
            return parameters
        @property
        def header_parameters(self):
            parameters = {
                **self.serialize_header_param(
                    "Accept", "application/json",
                ),
            }
            return parameters
        def on_200(self, session):
            data = self.deserialize_http_content(session)
            self.ctx.set_var(
                "instance",
                data,
                schema_builder=self._build_schema_on_200
            )
        _schema_on_200 = None
        @classmethod
        def _build_schema_on_200(cls):
            # Response schema is built lazily and cached on the class.
            if cls._schema_on_200 is not None:
                return cls._schema_on_200
            cls._schema_on_200 = AAZObjectType()
            _schema_on_200 = cls._schema_on_200
            _schema_on_200.id = AAZStrType(
                flags={"read_only": True},
            )
            _schema_on_200.location = AAZStrType(
                flags={"required": True},
            )
            _schema_on_200.name = AAZStrType(
                flags={"read_only": True},
            )
            _schema_on_200.properties = AAZObjectType(
                flags={"client_flatten": True},
            )
            _schema_on_200.system_data = AAZObjectType(
                serialized_name="systemData",
                flags={"read_only": True},
            )
            _schema_on_200.tags = AAZDictType()
            _schema_on_200.type = AAZStrType(
                flags={"read_only": True},
            )
            properties = cls._schema_on_200.properties
            properties.account_id = AAZStrType(
                serialized_name="accountId",
                flags={"read_only": True},
            )
            properties.provisioning_state = AAZStrType(
                serialized_name="provisioningState",
                flags={"read_only": True},
            )
            system_data = cls._schema_on_200.system_data
            system_data.created_at = AAZStrType(
                serialized_name="createdAt",
                flags={"read_only": True},
            )
            system_data.created_by = AAZStrType(
                serialized_name="createdBy",
                flags={"read_only": True},
            )
            system_data.created_by_type = AAZStrType(
                serialized_name="createdByType",
                flags={"read_only": True},
            )
            system_data.last_modified_at = AAZStrType(
                serialized_name="lastModifiedAt",
                flags={"read_only": True},
            )
            system_data.last_modified_by = AAZStrType(
                serialized_name="lastModifiedBy",
                flags={"read_only": True},
            )
            system_data.last_modified_by_type = AAZStrType(
                serialized_name="lastModifiedByType",
                flags={"read_only": True},
            )
            tags = cls._schema_on_200.tags
            tags.Element = AAZStrType()
            return cls._schema_on_200
__all__ = ["Show"] |
3,042 | analyze | #################################################################################
# FOQUS Copyright (c) 2012 - 2023, by the software owners: Oak Ridge Institute
# for Science and Education (ORISE), TRIAD National Security, LLC., Lawrence
# Livermore National Security, LLC., The Regents of the University of
# California, through Lawrence Berkeley National Laboratory, Battelle Memorial
# Institute, Pacific Northwest Division through Pacific Northwest National
# Laboratory, Carnegie Mellon University, West Virginia University, Boston
# University, the Trustees of Princeton University, The University of Texas at
# Austin, URS Energy & Construction, Inc., et al. All rights reserved.
#
# Please see the file LICENSE.md for full copyright and license information,
# respectively. This file is also available online at the URL
# "https://github.com/CCSI-Toolset/FOQUS".
#################################################################################
import os
from .UQRSAnalysis import UQRSAnalysis
from .UQAnalysis import UQAnalysis
from .RSAnalyzer import RSAnalyzer
from .ResponseSurfaces import ResponseSurfaces
from .Common import Common
class RSValidation(UQRSAnalysis):
    """Cross-validation of a response-surface (RS) fit for one ensemble output."""
    def __init__(
        self,
        ensemble,
        output,
        responseSurface,
        rsOptions=None,
        genCodeFile=False,
        nCV=10,
        userRegressionFile=None,
        testFile=None,
        error_tol_percent=10,
        odoe=False,
    ):
        super(RSValidation, self).__init__(
            ensemble,
            output,
            UQAnalysis.RS_VALIDATION,
            responseSurface,
            None,
            rsOptions,
            userRegressionFile,
            None,
        )
        self.genCodeFile = genCodeFile
        # Treat an explicit None the same as the default of 10 CV groups.
        if nCV is None:
            nCV = 10
        self.nCV = nCV
        self.testFile = testFile
        self.error_tol_percent = error_tol_percent
        self.odoe = odoe
    def saveDict(self):
        """Serialize settings; the test file (if any) is copied into the archive."""
        sd = super(RSValidation, self).saveDict()
        sd["genCodeFile"] = self.genCodeFile
        sd["nCV"] = self.nCV
        if self.testFile is not None:
            self.archiveFile(self.testFile)
            sd["testFile"] = os.path.basename(self.testFile)
        return sd
    def loadDict(self, sd):
        """Restore settings saved by saveDict(), pulling testFile from the archive."""
        super(RSValidation, self).loadDict(sd)
        self.genCodeFile = sd.get("genCodeFile", False)
        self.nCV = sd.get("nCV", 10)
        self.testFile = sd.get("testFile", None)
        if self.testFile is not None:
            self.testFile = self.restoreFromArchive(self.testFile)
    def METHOD_NAME(self):
        """Run the RS validation.

        Writes the ensemble to a PSUADE data file, runs RSAnalyzer.validateRS,
        and returns the resulting plot file path (or None on failure).
        """
        data = self.ensemble
        fname = Common.getLocalFileName(
            RSAnalyzer.dname, data.getModelName().split()[0], ".dat"
        )
        index = ResponseSurfaces.getEnumValue(self.responseSurface)
        # USER regression surfaces need fixed inputs written as variables.
        fixedAsVariables = index == ResponseSurfaces.USER
        data.writeToPsuade(fname, fixedAsVariables=fixedAsVariables)
        mfile = RSAnalyzer.validateRS(
            fname,
            self.outputs[0],
            self.responseSurface,
            self.rsOptions,
            self.genCodeFile,
            self.nCV,
            self.userRegressionFile,
            self.testFile,
            self.error_tol_percent,
        )
        if mfile is None:
            return None
        mfile = mfile[0]
        # ODoE runs keep results local rather than archiving them.
        if not self.odoe:
            self.archiveFile(mfile)
        return mfile
    def showResults(self):
        """Restore the matlab results file from the archive and plot it."""
        rsIndex = ResponseSurfaces.getEnumValue(self.responseSurface)
        userMethod = rsIndex == ResponseSurfaces.USER
        # USER regression produces a different results file than built-in RS.
        if userMethod:
            mfile = "RSTest_hs.m"
        else:
            mfile = "RSFA_CV_err.m"
        self.restoreFromArchive(mfile)
        RSAnalyzer.plotValidate(
            self.ensemble,
            self.outputs[0],
            self.responseSurface,
            userMethod,
            mfile,
            error_tol_percent=self.error_tol_percent,
        )
    def getAdditionalInfo(self):
        """Extend the base info dict with validation-specific settings."""
        info = super(RSValidation, self).getAdditionalInfo()
        info["Number of cross-validation groups"] = self.nCV
        if self.testFile is not None:
            info["Separate test file"] = os.path.basename(self.testFile)
        return info
3,043 | test docstring example | from snowfakery.utils.collections import CaseInsensitiveDict
import pytest
# From: https://github.com/psf/requests/blob/05a1a21593c9c8e79393d35fae12c9c27a6f7605/tests/test_requests.py
class TestCaseInsensitiveDict:
@pytest.mark.parametrize(
"cid",
(
CaseInsensitiveDict({"Foo": "foo", "BAr": "bar"}),
CaseInsensitiveDict([("Foo", "foo"), ("BAr", "bar")]),
CaseInsensitiveDict(FOO="foo", BAr="bar"),
),
)
def test_init(self, cid):
assert len(cid) == 2
assert "foo" in cid
assert "bar" in cid
def METHOD_NAME(self):
cid = CaseInsensitiveDict()
cid["Accept"] = "application/json"
assert cid["aCCEPT"] == "application/json"
assert list(cid) == ["Accept"]
def test_len(self):
cid = CaseInsensitiveDict({"a": "a", "b": "b"})
cid["A"] = "a"
assert len(cid) == 2
def test_getitem(self):
cid = CaseInsensitiveDict({"Spam": "blueval"})
assert cid["spam"] == "blueval"
assert cid["SPAM"] == "blueval"
def test_fixes_649(self):
"""__setitem__ should behave case-insensitively."""
cid = CaseInsensitiveDict()
cid["spam"] = "oneval"
cid["Spam"] = "twoval"
cid["sPAM"] = "redval"
cid["SPAM"] = "blueval"
assert cid["spam"] == "blueval"
assert cid["SPAM"] == "blueval"
assert list(cid.keys()) == ["SPAM"]
def test_delitem(self):
cid = CaseInsensitiveDict()
cid["Spam"] = "someval"
del cid["sPam"]
assert "spam" not in cid
assert len(cid) == 0
def test_contains(self):
cid = CaseInsensitiveDict()
cid["Spam"] = "someval"
assert "Spam" in cid
assert "spam" in cid
assert "SPAM" in cid
assert "sPam" in cid
assert "notspam" not in cid
def test_get(self):
cid = CaseInsensitiveDict()
cid["spam"] = "oneval"
cid["SPAM"] = "blueval"
assert cid.get("spam") == "blueval"
assert cid.get("SPAM") == "blueval"
assert cid.get("sPam") == "blueval"
assert cid.get("notspam", "default") == "default"
def test_update(self):
cid = CaseInsensitiveDict()
cid["spam"] = "blueval"
cid.update({"sPam": "notblueval"})
assert cid["spam"] == "notblueval"
cid = CaseInsensitiveDict({"Foo": "foo", "BAr": "bar"})
cid.update({"fOO": "anotherfoo", "bAR": "anotherbar"})
assert len(cid) == 2
assert cid["foo"] == "anotherfoo"
assert cid["bar"] == "anotherbar"
def test_update_retains_unchanged(self):
cid = CaseInsensitiveDict({"foo": "foo", "bar": "bar"})
cid.update({"foo": "newfoo"})
assert cid["bar"] == "bar"
def test_iter(self):
cid = CaseInsensitiveDict({"Spam": "spam", "Eggs": "eggs"})
keys = frozenset(["Spam", "Eggs"])
assert frozenset(iter(cid)) == keys
def test_equality(self):
cid = CaseInsensitiveDict({"SPAM": "blueval", "Eggs": "redval"})
othercid = CaseInsensitiveDict({"spam": "blueval", "eggs": "redval"})
assert cid == othercid
del othercid["spam"]
assert cid != othercid
assert cid == {"spam": "blueval", "eggs": "redval"}
assert cid != object()
def test_setdefault(self):
cid = CaseInsensitiveDict({"Spam": "blueval"})
assert cid.setdefault("spam", "notblueval") == "blueval"
assert cid.setdefault("notspam", "notblueval") == "notblueval"
def test_lower_items(self):
cid = CaseInsensitiveDict(
{
"Accept": "application/json",
"user-Agent": "requests",
}
)
keyset = frozenset(lowerkey for lowerkey, v in cid.lower_items())
lowerkeyset = frozenset(["accept", "user-agent"])
assert keyset == lowerkeyset
def test_preserve_key_case(self):
cid = CaseInsensitiveDict(
{
"Accept": "application/json",
"user-Agent": "requests",
}
)
keyset = frozenset(["Accept", "user-Agent"])
assert frozenset(i[0] for i in cid.items()) == keyset
assert frozenset(cid.keys()) == keyset
assert frozenset(cid) == keyset
def test_preserve_last_key_case(self):
cid = CaseInsensitiveDict(
{
"Accept": "application/json",
"user-Agent": "requests",
}
)
cid.update({"ACCEPT": "application/json"})
cid["USER-AGENT"] = "requests"
keyset = frozenset(["ACCEPT", "USER-AGENT"])
assert frozenset(i[0] for i in cid.items()) == keyset
assert frozenset(cid.keys()) == keyset
assert frozenset(cid) == keyset
def test_copy(self):
cid = CaseInsensitiveDict(
{
"Accept": "application/json",
"user-Agent": "requests",
}
)
cid_copy = cid.copy()
assert str(cid) == str(cid_copy)
assert cid == cid_copy
cid["changed"] = True
assert cid != cid_copy |
3,044 | typer | from .. import types, utils, errors
import operator
from .templates import (AttributeTemplate, ConcreteTemplate, AbstractTemplate,
infer_global, infer, infer_getattr,
signature, bound_function, make_callable_template)
from .builtins import normalize_1d_index
@infer_global(operator.contains)
class InContainer(AbstractTemplate):
key = operator.contains
def generic(self, args, kws):
cont, item = args
if isinstance(cont, types.Container):
return signature(types.boolean, cont, cont.dtype)
@infer_global(len)
class ContainerLen(AbstractTemplate):
def generic(self, args, kws):
assert not kws
(val,) = args
if isinstance(val, (types.Container)):
return signature(types.intp, val)
@infer_global(operator.truth)
class SequenceBool(AbstractTemplate):
key = operator.truth
def generic(self, args, kws):
assert not kws
(val,) = args
if isinstance(val, (types.Sequence)):
return signature(types.boolean, val)
@infer_global(operator.getitem)
class GetItemSequence(AbstractTemplate):
key = operator.getitem
def generic(self, args, kws):
seq, idx = args
if isinstance(seq, types.Sequence):
idx = normalize_1d_index(idx)
if isinstance(idx, types.SliceType):
# Slicing a tuple only supported with static_getitem
if not isinstance(seq, types.BaseTuple):
return signature(seq, seq, idx)
elif isinstance(idx, types.Integer):
return signature(seq.dtype, seq, idx)
@infer_global(operator.setitem)
class SetItemSequence(AbstractTemplate):
def generic(self, args, kws):
seq, idx, value = args
if isinstance(seq, types.MutableSequence):
idx = normalize_1d_index(idx)
if isinstance(idx, types.SliceType):
return signature(types.none, seq, idx, seq)
elif isinstance(idx, types.Integer):
if not self.context.can_convert(value, seq.dtype):
msg = "invalid setitem with value of {} to element of {}"
raise errors.TypingError(msg.format(types.unliteral(value), seq.dtype))
return signature(types.none, seq, idx, seq.dtype)
@infer_global(operator.delitem)
class DelItemSequence(AbstractTemplate):
    """Typing for ``del seq[idx]`` on mutable sequences."""

    def generic(self, args, kws):
        seq, idx = args
        if not isinstance(seq, types.MutableSequence):
            return None
        return signature(types.none, seq, normalize_1d_index(idx))
# --------------------------------------------------------------------------
# named tuples
@infer_getattr
class NamedTupleAttribute(AttributeTemplate):
    """Attribute resolution on named tuple instances."""
    key = types.BaseNamedTuple

    def resolve___class__(self, tup):
        return types.NamedTupleClass(tup.instance_class)

    def generic_resolve(self, tup, attr):
        # A field name resolves to the type at that field's position;
        # anything else is left unresolved (None).
        try:
            pos = tup.fields.index(attr)
        except ValueError:
            return None
        else:
            return tup[pos]
@infer_getattr
class NamedTupleClassAttribute(AttributeTemplate):
    """Attribute resolution on named tuple *classes* (the constructor)."""
    key = types.NamedTupleClass

    def resolve___call__(self, classty):
        """
        Resolve the named tuple constructor, aka the class's __call__ method.
        """
        instance_class = classty.instance_class
        pysig = utils.pysignature(instance_class)

        def METHOD_NAME(*args, **kws):
            # Bind against the real namedtuple signature so keyword
            # arguments are folded into positional order.
            try:
                bound = pysig.bind(*args, **kws)
            except TypeError as e:
                e.args = ("In '%s': %s" % (instance_class, e),)
                raise
            assert not bound.kwargs
            return types.BaseTuple.from_types(bound.args, instance_class)

        # Override the typer's pysig to match the namedtuple constructor's
        METHOD_NAME.pysig = pysig
        return types.Function(make_callable_template(self.key, METHOD_NAME))
3,045 | setup | # -------------------------------------------------------------------------------------------------
# Copyright (C) 2015-2023 Nautech Systems Pty Ltd. All rights reserved.
# https://nautechsystems.io
#
# Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at https://www.gnu.org/licenses/lgpl-3.0.en.html
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------------------------------
import pkgutil
import msgspec
import pytest
from nautilus_trader.adapters.binance.common.enums import BinanceAccountType
from nautilus_trader.adapters.binance.futures.providers import BinanceFuturesInstrumentProvider
from nautilus_trader.adapters.binance.http.client import BinanceHttpClient
from nautilus_trader.adapters.binance.spot.providers import BinanceSpotInstrumentProvider
from nautilus_trader.common.clock import LiveClock
from nautilus_trader.model.identifiers import InstrumentId
from nautilus_trader.model.identifiers import Symbol
from nautilus_trader.model.identifiers import Venue
@pytest.mark.skip(reason="WIP")
class TestBinanceInstrumentProvider:
    """Integration tests for the Binance spot/futures instrument providers.

    HTTP traffic is stubbed by monkeypatching ``BinanceHttpClient.send_request``
    to replay canned JSON fixtures, so no network access is needed.
    Relies on external fixtures ``binance_http_client`` and ``live_logger``.
    """

    def METHOD_NAME(self):
        # Fixture Setup
        # NOTE(review): appears to be the per-test setup hook
        # (pytest ``setup_method`` style) -- confirm the expected hook name.
        self.clock = LiveClock()

    @pytest.mark.asyncio()
    async def test_load_all_async_for_spot_markets(
        self,
        binance_http_client,
        live_logger,
        monkeypatch,
    ):
        # Arrange: prepare data for monkey patch
        response1 = pkgutil.get_data(
            package="tests.integration_tests.adapters.binance.resources.http_responses",
            resource="http_wallet_trading_fee.json",
        )
        response2 = pkgutil.get_data(
            package="tests.integration_tests.adapters.binance.resources.http_responses",
            resource="http_spot_market_exchange_info.json",
        )
        # Responses are served via list.pop() (LIFO), so the exchange-info
        # payload (response2, last element popped first? no -- pop() takes the
        # *last* element, i.e. response1 first).  NOTE(review): confirm the
        # provider requests trading fees before exchange info.
        responses = [response2, response1]

        # Mock coroutine for patch
        async def mock_send_request(
            self,  # (needed for mock)
            http_method: str,  # (needed for mock)
            url_path: str,  # (needed for mock)
            payload: dict[str, str],  # (needed for mock)
        ) -> bytes:
            return msgspec.json.decode(responses.pop())

        # Apply mock coroutine to client
        monkeypatch.setattr(
            target=BinanceHttpClient,
            name="send_request",
            value=mock_send_request,
        )

        self.provider = BinanceSpotInstrumentProvider(
            client=binance_http_client,
            logger=live_logger,
            clock=self.clock,
            account_type=BinanceAccountType.SPOT,
        )

        # Act
        await self.provider.load_all_async()

        # Assert: both instruments and all three currencies from the fixture.
        assert self.provider.count == 2
        assert self.provider.find(InstrumentId(Symbol("BTCUSDT"), Venue("BINANCE"))) is not None
        assert self.provider.find(InstrumentId(Symbol("ETHUSDT"), Venue("BINANCE"))) is not None
        assert len(self.provider.currencies()) == 3
        assert "BTC" in self.provider.currencies()
        assert "ETH" in self.provider.currencies()
        assert "USDT" in self.provider.currencies()

    @pytest.mark.asyncio()
    async def test_load_all_async_for_futures_markets(
        self,
        binance_http_client,
        live_logger,
        monkeypatch,
    ):
        # Arrange: prepare data for monkey patch
        # (futures flow only requests exchange info, no trading-fee call)
        # response1 = pkgutil.get_data(
        #     package="tests.integration_tests.adapters.binance.resources.http_responses",
        #     resource="http_wallet_trading_fee.json",
        # )
        response2 = pkgutil.get_data(
            package="tests.integration_tests.adapters.binance.resources.http_responses",
            resource="http_futures_market_exchange_info.json",
        )
        responses = [response2]

        # Mock coroutine for patch
        async def mock_send_request(
            self,  # (needed for mock)
            http_method: str,  # (needed for mock)
            url_path: str,  # (needed for mock)
            payload: dict[str, str],  # (needed for mock)
        ) -> bytes:
            return msgspec.json.decode(responses.pop())

        # Apply mock coroutine to client
        monkeypatch.setattr(
            target=BinanceHttpClient,
            name="send_request",
            value=mock_send_request,
        )

        self.provider = BinanceFuturesInstrumentProvider(
            client=binance_http_client,
            logger=live_logger,
            clock=self.clock,
            account_type=BinanceAccountType.USDT_FUTURE,
        )

        # Act
        await self.provider.load_all_async()

        # Assert: two perpetuals plus one dated future from the fixture.
        assert self.provider.count == 3
        assert (
            self.provider.find(InstrumentId(Symbol("BTCUSDT-PERP"), Venue("BINANCE"))) is not None
        )
        assert (
            self.provider.find(InstrumentId(Symbol("ETHUSDT-PERP"), Venue("BINANCE"))) is not None
        )
        assert (
            self.provider.find(InstrumentId(Symbol("BTCUSDT_220325"), Venue("BINANCE"))) is not None
        )
        assert len(self.provider.currencies()) == 3
        assert "BTC" in self.provider.currencies()
        assert "ETH" in self.provider.currencies()
        assert "USDT" in self.provider.currencies()
3,046 | caller trace | """SCons.Debug
Code for debugging SCons internal things. Shouldn't be
needed by most users. Quick shortcuts:
from SCons.Debug import caller_trace
caller_trace()
"""
#
# Copyright (c) 2001 - 2019 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Debug.py bee7caf9defd6e108fc2998a2520ddb36a967691 2019-12-17 02:07:09 bdeegan"
import os
import sys
import time
import weakref
import inspect
# Global variable that gets set to 'True' by the Main script,
# when the creation of class instances should get tracked.
track_instances = False
# Map of class name -> list of tracked references (weakrefs when the
# instance supports them, otherwise the instance itself).
tracked_classes = {}

def logInstanceCreation(instance, name=None):
    """Record the creation of *instance* under *name* (its class name by default)."""
    if name is None:
        name = type(instance).__name__
    bucket = tracked_classes.setdefault(name, [])
    if hasattr(instance, '__dict__'):
        bucket.append(weakref.ref(instance))
    else:
        # weakref doesn't seem to work when the instance
        # contains only slots...
        bucket.append(instance)
def string_to_classes(s):
    """Expand *s* into a list of class names: '*' means every tracked class,
    otherwise the string is split on whitespace."""
    if s == '*':
        return sorted(tracked_classes)
    return s.split()
def fetchLoggedInstances(classes="*"):
    """Return ``[(classname, tracked_count)]`` for the requested classes."""
    return [(classname, len(tracked_classes[classname]))
            for classname in string_to_classes(classes)]
def countLoggedInstances(classes, file=sys.stdout):
    """Write one 'name: count' line per requested class to *file*."""
    for classname in string_to_classes(classes):
        count = len(tracked_classes[classname])
        file.write("%s: %d\n" % (classname, count))
def listLoggedInstances(classes, file=sys.stdout):
    """Write a one-line repr of every live tracked instance of *classes*.

    Entries in ``tracked_classes`` are either ``weakref.ref`` objects (for
    instances with a ``__dict__``) or the raw instances themselves
    (slots-only objects) -- see logInstanceCreation().

    Bug fix: the old check was ``inspect.isclass(ref)``, which is never
    true for a weakref, so weakrefs were printed as ``<weakref ...>``
    instead of being dereferenced.  Test for the weakref type instead.
    """
    for classname in string_to_classes(classes):
        file.write('\n%s:\n' % classname)
        for ref in tracked_classes[classname]:
            if isinstance(ref, weakref.ref):
                obj = ref()        # dereference; None if already collected
            else:
                obj = ref
            if obj is not None:
                file.write(' %s\n' % repr(obj))
def dumpLoggedInstances(classes, file=sys.stdout):
    """Dump each live tracked instance of *classes* with its attributes.

    Bug fix: tracked entries may be raw instances (slots-only objects),
    not just weakrefs -- see logInstanceCreation().  Calling ``ref()``
    unconditionally raised TypeError for raw entries; dereference only
    actual weakrefs.
    """
    for classname in string_to_classes(classes):
        file.write('\n%s:\n' % classname)
        for ref in tracked_classes[classname]:
            obj = ref() if isinstance(ref, weakref.ref) else ref
            if obj is not None:
                file.write(' %s:\n' % obj)
                for key, value in obj.__dict__.items():
                    file.write(' %20s : %s\n' % (key, value))
# Define a platform-appropriate memory() -> int helper (process memory usage).
if sys.platform[:5] == "linux":
    # Linux doesn't actually support memory usage stats from getrusage().
    def memory():
        # Field 23 (index 22) of /proc/self/stat is the process vsize.
        with open('/proc/self/stat') as f:
            mstr = f.read()
        mstr = mstr.split()[22]
        return int(mstr)
elif sys.platform[:6] == 'darwin':
    #TODO really get memory stats for OS X
    def memory():
        return 0
else:
    try:
        import resource
    except ImportError:
        try:
            import win32process
            import win32api
        except ImportError:
            # No usable memory API on this platform.
            def memory():
                return 0
        else:
            def memory():
                process_handle = win32api.GetCurrentProcess()
                memory_info = win32process.GetProcessMemoryInfo( process_handle )
                return memory_info['PeakWorkingSetSize']
    else:
        def memory():
            # ru_maxrss (index 4), in the platform's native units.
            res = resource.getrusage(resource.RUSAGE_SELF)
            return res[4]
# returns caller's stack
def caller_stack():
    """Return the current call stack, minus this helper and its caller,
    as a list of 'file:line(func)' strings (shortened via func_shorten)."""
    import traceback
    frames = traceback.extract_stack()[:-2]
    return ['%s:%d(%s)' % func_shorten(frame[:3]) for frame in frames]
# Accumulated profiling data, printed at the end of the SCons run:
# caller_bases maps a callee key (file, line, func) -> total call count;
# caller_dicts maps a callee key -> {caller-chain tuple: count}.
caller_bases = {}
caller_dicts = {}

def METHOD_NAME(back=0):
    """
    Trace caller stack and save info into global dicts, which
    are printed automatically at the end of SCons execution.
    """
    global caller_bases, caller_dicts
    import traceback
    # Grab the tail of the stack: after reversing, tb[0] is this function,
    # tb[1] the profiled callee, tb[2:] its callers.  `back` widens the
    # window to skip wrapper frames.
    tb = traceback.extract_stack(limit=3+back)
    tb.reverse()
    callee = tb[1][:3]
    caller_bases[callee] = caller_bases.get(callee, 0) + 1
    for caller in tb[2:]:
        # Key each caller by the whole callee..caller chain so the same
        # caller reached via different paths is counted separately.
        caller = callee + caller[:3]
        try:
            entry = caller_dicts[callee]
        except KeyError:
            caller_dicts[callee] = entry = {}
        entry[caller] = entry.get(caller, 0) + 1
        callee = caller
# print a single caller and its callers, if any
def _dump_one_caller(key, file, level=0):
    """Recursively print caller counts for *key*, indented by *level*."""
    leader = ' ' * level
    for neg_count, chain in sorted((-v, c) for c, v in caller_dicts[key].items()):
        file.write("%s %6d %s:%d(%s)\n" % ((leader, -neg_count) + func_shorten(chain[-3:])))
        if chain in caller_dicts:
            _dump_one_caller(chain, file, level + 1)
# print each call tree
def dump_caller_counts(file=sys.stdout):
    """Print, for every profiled callee, its call count and caller tree."""
    for callee in sorted(caller_bases):
        file.write("Callers of %s:%d(%s), %d calls:\n"
                   % (func_shorten(callee) + (caller_bases[callee],)))
        _dump_one_caller(callee, file)
# Path prefixes recognised by func_shorten().  The flag says whether the
# matched prefix itself is stripped (1) or kept (0) in the shortened path.
shorten_list = [
    ( '/scons/SCons/', 1),
    ( '/src/engine/SCons/', 1),
    ( '/usr/lib/python', 0),
]

if os.sep != '/':
    shorten_list = [(prefix.replace('/', os.sep), flag) for prefix, flag in shorten_list]

def func_shorten(func_tuple):
    """Given (filename, lineno, funcname), return it with the filename
    shortened at the first matching shorten_list prefix."""
    filename = func_tuple[0]
    for prefix, strip_prefix in shorten_list:
        pos = filename.find(prefix)
        if pos < 0:
            continue
        if strip_prefix:
            pos += len(prefix)
        return (filename[pos:],) + func_tuple[1:]
    return func_tuple
# Per-target cache of open trace streams, keyed by the `file` argument of
# Trace().  Entries stay open for the life of the process so that repeated
# Trace() calls keep appending to the same stream.
TraceFP = {}
if sys.platform == 'win32':
    TraceDefault = 'con'
else:
    TraceDefault = '/dev/tty'
TimeStampDefault = None
StartTime = time.time()
PreviousTime = StartTime

def Trace(msg, file=None, mode='w', tstamp=None):
    """Write a trace message to a file.  Whenever a file is specified,
    it becomes the default for the next call to Trace().

    `file` may be a filename, an already-open file object, or None (use
    the current default).  `tstamp`, when truthy, prefixes each message
    with seconds-since-start and seconds-since-previous-trace; the chosen
    value becomes the new default.
    """
    global TraceDefault
    global TimeStampDefault
    global PreviousTime
    if file is None:
        file = TraceDefault
    else:
        TraceDefault = file
    if tstamp is None:
        tstamp = TimeStampDefault
    else:
        TimeStampDefault = tstamp
    try:
        fp = TraceFP[file]
    except KeyError:
        try:
            fp = TraceFP[file] = open(file, mode)
        except TypeError:
            # Assume we were passed an open file pointer.
            fp = file
    if tstamp:
        now = time.time()
        fp.write('%8.4f %8.4f: ' % (now - StartTime, now - PreviousTime))
        PreviousTime = now
    fp.write(msg)
    fp.flush()
    # Bug fix: do NOT close fp here.  Handles opened above remain cached in
    # TraceFP, so closing them made every subsequent Trace() call fail with
    # "I/O operation on closed file" -- and it also closed file objects the
    # caller passed in and still owns.  flush() already makes the output
    # visible; the cached handles are released at process exit.
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: |
3,047 | pop back | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
"""
Topological sorting.
Performs stable "topological" sort of directed graphs (including graphs with cycles).
Possible optimization: instead of counting sort just put vertices on rindex[v]
positions if there were no SCCs detected.
Ported from original implementation in Java by Marat Radchenko.
original: https://github.com/slonopotamus/stable-topo-sort/blob/master/test/StableTopoSort.java#L16
algorithm description (russian): https://habr.com/ru/post/451208/
"""
from collections import defaultdict
class DoubleStack(object):
    """Two stacks sharing one fixed-size array: a "front" stack growing up
    from index 0 and a "back" stack growing down from the array's end."""

    def __init__(self, capacity):
        self.fp = 0            # front pointer: next free slot of the front stack
        self.bp = capacity     # back pointer: index of the back stack's top
        self.items = [0] * capacity

    def is_empty_front(self):
        return self.fp == 0

    def top_front(self):
        return self.items[self.fp - 1]

    def pop_front(self):
        value = self.items[self.fp - 1]
        self.fp -= 1
        return value

    def push_front(self, item):
        self.items[self.fp] = item
        self.fp += 1

    def is_empty_back(self):
        return self.bp == len(self.items)

    def top_back(self):
        return self.items[self.bp]

    def METHOD_NAME(self):
        # Pop from the back stack.
        value = self.items[self.bp]
        self.bp += 1
        return value

    def push_back(self, item):
        self.bp -= 1
        self.items[self.bp] = item
class Node(object):
    """Graph vertex wrapping an arbitrary value.

    ``edges`` keeps out-edges in insertion order (duplicates allowed);
    ``unique_edges`` is the duplicate-free set of the same targets;
    ``index`` caches the node's position in the node list for sorting.
    """

    def __init__(self, value):
        self.value = value
        self.edges = []
        self.unique_edges = set()
        self.index = 0

    def add_edge_to(self, node):
        self.edges.append(node)
        self.unique_edges.add(node)

    def __str__(self):
        return str(self.value)
class PeaSCC(object):
    """Iterative strongly-connected-component detection over a Node graph.

    After ``visit()``, ``rindex[v]`` holds the component number of vertex
    ``v``; components are numbered downward from ``len(g) - 1``.  Follows
    the Java implementation referenced in the module docstring.
    """

    def __init__(self, g):
        # g: list of Node objects; Node.index is each node's list position.
        self.graph = g
        self.rindex = [0 for i in range(len(g))]
        self.index = 1                 # next DFS visitation number (0 == unvisited)
        self.c = len(g) - 1            # next component number to assign
        # Shared-array double stacks: the front halves act as the explicit
        # DFS call stack (vertex + edge-resume-index), the back half of vS
        # holds vertices awaiting component assignment.
        self.vS = DoubleStack(len(g))
        self.iS = DoubleStack(len(g))
        self.root = [False for i in range(len(g))]

    def visit(self, v=None):
        # With no argument: run the DFS from every not-yet-visited vertex.
        if v is None:
            # Attn! We're walking nodes in reverse
            for i in range(len(self.graph)-1, -1, -1):
                if self.rindex[i] == 0:
                    self.visit(i)
        else:
            self.begin_visiting(v)
            while not self.vS.is_empty_front():
                self.visit_loop()

    def visit_loop(self):
        # One resumable DFS step for the vertex on top of the call stack.
        v = self.vS.top_front()
        i = self.iS.top_front()
        num_edges = len(self.graph[v].edges)
        # Continue traversing out-edges until none left.
        while i <= num_edges:
            # Continuation
            if i > 0:
                # Update status for previously traversed out-edge
                self.finish_edge(v, i - 1)
            if i < num_edges and self.begin_edge(v, i):
                return
            i += 1
        # Finished traversing out edges, update component info
        self.finish_visiting(v)

    def begin_visiting(self, v):
        # Push v on the call stack and stamp its visitation number.
        self.vS.push_front(v)
        self.iS.push_front(0)
        self.root[v] = True
        self.rindex[v] = self.index
        self.index += 1

    def finish_visiting(self, v):
        # Take this vertex off the call stack
        self.vS.pop_front()
        self.iS.pop_front()
        # Update component information
        if self.root[v]:
            self.index -= 1
            # v is a component root: sweep every waiting vertex that
            # belongs to v's component off the back stack.
            while not self.vS.is_empty_back() and self.rindex[v] <= self.rindex[self.vS.top_back()]:
                w = self.vS.METHOD_NAME()
                self.rindex[w] = self.c
                self.index -= 1;
            self.rindex[v] = self.c
            self.c -= 1
        else:
            self.vS.push_back(v)

    def begin_edge(self, v, k):
        # Descend into edge k's target if unvisited; record the resume index.
        w = self.graph[v].edges[k].index
        if self.rindex[w] == 0:
            self.iS.pop_front()
            self.iS.push_front(k+1)
            self.begin_visiting(w)
            return True
        else:
            return False

    def finish_edge(self, v, k):
        # Propagate the smaller visitation number back along the edge.
        w = self.graph[v].edges[k].index
        if self.rindex[w] < self.rindex[v]:
            self.rindex[v] = self.rindex[w]
            self.root[v] = False
class StableTopoSort(object):
    """Stable topological sort of a Node graph (cycles allowed); see the
    module docstring for the algorithm reference."""

    @staticmethod
    def reverse_counting_sort(nodes, rindex):
        """Stable counting sort of *nodes* keyed by descending rindex[i]."""
        # Histogram of the reversed keys.
        count = [0 for i in range(len(nodes))]
        for i in range(len(rindex)):
            cindex = len(nodes) - 1 - rindex[i]
            count[cindex] += 1
        # Prefix sums turn the histogram into end positions per key.
        for i in range(1, len(count)):
            count[i] += count[i - 1]
        output = [None for i in range(len(nodes))]
        for i in range(len(output)):
            cindex = len(nodes) - 1 - rindex[i]
            # Attn! We're sorting in reverse
            output_index = len(output) - count[cindex]
            output[output_index] = nodes[i]
            count[cindex] -= 1
        return output

    @staticmethod
    def stable_topo_sort(nodes):
        """Return *nodes* in stable topological order."""
        # 0. Remember where each node was
        for i in range(len(nodes)):
            nodes[i].index = i
        # 1. Sort edges according to node indices
        for i in range(len(nodes)):
            nodes[i].edges.sort(key = lambda o: o.index)
        # 2. Perform Tarjan SCC
        scc = PeaSCC(nodes)
        scc.visit()
        # 3. Perform *reverse* counting sort
        return StableTopoSort.reverse_counting_sort(nodes, scc.rindex)
def sort_by_incidence(vertices, edges):
    """Return (vertices, edges) reordered so that vertices with fewer
    incident edges come first (stable); edges are rewritten to the new
    vertex indices."""
    incidence = defaultdict(int)
    for src, dst in edges:
        incidence[src] += 1
        incidence[dst] += 1
    order = sorted(range(len(vertices)), key=lambda idx: incidence[idx])
    remap = {}
    vertices_out = []
    for new_index, old_index in enumerate(order):
        remap[old_index] = new_index
        vertices_out.append(vertices[old_index])
    edges_out = [(remap[src], remap[dst]) for src, dst in edges]
    return vertices_out, edges_out
def stable_topo_sort(vertices, edges):
    """Stable topological sort of *vertices*, where *edges* is a list of
    (from_index, to_index) pairs into *vertices*."""
    #vertices, edges = sort_by_incidence(vertices, edges)
    index_to_node = {}
    graph = []
    for position, value in enumerate(vertices):
        node = Node(value)
        graph.append(node)
        index_to_node[position] = node
    for src, dst in edges:
        index_to_node[src].add_edge_to(index_to_node[dst])
    ordered = StableTopoSort.stable_topo_sort(graph)
    return [node.value for node in ordered]
|
3,048 | build schema on 200 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "mobile-network show",
)
class Show(AAZCommand):
    """Get information about the specified mobile network.

    :example: Show mobile-network
        az mobile-network show -n mobile-network-name -g rg
    """

    # Generated by aaz-dev-tools: binds this command to the mobile network
    # GET resource and its API version.
    _aaz_info = {
        "version": "2022-11-01",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.mobilenetwork/mobilenetworks/{}", "2022-11-01"],
        ]
    }

    def _handler(self, command_args):
        # Standard AAZCommand flow: parse args, run the GET, return output.
        super()._handler(command_args)
        self._execute_operations()
        return self._output()

    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        # Build (and cache on the class) the CLI argument schema.
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""

        _args_schema = cls._args_schema
        _args_schema.mobile_network_name = AAZStrArg(
            options=["-n", "--name", "--mobile-network-name"],
            help="The name of the mobile network.",
            required=True,
            id_part="name",
            fmt=AAZStrArgFormat(
                pattern="^[a-zA-Z0-9][a-zA-Z0-9_-]*$",
                max_length=64,
            ),
        )
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        return cls._args_schema

    def _execute_operations(self):
        self.pre_operations()
        self.MobileNetworksGet(ctx=self.ctx)()
        self.post_operations()

    @register_callback
    def pre_operations(self):
        pass

    @register_callback
    def post_operations(self):
        pass

    def _output(self, *args, **kwargs):
        result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
        return result

    class MobileNetworksGet(AAZHttpOperation):
        # HTTP operation wrapping GET .../mobileNetworks/{mobileNetworkName}.
        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            if session.http_response.status_code in [200]:
                return self.on_200(session)

            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileNetwork/mobileNetworks/{mobileNetworkName}",
                **self.url_parameters
            )

        @property
        def method(self):
            return "GET"

        @property
        def error_format(self):
            return "MgmtErrorFormat"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "mobileNetworkName", self.ctx.args.mobile_network_name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2022-11-01",
                    required=True,
                ),
            }
            return parameters

        @property
        def header_parameters(self):
            parameters = {
                **self.serialize_header_param(
                    "Accept", "application/json",
                ),
            }
            return parameters

        def on_200(self, session):
            # Deserialize the successful response into ctx.vars.instance,
            # using the lazily-built schema below.
            data = self.deserialize_http_content(session)
            self.ctx.set_var(
                "instance",
                data,
                schema_builder=self.METHOD_NAME
            )

        _schema_on_200 = None

        @classmethod
        def METHOD_NAME(cls):
            # Build (and cache) the deserialization schema for 200 responses.
            if cls._schema_on_200 is not None:
                return cls._schema_on_200

            cls._schema_on_200 = AAZObjectType()

            _schema_on_200 = cls._schema_on_200
            _schema_on_200.id = AAZStrType(
                flags={"read_only": True},
            )
            _schema_on_200.location = AAZStrType(
                flags={"required": True},
            )
            _schema_on_200.name = AAZStrType(
                flags={"read_only": True},
            )
            _schema_on_200.properties = AAZObjectType(
                flags={"required": True, "client_flatten": True},
            )
            _schema_on_200.system_data = AAZObjectType(
                serialized_name="systemData",
                flags={"read_only": True},
            )
            _schema_on_200.tags = AAZDictType()
            _schema_on_200.type = AAZStrType(
                flags={"read_only": True},
            )

            properties = cls._schema_on_200.properties
            properties.provisioning_state = AAZStrType(
                serialized_name="provisioningState",
                flags={"read_only": True},
            )
            properties.public_land_mobile_network_identifier = AAZObjectType(
                serialized_name="publicLandMobileNetworkIdentifier",
                flags={"required": True},
            )
            properties.service_key = AAZStrType(
                serialized_name="serviceKey",
                flags={"read_only": True},
            )

            public_land_mobile_network_identifier = cls._schema_on_200.properties.public_land_mobile_network_identifier
            public_land_mobile_network_identifier.mcc = AAZStrType(
                flags={"required": True},
            )
            public_land_mobile_network_identifier.mnc = AAZStrType(
                flags={"required": True},
            )

            system_data = cls._schema_on_200.system_data
            system_data.created_at = AAZStrType(
                serialized_name="createdAt",
            )
            system_data.created_by = AAZStrType(
                serialized_name="createdBy",
            )
            system_data.created_by_type = AAZStrType(
                serialized_name="createdByType",
            )
            system_data.last_modified_at = AAZStrType(
                serialized_name="lastModifiedAt",
            )
            system_data.last_modified_by = AAZStrType(
                serialized_name="lastModifiedBy",
            )
            system_data.last_modified_by_type = AAZStrType(
                serialized_name="lastModifiedByType",
            )

            tags = cls._schema_on_200.tags
            tags.Element = AAZStrType()

            return cls._schema_on_200
class _ShowHelper:
    """Helper class for Show"""

# Public API of this generated module.
__all__ = ["Show"]
3,049 | tear down class | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
from unittest.mock import MagicMock
from knack.util import CLIError
from azure.cli.command_modules.resource.custom import (_ResourceUtils, _validate_resource_inputs,
parse_resource_id)
class TestApiCheck(unittest.TestCase):
    """Tests for resource-id parsing, input validation, and API-version
    resolution fallbacks in azure.cli.command_modules.resource.custom."""

    @classmethod
    def setUpClass(cls):
        pass

    @classmethod
    def METHOD_NAME(cls):
        # Class-level teardown counterpart of setUpClass (nothing to clean up).
        pass

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_parse_resource(self):
        # Lower-case 'resourcegroups' segment must still parse.
        parts = parse_resource_id('/subscriptions/00000/resourcegroups/bocconitestlabrg138089/'
                                  'providers/microsoft.devtestlab/labs/bocconitestlab/'
                                  'virtualmachines/tasktest1')
        self.assertIsNotNone(parts.get('type'))

    def test_parse_resource_capital(self):
        # Camel-case 'resourceGroups' variant.
        parts = parse_resource_id('/subscriptions/00000/resourceGroups/bocconitestlabrg138089/'
                                  'providers/microsoft.devtestlab/labs/bocconitestlab/'
                                  'virtualmachines/tasktest1')
        self.assertIsNotNone(parts.get('type'))

    def test_validate_resource_inputs(self):
        # Each of the four arguments is mandatory; all four present succeeds.
        self.assertRaises(CLIError, _validate_resource_inputs, None, None, None, None)
        self.assertRaises(CLIError, _validate_resource_inputs, 'a', None, None, None)
        self.assertRaises(CLIError, _validate_resource_inputs, 'a', 'b', None, None)
        self.assertRaises(CLIError, _validate_resource_inputs, 'a', 'b', 'c', None)
        _validate_resource_inputs('a', 'b', 'c', 'd')

    def test_resolve_api_provider_backup(self):
        # Verifies provider is used as backup if api-version not specified.
        from azure.cli.core.mock import DummyCli
        cli = DummyCli()
        rcf = self._get_mock_client()
        res_utils = _ResourceUtils(cli, resource_type='Mock/test', resource_name='vnet1',
                                   resource_group_name='rg', rcf=rcf)
        self.assertEqual(res_utils.api_version, "2016-01-01")

    def test_resolve_api_provider_with_parent_backup(self):
        # Verifies provider (with parent) is used as backup if api-version not specified.
        from azure.cli.core.mock import DummyCli
        cli = DummyCli()
        rcf = self._get_mock_client()
        res_utils = _ResourceUtils(cli, parent_resource_path='foo/testfoo123', resource_group_name='rg',
                                   resource_provider_namespace='Mock', resource_type='test',
                                   resource_name='vnet1',
                                   rcf=rcf)
        self.assertEqual(res_utils.api_version, "1999-01-01")

    def test_resolve_api_all_previews(self):
        # Verifies most recent preview version returned only if there are no non-preview versions.
        from azure.cli.core.mock import DummyCli
        cli = DummyCli()
        rcf = self._get_mock_client()
        res_utils = _ResourceUtils(cli, resource_type='Mock/preview', resource_name='vnet1',
                                   resource_group_name='rg', rcf=rcf)
        self.assertEqual(res_utils.api_version, "2005-01-01-preview")

    def _get_mock_client(self):
        # Fake resource client whose provider advertises fixed API versions
        # for the resource types exercised above.
        client = MagicMock()
        provider = MagicMock()
        provider.resource_types = [
            self._get_mock_resource_type('skip', ['2000-01-01-preview', '2000-01-01']),
            self._get_mock_resource_type('test', ['2016-01-01-preview', '2016-01-01']),
            self._get_mock_resource_type('foo', ['1999-01-01-preview', '1999-01-01']),
            self._get_mock_resource_type('preview', ['2005-01-01-preview', '2004-01-01-preview'])
        ]
        client.providers.get.return_value = provider
        return client

    def _get_mock_resource_type(self, name, api_versions):  # pylint: disable=no-self-use
        # Mock with the two attributes _ResourceUtils inspects.
        rt = MagicMock()
        rt.resource_type = name
        rt.api_versions = api_versions
        return rt
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
3,050 | nudge | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Årets Spil 2014 - klappet og klart til julehandlen.
# Prik til concieggs mellem en og fem gange! -
# Men vær forsigtig. Pludselig eksploderer concieggs!
# Vinderen er den, som har prikket mest til concieggs, uden at udløse et raserianfald!
# SJOVT FOR ALLE MENNESKER I HELE VERDEN!
import subprocess
import sys
import os, time
from functools import partial
import itertools
import random
import re
import json
import operator
# State schema: {'nudge': 'true'/'false', 'forrigePrikker': <name>,
#                'prikTilbage': <count>, 'pointtavle': {name: points}}
currentNames = ["",""]

def getDict():
    """Load the persisted game state from the 'prikkeleg' database."""
    raw = subprocess.check_output(["dbRead", "prikkeleg"])
    return json.loads(raw.decode('utf-8'))
def setDict(Dict):
    """Persist the game state by piping it as JSON to ``dbWrite``.

    Security/robustness fix: the JSON used to be spliced into a shell
    command line (``echo '<json>' | dbWrite prikkeleg``), which broke on
    single quotes in player names and allowed shell command injection via
    attacker-controlled nicks.  Feed the payload through stdin instead.
    """
    payload = json.dumps(Dict, ensure_ascii=False)
    subprocess.run(["dbWrite", "prikkeleg"], input=payload.encode("utf-8"))
def currentUser(rootalias=True):
    """Return the current IRC user's name (root alias when *rootalias* is true).

    Resolves the name once from $EGGS_USER, caches both the raw nick and its
    root alias in the module-global ``currentNames``.  Unknown users get a
    rejection message and the process exits.
    """
    global currentNames
    if currentNames == ["",""]:
        raw = os.getenv("EGGS_USER")
        (bool, rootAlias) = verifyAndGetRootAlias(raw)
        if bool:
            # NOTE(review): this regex strips a punctuation character only
            # when surrounded by spaces -- presumably nick sanitising;
            # confirm intent.
            currentNames = [re.sub(" [\.;,\'\"] ", "", raw), re.sub(" [\.;,\'\"] ", "", rootAlias)]
            return alias(currentNames, rootalias)
        else:
            print ("Beklager, %s, men jeg kan ikke lade alle og enhver røre sådan ved mig." % re.sub(" [\.;,\'\"] ", "", raw))
            sys.exit(0)
    else:
        return alias(currentNames, rootalias)
def alias(liste, bool):
    """Pick from the [raw_nick, root_alias] pair: index 1 when *bool*, else 0."""
    return liste[1] if bool else liste[0]
def run_main(args):
    """Play one round: validate the poker, apply the pokes, update the
    scoreboard and report standings (or end the game)."""
    antalPrik = parseArgs(args)
    try:
        Dict = getDict()
    except:
        # No saved game state yet: start a fresh game and bail out.
        nytSpil()
        sys.exit(0)
    if Dict['forrigePrikker'] == currentUser():
        # The same player may not poke twice in a row.
        print("%s! %s, du prikkede mig også sidst. Dit træk er ugyldigt!" % (svin(), currentUser(False)))
        subprocess.call("putInBadStanding " + currentUser(), shell=True)
        sys.exit(0)
    sys.stdout.flush()
    time.sleep(1)
    status = Dict['prikTilbage'] - antalPrik
    Dict['forrigePrikker'] = currentUser()
    if status <= 0:
        # Tolerance exhausted: the current player loses.  slutsekvens ends
        # the process (via hvemFoerer's winner sequence).
        slutsekvens(currentUser(), Dict)
    Dict['prikTilbage'] = status
    try:
        Dict['pointtavle'][currentUser()] += antalPrik
    except KeyError:
        # First poke from this player.
        Dict['pointtavle'][currentUser()] = antalPrik
    setDict(Dict)
    hvemFoerer(Dict['pointtavle'], False)
def slutsekvens(taber, dict):
    """End-of-game sequence: drop loser *taber* from the scoreboard, announce
    the loss, then run the winner ceremony (hvemFoerer exits the process)."""
    try:
        dict['pointtavle'].pop(taber)
    except KeyError:
        # Loser had no points yet.
        pass
    print("%s, %s! Du prikkede det forkerte sted på det forkerte tidspunkt, og har dermed tabt!" % (svin(), randomAlias(taber)))
    sys.stdout.flush()
    time.sleep(1)
    hvemFoerer(dict['pointtavle'], True)
def randomAlias(name):
    """Return a random display alias for *name* via the external
    ``randomName`` helper.

    Security fix: the nick was previously interpolated into a shell
    command line (``"randomName " + name`` with shell=True), allowing
    command injection through shell metacharacters in nicks.  Pass it as
    an argv element instead.
    """
    output = subprocess.check_output(["randomName", name])
    return output.decode().strip('\n')
def hvemFoerer(dictionary, Vindersekvens=False):
    """Report who is leading, or (when *Vindersekvens*) run the full winner
    ceremony, restart the game, and exit the process."""
    sorted_dict = sorted(dictionary.items(), key=operator.itemgetter(1))
    if sorted_dict == []:
        # Empty scoreboard: fall back to concieggs itself.
        sorted_dict = [('concieggs', 'concieggs')]
    vinder = randomAlias(sorted_dict[-1][0])
    taber = randomAlias(sorted_dict[0][0])
    if Vindersekvens:
        print ("%s %s! Du har lige vundet i prikkeleg!! %s er den nye Konge af IRC!" % (congrats() , vinder, vinder))
        print("Her er din cutscene!")
        sys.stdout.flush()
        subprocess.call(["runcmd heldigvideo"], shell=True)
        # The "robespierre" flag, when readable, suppresses king-making.
        robespierre = subprocess.call(["dbRead", "robespierre"])
        if robespierre == 0:  # 0 is success, i.e. the flag could be read.
            print ("Åh vent, %s kan alligevel ikke blive konge. Øv bøv." % (vinder))
        else:
            # NOTE(review): winner name spliced into a shell command with
            # shell=True -- injection-prone; consider an argv list.
            subprocess.call(["makeKing \'" + vinder + "\'"], shell=True)
        nytSpil()
        if robespierre != 0:
            subprocess.call(["runcmd topic \"" + vinder + " er kongen\""], shell=True)
        sys.exit(0)
    else:
        print("%s er i spidsen - %s, %s! Du kan sagtens nå det!" % (vinder, heppe(), taber))
def nytSpil(M=15, N=30):
    """Reset the game: fresh state with a random poke tolerance in [M, N].

    Security/robustness fix: as in setDict(), the JSON used to be spliced
    into a shell command line; pipe it through dbWrite's stdin instead
    (quote-safe, injection-safe).
    """
    state = {'nudge': 'false', 'forrigePrikker': 'concieggs', 'prikTilbage': random.randint(M, N), 'pointtavle': {}}
    payload = json.dumps(state, ensure_ascii=False)
    subprocess.run(["dbWrite", "prikkeleg"], input=payload.encode("utf-8"))
def forfra(argument):
    """Restart the game with a poke tolerance drawn from 'M:N' in *argument*.

    Ranges narrower than 9 pokes are considered cheating: the player is
    called out and put in bad standing (the game still restarts).
    """
    mOgN = re.findall("[0-9]+", argument)
    try:
        # NOTE(review): assert used for input validation -- it is stripped
        # under ``python -O``; an explicit check would be safer.
        assert len(mOgN) == 2
        M = int(mOgN[0])
        N = int(mOgN[1])
        if abs(M - N) < 9:
            print ("Hey allesammen! %s prøver på at starte en underlødig prikkeleg!" % (currentUser(False)))
            print ("Der var faktisk kun " + str(abs(M-N)) + " priks forskel på minimum og maximum, i det han prøvede på at starte. Er det ikke frækt?")
            subprocess.call(["runcmd topic \"" + currentUser(False) + " prikker falskt. Fy for skam.\""], shell=True)
            subprocess.call("putInBadStanding " + currentUser(), shell=True)
        if M < N:
            nytSpil(M,N)
        if M > N:
            nytSpil(N, M)
        # NOTE(review): M == N silently starts nothing -- confirm intended.
    except:
        print ("Brug korrekt syntaks: <tal>:<tal>")
def verifyAndGetRootAlias(username):
    """Check whether *username* is a known user via the external ``aliases``
    helper.

    Returns ``(True, root_alias)`` when the lookup succeeds, otherwise
    ``(False, "")``.

    Fixes: the username was interpolated into a shell command line
    (shell=True), allowing command injection through IRC nicks; pass it as
    an argv element.  The bare ``except:`` is narrowed to the failures this
    lookup can actually produce.
    """
    try:
        output = subprocess.check_output(["aliases", username]).decode()
    except (subprocess.SubprocessError, OSError, UnicodeDecodeError):
        # Helper missing, lookup failed, or undecodable output.
        return (False, "")
    return (True, output.split('\n')[0])
def METHOD_NAME():
    """Comfort concieggs: once per game, add 3-23 extra pokes of tolerance."""
    state = getDict()
    if state['nudge'] != "false":
        # Already comforted this game.
        print ("Beklager, %s... " % (currentUser(False)))
        return
    state['nudge'] = "true"
    state['prikTilbage'] += random.randint(3, 23)
    setDict(state)
    print ("Tak, %s. Nu har jeg det faktisk bedre. :-)" % (currentUser(False)))
def svin():
    """Return a random insult hurled at whoever blew up the bot."""
    insults = [
        "Så så man lige dig, hva'",
        "Pak sammen",
        "Lortesvin",
        "Du gør mig bare trist",
        "DNUR",
        "FØJ",
        "Dit skodøje",
        "Røvlort",
        "Dit fuckhoved",
        "Hvad bilder du dig ind?",
        "Din bøf"
    ]
    # random.choice replaces the manual randint(0, len-1) indexing.
    return random.choice(insults)
def congrats():
    """Return a random congratulation for the new king."""
    cheers = [
        "Tillykke",
        "Novra",
        "Vildt,",
        "Sikke noget,",
        "Godt gået,",
        "Tjullahop,",
        "Super fedest,"
    ]
    # random.choice replaces the manual randint(0, len-1) indexing.
    return random.choice(cheers)
def heppe():
    """Return a random encouragement for the player who is behind."""
    cheers = [
        "Heja",
        "Fremad",
        "Kom igen",
        "Op med modet",
        "Frem med humøret",
        "Nu gælder det",
        "Nu handler det om at se fremad",
        "Bare rolig"
    ]
    # random.choice replaces the manual randint(0, len-1) indexing.
    return random.choice(cheers)
def help():
    """Print the game's usage text.

    NOTE: deliberately shadows builtins.help inside this script.
    """
    usage = """ Årets Spil 2014 - klappet og klart til julehandlen.
        Prik til din concieggs mellem en og fem gange! -
        Men vær forsigtig. Pludselig eksploderer din concieggs!
        Vinderen er den, som har prikket mest til sin concieggs, uden at udløse et raserianfald!
        SJOVT FOR ALLE MENNESKER I HELE VERDEN!\n\n\n
        -forfra M:N starter prikkelegen forfra, med en prikketolerance på mellem M og N prik.
        -trøst trøster din concieggs, så din concieggs får det lidt bedre igen.
        -smugkig gør ikke noget? """
    print(usage)
def parseArgs(args):
    """Handle command-line flags (each exits) or count pokes in *args*.

    Returns:
        The number of "prik"/"prikke" tokens found, guaranteed 1..5.
    """
    if len(args) == 1:
        help()
        sys.exit(0)
    flag = args[1]
    if flag == "-hjælp":
        help()
        sys.exit()
    if flag == "-forfra":
        forfra(args[2])
        sys.exit(0)
    if flag == "-trøst":
        METHOD_NAME()
        sys.exit(0)
    if flag == "-smugkig":
        sys.exit(0)
    if flag == "-hvem":
        state = getDict()
        forrige = state['forrigePrikker']
        print("Nej, det var %s som prikkede sidst :P" % subprocess.check_output("randomName " + forrige, shell=True).decode().split("\n")[0])
        sys.exit(0)
    # No flag: count poke words, case-insensitively.
    antalPrik = sum(1 for arg in args if arg.lower() in ("prikke", "prik"))
    if not 1 <= antalPrik <= 5:
        print("Prik mellem én og fem gange, eller kør prikkeleg -hjælp.")
        sys.exit(0)
    return antalPrik
if __name__ == '__main__':
run_main(sys.argv) |
3,051 | get booted deployment id | import os.path
import DataFormats
import SSHLibrary
from robot.libraries.BuiltIn import BuiltIn
_log = BuiltIn().log
DATA_DIR = "/var/lib/microshift"
VERSION_FILE = f"{DATA_DIR}/version"
BACKUP_STORAGE = "/var/lib/microshift-backups"
HEALTH_FILE = f"{BACKUP_STORAGE}/health.json"
def remote_sudo_rc(cmd: str) -> tuple[str, int]:
    """Run *cmd* with sudo over the currently active SSHLibrary connection.

    Returns:
        (stdout, rc) of the remote command; stderr is logged but not returned.
    """
    ssh = BuiltIn().get_library_instance("SSHLibrary")
    # Call the library method unbound so the sudo/return flags can be passed
    # explicitly alongside the live SSH instance.
    stdout, stderr, rc = SSHLibrary.SSHLibrary.execute_command(
        ssh, command=cmd, sudo=True, return_stderr=True, return_rc=True
    )
    BuiltIn().log(f"stdout:\n{stdout}")
    BuiltIn().log(f"stderr:\n{stderr}")
    BuiltIn().log(f"rc: {rc}")
    return stdout, rc
def remote_sudo(cmd: str) -> str:
    """Run *cmd* with sudo on the remote host, failing the test on rc != 0."""
    output, exit_code = remote_sudo_rc(cmd)
    BuiltIn().should_be_equal_as_integers(exit_code, 0)
    return output
def METHOD_NAME() -> str:
    """
    Get ID of currently booted deployment
    """
    status_json = remote_sudo("rpm-ostree status --booted --json")
    status = DataFormats.json_parse(status_json)
    # --booted limits the report to a single deployment entry.
    return status["deployments"][0]["id"]
def get_staged_deployment_id() -> str:
    """
    Get ID of a staged deployment, asserting the first entry is staged
    """
    status = DataFormats.json_parse(remote_sudo("rpm-ostree status --json"))
    staged = status["deployments"][0]
    BuiltIn().should_be_true(staged["staged"])
    return staged["id"]
def get_deployment_backup_prefix_path(deploy_id: str) -> str:
    """
    Get backup path prefix for current deployment.

    Prefix path is BACKUP_STORAGE/{id}; globbing directories starting with
    the prefix yields the list of backups for the deployment.
    """
    for value in (BACKUP_STORAGE, deploy_id):
        BuiltIn().should_not_be_empty(value)
    return os.path.join(BACKUP_STORAGE, deploy_id)
def create_fake_backups(count: int, type_unknown: bool = False) -> None:
    """
    Create *count* fake backup directories.

    "Unknown type" directories do not match the automated backup naming
    convention 'deploymentID_bootID' — i.e. (osname)-(64 chars).(int)_(32
    chars) — and should not be automatically pruned by MicroShift.
    """
    deploy_id = METHOD_NAME()
    prefix_path = (
        f"{get_deployment_backup_prefix_path(deploy_id)}_fake000000000000000000000000"
    )
    if type_unknown:
        prefix_path = os.path.join(BACKUP_STORAGE, "unknown_")
    for index in range(count):
        # Zero-pad to four digits so names sort predictably.
        remote_sudo(f"mkdir -p {prefix_path}{index:0>4}")
def remove_backups_for_deployment(deploy_id: str) -> None:
    """Remove any existing backup for specified deployment"""
    # Globbing on the prefix removes every backup directory of the deployment.
    prefix_path = get_deployment_backup_prefix_path(deploy_id)
    remote_sudo(f"rm -rf {prefix_path}*")
def remove_backup_storage() -> None:
    """
    Removes entire backup storage directory (/var/lib/microshift-backups)
    which contains backups and health data
    """
    remote_sudo(f"rm -rf {BACKUP_STORAGE}")
def get_current_ref() -> str:
    """
    Get reference of current deployment
    """
    origin = remote_sudo("rpm-ostree status --json | jq -r '.deployments[0].origin'")
    _log(f"Current ref: {origin}")
    # origin has the form "<remote>:<ref>"; return only the ref part.
    return origin.split(":")[1]
def get_persisted_system_health() -> str:
    """
    Get system health information from health.json file
    """
    # Consistency fix: use the module-level HEALTH_FILE constant instead of
    # rebuilding the same "{BACKUP_STORAGE}/health.json" path inline.
    return remote_sudo(f"jq -r '.health' {HEALTH_FILE}")
def rpm_ostree_rebase(ref: str) -> str:
    """
    Rebase system to given OSTree ref.

    Returns:
        stdout of the `rpm-ostree rebase` command.
    """
    # Annotation fixed: the function returns remote_sudo()'s stdout, so the
    # previous `-> None` was wrong. (Docstring typo "OSTRee" also fixed.)
    return remote_sudo(f"rpm-ostree rebase {ref}")
def rpm_ostree_rollback() -> str:
    """
    Roll the system back to the previous deployment.

    Returns:
        stdout of the `rpm-ostree rollback` command.
    """
    # Annotation fixed: the function returns remote_sudo()'s stdout, so the
    # previous `-> None` was wrong.
    return remote_sudo("rpm-ostree rollback")
def rebase_system(ref: str) -> str:
    """
    Rebase system to given OSTree ref and return its deployment ID
    """
    rpm_ostree_rebase(ref)
    # The rebase leaves the new deployment staged; report its ID.
    return get_staged_deployment_id()
def get_current_boot_id() -> str:
    """Return the kernel's boot ID with the dashes stripped."""
    raw = remote_sudo("cat /proc/sys/kernel/random/boot_id")
    return raw.replace("-", "")
def does_backup_exist(deploy_id: str, boot_id: str = "") -> bool:
    """Check whether a backup exists for *deploy_id*.

    When *boot_id* is given, check for that specific boot's backup
    ("{prefix}_{boot_id}"); otherwise check the bare prefix path.
    """
    prefix = get_deployment_backup_prefix_path(deploy_id)
    # Simplified: the old else-branch copied prefix through a pointless
    # f"{prefix}" f-string.
    path = f"{prefix}_{boot_id}" if boot_id else prefix
    return path_exists(path)
def path_exists(path: str) -> bool:
    """Return True when *path* exists on the remote host (`test -e`)."""
    _, rc = remote_sudo_rc(f"test -e {path}")
    return rc == 0
def path_should_exist(path: str) -> None:
    """Fail the test unless *path* exists on the remote host."""
    BuiltIn().should_be_true(path_exists(path))
def path_should_not_exist(path: str) -> None:
    """Fail the test if *path* exists on the remote host."""
    BuiltIn().should_not_be_true(path_exists(path))
def cleanup_rpm_ostree() -> None:
    """Removes any pending or rollback deployments leaving only currently booted"""
    remote_sudo("rpm-ostree cleanup --pending --rollback")
def create_agent_config(cfg: str) -> None:
    """Write *cfg* verbatim to the microshift test agent's config file.

    NOTE(review): `sudo tee` inside an already-sudo'd remote command looks
    redundant — confirm whether the inner sudo is required.
    """
    remote_sudo(f"echo '{cfg}' | sudo tee /var/lib/microshift-test-agent.json")
def write_greenboot_microshift_wait_timeout(seconds: int) -> None:
    """Overwrite greenboot's config with a MICROSHIFT_WAIT_TIMEOUT_SEC entry."""
    remote_sudo(
        f"echo 'MICROSHIFT_WAIT_TIMEOUT_SEC={seconds}' | sudo tee /etc/greenboot/greenboot.conf"
    )
def remove_greenboot_microshift_wait_timeout() -> None:
    """Delete greenboot's config file created by write_greenboot_microshift_wait_timeout."""
    remote_sudo("rm /etc/greenboot/greenboot.conf")
def no_transaction_in_progress() -> None:
    """Assert that rpm-ostree reports no transaction in progress."""
    status = DataFormats.json_parse(remote_sudo("rpm-ostree status --json"))
    # Absent key and explicit null both mean "no transaction".
    in_progress = status.get("transaction") is not None
    BuiltIn().should_not_be_true(in_progress)
3,052 | bool option | # Copyright (c) 2005 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import optparse
import sys
from optparse import *
class nodefault(object):
    """Sentinel marker type. NOTE(review): no uses are visible in this
    chunk — presumably marks options without a default value; confirm."""
    pass
class splitter(object):
    """optparse callback helper: split each option value on a separator and
    accumulate the pieces into the option's destination list."""

    def __init__(self, split):
        # Separator used to break up each option value.
        self.split = split

    def __call__(self, option, opt_str, value, parser):
        pieces = value.split(self.split)
        current = getattr(parser.values, option.dest)
        if current is None:
            # First occurrence: start the destination list.
            setattr(parser.values, option.dest, pieces)
        else:
            # Later occurrences extend the existing list in place.
            current.extend(pieces)
class OptionParser(dict):
    """dict-backed facade over optparse.OptionParser.

    Parsed option values are stored in the dict itself, so they can be read
    and written both as items (parser["opt"]) and as attributes
    (parser.opt) via the __getattr__/__setattr__ overrides below.
    """
    def __init__(self, *args, **kwargs):
        kwargs.setdefault("formatter", optparse.TitledHelpFormatter())
        self._optparse = optparse.OptionParser(*args, **kwargs)
        # Stop at the first positional argument so trailing args can be
        # forwarded elsewhere untouched.
        self._optparse.disable_interspersed_args()
        # Every option ever added, keyed by its dest name.
        self._allopts = {}
        # current option group
        self._group = self._optparse
    def set_defaults(self, *args, **kwargs):
        """Forward default values to the underlying optparse parser."""
        return self._optparse.set_defaults(*args, **kwargs)
    def set_group(self, *args, **kwargs):
        """set the current option group; with no arguments, reset to the
        top-level (ungrouped) parser"""
        if not args and not kwargs:
            self._group = self._optparse
        else:
            self._group = self._optparse.add_option_group(*args, **kwargs)
    def add_option(self, *args, **kwargs):
        """add an option to the current option group, or global if none set"""
        # action="append" combined with a `split` keyword lets each option
        # argument itself be a list separated by the split string.
        if kwargs.get("action", None) == "append" and "split" in kwargs:
            split = kwargs.pop("split")
            kwargs["default"] = []
            kwargs["type"] = "string"
            kwargs["action"] = "callback"
            kwargs["callback"] = splitter(split)
        option = self._group.add_option(*args, **kwargs)
        dest = option.dest
        if dest not in self._allopts:
            self._allopts[dest] = option
        return option
    def METHOD_NAME(self, name, default, help):
        """add a boolean option called --name and --no-name.
        Only the non-default variant shows its help text; the other is
        suppressed."""
        tname = f"--{name}"
        fname = f"--no-{name}"
        dest = name.replace("-", "_")
        if default:
            thelp = optparse.SUPPRESS_HELP
            fhelp = help
        else:
            thelp = help
            fhelp = optparse.SUPPRESS_HELP
        topt = self.add_option(
            tname, action="store_true", default=default, help=thelp
        )
        fopt = self.add_option(
            fname, action="store_false", dest=dest, help=fhelp
        )
        return topt, fopt
    def __getattr__(self, attr):
        """Attribute reads fall back to dict lookup for option values."""
        if attr.startswith("_"):
            return super().__getattribute__(attr)
        if attr in self:
            return self[attr]
        return super().__getattribute__(attr)
    def __setattr__(self, attr, value):
        """Setting a known option updates optparse defaults (and the stored
        value, if already parsed); anything else is a normal attribute."""
        if attr.startswith("_"):
            super().__setattr__(attr, value)
        elif attr in self._allopts:
            defaults = {attr: value}
            self.set_defaults(**defaults)
            if attr in self:
                self[attr] = value
        else:
            super().__setattr__(attr, value)
    def parse_args(self):
        """Parse sys.argv, store option values into this dict, and return
        the leftover positional arguments."""
        opts, args = self._optparse.parse_args()
        for key, val in opts.__dict__.items():
            # Keep an existing entry when optparse produced None for a key
            # that was already set (e.g. via attribute assignment).
            if val is not None or key not in self:
                self[key] = val
        return args
    def usage(self, exitcode=None):
        """Print the help text and optionally exit with *exitcode*."""
        self._optparse.print_help()
        if exitcode is not None:
            sys.exit(exitcode)
3,053 | shrink on resize | from rubicon.objc import ObjCClass
from toga_iOS.libs import UIApplication
from ..probe import BaseProbe
from .properties import toga_color
# From UIControl.h
UIControlEventTouchDown = 1 << 0
UIControlEventTouchDownRepeat = 1 << 1
UIControlEventTouchDragInside = 1 << 2
UIControlEventTouchDragOutside = 1 << 3
UIControlEventTouchDragEnter = 1 << 4
UIControlEventTouchDragExit = 1 << 5
UIControlEventTouchUpInside = 1 << 6
UIControlEventTouchUpOutside = 1 << 7
UIControlEventTouchCancel = 1 << 8
UIControlEventValueChanged = 1 << 12 # sliders, etc.
UIControlEventPrimaryActionTriggered = 1 << 13 # semantic action: for buttons, etc.
UIControlEventMenuActionTriggered = (
1 << 14
) # triggered when the menu gesture fires but before the menu presents
UIControlEventEditingDidBegin = 1 << 16 # UITextField
UIControlEventEditingChanged = 1 << 17
UIControlEventEditingDidEnd = 1 << 18
UIControlEventEditingDidEndOnExit = 1 << 19 # 'return key' ending editing
UIControlEventAllTouchEvents = 0x00000FFF # for touch events
UIControlEventAllEditingEvents = 0x000F0000 # for UITextField
UIControlEventApplicationReserved = 0x0F000000 # range available for application use
UIControlEventSystemReserved = 0xF0000000 # range reserved for internal framework use
UIControlEventAllEvents = 0xFFFFFFFF
CATransaction = ObjCClass("CATransaction")
class SimpleProbe(BaseProbe):
    """Test probe exposing a Toga iOS widget's native UIKit state."""
    def __init__(self, widget):
        super().__init__()
        self.app = widget.app
        self.widget = widget
        self.impl = widget._impl
        self.native = widget._impl.native
        # Concrete probe subclasses declare the UIKit class they expect.
        assert isinstance(self.native, self.native_class)
    def assert_container(self, container):
        """Assert the widget's native view is a subview of *container*."""
        assert container._impl.container == self.impl.container
        container_native = container._impl.container.native
        for control in container_native.subviews():
            if control == self.native:
                break
        else:
            raise ValueError(f"cannot find {self.native} in {container_native}")
    def assert_not_contained(self):
        """Assert the widget has no container and is detached from any view."""
        assert self.widget._impl.container is None
        assert self.native.superview() is None
    def assert_alignment(self, expected):
        # `alignment` is provided by concrete probe subclasses.
        assert self.alignment == expected
    async def redraw(self, message=None, delay=None):
        """Request a redraw of the app, waiting until that redraw has completed."""
        # Force a widget repaint
        self.widget.window.content._impl.native.layer.displayIfNeeded()
        # Flush CoreAnimation; this ensures all animations are complete
        # and all constraints have been evaluated.
        CATransaction.flush()
        await super().redraw(message=message, delay=delay)
    @property
    def enabled(self):
        return self.native.isEnabled()
    @property
    def hidden(self):
        return self.native.hidden
    @property
    def width(self):
        return self.native.frame.size.width
    @property
    def height(self):
        height = self.native.frame.size.height
        # If the widget is the top level container, the frame height will
        # include the allocation for the app titlebar.
        if self.impl.container is None:
            height = height - self.impl.viewport.top_offset
        return height
    @property
    def METHOD_NAME(self):
        # Default shrink-on-resize flag; subclasses may override.
        return True
    def assert_layout(self, size, position):
        """Assert the widget has the given (width, height) and (x, y)."""
        # Widget is contained and in a window.
        assert self.widget._impl.container is not None
        assert self.native.superview() is not None
        # size and position is as expected.
        assert (self.native.frame.size.width, self.native.frame.size.height) == size
        # Allow for the status bar and navigation bar in vertical position
        statusbar_frame = UIApplication.sharedApplication.statusBarFrame
        nav_controller = self.widget.window._impl.native.rootViewController
        navbar_frame = nav_controller.navigationBar.frame
        offset = statusbar_frame.size.height + navbar_frame.size.height
        assert (
            self.native.frame.origin.x,
            self.native.frame.origin.y - offset,
        ) == position
    def assert_width(self, min_width, max_width):
        assert (
            min_width <= self.width <= max_width
        ), f"Width ({self.width}) not in range ({min_width}, {max_width})"
    def assert_height(self, min_height, max_height):
        assert (
            min_height <= self.height <= max_height
        ), f"Height ({self.height}) not in range ({min_height}, {max_height})"
    @property
    def background_color(self):
        return toga_color(self.native.backgroundColor)
    async def press(self):
        # Simulate a touch-down on the native control.
        self.native.sendActionsForControlEvents(UIControlEventTouchDown)
    @property
    def is_hidden(self):
        return self.native.isHidden()
    @property
    def has_focus(self):
        return self.native.isFirstResponder
    def type_return(self):
        # Insert a newline, which UIKit treats as the return key.
        self.native.insertText("\n")
    def _prevalidate_input(self, char):
        # Hook for subclasses to reject input before insertion.
        return True
    async def type_character(self, char):
        """Simulate typing *char* into the focused native control."""
        if char == "<esc>":
            # There's no analog of esc on iOS
            pass
        elif char == "\n":
            self.type_return()
        else:
            # Perform any prevalidation that is required. If the input isn't
            # valid, do a dummy "empty" insertion.
            valid = self._prevalidate_input(char)
            if valid:
                self.native.insertText(char)
            else:
                self.native.insertText("")
3,054 | test bad path | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for session_bundle.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import shutil
import numpy as np
from tensorflow.contrib.session_bundle import constants
from tensorflow.contrib.session_bundle import manifest_pb2
from tensorflow.contrib.session_bundle import session_bundle
from tensorflow.core.example.example_pb2 import Example
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.parsing_ops # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.training import saver
from tensorflow.python.util import compat
SAVED_MODEL_PATH = (
"python/saved_model/example/saved_model_half_plus_two/00000123")
SESSION_BUNDLE_PATH = "contrib/session_bundle/testdata/half_plus_two/00000123"
def _make_serialized_example(x):
  """Build a serialized tf.Example carrying one float feature named "x"."""
  ex = Example()
  ex.features.feature["x"].float_list.value.append(x)
  return ex.SerializeToString()
class SessionBundleLoadTest(test.TestCase):
  """Tests for loading legacy session-bundle exports."""

  def _checkRegressionSignature(self, signatures, sess):
    """Run the default regression signature; expects y = 0.5 * x + 2."""
    default_signature = signatures.default_signature
    input_name = default_signature.regression_signature.input.tensor_name
    output_name = default_signature.regression_signature.output.tensor_name
    tf_example = [_make_serialized_example(x) for x in [0, 1, 2, 3]]
    y = sess.run([output_name], {input_name: tf_example})
    # The operation is y = 0.5 * x + 2
    self.assertEqual(y[0][0], 2)
    self.assertEqual(y[0][1], 2.5)
    self.assertEqual(y[0][2], 3)
    self.assertEqual(y[0][3], 3.5)

  def _checkNamedSignatures(self, signatures, sess):
    """Run the named inputs/outputs signatures; expects y = 0.5 * x + 2."""
    named_signatures = signatures.named_signatures
    input_name = (named_signatures["inputs"].generic_signature.map["x"]
                  .tensor_name)
    output_name = (named_signatures["outputs"].generic_signature.map["y"]
                   .tensor_name)
    y = sess.run([output_name], {input_name: np.array([[0], [1], [2], [3]])})
    # The operation is y = 0.5 * x + 2
    self.assertEqual(y[0][0], 2)
    self.assertEqual(y[0][1], 2.5)
    self.assertEqual(y[0][2], 3)
    self.assertEqual(y[0][3], 3.5)

  def testMaybeSessionBundleDir(self):
    """Only session-bundle dirs should be detected as such."""
    base_path = test.test_src_dir_path(SESSION_BUNDLE_PATH)
    self.assertTrue(session_bundle.maybe_session_bundle_dir(base_path))
    base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
    self.assertFalse(session_bundle.maybe_session_bundle_dir(base_path))
    base_path = "complete_garbage"
    self.assertFalse(session_bundle.maybe_session_bundle_dir(base_path))

  def testBasic(self):
    """Loading a bundle restores assets and both signature kinds."""
    base_path = test.test_src_dir_path(SESSION_BUNDLE_PATH)
    ops.reset_default_graph()
    sess, meta_graph_def = session_bundle.load_session_bundle_from_path(
        base_path,
        target="",
        config=config_pb2.ConfigProto(device_count={"CPU": 2}))
    self.assertTrue(sess)
    # Check basic signature of the graph to ensure correct execution
    asset_path = os.path.join(base_path, constants.ASSETS_DIRECTORY)
    with sess.as_default():
      path1, path2 = sess.run(["filename1:0", "filename2:0"])
      self.assertEqual(
          compat.as_bytes(os.path.join(asset_path, "hello1.txt")), path1)
      self.assertEqual(
          compat.as_bytes(os.path.join(asset_path, "hello2.txt")), path2)
      collection_def = meta_graph_def.collection_def
      signatures_any = collection_def[constants.SIGNATURES_KEY].any_list.value
      # Fixed: assertEquals is a deprecated alias (removed in Python 3.12).
      self.assertEqual(len(signatures_any), 1)
      signatures = manifest_pb2.Signatures()
      signatures_any[0].Unpack(signatures)
      self._checkRegressionSignature(signatures, sess)
      self._checkNamedSignatures(signatures, sess)

  def METHOD_NAME(self):
    """A missing bundle directory must raise a descriptive RuntimeError."""
    base_path = test.test_src_dir_path("/no/such/a/dir")
    ops.reset_default_graph()
    with self.assertRaises(RuntimeError) as cm:
      _, _ = session_bundle.load_session_bundle_from_path(
          base_path,
          target="local",
          config=config_pb2.ConfigProto(device_count={"CPU": 2}))
    self.assertTrue("Expected meta graph file missing" in str(cm.exception))

  def testVarCheckpointV2(self):
    """Bundles saved with V2 variable checkpoints load identically."""
    base_path = test.test_src_dir_path(
        "contrib/session_bundle/testdata/half_plus_two_ckpt_v2/00000123")
    ops.reset_default_graph()
    sess, meta_graph_def = session_bundle.load_session_bundle_from_path(
        base_path,
        target="",
        config=config_pb2.ConfigProto(device_count={"CPU": 2}))
    self.assertTrue(sess)
    asset_path = os.path.join(base_path, constants.ASSETS_DIRECTORY)
    with sess.as_default():
      path1, path2 = sess.run(["filename1:0", "filename2:0"])
      self.assertEqual(
          compat.as_bytes(os.path.join(asset_path, "hello1.txt")), path1)
      self.assertEqual(
          compat.as_bytes(os.path.join(asset_path, "hello2.txt")), path2)
      collection_def = meta_graph_def.collection_def
      signatures_any = collection_def[constants.SIGNATURES_KEY].any_list.value
      # Fixed: assertEquals is a deprecated alias (removed in Python 3.12).
      self.assertEqual(len(signatures_any), 1)
      signatures = manifest_pb2.Signatures()
      signatures_any[0].Unpack(signatures)
      self._checkRegressionSignature(signatures, sess)
      self._checkNamedSignatures(signatures, sess)
class SessionBundleLoadNoVarsTest(test.TestCase):
  """Test the case where there are no variables in the graph."""

  def setUp(self):
    self.base_path = os.path.join(test.get_temp_dir(), "no_vars")
    if not os.path.exists(self.base_path):
      os.mkdir(self.base_path)
    # Create a simple graph with a variable, then convert variables to
    # constants and export the graph.
    with ops.Graph().as_default() as g:
      x = array_ops.placeholder(dtypes.float32, name="x")
      w = variables.Variable(3.0)
      y = math_ops.subtract(w * x, 7.0, name="y")  # pylint: disable=unused-variable
      ops.add_to_collection("meta", "this is meta")
      with self.session(graph=g) as session:
        variables.global_variables_initializer().run()
        new_graph_def = graph_util.convert_variables_to_constants(
            session, g.as_graph_def(), ["y"])
      filename = os.path.join(self.base_path, constants.META_GRAPH_DEF_FILENAME)
      saver.export_meta_graph(
          filename, graph_def=new_graph_def, collection_list=["meta"])

  def tearDown(self):
    shutil.rmtree(self.base_path)

  def testGraphWithoutVarsLoadsCorrectly(self):
    """Loading a var-free bundle still evaluates the graph and collections."""
    session, _ = session_bundle.load_session_bundle_from_path(self.base_path)
    got = session.run(["y:0"], {"x:0": 5.0})[0]
    # Fixed: assertEquals is a deprecated alias (removed in Python 3.12).
    self.assertEqual(got, 5.0 * 3.0 - 7.0)
    self.assertEqual(ops.get_collection("meta"), [b"this is meta"])
if __name__ == "__main__":
test.main() |
3,055 | test04 custom converter | import py, os, sys
from pytest import raises, skip
from .support import ispypy
class TestAPI:
    """Exercises the CPyCppyy C++ API: type checks, interpreter access,
    proxy conversions, and custom converter/executor registration."""
    def setup_class(cls):
        # The CPyCppyy C API is CPython-specific; skip on PyPy.
        if ispypy:
            skip('C++ API only available on CPython')
        import cppyy
        cppyy.include('CPyCppyy/API.h')
    def test01_type_checking(self):
        """Python class type checks"""
        import cppyy
        cpp = cppyy.gbl
        API = cpp.CPyCppyy
        cppyy.cppdef("""
        class APICheck {
        public:
          void some_method() {}
        };""")
        # Scope_Check accepts any bound C++ scope; CheckExact requires the
        # exact proxy type (APICheck is a subclass, so not exact).
        assert API.Scope_Check(cpp.APICheck)
        assert not API.Scope_CheckExact(cpp.APICheck)
        a = cpp.APICheck()
        assert API.Instance_Check(a)
        assert not API.Instance_CheckExact(a)
        m = a.some_method
        assert API.Overload_Check(m)
        assert API.Overload_CheckExact(m)
    def test02_interpreter_access(self):
        """Access to the python interpreter"""
        import cppyy
        API = cppyy.gbl.CPyCppyy
        # Exec runs a Python statement from the C++ side and reports success.
        assert API.Exec('import sys')
    def test03_instance_conversion(self):
        """Proxy object conversions"""
        import cppyy
        cpp = cppyy.gbl
        API = cpp.CPyCppyy
        cppyy.cppdef("""
        class APICheck2 {
        public:
            virtual ~APICheck2() {}
        };""")
        m = cpp.APICheck2()
        # Round-trip through a raw void*: the same proxy must come back.
        voidp = API.Instance_AsVoidPtr(m)
        m2 = API.Instance_FromVoidPtr(voidp, 'APICheck2')
        assert m is m2
    def METHOD_NAME(self):
        """Custom type converter"""
        import cppyy
        # APICheck3 records (via bit flags) which converter hooks ran;
        # APICheck3Converter sets the matching flag in each hook.
        cppyy.cppdef("""
        #include "CPyCppyy/API.h"
        class APICheck3 {
            int fFlags;
        public:
            APICheck3() : fFlags(0) {}
            virtual ~APICheck3() {}
            void setSetArgCalled() { fFlags |= 0x01; }
            bool wasSetArgCalled() { return fFlags & 0x01; }
            void setFromMemoryCalled() { fFlags |= 0x02; }
            bool wasFromMemoryCalled() { return fFlags & 0x02; }
            void setToMemoryCalled() { fFlags |= 0x04; }
            bool wasToMemoryCalled() { return fFlags & 0x04; }
        };
        class APICheck3Converter : public CPyCppyy::Converter {
        public:
            virtual bool SetArg(PyObject* pyobject, CPyCppyy::Parameter& para, CPyCppyy::CallContext* = nullptr) {
                APICheck3* a3 = (APICheck3*)CPyCppyy::Instance_AsVoidPtr(pyobject);
                a3->setSetArgCalled();
                para.fValue.fVoidp = a3;
                para.fTypeCode = 'V';
                return true;
            }
            virtual PyObject* FromMemory(void* address) {
                APICheck3* a3 = (APICheck3*)address;
                a3->setFromMemoryCalled();
                return CPyCppyy::Instance_FromVoidPtr(a3, "APICheck3");
            }
            virtual bool ToMemory(PyObject* value, void* address) {
                APICheck3* a3 = (APICheck3*)address;
                a3->setToMemoryCalled();
                *a3 = *(APICheck3*)CPyCppyy::Instance_AsVoidPtr(value);
                return true;
            }
        };
        typedef CPyCppyy::ConverterFactory_t cf_t;
        void register_a3() {
            CPyCppyy::RegisterConverter("APICheck3", (cf_t)+[](CPyCppyy::cdims_t) { static APICheck3Converter c{}; return &c; });
            CPyCppyy::RegisterConverter("APICheck3&", (cf_t)+[](CPyCppyy::cdims_t) { static APICheck3Converter c{}; return &c; });
        }
        void unregister_a3() {
            CPyCppyy::UnregisterConverter("APICheck3");
            CPyCppyy::UnregisterConverter("APICheck3&");
        }
        APICheck3 gA3a, gA3b;
        void CallWithAPICheck3(APICheck3&) {}
        """)
        cppyy.gbl.register_a3()
        # With the converter registered, reading the global goes through
        # FromMemory; passing it as an argument goes through SetArg.
        gA3a = cppyy.gbl.gA3a
        assert gA3a
        assert type(gA3a) == cppyy.gbl.APICheck3
        assert gA3a.wasFromMemoryCalled()
        assert not gA3a.wasSetArgCalled()
        cppyy.gbl.CallWithAPICheck3(gA3a)
        assert gA3a.wasSetArgCalled()
        cppyy.gbl.unregister_a3()
        # After unregistering, the default converter is used again, so the
        # custom FromMemory hook must no longer fire.
        gA3b = cppyy.gbl.gA3b
        assert gA3b
        assert type(gA3b) == cppyy.gbl.APICheck3
        assert not gA3b.wasFromMemoryCalled()
    def test05_custom_executor(self):
        """Custom type executor"""
        import cppyy
        # APICheck4Executor wraps the call and flags the returned object.
        cppyy.cppdef("""
        #include "CPyCppyy/API.h"
        class APICheck4 {
            int fFlags;
        public:
            APICheck4() : fFlags(0) {}
            virtual ~APICheck4() {}
            void setExecutorCalled() { fFlags |= 0x01; }
            bool wasExecutorCalled() { return fFlags & 0x01; }
        };
        class APICheck4Executor : public CPyCppyy::Executor {
        public:
            virtual PyObject* Execute(Cppyy::TCppMethod_t meth, Cppyy::TCppObject_t obj, CPyCppyy::CallContext* ctxt) {
                APICheck4* a4 = (APICheck4*)CPyCppyy::CallVoidP(meth, obj, ctxt);
                a4->setExecutorCalled();
                return CPyCppyy::Instance_FromVoidPtr(a4, "APICheck4", true);
            }
        };
        typedef CPyCppyy::ExecutorFactory_t ef_t;
        void register_a4() {
            CPyCppyy::RegisterExecutor("APICheck4*", (ef_t)+[](CPyCppyy::cdims_t) { static APICheck4Executor c{}; return &c; });
        }
        void unregister_a4() {
            CPyCppyy::UnregisterExecutor("APICheck4*");
        }
        APICheck4* CreateAPICheck4() { return new APICheck4{}; }
        APICheck4* CreateAPICheck4b() { return new APICheck4{}; }
        """)
        cppyy.gbl.register_a4()
        # With the executor registered, the factory's APICheck4* return is
        # routed through Execute, which marks the object.
        a4 = cppyy.gbl.CreateAPICheck4()
        assert a4
        assert type(a4) == cppyy.gbl.APICheck4
        assert a4.wasExecutorCalled();
        del a4
        cppyy.gbl.unregister_a4()
        # After unregistering, the default executor is used: no mark.
        a4 = cppyy.gbl.CreateAPICheck4b()
        assert a4
        assert type(a4) == cppyy.gbl.APICheck4
        assert not a4.wasExecutorCalled();
3,056 | interpolate extend to | import bottleneck
import numpy as np
from Orange.data import Table, Domain
from Orange.data.util import SharedComputeValue
from scipy.interpolate import interp1d
from orangecontrib.spectroscopy.data import getx
def is_increasing(a):
    """Return True when the sequence *a* is monotonically non-decreasing."""
    steps = np.diff(a)
    return np.all(steps >= 0)
class PreprocessException(Exception):
    """Base class for preprocessing errors carrying a displayable message."""

    def message(self):
        """Return the first constructor argument, or the class name if none."""
        return self.args[0] if self.args else self.__class__.__name__
class MissingReferenceException(PreprocessException):
    """A required reference was not provided (inferred from the name — no
    raise sites are visible in this chunk)."""
    pass
class WrongReferenceException(PreprocessException):
    """A provided reference was unsuitable (inferred from the name — no
    raise sites are visible in this chunk)."""
    pass
class SelectColumn(SharedComputeValue):
    """SharedComputeValue that extracts a single column from the shared
    transformation's 2D result."""
    def __init__(self, feature, commonfn):
        super().__init__(commonfn)
        # Column index to pick out of the shared computation's result.
        self.feature = feature
    def compute(self, data, common):
        return common[:, self.feature]
class CommonDomain:
    """A utility class that helps constructing common transformation for
    SharedComputeValue features. It does the domain transformation
    (input domain needs to be the same as it was with training data).
    """
    def __init__(self, domain):
        # Target domain the incoming data must be converted into.
        self.domain = domain

    def __call__(self, data):
        data = self.transform_domain(data)
        return self.transformed(data)

    def transform_domain(self, data):
        """Convert *data* into self.domain unless it is already there."""
        if data.domain != self.domain:
            data = data.from_table(self.domain, data)
        return data

    def transformed(self, data):
        """Compute the shared transformation; subclasses must override."""
        # BUG FIX: was `raise NotImplemented`, which raises a TypeError at
        # runtime — NotImplemented is a constant, not an exception class.
        raise NotImplementedError
class CommonDomainRef(CommonDomain):
    """CommonDomain which also ensures reference domain transformation"""

    def __init__(self, reference, domain):
        super().__init__(domain)
        # Reference data (e.g. a background spectrum) used by subclasses.
        self.reference = reference

    def METHOD_NAME(self, interpolate, wavenumbers):
        """
        Interpolate data to given wavenumbers and extend the possibly
        nan-edges with the nearest values.
        """
        ref_x = getx(interpolate)
        # Resample onto the target wavenumbers, then patch NaN edges with
        # the nearest known values so downstream math sees no NaNs.
        resampled = interp1d_with_unknowns_numpy(ref_x, interpolate.X, wavenumbers)
        resampled, _ = nan_extend_edges_and_interpolate(wavenumbers, resampled)
        return resampled
class CommonDomainOrder(CommonDomain):
"""CommonDomain + it also handles wavenumber order.
"""
def __call__(self, data):
data = self.transform_domain(data)
# order X by wavenumbers
xs, xsind, mon, X = transform_to_sorted_features(data)
xc = X.shape[1]
# do the transformation
X = self.transformed(X, xs[xsind])
# restore order
return self._restore_order(X, mon, xsind, xc)
def _restore_order(self, X, mon, xsind, xc):
# restore order and leave additional columns as they are
restored = transform_back_to_features(xsind, mon, X[:, :xc])
return np.hstack((restored, X[:, xc:]))
def transformed(self, X, wavenumbers):
raise NotImplemented
class CommonDomainOrderUnknowns(CommonDomainOrder):
    """CommonDomainOrder + it also handles unknown values: it interpolates
    values before computation and afterwards sets them back to unknown.
    """
    def __call__(self, data):
        data = self.transform_domain(data)
        # order X by wavenumbers
        xs, xsind, mon, X = transform_to_sorted_features(data)
        xc = X.shape[1]
        # interpolates unknowns
        X, nans = nan_extend_edges_and_interpolate(xs[xsind], X)
        # Replace remaining NaNs (where whole rows were NaN) with
        # with some values so that the function does not crash.
        # Results are going to be discarded later.
        remaining_nans = np.isnan(X)
        if np.any(remaining_nans):  # if there were no nans X is a view, so do not modify
            X[remaining_nans] = 1.
        # do the transformation
        X = self.transformed(X, xs[xsind])
        # set NaNs where there were NaNs in the original array
        if nans is not None:
            # transformed can have additional columns
            addc = X.shape[1] - xc
            if addc:
                # Extend the NaN mask with False columns so it matches the
                # transformed array's width.
                nans = np.hstack((nans, np.zeros((X.shape[0], addc), dtype=bool)))
            X[nans] = np.nan
        # restore order
        return self._restore_order(X, mon, xsind, xc)
def nan_extend_edges_and_interpolate(xs, X):
    """Interpolate away NaNs in *X* (rows sampled at wavenumbers *xs*).

    Edge NaNs are filled with the nearest known value (as with
    savgol_filter mode "nearest"); interior NaNs are linearly interpolated
    so they do not propagate.

    Returns:
        (X, mask) where mask marks the original NaN positions, or
        (X, None) when X contained no NaNs (X is then returned unchanged).
    """
    nans = None
    if bottleneck.anynan(X):
        nans = np.isnan(X)
        # Copy before filling so the caller's array is not modified.
        X = X.copy()
        xs, xsind, mon, X = transform_to_sorted_wavenumbers(xs, X)
        fill_edges(X)
        X = interp1d_with_unknowns_numpy(xs[xsind], X, xs[xsind])
        X = transform_back_to_features(xsind, mon, X)
    return X, nans
def transform_to_sorted_features(data):
    """Return (xs, xsind, mon, X) for *data* with columns ordered by wavenumber."""
    wavenumbers = getx(data)
    return transform_to_sorted_wavenumbers(wavenumbers, data.X)
def transform_to_sorted_wavenumbers(xs, X):
    """Sort the columns of X by xs.

    Returns:
        (xs, sort index, already-sorted flag, column-sorted X); X is
        returned untouched when the sort index is already increasing.
    """
    xsind = np.argsort(xs)
    mon = is_increasing(xsind)
    if not mon:
        X = X[:, xsind]
    return xs, xsind, mon, X
def transform_back_to_features(xsind, mon, X):
    """Undo the column sort applied by transform_to_sorted_wavenumbers."""
    if mon:
        return X
    return X[:, np.argsort(xsind)]
def fill_edges_1d(l):
    """Replace (inplace!) NaN at sides with the closest value"""
    known = np.flatnonzero(~np.isnan(l))
    if len(known) == 0:
        # All-NaN rows are left untouched.
        return
    first, last = known[0], known[-1]
    l[:first] = l[first]
    l[last + 1:] = l[last]
def fill_edges(mat):
    """Replace (inplace!) NaN at sides with the closest value, row by row."""
    for row in mat:
        fill_edges_1d(row)
def remove_whole_nan_ys(x, ys):
    """Remove whole NaN columns of ys with corresponding x coordinates."""
    all_nan_columns = np.isnan(ys).all(axis=0)
    if np.any(all_nan_columns):
        keep = ~all_nan_columns
        x = x[keep]
        ys = ys[:, keep]
    return x, ys
def interp1d_with_unknowns_numpy(x, ys, points, kind="linear"):
    """Linearly interpolate each row of ys (sampled at x) onto points.

    NaN values within a row are skipped when building the interpolant so
    they do not propagate. Points outside a row's known range are set to
    NaN (unknowns at the edges are not extrapolated). Rows that are all
    NaN stay all NaN.

    :param x: 1D array of sample positions (any order; sorted internally).
    :param ys: 2D array, one series per row.
    :param points: positions at which to evaluate each row.
    :param kind: only "linear" is supported; anything else raises
        NotImplementedError.
    :return: 2D array of shape (len(ys), len(points)).
    """
    if kind != "linear":
        raise NotImplementedError
    # np.full avoids the extra multiply pass of np.zeros(...) * np.nan
    out = np.full((len(ys), len(points)), np.nan)
    sorti = np.argsort(x)
    x = x[sorti]
    for i, y in enumerate(ys):
        y = y[sorti]
        nan = np.isnan(y)
        xt = x[~nan]
        yt = y[~nan]
        # do not interpolate unknowns at the edges
        if len(xt):  # check if all values are removed
            out[i] = np.interp(points, xt, yt, left=np.nan, right=np.nan)
    return out
def interp1d_with_unknowns_scipy(x, ys, points, kind="linear"):
    """Interpolate each row of ys (sampled at x) onto points via scipy.

    Like interp1d_with_unknowns_numpy, but supports any `kind` accepted by
    scipy.interpolate.interp1d. NaNs within a row are skipped; points
    outside a row's known range become NaN; all-NaN rows stay all NaN.

    :return: 2D array of shape (len(ys), len(points)).
    """
    # np.full avoids the extra multiply pass of np.zeros(...) * np.nan
    out = np.full((len(ys), len(points)), np.nan)
    sorti = np.argsort(x)
    x = x[sorti]
    for i, y in enumerate(ys):
        y = y[sorti]
        nan = np.isnan(y)
        xt = x[~nan]
        yt = y[~nan]
        if len(xt):  # check if all values are removed
            out[i] = interp1d(xt, yt, fill_value=np.nan, assume_sorted=True,
                              bounds_error=False, kind=kind, copy=False)(points)
    return out
def interp1d_wo_unknowns_scipy(x, ys, points, kind="linear"):
    """Interpolate the rows of ys (assumed NaN-free) at the given points;
    out-of-range points evaluate to NaN."""
    interpolator = interp1d(x, ys, fill_value=np.nan, kind=kind, bounds_error=False)
    return interpolator(points)
def edge_baseline(x, y):
    """Baseline from edges. Assumes data without NaNs.

    Returns the scalar 0 (not an array) when x is empty; callers must
    handle both return shapes.
    """
    return linear_baseline(x, y, zero_points=[x[0], x[-1]]) if len(x) else 0
def linear_baseline(x, y, zero_points=None):
    """Compute, for each row of y, the piecewise-linear baseline through the
    row's (extrapolated) values at the given zero_points.

    :param x: 1D array of x coordinates matching the columns of y.
    :param y: 2D array; baselines are computed row-wise (axis=1).
    :param zero_points: x positions the baseline must pass through.
    :return: array shaped like y with the baselines, or the scalar 0 when
        x is empty (mirrors edge_baseline's convention).
    :raises ValueError: if zero_points is not provided.
    """
    if len(x) == 0:
        return 0
    if zero_points is None:
        # Previously a missing zero_points fell through into an obscure
        # TypeError inside scipy; fail with a clear message instead.
        raise ValueError("zero_points must be provided for a linear baseline")
    values_zero_points = interp1d(x, y, axis=1, fill_value="extrapolate")(zero_points)
    return interp1d(zero_points, values_zero_points, axis=1, fill_value="extrapolate")(x)
def replace_infs(array):
    """ Replaces inf and -inf with nan, in place, and returns the array.
    This should be used anywhere a divide-by-zero can happen (/, np.log10, etc)"""
    infinite = np.isinf(array)
    array[infinite] = np.nan
    return array
def replacex(data: Table, replacement: list):
    """Return a copy of *data* whose attributes are renamed to the string
    form of the positionally-matched values in *replacement*.

    Only names change; the underlying values are shared through a domain
    transform.
    """
    assert len(data.domain.attributes) == len(replacement)
    natts = [at.renamed(str(n)) for n, at in zip(replacement, data.domain.attributes)]
    ndom = Domain(natts, data.domain.class_vars, data.domain.metas)
    return data.transform(ndom)
3,057 | todense impl | # Copyright 2021 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""JAX primitives related to sparse operations.
This is experimental work to explore sparse support in JAX.
The primitives defined here are deliberately low-level: each primitive implements
a common sparse operation (sparse to dense, dense to sparse, sparse matrix/vector
product, sparse matrix/matrix product) for two common sparse representations
(CSR and COO).
These routines have reference implementations defined via XLA scatter/gather
operations that will work on any backend, although they are not particularly
performant. On GPU runtimes built against CUDA 11.0/ROCm 5.0 or newer, each operation is
computed efficiently via cusparse/hipsparse.
Further down are some examples of potential high-level wrappers for sparse objects.
(API should be considered unstable and subject to change).
"""
from functools import partial
import operator
from typing import Optional, Union
import jax
from jax import tree_util
from jax.experimental.sparse._base import JAXSparse
from jax.experimental.sparse.bcoo import BCOO
from jax.experimental.sparse.bcsr import BCSR
from jax.experimental.sparse.coo import COO
from jax.experimental.sparse.csr import CSR, CSC
from jax.experimental.sparse.util import _coo_extract
from jax.interpreters import mlir
from jax._src import core
from jax._src import dtypes
from jax._src.interpreters import ad
from jax._src.interpreters import batching
from jax._src.typing import Array, DTypeLike, Shape
#----------------------------------------------------------------------
# todense – function to convert sparse matrices to dense while letting
# dense matrices pass through.
todense_p = core.Primitive('todense')
todense_p.multiple_results = False
def todense(arr: Union[JAXSparse, Array]) -> Array:
    """Convert input to a dense matrix. If input is already dense, pass through."""
    # Flatten to leaf buffers so a single primitive handles any sparse pytree
    # (a dense array flattens to itself and round-trips unchanged).
    bufs, tree = tree_util.tree_flatten(arr)
    return todense_p.bind(*bufs, tree=tree)
@todense_p.def_impl
def METHOD_NAME(*bufs, tree):
    """Reference implementation: rebuild the flattened object and densify it
    (dense inputs pass through unchanged)."""
    arr = tree_util.tree_unflatten(tree, bufs)
    return arr.todense() if isinstance(arr, JAXSparse) else arr
@todense_p.def_abstract_eval
def _todense_abstract_eval(*bufs, tree):
    """Abstract evaluation: the dense output has the sparse object's shape and
    dtype; weak typing follows the data buffer."""
    arr = tree_util.tree_unflatten(tree, bufs)
    if isinstance(arr, core.ShapedArray):
        # input was already dense: its aval passes through unchanged
        return arr
    return core.ShapedArray(arr.shape, arr.dtype, weak_type=dtypes.is_weakly_typed(arr.data))
def _todense_jvp(primals, tangents, *, tree):
    """JVP rule: only the data buffer (position 0) may carry a tangent; the
    remaining buffers are integer index arrays whose tangents must be Zero."""
    assert not isinstance(tangents[0], ad.Zero)
    assert all(isinstance(t, ad.Zero) for t in tangents[1:])
    primals_out = todense_p.bind(*primals, tree=tree)
    # Densify the tangent data against the primal index buffers.
    tangents_out = todense_p.bind(tangents[0], *primals[1:], tree=tree)
    return primals_out, tangents_out
def _todense_transpose(ct, *bufs, tree):
    """Transpose rule: gather the dense cotangent back into the sparse layout.

    Only the data buffer is an undefined primal; index buffers are known and
    returned unchanged alongside the extracted cotangent data.
    """
    assert ad.is_undefined_primal(bufs[0])
    assert not any(ad.is_undefined_primal(buf) for buf in bufs[1:])
    # Unflatten with a sentinel to recover only the container TYPE, without
    # needing concrete buffer values.
    standin = object()
    obj = tree_util.tree_unflatten(tree, [standin] * len(bufs))
    from jax.experimental.sparse import BCOO, BCSR
    from jax.experimental.sparse.bcoo import _bcoo_extract
    from jax.experimental.sparse.bcsr import bcsr_extract
    if obj is standin:
        # input was dense: cotangent passes straight through
        return (ct,)
    elif isinstance(obj, BCOO):
        _, indices = bufs
        return _bcoo_extract(indices, ct), indices
    elif isinstance(obj, BCSR):
        _, indices, indptr = bufs
        return bcsr_extract(indices, indptr, ct), indices, indptr
    elif isinstance(obj, COO):
        _, row, col = bufs
        return _coo_extract(row, col, ct), row, col
    else:
        raise NotImplementedError(f"todense_transpose for {type(obj)}")
def _todense_batching_rule(batched_args, batch_dims, *, tree):
    """Batching rule: vmap the reference implementation; the result is
    batched along axis 0."""
    return jax.vmap(partial(METHOD_NAME, tree=tree), batch_dims)(*batched_args), 0
# Register the differentiation, batching and lowering rules for todense_p
# with the corresponding JAX interpreters.
ad.primitive_jvps[todense_p] = _todense_jvp
ad.primitive_transposes[todense_p] = _todense_transpose
batching.primitive_batchers[todense_p] = _todense_batching_rule
mlir.register_lowering(todense_p, mlir.lower_fun(
    METHOD_NAME, multiple_results=False))
def empty(shape: Shape, dtype: Optional[DTypeLike]=None, index_dtype: DTypeLike = 'int32',
          sparse_format: str = 'bcoo', **kwds) -> JAXSparse:
    """Create an empty sparse array.
    Args:
      shape: sequence of integers giving the array shape.
      dtype: (optional) dtype of the array.
      index_dtype: (optional) dtype of the index arrays.
      sparse_format: string specifying the matrix format (one of
        ['bcsr', 'bcoo', 'coo', 'csr', 'csc']).
      **kwds: additional keywords passed to the format-specific _empty constructor.
    Returns:
      mat: empty sparse matrix.
    Raises:
      ValueError: if sparse_format is not one of the supported formats.
    """
    formats = {'bcsr': BCSR, 'bcoo': BCOO, 'coo': COO, 'csr': CSR, 'csc': CSC}
    if sparse_format not in formats:
        raise ValueError(f"sparse_format={sparse_format!r} not recognized; "
                         f"must be one of {list(formats.keys())}")
    cls = formats[sparse_format]
    return cls._empty(shape, dtype=dtype, index_dtype=index_dtype, **kwds)
def eye(N: int, M: Optional[int] = None, k: int = 0, dtype: Optional[DTypeLike] = None,
        index_dtype: DTypeLike = 'int32', sparse_format: str = 'bcoo', **kwds) -> JAXSparse:
    """Create 2D sparse identity matrix.
    Args:
      N: int. Number of rows in the output.
      M: int, optional. Number of columns in the output. If None, defaults to `N`.
      k: int, optional. Index of the diagonal: 0 (the default) refers to the main
        diagonal, a positive value refers to an upper diagonal, and a negative value
        to a lower diagonal.
      dtype: data-type, optional. Data-type of the returned array.
      index_dtype: (optional) dtype of the index arrays.
      sparse_format: string specifying the matrix format (one of
        ['bcoo', 'coo', 'csr', 'csc']).
      **kwds: additional keywords passed to the format-specific _eye constructor.
    Returns:
      I: two-dimensional sparse matrix with ones along the k-th diagonal.
    Raises:
      ValueError: if sparse_format is not one of the supported formats.
    """
    # NOTE(review): unlike empty(), 'bcsr' is not offered here — confirm
    # whether BCSR._eye exists before adding it to this mapping.
    formats = {'bcoo': BCOO, 'coo': COO, 'csr': CSR, 'csc': CSC}
    # Validate up front for a clear error, consistent with empty() above
    # (previously an unknown format surfaced as a bare KeyError).
    if sparse_format not in formats:
        raise ValueError(f"sparse_format={sparse_format!r} not recognized; "
                         f"must be one of {list(formats.keys())}")
    if M is None:
        M = N
    N = core.concrete_or_error(operator.index, N)
    M = core.concrete_or_error(operator.index, M)
    k = core.concrete_or_error(operator.index, k)
    cls = formats[sparse_format]
    return cls._eye(M=M, N=N, k=k, dtype=dtype, index_dtype=index_dtype, **kwds)
3,058 | get lookup key | """Provides Registry mapping utility."""
from testplan.common.utils import logger
class Registry(logger.Loggable):
    """
    A utility that provides a decorator (`@registry.bind`) for
    mapping objects to another (decorated) class.
    Supports absolute or category based
    defaults via `@registry.bind_default` decorator as well.
    Example:
    >>> registry = Registry()
    >>> class ClassA:
    ...     pass
    >>> # instances of ClassA are now bound to ClassB for this registry
    >>> @registry.bind(ClassA)
    >>> class ClassB:
    ...     pass
    >>> obj_a = ClassA()
    >>> registry[obj_a] is ClassB
    ... True
    """

    def __init__(self):
        self.data = {}  # record key -> bound value
        self._default = None  # absolute fallback, assignable only once
        self._category_defaults = {}  # category key -> fallback value
        super().__init__()  # modernized from super(Registry, self).__init__()

    @property
    def default(self):
        """Absolute default returned when neither a mapping nor a category
        default matches during lookup."""
        return self._default

    @default.setter
    def default(self, value):
        # The absolute default is write-once to avoid silent re-binding.
        if self._default is not None:
            raise ValueError(
                "Cannot re-bind default value. (Existing: {})".format(
                    self.default
                )
            )
        self._default = value

    def METHOD_NAME(self, obj):
        """
        This method is used for generating the key when doing a lookup
        from the registry. Object class is used by default.
        """
        return obj.__class__

    def get_record_key(self, obj):
        """
        This method is used for generating the key when we bind
        an object (possibly a class) via the registry.
        """
        return obj

    def get_category(self, obj):
        """
        Override this to define logic for generating
        the category key from the object instance.
        """
        try:
            # Attribute access is tried first with item access as fallback.
            # NOTE: the previous getattr(obj, "category", obj["category"])
            # evaluated the fallback eagerly, raising TypeError for
            # non-subscriptable objects even when they DID have a
            # `category` attribute.
            try:
                return obj.category
            except AttributeError:
                return obj["category"]
        except KeyError:
            # User has registered defaults for a category
            # however category retrieval from object failed
            # Need to fail explicitly and warn the user
            if self._category_defaults:
                raise NotImplementedError(
                    "Could not retrieve category information from: {}."
                    "You may need to override `get_category`"
                    "of the registry.".format(obj)
                )
            raise

    def _get_default(self, obj):
        """Return the category default for obj, falling back to the absolute
        default; raise KeyError when neither exists."""
        try:
            return self._category_defaults[self.get_category(obj)]
        except KeyError:
            if self._default:
                return self._default
            raise KeyError("No mapping found for: {}".format(obj))

    def __getitem__(self, item):
        try:
            return self.data[self.METHOD_NAME(item)]
        except KeyError:
            return self._get_default(item)

    def __setitem__(self, key, value):
        key = self.get_record_key(key)
        self.data[key] = value

    def bind(self, *classes):
        """
        Decorator for binding one or more classes to another.
        :param classes: One or more classes that
        will be bound to the decorated class.
        """

        def wrapper(value):
            for kls in classes:
                self[kls] = value
            return value

        return wrapper

    def bind_default(self, category=None):
        """
        Decorator for binding a class as category based or absolute default.
        :param category: (optional) If provided, the decorated class will
        be the default for the given category, otherwise
        it will be the absolute default.
        """

        def wrapper(value):
            if category:
                # Category defaults are also write-once.
                if category in self._category_defaults:
                    raise ValueError(
                        "Cannot overwrite default value "
                        "for category: {}".format(category)
                    )
                self._category_defaults[category] = value
            else:
                self.default = value
            return value

        return wrapper
3,059 | from dict | # Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at http://aws.amazon.com/apache2.0/
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=R0801
from pcluster.api import util
from pcluster.api.models.base_model_ import Model
class AmiInfo(Model):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
    Do not edit the class manually.
    """

    def __init__(self, ami_id=None, os=None, name=None, version=None, architecture=None):
        """AmiInfo - a model defined in OpenAPI
        :param ami_id: The ami_id of this AmiInfo.
        :type ami_id: str
        :param os: The os of this AmiInfo.
        :type os: str
        :param name: The name of this AmiInfo.
        :type name: str
        :param version: The version of this AmiInfo.
        :type version: str
        :param architecture: The architecture of this AmiInfo.
        :type architecture: str
        """
        self.openapi_types = {"ami_id": str, "os": str, "name": str, "version": str, "architecture": str}
        self.attribute_map = {
            "ami_id": "amiId",
            "os": "os",
            "name": "name",
            "version": "version",
            "architecture": "architecture",
        }
        self._ami_id = ami_id
        self._os = os
        self._name = name
        self._version = version
        self._architecture = architecture

    @classmethod
    def METHOD_NAME(cls, dikt) -> "AmiInfo":
        """Returns the dict as a model
        :param dikt: A dict.
        :type: dict
        :return: The AmiInfo of this AmiInfo.
        :rtype: AmiInfo
        """
        return util.deserialize_model(dikt, cls)

    @property
    def ami_id(self):
        """Gets the ami_id of this AmiInfo.
        :return: The ami_id of this AmiInfo.
        :rtype: str
        """
        return self._ami_id

    @ami_id.setter
    def ami_id(self, ami_id):
        """Sets the ami_id of this AmiInfo.
        :param ami_id: The ami_id of this AmiInfo.
        :type ami_id: str
        """
        if ami_id is None:
            raise ValueError("Invalid value for `ami_id`, must not be `None`")
        self._ami_id = ami_id

    @property
    def os(self):
        """Gets the os of this AmiInfo.
        :return: The os of this AmiInfo.
        :rtype: str
        """
        return self._os

    @os.setter
    def os(self, os):
        """Sets the os of this AmiInfo.
        :param os: The os of this AmiInfo.
        :type os: str
        """
        if os is None:
            raise ValueError("Invalid value for `os`, must not be `None`")
        self._os = os

    @property
    def name(self):
        """Gets the name of this AmiInfo.
        :return: The name of this AmiInfo.
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this AmiInfo.
        :param name: The name of this AmiInfo.
        :type name: str
        """
        if name is None:
            raise ValueError("Invalid value for `name`, must not be `None`")
        self._name = name

    @property
    def version(self):
        """Gets the version of this AmiInfo.
        :return: The version of this AmiInfo.
        :rtype: str
        """
        return self._version

    @version.setter
    def version(self, version):
        """Sets the version of this AmiInfo.
        :param version: The version of this AmiInfo.
        :type version: str
        """
        if version is None:
            raise ValueError("Invalid value for `version`, must not be `None`")  # noqa: E501
        self._version = version

    @property
    def architecture(self):
        """Gets the architecture of this AmiInfo.
        :return: The architecture of this AmiInfo.
        :rtype: str
        """
        return self._architecture

    @architecture.setter
    def architecture(self, architecture):
        """Sets the architecture of this AmiInfo.
        :param architecture: The architecture of this AmiInfo.
        :type architecture: str
        """
        if architecture is None:
            raise ValueError("Invalid value for `architecture`, must not be `None`")
        self._architecture = architecture
3,060 | test start agent with no args | from unittest.mock import ANY
import prefect.cli.agent
from prefect import PrefectClient
from prefect.settings import PREFECT_AGENT_PREFETCH_SECONDS, temporary_settings
from prefect.testing.cli import invoke_and_assert
from prefect.testing.utilities import MagicMock
from prefect.utilities.asyncutils import run_sync_in_worker_thread
def METHOD_NAME():
    """Starting the agent without any work-queue selector must fail fast."""
    invoke_and_assert(
        command=["agent", "start"],
        expected_output="No work queues provided!",
        expected_code=1,
    )
def test_start_agent_run_once():
    """--run-once starts the agent, polls once, and stops cleanly."""
    invoke_and_assert(
        command=["agent", "start", "--run-once", "-q", "test"],
        expected_code=0,
        expected_output_contains=["Agent started!", "Agent stopped!"],
    )
async def test_start_agent_creates_work_queue(prefect_client: PrefectClient):
    """Starting the agent auto-creates the named work queue if missing."""
    await run_sync_in_worker_thread(
        invoke_and_assert,
        command=["agent", "start", "--run-once", "-q", "test"],
        expected_code=0,
        expected_output_contains=["Agent stopped!", "Agent started!"],
    )
    queue = await prefect_client.read_work_queue_by_name("test")
    assert queue
    assert queue.name == "test"
def test_start_agent_with_work_queue_and_tags():
    """Work queues (positional or -q) and tags are mutually exclusive."""
    invoke_and_assert(
        command=["agent", "start", "hello", "-t", "blue"],
        expected_output_contains=(
            "Only one of `work_queues`, `match`, or `tags` can be provided."
        ),
        expected_code=1,
    )
    invoke_and_assert(
        command=["agent", "start", "-q", "hello", "-t", "blue"],
        expected_output_contains=(
            "Only one of `work_queues`, `match`, or `tags` can be provided."
        ),
        expected_code=1,
    )
def test_start_agent_with_prefetch_seconds(monkeypatch):
    """--prefetch-seconds is forwarded to the PrefectAgent constructor."""
    mock_agent = MagicMock()
    monkeypatch.setattr(prefect.cli.agent, "PrefectAgent", mock_agent)
    invoke_and_assert(
        command=[
            "agent",
            "start",
            "--prefetch-seconds",
            "30",
            "-q",
            "test",
            "--run-once",
        ],
        expected_code=0,
    )
    mock_agent.assert_called_once_with(
        work_queues=["test"],
        work_queue_prefix=ANY,
        work_pool_name=None,
        prefetch_seconds=30,
        limit=None,
    )
def test_start_agent_with_prefetch_seconds_from_setting_by_default(monkeypatch):
    """Without --prefetch-seconds the PREFECT_AGENT_PREFETCH_SECONDS setting is used."""
    mock_agent = MagicMock()
    monkeypatch.setattr(prefect.cli.agent, "PrefectAgent", mock_agent)
    with temporary_settings({PREFECT_AGENT_PREFETCH_SECONDS: 100}):
        invoke_and_assert(
            command=[
                "agent",
                "start",
                "-q",
                "test",
                "--run-once",
            ],
            expected_code=0,
        )
    mock_agent.assert_called_once_with(
        work_queues=ANY,
        work_queue_prefix=ANY,
        work_pool_name=None,
        prefetch_seconds=100,
        limit=None,
    )
def test_start_agent_respects_work_queue_names(monkeypatch):
    """Repeated -q options are collected into work_queues in order."""
    mock_agent = MagicMock()
    monkeypatch.setattr(prefect.cli.agent, "PrefectAgent", mock_agent)
    invoke_and_assert(
        command=["agent", "start", "-q", "a", "-q", "b", "--run-once"],
        expected_code=0,
    )
    mock_agent.assert_called_once_with(
        work_queues=["a", "b"],
        work_queue_prefix=[],
        work_pool_name=None,
        prefetch_seconds=ANY,
        limit=None,
    )
def test_start_agent_respects_work_queue_prefixes(monkeypatch):
    """Repeated -m options are collected into work_queue_prefix in order."""
    mock_agent = MagicMock()
    monkeypatch.setattr(prefect.cli.agent, "PrefectAgent", mock_agent)
    invoke_and_assert(
        command=["agent", "start", "-m", "a", "-m", "b", "--run-once"],
        expected_code=0,
    )
    mock_agent.assert_called_once_with(
        work_queues=[],
        work_queue_prefix=["a", "b"],
        work_pool_name=None,
        prefetch_seconds=ANY,
        limit=None,
    )
def test_start_agent_respects_limit(monkeypatch):
    """--limit is forwarded to the PrefectAgent constructor."""
    mock_agent = MagicMock()
    monkeypatch.setattr(prefect.cli.agent, "PrefectAgent", mock_agent)
    invoke_and_assert(
        command=["agent", "start", "--limit", "10", "--run-once", "-q", "test"],
        expected_code=0,
    )
    mock_agent.assert_called_once_with(
        work_queues=["test"],
        work_queue_prefix=[],
        work_pool_name=None,
        prefetch_seconds=ANY,
        limit=10,
    )
def test_start_agent_respects_work_pool_name(monkeypatch):
    """--pool is forwarded as work_pool_name alongside the work queue."""
    mock_agent = MagicMock()
    monkeypatch.setattr(prefect.cli.agent, "PrefectAgent", mock_agent)
    invoke_and_assert(
        command=["agent", "start", "--pool", "test-pool", "--run-once", "-q", "test"],
        expected_code=0,
    )
    mock_agent.assert_called_once_with(
        work_queues=["test"],
        work_queue_prefix=[],
        work_pool_name="test-pool",
        prefetch_seconds=ANY,
        limit=None,
    )
def test_start_agent_with_work_queue_match_and_work_queue():
    """Work queues and --match prefixes are mutually exclusive."""
    invoke_and_assert(
        command=["agent", "start", "hello", "-m", "blue"],
        expected_output_contains=(
            "Only one of `work_queues`, `match`, or `tags` can be provided."
        ),
        expected_code=1,
    )
    invoke_and_assert(
        command=["agent", "start", "-q", "hello", "--match", "blue"],
        expected_output_contains=(
            "Only one of `work_queues`, `match`, or `tags` can be provided."
        ),
        expected_code=1,
    )
def test_start_agent_with_just_work_pool(monkeypatch):
    """--pool alone is valid; work queue selectors default to empty."""
    mock_agent = MagicMock()
    monkeypatch.setattr(prefect.cli.agent, "PrefectAgent", mock_agent)
    invoke_and_assert(
        command=["agent", "start", "--pool", "test-pool", "--run-once"],
        expected_code=0,
    )
    mock_agent.assert_called_once_with(
        work_queues=[],
        work_queue_prefix=[],
        work_pool_name="test-pool",
        prefetch_seconds=ANY,
        limit=None,
    )
def test_start_agent_errors_with_work_pool_and_tags():
    """--pool and --tag are mutually exclusive."""
    invoke_and_assert(
        command=[
            "agent",
            "start",
            "--pool",
            "test-pool",
            "--run-once",
            "--tag",
            "test",
        ],
        expected_output_contains="`tag` and `pool` options cannot be used together.",
        expected_code=1,
    )
3,061 | create acl policy | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.profiles import ResourceType
def METHOD_NAME(cmd, client, policy_name, start=None, expiry=None, permission=None, **kwargs):
    """Create a stored access policy on the containing object.

    :param policy_name: name under which the policy is stored in the ACL.
    :param start: start of the policy's validity window.
    :param expiry: end of the policy's validity window.
    :param permission: permission string granted by the policy.
    """
    t_access_policy = cmd.get_models('_models#AccessPolicy', resource_type=ResourceType.DATA_STORAGE_BLOB)
    acl = _get_acl(cmd, client, **kwargs)
    # NOTE(review): AccessPolicy is constructed positionally as
    # (permission, expiry, start) — confirm against the SDK signature.
    acl[policy_name] = t_access_policy(permission, expiry, start)
    if hasattr(acl, 'public_access'):
        # preserve the container's public-access setting when writing back
        kwargs['public_access'] = getattr(acl, 'public_access')
    return _set_acl(cmd, client, acl, **kwargs)
def get_acl_policy(cmd, client, policy_name, **kwargs):
    """Show a stored access policy on a containing object.

    Returns None when no policy with that name exists.
    """
    acl = _get_acl(cmd, client, **kwargs)
    return acl.get(policy_name)
def list_acl_policies(cmd, client, **kwargs):
    """List stored access policies on a containing object"""
    # The full ACL mapping (policy name -> AccessPolicy) is the listing.
    return _get_acl(cmd, client, **kwargs)
def set_acl_policy(cmd, client, policy_name, start=None, expiry=None, permission=None, **kwargs):
    """Set a stored access policy on a containing object.

    At least one of start/expiry/permission must be supplied; omitted
    fields keep their existing values.

    :raises CLIError: if no property is given or the policy does not exist.
    """
    if not (start or expiry or permission):
        from knack.util import CLIError
        raise CLIError('Must specify at least one property when updating an access policy.')
    acl = _get_acl(cmd, client, **kwargs)
    try:
        policy = acl[policy_name]
        if policy is None:
            # The ACL may store a bare name with no policy body yet;
            # create a fresh AccessPolicy in that case.
            t_access_policy = cmd.get_models('_models#AccessPolicy', resource_type=ResourceType.DATA_STORAGE_BLOB)
            acl[policy_name] = t_access_policy(permission, expiry, start)
        else:
            policy.start = start if start else policy.start
            policy.expiry = expiry if expiry else policy.expiry
            policy.permission = permission or policy.permission
        if hasattr(acl, 'public_access'):
            # preserve the container's public-access setting when writing back
            kwargs['public_access'] = getattr(acl, 'public_access')
    except KeyError:
        from knack.util import CLIError
        raise CLIError('ACL does not contain {}'.format(policy_name))
    return _set_acl(cmd, client, acl, **kwargs)
def delete_acl_policy(cmd, client, policy_name, **kwargs):
    """ Delete a stored access policy on a containing object """
    acl = _get_acl(cmd, client, **kwargs)
    # A missing policy name surfaces as a KeyError from this del.
    del acl[policy_name]
    if hasattr(acl, 'public_access'):
        # preserve the container's public-access setting when writing back
        kwargs['public_access'] = getattr(acl, 'public_access')
    return _set_acl(cmd, client, acl, **kwargs)
def _get_service_container_type(cmd, client):
    """Map a storage SDK client instance to its containing-object name:
    'container', 'share', 'queue' or 'table'.

    :raises ValueError: for unrecognized client types.
    """
    t_blob_svc = cmd.get_models('_container_client#ContainerClient', resource_type=ResourceType.DATA_STORAGE_BLOB)
    if isinstance(client, t_blob_svc):
        return "container"
    t_file_svc = cmd.get_models('_share_client#ShareClient', resource_type=ResourceType.DATA_STORAGE_FILESHARE)
    if isinstance(client, t_file_svc):
        return "share"
    t_queue_svc = cmd.get_models('_queue_client#QueueClient', resource_type=ResourceType.DATA_STORAGE_QUEUE)
    if isinstance(client, t_queue_svc):
        return "queue"
    from azure.data.tables._table_client import TableClient
    if isinstance(client, TableClient):
        return 'table'
    raise ValueError('Unsupported service {}'.format(type(client)))
def _get_acl(cmd, client, **kwargs):
    """Fetch the ACL of the containing object via the service-specific
    ``get_<container>_access_policy`` method, normalizing permissions."""
    container = _get_service_container_type(cmd, client)
    get_acl_fn = getattr(client, 'get_{}_access_policy'.format(container))
    # When setting acl, sdk will validate that AccessPolicy.permission cannot be None, but '' is OK.
    # So we convert every permission=None to permission='' here.
    # This can be removed after sdk deprecate the validation.
    return convert_acl_permissions(get_acl_fn(**kwargs))
def convert_acl_permissions(result):
    """Normalize an ACL fetched from the SDK.

    Unwraps a 'signed_identifiers' payload into a {policy id: access policy}
    mapping and rewrites permission=None to permission='' so the SDK's
    set-side validation does not reject the policies.
    """
    if result is None:
        return None
    if 'signed_identifiers' in result:
        result = {identifier.id: identifier.access_policy
                  for identifier in result["signed_identifiers"]}
    for policy in sorted(result.keys()):
        entry = result[policy]
        if entry is None:
            continue
        if entry.permission is None:
            entry.permission = ''
    return result
def _set_acl(cmd, client, acl, **kwargs):
    """Write a full ACL back onto the containing object.

    Resolves the service-specific ``set_<container>_access_policy`` method on
    the client and invokes it with the given ACL.

    :raises CLIError: if the SDK method is missing or rejects the arguments,
        which usually indicates an incompatible azure SDK version.
    """
    from knack.util import CLIError
    method_name = 'set_{}_access_policy'.format(_get_service_container_type(cmd, client))
    try:
        method = getattr(client, method_name)
        return method(acl, **kwargs)
    except TypeError:
        # Fixed: missing space produced "may not becompatible".
        raise CLIError("Failed to invoke SDK method {}. The installed azure SDK may not be "
                       "compatible to this version of Azure CLI.".format(method_name))
    except AttributeError:
        # Fixed: the format arguments were swapped, printing the client class
        # in the "function" slot and the method name in the "from" slot.
        raise CLIError("Failed to get function {} from {}. The installed azure SDK may not be "
                       "compatible to this version of Azure CLI.".format(
                           method_name, client.__class__.__name__))
3,062 | find project id | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from unittest import mock
from twisted.internet import defer
from twisted.trial import unittest
from buildbot.data import projects
from buildbot.test import fakedb
from buildbot.test.fake import fakemaster
from buildbot.test.reactor import TestReactorMixin
from buildbot.test.util import endpoint
from buildbot.test.util import interfaces
class ProjectEndpoint(endpoint.EndpointMixin, unittest.TestCase):
    """Tests for the single-project data endpoint (lookup by id or name)."""

    endpointClass = projects.ProjectEndpoint
    resourceTypeClass = projects.Project

    @defer.inlineCallbacks
    def setUp(self):
        self.setUpEndpoint()
        # two known projects the lookups below rely on
        yield self.db.insert_test_data([
            fakedb.Project(id=1, name='project1'),
            fakedb.Project(id=2, name='project2'),
        ])

    def tearDown(self):
        self.tearDownEndpoint()

    @defer.inlineCallbacks
    def test_get_existing_id(self):
        """A project can be fetched by its numeric id."""
        project = yield self.callGet(('projects', 2))
        self.validateData(project)
        self.assertEqual(project['name'], 'project2')

    @defer.inlineCallbacks
    def test_get_existing_name(self):
        """A project can be fetched by its name."""
        project = yield self.callGet(('projects', 'project2'))
        self.validateData(project)
        self.assertEqual(project['name'], 'project2')

    @defer.inlineCallbacks
    def test_get_missing(self):
        """An unknown id yields None rather than an error."""
        project = yield self.callGet(('projects', 99))
        self.assertIsNone(project)

    @defer.inlineCallbacks
    def test_get_missing_with_name(self):
        """An unknown name yields None rather than an error."""
        project = yield self.callGet(('projects', 'project99'))
        self.assertIsNone(project)
class ProjectsEndpoint(endpoint.EndpointMixin, unittest.TestCase):
    """Tests for the /projects collection endpoint."""

    endpointClass = projects.ProjectsEndpoint
    resourceTypeClass = projects.Project

    @defer.inlineCallbacks
    def setUp(self):
        self.setUpEndpoint()
        yield self.db.insert_test_data([
            fakedb.Project(id=1, name='project1'),
            fakedb.Project(id=2, name='project2'),
        ])

    def tearDown(self):
        self.tearDownEndpoint()

    @defer.inlineCallbacks
    def test_get(self):
        """Listing the collection returns all projects, each valid."""
        # Named differently from the module-level `projects` import, which
        # the previous local variable shadowed.
        project_list = yield self.callGet(('projects',))
        for b in project_list:
            self.validateData(b)
        self.assertEqual(sorted([b['projectid'] for b in project_list]), [1, 2])
class Project(interfaces.InterfaceTests, TestReactorMixin, unittest.TestCase):
    """Tests for the Project data resource-type update methods."""

    @defer.inlineCallbacks
    def setUp(self):
        self.setup_test_reactor()
        self.master = fakemaster.make_master(self, wantMq=True, wantDb=True,
                                             wantData=True)
        self.rtype = projects.Project(self.master)
        # one known project the update tests operate on
        yield self.master.db.insert_test_data([
            fakedb.Project(id=13, name="fake_project"),
        ])

    def test_signature_find_project_id(self):
        # the fake data-updates method must match the real one's signature
        @self.assertArgSpecMatches(
            self.master.data.updates.METHOD_NAME,  # fake
            self.rtype.METHOD_NAME)  # real
        def METHOD_NAME(self, name):
            pass

    def test_find_project_id(self):
        # this just passes through to the db method, so test that
        rv = defer.succeed(None)
        self.master.db.projects.METHOD_NAME = mock.Mock(return_value=rv)
        self.assertIdentical(self.rtype.METHOD_NAME('foo'), rv)

    def test_signature_update_project_info(self):
        # signature check for the update_project_info data-updates method
        @self.assertArgSpecMatches(self.master.data.updates.update_project_info)
        def update_project_info(
            self,
            projectid,
            slug,
            description,
            description_format,
            description_html
        ):
            pass

    @defer.inlineCallbacks
    def test_update_project_info(self):
        """update_project_info persists all fields to the database."""
        yield self.master.data.updates.update_project_info(
            13,
            "slug13",
            "project13 desc",
            "format",
            "html desc",
        )
        projects = yield self.master.db.projects.get_projects()
        self.assertEqual(projects, [{
            "id": 13,
            "name": "fake_project",
            "slug": "slug13",
            "description": "project13 desc",
            "description_format": "format",
            "description_html": "html desc",
        }])
3,063 | test output created regression | #!/usr/bin/env python3
"""
MODULE: Test of r.learn.ml
AUTHOR(S): Steven Pawley <dr.stevenpawley gmail com>
PURPOSE: Test of r.learn.ml for regression
COPYRIGHT: (C) 2020 by Steven Pawley and the GRASS Development Team
This program is free software under the GNU General Public
License (>=v2). Read the file COPYING that comes with GRASS
for details.
"""
import tempfile
import os
from grass.gunittest.case import TestCase
from grass.gunittest.main import test
class TestRegression(TestCase):
"""Test regression and prediction using r.learn.ml"""
# input rasters
band1 = "lsat7_2002_10@PERMANENT"
band2 = "lsat7_2002_20@PERMANENT"
band3 = "lsat7_2002_30@PERMANENT"
band4 = "lsat7_2002_40@PERMANENT"
band5 = "lsat7_2002_50@PERMANENT"
band7 = "lsat7_2002_70@PERMANENT"
input_map = "elev_ned_30m@PERMANENT"
# imagery group created during test
group = "predictors"
# training data created during test
training_points = "training_points"
# raster map created as output during test
output = "regression_result"
# files created during test
model_file = tempfile.NamedTemporaryFile(suffix=".gz").name
training_file = tempfile.NamedTemporaryFile(suffix=".gz").name
@classmethod
def setUpClass(cls):
"""Setup that is required for all tests
Uses a temporary region for testing and creates an imagery group and
randomly samples a categorical map to use as training pixels/points
"""
cls.use_temp_region()
cls.runModule("g.region", raster=cls.input_map)
cls.runModule(
"i.group",
group=cls.group,
input=[cls.band1, cls.band2, cls.band3, cls.band4, cls.band5, cls.band7],
)
cls.runModule(
"r.random",
input=cls.input_map,
npoints=1000,
vector=cls.training_points,
seed=1234,
)
@classmethod
def tearDownClass(cls):
"""Remove the temporary region (and anything else we created)"""
cls.del_temp_region()
cls.runModule("g.remove", flags="f", type="vector", name=cls.training_points)
cls.runModule("g.remove", flags="f", type="group", name=cls.group)
def tearDown(self):
"""Remove the output created from the tests
(reuse the same name for all the test functions)"""
self.runModule("g.remove", flags="f", type="raster", name=[self.output])
try:
os.remove(self.model_file)
except FileNotFoundError:
pass
try:
os.remove(self.training_file)
except FileNotFoundError:
pass
def METHOD_NAME(self):
"""Checks that the output is created"""
self.assertModule(
"r.learn.train",
group=self.group,
training_points=self.training_points,
field="value",
model_name="RandomForestRegressor",
n_estimators=100,
save_model=self.model_file,
)
self.assertFileExists(filename=self.model_file)
self.assertModule(
"r.learn.predict",
group=self.group,
load_model=self.model_file,
output=self.output,
)
self.assertRasterExists(self.output, msg="Output was not created")
def test_save_load_training(self):
    """Test that training data can be saved, reloaded and used to retrain."""
    # save training data alongside the fitted model
    self.assertModule(
        "r.learn.train",
        group=self.group,
        training_points=self.training_points,
        field="value",
        model_name="RandomForestRegressor",
        save_training=self.training_file,
        n_estimators=100,
        save_model=self.model_file,
    )
    self.assertFileExists(filename=self.model_file)
    self.assertFileExists(filename=self.training_file)
    # load training data and retrain (overwriting the saved model)
    self.assertModule(
        "r.learn.train",
        group=self.group,
        model_name="RandomForestRegressor",
        load_training=self.training_file,
        n_estimators=100,
        save_model=self.model_file,
        overwrite=True,
    )
    # predict after loading training data
    self.assertModule(
        "r.learn.predict",
        group=self.group,
        load_model=self.model_file,
        output=self.output,
    )
    self.assertRasterExists(self.output, msg="Output was not created")
if __name__ == "__main__":
    # NOTE(review): test() is expected to come from the grass.gunittest
    # imports at the top of the file — confirm.
    test()
3,064 | custom entity | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import json
import pytest
from enum import Enum
from sagemaker.workflow.condition_step import ConditionStep
from sagemaker.workflow.conditions import ConditionGreaterThan
from sagemaker.workflow.entities import (
DefaultEnumMeta,
Entity,
)
from sagemaker.workflow.fail_step import FailStep
from sagemaker.workflow.functions import Join, JsonGet
from sagemaker.workflow.parameters import ParameterString, ParameterInteger
from sagemaker.workflow.pipeline import Pipeline
from sagemaker.workflow.properties import PropertyFile, Properties
class CustomEntity(Entity):
    """Minimal concrete Entity used as a test double."""

    def __init__(self, foo):
        self.foo = foo

    def to_request(self):
        """Serialize this entity to its request-structure dict."""
        return {"foo": self.foo}
class CustomEnum(Enum, metaclass=DefaultEnumMeta):
    """Enum using DefaultEnumMeta so calling it with no value yields the first member."""

    A = 1
    B = 2
@pytest.fixture
def METHOD_NAME():
    """Return a single CustomEntity instance with foo=1."""
    return CustomEntity(1)
@pytest.fixture
def custom_entity_list():
    """Return a list of two distinct CustomEntity instances."""
    return [CustomEntity(1), CustomEntity(2)]
def test_entity(METHOD_NAME):
    """to_request() should serialize the entity to a plain dict."""
    request_struct = {"foo": 1}
    assert METHOD_NAME.to_request() == request_struct
def test_default_enum_meta():
    """DefaultEnumMeta should default a no-argument call to the first member."""
    assert CustomEnum().value == 1
def test_pipeline_variable_in_pipeline_definition(sagemaker_session):
    """Check how pipeline variables are rendered in the pipeline DSL.

    Builds a pipeline that uses parameters, JsonGet, step properties and
    Join expressions, then asserts the exact JSON definition emitted for
    each of them (e.g. to_string() wraps a variable in a Std:Join).
    """
    param_str = ParameterString(name="MyString", default_value="1")
    param_int = ParameterInteger(name="MyInteger", default_value=3)
    property_file = PropertyFile(
        name="name",
        output_name="result",
        path="output",
    )
    json_get_func2 = JsonGet(
        step_name="my-step",
        property_file=property_file,
        json_path="my-json-path",
    )
    prop = Properties(step_name="MyStep", shape_name="DescribeProcessingJobResponse")
    cond = ConditionGreaterThan(left=param_str, right=param_int.to_string())
    step_fail = FailStep(
        name="MyFailStep",
        error_message=Join(
            on=" ",
            values=[
                "Execution failed due to condition check fails, see:",
                json_get_func2.to_string(),
                prop.ProcessingOutputConfig.Outputs["MyOutputName"].S3Output.S3Uri.to_string(),
                param_int,
            ],
        ),
    )
    step_cond = ConditionStep(
        name="MyCondStep",
        conditions=[cond],
        if_steps=[],
        else_steps=[step_fail],
    )
    pipeline = Pipeline(
        name="MyPipeline",
        parameters=[param_str, param_int],
        steps=[step_cond],
        sagemaker_session=sagemaker_session,
    )
    # Parse the generated definition and compare it structurally.
    dsl = json.loads(pipeline.definition())
    assert dsl["Parameters"] == [
        {"Name": "MyString", "Type": "String", "DefaultValue": "1"},
        {"Name": "MyInteger", "Type": "Integer", "DefaultValue": 3},
    ]
    assert len(dsl["Steps"]) == 1
    # The expected DSL: to_string() on a variable becomes a Std:Join with
    # an empty separator; JsonGet and property references become Get/Std:JsonGet.
    assert dsl["Steps"][0] == {
        "Name": "MyCondStep",
        "Type": "Condition",
        "Arguments": {
            "Conditions": [
                {
                    "Type": "GreaterThan",
                    "LeftValue": {"Get": "Parameters.MyString"},
                    "RightValue": {
                        "Std:Join": {
                            "On": "",
                            "Values": [{"Get": "Parameters.MyInteger"}],
                        },
                    },
                },
            ],
            "IfSteps": [],
            "ElseSteps": [
                {
                    "Name": "MyFailStep",
                    "Type": "Fail",
                    "Arguments": {
                        "ErrorMessage": {
                            "Std:Join": {
                                "On": " ",
                                "Values": [
                                    "Execution failed due to condition check fails, see:",
                                    {
                                        "Std:Join": {
                                            "On": "",
                                            "Values": [
                                                {
                                                    "Std:JsonGet": {
                                                        "PropertyFile": {
                                                            "Get": "Steps.my-step.PropertyFiles.name"
                                                        },
                                                        "Path": "my-json-path",
                                                    }
                                                },
                                            ],
                                        },
                                    },
                                    {
                                        "Std:Join": {
                                            "On": "",
                                            "Values": [
                                                {
                                                    "Get": "Steps.MyStep.ProcessingOutputConfig."
                                                    + "Outputs['MyOutputName'].S3Output.S3Uri"
                                                },
                                            ],
                                        },
                                    },
                                    {"Get": "Parameters.MyInteger"},
                                ],
                            }
                        }
                    },
                }
            ],
        },
    }
3,065 | plot | # Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, TYPE_CHECKING
import warnings
import pandas as pd
import sympy
from matplotlib import pyplot as plt
import numpy as np
from cirq import circuits, ops, study, value, _import
from cirq._compat import proper_repr
if TYPE_CHECKING:
import cirq
# We initialize optimize lazily, otherwise it slows global import speed.
optimize = _import.LazyLoader("optimize", globals(), "scipy.optimize")
def t1_decay(
    sampler: 'cirq.Sampler',
    *,
    qubit: 'cirq.Qid',
    num_points: int,
    max_delay: 'cirq.DURATION_LIKE',
    min_delay: 'cirq.DURATION_LIKE' = None,
    repetitions: int = 1000,
) -> 'cirq.experiments.T1DecayResult':
    """Runs a t1 decay experiment.

    Initializes a qubit into the |1⟩ state, waits for a variable amount of time,
    and measures the qubit. Plots how often the |1⟩ state is observed for each
    amount of waiting.

    Args:
        sampler: The quantum engine or simulator to run the circuits.
        qubit: The qubit under test.
        num_points: The number of evenly spaced delays to test.
        max_delay: The largest delay to test.
        min_delay: The smallest delay to test. Defaults to no delay.
        repetitions: The number of repetitions of the circuit for each delay.

    Returns:
        A T1DecayResult object that stores and can plot the data.

    Raises:
        ValueError: If the supplied parameters are not valid: negative repetitions,
            max delay less than min, or min delay less than 0.
    """
    # Normalize DURATION_LIKE inputs to Duration objects before validating.
    min_delay_dur = value.Duration(min_delay)
    max_delay_dur = value.Duration(max_delay)
    min_delay_nanos = min_delay_dur.total_nanos()
    max_delay_nanos = max_delay_dur.total_nanos()
    if repetitions <= 0:
        raise ValueError('repetitions <= 0')
    # Symbolic durations cannot be swept numerically below.
    if isinstance(min_delay_nanos, sympy.Expr) or isinstance(max_delay_nanos, sympy.Expr):
        raise ValueError('min_delay and max_delay cannot be sympy expressions.')
    if max_delay_dur < min_delay_dur:
        raise ValueError('max_delay < min_delay')
    if min_delay_dur < 0:
        raise ValueError('min_delay < 0')
    # Sweep the wait gate's duration over evenly spaced delays.
    var = sympy.Symbol('delay_ns')
    sweep = study.Linspace(var, start=min_delay_nanos, stop=max_delay_nanos, length=num_points)
    circuit = circuits.Circuit(
        ops.X(qubit), ops.wait(qubit, nanos=var), ops.measure(qubit, key='output')
    )
    results = sampler.sample(circuit, params=sweep, repetitions=repetitions)
    # Cross tabulate into a delay_ns, false_count, true_count table.
    tab = pd.crosstab(results.delay_ns, results.output)
    tab.rename_axis(None, axis="columns", inplace=True)
    tab = tab.rename(columns={0: 'false_count', 1: 'true_count'}).reset_index()
    # If a measurement outcome never occurred, crosstab omits its column;
    # insert an all-zero column so the result schema is stable.
    for col_index, name in [(1, 'false_count'), (2, 'true_count')]:
        if name not in tab:
            tab.insert(col_index, name, [0] * tab.shape[0])
    return T1DecayResult(tab)
class T1DecayResult:
    """Results from a T1 decay experiment."""

    def __init__(self, data: pd.DataFrame):
        """Inits T1DecayResult.

        Args:
            data: A data frame with three columns:
                delay_ns, false_count, true_count.
        """
        assert list(data.columns) == ['delay_ns', 'false_count', 'true_count']
        self._data = data

    @property
    def data(self) -> pd.DataFrame:
        """A data frame with delay_ns, false_count, true_count columns."""
        return self._data

    @property
    def constant(self) -> float:
        """The t1 decay constant fitted from the data (NaN if the fit fails)."""

        def exp_decay(x, t1):
            return np.exp(-x / t1)

        xs = self._data['delay_ns']
        ts = self._data['true_count']
        fs = self._data['false_count']
        probs = ts / (fs + ts)
        # Find the point closest to probability of 1/e as the initial guess.
        guess_index = np.argmin(np.abs(probs - 1.0 / np.e))
        t1_guess = xs[guess_index]
        # Fit to exponential decay to find the t1 constant
        try:
            popt, _ = optimize.curve_fit(exp_decay, xs, probs, p0=[t1_guess])
            t1 = popt[0]
            return t1
        except RuntimeError:
            warnings.warn("Optimal parameters could not be found for curve fit", RuntimeWarning)
            return np.nan

    def METHOD_NAME(
        self, ax: Optional[plt.Axes] = None, include_fit: bool = False, **plot_kwargs: Any
    ) -> plt.Axes:
        """Plots the excited state probability vs the amount of delay.

        Args:
            ax: the plt.Axes to plot on. If not given, a new figure is created,
                plotted on, and shown.
            include_fit: boolean to include exponential decay fit on graph
            **plot_kwargs: Arguments to be passed to 'plt.Axes.plot'.

        Returns:
            The plt.Axes containing the plot.
        """
        show_plot = not ax
        if show_plot:
            fig, ax = plt.subplots(1, 1, figsize=(8, 8))
        assert ax is not None
        # NOTE(review): ymin/ymax keyword names are removed in newer
        # matplotlib (use bottom/top) — confirm the pinned version.
        ax.set_ylim(ymin=0, ymax=1)
        xs = self._data['delay_ns']
        ts = self._data['true_count']
        fs = self._data['false_count']
        # Plot the measured excited-state probability at each delay.
        ax.METHOD_NAME(xs, ts / (fs + ts), 'ro-', **plot_kwargs)
        if include_fit and not np.isnan(self.constant):
            ax.METHOD_NAME(xs, np.exp(-xs / self.constant), label='curve fit')
            plt.legend()
        ax.set_xlabel(r"Delay between initialization and measurement (nanoseconds)")
        ax.set_ylabel('Excited State Probability')
        ax.set_title('T1 Decay Experiment Data')
        if show_plot:
            fig.show()
        return ax

    def __str__(self) -> str:
        return f'T1DecayResult with data:\n{self.data}'

    def __eq__(self, other) -> bool:
        if not isinstance(other, type(self)):
            return NotImplemented
        return self.data.equals(other.data)

    def __ne__(self, other) -> bool:
        return not self == other

    def __repr__(self) -> str:
        return f'cirq.experiments.T1DecayResult(data={proper_repr(self.data)})'

    def _repr_pretty_(self, p: Any, cycle: bool) -> None:
        """Text output in Jupyter."""
        if cycle:
            # There should never be a cycle. This is just in case.
            p.text('T1DecayResult(...)')
        else:
            p.text(str(self))
3,066 | encode key value | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from collections.abc import Sequence
from typing import Any, Mapping, Optional, List, Callable, TypeVar, Dict
import backoff
from opentelemetry.sdk.util.instrumentation import InstrumentationScope
from opentelemetry.proto.common.v1.common_pb2 import (
InstrumentationScope as PB2InstrumentationScope,
)
from opentelemetry.proto.resource.v1.resource_pb2 import (
Resource as PB2Resource,
)
from opentelemetry.proto.common.v1.common_pb2 import AnyValue as PB2AnyValue
from opentelemetry.proto.common.v1.common_pb2 import KeyValue as PB2KeyValue
from opentelemetry.proto.common.v1.common_pb2 import (
KeyValueList as PB2KeyValueList,
)
from opentelemetry.proto.common.v1.common_pb2 import (
ArrayValue as PB2ArrayValue,
)
from opentelemetry.sdk.trace import Resource
from opentelemetry.util.types import Attributes
_logger = logging.getLogger(__name__)
_TypingResourceT = TypeVar("_TypingResourceT")
_ResourceDataT = TypeVar("_ResourceDataT")
def _encode_instrumentation_scope(
    instrumentation_scope: InstrumentationScope,
) -> PB2InstrumentationScope:
    """Encode an SDK InstrumentationScope as its protobuf counterpart.

    A missing (None) scope is encoded as an empty protobuf message.
    """
    if instrumentation_scope is None:
        return PB2InstrumentationScope()
    return PB2InstrumentationScope(
        name=instrumentation_scope.name,
        version=instrumentation_scope.version,
    )
def _encode_resource(resource: Resource) -> PB2Resource:
    """Encode an SDK Resource (its attributes only) as a protobuf Resource."""
    return PB2Resource(attributes=_encode_attributes(resource.attributes))
def _encode_value(value: Any) -> PB2AnyValue:
    """Encode a Python value as an OTLP protobuf AnyValue.

    Supports bool, str, int, float, bytes, sequences, and mappings
    (sequences and mappings are encoded recursively).

    Raises:
        Exception: if the value's type has no AnyValue representation.
    """
    # bool must be tested before int: bool is a subclass of int.
    if isinstance(value, bool):
        return PB2AnyValue(bool_value=value)
    if isinstance(value, str):
        return PB2AnyValue(string_value=value)
    if isinstance(value, int):
        return PB2AnyValue(int_value=value)
    if isinstance(value, float):
        return PB2AnyValue(double_value=value)
    # bytes is a Sequence, so it must be tested before the Sequence branch;
    # otherwise it would be (wrongly) encoded as an array of integers.
    if isinstance(value, bytes):
        return PB2AnyValue(bytes_value=value)
    if isinstance(value, Sequence):
        return PB2AnyValue(
            array_value=PB2ArrayValue(values=[_encode_value(v) for v in value])
        )
    if isinstance(value, Mapping):
        return PB2AnyValue(
            kvlist_value=PB2KeyValueList(
                values=[METHOD_NAME(str(k), v) for k, v in value.items()]
            )
        )
    raise Exception(f"Invalid type {type(value)} of value {value}")
def METHOD_NAME(key: str, value: Any) -> PB2KeyValue:
    """Encode one attribute key/value pair as a protobuf KeyValue."""
    return PB2KeyValue(key=key, value=_encode_value(value))
def _encode_span_id(span_id: int) -> bytes:
return span_id.to_bytes(length=8, byteorder="big", signed=False)
def _encode_trace_id(trace_id: int) -> bytes:
return trace_id.to_bytes(length=16, byteorder="big", signed=False)
def _encode_attributes(
    attributes: Attributes,
) -> Optional[List[PB2KeyValue]]:
    """Encode an attribute mapping as a list of protobuf KeyValue pairs.

    Returns None when the mapping is empty or None.  Pairs whose value
    cannot be encoded are logged and skipped rather than failing export.
    """
    if not attributes:
        return None
    pb2_attributes = []
    for key, value in attributes.items():
        try:
            pb2_attributes.append(METHOD_NAME(key, value))
        except Exception as error:  # pylint: disable=broad-except
            _logger.exception(error)
    return pb2_attributes
def _get_resource_data(
    sdk_resource_scope_data: Dict[Resource, _ResourceDataT],
    resource_class: Callable[..., _TypingResourceT],
    name: str,
) -> List[_TypingResourceT]:
    """Build per-resource protobuf messages from grouped SDK data.

    Args:
        sdk_resource_scope_data: scope data grouped by SDK Resource.
        resource_class: protobuf message constructor (e.g. ResourceSpans).
        name: signal-specific field suffix; the constructor receives the
            scope data under the keyword ``scope_<name>``.
    """
    resource_data = []
    for (
        sdk_resource,
        scope_data,
    ) in sdk_resource_scope_data.items():
        collector_resource = PB2Resource(
            attributes=_encode_attributes(sdk_resource.attributes)
        )
        resource_data.append(
            resource_class(
                **{
                    "resource": collector_resource,
                    "scope_{}".format(name): scope_data.values(),
                }
            )
        )
    return resource_data
# Work around API change between backoff 1.x and 2.x. Since 2.0.0 the backoff
# wait generator API requires a first .send(None) before reading the backoff
# values from the generator.
_is_backoff_v2 = next(backoff.expo()) is None


def _create_exp_backoff_generator(*args, **kwargs):
    """Return a backoff.expo generator, primed when running on backoff 2.x."""
    gen = backoff.expo(*args, **kwargs)
    if _is_backoff_v2:
        gen.send(None)
    return gen
3,067 | test init empty user agent raises configurationerror | import pytest
from subliminal_patch.core import Episode
from subliminal_patch.providers import subf2m
from subliminal_patch.providers.subf2m import ConfigurationError
from subliminal_patch.providers.subf2m import Subf2mProvider
from subliminal_patch.providers.subf2m import Subf2mSubtitle
from subzero.language import Language
_U_A = "Mozilla/5.0 (Linux; Android 10; SM-G996U Build/QP1A.190711.020; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Mobile Safari/537.36"
@pytest.fixture
def provider():
    """Yield a Subf2mProvider initialized with a realistic user agent."""
    with Subf2mProvider(user_agent=_U_A) as provider:
        yield provider
@pytest.mark.parametrize(
    "title,year,expected_url",
    [
        (
            "Dead Man's Chest",
            2006,
            "/subtitles/pirates-of-the-caribbean-2-dead-mans-chest",
        ),
        ("Dune", 2021, "/subtitles/dune-2021"),
        ("Cure", 1997, "/subtitles/cure-kyua"),
    ],
)
def test_search_movie(provider, title, year, expected_url):
    """_search_movie should resolve a title/year pair to its subf2m path."""
    result = provider._search_movie(title, year)
    assert expected_url in result
def METHOD_NAME():
    """A blank user agent must raise ConfigurationError at provider init."""
    with pytest.raises(ConfigurationError):
        with Subf2mProvider(user_agent=" ") as provider:
            assert provider
@pytest.mark.parametrize(
    "series_title,season,year,expected_url",
    [
        ("Breaking Bad", 1, None, "/subtitles/breaking-bad-first-season"),
        ("House Of The Dragon", 1, None, "/subtitles/house-of-the-dragon-first-season"),
        ("The Bear", 1, None, "/subtitles/the-bear-first-season"),
        ("Courage the Cowardly Dog", 1, None, "/subtitles/courage-the-cowardly-dog"),
        (
            "The Twilight Zone",
            2,
            1959,
            "/subtitles/the-twilight-zone-the-complete-original-series",
        ),
    ],
)
def test_search_tv_show_season(provider, series_title, season, year, expected_url):
    """_search_tv_show_season should resolve a series/season to its subf2m path."""
    result = provider._search_tv_show_season(series_title, season, year)
    assert expected_url in result
@pytest.mark.parametrize("language", [Language.fromalpha2("en"), Language("por", "BR")])
def test_find_movie_subtitles(provider, language, movies):
    """Every subtitle found for a movie path must match the requested language."""
    path = "/subtitles/dune-2021"
    for sub in provider._find_movie_subtitles(path, language, movies["dune"].imdb_id):
        assert sub.language == language
@pytest.mark.parametrize("language", [Language.fromalpha2("en"), Language("por", "BR")])
def test_find_episode_subtitles(provider, language, episodes):
    """Episode search must return subtitles matching the requested language."""
    path = "/subtitles/breaking-bad-first-season"
    subs = provider._find_episode_subtitles(
        path, 1, 1, language, imdb_id=episodes["breaking_bad_s01e01"].series_imdb_id
    )
    assert subs
    for sub in subs:
        assert sub.language == language
def test_find_episode_subtitles_from_complete_series_path(provider):
    """Episodes must be discoverable from a complete-series page path."""
    path = "/subtitles/courage-the-cowardly-dog"
    subs = provider._find_episode_subtitles(
        path, 1, 1, Language.fromalpha2("en"), imdb_id="tt0220880"
    )
    assert subs
    for sub in subs:
        assert sub.language == Language.fromalpha2("en")
def test_list_and_download_subtitles_complete_series_pack(provider, episodes):
    """Listing and downloading from a complete-series pack must yield a valid subtitle."""
    # Repurpose an existing episode fixture to point at a series that only
    # has a complete-series pack.
    episode = list(episodes.values())[0]
    episode.series = "Sam & Max: Freelance Police"
    episode.name = "The Glazed McGuffin Affair"
    episode.title = "The Glazed McGuffin Affair"
    episode.series_imdb_id = "tt0125646"
    episode.season = 1
    episode.episode = 21
    subtitles = provider.list_subtitles(episode, {Language.fromalpha2("en")})
    assert subtitles
    subtitle = subtitles[0]
    provider.download_subtitle(subtitle)
    assert subtitle.is_valid()
@pytest.fixture
def subtitle():
    """Return a Farsi movie subtitle whose release_info lists many releases."""
    release_info = """Dune-2021.All.WEBDLL
Dune.2021.WEBRip.XviD.MP3-XVID
Dune.2021.WEBRip.XviD.MP3-SHITBOX
Dune.2021.WEBRip.x264-SHITBOX
Dune.2021.WEBRip.x264-ION10
Dune.2021.HDRip.XviD-EVO[TGx]
Dune.2021.HDRip.XviD-EVO
Dune.2021.720p.HDRip.900MB.x264-GalaxyRG
Dune.2021.1080p.HDRip.X264-EVO
Dune.2021.1080p.HDRip.1400MB.x264-GalaxyRG"""
    return Subf2mSubtitle(
        Language.fromalpha3b("per"),
        "https://subf2m.co/subtitles/dune-2021/farsi_persian/2604701",
        release_info,
    )
@pytest.fixture
def subtitle_episode():
    """Return an English episode subtitle for Breaking Bad S01E01."""
    return Subf2mSubtitle(
        Language.fromalpha2("en"),
        "https://subf2m.co/subtitles/breaking-bad-first-season/english/161227",
        "Breaking.Bad.S01E01-7.DSR-HDTV.eng",
        7,
    )
def test_subtitle_get_matches(subtitle, movies):
    """Movie subtitle matching must recognize title, year and release details."""
    matches = subtitle.get_matches(movies["dune"])  # type: set
    assert matches.issuperset(
        ("title", "year", "source", "video_codec", "resolution", "release_group")
    )
def test_subtitle_get_matches_episode(subtitle_episode, episodes):
    """Episode subtitle matching must cover series/season/episode but not source."""
    matches = subtitle_episode.get_matches(episodes["breaking_bad_s01e01"])  # type: set
    assert matches.issuperset(("title", "series", "season", "episode"))
    assert "source" not in matches
def test_list_subtitles_movie(provider, movies):
    """Listing subtitles for a known movie must return results."""
    assert provider.list_subtitles(movies["dune"], {Language.fromalpha2("en")})
def test_list_subtitles_inexistent_movie(provider, movies):
    """Listing subtitles for an unknown movie must return an empty list."""
    assert (
        provider.list_subtitles(movies["inexistent"], {Language.fromalpha2("en")}) == []
    )
def test_list_subtitles_episode(provider, episodes):
    """Listing subtitles for a known episode must return results."""
    assert provider.list_subtitles(
        episodes["breaking_bad_s01e01"], {Language.fromalpha2("en")}
    )
def test_list_subtitles_inexistent_episode(provider, episodes):
    """Listing subtitles for an unknown episode must return an empty list."""
    assert (
        provider.list_subtitles(episodes["inexistent"], {Language.fromalpha2("en")})
        == []
    )
def test_download_subtitle(provider, subtitle):
    """Downloading a movie subtitle must produce valid content."""
    provider.download_subtitle(subtitle)
    assert subtitle.is_valid()
def test_download_subtitle_episode(provider, subtitle_episode):
    """Downloading an episode subtitle must produce valid content."""
    provider.download_subtitle(subtitle_episode)
    assert subtitle_episode.is_valid()
def test_download_subtitle_episode_with_title(provider):
    """A season-pack subtitle selected by episode title must download as valid."""
    sub = Subf2mSubtitle(
        Language.fromalpha2("en"),
        "https://subf2m.co/subtitles/courage-the-cowardly-dog/english/2232402",
        "Season 3 complete.",
        13,
    )
    sub.episode_title = "Feast of the Bullfrogs"
    provider.download_subtitle(sub)
    assert sub.is_valid()
def test_get_episode_from_release():
    """_get_episode_from_release must parse season/episode from a release name."""
    assert subf2m._get_episode_from_release(
        "Vinland Saga Season 2 - 05 [Crunchyroll][Crunchyroll] Vinland Saga Season 2 - 05"
    ) == {"season": [2], "episode": [5]}
def test_get_episode_from_release_return_none():
    """A release without an episode number must yield None."""
    assert subf2m._get_episode_from_release("Vinland Saga Season 2 - Foo") is None
def test_get_episode_from_release_w_empty_match_return_none():
    """A release without an explicit season must yield None."""
    assert subf2m._get_episode_from_release("Vinland Saga - 02") is None
def test_complex_episode_name(provider):
    """Listing must cope with an episode whose filename embeds release metadata."""
    episode = Episode(
        **{
            "name": "Dr.Romantic.S03E16.SBS.x265.1080p-thon.mkv",
            "source": "HDTV",
            "release_group": "thon",
            "resolution": "1080p",
            "video_codec": "H.265",
            "audio_codec": "AAC",
            "subtitle_languages": set(),
            "original_name": "Dr. Romantic - S03E16.mkv",
            "other": None,
            "series": "Dr. Romantic",
            "season": 3,
            "episode": 16,
            "title": "Dreamers",
            "year": 2016,
            "series_imdb_id": "tt6157190",
            "alternative_series": ["Romantic Doctor Teacher Kim"],
        }
    )
    assert provider.list_subtitles(episode, {Language.fromietf("en")})
3,068 | slack channel | import pytest
import subprocess
import time
import os
import yaml
from kubernetes import config
from pathlib import Path
from io import StringIO
from tests.config import CONFIG
from tests.utils.robusta_utils import RobustaController
from tests.utils.slack_utils import SlackChannel
def pytest_addoption(parser):
parser.addoption("--image", action="store", default=None)
parser.addoption(
"--no-delete-cluster", action="store_true", default=False, help="don't delete the kind cluster after test"
)
# see https://docs.pytest.org/en/latest/example/simple.html#making-test-result-information-available-in-fixtures
# the goal here is to let fixtures know if a test succeeded or not by checking `request.node.report_call.passed` etc
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
# execute all other hooks to obtain the report object
outcome = yield
report = outcome.get_result()
# set a report attribute for each phase of a call, which can
# be "setup", "call", "teardown"
setattr(item, "report_" + report.when, report)
@pytest.fixture(scope="session")
def METHOD_NAME() -> SlackChannel:
if CONFIG.PYTEST_SLACK_TOKEN is None or CONFIG.PYTEST_SLACK_CHANNEL is None:
pytest.skip("skipping slack tests (missing environment variables)", allow_module_level=True)
return SlackChannel(CONFIG.PYTEST_SLACK_TOKEN, CONFIG.PYTEST_SLACK_CHANNEL)
@pytest.fixture(scope="session")
def kind_cluster(pytestconfig, tmp_path_factory):
cluster_name = "pytest-kind-cluster"
# Check if the cluster already exists
try:
with open(os.devnull, 'w') as DEVNULL:
subprocess.check_call(["kind", "get", "kubeconfig", "--name", cluster_name], stdout=DEVNULL, stderr=DEVNULL)
print("Cluster already exists, reusing...")
except subprocess.CalledProcessError:
# Cluster doesn't exist, create a new one
print("Creating a new cluster...")
subprocess.check_call(["kind", "create", "cluster", "--name", cluster_name])
# Exporting the kubeconfig file from kind to stdout
kubeconfig = subprocess.check_output(["kind", "get", "kubeconfig", "--name", cluster_name])
# Loading the kubeconfig in-memory
config.load_kube_config_from_dict(yaml.safe_load(StringIO(kubeconfig.decode())))
# Write kubeconfig to temporary file
kubeconfig_path = tmp_path_factory.mktemp("kubeconfig", numbered=False) / "kubeconfig"
with open(kubeconfig_path, "w") as f:
f.write(kubeconfig.decode())
os.environ["KUBECONFIG"] = str(kubeconfig_path)
# Polling for cluster readiness
for _ in range(60): # retry for up to 1 minute
try:
ready_nodes = subprocess.check_output(
["kubectl", "get", "nodes", "-o", "jsonpath='{.items[*].status.conditions[?(@.type==\"Ready\")].status}'"]
)
if "True" in ready_nodes.decode():
break
except subprocess.CalledProcessError:
pass
time.sleep(1)
else:
raise RuntimeError("Cluster not ready after 1 minute")
# Load the Robusta image into kind
subprocess.check_output(["kind", "load", "docker-image", "--name", cluster_name, pytestconfig.getoption("--image")])
yield cluster_name # This is where the testing happens
# Deleting the cluster after the tests are done, if --no-delete-cluster wasn't passed
if not pytestconfig.getoption("--no-delete-cluster"):
subprocess.check_call(["kind", "delete", "cluster", "--name", cluster_name])
@pytest.fixture
def robusta(METHOD_NAME: SlackChannel, kind_cluster: str, tmp_path_factory, request, pytestconfig):
robusta = RobustaController(pytestconfig.getoption("image"))
values_path = tmp_path_factory.mktemp("gen_config", numbered=False) / "./gen_values.yaml"
robusta.gen_config(
slack_api_key=CONFIG.PYTEST_IN_CLUSTER_SLACK_TOKEN,
METHOD_NAME=CONFIG.PYTEST_SLACK_CHANNEL,
output_path=str(values_path),
)
robusta.helm_install(values_path)
yield robusta
# see pytest_runtest_makereport above
if request.node.report_setup.passed and request.node.report_call.failed:
print("logs are: ")
print(robusta.get_logs())
robusta.helm_uninstall()
|
3,069 | test cluster natural gas bus links entry | import pytest
import os
import pandas
from program_files.urban_district_upscaling.components import Link
# Load the standard-parameters workbook once per module; its "6_links"
# sheet supplies the default columns merged into every expected entry.
standard_parameters = pandas.ExcelFile(os.path.dirname(__file__)
                                       + "/standard_parameters.xlsx")
links = standard_parameters.parse("6_links")
@pytest.fixture
def test_create_link_entry():
    """Expected "links" sheet for a single pv-to-electricity building link."""
    # combine specific data and the standard parameter data
    return {
        "links":
            pandas.merge(
                left=pandas.DataFrame.from_dict({
                    "label": ["test_link"],
                    "bus1": ["building_pv_bus"],
                    "link_type": ["building_pv_building_link"],
                    "bus2": ["building_res_electricity_bus"]}),
                right=links,
                on="link_type").drop(columns=["link_type"])}
def test_create_link(test_create_link_entry):
    """Link.create_link must produce exactly the expected "links" entry."""
    # start the method to be tested
    sheets = Link.create_link(
        label="test_link",
        bus_1="building_pv_bus",
        bus_2="building_res_electricity_bus",
        link_type="building_pv_building_link",
        sheets={"links": pandas.DataFrame()},
        standard_parameters=standard_parameters
    )
    # assert whether the two dataframes are equal (column order ignored)
    pandas.testing.assert_frame_equal(
        sheets["links"].sort_index(axis=1),
        test_create_link_entry["links"].sort_index(axis=1))
@pytest.fixture
def test_clustered_electricity_links():
    """Expected "links" sheet after connecting a cluster to the central bus."""
    sheets = {
        "links":
            pandas.merge(
                left=pandas.DataFrame.from_dict({
                    "label": ["test_cluster_pv_central",
                              "test_cluster_central_electricity_link",
                              "test_cluster_pv_test_cluster_electricity_link"],
                    "link_type": ["building_pv_central_link",
                                  "building_central_building_link",
                                  "building_pv_central_link"],
                    "bus1": ["",
                             "central_electricity_bus",
                             "test_cluster_pv_bus"],
                    "bus2": ["",
                             "test_cluster_electricity_bus",
                             "test_cluster_electricity_bus"]}),
                right=links,
                on="link_type").drop(columns=["link_type"])}
    sheets["links"].set_index("label", inplace=True, drop=False)
    return sheets
def test_create_central_electricity_bus_connection(
        test_clustered_electricity_links):
    """Link.create_central_electricity_bus_connection must add the expected
    central/cluster electricity links to a sheet holding one pv link."""
    # pre-existing "links" sheet containing only the pv-central link stub
    sheets = {
        "links": pandas.DataFrame.from_dict(
            {"label": ["test_cluster_pv_central"],
             "(un)directed": ["directed"],
             "active": [1.0],
             "bus1": [""],
             "bus2": [""],
             "efficiency": [1.0],
             "existing capacity": [9999.0],
             "fix investment constraint costs": [0.0],
             "fix investment costs": [0.0],
             "max. investment capacity": [0.0],
             "min. investment capacity": [0.0],
             "non-convex investment": [0.0],
             "periodical constraint costs": [0.00001],
             "periodical costs": [0.00001],
             "variable output constraint costs": [0.0],
             "variable output costs": [0.0]})}
    sheets["links"].set_index("label")
    sheets = Link.create_central_electricity_bus_connection(
        cluster="test_cluster",
        sheets=sheets,
        standard_parameters=standard_parameters
    )
    # compare row- and column-sorted frames to ignore ordering differences
    sheets["links"] = sheets["links"].sort_index(axis=0)
    test_clustered_electricity_links["links"] = \
        test_clustered_electricity_links["links"].sort_index(axis=0)
    pandas.testing.assert_frame_equal(
        sheets["links"].sort_index(axis=1),
        test_clustered_electricity_links["links"].sort_index(axis=1)
    )
@pytest.fixture
def test_cluster_pv_links_entries():
    """Expected "links" sheet for a cluster's pv-to-electricity links."""
    return {
        "links": pandas.merge(
            left = pandas.DataFrame.from_dict({
                "label": ["test_cluster_pv_central_electricity_link",
                          "test_cluster_pv_electricity_link"],
                "bus1": ["test_cluster_pv_bus"] * 2,
                "bus2": ["central_electricity_bus",
                         "test_cluster_electricity_bus"],
                "link_type": ["building_pv_central_link",
                              "building_pv_building_link"]}),
            right=links,
            on="link_type").drop(columns=["link_type"])
    }
def test_create_cluster_pv_links(test_cluster_pv_links_entries):
    """Link.create_cluster_pv_links must produce the expected pv link entries."""
    sheets = Link.create_cluster_pv_links(
        cluster="test_cluster",
        sheets={"links": pandas.DataFrame()},
        sink_parameters=[1, 2, 3, [], 0, 0, 0, [], [], [], []],
        standard_parameters=standard_parameters)
    test_cluster_pv_links_entries["links"].set_index(
        "label", inplace=True, drop=False)
    pandas.testing.assert_frame_equal(
        sheets["links"].sort_index(axis=1),
        test_cluster_pv_links_entries["links"].sort_index(axis=1))
@pytest.fixture
def METHOD_NAME():
    """Expected "links" sheet for a cluster's central natural-gas link."""
    return {
        "links": pandas.merge(
            left=pandas.DataFrame.from_dict({
                "label": ["test_cluster_central_naturalgas_link"],
                "bus1": ["central_naturalgas_bus"],
                "bus2": ["test_cluster_gas_bus"],
                "link_type": ["central_naturalgas_building_link"]}),
            right=links,
            on="link_type").drop(columns=["link_type"])
    }
def test_add_cluster_naturalgas_bus_links(
        METHOD_NAME):
    """Link.add_cluster_naturalgas_bus_links must add the expected gas link."""
    sheets = Link.add_cluster_naturalgas_bus_links(
        sheets={"links": pandas.DataFrame()},
        cluster="test_cluster",
        standard_parameters=standard_parameters
    )
    METHOD_NAME["links"].set_index(
        "label", inplace=True, drop=False)
    pandas.testing.assert_frame_equal(
        sheets["links"].sort_index(axis=1),
        METHOD_NAME["links"].sort_index(axis=1))
def test_delete_non_used_links():
    # TODO(review): placeholder — Link.delete_non_used_links is not yet covered.
    pass
3,070 | build transformer | # Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typing
from typing import Any, Tuple
import numpy as np
import lale.docstrings
import lale.helpers
import lale.operators
from lale.expressions import collect_set, it, replace
from lale.lib.dataframe import count, get_columns
from lale.lib.sklearn import ordinal_encoder
from .aggregate import Aggregate
from .map import Map
from .monoid import Monoid, MonoidableOperator
class _OrdinalEncoderMonoid(Monoid):
    """Associative summary of the statistics needed to fit an OrdinalEncoder."""

    def __init__(self, *, n_samples_seen_, feature_names_in_, categories_):
        self.n_samples_seen_ = n_samples_seen_
        self.feature_names_in_ = feature_names_in_
        self.categories_ = categories_

    def combine(self, other: "_OrdinalEncoderMonoid"):
        """Merge the statistics of two batches into a new monoid."""
        total_samples = self.n_samples_seen_ + other.n_samples_seen_
        # Both batches must describe the same feature columns.
        assert list(self.feature_names_in_) == list(other.feature_names_in_)
        assert len(self.categories_) == len(other.categories_)
        # Per column: sorted union of the category values seen in each batch.
        merged_categories = [
            np.sort(np.unique(np.concatenate([mine, theirs])))
            for mine, theirs in zip(self.categories_, other.categories_)
        ]
        return _OrdinalEncoderMonoid(
            n_samples_seen_=total_samples,
            feature_names_in_=self.feature_names_in_,
            categories_=merged_categories,
        )
class _OrdinalEncoderImpl(MonoidableOperator[_OrdinalEncoderMonoid]):
    """Incrementally fittable ordinal encoder built on the monoid framework."""

    def __init__(
        self,
        *,
        categories="auto",
        dtype="float64",
        handle_unknown="error",
        unknown_value=None,
    ):
        # Hyperparameters are stored as-is and consumed lazily by
        # to_monoid() and METHOD_NAME().
        self._hyperparams = {
            "categories": categories,
            "dtype": dtype,
            "handle_unknown": handle_unknown,
            "unknown_value": unknown_value,
        }

    def transform(self, X):
        """Encode X using the fitted categories (builds the Map lazily)."""
        if self._transformer is None:
            self._transformer = self.METHOD_NAME()
        return self._transformer.transform(X)

    @property
    def n_samples_seen_(self):
        # 0 before the first fit.
        return getattr(self._monoid, "n_samples_seen_", 0)

    @property
    def categories_(self):
        # None before the first fit.
        return getattr(self._monoid, "categories_", None)

    @property
    def feature_names_in_(self):
        # None before the first fit.
        return getattr(self._monoid, "feature_names_in_", None)

    def from_monoid(self, monoid: _OrdinalEncoderMonoid):
        """Adopt the fitted state and invalidate the cached transformer."""
        self._monoid = monoid
        self.n_features_in_ = len(monoid.feature_names_in_)
        self._transformer = None

    def METHOD_NAME(self):
        """Build the Map operator that replaces each category with its index."""
        assert self._monoid is not None
        result = Map(
            columns={
                col_name: replace(
                    it[col_name],
                    {
                        cat_value: cat_idx
                        for cat_idx, cat_value in enumerate(
                            self._monoid.categories_[col_idx]
                        )
                    },
                    handle_unknown="use_encoded_value",
                    unknown_value=self._hyperparams["unknown_value"],
                )
                for col_idx, col_name in enumerate(self._monoid.feature_names_in_)
            }
        )
        return result

    def to_monoid(self, batch: Tuple[Any, Any]):
        """Summarize one (X, y) batch as an _OrdinalEncoderMonoid."""
        hyperparams = self._hyperparams
        X, _ = batch
        n_samples_seen_ = count(X)
        feature_names_in_ = get_columns(X)
        if hyperparams["categories"] == "auto":
            # Collect the distinct values of every column in one aggregate pass.
            agg_op = Aggregate(
                columns={c: collect_set(it[c]) for c in feature_names_in_}
            )
            agg_data = agg_op.transform(X)
            if lale.helpers._is_spark_df(agg_data):
                agg_data = agg_data.toPandas()
            categories_ = [np.sort(agg_data.loc[0, c]) for c in feature_names_in_]
        else:
            categories_ = hyperparams["categories"]
        return _OrdinalEncoderMonoid(
            n_samples_seen_=n_samples_seen_,
            feature_names_in_=feature_names_in_,
            categories_=categories_,
        )
# JSON schema bundle for the operator.  The hyperparameter and I/O schemas
# are reused from the scikit-learn OrdinalEncoder wrapper; only the
# description/tags differ for this relational reimplementation.
_combined_schemas = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": """Relational algebra reimplementation of scikit-learn's `OrdinalEncoder`_ transformer that encodes categorical features as numbers.
Works on both pandas and Spark dataframes by using `Aggregate`_ for `fit` and `Map`_ for `transform`, which in turn use the appropriate backend.
.. _`OrdinalEncoder`: https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OrdinalEncoder.html
.. _`Aggregate`: https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.aggregate.html
.. _`Map`: https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.map.html
""",
    "documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.ordinal_encoder.html",
    "type": "object",
    "tags": {
        "pre": ["categoricals"],
        "op": ["transformer", "interpretable"],
        "post": [],
    },
    "properties": {
        "hyperparams": ordinal_encoder._hyperparams_schema,
        "input_fit": ordinal_encoder._input_fit_schema,
        "input_transform": ordinal_encoder._input_transform_schema,
        "output_transform": ordinal_encoder._output_transform_schema,
    },
}
OrdinalEncoder = lale.operators.make_operator(_OrdinalEncoderImpl, _combined_schemas)
# Narrow the inherited sklearn hyperparameter schemas to what this
# relational implementation actually supports (float64 dtype and
# handle_unknown='use_encoded_value' only).
# NOTE(review): passing encode_unknown_with=None appears intended to drop
# that hyperparameter from the inherited schema -- confirm against
# customize_schema semantics.
OrdinalEncoder = typing.cast(
    lale.operators.PlannedIndividualOp,
    OrdinalEncoder.customize_schema(
        encode_unknown_with=None,
        dtype={
            "enum": ["float64"],
            "description": "This implementation only supports `dtype='float64'`.",
            "default": "float64",
        },
        handle_unknown={
            "enum": ["use_encoded_value"],
            "description": "This implementation only supports `handle_unknown='use_encoded_value'`.",
            "default": "use_encoded_value",
        },
        unknown_value={
            "anyOf": [
                {"type": "integer"},
                {"enum": [np.nan, None]},
            ],
            "description": "The encoded value of unknown categories to use when `handle_unknown='use_encoded_value'`. It has to be distinct from the values used to encode any of the categories in fit. If set to np.nan, the dtype hyperparameter must be a float dtype.",
        },
    ),
)
lale.docstrings.set_docstrings(OrdinalEncoder)
"""
Global definitions used by Direct Gui Classes and handy constants
that can be used during widget construction
"""
__all__ = ()
from panda3d.core import (
KeyboardButton,
MouseButton,
PGButton,
PGEntry,
PGFrameStyle,
PGSliderBar,
TextNode,
)
# Module-level defaults shared by all DirectGui widgets; they are read at
# widget-construction time and mutated through the setDefault* helpers below.
defaultFont = None
# Callable used to lazily resolve the default font on first access.
defaultFontFunc = TextNode.getDefaultFont
defaultClickSound = None
defaultRolloverSound = None
defaultDialogGeom = None
defaultDialogRelief = PGFrameStyle.TBevelOut
drawOrder = 100
panel = None
# USEFUL GUI CONSTANTS
#: Constant used to indicate that an option can only be set by a call
#: to the constructor.
INITOPT = ['initopt']
# Mouse buttons
LMB = 0
MMB = 1
RMB = 2
# Widget state
NORMAL = 'normal'
DISABLED = 'disabled'
# Frame style
FLAT = PGFrameStyle.TFlat
RAISED = PGFrameStyle.TBevelOut
SUNKEN = PGFrameStyle.TBevelIn
GROOVE = PGFrameStyle.TGroove
RIDGE = PGFrameStyle.TRidge
TEXTUREBORDER = PGFrameStyle.TTextureBorder
# Mapping from user-friendly relief names to PGFrameStyle types.
FrameStyleDict = {'flat': FLAT, 'raised': RAISED, 'sunken': SUNKEN,
                  'groove': GROOVE, 'ridge': RIDGE,
                  'texture_border': TEXTUREBORDER,
                  }
# Orientation of DirectSlider and DirectScrollBar
HORIZONTAL = 'horizontal'
VERTICAL = 'vertical'
VERTICAL_INVERTED = 'vertical_inverted'
# Dialog button values
DIALOG_NO = 0
DIALOG_OK = DIALOG_YES = DIALOG_RETRY = 1
DIALOG_CANCEL = -1
# User can bind commands to these gui events
DESTROY = 'destroy-'
PRINT = 'print-'
# Event-name prefixes generated by PGButton; widget names get appended.
ENTER = PGButton.getEnterPrefix()
EXIT = PGButton.getExitPrefix()
WITHIN = PGButton.getWithinPrefix()
WITHOUT = PGButton.getWithoutPrefix()
B1CLICK = PGButton.getClickPrefix() + MouseButton.one().getName() + '-'
B2CLICK = PGButton.getClickPrefix() + MouseButton.two().getName() + '-'
B3CLICK = PGButton.getClickPrefix() + MouseButton.three().getName() + '-'
B1PRESS = PGButton.getPressPrefix() + MouseButton.one().getName() + '-'
B2PRESS = PGButton.getPressPrefix() + MouseButton.two().getName() + '-'
B3PRESS = PGButton.getPressPrefix() + MouseButton.three().getName() + '-'
B1RELEASE = PGButton.getReleasePrefix() + MouseButton.one().getName() + '-'
B2RELEASE = PGButton.getReleasePrefix() + MouseButton.two().getName() + '-'
B3RELEASE = PGButton.getReleasePrefix() + MouseButton.three().getName() + '-'
# NOTE(review): wheel events use the release prefix here -- confirm this
# matches how PGButton names wheel events.
WHEELUP = PGButton.getReleasePrefix() + MouseButton.wheelUp().getName() + '-'
WHEELDOWN = PGButton.getReleasePrefix() + MouseButton.wheelDown().getName() + '-'
# For DirectEntry widgets
OVERFLOW = PGEntry.getOverflowPrefix()
ACCEPT = PGEntry.getAcceptPrefix() + KeyboardButton.enter().getName() + '-'
ACCEPTFAILED = PGEntry.getAcceptFailedPrefix() + KeyboardButton.enter().getName() + '-'
TYPE = PGEntry.getTypePrefix()
ERASE = PGEntry.getErasePrefix()
CURSORMOVE = PGEntry.getCursormovePrefix()
# For DirectSlider and DirectScrollBar widgets
ADJUST = PGSliderBar.getAdjustPrefix()
# For setting the sorting order of a widget's visible components
IMAGE_SORT_INDEX = 10
GEOM_SORT_INDEX = 20
TEXT_SORT_INDEX = 30
FADE_SORT_INDEX = 1000
NO_FADE_SORT_INDEX = 2000
# Handy conventions for organizing top-level gui objects in loose buckets.
BACKGROUND_SORT_INDEX = -100
MIDGROUND_SORT_INDEX = 0
FOREGROUND_SORT_INDEX = 100
# Symbolic constants for the indexes into an optionInfo list.
_OPT_DEFAULT = 0
_OPT_VALUE = 1
_OPT_FUNCTION = 2
# DirectButton States:
BUTTON_READY_STATE = PGButton.SReady  # 0
BUTTON_DEPRESSED_STATE = PGButton.SDepressed  # 1
BUTTON_ROLLOVER_STATE = PGButton.SRollover  # 2
BUTTON_INACTIVE_STATE = PGButton.SInactive  # 3
def getDefaultRolloverSound():
    """Return the sound played when the mouse rolls over a widget."""
    return defaultRolloverSound
def setDefaultRolloverSound(newSound):
    """Set the sound played when the mouse rolls over a widget."""
    global defaultRolloverSound
    defaultRolloverSound = newSound
def getDefaultClickSound():
    """Return the sound played when a widget is clicked."""
    return defaultClickSound
def setDefaultClickSound(newSound):
    """Set the sound played when a widget is clicked."""
    global defaultClickSound
    defaultClickSound = newSound
def getDefaultFont():
    """Return the default DirectGui font, resolving it lazily via
    ``defaultFontFunc`` on first access and caching the result."""
    global defaultFont
    if defaultFont is None:
        defaultFont = defaultFontFunc()
    return defaultFont
def setDefaultFont(newFont):
    """Changes the default font for DirectGUI items. To change the default
    font across the board, see :meth:`.TextNode.setDefaultFont`. """
    global defaultFont
    defaultFont = newFont
def METHOD_NAME(newFontFunc):
    """Set the callable used by getDefaultFont to resolve the default font
    the first time it is requested."""
    global defaultFontFunc
    defaultFontFunc = newFontFunc
def getDefaultDialogGeom():
    """Return the default geometry used for dialog backgrounds."""
    return defaultDialogGeom
def getDefaultDialogRelief():
    """Return the default relief (frame style) used for dialogs."""
    return defaultDialogRelief
def setDefaultDialogGeom(newDialogGeom, relief=None):
    """Set the default dialog geometry and, together with it, the relief.
    Note that relief defaults to None (not the previous value) when omitted."""
    global defaultDialogGeom, defaultDialogRelief
    defaultDialogGeom = newDialogGeom
    defaultDialogRelief = relief
def getDefaultDrawOrder():
    """Return the default draw order for DirectGui widgets."""
    return drawOrder
def setDefaultDrawOrder(newDrawOrder):
    """Set the default draw order for DirectGui widgets."""
    global drawOrder
    drawOrder = newDrawOrder
def getDefaultPanel():
    """Return the default panel object."""
    return panel
def setDefaultPanel(newPanel):
    """Set the default panel object."""
    global panel
    panel = newPanel
# snake_case aliases for the camelCase accessors above.
get_default_rollover_sound = getDefaultRolloverSound
set_default_rollover_sound = setDefaultRolloverSound
get_default_click_sound = getDefaultClickSound
set_default_click_sound = setDefaultClickSound
get_default_font = getDefaultFont
set_default_font = setDefaultFont
get_default_dialog_geom = getDefaultDialogGeom
get_default_dialog_relief = getDefaultDialogRelief
set_default_dialog_geom = setDefaultDialogGeom
get_default_draw_order = getDefaultDrawOrder
set_default_draw_order = setDefaultDrawOrder
get_default_panel = getDefaultPanel
set_default_panel = setDefaultPanel
import pytest
from model_bakery import baker
from usaspending_api.references.models import DisasterEmergencyFundCode
from usaspending_api.submissions.models import SubmissionAttributes
from usaspending_api.submissions.models.dabs_submission_window_schedule import DABSSubmissionWindowSchedule
@pytest.fixture
def basic_award(METHOD_NAME, award_count_submission, defc_codes):
    """One award with a single matching File C record (monthly submission)."""
    award = _normal_award(123)
    _faba_for_award(award)
@pytest.fixture
def award_with_quarterly_submission(METHOD_NAME, award_count_quarterly_submission, defc_codes):
    """One award whose File C record belongs to a quarterly submission."""
    award = _normal_award(345)
    _faba_for_award(award)
@pytest.fixture
def award_with_early_submission(defc_codes):
    """One award whose File C record is in an early (FY2020) submission."""
    award = _normal_award(456)
    _award_count_early_submission()
    _faba_for_award(award)
@pytest.fixture
def file_c_with_no_award(defc_codes):
    """Two orphan File C records that do not link to any award."""
    _award_count_early_submission()
    _faba_for_award(None)
    _faba_for_award(None, 2)
@pytest.fixture
def multiple_file_c_to_same_award(METHOD_NAME, award_count_submission, defc_codes):
    """Two obligation-based File C records pointing at the same award."""
    award = _normal_award(874)
    _faba_for_award(award)
    _faba_for_award(award)
@pytest.fixture
def multiple_outlay_file_c_to_same_award(METHOD_NAME, award_count_submission, defc_codes):
    """Two outlay-based File C records pointing at the same award."""
    award = _normal_award(923)
    _faba_for_award(award, outlay_based=True)
    _faba_for_award(award, outlay_based=True)
@pytest.fixture
def multiple_file_c_to_same_award_that_cancel_out(METHOD_NAME, award_count_submission, defc_codes):
    """Two File C records whose obligated amounts sum to zero."""
    award = _normal_award(643)
    _faba_for_award(award)
    _faba_for_award(award, negative=True)
@pytest.fixture
def obligations_incurred_award(METHOD_NAME, award_count_submission, defc_codes):
    """Award with a File C record that has no piid/fain/uri identifiers."""
    award = _normal_award(592)
    baker.make(
        "awards.FinancialAccountsByAwards",
        award=award,
        parent_award_id="obligations award",
        disaster_emergency_fund=DisasterEmergencyFundCode.objects.filter(code="M").first(),
        submission=SubmissionAttributes.objects.all().first(),
        transaction_obligated_amount=8,
    )
@pytest.fixture
def non_matching_defc_award(METHOD_NAME, award_count_submission, defc_codes):
    """Award whose File C record uses DEFC 'A' (not a COVID/disaster code of interest)."""
    award = _normal_award(937)
    baker.make(
        "awards.FinancialAccountsByAwards",
        piid="piid 1",
        parent_award_id="same parent award",
        fain="fain 1",
        uri="uri 1",
        award=award,
        disaster_emergency_fund=DisasterEmergencyFundCode.objects.filter(code="A").first(),
        submission=SubmissionAttributes.objects.all().first(),
        transaction_obligated_amount=7,
    )
@pytest.fixture
def award_count_submission():
    """Monthly FY2022 P8 submission tied to window 20220800."""
    baker.make(
        "submissions.SubmissionAttributes",
        reporting_fiscal_year=2022,
        reporting_fiscal_period=8,
        quarter_format_flag=False,
        is_final_balances_for_fy=True,
        reporting_period_start="2022-04-01",
        submission_window_id=20220800,
    )
def _award_count_early_submission():
    """Create a monthly FY2020 P7 submission (and its window if absent)."""
    if not DABSSubmissionWindowSchedule.objects.filter(
        submission_fiscal_year=2020
    ):  # hack since in some environments these auto-populate
        baker.make(
            "submissions.DABSSubmissionWindowSchedule",
            id=20200700,
            is_quarter=False,
            submission_fiscal_year=2020,
            submission_fiscal_quarter=3,
            submission_fiscal_month=7,
            submission_reveal_date="2020-5-15",
        )
    baker.make(
        "submissions.SubmissionAttributes",
        reporting_fiscal_year=2020,
        reporting_fiscal_period=7,
        quarter_format_flag=False,
        reporting_period_start="2020-04-01",
        submission_window_id=20200700,
    )
@pytest.fixture
def award_count_quarterly_submission():
    """Quarterly FY2022 Q3 submission tied to window 20220801."""
    baker.make(
        "submissions.SubmissionAttributes",
        reporting_fiscal_year=2022,
        reporting_fiscal_quarter=3,
        reporting_fiscal_period=8,
        quarter_format_flag=True,
        is_final_balances_for_fy=True,
        reporting_period_start="2022-04-01",
        submission_window_id=20220801,
    )
@pytest.fixture
def METHOD_NAME():
    """Create the monthly (20220800) and quarterly (20220801) FY2022 Q3
    submission window schedules referenced by the submission fixtures above."""
    baker.make(
        "submissions.DABSSubmissionWindowSchedule",
        id=20220800,
        is_quarter=False,
        submission_fiscal_year=2022,
        submission_fiscal_quarter=3,
        submission_fiscal_month=8,
        submission_reveal_date="2020-5-15",
    )
    baker.make(
        "submissions.DABSSubmissionWindowSchedule",
        id=20220801,
        is_quarter=True,
        submission_fiscal_year=2022,
        submission_fiscal_quarter=3,
        submission_fiscal_month=8,
        submission_reveal_date="2020-5-15",
    )
def _normal_award(id):
    """Create and return a minimal type-A award with the given id."""
    return baker.make("search.AwardSearch", award_id=id, type="A")
def _faba_for_award(award, id=1, negative=False, outlay_based=False):
    """Create one File C (FinancialAccountsByAwards) record for *award*.

    Args:
        award: award row the record points at (None creates an orphan record).
        id: suffix used to build distinct piid/fain/uri values.
        negative: flip the sign of the amount so two records can cancel out.
        outlay_based: put the amount in gross outlays instead of obligations.
    """
    # Hoist the shared sign logic out of the two amount fields.
    amount = -7 if negative else 7
    baker.make(
        "awards.FinancialAccountsByAwards",
        piid=f"piid {id}",
        parent_award_id="same parent award",
        fain=f"fain {id}",
        uri=f"uri {id}",
        award=award,
        disaster_emergency_fund=DisasterEmergencyFundCode.objects.filter(code="M").first(),
        submission=SubmissionAttributes.objects.all().first(),
        transaction_obligated_amount=amount if not outlay_based else 0,
        gross_outlay_amount_by_award_cpe=amount if outlay_based else 0,
        distinct_award_key=f"piid {id}|same parent award|fain {id}|uri {id}".upper(),
    )
from nicegui import ui
from ..documentation_tools import text_demo
def main_demo() -> None:
    """Show a small AG Grid with update, select-all and column-visibility actions."""
    table = ui.aggrid({
        'defaultColDef': {'flex': 1},
        'columnDefs': [
            {'headerName': 'Name', 'field': 'name'},
            {'headerName': 'Age', 'field': 'age'},
            {'headerName': 'Parent', 'field': 'parent', 'hide': True},
        ],
        'rowData': [
            {'name': 'Alice', 'age': 18, 'parent': 'David'},
            {'name': 'Bob', 'age': 21, 'parent': 'Eve'},
            {'name': 'Carol', 'age': 42, 'parent': 'Frank'},
        ],
        'rowSelection': 'multiple',
    }).classes('max-h-40')

    def bump_first_age() -> None:
        # Mutate the row data in place, then push the change to the client.
        table.options['rowData'][0]['age'] += 1
        table.update()

    ui.button('Update', on_click=bump_first_age)
    ui.button('Select all', on_click=lambda: table.call_api_method('selectAll'))
    ui.button('Show parent', on_click=lambda: table.call_column_api_method('setColumnVisible', 'parent', True))
def more() -> None:
    """Register the additional AG Grid demos shown on the documentation page."""
    @text_demo('Select AG Grid Rows', '''
        You can add checkboxes to grid cells to allow the user to select single or multiple rows.
        To retrieve the currently selected rows, use the `get_selected_rows` method.
        This method returns a list of rows as dictionaries.
        If `rowSelection` is set to `'single'` or to get the first selected row,
        you can also use the `get_selected_row` method.
        This method returns a single row as a dictionary or `None` if no row is selected.
        See the [AG Grid documentation](https://www.ag-grid.com/javascript-data-grid/row-selection/#example-single-row-selection) for more information.
    ''')
    def aggrid_with_selectable_rows():
        grid = ui.aggrid({
            'columnDefs': [
                {'headerName': 'Name', 'field': 'name', 'checkboxSelection': True},
                {'headerName': 'Age', 'field': 'age'},
            ],
            'rowData': [
                {'name': 'Alice', 'age': 18},
                {'name': 'Bob', 'age': 21},
                {'name': 'Carol', 'age': 42},
            ],
            'rowSelection': 'multiple',
        }).classes('max-h-40')
        async def output_selected_rows():
            rows = await grid.get_selected_rows()
            if rows:
                for row in rows:
                    ui.notify(f"{row['name']}, {row['age']}")
            else:
                ui.notify('No rows selected.')
        async def output_selected_row():
            row = await grid.get_selected_row()
            if row:
                ui.notify(f"{row['name']}, {row['age']}")
            else:
                ui.notify('No row selected!')
        ui.button('Output selected rows', on_click=output_selected_rows)
        ui.button('Output selected row', on_click=output_selected_row)
    @text_demo('Filter Rows using Mini Filters', '''
        You can add [mini filters](https://ag-grid.com/javascript-data-grid/filter-set-mini-filter/)
        to the header of each column to filter the rows.
        Note how the "agTextColumnFilter" matches individual characters, like "a" in "Alice" and "Carol",
        while the "agNumberColumnFilter" matches the entire number, like "18" and "21", but not "1".
    ''')
    def aggrid_with_minifilters():
        ui.aggrid({
            'columnDefs': [
                {'headerName': 'Name', 'field': 'name', 'filter': 'agTextColumnFilter', 'floatingFilter': True},
                {'headerName': 'Age', 'field': 'age', 'filter': 'agNumberColumnFilter', 'floatingFilter': True},
            ],
            'rowData': [
                {'name': 'Alice', 'age': 18},
                {'name': 'Bob', 'age': 21},
                {'name': 'Carol', 'age': 42},
            ],
        }).classes('max-h-40')
    @text_demo('AG Grid with Conditional Cell Formatting', '''
        This demo shows how to use [cellClassRules](https://www.ag-grid.com/javascript-grid-cell-styles/#cell-class-rules)
        to conditionally format cells based on their values.
    ''')
    def aggrid_with_conditional_cell_formatting():
        ui.aggrid({
            'columnDefs': [
                {'headerName': 'Name', 'field': 'name'},
                {'headerName': 'Age', 'field': 'age', 'cellClassRules': {
                    'bg-red-300': 'x < 21',
                    'bg-green-300': 'x >= 21',
                }},
            ],
            'rowData': [
                {'name': 'Alice', 'age': 18},
                {'name': 'Bob', 'age': 21},
                {'name': 'Carol', 'age': 42},
            ],
        })
    @text_demo('Create Grid from Pandas Dataframe', '''
        You can create an AG Grid from a Pandas Dataframe using the `from_pandas` method.
        This method takes a Pandas Dataframe as input and returns an AG Grid.
    ''')
    def aggrid_from_pandas():
        import pandas as pd
        df = pd.DataFrame(data={'col1': [1, 2], 'col2': [3, 4]})
        ui.aggrid.from_pandas(df).classes('max-h-40')
    @text_demo('Render columns as HTML', '''
        You can render columns as HTML by passing a list of column indices to the `html_columns` argument.
    ''')
    def aggrid_with_html_columns():
        ui.aggrid({
            'columnDefs': [
                {'headerName': 'Name', 'field': 'name'},
                {'headerName': 'URL', 'field': 'url'},
            ],
            'rowData': [
                {'name': 'Google', 'url': '<a href="https://google.com">https://google.com</a>'},
                {'name': 'Facebook', 'url': '<a href="https://facebook.com">https://facebook.com</a>'},
            ],
        }, html_columns=[1])
    @text_demo('Respond to an AG Grid event', '''
        All AG Grid events are passed through to NiceGUI via the AG Grid global listener.
        These events can be subscribed to using the `.on()` method.
    ''')
    def aggrid_respond_to_event():  # fix: was a duplicate of aggrid_with_html_columns above
        ui.aggrid({
            'columnDefs': [
                {'headerName': 'Name', 'field': 'name'},
                {'headerName': 'Age', 'field': 'age'},
            ],
            'rowData': [
                {'name': 'Alice', 'age': 18},
                {'name': 'Bob', 'age': 21},
                {'name': 'Carol', 'age': 42},
            ],
        }).on('cellClicked', lambda event: ui.notify(f'Cell value: {event.args["value"]}'))
    @text_demo('AG Grid with complex objects', '''
        You can use nested complex objects in AG Grid by separating the field names with a period.
        (This is the reason why keys in `rowData` are not allowed to contain periods.)
    ''')
    def METHOD_NAME():
        ui.aggrid({
            'columnDefs': [
                {'headerName': 'First name', 'field': 'name.first'},
                {'headerName': 'Last name', 'field': 'name.last'},
                {'headerName': 'Age', 'field': 'age'}
            ],
            'rowData': [
                {'name': {'first': 'Alice', 'last': 'Adams'}, 'age': 18},
                {'name': {'first': 'Bob', 'last': 'Brown'}, 'age': 21},
                {'name': {'first': 'Carol', 'last': 'Clark'}, 'age': 42},
            ],
        }).classes('max-h-40')
    @text_demo('AG Grid with dynamic row height', '''
        You can set the height of individual rows by passing a function to the `getRowHeight` argument.
    ''')
    def aggrid_with_dynamic_row_height():
        ui.aggrid({
            'columnDefs': [{'field': 'name'}, {'field': 'age'}],
            'rowData': [
                {'name': 'Alice', 'age': '18'},
                {'name': 'Bob', 'age': '21'},
                {'name': 'Carol', 'age': '42'},
            ],
            ':getRowHeight': 'params => params.data.age > 35 ? 50 : 25',
        }).classes('max-h-40')
"""The selenium test."""
# pylint: skip-file
# Generated by Selenium IDE
import json # pylint: disable=import-error disable=unused-import
import time # pylint: disable=import-error disable=unused-import
import pytest # pylint: disable=import-error disable=unused-import
from selenium import webdriver # pylint: disable=import-error
from selenium.webdriver.chrome.options import Options # pylint: disable=import-error
from selenium.webdriver.common.by import By # pylint: disable=import-error
from selenium.webdriver.common.action_chains import ActionChains # pylint: disable=import-error disable=unused-import
from selenium.webdriver.support import expected_conditions # pylint: disable=import-error disable=unused-import
from selenium.webdriver.support.wait import WebDriverWait # pylint: disable=import-error disable=unused-import
from selenium.webdriver.common.keys import Keys # pylint: disable=import-error disable=unused-import
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities # pylint: disable=import-error disable=unused-import
class TestPrometheus():
    """Browser smoke test that drives a Prometheus /graph page with Selenium:
    it submits two expression queries and checks the metric names appear."""
    def setup_method(self):
        """Start a headless Chrome driver before each test."""
        chrome_options = Options()
        chrome_options.add_argument("--headless")
        chrome_options.add_argument('--disable-dev-shm-usage')
        chrome_options.add_argument("disable-infobars")
        chrome_options.add_argument("--disable-extensions")
        chrome_options.add_argument("--disable-gpu")
        chrome_options.add_argument("--no-sandbox")
        self.driver = webdriver.Chrome(
            options=chrome_options)  # pylint: disable=attribute-defined-outside-init
        self.driver.implicitly_wait(10)
    def teardown_method(self, method):  # pylint: disable=unused-argument
        """Quit the driver after each test."""
        self.driver.quit()
    def METHOD_NAME(self, params):
        """Open the Prometheus UI, run two queries, and assert the results.

        Args:
            params: dict with "server" and "port" of the Prometheus instance.
        """
        # Test name: s1
        # Step # | name | target | value
        # 1 | open |
        # /graph?g0.expr=&g0.tab=1&g0.stacked=0&g0.show_exemplars=0&g0.range_input=1h
        # |
        self.driver.get(
            "http://{}:{}/".format(
                params["server"],
                params["port"]))  # pylint: disable=consider-using-f-string
        # 2 | setWindowSize | 1200x859 |
        self.driver.set_window_size(1200, 859)
        # 3 | click | css=.cm-line |
        self.driver.find_element(By.CSS_SELECTOR, ".cm-line").click()
        # 4 | click | css=.execute-btn |
        self.driver.find_element(By.CSS_SELECTOR, ".execute-btn").click()
        # 5 | click | css=.cm-line |
        self.driver.find_element(By.CSS_SELECTOR, ".cm-line").click()
        # 6 | editContent | css=.cm-content | <div
        # class="cm-line">request_count_total</div>
        element = self.driver.find_element(By.CSS_SELECTOR, ".cm-content")
        self.driver.execute_script(
            "if(arguments[0].contentEditable === 'true') {arguments[0].innerText = '<div class=\"cm-line\">request_count_total</div>'}",
            element)  # pylint: disable=line-too-long
        # 7 | click | css=.execute-btn |
        self.driver.find_element(By.CSS_SELECTOR, ".execute-btn").click()
        # search for the text on the page now
        assert "request_count_total" in self.driver.page_source
        # 8 | editContent | css=.cm-content | <div
        # class="cm-line">request_latency_seconds_bucket</div> # pylint:
        # disable=line-too-long
        element = self.driver.find_element(By.CSS_SELECTOR, ".cm-content")
        self.driver.execute_script(
            "if(arguments[0].contentEditable === 'true') {arguments[0].innerText = '<div class=\"cm-line\">request_latency_seconds_bucket</div>'}",
            element)
        # 9 | click | css=.execute-btn |
        self.driver.find_element(By.CSS_SELECTOR, ".execute-btn").click()
        # search for the text on the page
        assert "request_latency_seconds_bucket" in self.driver.page_source
from numba import cuda, njit
from numba.core.extending import overload
from numba.cuda.testing import CUDATestCase, skip_on_cudasim, unittest
import numpy as np
# Dummy function definitions to overload
# (pure placeholders; the real implementations are registered below with
# @overload for the 'generic' and/or 'cuda' targets)
def generic_func_1():
    pass
def cuda_func_1():
    pass
def generic_func_2():
    pass
def cuda_func_2():
    pass
def generic_calls_generic():
    pass
def generic_calls_cuda():
    pass
def cuda_calls_generic():
    pass
def cuda_calls_cuda():
    pass
def target_overloaded():
    pass
def generic_calls_target_overloaded():
    pass
def cuda_calls_target_overloaded():
    pass
def target_overloaded_calls_target_overloaded():
    pass
# To recognise which functions are resolved for a call, we identify each with a
# prime number. Each function called multiplies a value by its prime (starting
# with the value 1), and we can check that the result is as expected based on
# the final value after all multiplications.
GENERIC_FUNCTION_1 = 2
CUDA_FUNCTION_1 = 3
GENERIC_FUNCTION_2 = 5
CUDA_FUNCTION_2 = 7
GENERIC_CALLS_GENERIC = 11
GENERIC_CALLS_CUDA = 13
CUDA_CALLS_GENERIC = 17
CUDA_CALLS_CUDA = 19
GENERIC_TARGET_OL = 23
CUDA_TARGET_OL = 29
GENERIC_CALLS_TARGET_OL = 31
CUDA_CALLS_TARGET_OL = 37
GENERIC_TARGET_OL_CALLS_TARGET_OL = 41
CUDA_TARGET_OL_CALLS_TARGET_OL = 43
# Overload implementations
@overload(generic_func_1, target='generic')
def ol_generic_func_1(x):
    def impl(x):
        x[0] *= GENERIC_FUNCTION_1
    return impl
# NOTE: METHOD_NAME is the CUDA-target overload of cuda_func_1.
@overload(cuda_func_1, target='cuda')
def METHOD_NAME(x):
    def impl(x):
        x[0] *= CUDA_FUNCTION_1
    return impl
@overload(generic_func_2, target='generic')
def ol_generic_func_2(x):
    def impl(x):
        x[0] *= GENERIC_FUNCTION_2
    return impl
# NOTE(review): name lacks the `_2` suffix used elsewhere (this overloads
# cuda_func_2).
@overload(cuda_func_2, target='cuda')
def ol_cuda_func(x):
    def impl(x):
        x[0] *= CUDA_FUNCTION_2
    return impl
# Overloads whose implementations call other overloaded functions, to
# exercise target resolution through nested calls.
@overload(generic_calls_generic, target='generic')
def ol_generic_calls_generic(x):
    def impl(x):
        x[0] *= GENERIC_CALLS_GENERIC
        generic_func_1(x)
    return impl
@overload(generic_calls_cuda, target='generic')
def ol_generic_calls_cuda(x):
    def impl(x):
        x[0] *= GENERIC_CALLS_CUDA
        cuda_func_1(x)
    return impl
@overload(cuda_calls_generic, target='cuda')
def ol_cuda_calls_generic(x):
    def impl(x):
        x[0] *= CUDA_CALLS_GENERIC
        generic_func_1(x)
    return impl
@overload(cuda_calls_cuda, target='cuda')
def ol_cuda_calls_cuda(x):
    def impl(x):
        x[0] *= CUDA_CALLS_CUDA
        cuda_func_1(x)
    return impl
# target_overloaded has one overload per target; which one runs tells us
# which target was selected during compilation.
@overload(target_overloaded, target='generic')
def ol_target_overloaded_generic(x):
    def impl(x):
        x[0] *= GENERIC_TARGET_OL
    return impl
@overload(target_overloaded, target='cuda')
def ol_target_overloaded_cuda(x):
    def impl(x):
        x[0] *= CUDA_TARGET_OL
    return impl
@overload(generic_calls_target_overloaded, target='generic')
def ol_generic_calls_target_overloaded(x):
    def impl(x):
        x[0] *= GENERIC_CALLS_TARGET_OL
        target_overloaded(x)
    return impl
@overload(cuda_calls_target_overloaded, target='cuda')
def ol_cuda_calls_target_overloaded(x):
    def impl(x):
        x[0] *= CUDA_CALLS_TARGET_OL
        target_overloaded(x)
    return impl
# NOTE(review): the next two function names say "generic_calls_..." but they
# actually overload target_overloaded_calls_target_overloaded (for the
# 'generic' and 'cuda' targets respectively) -- the decorator, not the name,
# determines registration.
@overload(target_overloaded_calls_target_overloaded, target='generic')
def ol_generic_calls_target_overloaded_generic(x):
    def impl(x):
        x[0] *= GENERIC_TARGET_OL_CALLS_TARGET_OL
        target_overloaded(x)
    return impl
@overload(target_overloaded_calls_target_overloaded, target='cuda')
def ol_generic_calls_target_overloaded_cuda(x):
    def impl(x):
        x[0] *= CUDA_TARGET_OL_CALLS_TARGET_OL
        target_overloaded(x)
    return impl
@skip_on_cudasim('Overloading not supported in cudasim')
class TestOverload(CUDATestCase):
    """Check that @overload resolution picks the right target ('generic' vs
    'cuda') for direct calls, nested calls, and functions overloaded for
    both targets.  Each implementation multiplies x[0] by a distinct prime,
    so the final product uniquely identifies which overloads ran."""
    def check_overload(self, kernel, expected):
        # Compile `kernel` for CUDA, launch it on one thread, and verify
        # the prime product written into x[0].
        x = np.ones(1, dtype=np.int32)
        cuda.jit(kernel)[1, 1](x)
        self.assertEqual(x[0], expected)
    def check_overload_cpu(self, kernel, expected):
        # Same check, but compiled with njit on the CPU target.
        x = np.ones(1, dtype=np.int32)
        njit(kernel)(x)
        self.assertEqual(x[0], expected)
    def test_generic(self):
        def kernel(x):
            generic_func_1(x)
        expected = GENERIC_FUNCTION_1
        self.check_overload(kernel, expected)
    def test_cuda(self):
        def kernel(x):
            cuda_func_1(x)
        expected = CUDA_FUNCTION_1
        self.check_overload(kernel, expected)
    def test_generic_and_cuda(self):
        def kernel(x):
            generic_func_1(x)
            cuda_func_1(x)
        expected = GENERIC_FUNCTION_1 * CUDA_FUNCTION_1
        self.check_overload(kernel, expected)
    def test_call_two_generic_calls(self):
        def kernel(x):
            generic_func_1(x)
            generic_func_2(x)
        expected = GENERIC_FUNCTION_1 * GENERIC_FUNCTION_2
        self.check_overload(kernel, expected)
    def test_call_two_cuda_calls(self):
        def kernel(x):
            cuda_func_1(x)
            cuda_func_2(x)
        expected = CUDA_FUNCTION_1 * CUDA_FUNCTION_2
        self.check_overload(kernel, expected)
    def test_generic_calls_generic(self):
        def kernel(x):
            generic_calls_generic(x)
        expected = GENERIC_CALLS_GENERIC * GENERIC_FUNCTION_1
        self.check_overload(kernel, expected)
    def test_generic_calls_cuda(self):
        def kernel(x):
            generic_calls_cuda(x)
        expected = GENERIC_CALLS_CUDA * CUDA_FUNCTION_1
        self.check_overload(kernel, expected)
    def test_cuda_calls_generic(self):
        def kernel(x):
            cuda_calls_generic(x)
        expected = CUDA_CALLS_GENERIC * GENERIC_FUNCTION_1
        self.check_overload(kernel, expected)
    def test_cuda_calls_cuda(self):
        def kernel(x):
            cuda_calls_cuda(x)
        expected = CUDA_CALLS_CUDA * CUDA_FUNCTION_1
        self.check_overload(kernel, expected)
    def test_call_target_overloaded(self):
        def kernel(x):
            target_overloaded(x)
        expected = CUDA_TARGET_OL
        self.check_overload(kernel, expected)
    def test_generic_calls_target_overloaded(self):
        def kernel(x):
            generic_calls_target_overloaded(x)
        expected = GENERIC_CALLS_TARGET_OL * CUDA_TARGET_OL
        self.check_overload(kernel, expected)
    def test_cuda_calls_target_overloaded(self):
        def kernel(x):
            cuda_calls_target_overloaded(x)
        expected = CUDA_CALLS_TARGET_OL * CUDA_TARGET_OL
        self.check_overload(kernel, expected)
    def test_target_overloaded_calls_target_overloaded(self):
        def kernel(x):
            target_overloaded_calls_target_overloaded(x)
        # Check the CUDA overloads are used on CUDA
        expected = CUDA_TARGET_OL_CALLS_TARGET_OL * CUDA_TARGET_OL
        self.check_overload(kernel, expected)
        # Also check that the CPU overloads are used on the CPU
        expected = GENERIC_TARGET_OL_CALLS_TARGET_OL * GENERIC_TARGET_OL
        self.check_overload_cpu(kernel, expected)
if __name__ == '__main__':
    unittest.main()
# Copyright (c) 2012-2022 by the GalSim developers team on GitHub
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
# https://github.com/GalSim-developers/GalSim
#
# GalSim is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
#
from .input import InputLoader, GetInputObj, RegisterInputType
from .value import GetCurrentValue, CheckAllParams, GetAllParams, RegisterValueType
from .util import LoggerWrapper
from ..errors import GalSimConfigError, GalSimConfigValueError
from ..shear import Shear
from ..nfw_halo import NFWHalo
# This file adds input type nfw_halo and value types NFWHaloShear and NFWHaloMagnification.
class NFWLoader(InputLoader):
    """Input loader for NFWHalo objects (config input type 'nfw_halo')."""
    def METHOD_NAME(self, input_obj, config, base, logger=None):
        """Post-load hook: stash a wrapped logger on the halo object."""
        # Just attach the logger to the input_obj so we can use it when evaluating values.
        input_obj.logger = LoggerWrapper(logger)
# Register this as a valid input type
RegisterInputType('nfw_halo', NFWLoader(NFWHalo))
def _GenerateFromNFWHaloShear(config, base, value_type):
    """Return a shear calculated from an NFWHalo object.

    Requires base['uv_pos'] and gal.redshift to be defined; raises
    GalSimConfigError otherwise.  Returns (shear, False), where False means
    the value is not safe to reuse for other objects.
    """
    nfw_halo = GetInputObj('nfw_halo', config, base, 'NFWHaloShear')
    logger = nfw_halo.logger
    if 'uv_pos' not in base:
        raise GalSimConfigError("NFWHaloShear requested, but no position defined.")
    pos = base['uv_pos']
    if 'gal' not in base or 'redshift' not in base['gal']:
        raise GalSimConfigError("NFWHaloShear requested, but no gal.redshift defined.")
    redshift = GetCurrentValue('redshift', base['gal'], float, base)
    # There aren't any parameters for this, so just make sure num is the only (optional)
    # one present.
    CheckAllParams(config, opt={ 'num' : int })
    g1,g2 = nfw_halo.getShear(pos,redshift)
    try:
        shear = Shear(g1=g1,g2=g2)
    except Exception:  # fix: exception variable was bound but never used
        # An invalid shear (e.g. |g| > 1) falls back to zero shear with a
        # warning rather than aborting the whole simulation.
        logger.warning('obj %d: Warning: NFWHalo shear (g1=%f, g2=%f) is invalid. '%(
            base['obj_num'],g1,g2) + 'Using shear = 0.')
        shear = Shear(g1=0,g2=0)
    logger.debug('obj %d: NFWHalo shear = %s',base['obj_num'],shear)
    return shear, False
def _GenerateFromNFWHaloMagnification(config, base, value_type):
    """Return a magnification calculated from an NFWHalo object.

    Requires ``base['uv_pos']`` and ``gal.redshift``; the magnification is
    capped at the optional ``max_mu`` parameter (default 25).
    """
    halo = GetInputObj('nfw_halo', config, base, 'NFWHaloMagnification')
    log = halo.logger

    # Guard clauses: both position and redshift must already be set up.
    if 'uv_pos' not in base:
        raise GalSimConfigError("NFWHaloMagnification requested, but no position defined.")
    if 'gal' not in base or 'redshift' not in base['gal']:
        raise GalSimConfigError("NFWHaloMagnification requested, but no gal.redshift defined.")

    where = base['uv_pos']
    z = GetCurrentValue('redshift', base['gal'], float, base)

    params = GetAllParams(config, base, opt={ 'max_mu' : float, 'num' : int })[0]
    mu_cap = params.get('max_mu', 25.)
    # (kept as `not ... > 0.` so NaN handling matches the original)
    if not mu_cap > 0.:
        raise GalSimConfigValueError(
            "Invalid max_mu for type = NFWHaloMagnification (must be > 0)", mu_cap)

    mu = halo.getMagnification(where, z)
    if mu < 0 or mu > mu_cap:
        log.warning('obj %d: Warning: NFWHalo mu = %f means strong lensing. '%(
            base['obj_num'],mu) + 'Using mu = %f'%mu_cap)
        mu = mu_cap
    log.debug('obj %d: NFWHalo mu = %s',base['obj_num'],mu)
    return mu, False
# Register these as valid value types
RegisterValueType('NFWHaloShear', _GenerateFromNFWHaloShear, [ Shear ],
                  input_type='nfw_halo')
RegisterValueType('NFWHaloMagnification', _GenerateFromNFWHaloMagnification, [ float ],
                  input_type='nfw_halo')
"""
Copyright 2022 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import annotations
import json
from datetime import datetime
from typing import List
from magma.db_service.models import DBCbsd, DBGrant, DBRequest
class DBCbsdBuilder:
    """Fluent test-data builder for DBCbsd rows and their child rows.

    Every ``with_*`` / state method mutates the wrapped DBCbsd and returns
    ``self`` so calls can be chained; ``build()`` yields the finished row.
    """

    def __init__(self):
        self.cbsd = DBCbsd()

    def build(self) -> DBCbsd:
        """Return the constructed DBCbsd instance."""
        return self.cbsd

    def deleted(self) -> DBCbsdBuilder:
        # Mark the CBSD row as deleted.
        self.cbsd.is_deleted = True
        return self

    def updated(self) -> DBCbsdBuilder:
        # Flag the CBSD for deregistration.
        self.cbsd.should_deregister = True
        return self

    def relinquished(self) -> DBCbsdBuilder:
        # Flag the CBSD for relinquishment.
        self.cbsd.should_relinquish = True
        return self

    def with_id(self, db_id: int) -> DBCbsdBuilder:
        self.cbsd.id = db_id
        return self

    def with_state(self, state_id: int) -> DBCbsdBuilder:
        self.cbsd.state_id = state_id
        return self

    def with_registration(self, prefix: str) -> DBCbsdBuilder:
        # All registration identifiers are derived from the same prefix.
        self.cbsd.cbsd_id = f'{prefix}_cbsd_id'
        self.cbsd.user_id = f'{prefix}_user_id'
        self.cbsd.fcc_id = f'{prefix}_fcc_id'
        self.cbsd.cbsd_serial_number = f'{prefix}_serial_number'
        return self

    def with_eirp_capabilities(
        self,
        min_power: float, max_power: float,
        no_ports: int,
    ) -> DBCbsdBuilder:
        self.cbsd.min_power = min_power
        self.cbsd.max_power = max_power
        self.cbsd.number_of_ports = no_ports
        return self

    def with_single_step_enabled(self) -> DBCbsdBuilder:
        self.cbsd.single_step_enabled = True
        return self

    def with_category(self, category: str) -> DBCbsdBuilder:
        self.cbsd.cbsd_category = category
        return self

    def with_antenna_gain(
        self,
        antenna_gain_dbi: float,
    ) -> DBCbsdBuilder:
        self.cbsd.antenna_gain = antenna_gain_dbi
        return self

    def with_installation_params(
        self,
        latitude_deg: float,
        longitude_deg: float,
        height_m: float,
        height_type: str,
        indoor_deployment: bool,
    ) -> DBCbsdBuilder:
        self.cbsd.latitude_deg = latitude_deg
        self.cbsd.longitude_deg = longitude_deg
        self.cbsd.height_m = height_m
        self.cbsd.height_type = height_type
        self.cbsd.indoor_deployment = indoor_deployment
        return self

    def with_last_seen(self, last_seen: int) -> DBCbsdBuilder:
        # Input is a unix timestamp; stored as a datetime.
        self.cbsd.last_seen = datetime.fromtimestamp(last_seen)
        return self

    def with_desired_state(self, desired_state_id: int) -> DBCbsdBuilder:
        self.cbsd.desired_state_id = desired_state_id
        return self

    def with_preferences(self, bandwidth_mhz: int, frequencies_mhz: List[int]) -> DBCbsdBuilder:
        self.cbsd.preferred_bandwidth_mhz = bandwidth_mhz
        self.cbsd.preferred_frequencies_mhz = frequencies_mhz
        return self

    def with_available_frequencies(self, frequencies: List[int]) -> DBCbsdBuilder:
        self.cbsd.available_frequencies = frequencies
        return self

    def METHOD_NAME(self, enabled: bool) -> DBCbsdBuilder:
        # Toggle carrier-aggregation support.
        self.cbsd.carrier_aggregation_enabled = enabled
        return self

    def with_max_ibw(self, max_ibw_mhz: int) -> DBCbsdBuilder:
        self.cbsd.max_ibw_mhz = max_ibw_mhz
        return self

    def with_grant_redundancy(self, enabled: bool) -> DBCbsdBuilder:
        self.cbsd.grant_redundancy = enabled
        return self

    def with_grant(
        self,
        grant_id: str,
        state_id: int,
        hb_interval_sec: int,
        last_hb_timestamp: int = None,
        low_frequency: int = 3500,
        high_frequency: int = 3700,
    ) -> DBCbsdBuilder:
        """Attach a DBGrant child row (heartbeat timestamp optional)."""
        last_hb_time = datetime.fromtimestamp(
            last_hb_timestamp,
        ) if last_hb_timestamp else None
        grant = DBGrant(
            grant_id=grant_id,
            state_id=state_id,
            heartbeat_interval=hb_interval_sec,
            last_heartbeat_request_time=last_hb_time,
            low_frequency=low_frequency,
            high_frequency=high_frequency,
            max_eirp=0,
        )
        self.cbsd.grants.append(grant)
        return self

    def with_channel(
        self,
        low: int, high: int,
        max_eirp: float = None,
    ) -> DBCbsdBuilder:
        """Append a channel dict to the CBSD's channel list."""
        if not self.cbsd.channels:
            # Default is set on commit, so it might be None at this point.
            self.cbsd.channels = []
        channel = {
            "low_frequency": low,
            "high_frequency": high,
            "max_eirp": max_eirp,
        }
        # Reassigns a new list instead of appending in place -- presumably so
        # the ORM's change tracking notices the update; confirm.
        self.cbsd.channels = self.cbsd.channels + [channel]
        return self

    def with_request(self, type_id: int, payload: str) -> DBCbsdBuilder:
        """Attach a DBRequest child row; *payload* is a JSON string."""
        request = DBRequest(
            type_id=type_id,
            payload=json.loads(payload),
        )
        self.cbsd.requests.append(request)
        return self
import json
from unittest.mock import Mock
import pytest
import requests
import requests_mock
from modules.devices.tesla import bat
from modules.devices.tesla.device import Device, Tesla
from modules.common.component_state import BatState
from modules.devices.tesla.config import TeslaConfiguration
from test_utils.mock_ramdisk import MockRamdisk
sample_soe_json = """{"percentage":69.16}"""
sample_aggregates_json = """
{
"site":{
"last_communication_time":"2018-04-02T16:11:41.885377469-07:00",
"instant_power":-21.449996948242188,
"instant_reactive_power":-138.8300018310547,
"instant_apparent_power":140.47729986545957,
"frequency":60.060001373291016,
"energy_exported":1136916.6875,
"energy_imported":3276432.6625,
"instant_average_voltage":239.81999969482422,
"instant_total_current":0,
"i_a_current":0,
"i_b_current":0,
"i_c_current":0
},
"battery":{
"last_communication_time":"2018-04-02T16:11:41.89022247-07:00",
"instant_power":-2350,
"instant_reactive_power":0,
"instant_apparent_power":2350,
"frequency":60.033,
"energy_exported":1169030,
"energy_imported":1638140,
"instant_average_voltage":239.10000000000002,
"instant_total_current":45.8,
"i_a_current":0,
"i_b_current":0,
"i_c_current":0
},
"load":{
"last_communication_time":"2018-04-02T16:11:41.885377469-07:00",
"instant_power":1546.2712597712405,
"instant_reactive_power":-71.43153973801415,
"instant_apparent_power":1547.920305979569,
"frequency":60.060001373291016,
"energy_exported":0,
"energy_imported":7191016.994444443,
"instant_average_voltage":239.81999969482422,
"instant_total_current":6.44763264839839,
"i_a_current":0,
"i_b_current":0,
"i_c_current":0
},
"solar":{
"last_communication_time":"2018-04-02T16:11:41.885541803-07:00",
"instant_power":3906.1700439453125,
"instant_reactive_power":53.26999855041504,
"instant_apparent_power":3906.533259164868,
"frequency":60.060001373291016,
"energy_exported":5534272.949724403,
"energy_imported":13661.930279959455,
"instant_average_voltage":239.8699951171875,
"instant_total_current":0,
"i_a_current":0,
"i_b_current":0,
"i_c_current":0
},
"busway":{
"last_communication_time":"0001-01-01T00:00:00Z",
"instant_power":0,
"instant_reactive_power":0,
"instant_apparent_power":0,
"frequency":0,
"energy_exported":0,
"energy_imported":0,
"instant_average_voltage":0,
"instant_total_current":0,
"i_a_current":0,
"i_b_current":0,
"i_c_current":0
},
"frequency":{
"last_communication_time":"0001-01-01T00:00:00Z",
"instant_power":0,
"instant_reactive_power":0,
"instant_apparent_power":0,
"frequency":0,
"energy_exported":0,
"energy_imported":0,
"instant_average_voltage":0,
"instant_total_current":0,
"i_a_current":0,
"i_b_current":0,
"i_c_current":0
},
"generator":{
"last_communication_time":"0001-01-01T00:00:00Z",
"instant_power":0,
"instant_reactive_power":0,
"instant_apparent_power":0,
"frequency":0,
"energy_exported":0,
"energy_imported":0,
"instant_average_voltage":0,
"instant_total_current":0,
"i_a_current":0,
"i_b_current":0,
"i_c_current":0
}
}"""
def METHOD_NAME() -> Device:
    """Build a Tesla Device with one battery component for the tests."""
    config = Tesla(configuration=TeslaConfiguration(
        ip_address="sample-address",
        email="sample@mail.com",
        password="some password"))
    device = Device(config)
    device.add_component(bat.component_descriptor.configuration_factory())
    return device
def match_cookie_ok(request: requests.PreparedRequest):
    # Matches only requests that carry the expected auth cookie.
    cookie_header = request.headers['Cookie']
    return "AuthCookie=auth-cookie" in cookie_header
def match_cookie_reject(request: requests.PreparedRequest):
    # Complement of match_cookie_ok: requests without the valid cookie.
    return not match_cookie_ok(request)
@pytest.fixture
def mock_ramdisk(monkeypatch):
    # In-memory replacement for the ramdisk used to cache the auth cookie.
    return MockRamdisk(monkeypatch)
# Base URL of the (mocked) powerwall API and the cookie-cache file name.
API_URL = "https://sample-address/api"
COOKIE_FILE_NAME = "powerwall_cookie.txt"
def assert_battery_state_correct(state: BatState):
    # Expected values mirror sample_soe_json / the "battery" section of
    # sample_aggregates_json above.
    # NOTE(review): instant_power is -2350 in the sample but 2350 is
    # expected here -- presumably the device code flips the sign; confirm.
    assert state.soc == 69.16
    assert state.power == 2350
    assert state.imported == 1638140
    assert state.exported == 1169030
def test_powerwall_update_if_cookie_cached(monkeypatch, requests_mock: requests_mock.Mocker, mock_ramdisk: MockRamdisk):
    """With a valid cached cookie the update succeeds without a re-login."""
    # setup
    mock_bat_value_store = Mock()
    monkeypatch.setattr(bat, "get_bat_value_store", Mock(return_value=mock_bat_value_store))
    # Both endpoints answer only when the cached auth cookie is presented.
    requests_mock.get("https://sample-address/api/meters/aggregates", text=sample_aggregates_json,
                      additional_matcher=match_cookie_ok)
    requests_mock.get("https://sample-address/api/system_status/soe", text=sample_soe_json,
                      additional_matcher=match_cookie_ok)
    mock_ramdisk[COOKIE_FILE_NAME] = """{"AuthCookie": "auth-cookie", "UserRecord": "user-record"}"""

    # execution
    METHOD_NAME().update()

    # evaluation
    assert_battery_state_correct(mock_bat_value_store.set.call_args[0][0])
@pytest.mark.parametrize(
    "cookie_file", [
        pytest.param("""{"AuthCookie": "reject-me", "UserRecord": "user-record"}""", id="expired cookie"),
        pytest.param("""{this is not valid json}""", id="garbage file"),
        pytest.param(None, id="no cookie file")
    ]
)
def test_powerwall_update_retrieves_new_cookie_if_cookie_rejected(monkeypatch,
                                                                  requests_mock: requests_mock.Mocker,
                                                                  mock_ramdisk: MockRamdisk,
                                                                  cookie_file: str):
    """A rejected/missing cookie must trigger a login and a cache refresh."""
    # setup
    mock_bat_value_store = Mock()
    monkeypatch.setattr(bat, "get_bat_value_store", Mock(return_value=mock_bat_value_store))
    # Login endpoint hands out the valid cookie; data endpoints 401 until it is used.
    requests_mock.post(API_URL + "/login/Basic", cookies={"AuthCookie": "auth-cookie", "UserRecord": "user-record"})
    requests_mock.get(API_URL + "/meters/aggregates", status_code=401, additional_matcher=match_cookie_reject)
    requests_mock.get(API_URL + "/system_status/soe", status_code=401, additional_matcher=match_cookie_reject)
    requests_mock.get(API_URL + "/meters/aggregates", text=sample_aggregates_json, additional_matcher=match_cookie_ok)
    requests_mock.get(API_URL + "/system_status/soe", text=sample_soe_json, additional_matcher=match_cookie_ok)
    if cookie_file is not None:
        mock_ramdisk[COOKIE_FILE_NAME] = cookie_file

    # execution
    METHOD_NAME().update()

    # evaluation: the fresh cookie was written back to the cache file.
    assert json.loads(mock_ramdisk[COOKIE_FILE_NAME]) == {"AuthCookie": "auth-cookie", "UserRecord": "user-record"}
    assert_battery_state_correct(mock_bat_value_store.set.call_args[0][0])
import unittest
import builtins
import warnings
import os
from platform import system as platform_system
class ExceptionClassTests(unittest.TestCase):

    """Tests for anything relating to exception objects themselves (e.g.,
    inheritance hierarchy)"""

    def test_builtins_new_style(self):
        # All built-in exceptions are new-style classes (inherit object).
        self.assertTrue(issubclass(Exception, object))

    def METHOD_NAME(self, ins):
        """Check that instance *ins* exposes the minimal exception interface."""
        for attr in ("args", "__str__", "__repr__"):
            self.assertTrue(hasattr(ins, attr),
                            "%s missing %s attribute" %
                            (ins.__class__.__name__, attr))

    def test_inheritance(self):
        # Make sure the inheritance hierarchy matches the documentation
        exc_set = set()
        for object_ in builtins.__dict__.values():
            try:
                if issubclass(object_, BaseException):
                    exc_set.add(object_.__name__)
            except TypeError:
                # Non-class builtins cannot be passed to issubclass.
                pass

        inheritance_tree = open(os.path.join(os.path.split(__file__)[0],
                                             'exception_hierarchy.txt'))
        try:
            superclass_name = inheritance_tree.readline().rstrip()
            try:
                last_exc = getattr(builtins, superclass_name)
            except AttributeError:
                self.fail("base class %s not a built-in" % superclass_name)
            self.assertIn(superclass_name, exc_set,
                          '%s not found' % superclass_name)
            exc_set.discard(superclass_name)
            superclasses = []  # Loop will insert base exception
            last_depth = 0
            for exc_line in inheritance_tree:
                exc_line = exc_line.rstrip()
                # Depth in the tree is encoded by the position of the last '-'.
                depth = exc_line.rindex('-')
                exc_name = exc_line[depth+2:]  # Slice past space
                if '(' in exc_name:
                    # Parenthesised suffix names the platform the exception
                    # exists on; skip it elsewhere.
                    paren_index = exc_name.index('(')
                    platform_name = exc_name[paren_index+1:-1]
                    exc_name = exc_name[:paren_index-1]  # Slice off space
                    if platform_system() != platform_name:
                        exc_set.discard(exc_name)
                        continue
                if '[' in exc_name:
                    left_bracket = exc_name.index('[')
                    exc_name = exc_name[:left_bracket-1]  # cover space
                try:
                    exc = getattr(builtins, exc_name)
                except AttributeError:
                    self.fail("%s not a built-in exception" % exc_name)
                if last_depth < depth:
                    superclasses.append((last_depth, last_exc))
                elif last_depth > depth:
                    while superclasses[-1][0] >= depth:
                        superclasses.pop()
                self.assertTrue(issubclass(exc, superclasses[-1][1]),
                                "%s is not a subclass of %s" % (exc.__name__,
                                                                superclasses[-1][1].__name__))
                try:  # Some exceptions require arguments; just skip them
                    self.METHOD_NAME(exc())
                except TypeError:
                    pass
                self.assertIn(exc_name, exc_set)
                exc_set.discard(exc_name)
                last_exc = exc
                last_depth = depth
        finally:
            inheritance_tree.close()
        self.assertEqual(len(exc_set), 0, "%s not accounted for" % exc_set)

    # Names of the checks performed by interface_test_driver, in order.
    interface_tests = ("length", "args", "str", "repr")

    def interface_test_driver(self, results):
        # Each entry of *results* is a [given, expected] pair, paired
        # positionally with interface_tests for the failure message.
        for test_name, (given, expected) in zip(self.interface_tests, results):
            self.assertEqual(given, expected, "%s: %s != %s" % (test_name,
                                                                given, expected))

    def test_interface_single_arg(self):
        # Make sure interface works properly when given a single argument
        arg = "spam"
        exc = Exception(arg)
        results = ([len(exc.args), 1], [exc.args[0], arg],
                   [str(exc), str(arg)],
                   [repr(exc), exc.__class__.__name__ + repr(exc.args)])
        self.interface_test_driver(results)

    def test_interface_multi_arg(self):
        # Make sure interface correct when multiple arguments given
        arg_count = 3
        args = tuple(range(arg_count))
        exc = Exception(*args)
        results = ([len(exc.args), arg_count], [exc.args, args],
                   [str(exc), str(args)],
                   [repr(exc), exc.__class__.__name__ + repr(exc.args)])
        self.interface_test_driver(results)

    def test_interface_no_arg(self):
        # Make sure that with no args that interface is correct
        exc = Exception()
        results = ([len(exc.args), 0], [exc.args, tuple()],
                   [str(exc), ''],
                   [repr(exc), exc.__class__.__name__ + '()'])
        self.interface_test_driver(results)
class UsageTests(unittest.TestCase):

    """Test usage of exceptions"""

    def raise_fails(self, object_):
        """Make sure that raising 'object_' triggers a TypeError."""
        try:
            raise object_
        except TypeError:
            return  # What is expected.
        self.fail("TypeError expected for raising %s" % type(object_))

    def catch_fails(self, object_):
        """Catching 'object_' should raise a TypeError."""
        # Bare form: `except object_:` must raise TypeError.
        try:
            try:
                raise Exception
            except object_:
                pass
        except TypeError:
            pass
        except Exception:
            self.fail("TypeError expected when catching %s" % type(object_))

        # Tuple form: `except (object_,):` must also raise TypeError.
        try:
            try:
                raise Exception
            except (object_,):
                pass
        except TypeError:
            return
        except Exception:
            self.fail("TypeError expected when catching %s as specified in a "
                      "tuple" % type(object_))

    def test_raise_new_style_non_exception(self):
        # You cannot raise a new-style class that does not inherit from
        # BaseException; the ability was not possible until BaseException's
        # introduction so no need to support new-style objects that do not
        # inherit from it.
        class NewStyleClass(object):
            pass
        self.raise_fails(NewStyleClass)
        self.raise_fails(NewStyleClass())

    def test_raise_string(self):
        # Raising a string raises TypeError.
        self.raise_fails("spam")

    def test_catch_non_BaseException(self):
        # Trying to catch an object that does not inherit from BaseException
        # is not allowed.
        class NonBaseException(object):
            pass
        self.catch_fails(NonBaseException)
        self.catch_fails(NonBaseException())

    def test_catch_BaseException_instance(self):
        # Catching an instance of a BaseException subclass won't work.
        self.catch_fails(BaseException())

    def test_catch_string(self):
        # Catching a string is bad.
        self.catch_fails("spam")
if __name__ == '__main__':
    unittest.main()
from bayes_opt import BayesianOptimization
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib import mlab
from matplotlib import gridspec
# https://github.com/fmfn/BayesianOptimization/issues/18
def unique_rows(a):
    """Return a boolean mask selecting the first occurrence of each row.

    Repeated rows can appear while optimizing and break the sklearn GP
    object, so they need to be masked out.

    :param a: array to trim repeated rows from
    :return: mask of unique rows
    """
    # Sort the rows lexicographically, remembering how to undo the shuffle.
    order = np.lexsort(a.T)
    inverse = np.argsort(order)
    ranked = a[order]

    # A sorted row is "new" iff it differs from its predecessor.
    keep = np.ones(len(ranked), 'bool')
    keep[1:] = (np.diff(ranked, axis=0) != 0).any(axis=1)

    # Map the mask back to the rows' original positions.
    return keep[inverse]
def target(x, y):
    """Synthetic 2-D objective: three Gaussian bumps, one Gaussian dip,
    and a small sine ripple along x."""
    bump1 = np.exp(-((x - 2)**2 / 0.7 + (y - 4)**2 / 1.2) + (x - 2) * (y - 4) / 1.6)
    bump2 = np.exp(-((x - 4)**2 / 3 + (y - 2)**2 / 2.))
    dip = np.exp(-((x - 4)**2 / 0.5 + (y - 4)**2 / 0.5) + (x - 4) * (y - 4) / 0.5)
    ripple = np.sin(3.1415 * x)
    bump3 = np.exp(-((x - 5.5)**2 / 0.5 + (y - 5.5)**2 / .5))
    return 2 * bump1 + bump2 - dip + 0.17 * ripple + 2 * bump3
# Evaluate the target on a 300x300 grid over the optimization domain.
n = 1e5  # NOTE(review): not used anywhere below -- kept as-is
x = np.linspace(0, 6, 300)
y = np.linspace(0, 8, 300)
X, Y = np.meshgrid(x, y)
x = X.ravel()
y = Y.ravel()
# Grid points stacked as rows, columns swapped so each row is (y, x).
X = np.vstack([x, y]).T[:, [1, 0]]
z = target(x, y)
print(X, X.shape)
print((max(z)))
print((min(z)))
# Hexbin plot of the true target surface.
fig, axis = plt.subplots(1, 1, figsize=(14, 10))
gridsize=150
im = axis.hexbin(x, y, C=z, gridsize=gridsize, cmap=cm.jet, bins=None, vmin=-0.9, vmax=2.1)
axis.axis([x.min(), x.max(), y.min(), y.max()])
cb = fig.colorbar(im, )
cb.set_label('Value')
def METHOD_NAME(bo, X):
    """Fit the optimizer's GP on its (deduplicated) observations, then
    evaluate mean, spread and acquisition utility on grid X.

    NOTE(review): the original treats the second return of
    gp.predict(..., return_std=True) as a variance and takes its sqrt;
    preserved as-is -- confirm whether the extra sqrt is intended.
    """
    mask = unique_rows(bo.X)
    bo.gp.fit(bo.X[mask], bo.Y[mask])
    mean, spread = bo.gp.predict(X, return_std=True)
    util = bo.util.utility(X, bo.gp, bo.Y.max())
    return mean, np.sqrt(spread), util
def plot_2d(name=None):
    """Plot GP mean, true target, GP spread and acquisition on the grid.

    Uses the module-level ``bo`` optimizer and grid arrays; *name* is only
    used in the (commented-out) figure-save path.
    """
    mu, s, ut = METHOD_NAME(bo, X)
    fig, ax = plt.subplots(2, 2, figsize=(14, 10))
    gridsize=150
    # fig.suptitle('Bayesian Optimization in Action', fontdict={'size':30})

    # GP regression output
    ax[0][0].set_title('Gausian Process Predicted Mean', fontdict={'size':15})
    im00 = ax[0][0].hexbin(x, y, C=mu, gridsize=gridsize, cmap=cm.jet, bins=None, vmin=-0.9, vmax=2.1)
    ax[0][0].axis([x.min(), x.max(), y.min(), y.max()])
    # Observed points; bo.X columns are (y, x), hence the [1]/[0] swap.
    ax[0][0].plot(bo.X[:, 1], bo.X[:, 0], 'D', markersize=4, color='k', label='Observations')

    ax[0][1].set_title('Target Function', fontdict={'size':15})
    im10 = ax[0][1].hexbin(x, y, C=z, gridsize=gridsize, cmap=cm.jet, bins=None, vmin=-0.9, vmax=2.1)
    ax[0][1].axis([x.min(), x.max(), y.min(), y.max()])
    ax[0][1].plot(bo.X[:, 1], bo.X[:, 0], 'D', markersize=4, color='k')

    ax[1][0].set_title('Gausian Process Variance', fontdict={'size':15})
    im01 = ax[1][0].hexbin(x, y, C=s, gridsize=gridsize, cmap=cm.jet, bins=None, vmin=0, vmax=1)
    ax[1][0].axis([x.min(), x.max(), y.min(), y.max()])

    ax[1][1].set_title('Acquisition Function', fontdict={'size':15})
    im11 = ax[1][1].hexbin(x, y, C=ut, gridsize=gridsize, cmap=cm.jet, bins=None, vmin=0, vmax=8)
    # Cross-hairs at the acquisition maximum; /50. converts a grid index
    # back to coordinates (300 points over 6 units = 50 per unit).
    np.where(ut.reshape((300, 300)) == ut.max())[0]
    np.where(ut.reshape((300, 300)) == ut.max())[1]
    ax[1][1].plot([np.where(ut.reshape((300, 300)) == ut.max())[1]/50.,
                   np.where(ut.reshape((300, 300)) == ut.max())[1]/50.],
                  [0, 6],
                  'k-', lw=2, color='k')
    ax[1][1].plot([0, 6],
                  [np.where(ut.reshape((300, 300)) == ut.max())[0]/50.,
                   np.where(ut.reshape((300, 300)) == ut.max())[0]/50.],
                  'k-', lw=2, color='k')
    ax[1][1].axis([x.min(), x.max(), y.min(), y.max()])

    for im, axis in zip([im00, im10, im01, im11], ax.flatten()):
        cb = fig.colorbar(im, ax=axis)
        # cb.set_label('Value')

    if name is None:
        name = '_'
    plt.tight_layout()

    # Save or show figure?
    # fig.savefig('bo_eg_' + name + '.png')
    plt.show()
    plt.close(fig)
# Build the optimizer and seed it with a few random points.
bo = BayesianOptimization(target, {'x': (0, 6), 'y': (0, 8)})

# Optional GP regressor settings; empty here (the commented-out example
# shows the shape these take).
# gp_params = {'corr': 'absolute_exponential'}#, 'nugget': 1e-9}
# Bug fix: gp_params was referenced in the loop below but only defined in
# a comment, so the first iteration raised NameError.
gp_params = {}

bo.maximize(init_points=5, n_iter=0, acq='ucb', kappa=10)
plot_2d("{:03}".format(len(bo.X)))

# Turn interactive plotting off
plt.ioff()

# One Bayesian-optimization step per frame, re-plotting after each.
for i in range(50):
    bo.maximize(init_points=0, n_iter=1, acq='ucb', kappa=10, **gp_params)
    plot_2d("{:03}".format(len(bo.X)))
import os.path
import sys
from numbers import Integral as int_types
from .error import err_add
def attrsearch(tag, attr, in_list):
    """Return the first element of *in_list* whose attribute *attr*
    equals *tag*, or None if there is no such element."""
    return next((item for item in in_list if getattr(item, attr) == tag), None)
def keysearch(tag, n, in_list):
    """Return the first sequence in *in_list* whose item *n* equals *tag*,
    or None if there is no such sequence."""
    return next((entry for entry in in_list if entry[n] == tag), None)
def dictsearch(val, in_dict):
    """Return the first key of *in_dict* that maps to *val*, else None."""
    return next((key for key, stored in in_dict.items() if stored == val), None)
def is_prefixed(identifier):
    """True iff *identifier* is a (prefix, name) 2-tuple."""
    if not isinstance(identifier, tuple):
        return False
    return len(identifier) == 2
def is_local(identifier):
    """True iff *identifier* is a plain (unprefixed) string."""
    return isinstance(identifier, str)
def split_identifier(identifier):
    """Split 'prefix:name' at the first colon into (prefix, name);
    return (None, identifier) when there is no colon."""
    prefix, sep, name = identifier.partition(":")
    if not sep:
        return None, identifier
    return prefix, name
def keyword_to_str(keyword):
    """Render a statement keyword (plain string or (prefix, name) pair)
    as a printable string."""
    if keyword == '__tmp_augment__':
        # Internal placeholder keyword is reported as "undefined".
        return "undefined"
    # Inlined is_prefixed() check: a (prefix, name) 2-tuple.
    if isinstance(keyword, tuple) and len(keyword) == 2:
        prefix, name = keyword
        return prefix + ":" + name
    return keyword
def guess_format(text):
    """Guess YANG/YIN format
    If the first non-whitespace character is '<' then it is XML.
    Return 'yang' or 'yin'"""
    stripped = text.lstrip()
    if stripped.startswith('<'):
        return 'yin'
    return 'yang'
def METHOD_NAME(module):
    """Return the lexicographically latest revision date of *module*,
    or 'unknown' if it has no revision statements."""
    dates = [stmt.arg for stmt in module.search('revision')]
    if not dates:
        return 'unknown'
    return max(dates)
def prefix_to_modulename_and_revision(module, prefix, pos, errors):
    """Resolve *prefix* to (modulename, revision) via *module*'s prefix map.

    Returns (None, None) and records a PREFIX_NOT_DEFINED error (once per
    prefix) when the prefix is unknown.  As a side effect, a successfully
    resolved prefix is removed from the module's unused-prefix bookkeeping.
    """
    if prefix == '':
        # Empty prefix refers to the module itself.
        return module.arg, None
    if prefix == module.i_prefix:
        return module.arg, None
    try:
        (modulename, revision) = module.i_prefixes[prefix]
    except KeyError:
        # Report each unknown prefix only once per module.
        if prefix not in module.i_missing_prefixes:
            err_add(errors, pos, 'PREFIX_NOT_DEFINED', prefix)
            module.i_missing_prefixes[prefix] = True
        return None, None
    # remove the prefix from the unused
    if prefix in module.i_unused_prefixes:
        del module.i_unused_prefixes[prefix]
    return modulename, revision
def prefix_to_module(module, prefix, pos, errors):
    """Resolve *prefix* to the module object it refers to, or None on
    failure (an error is then recorded in *errors*)."""
    # The empty prefix and the module's own prefix both denote the module.
    if prefix in ('', module.i_prefix):
        return module
    modulename, revision = \
        prefix_to_modulename_and_revision(module, prefix, pos, errors)
    if modulename is None:
        return None
    return module.i_ctx.get_module(modulename, revision)
def unique_prefixes(context):
"""Return a dictionary with unique prefixes for modules in `context`.
Keys are 'module' statements and values are prefixes,
disambiguated where necessary.
"""
modules = sorted(context.modules.values(), key=lambda module: module.arg)
prefixes = set()
conflicts = []
result = {}
for module in modules:
if module.keyword == "submodule":
continue
prefix = module.i_prefix
if prefix in prefixes:
conflicts.append(module)
else:
result[module] = prefix
prefixes.add(prefix)
for module in conflicts:
prefix = module.i_prefix
append = 0
while True:
append += 1
candidate = "%s%x" % (prefix, append)
if candidate not in prefixes:
break
result[module] = candidate
prefixes.add(candidate)
return result
# Maps realpath -> True for every file already reported, so repeats are
# flagged with an upper-case "READ".
files_read = {}

def report_file_read(filename, extra=None):
    """Write a '# read <filename>' line to stderr and remember the file."""
    realpath = os.path.realpath(filename)
    read = "READ" if realpath in files_read else "read"
    extra = (" " + extra) if extra else ""
    sys.stderr.write("# %s %s%s\n" % (read, filename, extra))
    files_read[realpath] = True
def search_data_node(children, modulename, identifier, last_skipped = None):
    """Find the data node *identifier* (belonging to *modulename*) among
    *children*, descending transparently through choice/case/input/output
    wrappers.  Returns None when not found."""
    transparent = ['choice', 'case', 'input', 'output']
    if last_skipped is not None:
        transparent.append(last_skipped)
    for child in children:
        if child.keyword in transparent:
            # Not a data node itself: search its children instead.
            # (last_skipped is deliberately not propagated downwards.)
            found = search_data_node(child.i_children,
                                     modulename, identifier)
            if found is not None:
                return found
        elif (child.arg == identifier
              and child.i_module.i_modulename == modulename):
            return child
    return None
def closest_ancestor_data_node(node):
    """Walk up through choice/case wrappers to the nearest real data node
    (which may be *node* itself)."""
    while node.keyword in ('choice', 'case'):
        node = node.parent
    return node
def data_node_up(node):
    """Return the closest ancestor *data* node of *node*, skipping over
    choice/case/input/output wrappers in between."""
    non_data = ('choice', 'case', 'input', 'output')
    parent = node.parent
    if node.keyword in non_data:
        # *node* itself is transparent: restart from its parent.
        return data_node_up(parent)
    if parent and parent.keyword in non_data:
        return closest_ancestor_data_node(parent)
    return parent
import os
import unittest
import cle
from cle.address_translator import AT
from cle.backends import Section, Segment
TESTS_BASE = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
os.path.join("..", "..", "binaries", "tests"),
)
groundtruth = {
("x86_64", "allcmps"): {
"sections": [
Section("", 0x0, 0x0, 0x0),
Section(".interp", 0x238, 0x400238, 0x1C),
Section(".note.ABI-tag", 0x254, 0x400254, 0x20),
Section(".note.gnu.build-id", 0x274, 0x400274, 0x24),
Section(".gnu.hash", 0x298, 0x400298, 0x1C),
Section(".dynsym", 0x2B8, 0x4002B8, 0x48),
Section(".dynstr", 0x300, 0x400300, 0x38),
Section(".gnu.version", 0x338, 0x400338, 0x6),
Section(".gnu.version_r", 0x340, 0x400340, 0x20),
Section(".rela.dyn", 0x360, 0x400360, 0x18),
Section(".rela.plt", 0x378, 0x400378, 0x30),
Section(".init", 0x3A8, 0x4003A8, 0x1A),
Section(".plt", 0x3D0, 0x4003D0, 0x30),
Section(".text", 0x400, 0x400400, 0x2C4),
Section(".fini", 0x6C4, 0x4006C4, 0x9),
Section(".rodata", 0x6D0, 0x4006D0, 0x4),
Section(".eh_frame_hdr", 0x6D4, 0x4006D4, 0x3C),
Section(".eh_frame", 0x710, 0x400710, 0xF4),
Section(".init_array", 0xE10, 0x600E10, 0x8),
Section(".fini_array", 0xE18, 0x600E18, 0x8),
Section(".jcr", 0xE20, 0x600E20, 0x8),
Section(".dynamic", 0xE28, 0x600E28, 0x1D0),
Section(".got", 0xFF8, 0x600FF8, 0x8),
Section(".got.plt", 0x1000, 0x601000, 0x28),
Section(".data", 0x1028, 0x601028, 0x10),
Section(".bss", 0x1038, 0x601038, 0x8),
Section(".comment", 0x1038, 0x0, 0x2A),
Section(".shstrtab", 0x1062, 0x0, 0x108),
Section(".symtab", 0x18F0, 0x0, 0x630),
Section(".strtab", 0x1F20, 0x0, 0x232),
],
"segments": [
Segment(0, 0x400000, 0x804, 0x804),
Segment(0xE10, 0x600E10, 0x1F0, 0x1F0),
Segment(0x1000, 0x601000, 0x38, 0x40),
],
}
}
class TestRunSections(unittest.TestCase):
    """Check that CLE loads sections/segments matching the groundtruth
    table above, and that address-containment lookups behave at region
    boundaries and in the gaps between regions."""

    def _run_sections(self, arch, filename, sections):
        # Compare every loaded section with the expected list, in order.
        binary_path = os.path.join(TESTS_BASE, arch, filename)
        ld = cle.Loader(binary_path, auto_load_libs=False)
        self.assertEqual(len(ld.main_object.sections), len(sections))
        for i, section in enumerate(ld.main_object.sections):
            self.assertEqual(section.name, sections[i].name)
            self.assertEqual(section.offset, sections[i].offset)
            self.assertEqual(AT.from_mva(section.vaddr, ld.main_object).to_lva(), sections[i].vaddr)
            self.assertEqual(section.memsize, sections[i].memsize)

        # address lookups
        self.assertIsNone(ld.main_object.sections.find_region_containing(-1))

        # skip all sections that are not mapped into memory
        mapped_sections = [section for section in sections if section.vaddr != 0]

        for section in mapped_sections:
            # First, second and last byte of each section must resolve to it.
            self.assertEqual(ld.main_object.find_section_containing(section.vaddr).name, section.name)
            self.assertEqual(
                ld.main_object.sections.find_region_containing(section.vaddr).name,
                section.name,
            )
            if section.memsize > 0:
                self.assertEqual(
                    ld.main_object.find_section_containing(section.vaddr + 1).name,
                    section.name,
                )
                self.assertEqual(
                    ld.main_object.sections.find_region_containing(section.vaddr + 1).name,
                    section.name,
                )
                self.assertEqual(
                    ld.main_object.find_section_containing(section.vaddr + section.memsize - 1).name,
                    section.name,
                )
                self.assertEqual(
                    ld.main_object.sections.find_region_containing(section.vaddr + section.memsize - 1).name,
                    section.name,
                )

        for i in range(len(mapped_sections) - 1):
            sec_a, sec_b = mapped_sections[i], mapped_sections[i + 1]
            if sec_a.vaddr + sec_a.memsize < sec_b.vaddr:
                # there is a gap between sec_a and sec_b: addresses inside
                # it (up to 20 probed) must resolve to no section at all.
                for j in range(min(sec_b.vaddr - (sec_a.vaddr + sec_a.memsize), 20)):
                    a = sec_a.vaddr + sec_a.memsize + j
                    self.assertIsNone(ld.main_object.find_section_containing(a))
                    self.assertIsNone(ld.main_object.sections.find_region_containing(a))

        self.assertIsNone(ld.main_object.find_section_containing(0xFFFFFFFF), None)

    def METHOD_NAME(self, arch, filename, segments):
        # Same checks as _run_sections, for program segments.
        binary_path = os.path.join(TESTS_BASE, arch, filename)
        ld = cle.Loader(binary_path, auto_load_libs=False)
        self.assertEqual(len(ld.main_object.segments), len(segments))
        for i, segment in enumerate(ld.main_object.segments):
            self.assertEqual(segment.offset, segments[i].offset)
            self.assertEqual(segment.vaddr, segments[i].vaddr)
            self.assertEqual(segment.memsize, segments[i].memsize)
            self.assertEqual(segment.filesize, segments[i].filesize)

        # address lookups
        self.assertIsNone(ld.main_object.segments.find_region_containing(-1))

        # skip all segments that are not mapped into memory
        mapped_segments = [segment for segment in segments if segment.vaddr != 0]

        for segment in mapped_segments:
            self.assertEqual(
                ld.main_object.find_segment_containing(segment.vaddr).vaddr,
                segment.vaddr,
            )
            self.assertEqual(
                ld.main_object.segments.find_region_containing(segment.vaddr).vaddr,
                segment.vaddr,
            )
            if segment.memsize > 0:
                self.assertEqual(
                    ld.main_object.find_segment_containing(segment.vaddr + 1).vaddr,
                    segment.vaddr,
                )
                self.assertEqual(
                    ld.main_object.segments.find_region_containing(segment.vaddr + 1).vaddr,
                    segment.vaddr,
                )
                self.assertEqual(
                    ld.main_object.find_segment_containing(segment.vaddr + segment.memsize - 1).vaddr,
                    segment.vaddr,
                )
                self.assertEqual(
                    ld.main_object.segments.find_region_containing(segment.vaddr + segment.memsize - 1).vaddr,
                    segment.vaddr,
                )

        for i in range(len(mapped_segments) - 1):
            seg_a, seg_b = mapped_segments[i], mapped_segments[i + 1]
            if seg_a.vaddr + seg_a.memsize < seg_b.vaddr:
                # there is a gap between seg_a and seg_b
                for j in range(min(seg_b.vaddr - (seg_a.vaddr + seg_a.memsize), 20)):
                    a = seg_a.vaddr + seg_a.memsize + j
                    self.assertIsNone(ld.main_object.find_segment_containing(a))
                    self.assertIsNone(ld.main_object.segments.find_region_containing(a))

        self.assertIsNone(ld.main_object.find_segment_containing(0xFFFFFFFF), None)

    def test_sections(self):
        for (arch, filename), data in groundtruth.items():
            self._run_sections(arch, filename, data["sections"])

    def test_segments(self):
        for (arch, filename), data in groundtruth.items():
            self.METHOD_NAME(arch, filename, data["segments"])
if __name__ == "__main__":
    unittest.main()
import pytest
import spytest.framework as stf
from utilities.common import get_proc_name
def trace(fmt, *args):
    """Forward a (lazily %-formatted) message to spytest's debug trace."""
    message = fmt % args if args else fmt
    stf.dtrace(message)
def unused_pytest_collect_file(parent, path):
    """Disabled pytest_collect_file hook kept for debugging; only traces."""
    trace("\n%s: start", get_proc_name())
    trace("{} {}".format(parent, path))
    trace("%s: end\n", get_proc_name())
def pytest_itemcollected(item):
    """Register each collected test item with the spytest framework."""
    trace("\n%s: start", get_proc_name())
    trace("{} {} {}".format(item.name, item.fspath, item.nodeid))
    stf.collect_test(item)
    trace("%s: end\n", get_proc_name())
@pytest.hookimpl(trylast=True)
def pytest_collection_modifyitems(session, config, items):
    """Let spytest reorder/filter the collected test items."""
    trace("\n%s: start", get_proc_name())
    trace("{}".format(items))
    stf.modify_tests(config, items)
    trace("%s: end\n", get_proc_name())
@pytest.hookimpl(trylast=True)
def pytest_generate_tests(metafunc):
    """Delegate test parametrization to the spytest framework."""
    trace("\n%s: start", get_proc_name())
    trace("{}".format(metafunc))
    stf.generate_tests(metafunc.config, metafunc)
    trace("%s: end\n", get_proc_name())
def unused_pytest_runtest_logstart(nodeid, location):
    """Disabled pytest_runtest_logstart hook kept for debugging."""
    trace("\n%s: start", get_proc_name())
    trace("{} {}".format(nodeid, location))
    trace("%s: end\n", get_proc_name())
# this gets called in xdist for every test completion
def pytest_runtest_logreport(report):
    """Forward each test report to the spytest framework."""
    trace("\n%s: start", get_proc_name())
    trace("{}".format(report))
    stf.log_report(report)
    trace("%s: end\n", get_proc_name())
def pytest_runtest_makereport(item, call):
trace("\n%s: start", get_proc_name())
trace("{} {}".format(item, call))
stf.make_report(item, call)
trace("%s: end\n", get_proc_name())
def unused_pytest_runtest_setup(item):
trace("\n%s: start", get_proc_name())
trace("{}".format(item))
trace("%s: end\n", get_proc_name())
def unused_pytest_runtest_call(item):
trace("\n%s: start", get_proc_name())
trace("{}".format(item))
trace("%s: end\n", get_proc_name())
@pytest.hookspec(firstresult=True)
def METHOD_NAME(item, nextitem):
    """Runtest-protocol hook kept for debugging; trace-only.

    Fix: the previous body passed %-style arguments to ``print``, which does
    not interpolate them (it printed the literal format string followed by
    the argument as a second value), and it wrote to stdout instead of the
    debug trace used by every other hook in this module. Route the messages
    through ``trace`` for correct formatting and consistency.
    """
    trace("\n%s: start", get_proc_name())
    trace("{}".format(item))
    trace("{}".format(nextitem))
    trace("%s: end\n", get_proc_name())
def pytest_addoption(parser):
    # Registers the framework's command line options with pytest.
    trace("\n%s: start", get_proc_name())
    stf.add_options(parser)
    trace("%s: end\n", get_proc_name())


@pytest.hookimpl(trylast=True)
def pytest_configure(config):
    # trylast: initialize the framework after all other plugins configured.
    trace("\n%s: start", get_proc_name())
    trace("{}".format(config))
    stf.configure(config)
    trace("%s: end\n", get_proc_name())


def pytest_unconfigure(config):
    # Framework teardown counterpart to pytest_configure.
    trace("\n%s: start", get_proc_name())
    trace("{}".format(config))
    stf.unconfigure(config)
    trace("%s: end\n", get_proc_name())


@pytest.hookimpl(tryfirst=True)
def pytest_xdist_setupnodes(config, specs):
    # xdist: called before the worker nodes are created.
    trace("\n%s: start", get_proc_name())
    trace("{}".format(config))
    stf.configure_nodes(config, specs)
    trace("%s: end\n", get_proc_name())


def pytest_configure_node(node):
    # xdist: configure a single worker node.
    trace("\n%s: start", get_proc_name())
    trace("{}".format(node))
    stf.configure_node(node)
    trace("%s: end\n", get_proc_name())


def pytest_xdist_newgateway(gateway):
    # xdist: a new execnet gateway (worker connection) was established.
    trace("\n%s: start", get_proc_name())
    trace("{}".format(gateway))
    stf.begin_node(gateway)
    trace("%s: end\n", get_proc_name())


def pytest_testnodedown(node, error):
    # xdist: a worker node finished or crashed (``error`` set on crash).
    trace("\n%s: start", get_proc_name())
    trace("{} {}".format(node, error))
    stf.finish_node(node, error)
    trace("%s: end\n", get_proc_name())


def pytest_exception_interact(node, call, report):
    # Forward exception details to the framework, but only for failures.
    trace("\n%s: start", get_proc_name())
    if report.failed:
        stf.log_test_exception(call.excinfo)
    trace("%s: end\n", get_proc_name())


def pytest_xdist_make_scheduler(config, log):
    # Let the framework supply a custom xdist test scheduler.
    trace("\n%s: start", get_proc_name())
    trace("{}".format(config))
    rv = stf.make_scheduler(config, log)
    trace("%s: end\n", get_proc_name())
    return rv


@pytest.hookimpl(hookwrapper=True)
def pytest_fixture_setup(fixturedef, request):
    # Wrapper hook: code before ``yield`` runs before the fixture is set up,
    # code after it runs once setup has completed.
    trace("\n%s: start", get_proc_name())
    trace("{}".format(fixturedef))
    trace("{}".format(request))
    stf.fixture_setup(fixturedef, request)
    yield
    stf.fixture_setup_finish(fixturedef, request)
    trace("\n%s: end", get_proc_name())
    trace("{}".format(fixturedef))
    trace("{}".format(request))


@pytest.hookimpl(tryfirst=True)
@pytest.hookspec(firstresult=True)
def unused_pytest_fixture_setup(fixturedef, request):
    # Disabled non-wrapper variant kept for reference; with firstresult a
    # non-None return would short-circuit the remaining implementations.
    trace("\n%s: start", get_proc_name())
    trace("{}".format(fixturedef))
    trace("{}".format(request))
    rv = stf.fixture_setup(fixturedef, request)
    return rv


def pytest_fixture_post_finalizer(fixturedef, request):
    # Called after a fixture's finalizers have run.
    trace("\n%s: start", get_proc_name())
    trace("{}".format(fixturedef))
    trace("{}".format(request))
    stf.fixture_post_finalizer(fixturedef, request)
    trace("%s: end\n", get_proc_name())
def pytest_sessionstart(session):
    # Framework bookkeeping at session start (runs in master and workers).
    trace("\n%s: start", get_proc_name())
    trace("{}".format(session))
    stf.session_start(session)
    trace("%s: end\n", get_proc_name())


def pytest_sessionfinish(session, exitstatus):
    # Framework bookkeeping at session end; receives the final exit status.
    trace("\n%s: start", get_proc_name())
    trace("{}".format(session))
    trace("{}".format(exitstatus))
    stf.session_finish(session, exitstatus)
    trace("%s: end\n", get_proc_name())


def unused_pytest_keyboard_interrupt(excinfo):
    # Disabled hook; trace-only.
    trace("\n%s: start", get_proc_name())
    trace("{}".format(excinfo))
    trace("%s: end\n", get_proc_name())


@pytest.hookimpl(hookwrapper=True)
def pytest_pyfunc_call(pyfuncitem):
    # Wrapper around the actual test-function call: notify the framework
    # before (False) and after (True) execution.
    trace("\n%s: prolog", get_proc_name())
    stf.pyfunc_call(pyfuncitem, False)
    yield
    stf.pyfunc_call(pyfuncitem, True)
    trace("\n%s: epilog", get_proc_name())
@pytest.fixture(autouse=True)
def global_repeat_request(request):
    """ repeat hook """
    # Autouse per-test fixture that lets the framework implement repeats.
    trace("\n----------global repeat start------------\n")
    rv = stf.global_repeat_request(request)
    trace("\n----------global repeat end------------\n")
    return rv


@pytest.fixture(scope="session", autouse=True)
def global_session_request(request):
    """ session hook """
    # Session-scoped bracket: callback before the first test and after the last.
    trace("\n----------global session start------------\n")
    stf.fixture_callback(request, "session", False)
    yield
    stf.fixture_callback(request, "session", True)
    trace("\n----------global session end------------\n")


@pytest.fixture(scope="module", autouse=True)
def global_module_hook(request):
    """ common module hook """
    trace("\n----------global module start------------\n")
    rv = stf.fixture_callback(request, "module", False)
    # NOTE(review): when the setup callback returns a truthy value we return
    # early and the finalizer below is never registered — confirm intentional.
    if rv:
        return rv

    def fin():
        rv = stf.fixture_callback(request, "module", True)
        trace("\n----------global module end------------\n")
        return rv

    request.addfinalizer(fin)


@pytest.fixture(scope="module", autouse=True)
def global_module_hook_addl(request):
    """ additional module hook """
    # Trace-only bracket around each module.
    trace("\n----------global module addl start------------\n")
    yield
    trace("\n----------global module addl end------------\n")


@pytest.fixture(scope="function", autouse=True)
def global_function_hook(request):
    """ common function hook """
    # Per-test bracket: framework callback before and after each test.
    trace("\n----------global test start------------\n")
    stf.fixture_callback(request, "function", False)
    yield
    stf.fixture_callback(request, "function", True)
    trace("\n----------global test end------------\n")
def pytest_internalerror(excrepr, excinfo):
    # Trace-only record of pytest-internal errors.
    trace("\n%s: start", get_proc_name())
    trace("{} {}".format(excrepr, excinfo))
    trace("%s: end\n", get_proc_name())


# Node ids of files/modules that failed to collect, accumulated across
# pytest_collectreport calls and reported once collection finishes.
collect_fails = []


def pytest_collectreport(report):
    if report.failed:
        collect_fails.append(report.nodeid)


def pytest_report_collectionfinish(config, startdir, items):
    # Hand each collection failure to the framework; the run itself is not
    # aborted (the UsageError below is deliberately left disabled).
    if collect_fails:
        for fail in collect_fails:
            stf.collect_fail(fail)
        # raise pytest.UsageError("Errors during collection, aborting")
3,084 | test status | """
integration tests for mac_service
"""
import plistlib
import pytest
import salt.utils.files
from tests.support.case import ModuleCase
@pytest.mark.skip_if_not_root
@pytest.mark.skip_if_binaries_missing("launchctl", "plutil")
@pytest.mark.skip_unless_on_darwin
class MacServiceModuleTest(ModuleCase):
    """
    Validate the mac_service module
    """

    # Disposable launchd daemon installed by setUp and removed by tearDown.
    SERVICE_NAME = "com.salt.integration.test"
    SERVICE_PATH = "/Library/LaunchDaemons/com.salt.integration.test.plist"

    def setUp(self):
        """
        setup our test launch service.
        """
        # Minimal KeepAlive daemon that just sleeps, so start/stop/status
        # have a real process to act on.
        service_data = {
            "KeepAlive": True,
            "Label": self.SERVICE_NAME,
            "ProgramArguments": ["/bin/sleep", "1000"],
            "RunAtLoad": True,
        }
        with salt.utils.files.fopen(self.SERVICE_PATH, "wb") as fp:
            plistlib.dump(service_data, fp)
        self.run_function("service.enable", [self.SERVICE_NAME])
        self.run_function("service.start", [self.SERVICE_NAME])

    def tearDown(self):
        """
        stop and remove our test service.
        """
        self.run_function("service.stop", [self.SERVICE_NAME])
        salt.utils.files.safe_rm(self.SERVICE_PATH)

    @pytest.mark.slow_test
    def test_show(self):
        """
        Test service.show
        """
        # Existing Service
        service_info = self.run_function("service.show", [self.SERVICE_NAME])
        self.assertIsInstance(service_info, dict)
        self.assertEqual(service_info["plist"]["Label"], self.SERVICE_NAME)

        # Missing Service
        self.assertIn(
            "Service not found", self.run_function("service.show", ["spongebob"])
        )

    @pytest.mark.slow_test
    def test_launchctl(self):
        """
        Test service.launchctl
        """
        # Expected Functionality
        self.assertTrue(
            self.run_function("service.launchctl", ["error", "bootstrap", 64])
        )
        self.assertEqual(
            self.run_function(
                "service.launchctl", ["error", "bootstrap", 64], return_stdout=True
            ),
            "64: unknown error code",
        )

        # Raise an error
        self.assertIn(
            "Failed to error service",
            self.run_function("service.launchctl", ["error", "bootstrap"]),
        )

    @pytest.mark.slow_test
    def test_list(self):
        """
        Test service.list
        """
        # Expected Functionality
        self.assertIn("PID", self.run_function("service.list"))
        self.assertIn("{", self.run_function("service.list", [self.SERVICE_NAME]))

        # Service not found
        self.assertIn(
            "Service not found", self.run_function("service.list", ["spongebob"])
        )

    @pytest.mark.destructive_test
    @pytest.mark.slow_test
    def test_enable(self):
        """
        Test service.enable
        """
        self.assertTrue(self.run_function("service.enable", [self.SERVICE_NAME]))
        self.assertIn(
            "Service not found", self.run_function("service.enable", ["spongebob"])
        )

    @pytest.mark.destructive_test
    @pytest.mark.slow_test
    def test_disable(self):
        """
        Test service.disable
        """
        self.assertTrue(self.run_function("service.disable", [self.SERVICE_NAME]))
        self.assertIn(
            "Service not found", self.run_function("service.disable", ["spongebob"])
        )

    @pytest.mark.destructive_test
    @pytest.mark.slow_test
    def test_start(self):
        """
        Test service.start
        """
        self.assertTrue(self.run_function("service.start", [self.SERVICE_NAME]))
        self.assertIn(
            "Service not found", self.run_function("service.start", ["spongebob"])
        )

    @pytest.mark.destructive_test
    @pytest.mark.slow_test
    def test_stop(self):
        """
        Test service.stop
        """
        self.assertTrue(self.run_function("service.stop", [self.SERVICE_NAME]))
        self.assertIn(
            "Service not found", self.run_function("service.stop", ["spongebob"])
        )

    @pytest.mark.destructive_test
    @pytest.mark.slow_test
    def METHOD_NAME(self):
        """
        Test service.status
        """
        # A running service
        self.assertTrue(self.run_function("service.start", [self.SERVICE_NAME]))
        self.assertTrue(self.run_function("service.status", [self.SERVICE_NAME]))

        # A stopped service
        self.assertTrue(self.run_function("service.stop", [self.SERVICE_NAME]))
        self.assertFalse(self.run_function("service.status", [self.SERVICE_NAME]))

        # Service not found
        self.assertFalse(self.run_function("service.status", ["spongebob"]))

    @pytest.mark.slow_test
    def test_available(self):
        """
        Test service.available
        """
        self.assertTrue(self.run_function("service.available", [self.SERVICE_NAME]))
        self.assertFalse(self.run_function("service.available", ["spongebob"]))

    @pytest.mark.slow_test
    def test_missing(self):
        """
        Test service.missing
        """
        self.assertFalse(self.run_function("service.missing", [self.SERVICE_NAME]))
        self.assertTrue(self.run_function("service.missing", ["spongebob"]))

    @pytest.mark.destructive_test
    @pytest.mark.slow_test
    def test_enabled(self):
        """
        Test service.enabled
        """
        self.assertTrue(self.run_function("service.enabled", [self.SERVICE_NAME]))
        self.assertTrue(self.run_function("service.start", [self.SERVICE_NAME]))
        self.assertTrue(self.run_function("service.enabled", [self.SERVICE_NAME]))
        self.assertTrue(self.run_function("service.stop", [self.SERVICE_NAME]))
        # NOTE(review): "spongebob" does not exist; this presumably passes
        # because an error string return value is truthy — confirm intent.
        self.assertTrue(self.run_function("service.enabled", ["spongebob"]))

    @pytest.mark.destructive_test
    @pytest.mark.slow_test
    def test_disabled(self):
        """
        Test service.disabled
        """
        self.assertTrue(self.run_function("service.start", [self.SERVICE_NAME]))
        self.assertFalse(self.run_function("service.disabled", [self.SERVICE_NAME]))
        self.assertTrue(self.run_function("service.disable", [self.SERVICE_NAME]))
        self.assertTrue(self.run_function("service.disabled", [self.SERVICE_NAME]))
        # Re-enable so later tests/teardown see the expected state.
        self.assertTrue(self.run_function("service.enable", [self.SERVICE_NAME]))
        self.assertIn(
            "Service not found", self.run_function("service.stop", ["spongebob"])
        )

    @pytest.mark.slow_test
    def test_get_all(self):
        """
        Test service.get_all
        """
        services = self.run_function("service.get_all")
        self.assertIsInstance(services, list)
        self.assertIn(self.SERVICE_NAME, services)

    @pytest.mark.slow_test
    def test_get_enabled(self):
        """
        Test service.get_enabled
        """
        services = self.run_function("service.get_enabled")
        self.assertIsInstance(services, list)
        self.assertIn(self.SERVICE_NAME, services)

    @pytest.mark.slow_test
    def test_service_laoded(self):
        """
        Test service.loaded
        """
        # NOTE(review): method name has a typo ("laoded" -> "loaded"); kept
        # unchanged so existing test selections by name keep working.
        self.assertTrue(self.run_function("service.loaded", [self.SERVICE_NAME]))
3,085 | test collider graph dsep | from itertools import combinations
import pytest
import networkx as nx
def path_graph():
    """Return a path graph of length three."""
    G = nx.path_graph(3, create_using=nx.DiGraph)
    G.graph["name"] = "path"
    # Freeze so no test can accidentally mutate a shared graph.
    nx.freeze(G)
    return G


def fork_graph():
    """Return a three node fork graph."""
    G = nx.DiGraph(name="fork")
    G.add_edges_from([(0, 1), (0, 2)])
    nx.freeze(G)
    return G


def collider_graph():
    """Return a collider/v-structure graph with three nodes."""
    G = nx.DiGraph(name="collider")
    G.add_edges_from([(0, 2), (1, 2)])
    nx.freeze(G)
    return G


def naive_bayes_graph():
    """Return a simple Naive Bayes PGM graph."""
    G = nx.DiGraph(name="naive_bayes")
    G.add_edges_from([(0, 1), (0, 2), (0, 3), (0, 4)])
    nx.freeze(G)
    return G


def asia_graph():
    """Return the 'Asia' PGM graph."""
    G = nx.DiGraph(name="asia")
    G.add_edges_from(
        [
            ("asia", "tuberculosis"),
            ("smoking", "cancer"),
            ("smoking", "bronchitis"),
            ("tuberculosis", "either"),
            ("cancer", "either"),
            ("either", "xray"),
            ("either", "dyspnea"),
            ("bronchitis", "dyspnea"),
        ]
    )
    nx.freeze(G)
    return G


# Fixture wrappers around the factories above, so tests can take the graphs
# as arguments while parametrize (which runs at import time) calls the
# factories directly.
@pytest.fixture(name="path_graph")
def path_graph_fixture():
    return path_graph()


@pytest.fixture(name="fork_graph")
def fork_graph_fixture():
    return fork_graph()


@pytest.fixture(name="collider_graph")
def collider_graph_fixture():
    return collider_graph()


@pytest.fixture(name="naive_bayes_graph")
def naive_bayes_graph_fixture():
    return naive_bayes_graph()


@pytest.fixture(name="asia_graph")
def asia_graph_fixture():
    return asia_graph()
@pytest.mark.parametrize(
    "graph",
    [path_graph(), fork_graph(), collider_graph(), naive_bayes_graph(), asia_graph()],
)
def test_markov_condition(graph):
    """Test that the Markov condition holds for each PGM graph."""
    # Each node must be d-separated from its non-descendants given its parents.
    for node in graph.nodes:
        parents = set(graph.predecessors(node))
        non_descendants = graph.nodes - nx.descendants(graph, node) - {node} - parents
        assert nx.d_separated(graph, {node}, non_descendants, parents)


def test_path_graph_dsep(path_graph):
    """Example-based test of d-separation for path_graph."""
    assert nx.d_separated(path_graph, {0}, {2}, {1})
    assert not nx.d_separated(path_graph, {0}, {2}, {})


def test_fork_graph_dsep(fork_graph):
    """Example-based test of d-separation for fork_graph."""
    assert nx.d_separated(fork_graph, {1}, {2}, {0})
    assert not nx.d_separated(fork_graph, {1}, {2}, {})
def METHOD_NAME(collider_graph):
    """Example-based test of d-separation for collider_graph."""
    unconditioned = nx.d_separated(collider_graph, {0}, {1}, {})
    conditioned = nx.d_separated(collider_graph, {0}, {1}, {2})
    # The parents of a collider are marginally independent, but conditioning
    # on their common child (node 2) opens the path between them.
    assert unconditioned
    assert not conditioned
def test_naive_bayes_dsep(naive_bayes_graph):
    """Example-based test of d-separation for naive_bayes_graph."""
    # Features are pairwise independent given the class node 0 only.
    for u, v in combinations(range(1, 5), 2):
        assert nx.d_separated(naive_bayes_graph, {u}, {v}, {0})
        assert not nx.d_separated(naive_bayes_graph, {u}, {v}, {})


def test_asia_graph_dsep(asia_graph):
    """Example-based test of d-separation for asia_graph."""
    assert nx.d_separated(
        asia_graph, {"asia", "smoking"}, {"dyspnea", "xray"}, {"bronchitis", "either"}
    )
    assert nx.d_separated(
        asia_graph, {"tuberculosis", "cancer"}, {"bronchitis"}, {"smoking", "xray"}
    )


def test_undirected_graphs_are_not_supported():
    """
    Test that undirected graphs are not supported.

    d-separation and its related algorithms do not apply in
    the case of undirected graphs.
    """
    g = nx.path_graph(3, nx.Graph)
    with pytest.raises(nx.NetworkXNotImplemented):
        nx.d_separated(g, {0}, {1}, {2})
    with pytest.raises(nx.NetworkXNotImplemented):
        nx.is_minimal_d_separator(g, {0}, {1}, {2})
    with pytest.raises(nx.NetworkXNotImplemented):
        nx.minimal_d_separator(g, {0}, {1})


def test_cyclic_graphs_raise_error():
    """
    Test that cycle graphs should cause erroring.

    This is because PGMs assume a directed acyclic graph.
    """
    g = nx.cycle_graph(3, nx.DiGraph)
    with pytest.raises(nx.NetworkXError):
        nx.d_separated(g, {0}, {1}, {2})
    with pytest.raises(nx.NetworkXError):
        nx.minimal_d_separator(g, 0, 1)
    with pytest.raises(nx.NetworkXError):
        nx.is_minimal_d_separator(g, 0, 1, {2})


def test_invalid_nodes_raise_error(asia_graph):
    """
    Test that graphs that have invalid nodes passed in raise errors.
    """
    # asia_graph has string node names, so integer nodes are unknown.
    with pytest.raises(nx.NodeNotFound):
        nx.d_separated(asia_graph, {0}, {1}, {2})
    with pytest.raises(nx.NodeNotFound):
        nx.is_minimal_d_separator(asia_graph, 0, 1, {2})
    with pytest.raises(nx.NodeNotFound):
        nx.minimal_d_separator(asia_graph, 0, 1)
def test_minimal_d_separator():
    # Case 1:
    # create a graph A -> B <- C
    # B -> D -> E;
    # B -> F;
    # G -> E;
    edge_list = [("A", "B"), ("C", "B"), ("B", "D"), ("D", "E"), ("B", "F"), ("G", "E")]
    G = nx.DiGraph(edge_list)
    assert not nx.d_separated(G, {"B"}, {"E"}, set())

    # minimal set of the corresponding graph
    # for B and E should be (D,)
    Zmin = nx.minimal_d_separator(G, "B", "E")

    # the minimal separating set should pass the test for minimality
    assert nx.is_minimal_d_separator(G, "B", "E", Zmin)
    assert Zmin == {"D"}

    # Case 2:
    # create a graph A -> B -> C
    # B -> D -> C;
    edge_list = [("A", "B"), ("B", "C"), ("B", "D"), ("D", "C")]
    G = nx.DiGraph(edge_list)
    assert not nx.d_separated(G, {"A"}, {"C"}, set())
    Zmin = nx.minimal_d_separator(G, "A", "C")

    # the minimal separating set should pass the test for minimality
    assert nx.is_minimal_d_separator(G, "A", "C", Zmin)
    assert Zmin == {"B"}
    # A proper superset of a minimal separator separates, but is not minimal.
    Znotmin = Zmin.union({"D"})
    assert not nx.is_minimal_d_separator(G, "A", "C", Znotmin)


def test_minimal_d_separator_checks_dsep():
    """Test that is_minimal_d_separator checks for d-separation as well."""
    g = nx.DiGraph()
    g.add_edges_from(
        [
            ("A", "B"),
            ("A", "E"),
            ("B", "C"),
            ("B", "D"),
            ("D", "C"),
            ("D", "F"),
            ("E", "D"),
            ("E", "F"),
        ]
    )
    assert not nx.d_separated(g, {"C"}, {"F"}, {"D"})

    # since {'D'} and {} are not d-separators, we return false
    assert not nx.is_minimal_d_separator(g, "C", "F", {"D"})
    assert not nx.is_minimal_d_separator(g, "C", "F", {})
3,086 | load nones | # This file is part of Indico.
# Copyright (C) 2002 - 2023 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from marshmallow import fields, post_dump, post_load, pre_load
from marshmallow_enum import EnumField
from indico.core.marshmallow import mm
from indico.modules.events.models.persons import EventPerson
from indico.modules.users.models.affiliations import Affiliation
from indico.modules.users.models.users import UserTitle
from indico.modules.users.schemas import AffiliationSchema
from indico.util.marshmallow import ModelField, NoneValueEnumField
class PersonLinkSchema(mm.Schema):
    """Marshmallow schema for (de)serializing person-link data."""

    type = fields.String(dump_default='person_link')
    person_id = fields.Int()
    user_id = fields.Int(attribute='person.user_id', dump_only=True)
    user_identifier = fields.String(attribute='person.user.identifier', dump_only=True)
    name = fields.String(attribute='display_full_name', dump_only=True)
    first_name = fields.String(load_default='')
    last_name = fields.String(required=True)
    _title = EnumField(UserTitle, data_key='title')
    affiliation = fields.String(load_default='')
    affiliation_link = ModelField(Affiliation, data_key='affiliation_id', load_default=None, load_only=True)
    affiliation_id = fields.Integer(load_default=None, dump_only=True)
    affiliation_meta = fields.Nested(AffiliationSchema, attribute='affiliation_link', dump_only=True)
    phone = fields.String(load_default='')
    address = fields.String(load_default='')
    email = fields.String(load_default='')
    display_order = fields.Int(load_default=0, dump_default=0)
    avatar_url = fields.Function(lambda o: o.person.user.avatar_url if o.person.user else None, dump_only=True)
    roles = fields.List(fields.String(), load_only=True)

    @pre_load
    def METHOD_NAME(self, data, **kwargs):
        # Normalize falsy title/affiliation values before field loading.
        if not data.get('title'):
            data['title'] = UserTitle.none.name
        if not data.get('affiliation'):
            data['affiliation'] = ''
        if data.get('affiliation_id') == -1:
            # external search results with a predefined affiliation
            del data['affiliation_id']
        return data

    @post_load
    def ensure_affiliation_text(self, data, **kwargs):
        # Keep the plain-text affiliation in sync with the linked record.
        if data['affiliation_link']:
            data['affiliation'] = data['affiliation_link'].name
        return data

    @post_dump
    def dump_type(self, data, **kwargs):
        # Drop type/person_id when there is no person id, and map the
        # sentinel 'none' title back to None for the client.
        if data['person_id'] is None:
            del data['type']
            del data['person_id']
        if data['title'] == UserTitle.none.name:
            data['title'] = None
        return data
class EventPersonSchema(mm.SQLAlchemyAutoSchema):
    """Auto-generated schema for ``EventPerson`` model objects."""

    class Meta:
        model = EventPerson
        # Fields safe to expose publicly vs. the full serialized field set.
        public_fields = ('id', 'identifier', 'title', 'email', 'affiliation', 'affiliation_link', 'affiliation_id',
                         'affiliation_meta', 'name', 'first_name', 'last_name', 'user_identifier')
        fields = public_fields + ('phone', 'address')

    type = fields.Constant('EventPerson')
    title = NoneValueEnumField(UserTitle, none_value=UserTitle.none, attribute='_title')
    name = fields.String(attribute='full_name')
    user_identifier = fields.String(attribute='user.identifier')
    last_name = fields.String(required=True)
    email = fields.String(load_default='')
    affiliation_link = ModelField(Affiliation, data_key='affiliation_id', load_default=None, load_only=True)
    affiliation_id = fields.Integer(load_default=None, dump_only=True)
    affiliation_meta = fields.Nested(AffiliationSchema, attribute='affiliation_link', dump_only=True)

    @pre_load
    def handle_affiliation_link(self, data, **kwargs):
        # in some cases we get data that's already been loaded by PersonLinkSchema and thus no longer
        # has an affiliation_id but only an affiliation_link...
        data = data.copy()
        if affiliation_link := data.pop('affiliation_link', None):
            data['affiliation_id'] = affiliation_link.id
        return data

    @post_load
    def ensure_affiliation_text(self, data, **kwargs):
        # Keep the plain-text affiliation in sync with the linked record.
        if affiliation_link := data.get('affiliation_link'):
            data['affiliation'] = affiliation_link.name
        return data
class EventPersonUpdateSchema(EventPersonSchema):
    """``EventPersonSchema`` variant restricted to a reduced field set."""

    class Meta(EventPersonSchema.Meta):
        fields = ('title', 'first_name', 'last_name', 'address', 'phone', 'affiliation', 'affiliation_link')

    # Plain EnumField here, overriding the parent's NoneValueEnumField.
    title = EnumField(UserTitle)
3,087 | setup | # Copyright 2015-2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=attribute-defined-outside-init
from wa import ApkUiautoWorkload, Parameter
from wa.framework import pluginloader
class Applaunch(ApkUiautoWorkload):

    name = 'applaunch'
    description = '''
    This workload launches and measures the launch time of applications for supporting workloads.

    Currently supported workloads are the ones that implement ``ApplaunchInterface``. For any
    workload to support this workload, it should implement the ``ApplaunchInterface``.
    The corresponding java file of the workload associated with the application being measured
    is executed during the run. The application that needs to be
    measured is passed as a parameter ``workload_name``. The parameters required for that workload
    have to be passed as a dictionary which is captured by the parameter ``workload_params``.
    This information can be obtained by inspecting the workload details of the specific workload.

    The workload allows to run multiple iterations of an application
    launch in two modes:

    1. Launch from background
    2. Launch from long-idle

    These modes are captured as a parameter applaunch_type.

    ``launch_from_background``
        Launches an application after the application is sent to background by
        pressing Home button.

    ``launch_from_long-idle``
        Launches an application after killing an application process and
        clearing all the caches.

    **Test Description:**

    - During the initialization and setup, the application being launched is launched
      for the first time. The jar file of the workload of the application
      is moved to device at the location ``workdir`` which further implements the methods
      needed to measure the application launch time.

    - Run phase calls the UiAutomator of the applaunch which runs in two subphases.
        A. Applaunch Setup Run:
            During this phase, welcome screens and dialogues during the first launch
            of the instrumented application are cleared.
        B. Applaunch Metric Run:
            During this phase, the application is launched multiple times determined by
            the iteration number specified by the parameter ``applaunch_iterations``.
            Each of these iterations are instrumented to capture the launch time taken
            and the values are recorded as UXPERF marker values in logfile.
    '''
    supported_platforms = ['android']

    parameters = [
        Parameter('workload_name', kind=str,
                  description='Name of the uxperf workload to launch',
                  default='gmail'),
        Parameter('workload_params', kind=dict, default={},
                  description="""
                  parameters of the uxperf workload whose application launch
                  time is measured
                  """),
        Parameter('applaunch_type', kind=str, default='launch_from_background',
                  allowed_values=['launch_from_background', 'launch_from_long-idle'],
                  description="""
                  Choose launch_from_long-idle for measuring launch time
                  from long-idle. These two types are described in the workload
                  description.
                  """),
        Parameter('applaunch_iterations', kind=int, default=1,
                  description="""
                  Number of iterations of the application launch
                  """),
    ]

    def init_resources(self, context):
        """Load the wrapped uxperf workload and collect its resources."""
        super(Applaunch, self).init_resources(context)
        self.workload_params['markers_enabled'] = True
        # pylint: disable=no-member
        self.workload = pluginloader.get_workload(self.workload_name, self.target,
                                                  **self.workload_params)
        self.workload.init_resources(context)
        self.workload.initialize(context)
        self.package_names = self.workload.package_names
        self.pass_parameters()
        # Deploy test workload uiauto apk
        self.asset_files.append(self.workload.gui.uiauto_file)

    def pass_parameters(self):
        """Forward the wrapped workload's UI-automation parameters to our GUI."""
        self.gui.uiauto_params['workload'] = self.workload.name
        self.gui.uiauto_params['package_name'] = self.workload.package
        self.gui.uiauto_params.update(self.workload.gui.uiauto_params)
        if self.workload.apk.activity:
            self.gui.uiauto_params['launch_activity'] = self.workload.apk.activity
        else:
            # The uiauto side expects the literal string "None" when there is
            # no explicit launch activity.
            self.gui.uiauto_params['launch_activity'] = "None"
        self.gui.uiauto_params['applaunch_type'] = self.applaunch_type
        self.gui.uiauto_params['applaunch_iterations'] = self.applaunch_iterations

    def METHOD_NAME(self, context):
        """Deploy the wrapped workload's UI automation before the base setup."""
        self.workload.gui.uiauto_params['package_name'] = self.workload.apk.apk_info.package
        self.workload.gui.init_commands()
        self.workload.gui.deploy()
        super(Applaunch, self).METHOD_NAME(context)

    def finalize(self, context):
        """Finalize both this workload and the wrapped one."""
        super(Applaunch, self).finalize(context)
        self.workload.finalize(context)
3,088 | get id | # License models
from le_utils.constants import licenses
from .. import config
from ..exceptions import UnknownLicenseError
def get_license(license_id, copyright_holder=None, description=None):
    """Return a ``License`` instance for the given license id.

    Args:
        license_id: one of the ids from ``le_utils.constants.licenses``.
        copyright_holder: optional name of the person/organization owning the
            license.
        description: description for special-permissions licenses; ignored by
            every other license type.

    Raises:
        UnknownLicenseError: if ``license_id`` is not a recognized license id.
    """
    # Special permissions licenses are the only kind that carries a custom
    # description, so handle them before the generic table lookup.
    if license_id == licenses.SPECIAL_PERMISSIONS:
        return SpecialPermissionsLicense(
            copyright_holder=copyright_holder, description=description
        )
    # Dispatch table replaces the previous long if/elif chain.
    license_classes = {
        licenses.CC_BY: CC_BYLicense,
        licenses.CC_BY_SA: CC_BY_SALicense,
        licenses.CC_BY_ND: CC_BY_NDLicense,
        licenses.CC_BY_NC: CC_BY_NCLicense,
        licenses.CC_BY_NC_SA: CC_BY_NC_SALicense,
        licenses.CC_BY_NC_ND: CC_BY_NC_NDLicense,
        licenses.ALL_RIGHTS_RESERVED: AllRightsLicense,
        licenses.PUBLIC_DOMAIN: PublicDomainLicense,
    }
    license_class = license_classes.get(license_id)
    if license_class is None:
        raise UnknownLicenseError(
            "{} is not a valid license id. (Valid licenses are {})".format(
                license_id, [_list[0] for _list in licenses.choices]
            )
        )
    return license_class(copyright_holder=copyright_holder)
class License(object):
    """Base class for all content licenses.

    Subclasses set ``license_id`` to the matching id from
    ``le_utils.constants.licenses`` and may relax
    ``require_copyright_holder``.
    """

    license_id = None  # (str): content's license based on le_utils.constants.licenses
    copyright_holder = (
        None  # (str): name of person or organization who owns license (optional)
    )
    description = None  # (str): description of the license (optional)
    require_copyright_holder = True

    def __init__(self, copyright_holder=None, description=None):
        self.copyright_holder = copyright_holder or ""
        self.description = description

    def METHOD_NAME(self):
        """Return the le_utils license id of this license."""
        return self.license_id

    def validate(self):
        """Check copyright-holder constraints for this license.

        NOTE(review): ``assert`` statements are stripped under ``python -O``,
        so these checks disappear in optimized runs — consider raising instead.
        """
        assert (
            not self.require_copyright_holder or self.copyright_holder != ""
        ), "Assertion Failed: {} License requires a copyright holder".format(
            self.license_id
        )
        assert isinstance(
            self.copyright_holder, str
        ), "Assertion Failed: Copyright holder must be a string"

    def truncate_fields(self):
        """Clamp description/copyright holder to the configured maximum lengths."""
        if (
            self.description
            and len(self.description) > config.MAX_LICENSE_DESCRIPTION_LENGTH
        ):
            config.print_truncate(
                "license_description", self.license_id, self.description
            )
            self.description = self.description[: config.MAX_LICENSE_DESCRIPTION_LENGTH]

        if (
            self.copyright_holder
            and len(self.copyright_holder) > config.MAX_COPYRIGHT_HOLDER_LENGTH
        ):
            config.print_truncate(
                "copyright_holder", self.license_id, self.copyright_holder
            )
            self.copyright_holder = self.copyright_holder[
                : config.MAX_COPYRIGHT_HOLDER_LENGTH
            ]

    def as_dict(self):
        """Return the license as a plain serializable dict."""
        return {
            "license_id": self.license_id,
            "copyright_holder": self.copyright_holder,
            "description": self.description,
        }
class CC_BYLicense(License):
"""
The Attribution License lets others distribute, remix, tweak,
and build upon your work, even commercially, as long as they credit
you for the original creation. This is the most accommodating of
licenses offered. Recommended for maximum dissemination and use of
licensed materials.
Reference: https://creativecommons.org/licenses/by/4.0
"""
license_id = licenses.CC_BY
class CC_BY_SALicense(License):
"""
The Attribution-ShareAlike License lets others remix, tweak, and
build upon your work even for commercial purposes, as long as they
credit you and license their new creations under the identical terms.
This license is often compared to "copyleft" free and open source
software licenses. All new works based on yours will carry the same
license, so any derivatives will also allow commercial use. This is
the license used by Wikipedia, and is recommended for materials that
would benefit from incorporating content from Wikipedia and similarly
licensed projects.
Reference: https://creativecommons.org/licenses/by-sa/4.0
"""
license_id = licenses.CC_BY_SA
class CC_BY_NDLicense(License):
"""
The Attribution-NoDerivs License allows for redistribution, commercial
and non-commercial, as long as it is passed along unchanged and in
whole, with credit to you.
Reference: https://creativecommons.org/licenses/by-nd/4.0
"""
license_id = licenses.CC_BY_ND
class CC_BY_NCLicense(License):
"""
The Attribution-NonCommercial License lets others remix, tweak, and
build upon your work non-commercially, and although their new works
must also acknowledge you and be non-commercial, they don't have to
license their derivative works on the same terms.
Reference: https://creativecommons.org/licenses/by-nc/4.0
"""
license_id = licenses.CC_BY_NC
class CC_BY_NC_SALicense(License):
"""
The Attribution-NonCommercial-ShareAlike License lets others remix, tweak,
and build upon your work non-commercially, as long as they credit you and
license their new creations under the identical terms.
Reference: https://creativecommons.org/licenses/by-nc-sa/4.0
"""
license_id = licenses.CC_BY_NC_SA
class CC_BY_NC_NDLicense(License):
"""
The Attribution-NonCommercial-NoDerivs License is the most restrictive of
our six main licenses, only allowing others to download your works and share
them with others as long as they credit you, but they can't change them in
any way or use them commercially.
Reference: https://creativecommons.org/licenses/by-nc-nd/4.0
"""
license_id = licenses.CC_BY_NC_ND
class AllRightsLicense(License):
"""
The All Rights Reserved License indicates that the copyright holder reserves,
or holds for their own use, all the rights provided by copyright law under
one specific copyright treaty.
Reference: http://www.allrights-reserved.com
"""
license_id = licenses.ALL_RIGHTS_RESERVED
class PublicDomainLicense(License):
    """
    Public Domain work has been identified as being free of known restrictions
    under copyright law, including all related and neighboring rights.

    Reference: https://creativecommons.org/publicdomain/mark/1.0
    """

    # Public-domain works have no copyright holder to attribute.
    require_copyright_holder = False
    # Constant identifying this license in the shared ``licenses`` registry.
    license_id = licenses.PUBLIC_DOMAIN
class SpecialPermissionsLicense(License):
    """
    Special Permissions is a custom license to use when the current licenses
    do not apply to the content. The owner of this license is responsible for
    creating a description of what this license entails.
    """

    # Constant identifying this license in the shared ``licenses`` registry.
    license_id = licenses.SPECIAL_PERMISSIONS

    def __init__(self, copyright_holder=None, description=None):
        """Create the license; *description* is mandatory for this type.

        Raises:
            AssertionError: if *description* is empty or None.
        """
        # A bare ``assert`` is removed when Python runs with -O, which would
        # silently drop this validation.  Raise the same exception type
        # explicitly so the check always executes.
        if not description:
            raise AssertionError("Special Permissions licenses must have a description")
        super(SpecialPermissionsLicense, self).__init__(
            copyright_holder=copyright_holder, description=description
        )
# ed25519.py - Optimized version of the reference implementation of Ed25519
# downloaded from https://github.com/pyca/ed25519
#
# Written in 2011? by Daniel J. Bernstein <djb@cr.yp.to>
# 2013 by Donald Stufft <donald@stufft.io>
# 2013 by Alex Gaynor <alex.gaynor@gmail.com>
# 2013 by Greg Price <price@mit.edu>
#
# To the extent possible under law, the author(s) have dedicated all copyright
# and related and neighboring rights to this software to the public domain
# worldwide. This software is distributed without any warranty.
#
# You should have received a copy of the CC0 Public Domain Dedication along
# with this software. If not, see
# <http://creativecommons.org/publicdomain/zero/1.0/>.
"""
NB: This code is not safe for use with secret keys or secret data.
The only safe use of this code is for verifying signatures on public messages.
Functions for computing the public key of a secret key and for signing
a message are included, namely publickey_unsafe and signature_unsafe,
for testing purposes only.
The root of the problem is that Python's long-integer arithmetic is
not designed for use in cryptography. Specifically, it may take more
or less time to execute an operation depending on the values of the
inputs, and its memory access patterns may also depend on the inputs.
This opens it to timing and cache side-channel attacks which can
disclose data to an attacker. We rely on Python's long-integer
arithmetic, so we cannot handle secrets without risking their disclosure.
"""
import hashlib
from typing import List, NewType, Tuple
Point = NewType("Point", Tuple[int, int, int, int])  # extended homogeneous coords (X, Y, Z, T)
__version__ = "1.0.dev1"
b = 256  # bit length of encodings (hash output and packed field elements)
q: int = 2**255 - 19  # field prime
# NOTE: single-letter ``l`` follows the Ed25519 paper (group order); easy to misread as 1.
l: int = 2**252 + 27742317777372353535851937790883648493
COORD_MASK = ~(1 + 2 + 4 + (1 << b - 1))  # clears the 3 low bits and the top bit (clamping)
COORD_HIGH_BIT = 1 << b - 2  # bit 254, forced to 1 when clamping a scalar
def H(m: bytes) -> bytes:
    """Return the SHA-512 digest of *m* (the hash used throughout Ed25519)."""
    digest = hashlib.sha512(m)
    return digest.digest()
def pow2(x: int, p: int) -> int:
    """== pow(x, 2**p, q): square *x* modulo q, *p* times."""
    for _ in range(p):
        x = x * x % q
    return x
def inv(z: int) -> int:
    """$= z^{-1} mod q$, for z != 0.

    Uses Fermat's little theorem (q is prime): z^(q-2) == z^-1 (mod q).
    Python's three-argument ``pow`` performs modular exponentiation in C,
    which is both simpler and faster than the hand-rolled addition chain of
    the reference implementation (the chain computed the same exponent,
    q - 2 = 2^255 - 2^5 + 11; see the original curve25519_athlon.c).
    For z % q == 0 this returns 0, exactly like the original chain.
    """
    return pow(z, q - 2, q)
d = -121665 * inv(121666) % q  # twisted-Edwards curve constant d = -121665/121666
I = pow(2, (q - 1) // 4, q)  # a square root of -1 mod q (valid since q % 4 == 1)
def xrecover(y: int) -> int:
    """Recover the even x-coordinate on the curve matching *y*.

    Solves x^2 = (y^2 - 1) / (d*y^2 + 1): since q % 8 == 5, a candidate
    square root is xx^((q+3)/8); if its square is -xx instead, multiply by
    I = sqrt(-1).  The even root is returned as the canonical choice.
    """
    xx = (y * y - 1) * inv(d * y * y + 1)
    x = pow(xx, (q + 3) // 8, q)
    # Candidate squared to -xx: fix it up with sqrt(-1).
    if (x * x - xx) % q != 0:
        x = (x * I) % q
    # Canonicalize to the even root.
    if x % 2 != 0:
        x = q - x
    return x
By = 4 * inv(5)  # base-point y-coordinate (4/5 mod q)
Bx = xrecover(By)
B = Point((Bx % q, By % q, 1, (Bx * By) % q))  # standard base point, extended coordinates
ident = Point((0, 1, 1, 0))  # neutral element of the group
def edwards_add(P: Point, Q: Point) -> Point:
    """Add two points given in extended twisted-Edwards coordinates.

    This is formula sequence 'addition-add-2008-hwcd-3' from
    http://www.hyperelliptic.org/EFD/g1p/auto-twisted-extended-1.html
    """
    (x1, y1, z1, t1) = P
    (x2, y2, z2, t2) = Q

    a = (y1 - x1) * (y2 - x2) % q
    b = (y1 + x1) * (y2 + x2) % q
    c = t1 * 2 * d * t2 % q
    dd = z1 * 2 * z2 % q
    e = b - a
    f = dd - c
    g = dd + c
    h = b + a
    x3 = e * f
    y3 = g * h
    t3 = e * h
    z3 = f * g

    return Point((x3 % q, y3 % q, z3 % q, t3 % q))
def edwards_double(P: Point) -> Point:
    """Double a point given in extended twisted-Edwards coordinates.

    This is formula sequence 'dbl-2008-hwcd' from
    http://www.hyperelliptic.org/EFD/g1p/auto-twisted-extended-1.html
    (specialized for a = -1, hence the negated ``a`` terms below).
    """
    (x1, y1, z1, _) = P

    a = x1 * x1 % q
    b = y1 * y1 % q
    c = 2 * z1 * z1 % q
    # dd = -a
    e = ((x1 + y1) * (x1 + y1) - a - b) % q
    g = -a + b  # dd + b
    f = g - c
    h = -a - b  # dd - b
    x3 = e * f
    y3 = g * h
    t3 = e * h
    z3 = f * g

    return Point((x3 % q, y3 % q, z3 % q, t3 % q))
def scalarmult(P: Point, e: int) -> Point:
    """Return e * P via recursive double-and-add (not constant-time).

    Recursion depth equals the bit length of *e* (~255 for group scalars).
    NOTE(review): assumes e >= 0 — for negative e, ``e // 2`` never reaches
    0 and the recursion would not terminate normally.
    """
    if e == 0:
        return ident
    Q = scalarmult(P, e // 2)
    Q = edwards_double(Q)
    if e & 1:
        Q = edwards_add(Q, P)
    return Q
# Bpow[i] == scalarmult(B, 2**i); fixed table used by scalarmult_B below.
Bpow: List[Point] = []


def make_Bpow() -> None:
    """Fill Bpow with 253 successive doublings of the base point B."""
    P = B
    for _ in range(253):
        Bpow.append(P)
        P = edwards_double(P)


make_Bpow()
def scalarmult_B(e: int) -> Point:
    """
    Implements scalarmult(B, e) more efficiently, using the precomputed
    table of doublings in Bpow. Not constant-time.
    """
    # scalarmult(B, l) is the identity, so reduce into the group order first.
    e = e % l
    acc = ident
    for power in Bpow:
        if e & 1:
            acc = edwards_add(acc, power)
        e >>= 1
    # Bpow holds 253 entries, enough to consume any reduced scalar.
    assert e == 0, e
    return acc
def encodeint(y: int) -> bytes:
    """Serialize a non-negative integer into b/8 little-endian bytes."""
    return y.to_bytes(length=b // 8, byteorder="little")
def encodepoint(P: Point) -> bytes:
    """Pack a point into b/8 bytes: little-endian affine y, parity of x in the top bit."""
    (x, y, z, _) = P
    zi = inv(z)
    # Convert from projective to affine coordinates.
    x = (x * zi) % q
    y = (y * zi) % q
    xbit = (x & 1) << (b - 1)
    y_result = y & ~xbit  # clear x bit
    y_result |= xbit  # set correct x bit value
    return encodeint(y_result)
def METHOD_NAME(s: bytes) -> int:
    """Interpret *s* as a little-endian unsigned integer."""
    return int.from_bytes(s, byteorder="little")
def decodepoint(s: bytes) -> Point:
    """Decode a packed point and validate that it lies on the curve.

    Raises:
        ValueError: if the decoded coordinates do not satisfy the curve equation.
    """
    y = METHOD_NAME(s) & ~(1 << b - 1)  # y without the highest bit
    x = xrecover(y)
    # The top bit of the encoding carries the parity of x.
    if x & 1 != bit(s, b - 1):
        x = q - x
    P = Point((x, y, 1, (x * y) % q))
    if not isoncurve(P):
        raise ValueError("decoding point that is not on curve")
    return P
def decodecoord(s: bytes) -> int:
    """Decode the first b/8 bytes of *s* into a clamped scalar.

    Clamping: clear the 3 low bits and the top bit, then force bit b-2 on
    (see the COORD_MASK / COORD_HIGH_BIT constants above).
    """
    a = METHOD_NAME(s[: b // 8])
    # clear mask bits
    a &= COORD_MASK
    # set high bit
    a |= COORD_HIGH_BIT
    return a
def bit(h: bytes, i: int) -> int:
    """Return bit *i* of byte string *h* (LSB-first within each byte)."""
    byte_index, bit_offset = divmod(i, 8)
    return (h[byte_index] >> bit_offset) & 1
def publickey_unsafe(sk: bytes) -> bytes:
    """Derive the packed public key for secret key *sk*.

    Not safe to use with secret keys or secret data.
    See module docstring. This function should be used for testing only.
    """
    h = H(sk)
    # Clamped scalar from the low half of H(sk).
    a = decodecoord(h)
    A = scalarmult_B(a)
    return encodepoint(A)
def Hint(m: bytes) -> int:
    """SHA-512 hash *m* and interpret the digest as a little-endian integer."""
    digest = H(m)
    return METHOD_NAME(digest)
def signature_unsafe(m: bytes, sk: bytes, pk: bytes) -> bytes:
    """Sign message *m* with secret key *sk* and packed public key *pk*.

    Not safe to use with secret keys or secret data.
    See module docstring. This function should be used for testing only.
    """
    h = H(sk)
    a = decodecoord(h)
    # Deterministic nonce: hash of the upper half of H(sk) and the message.
    r = Hint(h[b // 8 : b // 4] + m)
    R = scalarmult_B(r)
    S = (r + Hint(encodepoint(R) + pk + m) * a) % l
    return encodepoint(R) + encodeint(S)
def isoncurve(P: Point) -> bool:
    """Check the extended-coordinate invariants and the curve equation for *P*."""
    (x, y, z, t) = P
    return (
        z % q != 0  # must not be degenerate (Z == 0)
        and x * y % q == z * t % q  # extended coordinate consistency: T == XY/Z
        and (y * y - x * x - z * z - d * t * t) % q == 0  # projective curve equation
    )
class SignatureMismatch(Exception):
    """Raised by checkvalid() when a signature fails verification."""

    pass
def checkvalid(s: bytes, m: bytes, pk: bytes) -> None:
    """Verify signature *s* over message *m* under packed public key *pk*.

    Not safe to use when any argument is secret.
    See module docstring. This function should be used only for
    verifying public signatures of public messages.

    Raises:
        ValueError: for malformed lengths or points that fail to decode.
        SignatureMismatch: if the signature does not verify.
    """
    if len(s) != b // 4:
        raise ValueError("signature length is wrong")
    if len(pk) != b // 8:
        raise ValueError("public-key length is wrong")

    R = decodepoint(s[: b // 8])
    A = decodepoint(pk)
    S = METHOD_NAME(s[b // 8 : b // 4])
    h = Hint(encodepoint(R) + pk + m)

    # Ed25519 verification equation: S*B == R + h*A.
    lhs = scalarmult_B(S)
    rhs = edwards_add(R, scalarmult(A, h))
    (x1, y1, z1, _) = lhs
    (x2, y2, z2, _) = rhs
    # Compare projectively (cross-multiply) to avoid a field inversion.
    ok = (
        isoncurve(lhs)
        and isoncurve(rhs)
        and (x1 * z2 - x2 * z1) % q == 0
        and (y1 * z2 - y2 * z1) % q == 0
    )
    if not ok:
        raise SignatureMismatch("signature does not pass verification")
# This file is Copyright 2020 Volatility Foundation and licensed under the Volatility Software License 1.0
# which is available at https://www.volatilityfoundation.org/license/vsl-v1.0
#
import logging
import os
from typing import List, Optional, Tuple, Iterator
from volatility3.framework import interfaces, renderers, exceptions, symbols
from volatility3.framework.configuration import requirements
from volatility3.framework.interfaces import configuration
from volatility3.framework.renderers import format_hints
from volatility3.framework.symbols import intermed
from volatility3.framework.symbols.windows import extensions
from volatility3.framework.symbols.windows import versions
vollog = logging.getLogger(__name__)
class BigPools(interfaces.plugins.PluginInterface):
    """List big page pools."""

    # Minimum volatility3 framework version this plugin requires.
    _required_framework_version = (2, 0, 0)
    # Version of this plugin's own interface.
    _version = (1, 1, 0)
    @classmethod
    def get_requirements(cls) -> List[interfaces.configuration.RequirementInterface]:
        """Declare the configuration options this plugin accepts."""
        # Since we're calling the plugin, make sure we have the plugin's requirements
        return [
            requirements.ModuleRequirement(
                name="kernel",
                description="Windows kernel",
                architectures=["Intel32", "Intel64"],
            ),
            requirements.StringRequirement(
                name="tags",
                description="Comma separated list of pool tags to filter pools returned",
                optional=True,
                default=None,
            ),
            requirements.BooleanRequirement(
                name="show-free",
                description="Show freed regions (otherwise only show allocations in use)",
                default=False,
                optional=True,
            ),
        ]
    @classmethod
    def list_big_pools(
        cls,
        context: interfaces.context.ContextInterface,
        layer_name: str,
        symbol_table: str,
        tags: Optional[list] = None,
        show_free: bool = False,
    ):
        """Returns the big page pool objects from the kernel PoolBigPageTable array.

        Args:
            context: The context to retrieve required elements (layers, symbol tables) from
            layer_name: The name of the layer on which to operate
            symbol_table: The name of the table containing the kernel symbols
            tags: An optional list of pool tags to filter big page pool tags by
            show_free: If True, also yield entries whose allocation has been freed

        Yields:
            A big page pool object
        """
        kvo = context.layers[layer_name].config["kernel_virtual_offset"]
        ntkrnlmp = context.module(symbol_table, layer_name=layer_name, offset=kvo)

        # PoolBigPageTable stores a pointer to the table (read below as an
        # absolute offset), PoolBigPageTableSize its entry count.
        big_page_table_offset = ntkrnlmp.get_symbol("PoolBigPageTable").address
        big_page_table = ntkrnlmp.object(
            object_type="unsigned long long", offset=big_page_table_offset
        )

        big_page_table_size_offset = ntkrnlmp.get_symbol("PoolBigPageTableSize").address
        big_page_table_size = ntkrnlmp.object(
            object_type="unsigned long", offset=big_page_table_size_offset
        )

        try:
            big_page_table_type = ntkrnlmp.get_type("_POOL_TRACKER_BIG_PAGES")
        except exceptions.SymbolError:
            # We have to manually load a symbol table, picked by Windows
            # generation and pointer width.
            is_vista_or_later = versions.is_vista_or_later(context, symbol_table)
            is_win10 = versions.is_win10(context, symbol_table)
            if is_win10:
                big_pools_json_filename = "bigpools-win10"
            elif is_vista_or_later:
                big_pools_json_filename = "bigpools-vista"
            else:
                big_pools_json_filename = "bigpools"

            if symbols.symbol_table_is_64bit(context, symbol_table):
                big_pools_json_filename += "-x64"
            else:
                big_pools_json_filename += "-x86"

            new_table_name = intermed.IntermediateSymbolTable.create(
                context=context,
                config_path=configuration.path_join(
                    context.symbol_space[symbol_table].config_path, "bigpools"
                ),
                sub_path=os.path.join("windows", "bigpools"),
                filename=big_pools_json_filename,
                table_mapping={"nt_symbols": symbol_table},
                class_types={
                    "_POOL_TRACKER_BIG_PAGES": extensions.pool.POOL_TRACKER_BIG_PAGES
                },
            )
            module = context.module(new_table_name, layer_name, offset=0)
            big_page_table_type = module.get_type("_POOL_TRACKER_BIG_PAGES")

        big_pools = ntkrnlmp.object(
            object_type="array",
            offset=big_page_table,
            subtype=big_page_table_type,
            count=big_page_table_size,
            absolute=True,
        )

        for big_pool in big_pools:
            if big_pool.is_valid():
                if (tags is None or big_pool.get_key() in tags) and (
                    show_free or not big_pool.is_free()
                ):
                    yield big_pool
def _generator(self) -> Iterator[Tuple[int, Tuple[int, str]]]: # , str, int]]]:
if self.config.get("tags"):
tags = [tag for tag in self.config["tags"].split(",")]
else:
tags = None
kernel = self.context.modules[self.config["kernel"]]
for big_pool in self.list_big_pools(
context=self.context,
layer_name=kernel.layer_name,
symbol_table=kernel.symbol_table_name,
tags=tags,
show_free=self.config.get("show-free"),
):
num_bytes = big_pool.get_number_of_bytes()
if not isinstance(num_bytes, interfaces.renderers.BaseAbsentValue):
num_bytes = format_hints.Hex(num_bytes)
if big_pool.is_free():
status = "Free"
else:
status = "Allocated"
yield (
0,
(
format_hints.Hex(big_pool.Va),
big_pool.get_key(),
big_pool.get_pool_type(),
num_bytes,
status,
),
)
def METHOD_NAME(self):
return renderers.TreeGrid(
[
("Allocation", format_hints.Hex),
("Tag", str),
("PoolType", str),
("NumberOfBytes", format_hints.Hex),
("Status", str),
],
self._generator(),
) |
#!/usr/bin/env python
# SPDX-License-Identifier: ISC
#
# test_bfd_topo1.py
# Part of NetDEF Topology Tests
#
# Copyright (c) 2018 by
# Network Device Education Foundation, Inc. ("NetDEF")
#
"""
test_bfd_topo1.py: Test the FRR BFD daemon.
"""
import os
import sys
import json
from functools import partial
import pytest
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
# Import topogen and topotest helpers
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
pytestmark = [pytest.mark.bfdd, pytest.mark.bgpd]
def setup_module(mod):
    "Sets up the pytest environment"
    # Three LAN segments with r2 as the hub, attached to r1, r3 and r4.
    topodef = {
        "s1": ("r1", "r2"),
        "s2": ("r2", "r3"),
        "s3": ("r2", "r4"),
    }
    tgen = Topogen(topodef, mod.__name__)
    tgen.start_topology()

    # Each router loads its per-router zebra/bfdd/bgpd configuration from
    # the directory named after it next to this test file.
    router_list = tgen.routers()
    for rname, router in router_list.items():
        router.load_config(
            TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
        )
        router.load_config(
            TopoRouter.RD_BFD, os.path.join(CWD, "{}/bfdd.conf".format(rname))
        )
        router.load_config(
            TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
        )

    # Initialize all routers.
    tgen.start_router()

    # Verify that we are using the proper version and that the BFD
    # daemon exists.
    for router in router_list.values():
        # Check for Version
        if router.has_version("<", "5.1"):
            tgen.set_error("Unsupported FRR version")
            break
def teardown_module(_mod):
    "Teardown the pytest environment"
    tgen = get_topogen()
    # Stops all routers and tears down the emulated topology.
    tgen.stop_topology()
def test_bfd_connection():
    "Assert that the BFD peers can find themselves."
    tgen = get_topogen()
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    logger.info("waiting for bfd peers to go up")

    for router in tgen.routers().values():
        json_file = "{}/{}/peers.json".format(CWD, router.name)
        # Use a context manager so the file handle is closed deterministically
        # (the previous open(...).read() leaked the handle).
        with open(json_file) as fp:
            expected = json.load(fp)

        test_func = partial(
            topotest.router_json_cmp, router, "show bfd peers json", expected
        )
        _, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5)
        assertmsg = '"{}" JSON output mismatches'.format(router.name)
        assert result is None, assertmsg
def test_bgp_convergence():
    "Assert that BGP is converging."
    tgen = get_topogen()
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    logger.info("waiting for bgp peers to go up")

    for router in tgen.routers().values():
        ref_file = "{}/{}/bgp_summary.json".format(CWD, router.name)
        # Use a context manager so the file handle is closed deterministically
        # (the previous open(...).read() leaked the handle).
        with open(ref_file) as fp:
            expected = json.load(fp)

        test_func = partial(
            topotest.router_json_cmp, router, "show ip bgp summary json", expected
        )
        _, res = topotest.run_and_expect(test_func, None, count=125, wait=1.0)
        assertmsg = "{}: bgp did not converge".format(router.name)
        assert res is None, assertmsg
def test_bgp_fast_convergence():
    "Assert that BGP is converging before setting a link down."
    tgen = get_topogen()
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    logger.info("waiting for bgp peers converge")

    for router in tgen.routers().values():
        ref_file = "{}/{}/bgp_prefixes.json".format(CWD, router.name)
        # Use a context manager so the file handle is closed deterministically
        # (the previous open(...).read() leaked the handle).
        with open(ref_file) as fp:
            expected = json.load(fp)

        test_func = partial(
            topotest.router_json_cmp, router, "show ip bgp json", expected
        )
        _, res = topotest.run_and_expect(test_func, None, count=40, wait=0.5)
        assertmsg = "{}: bgp did not converge".format(router.name)
        assert res is None, assertmsg
def METHOD_NAME():
    """
    Assert that BFD notices the link down after simulating network
    failure.
    """
    tgen = get_topogen()
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    # Disable r1-eth0 link.
    tgen.gears["r1"].link_enable("r1-eth0", enabled=False)

    # Wait the minimum time we can before checking that BGP/BFD
    # converged.
    logger.info("waiting for BFD converge")

    # Check that BGP converged quickly.
    for router in tgen.routers().values():
        json_file = "{}/{}/peers.json".format(CWD, router.name)
        # Use a context manager so the file handle is closed deterministically
        # (the previous open(...).read() leaked the handle).
        with open(json_file) as fp:
            expected = json.load(fp)

        # Load the same file as previous test, but expect the peer on the
        # disabled link to be down (r1 sees 192.168.0.2, everyone else
        # sees 192.168.0.1).  The two original loops only differed in the
        # peer address, so they are merged here.
        down_peer = "192.168.0.2" if router.name == "r1" else "192.168.0.1"
        for peer in expected:
            if peer["peer"] == down_peer:
                peer["status"] = "down"

        test_func = partial(
            topotest.router_json_cmp, router, "show bfd peers json", expected
        )
        _, res = topotest.run_and_expect(test_func, None, count=20, wait=0.5)
        assertmsg = '"{}" JSON output mismatches'.format(router.name)
        assert res is None, assertmsg
def test_bgp_fast_reconvergence():
    "Assert that BGP is converging after setting a link down."
    tgen = get_topogen()
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    logger.info("waiting for BGP re convergence")

    # Check that BGP converged quickly.
    for router in tgen.routers().values():
        ref_file = "{}/{}/bgp_prefixes.json".format(CWD, router.name)
        # Use a context manager so the file handle is closed deterministically
        # (the previous open(...).read() leaked the handle).
        with open(ref_file) as fp:
            expected = json.load(fp)

        # Load the same file as previous test, but set networks to None
        # to test absence.
        if router.name == "r1":
            expected["routes"]["10.254.254.2/32"] = None
            expected["routes"]["10.254.254.3/32"] = None
            expected["routes"]["10.254.254.4/32"] = None
        else:
            expected["routes"]["10.254.254.1/32"] = None

        test_func = partial(
            topotest.router_json_cmp, router, "show ip bgp json", expected
        )
        _, res = topotest.run_and_expect(test_func, None, count=5, wait=1)
        assertmsg = "{}: bgp did not converge".format(router.name)
        assert res is None, assertmsg
def test_memory_leak():
    "Run the memory leak test and report results."
    tgen = get_topogen()
    # Only meaningful when the topotest run was started with memleak
    # checking enabled; otherwise skip quietly.
    if not tgen.is_memleak_enabled():
        pytest.skip("Memory leak test/report is disabled")
    tgen.report_memory_leaks()
if __name__ == "__main__":
    # Allow running this topotest directly: forward CLI args to pytest.
    args = ["-s"] + sys.argv[1:]
    sys.exit(pytest.main(args))
3,092 | forward | from typing import Tuple, Union
import torch
import torch.nn.functional as F
from torch import Tensor
from torch.nn import Parameter
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.nn.dense.linear import Linear
from torch_geometric.nn.inits import glorot
from torch_geometric.typing import (
Adj,
Optional,
OptPairTensor,
OptTensor,
Size,
)
from torch_geometric.utils import softmax
class GeneralConv(MessagePassing):
r"""A general GNN layer adapted from the `"Design Space for Graph Neural
Networks" <https://arxiv.org/abs/2011.08843>`_ paper.
Args:
in_channels (int or tuple): Size of each input sample, or :obj:`-1` to
derive the size from the first input(s) to the forward method.
A tuple corresponds to the sizes of source and target
dimensionalities.
out_channels (int): Size of each output sample.
in_edge_channels (int, optional): Size of each input edge.
(default: :obj:`None`)
aggr (str, optional): The aggregation scheme to use
(:obj:`"add"`, :obj:`"mean"`, :obj:`"max"`).
(default: :obj:`"mean"`)
skip_linear (bool, optional): Whether apply linear function in skip
connection. (default: :obj:`False`)
directed_msg (bool, optional): If message passing is directed;
otherwise, message passing is bi-directed. (default: :obj:`True`)
heads (int, optional): Number of message passing ensembles.
If :obj:`heads > 1`, the GNN layer will output an ensemble of
multiple messages.
If attention is used (:obj:`attention=True`), this corresponds to
multi-head attention. (default: :obj:`1`)
attention (bool, optional): Whether to add attention to message
computation. (default: :obj:`False`)
attention_type (str, optional): Type of attention: :obj:`"additive"`,
:obj:`"dot_product"`. (default: :obj:`"additive"`)
l2_normalize (bool, optional): If set to :obj:`True`, output features
will be :math:`\ell_2`-normalized, *i.e.*,
:math:`\frac{\mathbf{x}^{\prime}_i}
{\| \mathbf{x}^{\prime}_i \|_2}`.
(default: :obj:`False`)
bias (bool, optional): If set to :obj:`False`, the layer will not learn
an additive bias. (default: :obj:`True`)
**kwargs (optional): Additional arguments of
:class:`torch_geometric.nn.conv.MessagePassing`.
Shapes:
- **input:**
node features :math:`(|\mathcal{V}|, F_{in})` or
:math:`((|\mathcal{V_s}|, F_{s}), (|\mathcal{V_t}|, F_{t}))`
if bipartite,
edge indices :math:`(2, |\mathcal{E}|)`,
edge attributes :math:`(|\mathcal{E}|, D)` *(optional)*
- **output:** node features :math:`(|\mathcal{V}|, F_{out})` or
:math:`(|\mathcal{V}_t|, F_{out})` if bipartite
"""
    def __init__(
        self,
        in_channels: Union[int, Tuple[int, int]],
        out_channels: Optional[int],
        in_edge_channels: Optional[int] = None,
        aggr: str = "add",
        skip_linear: bool = False,
        directed_msg: bool = True,
        heads: int = 1,
        attention: bool = False,
        attention_type: str = "additive",
        l2_normalize: bool = False,
        bias: bool = True,
        **kwargs,
    ):
        # NOTE(review): the annotations for in_edge_channels (was ``int``)
        # and skip_linear (was ``str``) did not match their defaults/usage
        # and have been corrected to Optional[int] / bool.
        kwargs.setdefault('aggr', aggr)
        super().__init__(node_dim=0, **kwargs)

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.in_edge_channels = in_edge_channels
        self.aggr = aggr
        self.skip_linear = skip_linear
        self.directed_msg = directed_msg
        self.heads = heads
        self.attention = attention
        self.attention_type = attention_type
        self.normalize_l2 = l2_normalize

        # Bipartite-style (source, target) channel pair; duplicate for the
        # homogeneous case.
        if isinstance(in_channels, int):
            in_channels = (in_channels, in_channels)

        # NOTE(review): both branches build lin_msg identically; the
        # undirected case additionally builds lin_msg_i for the reverse
        # direction.
        if self.directed_msg:
            self.lin_msg = Linear(in_channels[0], out_channels * self.heads,
                                  bias=bias)
        else:
            self.lin_msg = Linear(in_channels[0], out_channels * self.heads,
                                  bias=bias)
            self.lin_msg_i = Linear(in_channels[0], out_channels * self.heads,
                                    bias=bias)

        # Skip connection: a learned projection when requested or when the
        # dimensions differ, otherwise the identity.
        if self.skip_linear or self.in_channels != self.out_channels:
            self.lin_self = Linear(in_channels[1], out_channels, bias=bias)
        else:
            self.lin_self = torch.nn.Identity()

        if self.in_edge_channels is not None:
            self.lin_edge = Linear(in_edge_channels, out_channels * self.heads,
                                   bias=bias)

        # TODO: A general torch_geometric.nn.AttentionLayer
        if self.attention:
            if self.attention_type == 'additive':
                self.att_msg = Parameter(
                    torch.empty(1, self.heads, self.out_channels))
            elif self.attention_type == 'dot_product':
                # 1/sqrt(d) scaling for dot-product attention.
                scaler = torch.tensor(out_channels, dtype=torch.float).sqrt()
                self.register_buffer('scaler', scaler)
            else:
                raise ValueError(
                    f"Attention type '{self.attention_type}' not supported")

        self.reset_parameters()
    def reset_parameters(self):
        """Reinitialize learnable parameters (sub-linears, attention weights)."""
        super().reset_parameters()
        self.lin_msg.reset_parameters()
        # lin_self may be an nn.Identity, which has nothing to reset.
        if hasattr(self.lin_self, 'reset_parameters'):
            self.lin_self.reset_parameters()
        if self.in_edge_channels is not None:
            self.lin_edge.reset_parameters()
        if self.attention and self.attention_type == 'additive':
            glorot(self.att_msg)
        # NOTE(review): lin_msg_i (built in the undirected case) is not
        # re-initialized here — possibly an oversight; confirm upstream.
    def METHOD_NAME(self, x: Union[Tensor, OptPairTensor], edge_index: Adj,
                    edge_attr: OptTensor = None, size: Size = None) -> Tensor:
        """Run message passing and return updated node features.

        A single tensor input is treated as the homogeneous (x, x) pair;
        the multi-head outputs are averaged and a (possibly linear) skip
        connection of the target features is added.
        """
        if isinstance(x, Tensor):
            x: OptPairTensor = (x, x)
        x_self = x[1]
        # propagate_type: (x: OptPairTensor)
        out = self.propagate(edge_index, x=x, size=size, edge_attr=edge_attr)
        out = out.mean(dim=1)  # todo: other approach to aggregate heads
        out = out + self.lin_self(x_self)
        if self.normalize_l2:
            out = F.normalize(out, p=2, dim=-1)
        return out
    def message_basic(self, x_i: Tensor, x_j: Tensor, edge_attr: OptTensor):
        """Linearly transform neighbor features, adding edge features if present."""
        if self.directed_msg:
            x_j = self.lin_msg(x_j)
        else:
            # Undirected: combine transforms of both endpoints.
            x_j = self.lin_msg(x_j) + self.lin_msg_i(x_i)
        if edge_attr is not None:
            x_j = x_j + self.lin_edge(edge_attr)
        return x_j
def message(self, x_i: Tensor, x_j: Tensor, edge_index_i: Tensor,
size_i: Tensor, edge_attr: Tensor) -> Tensor:
x_j_out = self.message_basic(x_i, x_j, edge_attr)
x_j_out = x_j_out.view(-1, self.heads, self.out_channels)
if self.attention:
if self.attention_type == 'dot_product':
x_i_out = self.message_basic(x_j, x_i, edge_attr)
x_i_out = x_i_out.view(-1, self.heads, self.out_channels)
alpha = (x_i_out * x_j_out).sum(dim=-1) / self.scaler
else:
alpha = (x_j_out * self.att_msg).sum(dim=-1)
alpha = F.leaky_relu(alpha, negative_slope=0.2)
alpha = softmax(alpha, edge_index_i, num_nodes=size_i)
alpha = alpha.view(-1, self.heads, 1)
return x_j_out * alpha
else:
return x_j_out |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of IVRE.
# Copyright 2011 - 2022 Pierre LALET <pierre@droids-corp.org>
#
# IVRE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# IVRE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with IVRE. If not, see <http://www.gnu.org/licenses/>.
"""
Specific type definitions for IVRE
"""
from __future__ import annotations
from typing import Any, Dict, Generator, Iterable, List, Optional, Set, Tuple, Union
try:
    # Literal / Protocol / TypedDict require Python >= 3.8; the flag below
    # selects between rich and degraded type definitions.
    from typing import Literal, Protocol, TypedDict
except ImportError:
    HAS_TYPED_DICT = False
else:
    HAS_TYPED_DICT = True
NmapProbe = List[Tuple[str, Dict[str, Any]]]
ParsedCertificate = Dict[str, Any] # TODO: TypedDict
# Filters
MongoFilter = Dict[str, Any] # TODO: TypedDict
# TODO
ElasticFilter = Any
HttpFilter = Any
SqlFilter = Any
TinyFilter = Any
Filter = Union[MongoFilter, SqlFilter, HttpFilter, ElasticFilter, TinyFilter]
# Records (TODO)
Record = Dict[str, Any]
# Sort
if HAS_TYPED_DICT:
    # Sort/index direction: 1 ascending, -1 descending; "text" for text indexes.
    SortKey = Tuple[str, Literal[-1, 1]]
    IndexKey = Tuple[str, Literal[-1, 1, "text"]]
else:
    SortKey = Tuple[str, int]  # type: ignore
    IndexKey = Tuple[str, Union[int, str]]  # type: ignore
Sort = Iterable[SortKey]
# DB objects
DBCursor = Generator[Record, None, None]
if HAS_TYPED_DICT:
class CpeDict(TypedDict, total=False):
type: str
vendor: str
product: str
version: str
origins: Set[str]
# class ParsedCertificate(TypedDict, total=False):
# TODO
class Tag(TypedDict, total=False):
value: str
type: str
info: List[str]
class NmapProbeRec(TypedDict, total=False):
probe: bytes
fp: NmapProbe
fallbacks: List[str]
class NmapServiceMatch(TypedDict, total=False):
service_name: str
service_product: str
service_version: str
service_devicetype: str
service_extrainfo: str
service_hostname: str
service_ostype: str
service_tunnel: str
service_method: str
service_servicefp: str
service_conf: int
cpe: List[str]
soft: bool
class NmapScanTemplate(TypedDict, total=False):
nmap: str
pings: str
scans: str
osdetect: bool
traceroute: bool
resolve: int
verbosity: int
ports: Optional[str]
top_ports: Optional[int]
host_timeout: Optional[str]
script_timeout: Optional[str]
scripts_categories: Optional[Iterable[str]]
scripts_exclude: Optional[Iterable[str]]
scripts_force: Optional[Iterable[str]]
extra_options: Optional[Iterable[str]]
class DB(Protocol):
flt_empty: Filter
def distinct(
self,
field: str,
flt: Optional[Filter] = None,
sort: Optional[Any] = None,
limit: Optional[int] = None,
skip: Optional[int] = None,
) -> Iterable:
...
@classmethod
def flt_and(cls, *args: Filter) -> Filter:
...
def from_binary(self, data: Any) -> bytes:
...
def get(self, spec: Filter, **kargs: Any) -> Generator[Record, None, None]:
...
def _get(self, spec: Filter, **kargs: Any) -> DBCursor:
...
def explain(self, cur: DBCursor, **kargs: Any) -> str:
...
def remove_many(self, spec: Filter) -> None:
...
def searchcert(
self,
keytype: Optional[str] = None,
md5: Optional[str] = None,
sha1: Optional[str] = None,
sha256: Optional[str] = None,
subject: Optional[str] = None,
issuer: Optional[str] = None,
self_signed: Optional[bool] = None,
pkmd5: Optional[str] = None,
pksha1: Optional[str] = None,
pksha256: Optional[str] = None,
cacert: bool = False,
) -> Filter:
...
@staticmethod
def serialize(obj: Any) -> str:
...
class DBAgent(DB, Protocol):
pass
class DBData(DB, Protocol):
pass
class DBFlow(DB, Protocol):
pass
class DBActive(DB, Protocol):
def searchsshkey(
self,
fingerprint: Optional[str] = None,
key: Optional[str] = None,
keytype: Optional[str] = None,
bits: Optional[int] = None,
output: Optional[str] = None,
) -> Filter:
...
class DBNmap(DBActive, Protocol):
pass
class DBPassive(DB, Protocol):
def searchsshkey(
self,
fingerprint: Optional[str] = None,
key: Optional[str] = None,
keytype: Optional[str] = None,
bits: Optional[int] = None,
) -> Filter:
...
class DBView(DBActive, Protocol):
pass
class MetaDB(Protocol):
agent: DBAgent
data: DBData
db_types: Dict[str, Dict[str, Tuple[str, str]]]
flow: DBFlow
nmap: DBNmap
passive: DBPassive
url: str
urls: Dict[str, str]
view: DBView
def METHOD_NAME(self, purpose: str) -> DB:
...
class Target(Iterable[int], Protocol):
targetscount: int
else:
    # Degraded fallbacks for Python < 3.8: the names still exist so that
    # annotations evaluate, but they carry no structural information.
    CpeDict = Dict[str, Union[str, Set[str]]]  # type: ignore
    NmapProbeRec = Dict[str, Union[bytes, NmapProbe, List[str]]]  # type: ignore
    NmapServiceMatch = Dict[str, Union[str, List[str]]]  # type: ignore
    NmapScanTemplate = Dict[  # type: ignore
        str,
        Union[str, bool, int, Optional[str], Optional[int], Optional[Iterable[str]]],
    ]
    DB = Any  # type: ignore
    DBAgent = Any  # type: ignore
    DBData = Any  # type: ignore
    DBFlow = Any  # type: ignore
    DBActive = Any  # type: ignore
    DBNmap = Any  # type: ignore
    DBPassive = Any  # type: ignore
    DBView = Any  # type: ignore
    MetaDB = Any  # type: ignore
    Target = Any  # type: ignore
    Tag = Dict[str, Union[str, List[str]]]  # type: ignore
3,094 | to binary image | from enum import Enum
from typing import Iterable, List, Union
import numpy as np
import SimpleITK as sitk
from local_migrator import register_class
@register_class
class RadiusType(Enum):
    """
    If operation should be performed, and if on each layer separately or globally
    """

    NO = 0  #: No operation
    R2D = 1  #: operation in each layer separately
    R3D = 2  #: operation on whole stack

    def __str__(self):
        # Human-readable label; relies on the enum values being 0, 1, 2.
        return ["No", "2d", "3d"][self.value]
@register_class
class NoiseFilterType(Enum):
    """Denoising filter selection, dispatched by :func:`apply_filter`."""

    No = 0
    Gauss = 1
    Median = 2
    Bilateral = 3

    def __str__(self):
        return self.name
def _generic_image_operation(image, radius, fun, layer):
    """Apply SimpleITK filter *fun* to *image*, per layer or on the whole stack.

    Normalizes inputs before dispatching: bool arrays become uint8 (SimpleITK
    filters reject booleans), single-slice 3D stacks are handled per layer,
    and sequence radii are reversed to match SimpleITK's (x, y, z) axis order
    (numpy arrays are indexed (z, y, x)).
    """
    if image.ndim == 3 and image.shape[0] == 1:
        # A one-slice stack is just a 2D image; process it as a layer.
        layer = True
    if image.ndim == 2:
        layer = False
    if image.dtype == bool:
        image = image.astype(np.uint8)
    if isinstance(radius, (list, tuple)):
        radius = list(reversed(radius))
    if not layer and image.ndim <= 3:
        return sitk.GetArrayFromImage(fun(sitk.GetImageFromArray(image), radius))
    # Per-layer (or >3D) processing works on a copy, mutated in place.
    return _generic_image_operations_recurse(np.copy(image), radius, fun, layer)
def _generic_image_operations_recurse(image, radius, fun, layer):
    """Recursively walk leading axes until a processable 2D/3D array remains.

    Mutates *image* in place (callers pass a copy) and returns it.
    """
    if (not layer and image.ndim == 3) or image.ndim == 2:
        return sitk.GetArrayFromImage(fun(sitk.GetImageFromArray(image), radius))
    for layer_data in image:
        layer_data[...] = _generic_image_operations_recurse(layer_data, radius, fun, layer)
    return image
def gaussian(image: np.ndarray, radius: float, layer=True):
    """
    Gaussian blur of image (sitk.DiscreteGaussian).

    :param np.ndarray image: image to apply gaussian filter
    :param float radius: radius for gaussian kernel
    :param bool layer: if operation should be run on each layer separately
    :return: blurred image as a numpy array of the same shape
    """
    return _generic_image_operation(image, radius, sitk.DiscreteGaussian, layer)
def bilateral(image: np.ndarray, radius: float, layer=True):
    """
    Bilateral (edge-preserving) smoothing of image (sitk.Bilateral).

    NOTE(review): the previous docstring said "Gaussian blur", copied from
    :func:`gaussian`; this function wraps sitk.Bilateral instead.

    :param np.ndarray image: image to apply bilateral filter
    :param float radius: radius parameter forwarded to sitk.Bilateral
    :param bool layer: if operation should be run on each layer separately
    :return: filtered image as a numpy array of the same shape
    """
    return _generic_image_operation(image, radius, sitk.Bilateral, layer)
def median(image: np.ndarray, radius: Union[int, List[int]], layer=True):
    """
    Median blur of image (sitk.Median).

    :param np.ndarray image: image to apply median filter
    :param radius: kernel radius; a scalar is broadcast to one radius per
        processed axis (2 when working per layer, up to 3 otherwise)
    :param bool layer: if operation should be run on each layer separately
    :return: filtered image as a numpy array of the same shape
    """
    if not isinstance(radius, Iterable):
        radius = [radius] * min(image.ndim, 2 if layer else 3)
    return _generic_image_operation(image, radius, sitk.Median, layer)
def dilate(image, radius, layer=True):
    """
    Grayscale dilation of ``image``.

    :param image: image to dilate
    :param radius: structuring-element radius for the dilation
    :param layer: when True, dilate each 2D layer independently
    :return: dilated image
    """
    return _generic_image_operation(image, radius, sitk.GrayscaleDilate, layer)
def apply_filter(filter_type, image, radius, layer=True) -> np.ndarray:
    """
    Apply operation selected by filter type to image.
    :param NoiseFilterType filter_type:
    :param np.ndarray image:
    :param float radius:
    :param bool layer:
    :return: image after operation
    :rtype: np.ndarray
    """
    if filter_type == NoiseFilterType.Gauss:
        return gaussian(image, radius, layer)
    if filter_type == NoiseFilterType.Median:
        # The median kernel requires an integer radius.
        return median(image, int(radius), layer)
    # NOTE(review): NoiseFilterType.Bilateral has no branch here, so it falls
    # through and returns the image unchanged, exactly like NoiseFilterType.No
    # -- confirm this is intentional.
    return image
def erode(image, radius, layer=True):
    """
    Grayscale erosion of ``image``.

    :param image: image to erode
    :param radius: structuring-element radius for the erosion
    :param layer: when True, erode each 2D layer independently
    :return: eroded image
    """
    return _generic_image_operation(image, radius, sitk.GrayscaleErode, layer)
def METHOD_NAME(image):
    """Convert image to binary. All positive values are set to 1."""
    positive_mask = image > 0
    return positive_mask.astype(np.uint8)
import math
import time
from typing import List
import numpy
import numpy as np
import pygame
from pygame.color import THECOLORS
from soccer_strategy.ball import Ball
from soccer_strategy.robot import Robot
from soccer_strategy.robot_controlled_2d import RobotControlled2D
class Scene:
    """
    Scene used by the 2d simulator, contains drawing functions
    Rules and Dimensions https://cdn.robocup.org/hl/wp/2021/04/V-HL21_Rules_changesMarked.pdf
    """
    def __init__(self, robots, ball):
        """Create the pygame window and pre-render the static field overlay.

        NOTE(review): ``robots`` and ``ball`` are accepted but not used here;
        they are passed per-frame to METHOD_NAME instead.
        """
        pygame.init()
        # Field dimensions in meters; the single-letter names follow the
        # lettering used in the rules document linked in the class docstring.
        self.A = 9
        self.B = 6
        self.C = 0.6
        self.D = 2.6
        self.E = 1
        self.F = 3
        self.G = 1.5
        self.H = 1.5
        self.I = 1
        self.J = 2
        self.K = 5
        self.LINE_WIDTH = 0.05
        self.pygame_size = (1100, 800)
        # Scale factors: field length/width plus an I-wide border on each side
        # mapped onto the window size in pixels.
        self.meter_to_pixel_x = self.pygame_size[0] / (self.A + self.I * 2)
        self.meter_to_pixel_y = self.pygame_size[1] / (self.B + self.I * 2)
        self.screen = pygame.display.set_mode(self.pygame_size, pygame.HWSURFACE | pygame.DOUBLEBUF | pygame.SRCALPHA)
        # Static overlay (green field + markings) drawn once, blitted each frame.
        self.screen_overlay_0 = pygame.Surface(self.pygame_size, pygame.SRCALPHA)
        self.screen_overlay_0.fill(THECOLORS["green"])
        width = self.pygame_size[0]
        height = self.pygame_size[1]
        # Draw the ellipse (centre circle of diameter H, centred on the field)
        pos_x = (self.I + self.A / 2 - self.H / 2) * self.meter_to_pixel_x
        pos_y = (self.I + self.B / 2 - self.H / 2) * self.meter_to_pixel_y
        width = self.H * self.meter_to_pixel_x
        height = self.H * self.meter_to_pixel_y
        pygame.draw.ellipse(
            self.screen_overlay_0, THECOLORS["white"], rect=(pos_x, pos_y, width, height), width=int(self.LINE_WIDTH * self.meter_to_pixel_x)
        )
        # Draw the lines (field-frame coordinates, origin at field centre)
        vline1 = [[0, -self.B / 2], [0, self.B / 2]]
        vline2 = [[-self.A / 2, -self.B / 2], [-self.A / 2, self.B / 2]]
        vline3 = [[self.A / 2, -self.B / 2], [self.A / 2, self.B / 2]]
        hline1 = [[-self.A / 2, -self.B / 2], [self.A / 2, -self.B / 2]]
        hline2 = [[-self.A / 2, self.B / 2], [self.A / 2, self.B / 2]]
        goal_line_1 = [[-self.A / 2 - self.C, -self.F / 2], [-self.A / 2 - self.C, self.F / 2]]
        goal_line_2 = [[self.A / 2 + self.C, -self.F / 2], [self.A / 2 + self.C, self.F / 2]]
        lines = [vline1, vline2, vline3, hline1, hline2, goal_line_1, goal_line_2]
        # TODO draw the rest of the lines
        for line in lines:
            l_convert = self.convert_list_of_points_to_pixel_coordinates(line)
            pygame.draw.line(
                self.screen_overlay_0,
                THECOLORS["white"],
                start_pos=l_convert[0],
                end_pos=l_convert[1],
                width=int(self.LINE_WIDTH * self.meter_to_pixel_x),
            )
        self.font = pygame.font.SysFont("arial", 9)
    def convert_point_to_pixel_coordinates(self, point):
        """Convert a field-frame point (meters, origin at the field centre)
        to window pixel coordinates."""
        return (point[0] + self.I + self.A / 2) * self.meter_to_pixel_x, (point[1] + self.I + self.B / 2) * self.meter_to_pixel_y
    def convert_list_of_points_to_pixel_coordinates(self, line):
        """Convert a list of field-frame points to pixel coordinates."""
        points = []
        for l in line:
            points.append(self.convert_point_to_pixel_coordinates(l))
        return points
    def METHOD_NAME(self, robots: List[RobotControlled2D], ball: Ball):
        """Redraw the whole scene: field overlay, each robot (body, team
        marker, id label, heading arrow, vision cone, observed obstacles,
        planned path) and the ball, then flip the display buffers."""
        self.screen.fill((0, 0, 0))
        self.screen.blit(self.screen_overlay_0, (0, 0))
        for robot in robots:
            x = robot.position[0]
            y = robot.position[1]
            position = (x, y)
            polygon_points = robot.get_robot_polygon()
            polygon_points_pixel = self.convert_list_of_points_to_pixel_coordinates(polygon_points)
            pygame.draw.polygon(self.screen, THECOLORS["white"], polygon_points_pixel)
            # Team marker: blue dot for opponents, red for friendly robots.
            if robot.team == Robot.Team.OPPONENT:
                pygame.draw.circle(
                    self.screen, THECOLORS["blue"], center=self.convert_point_to_pixel_coordinates(position), radius=self.meter_to_pixel_x * 0.05
                )
            else:
                pygame.draw.circle(
                    self.screen, THECOLORS["red"], center=self.convert_point_to_pixel_coordinates(position), radius=self.meter_to_pixel_x * 0.05
                )
            text = self.font.render(f"{robot.robot_id}", True, THECOLORS["white"])
            textRect = text.get_rect()
            textRect.center = self.convert_point_to_pixel_coordinates(position)
            self.screen.blit(text, textRect)
            # Draw robot arrow (heading indicator from position[2] = theta)
            theta = robot.position[2]
            arrow_len = 0.3
            arrow_end_x = math.cos(theta) * arrow_len
            arrow_end_y = math.sin(theta) * arrow_len
            direction = np.array([arrow_end_x, arrow_end_y])
            direction = direction / np.linalg.norm(direction)
            arrow_end = [position[0] + arrow_end_x, position[1] + arrow_end_y]
            pygame.draw.line(
                self.screen,
                THECOLORS["white"],
                start_pos=self.convert_point_to_pixel_coordinates(position),
                end_pos=self.convert_point_to_pixel_coordinates(arrow_end),
                width=2,
            )
            # Draw vision cone: rotate the unit heading vector by +/- FOV/2
            # and scale by the vision range to get the two cone edges.
            # NOTE(review): robot_pos is assigned but never used below.
            robot_pos = np.array([x, y])
            theta = -RobotControlled2D.ObservationConstants.FOV / 2
            c, s = np.cos(theta), np.sin(theta)
            R = np.array(((c, -s), (s, c)))
            vision_cone_right = np.matmul(R, direction) * RobotControlled2D.ObservationConstants.VISION_RANGE
            theta = RobotControlled2D.ObservationConstants.FOV / 2
            c, s = np.cos(theta), np.sin(theta)
            R = np.array(((c, -s), (s, c)))
            vision_cone_left = np.matmul(R, direction) * RobotControlled2D.ObservationConstants.VISION_RANGE
            vision_cone_right_end = [position[0] + vision_cone_right[0], position[1] + vision_cone_right[1]]
            vision_cone_left_end = [position[0] + vision_cone_left[0], position[1] + vision_cone_left[1]]
            pygame.draw.line(
                self.screen,
                THECOLORS["white"],
                start_pos=self.convert_point_to_pixel_coordinates(position),
                end_pos=self.convert_point_to_pixel_coordinates(vision_cone_right_end),
                width=2,
            )
            pygame.draw.line(
                self.screen,
                THECOLORS["white"],
                start_pos=self.convert_point_to_pixel_coordinates(position),
                end_pos=self.convert_point_to_pixel_coordinates(vision_cone_left_end),
                width=2,
            )
            for i, obstacle in enumerate(robot.observed_obstacles):
                # NOTE(review): the alpha component here is a float <= 0.5;
                # pygame color components are normally ints in 0..255 --
                # confirm this renders as intended.
                if robot.team == Robot.Team.OPPONENT:
                    color = (255, 0, 0, obstacle.probability * 0.5)
                else:
                    color = (0, 0, 255, obstacle.probability * 0.5)
                pygame.draw.circle(
                    self.screen, color, center=self.convert_point_to_pixel_coordinates(obstacle.position), radius=self.meter_to_pixel_x * 0.1, width=1
                )
            # Draw robot path (sampled at 11 evenly spaced ratios along the path)
            if robot.path is not None:
                verts = []
                for j in range(0, 11):
                    path_vert = robot.path.poseAtRatio(j / 10).position
                    verts.append([path_vert[0], path_vert[1]])
                pygame.draw.lines(
                    self.screen, THECOLORS["orange"], closed=False, points=self.convert_list_of_points_to_pixel_coordinates(verts), width=1
                )
        if ball.position is not None:
            x = ball.position[0]
            y = ball.position[1]
            pygame.draw.circle(
                self.screen, THECOLORS["yellow"], center=self.convert_point_to_pixel_coordinates((x, y)), radius=self.meter_to_pixel_x * 0.07
            )
        pygame.display.flip()
        # Pump the event queue so the window stays responsive.
        pygame.event.get()
'''
Groups
======
The following methods allow for interaction into the Tenable Security Center
:sc-api:`Group <Group.htm>` API. These items are typically seen under the
**User Groups** section of Tenable Security Center.
Methods available on ``sc.groups``:
.. rst-class:: hide-signature
.. autoclass:: GroupAPI
:members:
'''
from .base import SCEndpoint
class GroupAPI(SCEndpoint):
    '''Endpoint wrapper for the Tenable Security Center Group API.'''

    def _constructor(self, **kw):
        '''
        Handles parsing the keywords and returns a group definition document
        '''
        if 'name' in kw:
            self._check('name', kw['name'], str)
        if 'description' in kw:
            self._check('description', kw['description'], str)
        # Maps the friendly keyword names to the attribute names that the
        # Group API expects in the request document.
        mapping = {
            'viewable': 'definingAssets',
            'repos': 'repositories',
            'lce_ids': 'lces',
            'asset_lists': 'assets',
            'scan_policies': 'policies',
            'query_ids': 'queries',
            'scan_creds': 'credentials',
            'dashboards': 'dashboardTabs',
            'report_cards': 'arcs',
            'audit_files': 'auditFiles'
        }
        for keyword, attr in mapping.items():
            if keyword in kw:
                # Expand the keyword's id list into the list-of-dicts form
                # ([{'id': <int>}, ...]) under the API attribute name, and
                # remove the original keyword from the document.
                kw[attr] = [{'id': self._check(f'{keyword}:item', i, int)}
                            for i in self._check(keyword, kw.pop(keyword), list)]
        return kw

    def create(self, name, **kw):
        '''
        Creates a group.
        :sc-api:`group: create <Group.htm#group_POST>`
        Args:
            name (str): The name of the user group
            asset_lists (list, optional):
                List of asset list ids to allow this group to access.
            audit_files (list, optional):
                List of audit file ids to allow this group to access.
            dashboards (list, optional):
                List of dashboard ids to allow this group to access.
            lce_ids (list, optional):
                List of LCE instance ids to allow this group to access.
            query_ids (list, optional):
                List of query ids to allow this group to access.
            report_cards (list, optional):
                List of report card ids to allow this group to access.
            repos (list, optional):
                List of repository ids to allow this group to access.
            scan_creds (list, optional):
                List of scanning credential ids to allow this group to access.
            scan_policies (list, optional):
                List of scan policy ids to allow this group to access.
            viewable (list, optional):
                List of asset list ids to use for the purposes of restricting
                what members of this group can see within Tenable Security Center.
        Returns:
            :obj:`dict`:
                The newly created group.
        Examples:
            >>> group = sc.groups.create('New Group')
        '''
        kw['name'] = name
        payload = self._constructor(**kw)
        return self._api.post('group', json=payload).json()['response']

    def METHOD_NAME(self, id, fields=None):
        '''
        Returns the details for a specific group.
        :sc-api:`group: details <Group.htm#GroupRESTReference-/group/{id}>`
        Args:
            id (int): The identifier for the group.
            fields (list, optional): A list of attributes to return.
        Returns:
            :obj:`dict`:
                The group resource record.
        Examples:
            >>> group = sc.groups.details(1)
            >>> pprint(group)
        '''
        params = {}
        if fields:
            params['fields'] = ','.join(
                self._check('field', f, str) for f in fields)
        return self._api.get(f'group/{self._check("id", id, int)}',
                             params=params).json()['response']

    def edit(self, id, **kw):
        '''
        Edits a group.
        :sc-api:`group: edit <Group.htm#group_id_PATCH>`
        Args:
            asset_lists (list, optional):
                List of asset list ids to allow this group to access.
            audit_files (list, optional):
                List of audit file ids to allow this group to access.
            dashboards (list, optional):
                List of dashboard ids to allow this group to access.
            lce_ids (list, optional):
                List of LCE instance ids to allow this group to access.
            name (str, optional):
                The name of the user group
            query_ids (list, optional):
                List of query ids to allow this group to access.
            report_cards (list, optional):
                List of report card ids to allow this group to access.
            repos (list, optional):
                List of repository ids to allow this group to access.
            scan_creds (list, optional):
                List of scanning credential ids to allow this group to access.
            scan_policies (list, optional):
                List of scan policy ids to allow this group to access.
            viewable (list, optional):
                List of asset list ids to use for the purposes of restricting
                what members of this group can see within Tenable Security Center.
        Returns:
            :obj:`dict`:
                The newly updated group.
        Examples:
            >>> group = sc.groups.edit()
        '''
        payload = self._constructor(**kw)
        return self._api.patch(f'group/{self._check("id", id, int)}',
                               json=payload).json()['response']

    def delete(self, id):
        '''
        Removes a group.
        :sc-api:`group: delete <Group.htm#group_id_DELETE>`
        Args:
            id (int): The numeric identifier for the group to remove.
        Returns:
            :obj:`str`:
                An empty response.
        Examples:
            >>> sc.groups.delete(1)
        '''
        return self._api.delete(
            f'group/{self._check("id", id, int)}').json()['response']

    def list(self, fields=None):
        '''
        Retrieves the list of group definitions.
        :sc-api:`group: list <Group.htm#group_GET>`
        Args:
            fields (list, optional):
                A list of attributes to return for each group.
        Returns:
            :obj:`list`:
                A list of group resources.
        Examples:
            >>> for group in sc.groups.list():
            ...     pprint(group)
        '''
        params = {}
        if fields:
            params['fields'] = ','.join(
                self._check('field', f, str) for f in fields)
        return self._api.get('group', params=params).json()['response']
"""Contains the WidgetPropertiesDialog class."""
__all__ = ['WidgetPropertiesDialog']
import Pmw
import tkinter as tk
class WidgetPropertiesDialog(tk.Toplevel):
    """Class to open dialogs to adjust widget properties."""
    def __init__(self, propertyDict, propertyList = None, parent = None,
                 title = 'Widget Properties'):
        """Initialize a dialog.
        Arguments:
            propertyDict -- a dictionary of properties to be edited
            propertyList -- optional ordering of the properties
                            (defaults to the sorted dict keys)
            parent -- a parent window (the application window)
            title -- the dialog title
        """
        # Record property list
        self.propertyDict = propertyDict
        self.propertyList = propertyList
        if self.propertyList is None:
            self.propertyList = sorted(self.propertyDict)
        # Use default parent if none specified
        if not parent:
            parent = tk._default_root
        # Create toplevel window
        tk.Toplevel.__init__(self, parent)
        self.transient(parent)
        # Set title
        if title:
            self.title(title)
        # Record parent
        self.parent = parent
        # Initialize modifications
        self.modifiedDict = {}
        # Create body
        body = tk.Frame(self)
        self.initial_focus = self.body(body)
        body.pack(padx=5, pady=5)
        # Create OK Cancel button
        self.buttonbox()
        # Initialize window state
        self.grab_set()
        self.protocol("WM_DELETE_WINDOW", self.cancel)
        self.geometry("+%d+%d" % (parent.winfo_rootx() + 50,
                                  parent.winfo_rooty() + 50))
        self.initial_focus.focus_set()
        # Modal: block here until the dialog window is destroyed.
        self.wait_window(self)
    def destroy(self):
        """Destroy the window"""
        self.propertyDict = {}
        self.initial_focus = None
        # Clean up balloons!
        for balloon in self.balloonList:
            balloon.withdraw()
        tk.Toplevel.destroy(self)
    #
    # construction hooks
    def body(self, master):
        """create dialog body.
        return entry that should have initial focus.
        This method should be overridden, and is called
        by the __init__ method.
        """
        count = 0
        entryList = []
        self.balloonList = []
        for property in self.propertyList:
            propertySet = self.propertyDict[property]
            # Widget
            widget = propertySet.get('widget', None)
            # Get initial value
            initialvalue = widget[property]
            # Type of entry
            entryType = propertySet.get('type', 'real')
            # Is None an allowable value?
            fAllowNone = propertySet.get('fNone', 0)
            # Help string specified?
            helpString = propertySet.get('help', None)
            # Create label
            label = tk.Label(master, text=property, justify=tk.LEFT)
            label.grid(row=count, column=0, padx=5, sticky=tk.W)
            # Create entry
            entry = Pmw.EntryField(master, entry_justify=tk.RIGHT)
            entry.grid(row=count, column=1, padx=5, sticky=tk.W + tk.E)
            if initialvalue is None:
                entry.insert(0, 'None')
            else:
                entry.insert(0, initialvalue)
            # Create balloon for help
            balloon = Pmw.Balloon(state='balloon')
            self.balloonList.append(balloon)
            # extra info if None is allowed value
            if helpString is None:
                if fAllowNone:
                    extra = ' or None'
                else:
                    extra = ''
            # Set up help string and validator based upon type
            if entryType == 'real':
                # Only allow real numbers
                if fAllowNone:
                    entry['validate'] = {'validator': self.realOrNone}
                else:
                    entry['validate'] = {'validator': 'real'}
                if helpString is None:
                    helpString = 'Enter a floating point number' + extra + '.'
            elif entryType == 'integer':
                # Only allow integer values
                if fAllowNone:
                    entry['validate'] = {'validator': self.intOrNone}
                else:
                    entry['validate'] = {'validator': 'integer'}
                if helpString is None:
                    helpString = f'Enter an integer{extra}.'
            else:
                # Anything goes with a string widget
                if helpString is None:
                    helpString = f'Enter a string{extra}.'
            # Bind balloon with help string to entry
            balloon.bind(entry, helpString)
            # Create callback to execute whenever a value is changed
            # (default args capture the current loop values -- the standard
            # late-binding-closure fix).
            modifiedCallback = (lambda f=self.modified, w=widget, e=entry,
                                p=property, t=entryType, fn=fAllowNone:
                                f(w, e, p, t, fn))
            entry['modifiedcommand'] = modifiedCallback
            # Keep track of the entrys
            entryList.append(entry)
            count += 1
        # Set initial focus
        if len(entryList) > 0:
            entry = entryList[0]
            entry.select_range(0, tk.END)
            # Set initial focus to first entry in the list
            return entryList[0]
        else:
            # Just set initial focus to self
            return self
    def modified(self, widget, entry, property, type, fNone):
        # Record the pending change; it is applied later by METHOD_NAME()
        # when the user confirms with OK.
        self.modifiedDict[property] = (widget, entry, type, fNone)
    def buttonbox(self):
        """add standard button box buttons.
        """
        box = tk.Frame(self)
        # Create buttons
        w = tk.Button(box, text="OK", width=10, command=self.ok)
        w.pack(side=tk.LEFT, padx=5, pady=5)
        # Create buttons
        w = tk.Button(box, text="Cancel", width=10, command=self.cancel)
        w.pack(side=tk.LEFT, padx=5, pady=5)
        # Bind commands
        self.bind("<Return>", self.ok)
        self.bind("<Escape>", self.cancel)
        # Pack
        box.pack()
    def realOrNone(self, val):
        # Validator: accept real numbers; also accept any prefix of "none"
        # as PARTIAL while typing, and the full word "none" as OK.
        val = val.lower()
        if 'none'.find(val) != -1:
            if val == 'none':
                return Pmw.OK
            else:
                return Pmw.PARTIAL
        return Pmw.realvalidator(val)
    def intOrNone(self, val):
        # Validator: accept integers; also accept prefixes of "none" (see above).
        val = val.lower()
        if 'none'.find(val) != -1:
            if val == 'none':
                return Pmw.OK
            else:
                return Pmw.PARTIAL
        return Pmw.integervalidator(val)
    #
    # standard button semantics
    def ok(self, event=None):
        # Hide the window, commit the modified values, run apply(), then
        # tear the dialog down via cancel().
        self.withdraw()
        self.update_idletasks()
        self.METHOD_NAME()
        self.apply()
        self.cancel()
    def cancel(self, event=None):
        # put focus back to the parent window
        self.parent.focus_set()
        self.destroy()
    def METHOD_NAME(self):
        """Write each modified entry's value back onto its widget,
        converting to the declared type ('none' -> None when allowed)."""
        for property in self.modifiedDict:
            tuple = self.modifiedDict[property]
            widget = tuple[0]
            entry = tuple[1]
            type = tuple[2]
            fNone = tuple[3]
            value = entry.get()
            lValue = value.lower()
            # NOTE(review): 'none'.find(lValue) != -1 is true for any prefix
            # of "none" (including the empty string), so such values are
            # silently dropped unless they equal "none" exactly and fNone is
            # set -- confirm this is the intended behavior.
            if 'none'.find(lValue) != -1:
                if fNone and (lValue == 'none'):
                    widget[property] = None
            else:
                if type == 'real':
                    value = float(value)
                elif type == 'integer':
                    value = int(value)
                widget[property] = value
    def apply(self):
        """process the data
        This method is called automatically to process the data, *after*
        the dialog is destroyed. By default, it does nothing.
        """
"""Custom loss functions and metrics."""
import tensorflow as tf
from sleap.nn.config import HardKeypointMiningConfig
def compute_ohkm_loss(
    y_gt: tf.Tensor,
    y_pr: tf.Tensor,
    hard_to_easy_ratio: float = 2.0,
    min_hard_keypoints: int = 2,
    max_hard_keypoints: int = -1,
    loss_scale: float = 5.0,
) -> tf.Tensor:
    """Compute the online hard keypoint mining loss.

    Args:
        y_gt: Ground truth tensor; rank 4 with channels on the last axis
            (inferred from the axis reductions below -- TODO confirm layout).
        y_pr: Prediction tensor with the same shape as ``y_gt``.
        hard_to_easy_ratio: Minimum ratio of a channel's summed loss to the
            smallest channel loss for that channel to count as "hard".
        min_hard_keypoints: Minimum number of channels treated as hard.
        max_hard_keypoints: Maximum number of channels to scale; -1 means no
            limit (all channels are eligible).
        loss_scale: Factor applied to the hard channels' losses.

    Returns:
        Scalar tensor: the scaled, element-normalized loss over the top-k
        hardest channels.
    """
    # Compute elementwise squared difference.
    loss = tf.math.squared_difference(y_gt, y_pr)  # rank 4
    # Store initial shape for normalization.
    batch_shape = tf.shape(loss)
    # Reduce over everything but channels axis.
    loss = tf.reduce_sum(loss, axis=[0, 1, 2])
    # Compute the loss for the "easy" keypoint.
    best_loss = tf.math.reduce_min(loss)
    # Find the number of hard keypoints.
    # NOTE(review): if the easiest channel has exactly zero loss this ratio
    # divides by zero -- confirm inputs guarantee best_loss > 0.
    is_hard_keypoint = (loss / best_loss) >= hard_to_easy_ratio
    n_hard_keypoints = tf.reduce_sum(tf.cast(is_hard_keypoint, tf.int32))
    # Work out the actual final number of keypoints to consider as hard.
    if max_hard_keypoints < 0:
        max_hard_keypoints = tf.shape(loss)[0]
    else:
        max_hard_keypoints = tf.minimum(max_hard_keypoints, tf.shape(loss)[0])
    k = tf.minimum(tf.maximum(n_hard_keypoints, min_hard_keypoints), max_hard_keypoints)
    # Pull out the top hard values.
    k_vals, k_inds = tf.math.top_k(loss, k=k, sorted=False)
    # Apply weights.
    k_loss = k_vals * loss_scale
    # Reduce over all channels.
    n_elements = tf.cast(
        batch_shape[0] * batch_shape[1] * batch_shape[2] * k, tf.float32
    )
    k_loss = tf.reduce_sum(k_loss) / n_elements
    return k_loss
class OHKMLoss(tf.keras.losses.Loss):
    """Online hard keypoint mining loss.
    This loss serves to dynamically reweight the MSE of the top-K worst channels in each
    batch. This is useful when fine tuning a model to improve performance on a hard
    part to optimize for (e.g., small, hard to see, often not visible).
    Note: This works with any type of channel, so it can work for PAFs as well.
    Attributes:
        hard_to_easy_ratio: The minimum ratio of the individual keypoint loss with
            respect to the lowest keypoint loss in order to be considered as "hard".
            This helps to switch focus on across groups of keypoints during training.
        min_hard_keypoints: The minimum number of keypoints that will be considered as
            "hard", even if they are not below the `hard_to_easy_ratio`.
        max_hard_keypoints: The maximum number of hard keypoints to apply scaling to.
            This can help when there are few very easy keypoints which may skew the
            ratio and result in loss scaling being applied to most keypoints, which can
            reduce the impact of hard mining altogether.
        loss_scale: Factor to scale the hard keypoint losses by.
    """
    def __init__(
        self,
        hard_to_easy_ratio: float = 2.0,
        min_hard_keypoints: int = 2,
        max_hard_keypoints: int = -1,
        loss_scale: float = 5.0,
        name="ohkm",
        **kwargs
    ):
        """Initialize the loss; see the class attributes for parameter meanings."""
        super(OHKMLoss, self).__init__(name=name, **kwargs)
        self.hard_to_easy_ratio = hard_to_easy_ratio
        self.min_hard_keypoints = min_hard_keypoints
        self.max_hard_keypoints = max_hard_keypoints
        self.loss_scale = loss_scale
    @classmethod
    def from_config(cls, config: HardKeypointMiningConfig) -> "OHKMLoss":
        """Build an `OHKMLoss` from a `HardKeypointMiningConfig`.

        A `None` value for `max_hard_keypoints` in the config is mapped to -1
        (meaning: no maximum).
        """
        return cls(
            hard_to_easy_ratio=config.hard_to_easy_ratio,
            min_hard_keypoints=config.min_hard_keypoints,
            max_hard_keypoints=config.max_hard_keypoints
            if config.max_hard_keypoints is not None
            else -1,
            loss_scale=config.loss_scale,
        )
    def METHOD_NAME(self, y_gt, y_pr, sample_weight=None):
        """Compute the OHKM loss for a batch (delegates to `compute_ohkm_loss`).

        Note: `sample_weight` is accepted for Keras API compatibility but is
        not used by this implementation.
        """
        return compute_ohkm_loss(
            y_gt,
            y_pr,
            hard_to_easy_ratio=self.hard_to_easy_ratio,
            min_hard_keypoints=self.min_hard_keypoints,
            max_hard_keypoints=self.max_hard_keypoints,
            loss_scale=self.loss_scale,
        )
class PartLoss(tf.keras.metrics.Metric):
    """Compute channelwise loss.
    Useful for monitoring the MSE for specific body parts (channels).
    Attributes:
        channel_ind: Index of channel to compute MSE for.
        name: Name of the loss tensor.
    """
    def __init__(self, channel_ind, name="part_loss", **kwargs):
        super(PartLoss, self).__init__(name=name, **kwargs)
        self.channel_ind = channel_ind
        # Running sum of squared errors for the selected channel.
        self.channel_mse = self.add_weight(
            name=name + ".mse", initializer="zeros", dtype=tf.float32
        )
        # Total number of samples seen across update_state() calls.
        self.n_samples = self.add_weight(
            name=name + ".n_samples", initializer="zeros", dtype=tf.int32
        )
        # Spatial size of the most recent batch (see NOTE in update_state).
        self.height = self.add_weight(
            name=name + ".height", initializer="zeros", dtype=tf.int32
        )
        self.width = self.add_weight(
            name=name + ".width", initializer="zeros", dtype=tf.int32
        )
    def update_state(self, y_gt, y_pr, sample_weight=None):
        """Accumulate the selected channel's squared error for one batch.

        Note: `sample_weight` is accepted for Keras API compatibility but not
        used.
        """
        shape = tf.shape(y_gt)
        n_samples = shape[0]
        channel_mse = tf.reduce_sum(
            tf.math.squared_difference(
                tf.gather(y_gt, self.channel_ind, axis=3),
                tf.gather(y_pr, self.channel_ind, axis=3),
            )
        )  # rank 4
        # NOTE(review): height/width are overwritten (assign), not accumulated,
        # so result() implicitly assumes every batch has the same spatial
        # size -- confirm this holds for all callers.
        self.height.assign(shape[1])
        self.width.assign(shape[2])
        self.n_samples.assign_add(n_samples)
        self.channel_mse.assign_add(channel_mse)
    def result(self):
        """Return the accumulated squared error divided by total elements seen."""
        return self.channel_mse / tf.cast(
            self.n_samples * self.height * self.width, tf.float32
        )
"""Stockanalysis.com/etf Model"""
__docformat__ = "numpy"
import logging
import pathlib
from typing import List, Tuple
import pandas as pd
from bs4 import BeautifulSoup
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import get_user_agent, request
logger = logging.getLogger(__name__)
csv_path = pathlib.Path(__file__).parent / "etfs.csv"
@log_start_end(log=logger)
def get_all_names_symbols() -> Tuple[List[str], List[str]]:
    """Gets all etf names and symbols

    Returns
    -------
    Tuple[List[str], List[str]]
        List of all available etf symbols, List of all available etf names
    """
    # 11/25: the ETF list is hard coded (bundled CSV) because stockanalysis
    # changed the format of their website.
    # (The previous version initialized two empty lists that were immediately
    # overwritten -- dead code removed.)
    data = pd.read_csv(csv_path)
    return data.s.to_list(), data.n.to_list()
@log_start_end(log=logger)
def get_etf_overview(symbol: str) -> pd.DataFrame:
    """Get overview data for selected etf

    Parameters
    ----------
    symbol : str
        Etf symbol to get overview for

    Returns
    -------
    pd.DataFrame
        Single-column dataframe of overview fields, indexed by field label

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> openbb.etf.overview("SPY")
    """
    page = request(
        f"https://stockanalysis.com/etf/{symbol}",
        headers={"User-Agent": get_user_agent()},
    )
    soup = BeautifulSoup(page.text, "html.parser")
    # Flatten the cell text of the first two tables into one list.
    texts = []
    for table in soup.findAll("table")[:2]:
        for cell in table.findAll("td"):
            texts.append(cell.get_text())
    # Selected even offsets hold field labels; the following odd offsets
    # hold the corresponding values.
    var_cols = [0, 2, 4, 6, 8, 10, 12, 18, 20, 22, 26, 28, 30, 32]
    labels = [texts[idx] for idx in var_cols]
    values = [texts[idx + 1] for idx in var_cols]
    return pd.DataFrame(values, index=labels, columns=[symbol.upper()])
@log_start_end(log=logger)
def get_etf_holdings(symbol: str) -> pd.DataFrame:
    """Get ETF holdings

    Parameters
    ----------
    symbol: str
        Symbol to get holdings for

    Returns
    -------
    pd.DataFrame
        Dataframe of holdings indexed by holding symbol; empty if no table
        could be parsed from the page

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> openbb.etf.holdings("SPY")
    """
    url = f"https://stockanalysis.com/etf/{symbol}/holdings/"
    response = request(url, headers={"User-Agent": get_user_agent()})
    try:
        holdings = pd.read_html(response.content)[0]
        holdings["Symbol"] = holdings["Symbol"].fillna("N/A")
        holdings = holdings.set_index("Symbol")
        holdings = holdings[["Name", "% Weight", "Shares"]]
        holdings = holdings.rename(columns={"% Weight": "% Of Etf"})
    except ValueError:
        # No parsable table on the page -> empty result.
        holdings = pd.DataFrame()
    return holdings
@log_start_end(log=logger)
def METHOD_NAME(symbols: List[str]) -> pd.DataFrame:
    """Compare selected ETFs

    Parameters
    ----------
    symbols : List[str]
        ETF symbols to compare

    Returns
    -------
    pd.DataFrame
        Dataframe with one overview column per requested symbol (empty for
        an empty symbol list)

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> compare_etfs = openbb.etf.compare(["SPY", "QQQ", "IWM"])
    """
    overviews = [get_etf_overview(etf) for etf in symbols]
    if not overviews:
        return pd.DataFrame()
    return pd.concat(overviews, axis=1)
@log_start_end(log=logger)
def get_etfs_by_name(name_to_search: str) -> pd.DataFrame:
    """Get an ETF symbol and name based on ETF string to search. [Source: StockAnalysis]

    Parameters
    ----------
    name_to_search: str
        ETF name to match (case-insensitive substring search)

    Returns
    -------
    pd.DataFrame
        Dataframe with columns ["Symbol", "Name"] for every matching ETF
    """
    all_symbols, all_names = get_all_names_symbols()
    # Lowercase the needle once instead of on every loop iteration.
    needle = name_to_search.lower()
    matches = [
        (symbol, name)
        for symbol, name in zip(all_symbols, all_names)
        if needle in name.lower()
    ]
    return pd.DataFrame(matches, columns=["Symbol", "Name"])
# (removed: stray dataset-website footer text accidentally captured in the source)