id int64 0 300k | label stringlengths 1 74 ⌀ | text stringlengths 4k 8k |
|---|---|---|
7,200 | extract data | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(**kwargs: Any) -> HttpRequest:
    """Build the HTTP GET request for listing ContainerService operations.

    Recognized keyword arguments ("headers", "params", "api_version",
    "template_url") are consumed here; any remaining kwargs are forwarded
    unchanged to :class:`~azure.core.rest.HttpRequest`.
    """
    header_map = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    param_map = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # api-version may arrive as a kwarg or as a pre-set query parameter;
    # default to the version this module was generated against.
    api_version: str = kwargs.pop("api_version", param_map.pop("api-version", "2021-11-01-preview"))
    accept = header_map.pop("Accept", "application/json")

    url = kwargs.pop("template_url", "/providers/Microsoft.ContainerService/operations")
    param_map["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    header_map["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=url, params=param_map, headers=header_map, **kwargs)
class Operations:
    """Operation group exposing the ContainerService ``operations`` listing.

    .. warning::
        **DO NOT** instantiate this class directly.
        Instead, you should access the following operations through
        :class:`~azure.mgmt.containerservice.v2021_11_01_preview.ContainerServiceClient`'s
        :attr:`operations` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs):
        # Generated wiring: positional order is (client, config, serializer,
        # deserializer, api_version), each with a keyword fallback.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
        self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version")

    @distributed_trace
    def list(self, **kwargs: Any) -> Iterable["_models.OperationValue"]:
        """Gets a list of operations.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either OperationValue or the result of cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2021_11_01_preview.models.OperationValue]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: str = kwargs.pop(
            "api_version", _params.pop("api-version", self._api_version or "2021-11-01-preview")
        )
        cls: ClsType[_models.OperationListResult] = kwargs.pop("cls", None)

        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # Build the initial request from the template URL, or re-issue the
            # continuation link with the client's api-version re-applied.
            if not next_link:
                request = build_list_request(
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and hand (continuation token, items) to
            # ItemPaged. This listing is not server-paged, so the token is None.
            deserialized = self._deserialize("OperationListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)
            _stream = False
            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response

        return ItemPaged(get_next, extract_data)

    list.metadata = {"url": "/providers/Microsoft.ContainerService/operations"}
7,201 | del handler | from .manager import Manager
from .template import TemplateFabric
from swsscommon import swsscommon
from .managers_rm import ROUTE_MAPS
import ipaddress
from .log import log_info, log_err, log_debug
class AdvertiseRouteMgr(Manager):
    """ This class Advertises routes when ADVERTISE_NETWORK_TABLE in STATE_DB is updated """

    def __init__(self, common_objs, db, table):
        """
        Initialize the object
        :param common_objs: common object dictionary
        :param db: name of the db
        :param table: name of the table in the db
        """
        super(AdvertiseRouteMgr, self).__init__(
            common_objs,
            [],
            db,
            table,
        )
        # Re-apply every advertisement once the local BGP ASN appears (or changes).
        self.directory.subscribe([("CONFIG_DB", swsscommon.CFG_DEVICE_METADATA_TABLE_NAME, "localhost/bgp_asn"),], self.on_bgp_asn_change)
        # Bookkeeping of what is currently advertised: {vrf: {ip_prefix: data}}.
        self.advertised_routes = dict()

    OP_DELETE = "DELETE"
    OP_ADD = "ADD"

    def set_handler(self, key, data):
        """Handle a SET on the table: validate and start advertising the prefix."""
        log_debug("AdvertiseRouteMgr:: set handler")
        if not self.__set_handler_validate(key, data):
            # Invalid entries are consumed (return True) but not advertised.
            return True
        vrf, ip_prefix = self.split_key(key)
        self.add_route_advertisement(vrf, ip_prefix, data)
        return True

    def del_handler(self, key):
        """Handle a DEL on the table: withdraw the route advertisement."""
        log_debug("AdvertiseRouteMgr:: del handler")
        vrf, ip_prefix = self.split_key(key)
        self.remove_route_advertisement(vrf, ip_prefix)

    def __set_handler_validate(self, key, data):
        # Accept either an empty payload ({"": ""}) or a payload whose
        # "profile" refers to a known route-map.
        if data:
            if ("profile" in data and data["profile"] in ROUTE_MAPS) or data == {"":""}:
                """
                APP which config the data should be responsible to pass a valid IP prefix
                """
                return True
        log_err("BGPAdvertiseRouteMgr:: Invalid data %s for advertised route %s" % (data, key))
        return False

    def add_route_advertisement(self, vrf, ip_prefix, data):
        # Only push FRR config when the BGP ASN is known; bookkeeping always happens
        # so on_bgp_asn_change can replay advertisements later.
        if self.directory.path_exist("CONFIG_DB", swsscommon.CFG_DEVICE_METADATA_TABLE_NAME, "localhost/bgp_asn"):
            if not self.advertised_routes.get(vrf, dict()):
                # First advertisement for this vrf: disable "bgp network import-check".
                self.bgp_network_import_check_commands(vrf, self.OP_ADD)
            self.advertise_route_commands(ip_prefix, vrf, self.OP_ADD, data)
        self.advertised_routes.setdefault(vrf, dict()).update({ip_prefix: data})

    def remove_route_advertisement(self, vrf, ip_prefix):
        if ip_prefix not in self.advertised_routes.get(vrf, dict()):
            log_info("BGPAdvertiseRouteMgr:: %s|%s does not exist" % (vrf, ip_prefix))
            return
        self.advertised_routes.get(vrf, dict()).pop(ip_prefix)
        if not self.advertised_routes.get(vrf, dict()):
            self.advertised_routes.pop(vrf, None)
        if self.directory.path_exist("CONFIG_DB", swsscommon.CFG_DEVICE_METADATA_TABLE_NAME, "localhost/bgp_asn"):
            if not self.advertised_routes.get(vrf, dict()):
                # Last advertisement for this vrf removed: restore import-check.
                self.bgp_network_import_check_commands(vrf, self.OP_DELETE)
            self.advertise_route_commands(ip_prefix, vrf, self.OP_DELETE)

    def advertise_route_commands(self, ip_prefix, vrf, op, data=None):
        """Emit the FRR 'network' statement for the prefix under the right vrf/AFI."""
        is_ipv6 = TemplateFabric.is_ipv6(ip_prefix)
        bgp_asn = self.directory.get_slot("CONFIG_DB", swsscommon.CFG_DEVICE_METADATA_TABLE_NAME)["localhost"]["bgp_asn"]
        cmd_list = []
        if vrf == "default":
            cmd_list.append("router bgp %s" % bgp_asn)
        else:
            cmd_list.append("router bgp %s vrf %s" % (bgp_asn, vrf))
        cmd_list.append(" address-family %s unicast" % ("ipv6" if is_ipv6 else "ipv4"))
        if data and "profile" in data:
            # Deletion always passes data=None, so this branch only ever adds.
            cmd_list.append(" network %s route-map %s" % (ip_prefix, "%s_RM" % data["profile"]))
            log_debug(
                "BGPAdvertiseRouteMgr:: Update bgp %s network %s with route-map %s"
                % (bgp_asn, vrf + "|" + ip_prefix, "%s_RM" % data["profile"])
            )
        else:
            cmd_list.append(" %snetwork %s" % ("no " if op == self.OP_DELETE else "", ip_prefix))
            log_debug(
                "BGPAdvertiseRouteMgr:: %sbgp %s network %s"
                % ("Remove " if op == self.OP_DELETE else "Update ", bgp_asn, vrf + "|" + ip_prefix)
            )
        self.cfg_mgr.push_list(cmd_list)
        log_debug("BGPAdvertiseRouteMgr::Done")

    def bgp_network_import_check_commands(self, vrf, op):
        """Toggle 'bgp network import-check' for the vrf (disabled while advertising)."""
        bgp_asn = self.directory.get_slot("CONFIG_DB", swsscommon.CFG_DEVICE_METADATA_TABLE_NAME)["localhost"]["bgp_asn"]
        cmd_list = []
        if vrf == "default":
            cmd_list.append("router bgp %s" % bgp_asn)
        else:
            cmd_list.append("router bgp %s vrf %s" % (bgp_asn, vrf))
        cmd_list.append(" %sbgp network import-check" % ("" if op == self.OP_DELETE else "no "))
        self.cfg_mgr.push_list(cmd_list)

    def on_bgp_asn_change(self):
        # Replay every recorded advertisement now that the ASN is available.
        if self.directory.path_exist("CONFIG_DB", swsscommon.CFG_DEVICE_METADATA_TABLE_NAME, "localhost/bgp_asn"):
            for vrf, ip_prefixes in self.advertised_routes.items():
                self.bgp_network_import_check_commands(vrf, self.OP_ADD)
                for ip_prefix in ip_prefixes:
                    self.add_route_advertisement(vrf, ip_prefix, ip_prefixes[ip_prefix])

    @staticmethod
    def split_key(key):
        """
        Split key into vrf name and prefix.
        :param key: key to split
        :return: vrf name extracted from the key, ip prefix extracted from the key
        """
        if "|" not in key:
            return "default", key
        else:
            return tuple(key.split("|", 1))
7,202 | method | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"workloads sap-virtual-instance delete",
is_preview=True,
confirmation="Are you sure you want to perform this operation?",
)
class Delete(AAZCommand):
    """Delete a Virtual Instance for SAP solutions resource and its child resources, that is the associated Central Services Instance, Application Server Instances and Database Instance.

    :example: Delete a Virtual Instance for SAP solutions (VIS)
        az workloads sap-virtual-instance delete -g <Resource_Group_Name> -n <VIS Name>

    :example: Remove a Virtual Instance for SAP solutions (VIS) using the Azure resource ID of the VIS
        az workloads sap-virtual-instance delete --id <ResourceID>
    """

    _aaz_info = {
        "version": "2023-04-01",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.workloads/sapvirtualinstances/{}", "2023-04-01"],
        ]
    }

    AZ_SUPPORT_NO_WAIT = True

    def _handler(self, command_args):
        super()._handler(command_args)
        # Delete is a long-running operation with no result body; return a poller.
        return self.build_lro_poller(self._execute_operations, None)

    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""
        _args_schema = cls._args_schema
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        _args_schema.sap_virtual_instance_name = AAZStrArg(
            options=["-n", "--name", "--sap-virtual-instance-name"],
            help="The name of the Virtual Instances for SAP solutions resource",
            required=True,
            id_part="name",
        )
        return cls._args_schema

    def _execute_operations(self):
        self.pre_operations()
        yield self.SAPVirtualInstancesDelete(ctx=self.ctx)()
        self.post_operations()

    @register_callback
    def pre_operations(self):
        pass

    @register_callback
    def post_operations(self):
        pass

    class SAPVirtualInstancesDelete(AAZHttpOperation):
        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            # 200 and 202 share one LRO handler (identical bodies merged);
            # 204 means the resource is already gone and has no content.
            if session.http_response.status_code in [200, 202]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_200,
                    self.on_error,
                    lro_options={"final-state-via": "azure-async-operation"},
                    path_format_arguments=self.url_parameters,
                )
            if session.http_response.status_code in [204]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_204,
                    self.on_error,
                    lro_options={"final-state-via": "azure-async-operation"},
                    path_format_arguments=self.url_parameters,
                )
            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Workloads/sapVirtualInstances/{sapVirtualInstanceName}",
                **self.url_parameters
            )

        @property
        def method(self):
            # HTTP verb for this operation (consumed by AAZHttpOperation.make_request).
            return "DELETE"

        @property
        def error_format(self):
            return "MgmtErrorFormat"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "sapVirtualInstanceName", self.ctx.args.sap_virtual_instance_name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2023-04-01",
                    required=True,
                ),
            }
            return parameters

        def on_200(self, session):
            # Delete returns no payload worth deserializing.
            pass

        def on_204(self, session):
            pass
class _DeleteHelper:
    """Helper class for Delete"""
    # Intentionally empty: aaz-dev-tools emits this container for shared
    # schema-building helpers; this command currently needs none.
__all__ = ["Delete"] |
7,203 | decode | # The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
# Written by Petru Paler
"""bencode.py - bencode decoder."""
from bencodepy.compat import to_binary
from bencodepy.exceptions import BencodeDecodeError
from collections import OrderedDict
try:
from typing import Dict, List, Tuple, Deque, Union, TextIO, BinaryIO, Any
except ImportError:
Dict = List = Tuple = Deque = Union = TextIO = BinaryIO = Any = None
try:
import pathlib
except ImportError:
pathlib = None
ENCODING_FALLBACK_TYPES = ('key', 'value')
class BencodeDecoder(object):
    """Decoder for bencode-formatted byte strings (BEP 3)."""

    def __init__(self, encoding=None, encoding_fallback=None, dict_ordered=False, dict_ordered_sort=False):
        """
        :param encoding: if set, decode byte-string values to ``str`` with this codec
        :param encoding_fallback: which kinds ('key', 'value' or 'all') may silently
            fall back to raw bytes when text decoding fails; None disables fallback
        :param dict_ordered: decode dictionaries into :class:`OrderedDict`
        :param dict_ordered_sort: additionally sort decoded dictionaries by key
        :raises ValueError: on an inconsistent or unknown option combination
        """
        self.encoding = encoding
        self.dict_ordered = dict_ordered
        self.dict_ordered_sort = dict_ordered_sort

        if dict_ordered_sort and not dict_ordered:
            raise ValueError(
                'Invalid value for "dict_ordered_sort" (requires "dict_ordered" to be enabled)'
            )

        # Parse encoding fallback
        if encoding_fallback is not None and encoding_fallback not in ENCODING_FALLBACK_TYPES + ('all',):
            raise ValueError(
                'Invalid value for "encoding_fallback" (expected "all", "keys", "values" or None)'
            )
        if encoding_fallback == 'all':
            self.encoding_fallback = ENCODING_FALLBACK_TYPES
        elif encoding_fallback is not None:
            self.encoding_fallback = (encoding_fallback,)
        else:
            self.encoding_fallback = tuple()

        # Dispatch table keyed on the first byte of each bencoded token:
        # 'l' -> list, 'i' -> integer, '0'-'9' -> string, 'd' -> dictionary.
        self.decode_func = {}
        self.decode_func[b'l'] = self.decode_list
        self.decode_func[b'i'] = self.decode_int
        for digit in b'0123456789':
            self.decode_func[bytes([digit])] = self.decode_string
        self.decode_func[b'd'] = self.decode_dict

    def decode(self, value):
        # type: (bytes) -> Union[Tuple, List, OrderedDict, bool, int, str, bytes]
        """
        Decode bencode formatted byte string ``value``.
        :param value: Bencode formatted string
        :type value: bytes
        :return: Decoded value
        :rtype: object
        :raises BencodeDecodeError: if ``value`` is not valid bencode, or has
            trailing data after a valid prefix
        """
        try:
            value = to_binary(value)
            data, length = self.decode_func[value[0:1]](value, 0)
        except (IndexError, KeyError, TypeError, ValueError):
            raise BencodeDecodeError("not a valid bencoded string")

        if length != len(value):
            raise BencodeDecodeError("invalid bencoded value (data after valid prefix)")

        return data

    def decode_int(self, x, f):
        # type: (bytes, int) -> Tuple[int, int]
        """Decode 'i<digits>e' at offset ``f``; reject '-0' and leading zeros."""
        f += 1
        newf = x.index(b'e', f)
        n = int(x[f:newf])
        if x[f:f + 1] == b'-':
            if x[f + 1:f + 2] == b'0':
                raise ValueError
        elif x[f:f + 1] == b'0' and newf != f + 1:
            raise ValueError
        return n, newf + 1

    def decode_string(self, x, f, kind='value'):
        # type: (bytes, int, str) -> Tuple[Union[str, bytes], int]
        """Decode torrent bencoded 'string' ('<len>:<bytes>') in x starting at f."""
        colon = x.index(b':', f)
        n = int(x[f:colon])
        # Reject a zero-padded length such as '03:...'.
        if x[f:f + 1] == b'0' and colon != f + 1:
            raise ValueError
        colon += 1
        s = x[colon:colon + n]

        if self.encoding:
            try:
                return s.decode(self.encoding), colon + n
            except UnicodeDecodeError:
                # Only swallow the error if this kind (key/value) opted in.
                if kind not in self.encoding_fallback:
                    raise
        return bytes(s), colon + n

    def decode_list(self, x, f):
        # type: (bytes, int) -> Tuple[List, int]
        """Decode 'l...e' at offset ``f`` into a Python list."""
        r, f = [], f + 1
        while x[f:f + 1] != b'e':
            v, f = self.decode_func[x[f:f + 1]](x, f)
            r.append(v)
        return r, f + 1

    def decode_dict(self, x, f):
        # type: (bytes, int) -> Tuple[OrderedDict[str, Any], int]
        """Decode bencoded dictionary."""
        f += 1
        r = OrderedDict() if self.dict_ordered else {}
        while x[f:f + 1] != b'e':
            k, f = self.decode_string(x, f, kind='key')
            r[k], f = self.decode_func[x[f:f + 1]](x, f)
        if self.dict_ordered_sort:
            r = OrderedDict(sorted(r.items()))
        return r, f + 1
7,204 | handle | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
# http://www.apache.org/licenses/LICENSE-2.0
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import numpy as np
from mxnet_utils import image
from mxnet_vision_service import MXNetVisionService
class SSDService(MXNetVisionService):
    """
    SSD Service to perform real time multi-object detection using pre-trained MXNet SSD model.
    This class extends MXNetVisionService to add custom preprocessing of input
    and preparing the output.
    Reuses input image transformation functionality of MXNetVisionService.
    """
    def __init__(self):
        super(SSDService, self).__init__()
        # Threshold is used to pick the detection boxes with score > threshold.
        # The detections from this network will be of the format - [[class_id, score, x1, y1, x2, y2]].
        # We pick all detections where 'score > threshold'.
        # You can experiment with different threshold to see the best threshold for the use-case.
        self.threshold = 0.2
        # This is used to save the original input image shape.
        # This is required for preparing the bounding box of the detected object "relative to
        # original input"
        # NOTE(review): set per-request in preprocess(); this makes the service
        # stateful and not safe for concurrent requests — confirm the serving
        # framework serializes calls.
        self.input_width = None
        self.input_height = None
    def preprocess(self, batch):
        """
        Input image buffer from data is read into NDArray. Then, resized to
        expected shape. Swaps axes to convert image from BGR format to RGB.
        Returns the preprocessed NDArray as a list for next step, Inference.
        """
        # Read input
        # The payload may arrive under either the "data" or "body" key.
        img = batch[0].get("data")
        if img is None:
            img = batch[0].get("body")
        input_image = image.read(img)
        # Save original input image shape.
        # This is required for preparing the bounding box of the detected object relative to
        # original input
        self.input_height = input_image.shape[0]
        self.input_width = input_image.shape[1]
        # Transform input image - resize, BGR to RGB.
        # Reuse MXNetVisionService preprocess to achieve above transformations.
        return super(SSDService, self).preprocess(batch)
    def postprocess(self, data):
        """
        From the detections, prepares the output in the format of list of
        [(object_class, xmin, ymin, xmax, ymax)]
        object_class is name of the object detected. xmin, ymin, xmax, ymax
        provides the bounding box coordinates.
        Example: [(person, 555, 175, 581, 242), (dog, 306, 446, 468, 530)]
        """
        # Read the detections output after forward pass (inference)
        detections = data[0].asnumpy()
        result = []
        for i in range(detections.shape[0]):
            det = detections[i, :, :]
            # Keep only rows with a non-negative class id — presumably negative
            # ids mark padding rows (see the cls_id >= 0 check below); confirm
            # against the SSD network's output contract.
            res = det[np.where(det[:, 0] >= 0)[0]]
            result.append(res)
        # Prepare the output
        # Only the first image of the batch is reported.
        dets = result[0]
        classes = self.labels
        width = self.input_width  # original input image width
        height = self.input_height  # original input image height
        response = []
        for i in range(dets.shape[0]):
            cls_id = int(dets[i, 0])
            if cls_id >= 0:
                score = dets[i, 1]
                if score > self.threshold:
                    # Box coordinates are normalized [0, 1]; scale back to the
                    # original image size saved in preprocess().
                    xmin = int(dets[i, 2] * width)
                    ymin = int(dets[i, 3] * height)
                    xmax = int(dets[i, 4] * width)
                    ymax = int(dets[i, 5] * height)
                    # Fall back to the numeric class id when no label list is loaded.
                    class_name = str(cls_id)
                    if classes and len(classes) > cls_id:
                        class_name = classes[cls_id]
                    response.append((class_name, xmin, ymin, xmax, ymax))
        return [response]
_service = SSDService()
def handle(data, context):
    """Model-server entry point for inference requests.

    Lazily initializes the module-level service on first call, then delegates
    to the service's handler.

    :param data: input batch from the model server; ``None`` for a ping/probe.
    :param context: model server context used for one-time initialization.
    :return: detection results from the service, or ``None`` when no data.
    """
    if not _service.initialized:
        _service.initialize(context)
    if data is None:
        return None
    return _service.handle(data, context)
7,205 | test no tds in solute list error | #################################################################################
# WaterTAP Copyright (c) 2020-2023, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory, Oak Ridge National Laboratory,
# National Renewable Energy Laboratory, and National Energy Technology
# Laboratory (subject to receipt of any required approvals from the U.S. Dept.
# of Energy). All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and license
# information, respectively. These files are also available online at the URL
# "https://github.com/watertap-org/watertap/"
#################################################################################
"""
Tests for zero-order electrodialysis reversal model
"""
import pytest
from pyomo.environ import (
Block,
check_optimal_termination,
ConcreteModel,
Constraint,
value,
Var,
)
from pyomo.util.check_units import assert_units_consistent
from idaes.core import FlowsheetBlock
from idaes.core.solvers import get_solver
from idaes.core.util.model_statistics import degrees_of_freedom
from idaes.core.util.testing import initialization_tester
from idaes.core import UnitModelCostingBlock
from watertap.unit_models.zero_order import ElectrodialysisReversalZO
from watertap.core.wt_database import Database
from watertap.core.zero_order_properties import WaterParameterBlock
from watertap.core.zero_order_costing import ZeroOrderCosting
solver = get_solver()
class TestElectrodialysisReversalZO_w_default_removal:
    """Tests for ElectrodialysisReversalZO with a solute ('foo') that falls back
    to the database's default removal fraction."""
    @pytest.fixture(scope="class")
    def model(self):
        # Flowsheet with "tds" (required by the unit) plus an unrecognized
        # solute "foo" to exercise the default-removal path.
        m = ConcreteModel()
        m.db = Database()
        m.fs = FlowsheetBlock(dynamic=False)
        m.fs.params = WaterParameterBlock(solute_list=["tds", "foo"])
        m.fs.unit = ElectrodialysisReversalZO(
            property_package=m.fs.params, database=m.db
        )
        m.fs.unit.inlet.flow_mass_comp[0, "H2O"].fix(10000)
        m.fs.unit.inlet.flow_mass_comp[0, "tds"].fix(250)
        m.fs.unit.inlet.flow_mass_comp[0, "foo"].fix(1)
        return m
    @pytest.mark.unit
    def test_build(self, model):
        # Structural checks: the unit wires up its database and declares the
        # electricity variables/constraint.
        assert model.fs.unit.config.database is model.db
        assert isinstance(model.fs.unit.electricity_constraint, Constraint)
        assert isinstance(model.fs.unit.electricity, Var)
        assert isinstance(model.fs.unit.elec_coeff_1, Var)
        assert isinstance(model.fs.unit.elec_coeff_2, Var)
    @pytest.mark.component
    def test_load_parameters(self, model):
        # Parameters loaded from the database must be fixed and match the
        # database values; "foo" uses the default removal fraction.
        data = model.db.get_unit_operation_parameters("electrodialysis_reversal")
        model.fs.unit.load_parameters_from_database(use_default_removal=True)
        assert model.fs.unit.recovery_frac_mass_H2O[0].fixed
        assert (
            model.fs.unit.recovery_frac_mass_H2O[0].value
            == data["recovery_frac_mass_H2O"]["value"]
        )
        for (t, j), v in model.fs.unit.removal_frac_mass_comp.items():
            assert v.fixed
            if j == "foo":
                assert v.value == data["default_removal_frac_mass_comp"]["value"]
            else:
                assert v.value == data["removal_frac_mass_comp"][j]["value"]
        assert model.fs.unit.elec_coeff_1.fixed
        assert model.fs.unit.elec_coeff_1.value == data["elec_coeff_1"]["value"]
        assert model.fs.unit.elec_coeff_2.fixed
        assert model.fs.unit.elec_coeff_2.value == data["elec_coeff_2"]["value"]
    @pytest.mark.component
    def test_degrees_of_freedom(self, model):
        assert degrees_of_freedom(model.fs.unit) == 0
    @pytest.mark.component
    def test_unit_consistency(self, model):
        assert_units_consistent(model.fs.unit)
    @pytest.mark.component
    def test_initialize(self, model):
        initialization_tester(model)
    @pytest.mark.solver
    @pytest.mark.skipif(solver is None, reason="Solver not available")
    @pytest.mark.component
    def test_solve(self, model):
        results = solver.solve(model)
        # Check for optimal solution
        assert check_optimal_termination(results)
    @pytest.mark.solver
    @pytest.mark.skipif(solver is None, reason="Solver not available")
    @pytest.mark.component
    def test_solution(self, model):
        # Reference values are regression targets from a known-good solve.
        assert pytest.approx(9.2628, rel=1e-5) == value(
            model.fs.unit.properties_treated[0].flow_vol
        )
        assert pytest.approx(5.1632, rel=1e-5) == value(
            model.fs.unit.properties_treated[0].conc_mass_comp["tds"]
        )
        assert pytest.approx(0.1079584, rel=1e-5) == value(
            model.fs.unit.properties_treated[0].conc_mass_comp["foo"]
        )
        assert pytest.approx(204.5935, rel=1e-5) == value(
            model.fs.unit.properties_byproduct[0].conc_mass_comp["tds"]
        )
        assert pytest.approx(1.01197e-08, rel=1e-5) == value(
            model.fs.unit.properties_byproduct[0].conc_mass_comp["foo"]
        )
        assert pytest.approx(472761.372, rel=1e-5) == value(
            model.fs.unit.electricity[0]
        )
        assert pytest.approx(12.8107, rel=1e-5) == value(
            model.fs.unit.electricity_intensity[0]
        )
    @pytest.mark.solver
    @pytest.mark.skipif(solver is None, reason="Solver not available")
    @pytest.mark.component
    def test_conservation(self, model):
        # Mass balance: inlet = treated + byproduct for every component.
        for j in model.fs.params.component_list:
            assert 1e-6 >= abs(
                value(
                    model.fs.unit.inlet.flow_mass_comp[0, j]
                    - model.fs.unit.treated.flow_mass_comp[0, j]
                    - model.fs.unit.byproduct.flow_mass_comp[0, j]
                )
            )
    @pytest.mark.component
    def test_report(self, model):
        model.fs.unit.report()
def test_costing():
    """Verify costing-block wiring for ElectrodialysisReversalZO under
    ZeroOrderCosting: parameters exist, units are consistent, DOF stays 0,
    and the unit's electricity is registered as a costed flow."""
    m = ConcreteModel()
    m.db = Database()
    m.fs = FlowsheetBlock(dynamic=False)
    m.fs.params = WaterParameterBlock(solute_list=["tds", "toc", "tss"])
    m.fs.costing = ZeroOrderCosting()
    m.fs.unit1 = ElectrodialysisReversalZO(property_package=m.fs.params, database=m.db)
    m.fs.unit1.inlet.flow_mass_comp[0, "H2O"].fix(10000)
    m.fs.unit1.inlet.flow_mass_comp[0, "tds"].fix(1)
    m.fs.unit1.inlet.flow_mass_comp[0, "toc"].fix(2)
    m.fs.unit1.inlet.flow_mass_comp[0, "tss"].fix(3)
    m.fs.unit1.load_parameters_from_database(use_default_removal=True)
    assert degrees_of_freedom(m.fs.unit1) == 0
    m.fs.unit1.costing = UnitModelCostingBlock(flowsheet_costing_block=m.fs.costing)
    # Costing must create the technology sub-block with its capital parameters.
    assert isinstance(m.fs.costing.electrodialysis_reversal, Block)
    assert isinstance(m.fs.costing.electrodialysis_reversal.capital_a_parameter, Var)
    assert isinstance(m.fs.costing.electrodialysis_reversal.capital_b_parameter, Var)
    assert isinstance(m.fs.costing.electrodialysis_reversal.reference_state, Var)
    assert isinstance(m.fs.unit1.costing.capital_cost, Var)
    assert isinstance(m.fs.unit1.costing.capital_cost_constraint, Constraint)
    assert_units_consistent(m.fs)
    # Adding the costing block must not introduce new degrees of freedom.
    assert degrees_of_freedom(m.fs.unit1) == 0
    assert m.fs.unit1.electricity[0] in m.fs.costing._registered_flows["electricity"]
db = Database()
params = db._get_technology("electrodialysis_reversal")
@pytest.mark.unit
def test_no_tds_in_solute_list_error():
    """ElectrodialysisReversalZO must raise KeyError when 'tds' is missing
    from the property package's solute list."""
    m = ConcreteModel()
    m.fs = FlowsheetBlock(dynamic=False)
    m.fs.params = WaterParameterBlock(solute_list=["foo"])
    with pytest.raises(
        KeyError,
        match="TDS must be included in the solute list for determining"
        " electricity intensity and power consumption of the electrodialysis "
        "reversal unit.",
    ):
        m.fs.unit = ElectrodialysisReversalZO(property_package=m.fs.params, database=db)
7,206 | test expected failure | import unittest
class TestUnittestAssertions(unittest.TestCase):
def testFail(self):
with self.assertRaises(AssertionError):
self.fail("failure")
def testEqual(self):
self.assertEqual(0, 0)
self.assertEqual([0, 1, 2], [0, 1, 2])
with self.assertRaises(AssertionError):
self.assertEqual(0, None)
with self.assertRaises(AssertionError):
self.assertEqual([0, 1, 2], [1, 2, 3])
def test_AlmostEqual(self):
self.assertAlmostEqual(1.00000001, 1.0)
self.assertNotAlmostEqual(1.0000001, 1.0)
with self.assertRaises(AssertionError):
self.assertAlmostEqual(1.0000001, 1.0)
with self.assertRaises(AssertionError):
self.assertNotAlmostEqual(1.00000001, 1.0)
self.assertAlmostEqual(1.1, 1.0, places=0)
with self.assertRaises(AssertionError):
self.assertAlmostEqual(1.1, 1.0, places=1)
self.assertAlmostEqual(0, 0.1 + 0.1j, places=0)
self.assertNotAlmostEqual(0, 0.1 + 0.1j, places=1)
with self.assertRaises(AssertionError):
self.assertAlmostEqual(0, 0.1 + 0.1j, places=1)
with self.assertRaises(AssertionError):
self.assertNotAlmostEqual(0, 0.1 + 0.1j, places=0)
self.assertAlmostEqual(float("inf"), float("inf"))
with self.assertRaises(AssertionError):
self.assertNotAlmostEqual(float("inf"), float("inf"))
def test_AlmostEqualWithDelta(self):
self.assertAlmostEqual(1.1, 1.0, delta=0.5)
self.assertAlmostEqual(1.0, 1.1, delta=0.5)
self.assertNotAlmostEqual(1.1, 1.0, delta=0.05)
self.assertNotAlmostEqual(1.0, 1.1, delta=0.05)
self.assertAlmostEqual(1.0, 1.0, delta=0.5)
with self.assertRaises(AssertionError):
self.assertNotAlmostEqual(1.0, 1.0, delta=0.5)
with self.assertRaises(AssertionError):
self.assertAlmostEqual(1.1, 1.0, delta=0.05)
with self.assertRaises(AssertionError):
self.assertNotAlmostEqual(1.1, 1.0, delta=0.5)
with self.assertRaises(TypeError):
self.assertAlmostEqual(1.1, 1.0, places=2, delta=2)
with self.assertRaises(TypeError):
self.assertNotAlmostEqual(1.1, 1.0, places=2, delta=2)
def testNotEqual(self):
self.assertNotEqual([0, 1, 2], [0, 2, 1])
with self.assertRaises(AssertionError):
self.assertNotEqual(0, 0)
with self.assertRaises(AssertionError):
self.assertNotEqual([0, 1, 2], [0, 1, 2])
def testIs(self):
self.assertIs(None, None)
with self.assertRaises(AssertionError):
self.assertIs([1, 2, 3], [1, 2, 3])
def testIsNot(self):
self.assertIsNot([1, 2, 3], [1, 2, 3])
with self.assertRaises(AssertionError):
self.assertIsNot(None, None)
def testIsNone(self):
self.assertIsNone(None)
with self.assertRaises(AssertionError):
self.assertIsNone(0)
def testIsNotNone(self):
self.assertIsNotNone(0)
with self.assertRaises(AssertionError):
self.assertIsNotNone(None)
def testTrue(self):
self.assertTrue(True)
with self.assertRaises(AssertionError):
self.assertTrue(False)
def testFalse(self):
self.assertFalse(False)
with self.assertRaises(AssertionError):
self.assertFalse(True)
def testIn(self):
self.assertIn("t", "cat")
with self.assertRaises(AssertionError):
self.assertIn("x", "cat")
def testIsInstance(self):
self.assertIsInstance("cat", str)
with self.assertRaises(AssertionError):
self.assertIsInstance(7, str)
def testRaises(self):
with self.assertRaises(ZeroDivisionError):
1 / 0
pass
@unittest.skip("test of skipping")
def testSkip(self):
self.fail("this should be skipped")
def testAssert(self):
e1 = None
try:
def func_under_test(a):
assert a > 10
self.assertRaises(AssertionError, func_under_test, 20)
except AssertionError as e:
e1 = e
if not e1 or "not raised" not in e1.args[0]:
self.fail("Expected to catch lack of AssertionError from assert in func_under_test")
@unittest.expectedFailure
def METHOD_NAME(self):
self.assertEqual(1, 0)
def testExpectedFailureNot(self):
@unittest.expectedFailure
def testInner():
self.assertEqual(1, 1)
try:
testInner()
except:
pass
else:
self.fail("Unexpected success was not detected")
def test_subtest_even(self):
"""
Test that numbers between 0 and 5 are all even.
"""
for i in range(0, 10, 2):
with self.subTest("Should only pass for even numbers", i=i):
self.assertEqual(i % 2, 0)
if __name__ == "__main__":
unittest.main() |
7,207 | get signature | """
This type stub file was generated by pyright.
"""
import logging
logger = logging.getLogger(__name__)
EMPTY_SHA256_HASH = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
PAYLOAD_BUFFER = 1024 * 1024
ISO8601 = "%Y-%m-%dT%H:%M:%SZ"
SIGV4_TIMESTAMP = "%Y%m%dT%H%M%SZ"
SIGNED_HEADERS_BLACKLIST = ["expect", "user-agent", "x-amzn-trace-id"]
UNSIGNED_PAYLOAD = "UNSIGNED-PAYLOAD"
class BaseSigner(object):
REQUIRES_REGION = ...
def add_auth(self, request): ...
class SigV2Auth(BaseSigner):
"""
Sign a request with Signature V2.
"""
def __init__(self, credentials) -> None: ...
def calc_signature(self, request, params): ...
def add_auth(self, request): ...
class SigV3Auth(BaseSigner):
def __init__(self, credentials) -> None: ...
def add_auth(self, request): ...
class SigV4Auth(BaseSigner):
"""
Sign a request with Signature V4.
"""
REQUIRES_REGION = ...
def __init__(self, credentials, service_name, region_name) -> None: ...
def headers_to_sign(self, request):
"""
Select the headers from the request that need to be included
in the StringToSign.
"""
...
def canonical_query_string(self, request): ...
def canonical_headers(self, headers_to_sign):
"""
Return the headers that need to be included in the StringToSign
in their canonical form by converting all header keys to lower
case, sorting them in alphabetical order and then joining
them into a string, separated by newlines.
"""
...
def signed_headers(self, headers_to_sign): ...
def payload(self, request): ...
def canonical_request(self, request): ...
def scope(self, request): ...
def credential_scope(self, request): ...
def string_to_sign(self, request, canonical_request):
"""
Return the canonical StringToSign as well as a dict
containing the original version of all headers that
were included in the StringToSign.
"""
...
def signature(self, string_to_sign, request): ...
def add_auth(self, request): ...
class S3SigV4Auth(SigV4Auth): ...
class SigV4QueryAuth(SigV4Auth):
DEFAULT_EXPIRES = ...
def __init__(self, credentials, service_name, region_name, expires=...) -> None: ...
class S3SigV4QueryAuth(SigV4QueryAuth):
"""S3 SigV4 auth using query parameters.
This signer will sign a request using query parameters and signature
version 4, i.e a "presigned url" signer.
Based off of:
http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html
"""
def payload(self, request): ...
class S3SigV4PostAuth(SigV4Auth):
"""
Presigns a s3 post
Implementation doc here:
http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-UsingHTTPPOST.html
"""
def add_auth(self, request): ...
class HmacV1Auth(BaseSigner):
QSAOfInterest = ...
def __init__(self, credentials, service_name=..., region_name=...) -> None: ...
def sign_string(self, string_to_sign): ...
def canonical_standard_headers(self, headers): ...
def canonical_custom_headers(self, headers): ...
def unquote_v(self, nv):
"""
TODO: Do we need this?
"""
...
def canonical_resource(self, split, auth_path=...): ...
def canonical_string(self, method, split, headers, expires=..., auth_path=...): ...
def METHOD_NAME(self, method, split, headers, expires=..., auth_path=...): ...
def add_auth(self, request): ...
class HmacV1QueryAuth(HmacV1Auth):
"""
Generates a presigned request for s3.
Spec from this document:
http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html
#RESTAuthenticationQueryStringAuth
"""
DEFAULT_EXPIRES = ...
def __init__(self, credentials, expires=...) -> None: ...
class HmacV1PostAuth(HmacV1Auth):
"""
Generates a presigned post for s3.
Spec from this document:
http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingHTTPPOST.html
"""
def add_auth(self, request): ...
AUTH_TYPE_MAPS = {
"v2": SigV2Auth,
"v4": SigV4Auth,
"v4-query": SigV4QueryAuth,
"v3": SigV3Auth,
"v3https": SigV3Auth,
"s3": HmacV1Auth,
"s3-query": HmacV1QueryAuth,
"s3-presign-post": HmacV1PostAuth,
"s3v4": S3SigV4Auth,
"s3v4-query": S3SigV4QueryAuth,
"s3v4-presign-post": S3SigV4PostAuth,
} |
7,208 | test upgrade 1 default allowed | ######################################################################
#
# File: test/unit/account_info/test_sqlite_account_info.py
#
# Copyright 2021 Backblaze Inc. All Rights Reserved.
#
# License https://www.backblaze.com/using_b2_code.html
#
######################################################################
from __future__ import annotations
import os
import pytest
from apiver_deps import (
B2_ACCOUNT_INFO_DEFAULT_FILE,
B2_ACCOUNT_INFO_ENV_VAR,
XDG_CONFIG_HOME_ENV_VAR,
AbstractAccountInfo,
SqliteAccountInfo,
)
from .fixtures import *
class TestDatabseMigrations:
@pytest.fixture(autouse=True)
def setup(self, sqlite_account_info_factory, account_info_default_data_schema_0):
self.sqlite_account_info_factory = sqlite_account_info_factory
self.account_info_default_data = account_info_default_data_schema_0
def METHOD_NAME(self):
"""The 'allowed' field should be the default for upgraded databases."""
old_account_info = self.sqlite_account_info_factory(schema_0=True)
old_account_info.set_auth_data_with_schema_0_for_test(**self.account_info_default_data)
new_account_info = self.sqlite_account_info_factory(file_name=old_account_info.filename)
assert AbstractAccountInfo.DEFAULT_ALLOWED == new_account_info.get_allowed()
def test_upgrade_2_default_app_key(self):
"""The 'application_key_id' field should default to the account ID."""
old_account_info = self.sqlite_account_info_factory(schema_0=True)
old_account_info.set_auth_data_with_schema_0_for_test(**self.account_info_default_data)
new_account_info = self.sqlite_account_info_factory(file_name=old_account_info.filename)
assert 'account_id' == new_account_info.get_application_key_id()
def test_upgrade_3_default_s3_api_url(self):
"""The 's3_api_url' field should be set."""
old_account_info = self.sqlite_account_info_factory(schema_0=True)
old_account_info.set_auth_data_with_schema_0_for_test(**self.account_info_default_data)
new_account_info = self.sqlite_account_info_factory(file_name=old_account_info.filename)
assert '' == new_account_info.get_s3_api_url()
def test_migrate_to_4(self):
old_account_info = self.sqlite_account_info_factory(schema_0=True)
old_account_info.set_auth_data_with_schema_0_for_test(**self.account_info_default_data)
new_account_info = self.sqlite_account_info_factory(file_name=old_account_info.filename)
with new_account_info._get_connection() as conn:
sizes = conn.execute(
"SELECT recommended_part_size, absolute_minimum_part_size from account"
).fetchone()
assert (100, 5000000) == sizes
class TestSqliteAccountProfileFileLocation:
@pytest.fixture(autouse=True)
def setup(self, monkeypatch, tmpdir):
monkeypatch.setenv(
'HOME', str(tmpdir)
) # this affects .expanduser() and protects the real HOME folder
monkeypatch.setenv("USERPROFILE", str(tmpdir)) # same as HOME, but for Windows
monkeypatch.delenv(B2_ACCOUNT_INFO_ENV_VAR, raising=False)
monkeypatch.delenv(XDG_CONFIG_HOME_ENV_VAR, raising=False)
def test_invalid_profile_name(self):
with pytest.raises(ValueError):
SqliteAccountInfo._get_user_account_info_path(profile='&@(*$')
def test_profile_and_file_name_conflict(self):
with pytest.raises(ValueError):
SqliteAccountInfo._get_user_account_info_path(file_name='foo', profile='bar')
def test_profile_and_env_var_conflict(self, monkeypatch):
monkeypatch.setenv(B2_ACCOUNT_INFO_ENV_VAR, 'foo')
with pytest.raises(ValueError):
SqliteAccountInfo._get_user_account_info_path(profile='bar')
def test_profile_and_xdg_config_env_var(self, monkeypatch):
monkeypatch.setenv(XDG_CONFIG_HOME_ENV_VAR, os.path.join('~', 'custom'))
account_info_path = SqliteAccountInfo._get_user_account_info_path(profile='secondary')
assert account_info_path == os.path.expanduser(
os.path.join('~', 'custom', 'b2', 'db-secondary.sqlite')
)
def test_profile(self):
account_info_path = SqliteAccountInfo._get_user_account_info_path(profile='foo')
assert account_info_path == os.path.expanduser(os.path.join('~', '.b2db-foo.sqlite'))
def test_file_name(self):
account_info_path = SqliteAccountInfo._get_user_account_info_path(
file_name=os.path.join('~', 'foo')
)
assert account_info_path == os.path.expanduser(os.path.join('~', 'foo'))
def test_env_var(self, monkeypatch):
monkeypatch.setenv(B2_ACCOUNT_INFO_ENV_VAR, os.path.join('~', 'foo'))
account_info_path = SqliteAccountInfo._get_user_account_info_path()
assert account_info_path == os.path.expanduser(os.path.join('~', 'foo'))
def test_default_file_if_exists(self, monkeypatch):
# ensure that XDG_CONFIG_HOME_ENV_VAR doesn't matter if default file exists
monkeypatch.setenv(XDG_CONFIG_HOME_ENV_VAR, 'some')
account_file_path = os.path.expanduser(B2_ACCOUNT_INFO_DEFAULT_FILE)
parent_dir = os.path.abspath(os.path.join(account_file_path, os.pardir))
os.makedirs(parent_dir, exist_ok=True)
with open(account_file_path, 'w') as account_file:
account_file.write('')
account_info_path = SqliteAccountInfo._get_user_account_info_path()
assert account_info_path == os.path.expanduser(B2_ACCOUNT_INFO_DEFAULT_FILE)
def test_xdg_config_env_var(self, monkeypatch):
monkeypatch.setenv(XDG_CONFIG_HOME_ENV_VAR, os.path.join('~', 'custom'))
account_info_path = SqliteAccountInfo._get_user_account_info_path()
assert account_info_path == os.path.expanduser(
os.path.join('~', 'custom', 'b2', 'account_info')
)
def test_default_file(self):
account_info_path = SqliteAccountInfo._get_user_account_info_path()
assert account_info_path == os.path.expanduser(B2_ACCOUNT_INFO_DEFAULT_FILE) |
7,209 | get filtered trials | import abc
from typing import Callable
from typing import cast
from typing import Collection
from typing import Dict
from typing import List
from typing import Optional
from typing import Union
import numpy
from optuna._transform import _SearchSpaceTransform
from optuna.distributions import BaseDistribution
from optuna.search_space import intersection_search_space
from optuna.study import Study
from optuna.trial import FrozenTrial
from optuna.trial import TrialState
class BaseImportanceEvaluator(abc.ABC):
"""Abstract parameter importance evaluator."""
@abc.abstractmethod
def evaluate(
self,
study: Study,
params: Optional[List[str]] = None,
*,
target: Optional[Callable[[FrozenTrial], float]] = None,
) -> Dict[str, float]:
"""Evaluate parameter importances based on completed trials in the given study.
.. note::
This method is not meant to be called by library users.
.. seealso::
Please refer to :func:`~optuna.importance.get_param_importances` for how a concrete
evaluator should implement this method.
Args:
study:
An optimized study.
params:
A list of names of parameters to assess.
If :obj:`None`, all parameters that are present in all of the completed trials are
assessed.
target:
A function to specify the value to evaluate importances.
If it is :obj:`None` and ``study`` is being used for single-objective optimization,
the objective values are used. Can also be used for other trial attributes, such as
the duration, like ``target=lambda t: t.duration.total_seconds()``.
.. note::
Specify this argument if ``study`` is being used for multi-objective
optimization. For example, to get the hyperparameter importance of the first
objective, use ``target=lambda t: t.values[0]`` for the target parameter.
Returns:
A :obj:`dict` where the keys are parameter names and the values are assessed
importances.
"""
# TODO(hvy): Reconsider the interface as logic might violate DRY among multiple evaluators.
raise NotImplementedError
def _get_distributions(study: Study, params: Optional[List[str]]) -> Dict[str, BaseDistribution]:
completed_trials = study.get_trials(deepcopy=False, states=(TrialState.COMPLETE,))
_check_evaluate_args(completed_trials, params)
if params is None:
return intersection_search_space(study.get_trials(deepcopy=False))
# New temporary required to pass mypy. Seems like a bug.
params_not_none = params
assert params_not_none is not None
# Compute the search space based on the subset of trials containing all parameters.
distributions = None
for trial in completed_trials:
trial_distributions = trial.distributions
if not all(name in trial_distributions for name in params_not_none):
continue
if distributions is None:
distributions = dict(
filter(
lambda name_and_distribution: name_and_distribution[0] in params_not_none,
trial_distributions.items(),
)
)
continue
if any(
trial_distributions[name] != distribution
for name, distribution in distributions.items()
):
raise ValueError(
"Parameters importances cannot be assessed with dynamic search spaces if "
"parameters are specified. Specified parameters: {}.".format(params)
)
assert distributions is not None # Required to pass mypy.
distributions = dict(
sorted(distributions.items(), key=lambda name_and_distribution: name_and_distribution[0])
)
return distributions
def _check_evaluate_args(completed_trials: List[FrozenTrial], params: Optional[List[str]]) -> None:
if len(completed_trials) == 0:
raise ValueError("Cannot evaluate parameter importances without completed trials.")
if len(completed_trials) == 1:
raise ValueError("Cannot evaluate parameter importances with only a single trial.")
if params is not None:
if not isinstance(params, (list, tuple)):
raise TypeError(
"Parameters must be specified as a list. Actual parameters: {}.".format(params)
)
if any(not isinstance(p, str) for p in params):
raise TypeError(
"Parameters must be specified by their names with strings. Actual parameters: "
"{}.".format(params)
)
if len(params) > 0:
at_least_one_trial = False
for trial in completed_trials:
if all(p in trial.distributions for p in params):
at_least_one_trial = True
break
if not at_least_one_trial:
raise ValueError(
"Study must contain completed trials with all specified parameters. "
"Specified parameters: {}.".format(params)
)
def METHOD_NAME(
study: Study, params: Collection[str], target: Optional[Callable[[FrozenTrial], float]]
) -> List[FrozenTrial]:
trials = study.get_trials(deepcopy=False, states=(TrialState.COMPLETE,))
return [
trial
for trial in trials
if set(params) <= set(trial.params)
and numpy.isfinite(target(trial) if target is not None else cast(float, trial.value))
]
def _param_importances_to_dict(
params: Collection[str], param_importances: Union[numpy.ndarray, float]
) -> Dict[str, float]:
return {
name: value
for name, value in zip(params, numpy.broadcast_to(param_importances, (len(params),)))
}
def _get_trans_params(trials: List[FrozenTrial], trans: _SearchSpaceTransform) -> numpy.ndarray:
return numpy.array([trans.transform(trial.params) for trial in trials])
def _get_target_values(
trials: List[FrozenTrial], target: Optional[Callable[[FrozenTrial], float]]
) -> numpy.ndarray:
return numpy.array([target(trial) if target is not None else trial.value for trial in trials])
def _sort_dict_by_importance(param_importances: Dict[str, float]) -> Dict[str, float]:
return dict(
reversed(
sorted(
param_importances.items(), key=lambda name_and_importance: name_and_importance[1]
)
)
) |
7,210 | get n mosaic | """CSA header reader from SPM spec
"""
import numpy as np
from .structreader import Unpacker
from .utils import find_private_section
# DICOM VR code to Python type
_CONVERTERS = {
'FL': float, # float
'FD': float, # double
'DS': float, # decimal string
'SS': int, # signed short
'US': int, # unsigned short
'SL': int, # signed long
'UL': int, # unsigned long
'IS': int, # integer string
}
MAX_CSA_ITEMS = 1000
class CSAError(Exception):
pass
class CSAReadError(CSAError):
pass
def get_csa_header(dcm_data, csa_type='image'):
"""Get CSA header information from DICOM header
Return None if the header does not contain CSA information of the
specified `csa_type`
Parameters
----------
dcm_data : dicom.Dataset
DICOM dataset. Should implement ``__getitem__`` and, if initial check
for presence of ``dcm_data[(0x29, 0x10)]`` passes, should satisfy
interface for ``find_private_section``.
csa_type : {'image', 'series'}, optional
Type of CSA field to read; default is 'image'
Returns
-------
csa_info : None or dict
Parsed CSA field of `csa_type` or None, if we cannot find the CSA
information.
"""
csa_type = csa_type.lower()
if csa_type == 'image':
element_offset = 0x10
elif csa_type == 'series':
element_offset = 0x20
else:
raise ValueError(f'Invalid CSA header type "{csa_type}"')
if not (0x29, 0x10) in dcm_data: # Cannot be Siemens CSA
return None
section_start = find_private_section(dcm_data, 0x29, 'SIEMENS CSA HEADER')
if section_start is None:
return None
element_no = section_start + element_offset
try:
tag = dcm_data[(0x29, element_no)]
except KeyError:
# The element could be missing due to anonymization
return None
return read(tag.value)
def read(csa_str):
"""Read CSA header from string `csa_str`
Parameters
----------
csa_str : str
byte string containing CSA header information
Returns
-------
header : dict
header information as dict, where `header` has fields (at least)
``type, n_tags, tags``. ``header['tags']`` is also a dictionary
with one key, value pair for each tag in the header.
"""
csa_len = len(csa_str)
csa_dict = {'tags': {}}
hdr_id = csa_str[:4]
up_str = Unpacker(csa_str, endian='<')
if hdr_id == b'SV10': # CSA2
hdr_type = 2
up_str.ptr = 4 # omit the SV10
csa_dict['unused0'] = up_str.read(4)
else: # CSA1
hdr_type = 1
csa_dict['type'] = hdr_type
csa_dict['n_tags'], csa_dict['check'] = up_str.unpack('2I')
if not 0 < csa_dict['n_tags'] <= MAX_CSA_ITEMS:
raise CSAReadError(
f'Number of tags `t` should be 0 < t <= {MAX_CSA_ITEMS}. '
f'Instead found {csa_dict["n_tags"]} tags.'
)
for tag_no in range(csa_dict['n_tags']):
name, vm, vr, syngodt, n_items, last3 = up_str.unpack('64si4s3i')
vr = nt_str(vr)
name = nt_str(name)
tag = {
'n_items': n_items,
'vm': vm, # value multiplicity
'vr': vr, # value representation
'syngodt': syngodt,
'last3': last3,
'tag_no': tag_no,
}
if vm == 0:
n_values = n_items
else:
n_values = vm
# data converter
converter = _CONVERTERS.get(vr)
# CSA1 specific length modifier
if tag_no == 1:
tag0_n_items = n_items
if n_items > MAX_CSA_ITEMS:
raise CSAReadError(f'Expected <= {MAX_CSA_ITEMS} tags, got {n_items}')
items = []
for item_no in range(n_items):
x0, x1, x2, x3 = up_str.unpack('4i')
ptr = up_str.ptr
if hdr_type == 1: # CSA1 - odd length calculation
item_len = x0 - tag0_n_items
if item_len < 0 or (ptr + item_len) > csa_len:
if item_no < vm:
items.append('')
break
else: # CSA2
item_len = x1
if (ptr + item_len) > csa_len:
raise CSAReadError('Item is too long, aborting read')
if item_no >= n_values:
assert item_len == 0
continue
item = nt_str(up_str.read(item_len))
if converter:
# we may have fewer real items than are given in
# n_items, but we don't know how many - assume that
# we've reached the end when we hit an empty item
if item_len == 0:
n_values = item_no
continue
item = converter(item)
items.append(item)
# go to 4 byte boundary
plus4 = item_len % 4
if plus4 != 0:
up_str.ptr += 4 - plus4
tag['items'] = items
csa_dict['tags'][name] = tag
return csa_dict
def get_scalar(csa_dict, tag_name):
try:
items = csa_dict['tags'][tag_name]['items']
except KeyError:
return None
if len(items) == 0:
return None
return items[0]
def get_vector(csa_dict, tag_name, n):
try:
items = csa_dict['tags'][tag_name]['items']
except KeyError:
return None
if len(items) == 0:
return None
if len(items) != n:
raise ValueError('Expecting %d vector' % n)
return np.array(items)
def is_mosaic(csa_dict):
"""Return True if the data is of Mosaic type
Parameters
----------
csa_dict : dict
dict containing read CSA data
Returns
-------
tf : bool
True if the `dcm_data` appears to be of Siemens mosaic type,
False otherwise
"""
if csa_dict is None:
return False
if get_acq_mat_txt(csa_dict) is None:
return False
n_o_m = METHOD_NAME(csa_dict)
return not (n_o_m is None) and n_o_m != 0
def METHOD_NAME(csa_dict):
return get_scalar(csa_dict, 'NumberOfImagesInMosaic')
def get_acq_mat_txt(csa_dict):
return get_scalar(csa_dict, 'AcquisitionMatrixText')
def get_slice_normal(csa_dict):
return get_vector(csa_dict, 'SliceNormalVector', 3)
def get_b_matrix(csa_dict):
vals = get_vector(csa_dict, 'B_matrix', 6)
if vals is None:
return
# the 6 vector is the upper triangle of the symmetric B matrix
inds = np.array([0, 1, 2, 1, 3, 4, 2, 4, 5])
B = np.array(vals)[inds]
return B.reshape(3, 3)
def get_b_value(csa_dict):
return get_scalar(csa_dict, 'B_value')
def get_g_vector(csa_dict):
return get_vector(csa_dict, 'DiffusionGradientDirection', 3)
def get_ice_dims(csa_dict):
dims = get_scalar(csa_dict, 'ICE_Dims')
if dims is None:
return None
return dims.split('_')
def nt_str(s):
"""Strip string to first null
Parameters
----------
s : bytes
Returns
-------
sdash : str
s stripped to first occurrence of null (0)
"""
zero_pos = s.find(b'\x00')
if zero_pos == -1:
return s
return s[:zero_pos].decode('latin-1') |
7,211 | eq msg clear | import asyncio
import contextlib
from typing import List
import discord
import lavalink
from lavalink import NodeNotFound, PlayerNotFound
from red_commons.logging import getLogger
from redbot.core import commands
from redbot.core.utils.chat_formatting import box
from ...equalizer import Equalizer
from ..abc import MixinMeta
from ..cog_utils import CompositeMetaClass
log = getLogger("red.cogs.Audio.cog.Utilities.equalizer")
class EqualizerUtilities(MixinMeta, metaclass=CompositeMetaClass):
async def _apply_gain(self, guild_id: int, band: int, gain: float) -> None:
const = {
"op": "equalizer",
"guildId": str(guild_id),
"bands": [{"band": band, "gain": gain}],
}
try:
await lavalink.get_player(guild_id).node.send({**const})
except (NodeNotFound, PlayerNotFound):
pass
async def _apply_gains(self, guild_id: int, gains: List[float]) -> None:
const = {
"op": "equalizer",
"guildId": str(guild_id),
"bands": [{"band": x, "gain": y} for x, y in enumerate(gains)],
}
try:
await lavalink.get_player(guild_id).node.send({**const})
except (NodeNotFound, PlayerNotFound):
pass
async def _eq_check(self, ctx: commands.Context, player: lavalink.Player) -> None:
eq = player.fetch("eq", Equalizer())
config_bands = await self.config.custom("EQUALIZER", ctx.guild.id).eq_bands()
if not config_bands:
config_bands = eq.bands
await self.config.custom("EQUALIZER", ctx.guild.id).eq_bands.set(eq.bands)
if eq.bands != config_bands:
band_num = list(range(0, eq.band_count))
band_value = config_bands
eq_dict = {}
for k, v in zip(band_num, band_value):
eq_dict[k] = v
for band, value in eq_dict.items():
eq.set_gain(band, value)
player.store("eq", eq)
await self._apply_gains(ctx.guild.id, config_bands)
async def _eq_interact(
self,
ctx: commands.Context,
player: lavalink.Player,
eq: Equalizer,
message: discord.Message,
selected: int,
) -> None:
player.store("eq", eq)
emoji = {
"far_left": "\N{BLACK LEFT-POINTING TRIANGLE}\N{VARIATION SELECTOR-16}",
"one_left": "\N{LEFTWARDS BLACK ARROW}\N{VARIATION SELECTOR-16}",
"max_output": "\N{BLACK UP-POINTING DOUBLE TRIANGLE}",
"output_up": "\N{UP-POINTING SMALL RED TRIANGLE}",
"output_down": "\N{DOWN-POINTING SMALL RED TRIANGLE}",
"min_output": "\N{BLACK DOWN-POINTING DOUBLE TRIANGLE}",
"one_right": "\N{BLACK RIGHTWARDS ARROW}\N{VARIATION SELECTOR-16}",
"far_right": "\N{BLACK RIGHT-POINTING TRIANGLE}\N{VARIATION SELECTOR-16}",
"reset": "\N{BLACK CIRCLE FOR RECORD}\N{VARIATION SELECTOR-16}",
"info": "\N{INFORMATION SOURCE}\N{VARIATION SELECTOR-16}",
}
selector = f'{" " * 8}{" " * selected}^^'
try:
await message.edit(content=box(f"{eq.visualise()}\n{selector}", lang="ini"))
except discord.errors.NotFound:
return
try:
(react_emoji, react_user) = await self._get_eq_reaction(ctx, message, emoji)
except TypeError:
return
if not react_emoji:
await self.config.custom("EQUALIZER", ctx.guild.id).eq_bands.set(eq.bands)
await self._clear_react(message, emoji)
if react_emoji == "\N{LEFTWARDS BLACK ARROW}\N{VARIATION SELECTOR-16}":
await self.remove_react(message, react_emoji, react_user)
await self._eq_interact(ctx, player, eq, message, max(selected - 1, 0))
if react_emoji == "\N{BLACK RIGHTWARDS ARROW}\N{VARIATION SELECTOR-16}":
await self.remove_react(message, react_emoji, react_user)
await self._eq_interact(ctx, player, eq, message, min(selected + 1, 14))
if react_emoji == "\N{UP-POINTING SMALL RED TRIANGLE}":
await self.remove_react(message, react_emoji, react_user)
_max = float("{:.2f}".format(min(eq.get_gain(selected) + 0.1, 1.0)))
eq.set_gain(selected, _max)
await self._apply_gain(ctx.guild.id, selected, _max)
await self._eq_interact(ctx, player, eq, message, selected)
if react_emoji == "\N{DOWN-POINTING SMALL RED TRIANGLE}":
await self.remove_react(message, react_emoji, react_user)
_min = float("{:.2f}".format(max(eq.get_gain(selected) - 0.1, -0.25)))
eq.set_gain(selected, _min)
await self._apply_gain(ctx.guild.id, selected, _min)
await self._eq_interact(ctx, player, eq, message, selected)
if react_emoji == "\N{BLACK UP-POINTING DOUBLE TRIANGLE}":
await self.remove_react(message, react_emoji, react_user)
_max = 1.0
eq.set_gain(selected, _max)
await self._apply_gain(ctx.guild.id, selected, _max)
await self._eq_interact(ctx, player, eq, message, selected)
if react_emoji == "\N{BLACK DOWN-POINTING DOUBLE TRIANGLE}":
await self.remove_react(message, react_emoji, react_user)
_min = -0.25
eq.set_gain(selected, _min)
await self._apply_gain(ctx.guild.id, selected, _min)
await self._eq_interact(ctx, player, eq, message, selected)
if react_emoji == "\N{BLACK LEFT-POINTING TRIANGLE}\N{VARIATION SELECTOR-16}":
await self.remove_react(message, react_emoji, react_user)
selected = 0
await self._eq_interact(ctx, player, eq, message, selected)
if react_emoji == "\N{BLACK RIGHT-POINTING TRIANGLE}\N{VARIATION SELECTOR-16}":
await self.remove_react(message, react_emoji, react_user)
selected = 14
await self._eq_interact(ctx, player, eq, message, selected)
if react_emoji == "\N{BLACK CIRCLE FOR RECORD}\N{VARIATION SELECTOR-16}":
await self.remove_react(message, react_emoji, react_user)
for band in range(eq.band_count):
eq.set_gain(band, 0.0)
await self._apply_gains(ctx.guild.id, eq.bands)
await self._eq_interact(ctx, player, eq, message, selected)
if react_emoji == "\N{INFORMATION SOURCE}\N{VARIATION SELECTOR-16}":
await self.remove_react(message, react_emoji, react_user)
await ctx.send_help(self.command_equalizer)
await self._eq_interact(ctx, player, eq, message, selected)
async def METHOD_NAME(self, eq_message: discord.Message):
if eq_message is not None:
with contextlib.suppress(discord.HTTPException):
await eq_message.delete()
async def _get_eq_reaction(self, ctx: commands.Context, message: discord.Message, emoji):
try:
reaction, user = await self.bot.wait_for(
"reaction_add",
check=lambda r, u: r.message.id == message.id
and u.id == ctx.author.id
and r.emoji in emoji.values(),
timeout=30,
)
except asyncio.TimeoutError:
await self._clear_react(message, emoji)
return None
else:
return reaction.emoji, user |
7,212 | limit log | #**********************************************************************
# Copyright 2020 Advanced Micro Devices, Inc
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#********************************************************************
import sys
import logging
from logging import *
from . import package_root_dir
file = logging.FileHandler(filename=str(package_root_dir()/'rprblender.log'), # TODO: Add creation time to this log name. Could be configurable.
mode='w',
encoding='utf-8')
file.setFormatter(logging.Formatter('%(asctime)s %(name)s [%(thread)d]: %(levelname)s %(message)s'))
console = logging.StreamHandler(stream=sys.stdout)
logger = logging.getLogger('rpr') # root logger for the addon
logger.addHandler(console)
console.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(name)s [%(thread)d]: %(message)s'))
logging.basicConfig(level=logging.DEBUG, handlers=[file])
console_filter = None
class Filter(logging.Filter):
level_show_always = logging.ERROR
def __init__(self, name, level_show_always, level_show_min):
super().__init__(name)
self.level_show_min = level_show_min
self.level_show_always = level_show_always
def filter(self, record: logging.LogRecord):
if self.level_show_always is not None:
if record.levelno >= self.level_show_always:
return True
return super().filter(record)
def is_level_allowed(levelno):
if not console_filter:
return True
if console_filter.level_show_min is not None:
if levelno < console_filter.level_show_min:
return False
return True
def METHOD_NAME(name, level_show_always=logging.INFO, level_show_min=logging.DEBUG):
global console_filter
if console_filter:
console.removeFilter(console_filter)
console_filter = None
if name is not None:
console_filter = Filter('rpr.'+name, level_show_always, level_show_min)
console.addFilter(console_filter)
def get_logger(tag):
return logger.getChild(tag) if tag else logger
def _log(log_fun, args):
msg = ' '.join(str(arg) for arg in args)
log_fun(msg)
def debug(*args, tag='default'):
if is_level_allowed(logging.DEBUG):
_log(get_logger(tag).debug, args)
def info(*args, tag='default'):
if is_level_allowed(logging.INFO):
_log(get_logger(tag).info, args)
def warn(*args, tag='default'):
if is_level_allowed(logging.WARN):
_log(get_logger(tag).warning, args)
def error(*args, tag='default'):
if is_level_allowed(logging.ERROR):
_log(get_logger(tag).error, args)
def critical(*args, tag='default'):
if is_level_allowed(logging.CRITICAL):
_log(get_logger(tag).critical, args)
class Log:
__tag: str = "default"
__default_level: int = logging.INFO
__default_method_name: str = 'info'
def __init__(self, tag: str = 'default', level: str = 'debug'):
if tag:
self.__tag = tag
level, method = {
'info': (logging.INFO, 'info'),
'debug': (logging.DEBUG, 'debug'),
'warn': (logging.WARN, 'warn'),
'error': (logging.ERROR, 'error'),
'critical': (logging.CRITICAL, 'critical'),
}.get(level, (None, None))
if method:
self.__default_level = level
self.__default_method_name = method
def __call__(self, *args):
if is_level_allowed(self.__default_level):
_log(getattr(get_logger(self.__tag), self.__default_method_name), args)
def info(self, *args):
info(*args, tag=self.__tag)
def debug(self, *args):
debug(*args, tag=self.__tag)
def warn(self, *args):
warn(*args, tag=self.__tag)
def error(self, *args):
error(*args, tag=self.__tag)
def critical(self, *args):
critical(*args, tag=self.__tag)
def dump_args(func):
    """Decorator that logs (via debug()) the arguments passed to *func* before calling it.

    Positional arguments are paired with their parameter names taken from the
    function's code object; keyword arguments are appended separately.
    """
    from functools import wraps  # local import keeps the decorator self-contained

    arg_names = func.__code__.co_varnames[:func.__code__.co_argcount]

    @wraps(func)  # preserve __name__/__doc__ of the wrapped function
    def echo_func(*args, **kwargs):
        debug("<{}>: {}{}".format(
            func.__name__,
            tuple("{}={}".format(name, arg) for name, arg in zip(arg_names, args)),
            " {}".format(kwargs.items()) if kwargs else "",
        ))
        return func(*args, **kwargs)
    return echo_func
|
7,213 | start bundle | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""UnitTests for DoFn lifecycle and bundle methods"""
# pytype: skip-file
import unittest
import pytest
import apache_beam as beam
from apache_beam.testing.test_pipeline import TestPipeline
class CallSequenceEnforcingDoFn(beam.DoFn):
    """DoFn whose lifecycle hooks assert the documented call ordering.

    Expected order per instance:
    setup -> (start_bundle -> process* -> finish_bundle)* -> teardown,
    with setup and teardown invoked at most once.
    """

    def __init__(self):
        # Flags/counters that the assertions in each lifecycle hook check.
        self._setup_called = False
        self._start_bundle_calls = 0
        self._finish_bundle_calls = 0
        self._teardown_called = False

    def setup(self):
        # setup must be the first hook and must not repeat.
        assert not self._setup_called, 'setup should not be called twice'
        assert self._start_bundle_calls == 0, \
            'setup should be called before start_bundle'
        assert self._finish_bundle_calls == 0, \
            'setup should be called before finish_bundle'
        assert not self._teardown_called, 'setup should be called before teardown'
        self._setup_called = True

    def METHOD_NAME(self):
        # start_bundle is only legal when no bundle is currently open.
        assert self._setup_called, 'setup should have been called'
        assert self._start_bundle_calls == self._finish_bundle_calls, \
            'there should be as many start_bundle calls as finish_bundle calls'
        assert not self._teardown_called, 'teardown should not have been called'
        self._start_bundle_calls += 1

    def process(self, element):
        # process is only legal inside an open bundle (one unmatched start_bundle).
        assert self._setup_called, 'setup should have been called'
        assert self._start_bundle_calls > 0, 'start_bundle should have been called'
        assert self._start_bundle_calls == self._finish_bundle_calls + 1, \
            'there should be one start_bundle call with no call to finish_bundle'
        assert not self._teardown_called, 'teardown should not have been called'
        return [element * element]

    def finish_bundle(self):
        # finish_bundle closes the currently open bundle.
        assert self._setup_called, 'setup should have been called'
        assert self._start_bundle_calls > 0, 'start_bundle should have been called'
        assert self._start_bundle_calls == self._finish_bundle_calls + 1, \
            'there should be one start_bundle call with no call to finish_bundle'
        assert not self._teardown_called, 'teardown should not have been called'
        self._finish_bundle_calls += 1

    def teardown(self):
        # teardown is only legal once, with all bundles closed.
        assert self._setup_called, 'setup should have been called'
        assert self._start_bundle_calls == self._finish_bundle_calls, \
            'there should be as many start_bundle calls as finish_bundle calls'
        assert not self._teardown_called, 'teardown should not be called twice'
        self._teardown_called = True
@pytest.mark.it_validatesrunner
class DoFnLifecycleTest(unittest.TestCase):
    """Runs the lifecycle-asserting DoFn on the configured (validates-runner) pipeline."""

    def test_dofn_lifecycle(self):
        with TestPipeline() as p:
            _ = (
                p
                | 'Start' >> beam.Create([1, 2, 3])
                | 'Do' >> beam.ParDo(CallSequenceEnforcingDoFn()))
# Assumes that the worker is run in the same process as the test.
class LocalDoFnLifecycleTest(unittest.TestCase):
    """Runs the lifecycle-asserting DoFn on in-process runners (direct and FnApi)."""

    def test_dofn_lifecycle(self):
        # Local imports: the runners are only needed by this test.
        from apache_beam.runners.direct import direct_runner
        from apache_beam.runners.portability import fn_api_runner
        runners = [
            direct_runner.BundleBasedDirectRunner(), fn_api_runner.FnApiRunner()
        ]
        for r in runners:
            with TestPipeline(runner=r) as p:
                _ = (
                    p
                    | 'Start' >> beam.Create([1, 2, 3])
                    | 'Do' >> beam.ParDo(CallSequenceEnforcingDoFn()))
# Assumes that the worker is run in the same process as the test.
if __name__ == '__main__':
unittest.main() |
7,214 | title | import json
import logging
from abc import (
ABCMeta,
abstractmethod,
)
from typing import (
Optional,
Union,
)
from uuid import uuid4
from gitlab.exceptions import GitlabError
from jinja2 import Template
from reconcile.utils.constants import PROJ_ROOT
from reconcile.utils.gitlab_api import GitLabApi
from reconcile.utils.mr.labels import DO_NOT_MERGE_HOLD
from reconcile.utils.sqs_gateway import SQSGateway
EMAIL_TEMPLATE = PROJ_ROOT / "templates" / "email.yml.j2"
LOG = logging.getLogger(__name__)
class CancelMergeRequest(Exception):
    """
    Used when the Merge Request processing is canceled.
    """

class MergeRequestProcessingError(Exception):
    """
    Used when the merge request could not be processed for technical reasons
    """

# Clients a merge request can be submitted to (see MergeRequestBase.submit).
MRClient = Union[GitLabApi, SQSGateway]
class MergeRequestBase(metaclass=ABCMeta):
"""
Base abstract class for all merge request types.
"""
name = "merge-request-base"
def __init__(self):
# Let's first get all the attributes from the instance
# and use for the SQS Msg payload. With that, the msg
# to the SQS is enough to create a new, similar, instance
# of the child class.
self.sqs_msg_data = {**self.__dict__}
self.gitlab_cli = None
self.labels = [DO_NOT_MERGE_HOLD]
random_id = str(uuid4())[:6]
self.branch = f"{self.name}-{random_id}"
self.branch_created = False
self.main_branch = "master"
self.remove_source_branch = True
self.cancelled = False
def cancel(self, message):
self.cancelled = True
raise CancelMergeRequest(
f"{self.name} MR canceled for "
f"branch {self.branch}. "
f"Reason: {message}"
)
@property
@abstractmethod
def METHOD_NAME(self) -> str:
"""
Title of the Merge Request.
:return: Merge Request title as seen in the Gitlab Web UI
:rtype: str
"""
@property
@abstractmethod
def description(self) -> str:
"""
Description of the Merge Request.
:return: Merge Request description as seen in the Gitlab Web UI
:rtype: str
"""
@abstractmethod
def process(self, gitlab_cli):
"""
Called by `submit_to_gitlab`, this method is the place for
user-defined steps to create the commits of a merge request.
:param gitlab_cli:
:type gitlab_cli: GitLabApi
"""
@property
def sqs_data(self):
"""
The SQS Message payload (MessageBody) generated out of
the Merge Request class instance.
"""
return {
"pr_type": self.name,
**self.sqs_msg_data,
}
def submit_to_sqs(self, sqs_cli) -> None:
"""
Sends the MR message to SQS.
:param sqs_cli: The SQS Client instance.
:type sqs_cli: SQSGateway
"""
sqs_cli.send_message(self.sqs_data)
@property
def gitlab_data(self):
"""
The Gitlab payload for creating the Merge Request.
"""
return {
"source_branch": self.branch,
"target_branch": self.main_branch,
"title": self.METHOD_NAME,
"description": self.description,
"remove_source_branch": self.remove_source_branch,
"labels": self.labels,
}
def submit_to_gitlab(self, gitlab_cli):
"""
Sends the MR to Gitlab.
:param gitlab_cli: The SQS Client instance.
:type gitlab_cli: GitLabApi
:raises:
MergeRequestProcessingError: Raised when it was not possible
to open a MR
"""
try:
# Avoiding duplicate MRs
if gitlab_cli.mr_exists(METHOD_NAME=self.METHOD_NAME):
self.cancel(
f"MR with the same name '{self.METHOD_NAME}' "
f"already exists. Aborting MR creation."
)
self.ensure_tmp_branch_exists(gitlab_cli)
self.process(gitlab_cli=gitlab_cli)
# Avoiding empty MRs
if not self.diffs(gitlab_cli):
self.cancel(
f"No changes when compared to {self.main_branch}. "
"Aborting MR creation."
)
return gitlab_cli.project.mergerequests.create(self.gitlab_data)
except CancelMergeRequest as mr_cancel:
# cancellation is a valid behaviour. it indicates, that the
# operation is not required, therefore we will not signal
# a problem back to the caller
self.delete_tmp_branch(gitlab_cli)
LOG.info(mr_cancel)
except Exception as err:
self.delete_tmp_branch(gitlab_cli)
# NOTE
# sqs_msg_data might some day include confidential data and
# we will need to revisit implications that will come from
# logging this exception
raise MergeRequestProcessingError(
f"error processing {self.name} changes "
f"{json.dumps(self.sqs_msg_data)} "
f"into temporary branch {self.branch}. "
f"Reason: {err}"
) from err
def ensure_tmp_branch_exists(self, gitlab_cli):
if not self.branch_created:
gitlab_cli.create_branch(
new_branch=self.branch, source_branch=self.main_branch
)
self.branch_created = True
def delete_tmp_branch(self, gitlab_cli):
if self.branch_created:
try:
gitlab_cli.delete_branch(branch=self.branch)
self.branch_created = False
except GitlabError as gitlab_error:
# we are not going to let an otherwise fine MR
# processing fail just because of this
LOG.error(
f"Failed to delete branch {self.branch}. " f"Reason: {gitlab_error}"
)
def diffs(self, gitlab_cli):
return gitlab_cli.project.repository_compare(
from_=self.main_branch, to=self.branch
)["diffs"]
def submit(self, cli: MRClient):
if isinstance(cli, GitLabApi):
return self.submit_to_gitlab(gitlab_cli=cli)
if isinstance(cli, SQSGateway):
return self.submit_to_sqs(sqs_cli=cli)
raise AttributeError(f"client {cli} not supported")
def app_interface_email(
    name: str,
    subject: str,
    body: str,
    users: Optional[list[str]] = None,
    aliases: Optional[list[str]] = None,
    aws_accounts: Optional[list[str]] = None,
    apps: Optional[list[str]] = None,
) -> str:
    """Render app-interface-email template."""
    # Read the raw template first, then render with an explicit context dict.
    with open(EMAIL_TEMPLATE) as fh:
        raw_template = fh.read()
    template = Template(raw_template, keep_trailing_newline=True, trim_blocks=True)
    context = {
        "NAME": name,
        "SUBJECT": subject,
        "BODY": body,
        "USERS": users,
        "ALIASES": aliases,
        "AWS_ACCOUNTS": aws_accounts,
        "SERVICES": apps,
    }
    return template.render(**context)
7,215 | generate config map | # SPDX-FileCopyrightText: The RamenDR authors
# SPDX-License-Identifier: Apache-2.0
import drenv
from drenv import kubectl
from drenv import minio
from . import command
def register(commands):
parser = commands.add_parser(
"config",
help="Configure ramen hub operator",
)
parser.set_defaults(func=run)
command.add_common_arguments(parser)
command.add_ramen_arguments(parser)
def run(args):
env = command.env_info(args)
s3_secret = generate_ramen_s3_secret(args)
cloud_secret = generate_cloud_credentials_secret(env["clusters"][0], args)
if env["hub"]:
hub_cm = METHOD_NAME("hub", env["clusters"], args)
wait_for_ramen_hub_operator(env["hub"], args)
create_ramen_s3_secret(env["hub"], s3_secret)
for cluster in env["clusters"]:
create_cloud_credentials_secret(cluster, cloud_secret)
create_ramen_config_map(env["hub"], hub_cm)
create_hub_dr_resources(env["hub"], env["clusters"], env["topology"])
wait_for_dr_clusters(env["hub"], env["clusters"], args)
wait_for_dr_policy(env["hub"], args)
else:
dr_cluster_cm = METHOD_NAME("dr-cluster", env["clusters"], args)
for cluster in env["clusters"]:
create_ramen_s3_secret(cluster, s3_secret)
create_cloud_credentials_secret(cluster, cloud_secret)
create_ramen_config_map(cluster, dr_cluster_cm)
def wait_for_ramen_hub_operator(hub, args):
command.info("Waiting until ramen-hub-operator is rolled out")
kubectl.rollout(
"status",
"deploy/ramen-hub-operator",
f"--namespace={args.ramen_namespace}",
"--timeout=180s",
context=hub,
log=command.debug,
)
def generate_ramen_s3_secret(args):
template = drenv.template(command.resource("ramen-s3-secret.yaml"))
return template.substitute(namespace=args.ramen_namespace)
def create_ramen_s3_secret(cluster, yaml):
command.info("Creating ramen s3 secret in cluster '%s'", cluster)
kubectl.apply("--filename=-", input=yaml, context=cluster, log=command.debug)
def generate_cloud_credentials_secret(cluster, args):
command.debug("Getting velero cloud credentials from cluster '%s'", cluster)
cloud = kubectl.get(
"secret/cloud-credentials",
"--namespace=velero",
"--output=jsonpath={.data.cloud}",
context=cluster,
)
template = drenv.template(command.resource("cloud-credentials-secret.yaml"))
return template.substitute(cloud=cloud, namespace=args.ramen_namespace)
def create_cloud_credentials_secret(cluster, yaml):
command.info("Creating cloud credentials secret in cluster '%s'", cluster)
kubectl.apply("--filename=-", input=yaml, context=cluster, log=command.debug)
def METHOD_NAME(controller, clusters, args):
    """Render the ramen operator ConfigMap yaml for *controller* ('hub' or 'dr-cluster').

    NOTE(review): only clusters[0] and clusters[1] are substituted — this
    assumes exactly two managed clusters; confirm against callers.
    """
    template = drenv.template(command.resource("configmap.yaml"))
    return template.substitute(
        name=f"ramen-{controller}-operator-config",
        auto_deploy="true",
        cluster1=clusters[0],
        cluster2=clusters[1],
        minio_url_cluster1=minio.service_url(clusters[0]),
        minio_url_cluster2=minio.service_url(clusters[1]),
        namespace=args.ramen_namespace,
    )
def create_ramen_config_map(cluster, yaml):
command.info("Updating ramen config map in cluster '%s'", cluster)
kubectl.apply("--filename=-", input=yaml, context=cluster, log=command.debug)
def create_hub_dr_resources(hub, clusters, topology):
for name in ["dr-clusters", "dr-policy"]:
command.info("Creating %s for %s", name, topology)
template = drenv.template(command.resource(f"{topology}/{name}.yaml"))
yaml = template.substitute(cluster1=clusters[0], cluster2=clusters[1])
kubectl.apply("--filename=-", input=yaml, context=hub, log=command.debug)
def wait_for_dr_clusters(hub, clusters, args):
command.info("Waiting until DRClusters report phase")
for name in clusters:
drenv.wait_for(
f"drcluster/{name}",
output="jsonpath={.status.phase}",
namespace=args.ramen_namespace,
timeout=180,
profile=hub,
log=command.debug,
)
command.info("Waiting until DRClusters phase is available")
kubectl.wait(
"drcluster",
"--all",
"--for=jsonpath={.status.phase}=Available",
f"--namespace={args.ramen_namespace}",
context=hub,
log=command.debug,
)
def wait_for_dr_policy(hub, args):
command.info("Waiting until DRPolicy is validated")
kubectl.wait(
"drpolicy/dr-policy",
"--for=condition=Validated",
f"--namespace={args.ramen_namespace}",
context=hub,
log=command.debug,
) |
7,216 | load onnx model | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
from collections import defaultdict
from typing import Callable, Optional
import numpy
import onnx
from onnxruntime import InferenceSession
from sparseml.onnx.utils import get_tensor_shape
from sparsezoo import Model
__all__ = [
"get_configs_with_cadence",
"model_op_counts_test",
"model_inputs_outputs_test",
"TEST_OPS",
]
def get_configs_with_cadence(cadence: str, dir_path: str = "."):
    """
    Find all config files in the given directory with a matching cadence.

    A config matches when its first ``cadence:`` line, with surrounding
    whitespace and double quotes stripped and lower-cased, equals *cadence*.

    :param cadence: string signifying how often to run this test. Possible values are:
        commit, daily, weekly
    :param dir_path: path to the directory in which to search for the config files
    :return: List of file paths to matching configs
    """
    all_files_found = glob.glob(
        os.path.join(dir_path, "configs", "**", "test*.yaml"), recursive=True
    )
    matching_files = []
    for file in all_files_found:
        # Stream the file instead of readlines(); stop at the first cadence line.
        with open(file) as f:
            for line in f:
                if line.startswith("cadence:"):
                    # split(":", 1) keeps values that themselves contain ':' intact
                    value = line.split(":", 1)[1].strip().strip('"').lower()
                    if value == cadence:
                        matching_files.append(file)
                    break
    print(f"\nFor {cadence} found matching files: {matching_files}")
    return matching_files
"""
Network graph operations to include when comparing operation counts between models
"""
TEST_OPS = {
"MatMul",
"Gemm",
"Conv",
"MatMulInteger",
"ConvInteger",
"QLinearMatMul",
"QLinearConv",
}
def model_op_counts_test(
model_path_a: str,
model_path_b: str,
):
"""
Test that the number of operations of each type, are the same between two onnx
models.
:param model_path_a: path to one onnx model
:param model_path_b: path to other onnx model
"""
model_a = METHOD_NAME(model_path_a)
model_b = METHOD_NAME(model_path_b)
def _get_model_op_counts(model):
op_counts = defaultdict(int)
for node in model.graph.node:
if node.op_type in TEST_OPS:
op_counts[node.op_type] += 1
return op_counts
op_counts_a = _get_model_op_counts(model_a)
op_counts_b = _get_model_op_counts(model_b)
assert len(op_counts_a) > 0
assert len(op_counts_a) == len(op_counts_b)
for op, count_a in op_counts_a.items():
assert op in op_counts_b
assert count_a == op_counts_b[op]
def model_inputs_outputs_test(
model_path_a: str,
model_path_b: str,
input_getter: Optional[Callable] = None,
**input_getter_kwargs,
):
"""
Test that the output generated by two onnx models is similar to within some error
when given the same input
:param model_path_a: path to one onnx model
:param model_path_b: path to other onnx model
:input_getter: optional function to replace generic input generation routine. To be
used for models/integrations which don't take numpy arrays as input
"""
# compare export and target graphs and build fake data
model_a = METHOD_NAME(model_path_a)
model_b = METHOD_NAME(model_path_b)
assert len(model_a.graph.input) == len(model_b.graph.input)
assert len(model_a.graph.output) == len(model_b.graph.output)
sample_input = {}
output_names = []
if input_getter:
sample_input = input_getter(**input_getter_kwargs)
else:
for input_a, input_b in zip(model_a.graph.input, model_b.graph.input):
assert input_a.name == input_b.name
input_a_shape = get_tensor_shape(input_a)
assert input_a_shape == get_tensor_shape(input_b)
sample_input[input_a.name] = numpy.random.randn(*input_a_shape).astype(
numpy.float32
)
for output_a, output_b in zip(model_a.graph.output, model_b.graph.output):
assert output_a.name == output_b.name
assert get_tensor_shape(output_a) == get_tensor_shape(output_b)
output_names.append(output_a.name)
# run sample forward and test absolute max diff
ort_sess_a = InferenceSession(model_path_a)
ort_sess_b = InferenceSession(model_path_b)
forward_output_a = ort_sess_a.run(output_names, sample_input)
forward_output_b = ort_sess_b.run(output_names, sample_input)
for out_a, out_b in zip(forward_output_a, forward_output_b):
assert numpy.max(numpy.abs(out_a - out_b)) <= 1e-4
def METHOD_NAME(path: str):
    """Load an ONNX model from a local file path or a SparseZoo stub ('zoo:...')."""
    if path.startswith("zoo:"):
        # Resolve the stub via sparsezoo and use the downloaded onnx file path.
        model = Model(path)
        path_onnx = model.onnx_model.path
    else:
        path_onnx = path
    return onnx.load(path_onnx)
7,217 | test git command not found | # -*- coding: utf-8 -*-
# test_exc.py
# Copyright (C) 2008, 2009, 2016 Michael Trier (mtrier@gmail.com) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
import re
import ddt
from git.exc import (
InvalidGitRepositoryError,
WorkTreeRepositoryUnsupported,
NoSuchPathError,
CommandError,
GitCommandNotFound,
GitCommandError,
CheckoutError,
CacheError,
UnmergedEntriesError,
HookExecutionError,
RepositoryDirtyError,
)
from git.util import remove_password_if_present
from test.lib import TestBase
import itertools as itt
_cmd_argvs = (
("cmd",),
("θνιψοδε",),
("θνιψοδε", "normal", "argvs"),
("cmd", "ελληνικα", "args"),
("θνιψοδε", "κι", "αλλα", "strange", "args"),
("θνιψοδε", "κι", "αλλα", "non-unicode", "args"),
(
"git",
"clone",
"-v",
"https://fakeuser:fakepassword1234@fakerepo.example.com/testrepo",
),
)
_causes_n_substrings = (
(None, None), # noqa: E241 @IgnorePep8
(7, "exit code(7)"), # noqa: E241 @IgnorePep8
("Some string", "'Some string'"), # noqa: E241 @IgnorePep8
("παλιο string", "'παλιο string'"), # noqa: E241 @IgnorePep8
(Exception("An exc."), "Exception('An exc.')"), # noqa: E241 @IgnorePep8
(Exception("Κακια exc."), "Exception('Κακια exc.')"), # noqa: E241 @IgnorePep8
(object(), "<object object at "), # noqa: E241 @IgnorePep8
)
_streams_n_substrings = (
None,
"steram",
"ομορφο stream",
)
@ddt.ddt
class TExc(TestBase):
def test_ExceptionsHaveBaseClass(self):
from git.exc import GitError
self.assertIsInstance(GitError(), Exception)
exception_classes = [
InvalidGitRepositoryError,
WorkTreeRepositoryUnsupported,
NoSuchPathError,
CommandError,
GitCommandNotFound,
GitCommandError,
CheckoutError,
CacheError,
UnmergedEntriesError,
HookExecutionError,
RepositoryDirtyError,
]
for ex_class in exception_classes:
self.assertTrue(issubclass(ex_class, GitError))
@ddt.data(*list(itt.product(_cmd_argvs, _causes_n_substrings, _streams_n_substrings)))
def test_CommandError_unicode(self, case):
argv, (cause, subs), stream = case
cls = CommandError
c = cls(argv, cause)
s = str(c)
self.assertIsNotNone(c._msg)
self.assertIn(" cmdline: ", s)
for a in remove_password_if_present(argv):
self.assertIn(a, s)
if not cause:
self.assertIn("failed!", s)
else:
self.assertIn(" failed due to:", s)
if subs is not None:
# Substrings (must) already contain opening `'`.
subs = "(?<!')%s(?!')" % re.escape(subs)
self.assertRegex(s, subs)
if not stream:
c = cls(argv, cause)
s = str(c)
self.assertNotIn(" stdout:", s)
self.assertNotIn(" stderr:", s)
else:
c = cls(argv, cause, stream)
s = str(c)
self.assertIn(" stderr:", s)
self.assertIn(stream, s)
c = cls(argv, cause, None, stream)
s = str(c)
self.assertIn(" stdout:", s)
self.assertIn(stream, s)
c = cls(argv, cause, stream, stream + "no2")
s = str(c)
self.assertIn(" stderr:", s)
self.assertIn(stream, s)
self.assertIn(" stdout:", s)
self.assertIn(stream + "no2", s)
@ddt.data(
(["cmd1"], None),
(["cmd1"], "some cause"),
(["cmd1"], Exception()),
)
def METHOD_NAME(self, init_args):
argv, cause = init_args
c = GitCommandNotFound(argv, cause)
s = str(c)
self.assertIn(argv[0], s)
if cause:
self.assertIn(" not found due to: ", s)
self.assertIn(str(cause), s)
else:
self.assertIn(" not found!", s)
@ddt.data(
(["cmd1"], None),
(["cmd1"], "some cause"),
(["cmd1", "https://fakeuser@fakerepo.example.com/testrepo"], Exception()),
)
def test_GitCommandError(self, init_args):
argv, cause = init_args
c = GitCommandError(argv, cause)
s = str(c)
for arg in remove_password_if_present(argv):
self.assertIn(arg, s)
if cause:
self.assertIn(" failed due to: ", s)
self.assertIn(str(cause), s)
else:
self.assertIn(" failed!", s)
@ddt.data(
(["cmd1"], None),
(["cmd1"], "some cause"),
(["cmd1"], Exception()),
)
def test_HookExecutionError(self, init_args):
argv, cause = init_args
c = HookExecutionError(argv, cause)
s = str(c)
self.assertIn(argv[0], s)
if cause:
self.assertTrue(s.startswith("Hook("), s)
self.assertIn(str(cause), s)
else:
self.assertIn(" failed!", s) |
7,218 | test import parser direct | import sys
import unittest
import pytest
import six
MODULE_TYPE = type(sys)
# Tests live in datetutil/test which cause a RuntimeWarning for Python2 builds.
# But since we expect lazy imports tests to fail for Python < 3.7 we'll ignore those
# warnings with this filter.
if six.PY2:
filter_import_warning = pytest.mark.filterwarnings("ignore::RuntimeWarning")
else:
def filter_import_warning(f):
return f
@pytest.fixture(scope="function")
def clean_import():
    """Create a somewhat clean import base for lazy import tests"""
    # Snapshot every already-imported dateutil module so it can be restored.
    du_modules = {
        mod_name: mod
        for mod_name, mod in sys.modules.items()
        if mod_name.startswith("dateutil")
    }
    other_modules = {
        mod_name for mod_name in sys.modules if mod_name not in du_modules
    }
    # Evict dateutil so the test under this fixture triggers a fresh import.
    for mod_name in du_modules:
        del sys.modules[mod_name]
    yield
    # Delete anything that wasn't in the origin sys.modules list
    for mod_name in list(sys.modules):
        if mod_name not in other_modules:
            del sys.modules[mod_name]
    # Restore original modules
    for mod_name, mod in du_modules.items():
        sys.modules[mod_name] = mod
@filter_import_warning
@pytest.mark.parametrize(
"module",
["easter", "parser", "relativedelta", "rrule", "tz", "utils", "zoneinfo"],
)
def test_lazy_import(clean_import, module):
"""Test that dateutil.[submodule] works for py version > 3.7"""
import dateutil, importlib
if sys.version_info < (3, 7):
pytest.xfail("Lazy loading does not work for Python < 3.7")
mod_obj = getattr(dateutil, module, None)
assert isinstance(mod_obj, MODULE_TYPE)
mod_imported = importlib.import_module("dateutil.%s" % module)
assert mod_obj is mod_imported
HOST_IS_WINDOWS = sys.platform.startswith('win')
def test_import_version_str():
""" Test that dateutil.__version__ can be imported"""
from dateutil import __version__
def test_import_version_root():
import dateutil
assert hasattr(dateutil, '__version__')
# Test that dateutil.easter-related imports work properly
def test_import_easter_direct():
import dateutil.easter
def test_import_easter_from():
from dateutil import easter
def test_import_easter_start():
from dateutil.easter import easter
# Test that dateutil.parser-related imports work properly
def METHOD_NAME():
import dateutil.parser
def test_import_parser_from():
from dateutil import parser
def test_import_parser_all():
# All interface
from dateutil.parser import parse
from dateutil.parser import parserinfo
# Other public classes
from dateutil.parser import parser
for var in (parse, parserinfo, parser):
assert var is not None
# Test that dateutil.relativedelta-related imports work properly
def test_import_relative_delta_direct():
import dateutil.relativedelta
def test_import_relative_delta_from():
from dateutil import relativedelta
def test_import_relative_delta_all():
from dateutil.relativedelta import relativedelta
from dateutil.relativedelta import MO, TU, WE, TH, FR, SA, SU
for var in (relativedelta, MO, TU, WE, TH, FR, SA, SU):
assert var is not None
# In the public interface but not in all
from dateutil.relativedelta import weekday
assert weekday is not None
# Test that dateutil.rrule related imports work properly
def test_import_rrule_direct():
import dateutil.rrule
def test_import_rrule_from():
from dateutil import rrule
def test_import_rrule_all():
from dateutil.rrule import rrule
from dateutil.rrule import rruleset
from dateutil.rrule import rrulestr
from dateutil.rrule import YEARLY, MONTHLY, WEEKLY, DAILY
from dateutil.rrule import HOURLY, MINUTELY, SECONDLY
from dateutil.rrule import MO, TU, WE, TH, FR, SA, SU
rr_all = (rrule, rruleset, rrulestr,
YEARLY, MONTHLY, WEEKLY, DAILY,
HOURLY, MINUTELY, SECONDLY,
MO, TU, WE, TH, FR, SA, SU)
for var in rr_all:
assert var is not None
# In the public interface but not in all
from dateutil.rrule import weekday
assert weekday is not None
# Test that dateutil.tz related imports work properly
def test_import_tztest_direct():
import dateutil.tz
def test_import_tz_from():
from dateutil import tz
def test_import_tz_all():
from dateutil.tz import tzutc
from dateutil.tz import tzoffset
from dateutil.tz import tzlocal
from dateutil.tz import tzfile
from dateutil.tz import tzrange
from dateutil.tz import tzstr
from dateutil.tz import tzical
from dateutil.tz import gettz
from dateutil.tz import tzwin
from dateutil.tz import tzwinlocal
from dateutil.tz import UTC
from dateutil.tz import datetime_ambiguous
from dateutil.tz import datetime_exists
from dateutil.tz import resolve_imaginary
tz_all = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange",
"tzstr", "tzical", "gettz", "datetime_ambiguous",
"datetime_exists", "resolve_imaginary", "UTC"]
tz_all += ["tzwin", "tzwinlocal"] if sys.platform.startswith("win") else []
lvars = locals()
for var in tz_all:
assert lvars[var] is not None
# Test that dateutil.tzwin related imports work properly
@pytest.mark.skipif(not HOST_IS_WINDOWS, reason="Requires Windows")
def test_import_tz_windows_direct():
import dateutil.tzwin
@pytest.mark.skipif(not HOST_IS_WINDOWS, reason="Requires Windows")
def test_import_tz_windows_from():
from dateutil import tzwin
@pytest.mark.skipif(not HOST_IS_WINDOWS, reason="Requires Windows")
def test_import_tz_windows_star():
from dateutil.tzwin import tzwin
from dateutil.tzwin import tzwinlocal
tzwin_all = [tzwin, tzwinlocal]
for var in tzwin_all:
assert var is not None
# Test imports of Zone Info
def test_import_zone_info_direct():
import dateutil.zoneinfo
def test_import_zone_info_from():
from dateutil import zoneinfo
def test_import_zone_info_star():
from dateutil.zoneinfo import gettz
from dateutil.zoneinfo import gettz_db_metadata
from dateutil.zoneinfo import rebuild
zi_all = (gettz, gettz_db_metadata, rebuild)
for var in zi_all:
assert var is not None |
7,219 | get merged state | from enum import Enum
import cftime
import pace.util
import numpy as np
import pytest
import xarray as xr
from runtime.derived_state import DerivedFV3State, FV3StateMapper, MergedState
class MockFV3GFS:
def __init__(self):
self.set_state_called = False
np.random.seed(0)
nx, ny = 10, 10
lat = pace.util.Quantity(
np.random.rand(ny, nx), dims=["y", "x"], units="radians"
)
lon = pace.util.Quantity(
np.random.rand(ny, nx), dims=["y", "x"], units="radians"
)
lhtfl = pace.util.Quantity(np.random.rand(ny, nx), dims=["y", "x"], units="deg")
self.state = {
"time": cftime.DatetimeJulian(2016, 1, 1),
"latitude": lat,
"longitude": lon,
"lhtfl": lhtfl,
"a": lhtfl,
}
def get_state(self, names):
# need this for the data to remain unchanged for equality tests
if any(name not in self.state for name in names):
raise pace.util.InvalidQuantityError("blah")
return {name: self.state[name] for name in names}
def set_state_mass_conserving(self, data):
self.set_state_called = True
for key, value in data.items():
assert isinstance(value, pace.util.Quantity)
if key not in self.state:
raise ValueError(f"{key} not in data.")
self.state[key] = value
def get_diagnostic_by_name(self, diagnostic):
return self.get_state([diagnostic])[diagnostic]
def get_tracer_metadata(self):
return []
class Getters(Enum):
original = 1
merged = 2
@pytest.fixture(params=Getters)
def getter(request):
fv3gfs = MockFV3GFS()
state = DerivedFV3State(fv3gfs)
if request.param == Getters.original:
return state
elif request.param == Getters.merged:
return MergedState(state, {})
def test_DerivedFV3State(getter):
assert isinstance(getter["longitude"], xr.DataArray)
def test_State_keys(getter):
assert len(getter.keys()) > 0
# test that function registered under DerivedMapping works
def test_DerivedFV3State_cos_zenith(getter):
output = getter["cos_zenith_angle"]
assert isinstance(output, xr.DataArray)
assert "time" not in output.dims
def test_DerivedFV3State_latent_heat_flux(getter):
output = getter["latent_heat_flux"]
assert isinstance(output, xr.DataArray)
def test_DerivedFV3State_time_property(getter):
assert isinstance(getter.time, cftime.DatetimeJulian)
def test_DerivedFV3State_time_dataarray(getter):
assert isinstance(getter["time"], xr.DataArray)
def test_DerivedFV3State_setitem(getter):
item = xr.DataArray([1.0], dims=["x"], attrs={"units": "m"})
# Check that data is passed to `MockFV3GFS.set_state` correctly
getter["a"] = item
xr.testing.assert_equal(item, getter["a"])
def test_FV3StateMapper():
fv3gfs = MockFV3GFS()
mapper = FV3StateMapper(fv3gfs)
assert isinstance(mapper["latitude"], xr.DataArray)
def test_FV3StateMapper_alternate_keys():
fv3gfs = MockFV3GFS()
mapper = FV3StateMapper(fv3gfs, alternate_keys={"lon": "longitude"})
np.testing.assert_array_almost_equal(mapper["lon"], mapper["longitude"])
def test_FV3StateMapper_raises_key_error_on_get():
fv3gfs = MockFV3GFS()
mapper = FV3StateMapper(fv3gfs)
with pytest.raises(KeyError):
assert "not in fv3" not in mapper
mapper["not in fv3"]
def test_DerivedFV3State_raises_key_error_on_set():
mapper = DerivedFV3State(MockFV3GFS())
assert "not in fv3" not in mapper
with pytest.raises(KeyError):
mapper["not in fv3"] = xr.DataArray(0, attrs=dict(units=""))
def METHOD_NAME():
    """Build (DerivedFV3State, backing python dict, MergedState) for merged-state tests."""
    fv3gfs = MockFV3GFS()
    getter = DerivedFV3State(fv3gfs)
    python_state = {}
    return getter, python_state, MergedState(getter, python_state)
def test_MergedState_time():
wrapper, _, state = METHOD_NAME()
assert state.time == wrapper.time
def test_MergedState_use_python_state():
wrapper, python_state, merged_state = METHOD_NAME()
item = xr.DataArray([1.0], dims=["x"], attrs={"units": "m"})
merged_state["not in wrapper"] = item
xr.testing.assert_equal(item, python_state["not in wrapper"])
def test_MergedState_keys():
    """Keys written through MergedState show up in ``.keys()``."""
    _, _, merged = METHOD_NAME()
    name = "not in wrapper"
    merged[name] = xr.DataArray(1.0, attrs={"units": ""})
    assert name in merged.keys()
7,220 | lint setup py | # -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import pathlib
import shutil
import subprocess
import sys
import nox # type: ignore
ALL_PYTHON = [
    "3.7",
    "3.8",
    "3.9",
    "3.10",
    "3.11",
]

CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()

LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt"
# .strip() removes the trailing newline that `setup.py --name` emits; without
# it the package name is passed to lower-bound-checker with an embedded "\n".
PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8").strip()

BLACK_VERSION = "black==22.3.0"
BLACK_PATHS = ["docs", "google", "tests", "samples", "noxfile.py", "setup.py"]
DEFAULT_PYTHON_VERSION = "3.11"

# Sessions run by default when `nox` is invoked without -s/-e.  Note:
# * nox reads `nox.options.sessions`; assigning to the bare `nox.sessions`
#   attribute is a silent no-op.
# * The comma after "check_lower_bounds" is required — without it Python
#   concatenates the adjacent literals into "check_lower_boundsdocs" and
#   neither session would be selected.
nox.options.sessions = [
    "unit",
    "cover",
    "mypy",
    "check_lower_bounds",
    # exclude update_lower_bounds from default
    "docs",
    "blacken",
    "lint",
    "lint_setup_py",
]
@nox.session(python=ALL_PYTHON)
def unit(session):
    """Run the unit test suite."""
    session.install(
        "coverage",
        "pytest",
        "pytest-cov",
        "pytest-asyncio",
        'asyncmock; python_version < "3.8"',
    )
    session.install("-e", ".")
    session.run(
        "py.test",
        "--quiet",
        "--cov=google/cloud/logging_v2/",
        "--cov=tests/",
        "--cov-config=.coveragerc",
        "--cov-report=term",
        "--cov-report=html",
        os.path.join("tests", "unit", "".join(session.posargs)),
    )
@nox.session(python=DEFAULT_PYTHON_VERSION)
def cover(session):
    """Run the final coverage report.

    This outputs the coverage report aggregating coverage from the unit
    test runs (not system test runs), and then erases coverage data.
    """
    session.install("coverage", "pytest-cov")
    for command in (
        ("coverage", "report", "--show-missing", "--fail-under=100"),
        ("coverage", "erase"),
    ):
        session.run(*command)
@nox.session(python=ALL_PYTHON)
def mypy(session):
    """Run the mypy type checker over the installed package."""
    # Type stubs for the third-party libraries mypy encounters.
    session.install("mypy", "types-requests", "types-protobuf")
    session.install(".")
    session.run("mypy", "--explicit-package-bases", "google")
@nox.session
def update_lower_bounds(session):
    """Update lower bounds in constraints.txt to match setup.py"""
    session.install("google-cloud-testutils")
    session.install(".")
    # Rewrites the constraints file in place from setup.py's declared minimums.
    session.run(
        "lower-bound-checker",
        "update",
        "--package-name",
        PACKAGE_NAME,
        "--constraints-file",
        str(LOWER_BOUND_CONSTRAINTS_FILE),
    )
@nox.session
def check_lower_bounds(session):
    """Check lower bounds in setup.py are reflected in constraints file"""
    session.install("google-cloud-testutils")
    session.install(".")
    # Fails the session if constraints.txt disagrees with setup.py's minimums.
    session.run(
        "lower-bound-checker",
        "check",
        "--package-name",
        PACKAGE_NAME,
        "--constraints-file",
        str(LOWER_BOUND_CONSTRAINTS_FILE),
    )
@nox.session(python=DEFAULT_PYTHON_VERSION)
def docs(session):
    """Build the docs for this library."""
    session.install("-e", ".")
    session.install("sphinx==7.0.1", "alabaster", "recommonmark")

    build_dir = os.path.join("docs", "_build")
    # Start from a clean build tree so stale output never leaks into the docs.
    shutil.rmtree(build_dir, ignore_errors=True)
    session.run(
        "sphinx-build",
        "-W",  # warnings as errors
        "-T",  # show full traceback on exception
        "-N",  # no colors
        "-b",
        "html",
        "-d",
        os.path.join(build_dir, "doctrees", ""),
        os.path.join("docs", ""),
        os.path.join(build_dir, "html", ""),
    )
@nox.session(python=DEFAULT_PYTHON_VERSION)
def lint(session):
    """Run linters.

    Returns a failure if the linters find linting errors or sufficiently
    serious code quality issues.
    """
    session.install("flake8", BLACK_VERSION)
    session.run("black", "--check", *BLACK_PATHS)
    session.run("flake8", "google", "tests", "samples")
@nox.session(python=DEFAULT_PYTHON_VERSION)
def blacken(session):
    """Run black. Format code to uniform standard."""
    session.install(BLACK_VERSION)
    session.run("black", *BLACK_PATHS)
@nox.session(python=DEFAULT_PYTHON_VERSION)
def METHOD_NAME(session):
    """Verify that setup.py is valid (including RST check)."""
    # docutils/pygments are needed for the --restructuredtext long-description check.
    session.install("docutils", "pygments")
    session.run(
        "python",
        "setup.py",
        "check",
        "--restructuredtext",
        "--strict",
    )
7,221 | base loaded | # This file is a part of the AnyBlok project
#
# Copyright (C) 2018 Jean-Sebastien SUZANNE <jssuzanne@anybox.fr>
# Copyright (C) 2018 Denis VIVIÈS <dvivies@geoblink.com>
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file,You can
# obtain one at http://mozilla.org/MPL/2.0/.
import logging
from copy import deepcopy
import pytest
from sqlalchemy_utils.functions import (
create_database,
database_exists,
drop_database,
)
from anyblok.blok import BlokManager
from anyblok.config import Configuration, get_url
from anyblok.environment import EnvironmentManager
from anyblok.registry import RegistryManager
from anyblok.testing import sgdb_in
logger = logging.getLogger(__name__)
def init_registry_with_bloks(bloks, function, **kwargs):
    """Build a unittest registry with *bloks* installed/updated.

    :param bloks: blok name(s) to ensure (str, list or tuple); the
        "anyblok-test" blok is always appended.
    :param function: optional callable executed (with ``**kwargs``) inside the
        "current_blok" environment before the registry is built — typically
        used to declare test models.
    :return: the upgraded registry.
    """
    if bloks is None:
        bloks = []
    # Normalize the bloks argument to a mutable list.
    if isinstance(bloks, tuple):
        bloks = list(bloks)
    if isinstance(bloks, str):
        bloks = [bloks]
    anyblok_test_name = "anyblok-test"
    if anyblok_test_name not in bloks:
        bloks.append(anyblok_test_name)
    # Snapshot the declarations so anything `function` registers can be
    # rolled back in the finally block below.
    loaded_bloks = deepcopy(RegistryManager.loaded_bloks)
    if function is not None:
        EnvironmentManager.set("current_blok", anyblok_test_name)
        try:
            function(**kwargs)
        finally:
            # Always clear the environment, even if `function` raised.
            EnvironmentManager.set("current_blok", None)
    try:
        registry = RegistryManager.get(
            Configuration.get("db_name"), unittest=True
        )
        # update required blok
        registry_bloks = registry.get_bloks_by_states("installed", "toinstall")
        toinstall = [x for x in bloks if x not in registry_bloks]
        toupdate = [x for x in bloks if x in registry_bloks]
        registry.upgrade(install=toinstall, update=toupdate)
    finally:
        # Restore the pre-call blok declarations regardless of outcome.
        RegistryManager.loaded_bloks = loaded_bloks
    return registry
def init_registry(function, **kwargs):
    """Shortcut for :func:`init_registry_with_bloks` with no extra bloks."""
    return init_registry_with_bloks([], function, **kwargs)
@pytest.fixture(scope="session")
def METHOD_NAME(request, configuration_loaded):
    """Session fixture: ensure the test database exists with the base bloks.

    Does nothing for MySQL/MariaDB, where :func:`reset_db` rebuilds the
    database per test class instead.
    """
    if sgdb_in(["MySQL", "MariaDB"]):
        return
    url = get_url()
    if not database_exists(url):
        db_template_name = Configuration.get("db_template_name", None)
        create_database(url, template=db_template_name)
    # Build the base registry once so its tables are committed for the whole
    # session, then release the blok manager for per-module fixtures.
    BlokManager.load()
    registry = init_registry_with_bloks([], None)
    registry.commit()
    registry.close()
    BlokManager.unload()
@pytest.fixture(scope="module")
def bloks_loaded(request, METHOD_NAME):
    """Module fixture: load the declared bloks, unloading them at teardown."""
    # Finalizer registered before load() so the manager is unloaded even if a
    # later fixture or test fails.
    request.addfinalizer(BlokManager.unload)
    BlokManager.load()
@pytest.fixture(scope="module")
def testbloks_loaded(request, METHOD_NAME):
    """Module fixture: load bloks from both the "bloks" and "test_bloks" entry points."""
    # Finalizer registered before load() so the manager is unloaded even if a
    # later fixture or test fails.
    request.addfinalizer(BlokManager.unload)
    BlokManager.load(entry_points=("bloks", "test_bloks"))
def reset_db():
    """Drop and recreate the test database (MySQL/MariaDB only)."""
    if not sgdb_in(["MySQL", "MariaDB"]):
        return
    url = get_url()
    if database_exists(url):
        drop_database(url)
    template = Configuration.get("db_template_name", None)
    create_database(url, template=template)
@pytest.fixture(scope="class")
def registry_blok(request, bloks_loaded):
    """Class-scoped registry built on a fresh database with default bloks."""
    reset_db()
    reg = init_registry_with_bloks([], None)
    request.addfinalizer(reg.close)
    return reg
@pytest.fixture(scope="class")
def registry_testblok(request, testbloks_loaded):
    """Class-scoped registry with the test-blok entry points loaded."""
    reset_db()
    reg = init_registry_with_bloks([], None)
    request.addfinalizer(reg.close)
    return reg
@pytest.fixture(scope="function")
def registry_testblok_func(request, testbloks_loaded):
    """Function-scoped variant of :func:`registry_testblok` (fresh per test)."""
    reset_db()
    reg = init_registry_with_bloks([], None)
    request.addfinalizer(reg.close)
    return reg
@pytest.fixture(
    scope="class",
    params=[
        ("prefix", "suffix"),
        ("", ""),
    ],
)
def db_schema(request, bloks_loaded):
    """Parametrize the model DB-schema prefix/suffix, restoring both afterwards."""
    prefix, suffix = request.param
    Configuration.set("prefix_db_schema.Model.*", prefix)
    Configuration.set("suffix_db_schema.Model.*", suffix)

    def restore():
        # Reset to empty so later test classes see unmodified schema names.
        Configuration.set("prefix_db_schema.Model.*", "")
        Configuration.set("suffix_db_schema.Model.*", "")

    request.addfinalizer(restore)
7,222 | test linear solves equivalent | import pytest
from firedrake import *
from firedrake.petsc import PETSc
from numpy.linalg import norm as np_norm
import gc
def count_refs(cls):
    """Return the number of live, GC-tracked objects that are instances of ``cls``."""
    gc.collect()
    # A list comprehension here can trigger:
    # > ReferenceError: weakly-referenced object no longer exists
    # so we walk the object list manually and skip objects whose referent
    # has already died.
    total = 0
    for candidate in gc.get_objects():
        try:
            matched = isinstance(candidate, cls)
        except ReferenceError:
            continue
        if matched:
            total += 1
    return total
@pytest.fixture
def a_L_out():
    """Return (bilinear form, linear form, output Function) on a P1 unit cube."""
    mesh = UnitCubeMesh(1, 1, 1)
    space = FunctionSpace(mesh, 'CG', 1)
    rhs_coefficient = Function(space)
    result = Function(space)
    trial = TrialFunction(space)
    test = TestFunction(space)
    return inner(trial, test) * dx, inner(rhs_coefficient, test) * dx, result
def test_linear_solver_api(a_L_out):
    """Solver parameters must propagate to the underlying PETSc SNES/KSP."""
    a, L, out = a_L_out
    problem = LinearVariationalProblem(a, L, out)
    solver = LinearVariationalSolver(problem, solver_parameters={'ksp_type': 'cg'})
    snes = solver.snes
    ksp = snes.getKSP()
    assert solver.parameters['snes_type'] == 'ksponly'
    assert solver.parameters['ksp_rtol'] == 1e-7
    assert snes.getType() == snes.Type.KSPONLY
    assert ksp.getType() == ksp.Type.CG
    assert ksp.getTolerances()[0] == solver.parameters['ksp_rtol']
def test_petsc_options_cleared(a_L_out):
    """Solving must not leak solver parameters into the global PETSc options."""
    a, L, out = a_L_out
    opts = PETSc.Options()
    before = dict(opts.getAll())
    solve(a == L, out, solver_parameters={'foo': 'bar'})
    assert before == opts.getAll()
def test_linear_solver_gced(a_L_out):
    """Solving a linear system must not leak LinearVariationalSolver objects."""
    a, L, out = a_L_out
    n_before = count_refs(LinearVariationalSolver)
    solve(a == L, out)
    out.dat.data_ro  # force evaluation
    assert count_refs(LinearVariationalSolver) == n_before
def test_assembled_solver_gced(a_L_out):
    """Solving with preassembled operators must not leak LinearSolver objects."""
    a, L, out = a_L_out
    A = assemble(a)
    b = assemble(L)
    n_before = count_refs(LinearSolver)
    solve(A, out, b)
    out.dat.data_ro  # force evaluation
    assert count_refs(LinearSolver) == n_before
def test_nonlinear_solver_gced(a_L_out):
    """Solving a nonlinear system must not leak NonlinearVariationalSolver objects."""
    a, L, out = a_L_out
    n_before = count_refs(NonlinearVariationalSolver)
    residual = action(a, out) - L
    solve(residual == 0, out)
    out.dat.data_ro  # force evaluation
    assert count_refs(NonlinearVariationalSolver) == n_before
def test_nonlinear_solver_api(a_L_out):
    """SNES type and default tolerance must reflect the supplied parameters."""
    a, L, out = a_L_out
    residual = action(a, out) - L
    problem = NonlinearVariationalProblem(residual, out, J=a)
    solver = NonlinearVariationalSolver(
        problem, solver_parameters={'snes_type': 'ksponly'}
    )
    assert solver.snes.getType() == solver.snes.Type.KSPONLY
    assert solver.snes.getTolerances()[0] == 1e-8
def test_nonlinear_solver_flattens_params(a_L_out):
    """Nested solver-parameter dicts must flatten to the same flat keys."""
    a, L, out = a_L_out
    residual = action(a, out) - L
    problem = NonlinearVariationalProblem(residual, out, J=a)
    flat = NonlinearVariationalSolver(
        problem, solver_parameters={'snes_type': 'ksponly', 'ksp_rtol': 1e-10}
    )
    nested = NonlinearVariationalSolver(
        problem, solver_parameters={'snes_type': 'ksponly', 'ksp': {'rtol': 1e-10}}
    )
    assert flat.parameters["ksp_rtol"] == 1e-10
    assert nested.parameters["ksp_rtol"] == 1e-10
def METHOD_NAME():
    """solve(a == L, out) should return the same as solving with the assembled objects.

    This relies on two different code paths agreeing on the same set of
    solver parameters.
    """
    mesh = UnitSquareMesh(50, 50)
    V = FunctionSpace(mesh, "CG", 1)
    f = Function(V)
    f.assign(1)
    f.vector()[:] = 1.
    t = TestFunction(V)
    q = TrialFunction(V)
    a = inner(q, t) * dx
    L = inner(f, t) * dx
    # Solve the system using forms
    sol = Function(V)
    solve(a == L, sol)
    # And again: a repeated variational solve must be bitwise reproducible
    sol2 = Function(V)
    solve(a == L, sol2)
    assert np_norm(sol.vector()[:] - sol2.vector()[:]) == 0
    # Solve the system using preassembled objects
    sol3 = Function(V)
    solve(assemble(a), sol3, assemble(L))
    # Small tolerance absorbs floating-point differences between the two paths
    assert np_norm(sol.vector()[:] - sol3.vector()[:]) < 5e-14
    # Same, solving into vector
    sol4 = sol3.vector()
    solve(assemble(a), sol4, assemble(L))
    assert np_norm(sol.vector()[:] - sol4[:]) < 5e-14
def test_linear_solver_flattens_params(a_L_out):
    """LinearSolver must flatten nested parameter dicts like the variational solvers."""
    a, _, _ = a_L_out
    A = assemble(a)
    flat = LinearSolver(A, solver_parameters={"ksp_rtol": 1e-10})
    nested = LinearSolver(A, solver_parameters={"ksp": {"rtol": 1e-10}})
    assert flat.parameters["ksp_rtol"] == 1e-10
    assert nested.parameters["ksp_rtol"] == 1e-10
def test_constant_jacobian_lvs():
    """constant_jacobian must control whether the operator is reassembled per solve."""
    mesh = UnitSquareMesh(2, 2)
    V = FunctionSpace(mesh, "CG", 1)
    u = TrialFunction(V)
    v = TestFunction(V)
    q = Function(V)
    q.assign(1)
    a = q * inner(u, v) * dx
    f = Function(V)
    f.assign(1)
    L = inner(f, v) * dx
    out = Function(V)
    # Non-constant jacobian set
    lvp = LinearVariationalProblem(a, L, out, constant_jacobian=False)
    lvs = LinearVariationalSolver(lvp)
    lvs.solve()
    assert norm(assemble(out - f)) < 1e-7
    # Changing q must be picked up on the next solve (operator reassembled).
    q.assign(5)
    lvs.solve()
    assert norm(assemble(out*5 - f)) < 2e-7
    q.assign(1)
    # This one should fail (because Jac is wrong)
    lvp = LinearVariationalProblem(a, L, out, constant_jacobian=True)
    lvs = LinearVariationalSolver(lvp)
    lvs.solve()
    assert norm(assemble(out - f)) < 1e-7
    q.assign(5)
    # With constant_jacobian=True the stale operator is reused, so the
    # rescaled solution must NOT match.
    lvs.solve()
    assert not (norm(assemble(out*5 - f)) < 2e-7)
7,223 | parse args | # serpentarium must be the first import, as it needs to save the state of the
# import system prior to any imports
# isort: off
import serpentarium # noqa: F401
from serpentarium.logging import configure_host_process_logger
# isort: on
import argparse
import logging
import logging.handlers
import os
import re
import sys
import tempfile
import time
import traceback
from multiprocessing import Queue, freeze_support, get_context
from pathlib import Path
from typing import Sequence, Tuple, Union
from psutil import Process
# dummy import for pyinstaller
# noinspection PyUnresolvedReferences
from common.common_consts import AGENT_OTP_ENVIRONMENT_VARIABLE
from common.version import get_version
from infection_monkey.dropper import MonkeyDrops
from infection_monkey.model import DROPPER_ARG, MONKEY_ARG
from infection_monkey.monkey import InfectionMonkey
class OTPFormatter(logging.Formatter):
    """
    Formatter that replaces OTPs in log messages with asterisks
    """

    # Matches "<VAR>=<token>" followed by at least one whitespace/";" separator.
    OTP_REGEX = re.compile(f"{AGENT_OTP_ENVIRONMENT_VARIABLE}=\\S+[\\s;]+")
    # NOTE(review): the replacement carries no trailing separator, so the text
    # following the matched separator run is joined directly onto the asterisks
    # — confirm this cosmetic squashing of the scrubbed message is acceptable.
    OTP_REPLACEMENT = f"{AGENT_OTP_ENVIRONMENT_VARIABLE}={'*' * 6}"

    def format(self, record):
        # Format normally first, then scrub OTP assignments from the result.
        original_log_message = logging.Formatter.format(self, record)
        formatted_log_message = re.sub(
            OTPFormatter.OTP_REGEX, OTPFormatter.OTP_REPLACEMENT, original_log_message
        )
        return formatted_log_message
def main():
    """Entry point: parse the run mode, set up IPC logging, and run the agent."""
    freeze_support()  # required for multiprocessing + pyinstaller on windows
    mode, mode_specific_args = METHOD_NAME()
    # TODO: Use an Enum for this
    if mode not in [MONKEY_ARG, DROPPER_ARG]:
        raise ValueError(f'The mode argument must be either "{MONKEY_ARG}" or "{DROPPER_ARG}"')
    # "spawn" gives child processes a clean interpreter state on all platforms.
    multiprocessing_context = get_context(method="spawn")
    ipc_logger_queue = multiprocessing_context.Queue()
    log_path = _create_secure_log_file(mode)
    # The listener drains log records that child processes push onto the queue.
    queue_listener = _configure_queue_listener(ipc_logger_queue, log_path)
    queue_listener.start()
    logger = _configure_logger()
    logger.info(f"writing log file to {log_path}")
    try:
        _run_agent(mode, mode_specific_args, ipc_logger_queue, logger, log_path)
    except Exception as err:
        logger.exception(f"An unexpected error occurred while running the agent: {err}")
    finally:
        # Stop the listener last so every queued record is flushed to the file.
        logger.debug("Stopping the queue listener")
        queue_listener.stop()
def METHOD_NAME() -> Tuple[str, Sequence[str]]:
    """Parse the run mode, returning it with any unconsumed CLI arguments."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "mode",
        choices=[MONKEY_ARG, DROPPER_ARG],
        help=f"'{MONKEY_ARG}' mode will run the agent in the current session/terminal."
        f"'{DROPPER_ARG}' will detach the agent from the current session "
        f"and will start it on a separate process.",
    )
    known_args, remaining_args = parser.parse_known_args()
    return str(known_args.mode), remaining_args
def _create_secure_log_file(monkey_arg: str) -> Path:
    """Create a uniquely-named, securely-created log file and return its path.

    :param monkey_arg: agent run mode; MONKEY_ARG selects the "agent" file
        name, anything else the "dropper" name
    :return: Path of the created log file
    """
    flavour = "agent" if monkey_arg == MONKEY_ARG else "dropper"
    timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
    # mkstemp creates the file with restrictive permissions; we only need the
    # path, so close the returned descriptor immediately.
    descriptor, log_file = tempfile.mkstemp(
        suffix=".log", prefix=f"infection-monkey-{flavour}-{timestamp}-"
    )
    os.close(descriptor)
    return Path(log_file)
def _configure_queue_listener(
    ipc_logger_queue: Queue, log_file_path: Path
) -> logging.handlers.QueueListener:
    """
    Gets unstarted configured QueueListener object
    We configure the root logger to use QueueListener with Stream and File handler.
    :param ipc_logger_queue: A Queue shared by the host and child process that stores log messages
    :param log_file_path: A Path used to configure the FileHandler
    :return: An unstarted QueueListener feeding the stream and file handlers
    """
    log_format = (
        "%(asctime)s [%(process)d:%(threadName)s:%(levelname)s] %(module)s.%("
        "funcName)s.%(lineno)d: %(message)s"
    )
    # OTPFormatter scrubs one-time passwords before records reach any sink.
    formatter = OTPFormatter(log_format)
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(formatter)
    file_handler = logging.FileHandler(log_file_path)
    file_handler.setFormatter(formatter)
    queue_listener = configure_host_process_logger(ipc_logger_queue, [stream_handler, file_handler])
    return queue_listener
def _configure_logger() -> logging.Logger:
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
def log_uncaught_exceptions(ex_cls, ex, tb):
logger.critical("".join(traceback.format_tb(tb)))
logger.critical("{0}: {1}".format(ex_cls, ex))
sys.excepthook = log_uncaught_exceptions
return logger
def _run_agent(
    mode: str,
    mode_specific_args: Sequence[str],
    ipc_logger_queue: Queue,
    logger: logging.Logger,
    log_path: Path,
):
    """Instantiate the requested agent flavour, run it, and always clean up.

    :param mode: MONKEY_ARG or DROPPER_ARG (validated by the caller)
    :param mode_specific_args: CLI arguments left over after mode parsing
    :param ipc_logger_queue: queue that child processes log into
    :param logger: host-process logger
    :param log_path: log file path (passed through to InfectionMonkey)
    """
    logger.info(
        ">>>>>>>>>> Initializing the Infection Monkey Agent: PID %s <<<<<<<<<<", os.getpid()
    )
    logger.info(f"version: {get_version()}")
    monkey: Union[InfectionMonkey, MonkeyDrops]
    if mode == MONKEY_ARG:
        monkey = InfectionMonkey(
            mode_specific_args, ipc_logger_queue=ipc_logger_queue, log_path=log_path
        )
    elif mode == DROPPER_ARG:
        monkey = MonkeyDrops(mode_specific_args)
    try:
        logger.info(f"Starting {monkey.__class__.__name__}")
        monkey.start()
    except Exception as err:
        logger.exception("Exception thrown from monkey's start function. More info: {}".format(err))
    # cleanup() runs whether or not start() succeeded.
    try:
        monkey.cleanup()
    except Exception as err:
        logger.exception(
            "Exception thrown from monkey's cleanup function: More info: {}".format(err)
        )
    finally:
        if mode == MONKEY_ARG:
            # Hung children would otherwise keep the process tree alive.
            _kill_hung_child_processes(logger)
def _kill_hung_child_processes(logger: logging.Logger):
    """Forcibly kill leftover child processes, sparing known self-cleaning ones."""
    for child in Process().children(recursive=True):
        logger.debug(
            "Found child process: "
            f"pid={child.pid}, name={child.name()}, status={child.status()}, cmdline={child.cmdline()}"
        )
        if _process_is_resource_tracker(child):
            # This process will clean itself up, but no other processes should be running at
            # this time.
            logger.debug(f"Ignoring resource_tracker process: {child.pid}")
            continue
        if _process_is_windows_self_removal(child):
            logger.debug(f"Ignoring self removal process: {child.pid}")
            continue
        logger.warning(f"Killing hung child process: {child.pid}")
        child.kill()
def _process_is_resource_tracker(process: Process) -> bool:
    """Return True if the process looks like multiprocessing's resource tracker."""
    return any(
        "multiprocessing.resource_tracker" in argument
        for argument in process.cmdline()
    )
def _process_is_windows_self_removal(process: Process) -> bool:
    """Return True for the cmd/timeout processes used for self-deletion on Windows."""
    return process.name() in ("cmd.exe", "timeout.exe")
# Script entry point; the guard keeps importing this module side-effect free.
if "__main__" == __name__:
    main()
7,224 | set up | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" P1 tests for alert receiving from VR on service failure in VR
"""
# Import Local Modules
# import marvin
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.lib.utils import (get_process_status, validateList,
cleanup_resources)
from marvin.lib.base import (Account,
ServiceOffering,
VirtualMachine)
from marvin.lib.common import (list_hosts,
list_routers,
get_zone,
get_domain,
get_template)
from nose.plugins.attrib import attr
from marvin.codes import FAILED
from marvin.codes import PASS
# nose multiprocess-plugin flag: tests in this module may share a worker process.
_multiprocess_shared_ = True
class TestVR(cloudstackTestCase):
    """Verify that the FTP conntrack/NAT kernel modules are loaded in the VR."""

    @classmethod
    def setUpClass(cls):
        # Class-wide fixtures: account and service offering, torn down in
        # tearDownClass via cls._cleanup.
        cls._cleanup = []
        cls.testClient = super(
            TestVR,
            cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()
        cls.services = cls.testClient.getParsedTestDataConfig()
        cls.hostConfig = cls.config.__dict__["zones"][0].__dict__["pods"][0].__dict__["clusters"][0].__dict__["hosts"][0].__dict__
        # Get Zone, Domain and templates
        cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
        domain = get_domain(cls.api_client)
        cls.services['mode'] = cls.zone.networktype
        template = get_template(
            cls.api_client,
            cls.zone.id,
            cls.services["ostype"]
        )
        if template == FAILED:
            assert False, "get_template() failed to return template with \
                description %s" % cls.services["ostype"]
        # Set Zones and disk offerings ??
        cls.services["small"]["zoneid"] = cls.zone.id
        cls.services["small"]["template"] = template.id
        # Create account, service offerings, vm.
        cls.account = Account.create(
            cls.api_client,
            cls.services["account"],
            domainid=domain.id
        )
        cls.small_offering = ServiceOffering.create(
            cls.api_client,
            cls.services["service_offerings"]["small"]
        )
        cls._cleanup.append(cls.small_offering)
        cls._cleanup.append(cls.account)
        return

    @classmethod
    def tearDownClass(cls):
        # Re-fetch the API client in case the test client was recycled.
        cls.api_client = super(
            TestVR,
            cls).getClsTestClient().getApiClient()
        cleanup_resources(cls.api_client, cls._cleanup)
        return

    def METHOD_NAME(self):
        # Per-test setup (unittest `setUp` analogue; name anonymized upstream).
        self.apiclient = self.testClient.getApiClient()
        self.hypervisor = self.testClient.getHypervisorInfo()
        self.cleanup = []

    def tearDown(self):
        # Clean up, terminate the created ISOs
        cleanup_resources(self.apiclient, self.cleanup)
        return

    @attr(tags=["advanced"], required_hardware="true")
    def test_01_FTPModulesInVR(self):
        """
        @desc: Verify FTP modules are loaded in VR of advance zone
        step1 : create a VR in advance zone
        step2: Verify FTP modules are there in created VR
        """
        if self.zone.networktype == "Basic":
            self.skipTest("This test can be run only in advance zone")
        # create a virtual machine
        vm = VirtualMachine.create(
            self.api_client,
            self.services["small"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.small_offering.id,
            mode=self.services["mode"]
        )
        self.assertIsNotNone(vm, "Failed to deploy virtual machine")
        self.cleanup.append(vm)
        response = VirtualMachine.list(self.api_client, id=vm.id)
        status = validateList(response)
        self.assertEqual(
            status[0],
            PASS,
            "list vm response returned invalid list")
        # Deploying the VM implicitly brings up the virtual router (VR).
        list_router_response = list_routers(
            self.apiclient,
            account=self.account.name,
            domainid=self.account.domainid
        )
        status = validateList(list_router_response)
        self.assertEqual(
            status[0], PASS, "Check list response returns a valid list")
        router = list_router_response[0]
        self.debug("Router ID: %s, state: %s" % (router.id, router.state))
        self.assertEqual(
            router.state,
            'Running',
            "Check list router response for router state"
        )
        # The VR is only reachable over its link-local address: via the
        # management server for vmware/hyperv, via the VR's host otherwise.
        if self.hypervisor.lower() in ('vmware', 'hyperv'):
            result = get_process_status(
                self.apiclient.connection.mgtSvr,
                22,
                self.apiclient.connection.user,
                self.apiclient.connection.passwd,
                router.linklocalip,
                "lsmod | grep ftp",
                hypervisor=self.hypervisor
            )
        else:
            try:
                hosts = list_hosts(
                    self.apiclient,
                    zoneid=router.zoneid,
                    type='Routing',
                    state='Up',
                    id=router.hostid
                )
                self.assertEqual(
                    isinstance(hosts, list),
                    True,
                    "Check list host returns a valid list"
                )
                host = hosts[0]
                result = get_process_status(
                    host.ipaddress,
                    22,
                    self.hostConfig["username"],
                    self.hostConfig["password"],
                    router.linklocalip,
                    "lsmod | grep ftp"
                )
            except Exception as e:
                raise Exception("Exception raised in getting host\
                    credentials: %s " % e)
        res = str(result)
        self.debug("lsmod | grep ftp: %s" % res)
        # Both the conntrack helper and its NAT counterpart must be present.
        if "nf_nat_ftp" in res and "nf_conntrack_ftp" in res:
            ismoduleinstalled = True
        else:
            ismoduleinstalled = False
        self.assertEqual(
            ismoduleinstalled,
            True,
            "nf_conntrack_ftp and nf_nat_ftp modules not installed on routers")
        return
7,225 | fast name | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The classes in this file are interfaces for metrics. They are not intended
to be subclassed or created directly by users. To work with and access metrics,
users should use the classes and methods exposed in metric.py.
Available classes:
- Metric - Base interface of a metrics object.
- Counter - Counter metric interface. Allows a count to be incremented or
decremented during pipeline execution.
- Distribution - Distribution Metric interface. Allows statistics about the
distribution of a variable to be collected during pipeline execution.
- Gauge - Gauge Metric interface. Allows to track the latest value of a
variable during pipeline execution.
- MetricName - Namespace and name used to refer to a Metric.
"""
# pytype: skip-file
from typing import Dict
from typing import Optional
__all__ = [
'Metric', 'Counter', 'Distribution', 'Gauge', 'Histogram', 'MetricName'
]
class MetricName(object):
  """The name of a metric.

  The name of a metric consists of a namespace and a name. The namespace
  allows grouping related metrics together and also prevents collisions
  between multiple metrics of the same name.
  """
  def __init__(self, namespace, name, urn=None, labels=None):
    # type: (Optional[str], Optional[str], Optional[str], Optional[Dict[str, str]]) -> None

    """Initializes ``MetricName``.

    Note: namespace and name should be set for user metrics,
    urn and labels should be set for an arbitrary metric to package into a
    MonitoringInfo.

    Args:
      namespace: A string with the namespace of a metric.
      name: A string with the name of a metric.
      urn: URN to populate on a MonitoringInfo, when sending to RunnerHarness.
      labels: Labels to populate on a MonitoringInfo

    Raises:
      ValueError: for a user metric (no urn) with an empty namespace or name.
    """
    if not urn:
      if not namespace:
        raise ValueError('Metric namespace must be non-empty')
      if not name:
        raise ValueError('Metric name must be non-empty')
    self.namespace = namespace
    self.name = name
    self.urn = urn
    self.labels = labels if labels else {}

  def __eq__(self, other):
    # Comparing against a non-MetricName must not raise AttributeError.
    # Returning NotImplemented lets Python fall back to the other operand's
    # __eq__ (and ultimately identity), so `MetricName(...) == 5` is False.
    if not isinstance(other, MetricName):
      return NotImplemented
    return (
        self.namespace == other.namespace and self.name == other.name and
        self.urn == other.urn and self.labels == other.labels)

  def __str__(self):
    if self.urn:
      return 'MetricName(namespace={}, name={}, urn={}, labels={})'.format(
          self.namespace, self.name, self.urn, self.labels)
    else:  # User counter case.
      return 'MetricName(namespace={}, name={})'.format(
          self.namespace, self.name)

  def __hash__(self):
    # Hash agrees with __eq__: built from the same fields it compares.
    return hash((self.namespace, self.name, self.urn) +
                tuple(self.labels.items()))

  def METHOD_NAME(self):
    """Returns a compact string encoding of this name for fast dict keying."""
    name = self.name or ''
    namespace = self.namespace or ''
    urn = self.urn or ''
    labels = ''
    if self.labels:
      labels = '_'.join(['%s=%s' % (k, v) for (k, v) in self.labels.items()])
    return '%d_%s%s%s%s' % (len(name), name, namespace, urn, labels)
class Metric(object):
  """Base interface of a metric object.

  Simply records the :class:`MetricName` that identifies the metric.
  """
  def __init__(self, metric_name):
    # type: (MetricName) -> None
    self.metric_name = metric_name
class Counter(Metric):
  """Counter metric interface.

  Exposes a count that pipeline code may increment or decrement; concrete
  implementations supply the storage by overriding :meth:`inc`.
  """
  def inc(self, n=1):
    raise NotImplementedError

  def dec(self, n=1):
    # Decrement is defined in terms of inc, so subclasses only override inc.
    self.inc(-n)
class Distribution(Metric):
  """Distribution Metric interface.

  Collects statistics about the distribution of a variable observed during
  pipeline execution; concrete implementations override :meth:`update`.
  """
  def update(self, value):
    raise NotImplementedError
class Gauge(Metric):
  """Gauge Metric interface.

  Tracks the latest value of a variable observed during pipeline execution;
  concrete implementations override :meth:`set`.
  """
  def set(self, value):
    raise NotImplementedError
class Histogram(Metric):
  """Histogram Metric interface.

  Collects percentile statistics of a variable observed during pipeline
  execution; concrete implementations override :meth:`update`.
  """
  def update(self, value):
    raise NotImplementedError
7,226 | add quarantined replicas | # -*- coding: utf-8 -*-
# Copyright European Organization for Nuclear Research (CERN) since 2012
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from typing import TYPE_CHECKING
from sqlalchemy import and_, or_
from sqlalchemy.sql.expression import false, insert
from rucio.common.utils import chunks
from rucio.db.sqla import models, filter_thread_work
from rucio.db.sqla.session import read_session, transactional_session
if TYPE_CHECKING:
from sqlalchemy.orm import Session
@transactional_session
def METHOD_NAME(rse_id, replicas, *, session: "Session"):
    """
    Bulk add quarantined file replicas.

    :param rse_id: The rse id.
    :param replicas: A list of dicts with the replica information.
    :param session: The database session in use.
    """
    # NOTE(review): assumes `replicas` is non-empty — an empty list would build
    # an empty or_() filter and an empty bulk insert; confirm callers guarantee
    # this.
    #
    # Exclude files that have a registered replica. This is a
    # safeguard against potential issues in the Auditor.
    file_clause = []
    for replica in replicas:
        if "scope" in replica and "name" in replica:
            file_clause.append(and_(models.RSEFileAssociation.scope == replica['scope'],
                                    models.RSEFileAssociation.name == replica['name'],
                                    models.RSEFileAssociation.rse_id == rse_id))
    if file_clause:
        file_query = session.query(models.RSEFileAssociation.scope,
                                   models.RSEFileAssociation.name,
                                   models.RSEFileAssociation.rse_id).\
            with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\
            filter(or_(*file_clause))
        existing_replicas = [(scope, name, rseid) for scope, name, rseid in file_query]
        replicas = [replica for replica in replicas if (replica.get('scope', None), replica.get('name', None), rse_id) not in existing_replicas]
    # Exclude files that have already been added to the quarantined
    # replica table.
    quarantine_clause = []
    for replica in replicas:
        quarantine_clause.append(and_(models.QuarantinedReplica.path == replica['path'],
                                      models.QuarantinedReplica.rse_id == rse_id))
    quarantine_query = session.query(models.QuarantinedReplica.path,
                                     models.QuarantinedReplica.rse_id).\
        filter(or_(*quarantine_clause))
    quarantine_replicas = [(path, rseid) for path, rseid in quarantine_query]
    replicas = [replica for replica in replicas if (replica['path'], rse_id) not in quarantine_replicas]
    # Bulk insert whatever survived both filters.
    session.execute(
        insert(models.QuarantinedReplica),
        [{'rse_id': rse_id,
          'path': file['path'],
          'scope': file.get('scope'),
          'name': file.get('name'),
          'bytes': file.get('bytes')}
         for file in replicas])
@transactional_session
def delete_quarantined_replicas(rse_id, replicas, *, session: "Session"):
    """
    Delete file replicas.

    Removes the given paths from the quarantined-replica table and archives
    each removal into the quarantined-replica history table.

    :param rse_id: the rse id.
    :param replicas: A list of dicts with the replica information.
    :param session: The database session in use.
    """
    conditions = []
    for replica in replicas:
        conditions.append(models.QuarantinedReplica.path == replica['path'])
    if conditions:
        session.query(models.QuarantinedReplica).\
            filter(models.QuarantinedReplica.rse_id == rse_id).\
            filter(or_(*conditions)).\
            delete(synchronize_session=False)
    if replicas:
        # `deleted_at` records when the purge happened.
        # NOTE(review): datetime.utcnow() returns a naive datetime and is
        # deprecated in Python 3.12 — confirm the model expects naive UTC.
        session.execute(
            insert(models.QuarantinedReplicaHistory),
            [{'rse_id': rse_id, 'path': replica['path'],
              'bytes': replica.get('bytes'),
              'created_at': replica.get('created_at'),
              'deleted_at': datetime.datetime.utcnow()}
             for replica in replicas]
        )
@read_session
def list_quarantined_replicas(rse_id, limit, worker_number=None, total_workers=None, *, session: "Session"):
    """
    List RSE Quarantined File replicas.

    :param rse_id: the rse id.
    :param limit: The maximum number of replicas returned.
    :param worker_number: id of the executing worker.
    :param total_workers: Number of total workers.
    :param session: The database session in use.

    :returns: two lists :
              - The first one contains quarantine replicas actually registered in the replicas tables
              - The second one contains real "dark" files
    """
    replicas_clause = []
    quarantined_replicas = {}
    real_replicas = []
    dark_replicas = []
    # Fetch up to `limit` quarantined entries for this RSE; the work is
    # sharded over workers by hashing on the path.
    query = session.query(models.QuarantinedReplica.path,
                          models.QuarantinedReplica.bytes,
                          models.QuarantinedReplica.scope,
                          models.QuarantinedReplica.name,
                          models.QuarantinedReplica.created_at).\
        filter(models.QuarantinedReplica.rse_id == rse_id)
    query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='path')
    # Group the quarantined entries by (scope, name); a replica-table lookup
    # clause is added only once per distinct pair.
    for path, bytes_, scope, name, created_at in query.limit(limit):
        if not (scope, name) in quarantined_replicas:
            quarantined_replicas[(scope, name)] = []
            replicas_clause.append(and_(models.RSEFileAssociation.scope == scope,
                                        models.RSEFileAssociation.name == name))
        quarantined_replicas[(scope, name)].append((path, bytes_, created_at))
    # Probe the replica table in chunks: pairs found there are "real"
    # replicas and are popped; whatever remains afterwards is "dark".
    for chunk in chunks(replicas_clause, 20):
        query = session.query(models.RSEFileAssociation.scope,
                              models.RSEFileAssociation.name).\
            filter(models.RSEFileAssociation.rse_id == rse_id).\
            filter(or_(*chunk))
        for scope, name in query.all():
            reps = quarantined_replicas.pop((scope, name))
            real_replicas.extend([{'scope': scope,
                                   'name': name,
                                   'rse_id': rse_id,
                                   'path': rep[0],
                                   'bytes': rep[1],
                                   'created_at': rep[2]}
                                  for rep in reps])
    for key, value in quarantined_replicas.items():
        dark_replicas.extend([{'scope': key[0],
                               'name': key[1],
                               'rse_id': rse_id,
                               'path': rep[0],
                               'bytes': rep[1],
                               'created_at': rep[2]}
                              for rep in value])
    return real_replicas, dark_replicas
@read_session
def list_rses_with_quarantined_replicas(filters=None, *, session: "Session"):
    """
    List the ids of all non-deleted RSEs which currently hold quarantined replicas.

    :param filters: dictionary of attributes by which the results should be
                    filtered (only the 'vo' key is honoured).
    :param session: The database session in use.
    :returns: a list of RSE ids.
    """
    query = (session.query(models.RSE.id)
             .distinct(models.RSE.id)
             .filter(models.QuarantinedReplica.rse_id == models.RSE.id)
             .filter(models.RSE.deleted == false()))
    vo = filters.get('vo') if filters else None
    if vo:
        query = query.filter(models.RSE.vo == vo)
    return [rse_id for (rse_id,) in query]
7,227 | kill pilots |
__copyright__ = 'Copyright 2022, The RADICAL-Cybertools Team'
__license__ = 'MIT'
# configure the psij logger (captured in the launch components stderr)
import logging
logging.basicConfig(level='DEBUG')
import threading as mt
from .base import PilotLauncherBase
from ... import states as rps
# psij is an optional dependency: remember any import error so that the
# launcher class can re-raise it on construction and thereby disable itself.
psij = None
psij_ex = None
try:
    import psij
except ImportError as ex:
    psij_ex = ex
# ------------------------------------------------------------------------------
#
class PilotLauncherPSIJ(PilotLauncherBase):
    '''
    Pilot launcher which submits and tracks pilot jobs through the PSI/J
    portable job submission API.  PSI/J is an optional dependency: if its
    import failed at module load time, the constructor re-raises the stored
    import error so that the pilot manager disables this launcher.
    '''

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, log, prof, state_cb):
        '''
        :param name:     name of this launcher component
        :param log:      logger instance
        :param prof:     profiler instance
        :param state_cb: callback used to report pilot state updates
        '''

        # psij is an optional dependency - let an import exception fall through
        # to disable this pilot launcher
        if psij_ex:
            raise psij_ex
        assert psij

        PilotLauncherBase.__init__(self, name, log, prof, state_cb)

        self._jobs   = dict()      # map pilot id to psi_j job
        self._pilots = dict()      # map psi_j id to pilot job
        self._jex    = dict()      # map launch schema to psi_j job executors
        self._lock   = mt.RLock()  # lock above structures

    # --------------------------------------------------------------------------
    #
    def _get_schema(self, rcfg):
        '''
        Derive the psij executor schema from the configured job manager
        endpoint URL.  Access schemas ('ssh', 'gsissh') are stripped, and
        'fork' is mapped to psij's 'local'.  Returns None (implicitly) if
        not exactly one schema remains.
        '''

        url = rcfg['job_manager_endpoint']
        schemas = url.split(':')[0].split('+')

        if 'ssh' in schemas:
            schemas.remove('ssh')
        if 'gsissh' in schemas:
            schemas.remove('gsissh')

        # exactly one schema must remain (this folds the original
        # `> 1` and `not schemas` checks into one condition)
        if len(schemas) != 1:
            return

        schema = schemas[0]
        if schema == 'fork':
            schema = 'local'

        return schema

    # --------------------------------------------------------------------------
    #
    def _translate_state(self, status):
        '''
        Map a psij job status to the corresponding RP pilot state.
        Raises ValueError for unknown psij states.
        '''

        if status.state == psij.JobState.NEW : return rps.NEW
        elif status.state == psij.JobState.QUEUED : return rps.PMGR_LAUNCHING
        elif status.state == psij.JobState.ACTIVE : return rps.PMGR_ACTIVE
        elif status.state == psij.JobState.COMPLETED : return rps.DONE
        elif status.state == psij.JobState.FAILED : return rps.FAILED
        elif status.state == psij.JobState.CANCELED : return rps.CANCELED
        else:
            raise ValueError('cannot interpret psij state: %s' % repr(status))

    # --------------------------------------------------------------------------
    #
    def _job_status_cb(self, job, status):
        '''
        psij status callback: translate the psij state and forward it to the
        pilot manager via `self._state_cb`.  Unknown jobs are ignored;
        exceptions are logged but never propagated into psij.
        '''
        try:
            with self._lock:
                if job.id not in self._pilots:
                    return
                rp_state = self._translate_state(status)
                pilot = self._pilots[job.id]

            self._state_cb(pilot, rp_state)

        except Exception:
            self._log.exception('job status callback failed')

    # --------------------------------------------------------------------------
    #
    def can_launch(self, rcfg, pilot):
        '''
        A pilot can be launched if the endpoint URL maps to a valid psij
        schema and a job executor for that schema can be created.  Executors
        are created lazily and cached per schema.
        '''

        schema = self._get_schema(rcfg)
        if not schema:
            return False

        if schema not in self._jex:
            self._log.debug('create executor for %s', schema)
            try:
                self._jex[schema] = psij.JobExecutor.get_instance(schema)
                self._jex[schema].set_job_status_callback(self._job_status_cb)
            except Exception:
                # narrowed from a bare `except:` so that SystemExit and
                # KeyboardInterrupt are not swallowed here
                self._log.exception('failed to create psij executor')
                return False

        return True

    # --------------------------------------------------------------------------
    #
    def launch_pilots(self, rcfg, pilots):
        '''
        Translate each pilot's job description into a psij JobSpec and submit
        it via the executor matching the endpoint's launch schema.
        '''

        assert psij

        for pilot in pilots:

            pid = pilot['uid']
            schema = self._get_schema(rcfg)
            assert schema

            jex = self._jex.get(schema)
            assert jex

            # NOTE(review): `jd_dict` is accessed via attributes below, so it
            # is assumed to be an attribute-dict type - confirm with callers.
            jd = pilot['jd_dict']

            # an optional reservation id may be appended to the project
            # name, separated by a colon
            proj, res = None, None
            if jd.project:
                if ':' in jd.project:
                    proj, res = jd.project.split(':', 1)
                else:
                    proj = jd.project

            attr = psij.JobAttributes()
            attr.duration = jd.wall_time_limit
            attr.queue_name = jd.queue
            attr.project_name = proj
            attr.reservation_id = res

            spec = psij.JobSpec()
            spec.attributes = attr
            spec.executable = jd.executable
            spec.arguments = jd.arguments
            spec.environment = jd.environment
            spec.directory = jd.working_directory
            spec.stdout_path = jd.output
            spec.stderr_path = jd.error

            spec.resources = psij.ResourceSpecV1()
            spec.resources.node_count = jd.node_count
            spec.resources.process_count = jd.total_cpu_count
            # spec.resources.cpu_cores_per_process = 1
            # spec.resources.gpu_cores_per_process = jd.total_gpu_count

            job = psij.Job(spec)

            # register before submit so the status callback can resolve the job
            self._jobs[pid] = job
            self._pilots[job.id] = pilot
            self._log.debug('added %s: %s', job.id, pid)

            jex.submit(job)

    # --------------------------------------------------------------------------
    #
    def METHOD_NAME(self, pids):
        '''
        Cancel the psij jobs of the given pilot ids.

        :param pids: list of pilot ids to cancel.
        '''

        for pid in pids:
            # BUG FIX: the original checked `pid not in pids`, which is always
            # False inside `for pid in pids`, so unknown pilot ids raised a
            # KeyError below.  Skip pilots this launcher does not manage.
            if pid not in self._jobs:
                continue
            self._jobs[pid].cancel()
# ------------------------------------------------------------------------------
|
7,228 | display error | # -*- coding: UTF-8 -*-
# A part of NonVisual Desktop Access (NVDA)
# Copyright (C) 2006-2022 NV Access Limited, Peter Vágner, Aleksey Sadovoy, Mesar Hameed, Joseph Lee,
# Thomas Stivers, Babbage B.V., Accessolutions, Julien Cochuyt
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
import threading
from typing import Optional
import wx
import extensionPoints
# Guards _messageBoxCounter, which may be touched from multiple threads.
_messageBoxCounterLock = threading.Lock()
# Number of modal message boxes currently awaiting a user response.
_messageBoxCounter = 0
def isModalMessageBoxActive() -> bool:
    """
    Report whether at least one thread-blocking modal message box is still
    awaiting a user response.

    `gui.message.messageBox` blocks its calling thread until the user answers
    the modal dialog, and several such dialogs may be open at once.  While any
    answer is pending NVDA is in an uncertain state, so actions such as
    quitting NVDA should consult this function first; it should likewise be
    checked before opening further `gui.message.messageBox` instances.

    @return: True if a thread blocking modal response is still pending.
    """
    with _messageBoxCounterLock:
        return bool(_messageBoxCounter)
def displayDialogAsModal(dialog: wx.Dialog) -> int:
    """Display a dialog as modal.
    @return: Same as for wx.MessageBox.

    `displayDialogAsModal` is a function which blocks the calling thread,
    until a user responds to the modal dialog.
    This function should be used when an answer is required before proceeding.

    It's possible for multiple message boxes to be open at a time.
    Before opening a new messageBox, use `isModalMessageBoxActive`
    to check if another messageBox modal response is still pending.

    Because an answer is required to continue after a modal messageBox is opened,
    some actions such as shutting down are prevented while NVDA is in a possibly uncertain state.
    """
    from gui import mainFrame
    global _messageBoxCounter
    # Track open modal boxes so isModalMessageBoxActive stays accurate.
    with _messageBoxCounterLock:
        _messageBoxCounter += 1
    try:
        if not dialog.GetParent():
            # Parentless dialogs are shown relative to NVDA's main frame;
            # prePopup/postPopup keep the frame's popup state consistent.
            mainFrame.prePopup()
        res = dialog.ShowModal()
    finally:
        if not dialog.GetParent():
            mainFrame.postPopup()
        with _messageBoxCounterLock:
            _messageBoxCounter -= 1
    return res
def messageBox(
    message: str,
    caption: str = wx.MessageBoxCaptionStr,
    style: int = wx.OK | wx.CENTER,
    parent: Optional[wx.Window] = None
) -> int:
    """Display a message dialog.
    Avoid using C{wx.MessageDialog} and C{wx.MessageBox} directly.
    @param message: The message text.
    @param caption: The caption (title) of the dialog.
    @param style: Same as for wx.MessageBox.
    @param parent: The parent window.
    @return: Same as for wx.MessageBox.

    `gui.message.messageBox` is a function which blocks the calling thread,
    until a user responds to the modal dialog.
    This function should be used when an answer is required before proceeding.
    Consider using a custom subclass of a wxDialog if an answer is not required
    or a default answer can be provided.

    It's possible for multiple message boxes to be open at a time.
    Before opening a new messageBox, use `isModalMessageBoxActive`
    to check if another messageBox modal response is still pending.

    Because an answer is required to continue after a modal messageBox is opened,
    some actions such as shutting down are prevented while NVDA is in a possibly uncertain state.
    """
    from gui import mainFrame
    global _messageBoxCounter
    # Track open modal boxes so isModalMessageBoxActive stays accurate.
    with _messageBoxCounterLock:
        _messageBoxCounter += 1
    try:
        if not parent:
            # Parentless boxes attach to NVDA's main frame.
            mainFrame.prePopup()
        res = wx.MessageBox(message, caption, style, parent or mainFrame)
    finally:
        if not parent:
            mainFrame.postPopup()
        with _messageBoxCounterLock:
            _messageBoxCounter -= 1
    return res
class DisplayableError(Exception):
    """An error carrying a translated message suitable for presenting to the user."""
    OnDisplayableErrorT = extensionPoints.Action
    """
    A type of extension point used to notify a handler when an error occurs.
    This allows a handler to handle displaying an error.

    @param displayableError: Error that can be displayed to the user.
    @type displayableError: DisplayableError
    """
    def __init__(self, displayMessage: str, titleMessage: Optional[str] = None):
        """
        @param displayMessage: A translated message, to be displayed to the user.
        @param titleMessage: A translated message, to be used as a title for the display message.
        If left None, "Error" is presented as the title by default.
        """
        self.displayMessage = displayMessage
        if titleMessage is None:
            # Translators: A message indicating that an error occurred.
            self.titleMessage = _("Error")
        else:
            self.titleMessage = titleMessage
    def METHOD_NAME(self, parentWindow: wx.Window):
        """Show this error as a message box, scheduled on the GUI thread via wx.CallAfter."""
        wx.CallAfter(
            messageBox,
            message=self.displayMessage,
            caption=self.titleMessage,
            style=wx.OK | wx.ICON_ERROR,
            parent=parentWindow,
        )
7,229 | test interpolation rank0 | # Copyright (C) 2011-2022 Garth N. Wells, Jørgen S. Dokken
#
# This file is part of DOLFINx (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
"""Unit tests for the Function class"""
import importlib
import cffi
import numpy as np
import pytest
import ufl
from basix.ufl import element, mixed_element
from dolfinx.fem import (Function, FunctionSpace, TensorFunctionSpace,
VectorFunctionSpace)
from dolfinx.geometry import (bb_tree, compute_colliding_cells,
compute_collisions_points)
from dolfinx.mesh import create_mesh, create_unit_cube
from mpi4py import MPI
from dolfinx import default_real_type, la
@pytest.fixture
def mesh():
    """3x3x3 unit cube mesh shared by the tests below."""
    return create_unit_cube(MPI.COMM_WORLD, 3, 3, 3)
@pytest.fixture
def V(mesh):
    """Scalar P1 Lagrange space on the unit cube."""
    return FunctionSpace(mesh, ('Lagrange', 1))
@pytest.fixture
def W(mesh):
    """Vector P1 Lagrange space on the unit cube."""
    return VectorFunctionSpace(mesh, ('Lagrange', 1))
@pytest.fixture
def Q(mesh):
    """Tensor P1 Lagrange space on the unit cube."""
    return TensorFunctionSpace(mesh, ('Lagrange', 1))
def test_name_argument(W):
    """Functions default to the name "f"; an explicit name is kept and used by str()."""
    u = Function(W)
    v = Function(W, name="v")
    assert u.name == "f"
    assert v.name == "v"
    assert str(v) == "v"
def test_copy(V):
    """copy() must deep-copy the dof vector, not alias it."""
    u = Function(V)
    u.interpolate(lambda x: x[0] + 2 * x[1])
    v = u.copy()
    assert np.allclose(u.x.array, v.x.array)
    # mutating the original must not affect the copy
    u.x.array[:] = 1
    assert not np.allclose(u.x.array, v.x.array)
def test_eval(V, W, Q, mesh):
    """Evaluate scalar, vector and tensor functions at a point and cross-check them."""
    u1 = Function(V)
    u2 = Function(W)
    u3 = Function(Q)
    def e2(x):
        values = np.empty((3, x.shape[1]))
        values[0] = x[0] + x[1] + x[2]
        values[1] = x[0] - x[1] - x[2]
        values[2] = x[0] + x[1] + x[2]
        return values
    def e3(x):
        values = np.empty((9, x.shape[1]))
        values[0] = x[0] + x[1] + x[2]
        values[1] = x[0] - x[1] - x[2]
        values[2] = x[0] + x[1] + x[2]
        values[3] = x[0]
        values[4] = x[1]
        values[5] = x[2]
        values[6] = -x[0]
        values[7] = -x[1]
        values[8] = -x[2]
        return values
    u1.interpolate(lambda x: x[0] + x[1] + x[2])
    u2.interpolate(e2)
    u3.interpolate(e3)
    # Evaluate at the midpoint of the first two mesh vertices; the owning
    # cell is located via a bounding-box tree collision search.
    x0 = (mesh.geometry.x[0] + mesh.geometry.x[1]) / 2.0
    tree = bb_tree(mesh, mesh.geometry.dim)
    cell_candidates = compute_collisions_points(tree, x0)
    cell = compute_colliding_cells(mesh, cell_candidates, x0)
    assert len(cell) > 0
    first_cell = cell[0]
    # The first row of the tensor field equals the vector field by construction.
    assert np.allclose(u3.eval(x0, first_cell)[:3], u2.eval(x0, first_cell), rtol=1e-15, atol=1e-15)
@pytest.mark.skip_in_parallel
def test_eval_manifold():
    """Evaluate an interpolated function on a 2D manifold embedded in 3D."""
    # Simple two-triangle surface in 3d
    vertices = np.array([(0.0, 0.0, 1.0), (1.0, 1.0, 1.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0)], dtype=default_real_type)
    cells = [(0, 1, 2), (0, 1, 3)]
    domain = ufl.Mesh(element("Lagrange", "triangle", 1, gdim=3, rank=1))
    mesh = create_mesh(MPI.COMM_WORLD, cells, vertices, domain)
    Q = FunctionSpace(mesh, ("Lagrange", 1))
    u = Function(Q)
    u.interpolate(lambda x: x[0] + x[1])
    assert np.isclose(u.eval([0.75, 0.25, 0.5], 0)[0], 1.0)
def test_interpolation_mismatch_rank0(W):
    """Interpolating a scalar (rank-0) expression into a vector space must raise."""
    u = Function(W)
    with pytest.raises(RuntimeError):
        u.interpolate(lambda x: np.ones(x.shape[1]))
def test_interpolation_mismatch_rank1(W):
    """Interpolating a 2-component expression into a 3-vector space must raise."""
    u = Function(W)
    with pytest.raises(RuntimeError):
        u.interpolate(lambda x: np.ones((2, x.shape[1])))
def test_mixed_element_interpolation():
    """Interpolating into a mixed-element space is unsupported and must raise."""
    mesh = create_unit_cube(MPI.COMM_WORLD, 3, 3, 3)
    el = element("Lagrange", mesh.basix_cell(), 1)
    V = FunctionSpace(mesh, mixed_element([el, el]))
    u = Function(V)
    with pytest.raises(RuntimeError):
        # BUG FIX: the shape must be a tuple.  `np.ones(2, x.shape[1])` passed
        # x.shape[1] as the dtype argument, raising TypeError instead of
        # exercising the intended mixed-element RuntimeError path (compare the
        # correct call in test_interpolation_mismatch_rank1 above).
        u.interpolate(lambda x: np.ones((2, x.shape[1])))
def METHOD_NAME(V):
    """Interpolate a stateful scalar (rank-0) expression, then re-interpolate after mutating it."""
    class MyExpression:
        def __init__(self):
            # constant value returned at every point
            self.t = 0.0
        def eval(self, x):
            return np.full(x.shape[1], self.t)
    f = MyExpression()
    f.t = 1.0
    w = Function(V)
    w.interpolate(f.eval)
    assert (w.x.array[:] == 1.0).all()
    # for a P1 space with value 1 everywhere, the l1 norm equals the vertex count
    num_vertices = V.mesh.topology.index_map(0).size_global
    assert np.isclose(w.x.norm(la.Norm.l1) - num_vertices, 0)
    f.t = 2.0
    w.interpolate(f.eval)
    assert (w.x.array[:] == 2.0).all()
def test_interpolation_rank1(W):
    """Interpolate a constant vector expression; check dof extrema and the l1 norm."""
    def f(x):
        values = np.empty((3, x.shape[1]))
        values[0] = 1.0
        values[1] = 2.0
        values[2] = 3.0
        return values
    w = Function(W)
    w.interpolate(f)
    x = w.vector
    assert x.max()[1] == 3.0
    assert x.min()[1] == 1.0
    # each vertex contributes 1 + 2 + 3 = 6 to the l1 norm
    num_vertices = W.mesh.topology.index_map(0).size_global
    assert round(w.x.norm(la.Norm.l1) - 6 * num_vertices, 7) == 0
@pytest.mark.parametrize("types", [
    # (np.float32, "float"), # Fails on Redhat CI, needs further investigation
    (np.float64, "double")
])
def test_cffi_expression(types, V):
    """Interpolate from a cffi-compiled C kernel passed by raw function address."""
    vtype, xtype = types
    mesh = create_unit_cube(MPI.COMM_WORLD, 3, 3, 3, dtype=vtype)
    V = FunctionSpace(mesh, ('Lagrange', 1))
    code_h = f"void eval({xtype}* values, int num_points, int value_size, const {xtype}* x);"
    code_c = """
    void eval(xtype* values, int num_points, int value_size, const xtype* x)
    {
    /* x0 + x1 */
    for (int i = 0; i < num_points; ++i)
        values[i] = x[i] + x[i + num_points];
    }
    """
    code_c = code_c.replace("xtype", xtype)
    # Build the kernel
    # (module name includes the MPI rank so parallel runs do not clash)
    module = "_expr_eval" + xtype + str(MPI.COMM_WORLD.rank)
    ffi = cffi.FFI()
    ffi.set_source(module, code_c)
    ffi.cdef(code_h)
    ffi.compile()
    # Import the compiled kernel
    kernel_mod = importlib.import_module(module)
    ffi, lib = kernel_mod.ffi, kernel_mod.lib
    # Get pointer to the compiled function
    eval_ptr = ffi.cast("uintptr_t", ffi.addressof(lib, "eval"))
    # Handle C func address by hand
    f1 = Function(V, dtype=vtype)
    f1.interpolate(int(eval_ptr))
    # Cross-check against the equivalent Python expression.
    f2 = Function(V, dtype=vtype)
    f2.interpolate(lambda x: x[0] + x[1])
    f1.x.array[:] -= f2.x.array
    assert f1.x.norm() < 1.0e-12
def test_interpolation_function(mesh):
    """Interpolate one Function into another Function on an identical P1 space."""
    V = FunctionSpace(mesh, ("Lagrange", 1))
    u = Function(V)
    u.x.array[:] = 1
    Vh = FunctionSpace(mesh, ("Lagrange", 1))
    uh = Function(Vh)
    uh.interpolate(u)
    assert np.allclose(uh.x.array, 1)
7,230 | test tk err | # -*- coding: utf-8 -*-
from unittest.mock import Mock
import numpy as np
from pytest import raises
from discopy.quantum import tk
from discopy.quantum.gates import *
from discopy.tensor import Tensor, Dim
def test_Circuit_to_tk():
    """Check the tket translation of a snake (cap/cup) circuit and of measurements."""
    bell_state = Circuit.caps(qubit, qubit)
    bell_effect = bell_state[::-1]
    snake = (bell_state @ qubit >> qubit @ bell_effect)[::-1]
    tk_circ = snake.to_tk()
    assert repr(tk_circ) ==\
        'tk.Circuit(3, 2)'\
        '.H(1)'\
        '.CX(1, 2)'\
        '.CX(0, 1)'\
        '.Measure(1, 1)'\
        '.H(0)'\
        '.Measure(0, 0)'\
        '.post_select({0: 0, 1: 0})'\
        '.scale(4)'
    assert repr((CX >> Measure(2) >> Swap(bit, bit)).to_tk())\
        == "tk.Circuit(2, 2).CX(0, 1).Measure(1, 0).Measure(0, 1)"
    assert repr((Bits(0) >> Id(bit) @ Bits(0)).to_tk())\
        == "tk.Circuit(0, 2)"
    assert "Swap" in repr((Bra(0) @ Bits(0) >> Bits(0) @ Id(bit)).to_tk())
def test_Sum_from_tk():
    """Round-trip a formal sum of circuits through tket; empty input yields the empty Sum."""
    assert Circuit.from_tk(*(X + X).to_tk()) == (X + X).init_and_discard()
    assert Circuit.from_tk() == Sum([], qubit ** 0, qubit ** 0)
def METHOD_NAME():
    """Errors raised for invalid tket conversions (wrong type, unsupported gates)."""
    with raises(TypeError):
        Circuit.from_tk("foo")
    with raises(NotImplementedError):
        QuantumGate("foo", qubit, qubit, [1, 2, 3, 4]).to_tk()
    with raises(NotImplementedError):
        Bits(1).to_tk()
    with raises(NotImplementedError):
        Circuit.from_tk(tk.Circuit(3).CSWAP(0, 1, 2))
def test_Circuit_from_tk():
    """Round-trip several circuits through tket and back."""
    def back_n_forth(f):
        return Circuit.from_tk(f.to_tk())
    m = Measure(1, destructive=False, override_bits=True)
    assert back_n_forth(m) == m.init_and_discard()
    assert back_n_forth(CRx(0.5)) ==\
        Ket(0) @ Ket(0) >> CRx(0.5) >> Discard() @ Discard()
    assert back_n_forth(CRz(0.5)) ==\
        Ket(0) @ Ket(0) >> CRz(0.5) >> Discard() @ Discard()
    assert Id(qubit @ bit).init_and_discard()\
        == back_n_forth(Swap(qubit, bit)) == back_n_forth(Swap(bit, qubit))
    c = (T >> T.dagger()).init_and_discard()
    assert c == back_n_forth(c)
def test_ClassicalGate_to_tk():
    """Classical post-processing survives the tket translation."""
    post = ClassicalGate('post', bit ** 2, bit ** 0, data=[0, 0, 0, 1])
    assert (post[::-1] >> Swap(bit, bit)).to_tk().post_processing\
        == post[::-1] >> Swap(bit, bit)
    circuit = sqrt(2) @ Ket(0, 0) >> H @ Rx(0) >> CX >> Measure(2) >> post
    assert Circuit.from_tk(circuit.to_tk())[-1] == post
def test_tk_dagger():
    """Daggered phase gates map to their tket *dg counterparts."""
    assert S.dagger().to_tk() == tk.Circuit(1).Sdg(0)
    assert T.dagger().to_tk() == tk.Circuit(1).Tdg(0)
def test_Circuit_get_counts_snake():
    """Evaluating a snake circuit against a mocked backend rounds to the scalar 1."""
    compilation = Mock()
    compilation.apply = lambda x: x
    backend = tk.mockBackend({
        (0, 0): 240, (0, 1): 242, (1, 0): 271, (1, 1): 271})
    scaled_bell = Circuit.caps(qubit, qubit)
    snake = scaled_bell @ qubit >> qubit @ scaled_bell[::-1]
    result = np.round(snake.eval(
        backend=backend, compilation=compilation, measure_all=True).array)
    assert result == 1
def test_Circuit_get_counts_empty():
    """An identity circuit yields no counts from an empty mocked backend."""
    assert not Id(qubit).get_counts(backend=tk.mockBackend({}))
def test_Bra_and_Measure_to_tk():
    """Check the tket translation of a circuit mixing kets, bras and measurements."""
    boxes = [
        Ket(0), Rx(0.552), Rz(0.512), Rx(0.917), Ket(0, 0, 0), H, H, H,
        CRz(0.18), CRz(0.847), CX, H, sqrt(2), Bra(0, 0), Ket(0),
        Rx(0.446), Rz(0.256), Rx(0.177), CX, H, sqrt(2), Bra(0, 0), Measure()]
    # PEP8 (E225) fix: spaces around `=` (was `offsets=[...]`)
    offsets = [
        0, 0, 0, 0, 0, 0, 1, 2, 0, 1, 2, 2, 3, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0]
    c = Circuit.decode(qubit ** 0, zip(boxes, offsets))
    assert repr(c.to_tk()) ==\
        "tk.Circuit(5, 5)"\
        ".Rx(0.892, 0)"\
        ".H(1)"\
        ".H(2)"\
        ".H(3)"\
        ".Rx(1.104, 4)"\
        ".Rz(0.512, 0)"\
        ".CRz(0.36, 1, 2)"\
        ".Rz(1.024, 4)"\
        ".Rx(0.354, 0)"\
        ".CRz(1.694, 2, 3)"\
        ".Rx(1.834, 4)"\
        ".Measure(2, 4)"\
        ".CX(0, 1)"\
        ".CX(3, 4)"\
        ".Measure(4, 1)"\
        ".Measure(1, 3)"\
        ".H(0)"\
        ".H(3)"\
        ".Measure(3, 0)"\
        ".Measure(0, 2)"\
        ".post_select({0: 0, 1: 0, 2: 0, 3: 0})"\
        ".scale(4)"
def test_ClassicalGate_eval():
    """Evaluate a classical post-selection against mocked uniform counts."""
    backend = tk.mockBackend({
        (0, 0): 256, (0, 1): 256, (1, 0): 256, (1, 1): 256})
    post = ClassicalGate('post', bit ** 2, bit ** 0, [1, 0, 0, 0])
    assert post.eval(backend=backend) == Tensor[float]([0.25], Dim(1), Dim(1))
7,231 | test zero findings count report | from django.test import TestCase
from audit.models import SingleAuditChecklist
from .errors import err_findings_count_inconsistent
from .check_findings_count_consistency import check_findings_count_consistency
from .sac_validation_shape import sac_validation_shape
from .utils import generate_random_integer
from model_bakery import baker
class CheckFindingsCountConsistencyTests(TestCase):
    """
    Tests for `check_findings_count_consistency`: the findings count declared
    per award in the Federal Awards workbook must match the number of entries
    for that award in the Findings Uniform Guidance workbook.
    """
    AWARD_MIN = 1000
    AWARD_MAX = 2000
    FINDINGS_MIN = 1
    FINDINGS_MAX = 5
    def _make_federal_awards(self, findings_count) -> dict:
        """Build a FederalAwards section with two awards, each declaring `findings_count` findings."""
        return {
            "FederalAwards": {
                "federal_awards": [
                    {
                        "program": {"number_of_audit_findings": findings_count},
                        "award_reference": f"AWARD-{self.AWARD_MIN}",
                    },
                    {
                        "program": {"number_of_audit_findings": findings_count},
                        "award_reference": f"AWARD-{self.AWARD_MAX}",
                    },
                ]
            }
        }
    def _make_findings_uniform_guidance(self, awards, mismatch) -> dict:
        """Build a FindingsUniformGuidance section with `declared + mismatch` entries per award."""
        entries = []
        for award in awards["FederalAwards"]["federal_awards"]:
            award_reference = award["award_reference"]
            count = award["program"]["number_of_audit_findings"]
            for _ in range(count + mismatch):
                entries.append({"program": {"award_reference": award_reference}})
        # omit the entries key entirely when there are none, mirroring real submissions
        findings = (
            {
                "auditee_uei": "AAA123456BBB",
                "findings_uniform_guidance_entries": entries,
            }
            if len(entries) > 0
            else {"auditee_uei": "AAA123456BBB"}
        )
        return {"FindingsUniformGuidance": findings}
    def _make_sac(self, findings_count, mismatch=0) -> SingleAuditChecklist:
        """Bake a SingleAuditChecklist whose two workbooks differ by `mismatch` findings per award."""
        sac = baker.make(SingleAuditChecklist)
        sac.federal_awards = self._make_federal_awards(findings_count)
        sac.findings_uniform_guidance = self._make_findings_uniform_guidance(
            sac.federal_awards, mismatch
        )
        return sac
    def METHOD_NAME(self):
        """Ensure no error is returned for consistent zero findings."""
        sac = self._make_sac(0)
        errors = check_findings_count_consistency(sac_validation_shape(sac))
        self.assertEqual(errors, [])
    def test_findings_count_matches_across_workbooks(self):
        """Ensure no error is returned for consistent findings count."""
        sac = self._make_sac(
            generate_random_integer(self.FINDINGS_MIN, self.FINDINGS_MAX)
        )
        errors = check_findings_count_consistency(sac_validation_shape(sac))
        self.assertEqual(errors, [])
    def _test_findings_count_mismatch(self, base_count, mismatch):
        """Assert one error per award when declared and reported counts differ by `mismatch`."""
        sac = self._make_sac(base_count, mismatch)
        errors = check_findings_count_consistency(sac_validation_shape(sac))
        self.assertEqual(
            len(errors), len(sac.federal_awards["FederalAwards"]["federal_awards"])
        )
        for award in sac.federal_awards["FederalAwards"]["federal_awards"]:
            award_reference = award["award_reference"]
            expected_error = err_findings_count_inconsistent(
                base_count, base_count + mismatch, award_reference
            )
            self.assertIn({"error": expected_error}, errors)
    def test_reported_findings_exceed_declared_count(self):
        """
        Expect errors when the number of findings in the Federal Awards Audit Findings workbook,
        a.k.a the Findings Uniform Guidance workbook, exceeds those declared in the Federal Awards workbook.
        """
        self._test_findings_count_mismatch(
            generate_random_integer(2, 4), generate_random_integer(1, 2)
        )
    def test_declared_findings_exceed_reported_count(self):
        """
        Expect errors when the number of findings in the Federal Awards workbook
        exceeds those reported in the Federal Awards Audit Findings workbook.
        """
        self._test_findings_count_mismatch(
            generate_random_integer(2, 4), generate_random_integer(-2, -1)
        )
7,232 | start element | #!/usr/bin/env python
# encoding: utf-8
# Federico Pellegrin, 2016-2019 (fedepell) adapted for Python
"""
This tool helps with finding Python Qt5 tools and libraries,
and provides translation from QT5 files to Python code.
The following snippet illustrates the tool usage::
def options(opt):
opt.load('py pyqt5')
def configure(conf):
conf.load('py pyqt5')
def build(bld):
bld(
features = 'py pyqt5',
source = 'main.py textures.qrc aboutDialog.ui',
)
Here, the UI description and resource files will be processed
to generate code.
Usage
=====
Load the "pyqt5" tool.
Add into the sources list also the qrc resources files or ui5
definition files and they will be translated into python code
with the system tools (PyQt5, PySide2, PyQt4 are searched in this
order) and then compiled
"""
try:
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
except ImportError:
has_xml = False
ContentHandler = object
else:
has_xml = True
import os
from waflib.Tools import python
from waflib import Task, Options
from waflib.TaskGen import feature, extension
from waflib.Configure import conf
from waflib import Logs
EXT_RCC = ['.qrc']
"""
File extension for the resource (.qrc) files
"""
EXT_UI = ['.ui']
"""
File extension for the user interface (.ui) files
"""
class XMLHandler(ContentHandler):
    """
    SAX content handler which collects the ``<file>`` entries of a ``.qrc``
    resource description into :py:attr:`files`.
    """
    def __init__(self):
        # character data of the <file> element currently being read
        self.buf = []
        # resource file names collected so far
        self.files = []
    def METHOD_NAME(self, name, attrs):
        """Reset the character buffer when a ``<file>`` element opens."""
        if name != 'file':
            return
        self.buf = []
    def endElement(self, name):
        """Record the accumulated text when a ``<file>`` element closes."""
        if name != 'file':
            return
        self.files.append(str(''.join(self.buf)))
    def characters(self, cars):
        """Accumulate character data (SAX may deliver it in several chunks)."""
        self.buf.append(cars)
@extension(*EXT_RCC)
def create_pyrcc_task(self, node):
    "Creates rcc and py task for ``.qrc`` files"
    rcnode = node.change_ext('.py')
    self.create_task('pyrcc', node, rcnode)
    # install the generated .py from the build directory alongside the
    # other python sources
    if getattr(self, 'install_from', None):
        self.install_from = self.install_from.get_bld()
    else:
        self.install_from = self.path.get_bld()
    self.install_path = getattr(self, 'install_path', '${PYTHONDIR}')
    self.process_py(rcnode)
@extension(*EXT_UI)
def create_pyuic_task(self, node):
    "Create uic tasks and py for user interface ``.ui`` definition files"
    uinode = node.change_ext('.py')
    self.create_task('ui5py', node, uinode)
    # install the generated .py from the build directory alongside the
    # other python sources (mirrors create_pyrcc_task above)
    if getattr(self, 'install_from', None):
        self.install_from = self.install_from.get_bld()
    else:
        self.install_from = self.path.get_bld()
    self.install_path = getattr(self, 'install_path', '${PYTHONDIR}')
    self.process_py(uinode)
@extension('.ts')
def add_pylang(self, node):
    """Adds all the .ts file into ``self.lang`` for later processing by :py:func:`apply_pyqt5`."""
    self.lang = self.to_list(getattr(self, 'lang', [])) + [node]
@feature('pyqt5')
def apply_pyqt5(self):
    """
    The additional parameters are:
    :param lang: list of translation files (\\*.ts) to process
    :type lang: list of :py:class:`waflib.Node.Node` or string without the .ts extension
    :param langname: if given, transform the \\*.ts files into a .qrc files to include in the binary file
    :type langname: :py:class:`waflib.Node.Node` or string without the .qrc extension
    """
    if getattr(self, 'lang', None):
        # compile each .ts translation into a binary .qm file
        qmtasks = []
        for x in self.to_list(self.lang):
            if isinstance(x, str):
                x = self.path.find_resource(x + '.ts')
            qmtasks.append(self.create_task('ts2qm', x, x.change_ext('.qm')))
        if getattr(self, 'langname', None):
            # bundle all .qm outputs into one .qrc and compile it to python
            qmnodes = [k.outputs[0] for k in qmtasks]
            rcnode = self.langname
            if isinstance(rcnode, str):
                rcnode = self.path.find_or_declare(rcnode + '.qrc')
            t = self.create_task('qm2rcc', qmnodes, rcnode)
            create_pyrcc_task(self, t.outputs[0])
class pyrcc(Task.Task):
    """
    Processes ``.qrc`` files
    """
    color = 'BLUE'
    run_str = '${QT_PYRCC} ${SRC} -o ${TGT}'
    ext_out = ['.py']
    def rcname(self):
        """Return the resource name: the input file name without its extension."""
        return os.path.splitext(self.inputs[0].name)[0]
    def scan(self):
        """Parse the *.qrc* files"""
        if not has_xml:
            Logs.error('No xml.sax support was found, rcc dependencies will be incomplete!')
            return ([], [])
        parser = make_parser()
        curHandler = XMLHandler()
        parser.setContentHandler(curHandler)
        fi = open(self.inputs[0].abspath(), 'r')
        try:
            parser.parse(fi)
        finally:
            fi.close()
        nodes = []
        names = []
        root = self.inputs[0].parent
        # resolve each referenced resource against the qrc file's directory;
        # unresolved entries are reported as raw names
        for x in curHandler.files:
            nd = root.find_resource(x)
            if nd:
                nodes.append(nd)
            else:
                names.append(x)
        return (nodes, names)
class ui5py(Task.Task):
    """
    Processes ``.ui`` files for python (generates .py code via the uic tool)
    """
    color = 'BLUE'
    run_str = '${QT_PYUIC} ${SRC} -o ${TGT}'
    ext_out = ['.py']
class ts2qm(Task.Task):
    """
    Generates ``.qm`` files from ``.ts`` files (via Qt's lrelease)
    """
    color = 'BLUE'
    run_str = '${QT_LRELEASE} ${QT_LRELEASE_FLAGS} ${SRC} -qm ${TGT}'
class qm2rcc(Task.Task):
    """
    Generates ``.qrc`` files from ``.qm`` files
    """
    color = 'BLUE'
    after = 'ts2qm'
    def run(self):
        """Create a qrc file including the inputs"""
        # reference each .qm file relative to the output .qrc location
        txt = '\n'.join(['<file>%s</file>' % k.path_from(self.outputs[0].parent) for k in self.inputs])
        code = '<!DOCTYPE RCC><RCC version="1.0">\n<qresource>\n%s\n</qresource>\n</RCC>' % txt
        self.outputs[0].write(code)
def configure(self):
    """Detect the python qt5 programs (uic/rcc/lupdate and lrelease)."""
    self.find_pyqt5_binaries()
    # warn about this during the configuration too
    if not has_xml:
        Logs.error('No xml.sax support was found, rcc dependencies will be incomplete!')
@conf
def find_pyqt5_binaries(self):
    """
    Detects PyQt5 or PySide2 programs such as pyuic5/pyside2-uic, pyrcc5/pyside2-rcc
    """
    env = self.env
    # NOTE(review): the getattr fallbacks of True look inverted - add_option
    # declares default=False, so the fallback only matters when options() was
    # never run; the elif chain makes the flags mutually exclusive, first
    # match wins - confirm intended.
    if getattr(Options.options, 'want_pyqt5', True):
        self.find_program(['pyuic5'], var='QT_PYUIC')
        self.find_program(['pyrcc5'], var='QT_PYRCC')
        self.find_program(['pylupdate5'], var='QT_PYLUPDATE')
    elif getattr(Options.options, 'want_pyside2', True):
        self.find_program(['pyside2-uic'], var='QT_PYUIC')
        self.find_program(['pyside2-rcc'], var='QT_PYRCC')
        self.find_program(['pyside2-lupdate'], var='QT_PYLUPDATE')
    elif getattr(Options.options, 'want_pyqt4', True):
        self.find_program(['pyuic4'], var='QT_PYUIC')
        self.find_program(['pyrcc4'], var='QT_PYRCC')
        self.find_program(['pylupdate4'], var='QT_PYLUPDATE')
    else:
        # no explicit preference: search PyQt5 first, PySide2, then PyQt4
        self.find_program(['pyuic5','pyside2-uic','pyuic4'], var='QT_PYUIC')
        self.find_program(['pyrcc5','pyside2-rcc','pyrcc4'], var='QT_PYRCC')
        self.find_program(['pylupdate5', 'pyside2-lupdate','pylupdate4'], var='QT_PYLUPDATE')
    if not env.QT_PYUIC:
        self.fatal('cannot find the uic compiler for python for qt5')
    if not env.QT_PYRCC:
        self.fatal('cannot find the rcc compiler for python for qt5')
    self.find_program(['lrelease-qt5', 'lrelease'], var='QT_LRELEASE')
def options(opt):
    """Register the ``--pyqt5-*`` binding-selection flags on the option parser."""
    group = opt.add_option_group("Python QT5 Options")
    group.add_option('--pyqt5-pyqt5', action='store_true', default=False, dest='want_pyqt5',
        help='use PyQt5 bindings as python QT5 bindings (default PyQt5 is searched first, PySide2 after, PyQt4 last)')
    group.add_option('--pyqt5-pyside2', action='store_true', default=False, dest='want_pyside2',
        help='use PySide2 bindings as python QT5 bindings (default PyQt5 is searched first, PySide2 after, PyQt4 last)')
    group.add_option('--pyqt5-pyqt4', action='store_true', default=False, dest='want_pyqt4',
        help='use PyQt4 bindings as python QT5 bindings (default PyQt5 is searched first, PySide2 after, PyQt4 last)')
7,233 | tear down module | #!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import reduce
import unittest
import numpy
from pyscf import gto
from pyscf import scf
from pyscf import ao2mo
from pyscf import fci
from pyscf.fci import cistring, direct_spin1, direct_spin1_symm
import pyscf.symm
from pyscf.fci import fci_slow
def setUpModule():
    """Build the shared water/STO-3G reference system used by every test.

    Populates module globals: the molecule, a converged RHF object, the MO-basis
    one- and two-electron integrals, a symmetry-adapted FCI solver and a
    reproducible random symmetric CI vector.
    """
    global mol, m, h1e, g2e, ci0, cis
    global norb, nelec, orbsym
    mol = gto.Mole()
    mol.verbose = 0
    mol.atom = '''
    O 0. 0. 0.
    H 0. -0.757 0.587
    H 0. 0.757 0.587'''
    mol.basis = 'sto-3g'
    mol.symmetry = 'c2v'
    mol.build()
    m = scf.RHF(mol)
    # tight SCF convergence so downstream FCI reference values are stable
    m.conv_tol_grad = 1e-8
    ehf = m.scf()
    norb = m.mo_coeff.shape[1]
    nelec = mol.nelectron
    # one- and two-electron integrals transformed into the MO basis
    h1e = reduce(numpy.dot, (m.mo_coeff.T, scf.hf.get_hcore(mol), m.mo_coeff))
    g2e = ao2mo.incore.full(m._eri, m.mo_coeff)
    orbsym = m.orbsym
    cis = fci.direct_spin0_symm.FCISolver(mol)
    cis.orbsym = orbsym
    # fixed seed -> reproducible random symmetric CI vector for test_contract
    numpy.random.seed(15)
    na = fci.cistring.num_strings(norb, nelec//2)
    ci0 = numpy.random.random((na,na))
    ci0 = (ci0 + ci0.T) * .5
def METHOD_NAME():
    """Drop the module-level fixtures so their memory is released after the run."""
    global mol, m, h1e, g2e, ci0, cis
    del mol, m, h1e, g2e, ci0, cis
class KnownValues(unittest.TestCase):
    """Regression tests pinning reference values for the spin0-symm FCI solver."""
    def test_contract(self):
        # contract_2e restricted to one symmetry sector (wfnsym) must agree
        # with the unrestricted direct_spin1 reference on the symmetrized vector
        ci1 = fci.addons.symmetrize_wfn(ci0, norb, nelec, orbsym, wfnsym=0)
        ci1ref = direct_spin1.contract_2e(g2e, ci1, norb, nelec)
        ci1 = cis.contract_2e(g2e, ci1, norb, nelec, wfnsym=0)
        self.assertAlmostEqual(abs(ci1ref - ci1).max(), 0, 9)
        self.assertAlmostEqual(numpy.linalg.norm(ci1), 83.221199436109003, 9)
        ci1 = fci.addons.symmetrize_wfn(ci0, norb, nelec, orbsym, wfnsym=1)
        ci1ref = direct_spin1.contract_2e(g2e, ci1, norb, nelec)
        ci1 = cis.contract_2e(g2e, ci1, norb, nelec, wfnsym=1)
        self.assertAlmostEqual(abs(ci1ref - ci1).max(), 0, 9)
        self.assertAlmostEqual(numpy.linalg.norm(ci1), 82.571087072474697, 9)
        ci1 = fci.addons.symmetrize_wfn(ci0, norb, nelec, orbsym, wfnsym=3)
        ci1ref = direct_spin1.contract_2e(g2e, ci1, norb, nelec)
        ci1 = cis.contract_2e(g2e, ci1, norb, nelec, wfnsym=3)
        self.assertAlmostEqual(abs(ci1ref - ci1).max(), 0, 9)
        self.assertAlmostEqual(numpy.linalg.norm(ci1), 82.257163492625622, 9)
        ci1 = fci.addons.symmetrize_wfn(ci0, norb, nelec, orbsym, wfnsym=2)
        ci1ref = direct_spin1.contract_2e(g2e, ci1, norb, nelec)
        ci1 = cis.contract_2e(g2e, ci1, norb, nelec, wfnsym=2)
        self.assertAlmostEqual(abs(ci1ref - ci1).max(), 0, 9)
        self.assertAlmostEqual(numpy.linalg.norm(ci1), 81.010497935954916, 9)
    def test_kernel(self):
        # ground-state energy from the symmetry-adapted kernel and its
        # energy() counterpart must agree with each other ...
        e, c = fci.direct_spin0_symm.kernel(h1e, g2e, norb, nelec, orbsym=orbsym)
        self.assertAlmostEqual(e, -84.200905534209554, 8)
        e = fci.direct_spin0_symm.energy(h1e, g2e, c, norb, nelec)
        self.assertAlmostEqual(e, -84.200905534209554, 8)
        # ... and with the slow reference implementation
        eref = fci_slow.kernel(h1e, g2e, norb, nelec)
        self.assertAlmostEqual(e, eref, 9)
    def test_linearmole(self):
        # the degenerate E1ux/E1uy components of Li2 must have the same energy
        # and be related by an x<->y rotation of the molecule
        mol = gto.M(
            atom = 'Li 0 0 0; Li 0 0 2.913',
            basis = '''
#BASIS SET: (9s,4p,1d) -> [3s,2p,1d]
Li S
1469.0000000 0.0007660 -0.0001200
220.5000000 0.0058920 -0.0009230
50.2600000 0.0296710 -0.0046890
14.2400000 0.1091800 -0.0176820
4.5810000 0.2827890 -0.0489020
1.5800000 0.4531230 -0.0960090
0.5640000 0.2747740 -0.1363800
0.0734500 0.0097510 0.5751020
Li P
1.5340000 0.0227840
0.2749000 0.1391070
0.0736200 0.5003750
Li P
0.0240300 1.0000000
''',
            symmetry = True,
        )
        mf = mol.RHF().run()
        mci = fci.FCI(mol, mf.mo_coeff, singlet=True)
        ex, ci_x = mci.kernel(wfnsym='E1ux')
        ey, ci_y = mci.kernel(wfnsym='E1uy')
        self.assertAlmostEqual(ex - ey, 0, 7)
        self.assertAlmostEqual(ex - -14.79681308052051, 0, 7)
        ss, sz = mci.spin_square(ci_x, mf.mo_energy.size, mol.nelec)
        self.assertAlmostEqual(ss, 0, 6)
        # swap the x and y axes and transform ci_x accordingly: the result
        # must coincide with ci_y (up to a phase), while ci_x and ci_y are orthogonal
        swap_xy = numpy.array([
            [0, 1, 0],
            [1, 0, 0],
            [0, 0, 1],
        ])
        mo_swapxy = mol.ao_rotation_matrix(swap_xy).dot(mf.mo_coeff)
        u = mf.mo_coeff.T.dot(mf.get_ovlp()).dot(mo_swapxy)
        ci1 = fci.addons.transform_ci(ci_x, (3,3), u.T)
        self.assertAlmostEqual(ci_x.ravel().dot(ci_y.ravel()), 0, 9)
        self.assertAlmostEqual(abs(ci1.ravel().dot(ci_y.ravel())), 1, 9)
if __name__ == "__main__":
    # allow running this test module directly
    print("Full Tests for spin0 symm")
    unittest.main()
7,234 | test does identity match is set true | import pytest
from core.constants import INTEGER, STRING
from environments.identities.traits.models import Trait
from segments.models import (
EQUAL,
GREATER_THAN,
GREATER_THAN_INCLUSIVE,
IN,
IS_NOT_SET,
IS_SET,
LESS_THAN,
LESS_THAN_INCLUSIVE,
MODULO,
NOT_EQUAL,
Condition,
)
# Semver comparisons: condition values carrying the ":semver" suffix are
# compared as semantic versions (pre-release tags sort before the release).
@pytest.mark.parametrize(
    "operator, trait_value, condition_value, result",
    [
        (EQUAL, "1.0.0", "1.0.0:semver", True),
        (EQUAL, "1.0.0", "1.0.1:semver", False),
        (NOT_EQUAL, "1.0.0", "1.0.0:semver", False),
        (NOT_EQUAL, "1.0.0", "1.0.1:semver", True),
        (GREATER_THAN, "1.0.1", "1.0.0:semver", True),
        (GREATER_THAN, "1.0.0", "1.0.0-beta:semver", True),
        (GREATER_THAN, "1.0.1", "1.2.0:semver", False),
        (GREATER_THAN, "1.0.1", "1.0.1:semver", False),
        (GREATER_THAN, "1.2.4", "1.2.3-pre.2+build.4:semver", True),
        (LESS_THAN, "1.0.0", "1.0.1:semver", True),
        (LESS_THAN, "1.0.0", "1.0.0:semver", False),
        (LESS_THAN, "1.0.1", "1.0.0:semver", False),
        (LESS_THAN, "1.0.0-rc.2", "1.0.0-rc.3:semver", True),
        (GREATER_THAN_INCLUSIVE, "1.0.1", "1.0.0:semver", True),
        (GREATER_THAN_INCLUSIVE, "1.0.1", "1.2.0:semver", False),
        (GREATER_THAN_INCLUSIVE, "1.0.1", "1.0.1:semver", True),
        (LESS_THAN_INCLUSIVE, "1.0.0", "1.0.1:semver", True),
        (LESS_THAN_INCLUSIVE, "1.0.0", "1.0.0:semver", True),
        (LESS_THAN_INCLUSIVE, "1.0.1", "1.0.0:semver", False),
    ],
)
def test_does_identity_match_for_semver_values(
    identity, operator, trait_value, condition_value, result
):
    """Each operator must compare the string trait against the semver condition value."""
    # Given
    condition = Condition(operator=operator, property="version", value=condition_value)
    traits = [
        Trait(
            trait_key="version",
            string_value=trait_value,
            identity=identity,
        )
    ]
    # Then
    assert condition.does_identity_match(identity, traits) is result
# Modulo conditions are written "divisor|remainder"; non-numeric trait values
# can never match.
@pytest.mark.parametrize(
    "trait_value, condition_value, result",
    [
        (1, "2|0", False),
        (2, "2|0", True),
        (3, "2|0", False),
        (34.2, "4|3", False),
        (35.0, "4|3", True),
        ("dummy", "3|0", False),
        ("1.0.0", "3|0", False),
        (False, "1|3", False),
    ],
)
def test_does_identity_match_for_modulo_operator(
    identity, trait_value, condition_value, result
):
    """MODULO must match when trait_value % divisor equals the given remainder."""
    condition = Condition(operator=MODULO, property="user_id", value=condition_value)
    trait_value_data = Trait.generate_trait_value_data(trait_value)
    traits = [Trait(trait_key="user_id", identity=identity, **trait_value_data)]
    assert condition.does_identity_match(identity, traits) is result
def METHOD_NAME(identity):
    """IS_SET must match when a trait with the condition's key exists."""
    key = "some_property"
    is_set_condition = Condition(operator=IS_SET, property=key)
    existing_trait = Trait(trait_key=key, identity=identity)
    assert is_set_condition.does_identity_match(identity, [existing_trait]) is True
def test_does_identity_match_is_set_false(identity):
    """IS_SET must not match when the identity has no traits at all."""
    is_set_condition = Condition(operator=IS_SET, property="some_property")
    assert is_set_condition.does_identity_match(identity, []) is False
def test_does_identity_match_is_not_set_true(identity):
    """IS_NOT_SET must not match when a trait with the condition's key exists."""
    key = "some_property"
    not_set_condition = Condition(operator=IS_NOT_SET, property=key)
    existing_trait = Trait(trait_key=key, identity=identity)
    assert not_set_condition.does_identity_match(identity, [existing_trait]) is False
def test_does_identity_match_is_not_set_false(identity):
    """IS_NOT_SET must match when the identity has no traits at all."""
    not_set_condition = Condition(operator=IS_NOT_SET, property="some_property")
    assert not_set_condition.does_identity_match(identity, []) is True
# IN conditions hold a comma-separated list; string and integer traits are
# both supported, and an empty list matches nothing.
@pytest.mark.parametrize(
    "condition_value, trait_value_type, trait_string_value, trait_integer_value, expected_result",
    (
        ("", STRING, "foo", None, False),
        ("foo,bar", STRING, "foo", None, True),
        ("foo", STRING, "foo", None, True),
        ("1,2,3,4", INTEGER, None, 1, True),
        ("", INTEGER, None, 1, False),
        ("1", INTEGER, None, 1, True),
    ),
)
def test_does_identity_match_in(
    identity,
    condition_value,
    trait_value_type,
    trait_string_value,
    trait_integer_value,
    expected_result,
):
    """IN must match when the trait value appears in the comma-separated list."""
    # Given
    trait_key = "some_property"
    condition = Condition(operator=IN, property=trait_key, value=condition_value)
    traits = [
        Trait(
            trait_key=trait_key,
            identity=identity,
            value_type=trait_value_type,
            string_value=trait_string_value,
            integer_value=trait_integer_value,
        )
    ]
    # Then
    assert condition.does_identity_match(identity, traits) is expected_result
7,235 | listen | import socket
import sys
from _typeshed import ReadableBuffer
from builtins import type as Type # alias to avoid name clashes with property named "type"
from collections.abc import Iterable
from types import TracebackType
from typing import Any, BinaryIO, NoReturn, overload
from typing_extensions import TypeAlias
# These are based in socket, maybe move them out into _typeshed.pyi or such
_Address: TypeAlias = socket._Address
_RetAddress: TypeAlias = Any
_WriteBuffer: TypeAlias = bytearray | memoryview
_CMSG: TypeAlias = tuple[int, int, bytes]
class TransportSocket:
    """Type stub: read-only wrapper around the socket owned by an asyncio transport.

    Mirrors the ``socket.socket`` API; mutating operations are either forwarded
    or rejected depending on platform/version (see the sys.platform branches).
    """
    def __init__(self, sock: socket.socket) -> None: ...
    @property
    def family(self) -> int: ...
    @property
    def type(self) -> int: ...
    @property
    def proto(self) -> int: ...
    def __getstate__(self) -> NoReturn: ...
    def fileno(self) -> int: ...
    def dup(self) -> socket.socket: ...
    def get_inheritable(self) -> bool: ...
    def shutdown(self, how: int) -> None: ...
    @overload
    def getsockopt(self, level: int, optname: int) -> int: ...
    @overload
    def getsockopt(self, level: int, optname: int, buflen: int) -> bytes: ...
    @overload
    def setsockopt(self, level: int, optname: int, value: int | ReadableBuffer) -> None: ...
    @overload
    def setsockopt(self, level: int, optname: int, value: None, optlen: int) -> None: ...
    def getpeername(self) -> _RetAddress: ...
    def getsockname(self) -> _RetAddress: ...
    def getsockbyname(self) -> NoReturn: ...  # This method doesn't exist on socket, yet is passed through?
    def settimeout(self, value: float | None) -> None: ...
    def gettimeout(self) -> float | None: ...
    def setblocking(self, flag: bool) -> None: ...
    if sys.version_info < (3, 11):
        def _na(self, what: str) -> None: ...

    def accept(self) -> tuple[socket.socket, _RetAddress]: ...
    def connect(self, address: _Address) -> None: ...
    def connect_ex(self, address: _Address) -> int: ...
    def bind(self, address: _Address) -> None: ...
    if sys.platform == "win32":
        def ioctl(self, control: int, option: int | tuple[int, int, int] | bool) -> None: ...
    else:
        def ioctl(self, control: int, option: int | tuple[int, int, int] | bool) -> NoReturn: ...

    def METHOD_NAME(self, __backlog: int = ...) -> None: ...
    def makefile(self) -> BinaryIO: ...
    def sendfile(self, file: BinaryIO, offset: int = ..., count: int | None = ...) -> int: ...
    def close(self) -> None: ...
    def detach(self) -> int: ...
    if sys.platform == "linux":
        def sendmsg_afalg(
            self, msg: Iterable[ReadableBuffer] = ..., *, op: int, iv: Any = ..., assoclen: int = ..., flags: int = ...
        ) -> int: ...
    else:
        def sendmsg_afalg(
            self, msg: Iterable[ReadableBuffer] = ..., *, op: int, iv: Any = ..., assoclen: int = ..., flags: int = ...
        ) -> NoReturn: ...

    def sendmsg(
        self,
        __buffers: Iterable[ReadableBuffer],
        __ancdata: Iterable[_CMSG] = ...,
        __flags: int = ...,
        __address: _Address = ...,
    ) -> int: ...
    @overload
    def sendto(self, data: ReadableBuffer, address: _Address) -> int: ...
    @overload
    def sendto(self, data: ReadableBuffer, flags: int, address: _Address) -> int: ...
    def send(self, data: ReadableBuffer, flags: int = ...) -> int: ...
    def sendall(self, data: ReadableBuffer, flags: int = ...) -> None: ...
    def set_inheritable(self, inheritable: bool) -> None: ...
    if sys.platform == "win32":
        def share(self, process_id: int) -> bytes: ...
    else:
        def share(self, process_id: int) -> NoReturn: ...

    def recv_into(self, buffer: _WriteBuffer, nbytes: int = ..., flags: int = ...) -> int: ...
    def recvfrom_into(self, buffer: _WriteBuffer, nbytes: int = ..., flags: int = ...) -> tuple[int, _RetAddress]: ...
    def recvmsg_into(
        self, __buffers: Iterable[_WriteBuffer], __ancbufsize: int = ..., __flags: int = ...
    ) -> tuple[int, list[_CMSG], int, Any]: ...
    def recvmsg(self, __bufsize: int, __ancbufsize: int = ..., __flags: int = ...) -> tuple[bytes, list[_CMSG], int, Any]: ...
    def recvfrom(self, bufsize: int, flags: int = ...) -> tuple[bytes, _RetAddress]: ...
    def recv(self, bufsize: int, flags: int = ...) -> bytes: ...
    def __enter__(self) -> socket.socket: ...
    def __exit__(
        self, exc_type: Type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None
    ) -> None: ...
7,236 | test normalize non default types | import copy
import unittest
import geopandas
import xarray
from xcube.core.mldataset import MultiLevelDataset
from xcube.core.store.datatype import ANY_TYPE, DataTypeLike
from xcube.core.store.datatype import DATASET_TYPE
from xcube.core.store.datatype import DataType
from xcube.core.store.datatype import GEO_DATA_FRAME_TYPE
from xcube.core.store.datatype import MULTI_LEVEL_DATASET_TYPE
from xcube.util.jsonschema import JsonStringSchema
# Minimal class hierarchy used to exercise DataType sub/super-type checks:
# B derives from A, C is unrelated to both.
class A:
    pass
class B(A):
    pass
class C:
    pass
class DataTypeTest(unittest.TestCase):
    """Tests for DataType.normalize, aliasing, equality and sub/super typing."""
    def test_normalize_to_any(self):
        # 'any', None and NoneType all normalize to the catch-all ANY_TYPE
        self.assertNormalizeOk('any',
                               ANY_TYPE,
                               object,
                               'any',
                               ('any', '*', 'object', 'builtins.object'))
        self.assertNormalizeOk(None,
                               ANY_TYPE,
                               object,
                               'any',
                               ('any', '*', 'object', 'builtins.object'))
        self.assertNormalizeOk(type(None),
                               ANY_TYPE,
                               object,
                               'any',
                               ('any', '*', 'object', 'builtins.object'))
    def test_normalize_to_dataset(self):
        self.assertNormalizeOk('dataset',
                               DATASET_TYPE,
                               xarray.Dataset,
                               'dataset',
                               ('dataset',
                                'xarray.Dataset',
                                'xarray.core.dataset.Dataset'))
    def test_normalize_to_mldataset(self):
        self.assertNormalizeOk('mldataset',
                               MULTI_LEVEL_DATASET_TYPE,
                               MultiLevelDataset,
                               'mldataset',
                               ('mldataset',
                                'xcube.MultiLevelDataset',
                                'xcube.core.mldataset.MultiLevelDataset',
                                'xcube.core.mldataset.abc.MultiLevelDataset'))
    def test_normalize_to_geodataframe(self):
        self.assertNormalizeOk('geodataframe',
                               GEO_DATA_FRAME_TYPE,
                               geopandas.GeoDataFrame,
                               'geodataframe',
                               ('geodataframe',
                                'geopandas.GeoDataFrame',
                                'geopandas.geodataframe.GeoDataFrame'))
    def assertNormalizeOk(self,
                          data_type: DataTypeLike,
                          expected_data_type,
                          expected_dtype,
                          expected_alias,
                          expected_aliases):
        # helper: normalize the given spec and verify identity, dtype,
        # primary alias, repr and that every alias round-trips to the same object
        data_type = DataType.normalize(data_type)
        self.assertIs(expected_data_type, data_type)
        self.assertIs(expected_dtype, data_type.dtype)
        self.assertEqual(expected_alias, data_type.alias)
        self.assertEqual(expected_alias, str(data_type))
        self.assertEqual(f'{expected_alias!r}', repr(data_type))
        self.assertEqual(expected_aliases, data_type.aliases)
        for other_alias in data_type.aliases:
            self.assertIs(data_type,
                          DataType.normalize(other_alias))
        self.assertIs(expected_data_type,
                      DataType.normalize(expected_dtype))
        self.assertIs(expected_data_type,
                      DataType.normalize(expected_data_type))
    def METHOD_NAME(self):
        # a plain Python type without a registered alias gets a dotted name
        data_type = DataType.normalize(str)
        self.assertIs(str, data_type.dtype)
        self.assertEqual('builtins.str', data_type.alias)
    def test_normalize_failure(self):
        # unknown names and non-type objects must raise ValueError
        with self.assertRaises(ValueError) as cm:
            DataType.normalize('Gnartz')
        self.assertEqual("unknown data type 'Gnartz'",
                         f'{cm.exception}')
        with self.assertRaises(ValueError) as cm:
            # noinspection PyTypeChecker
            DataType.normalize(42)
        self.assertEqual("cannot convert 42 into a data type",
                         f'{cm.exception}')
    def test_equality(self):
        self.assertIs(DATASET_TYPE, DATASET_TYPE)
        self.assertEqual(DATASET_TYPE, DATASET_TYPE)
        self.assertEqual(copy.deepcopy(DATASET_TYPE), DATASET_TYPE)
        self.assertNotEqual(MULTI_LEVEL_DATASET_TYPE, DATASET_TYPE)
        self.assertEqual(hash(copy.deepcopy(DATASET_TYPE)), hash(DATASET_TYPE))
        self.assertNotEqual(hash(MULTI_LEVEL_DATASET_TYPE), hash(DATASET_TYPE))
    def test_is_sub_type_of(self):
        self.assertTrue(ANY_TYPE.is_sub_type_of(ANY_TYPE))
        self.assertFalse(ANY_TYPE.is_sub_type_of(DATASET_TYPE))
        self.assertFalse(ANY_TYPE.is_sub_type_of(MULTI_LEVEL_DATASET_TYPE))
        self.assertTrue(DATASET_TYPE.is_sub_type_of(ANY_TYPE))
        self.assertTrue(DATASET_TYPE.is_sub_type_of(DATASET_TYPE))
        self.assertFalse(DATASET_TYPE.is_sub_type_of(MULTI_LEVEL_DATASET_TYPE))
        a_type = DataType.normalize(A)
        b_type = DataType.normalize(B)
        c_type = DataType.normalize(C)
        self.assertTrue(a_type.is_sub_type_of(a_type))
        self.assertFalse(a_type.is_sub_type_of(b_type))
        self.assertFalse(a_type.is_sub_type_of(c_type))
        self.assertTrue(b_type.is_sub_type_of(a_type))
        self.assertTrue(b_type.is_sub_type_of(b_type))
        self.assertFalse(b_type.is_sub_type_of(c_type))
        self.assertFalse(c_type.is_sub_type_of(a_type))
        self.assertFalse(c_type.is_sub_type_of(b_type))
        self.assertTrue(c_type.is_sub_type_of(c_type))
    def test_is_super_type_of(self):
        self.assertTrue(ANY_TYPE.is_super_type_of(ANY_TYPE))
        self.assertTrue(ANY_TYPE.is_super_type_of(DATASET_TYPE))
        self.assertTrue(ANY_TYPE.is_super_type_of(DATASET_TYPE))
        self.assertTrue(ANY_TYPE.is_super_type_of(DATASET_TYPE))
        self.assertTrue(DATASET_TYPE.is_super_type_of(DATASET_TYPE))
        self.assertFalse(MULTI_LEVEL_DATASET_TYPE.is_super_type_of(DATASET_TYPE))
        a_type = DataType.normalize(A)
        b_type = DataType.normalize(B)
        c_type = DataType.normalize(C)
        self.assertTrue(a_type.is_super_type_of(a_type))
        self.assertTrue(a_type.is_super_type_of(b_type))
        self.assertFalse(a_type.is_super_type_of(c_type))
        self.assertFalse(b_type.is_super_type_of(a_type))
        self.assertTrue(b_type.is_super_type_of(b_type))
        self.assertFalse(b_type.is_super_type_of(c_type))
        self.assertFalse(c_type.is_super_type_of(a_type))
        self.assertFalse(c_type.is_super_type_of(b_type))
        self.assertTrue(c_type.is_super_type_of(c_type))
    def test_schema(self):
        self.assertIsInstance(DATASET_TYPE.get_schema(), JsonStringSchema)
7,237 | calculate vertex points | #
# cocos2d:
# http://los-cocos.github.io/cocos-site/
#
# An example of how to generate a 3D scene manually
# Of course, the easiest way is to execute an Waves3D action,
# but this example is provided to show the
# 'internals' of generating a 3D effect.
#
from __future__ import division, print_function, unicode_literals
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
import pyglet
from pyglet.gl import *
import cocos
from cocos.director import director
from cocos.euclid import Point2, Point3
import math
class Flag3D(cocos.layer.Layer):
    """Layer that splits a texture into a 20x20 grid and waves its z
    coordinates every frame to simulate a 3D flag."""
    def __init__(self):
        super(Flag3D, self).__init__()
        # load the image
        self.image = pyglet.resource.image('flag.png')
        # get the texture
        self.texture = self.image.get_texture()
        # get image size
        x, y = self.image.width, self.image.height
        # size of the grid: 20 x 20
        # The image will be split into 20 x 20 tiles
        self.grid_size = Point2(20, 20)
        # size of each tile
        self.x_step = x // self.grid_size.x
        self.y_step = y // self.grid_size.y
        # calculate vertex, textures depending on image size
        idx_pts, ver_pts_idx, tex_pts_idx = self.METHOD_NAME()
        # Generates an indexed vertex array with texture, vertex and color
        # http://www.glprogramming.com/red/chapter02.html#name6
        self.vertex_list = pyglet.graphics.vertex_list_indexed((self.grid_size.x + 1) * (self.grid_size.y + 1),
                                                               idx_pts, "t2f", "v3f/stream", "c4B")
        self.vertex_list.vertices = ver_pts_idx  # vertex points
        self.vertex_list.tex_coords = tex_pts_idx  # texels
        self.vertex_list.colors = (255, 255, 255, 255) * (self.grid_size.x + 1) * (self.grid_size.y + 1)  # colors
        # call the "step" method every frame when the layer is active
        self.schedule(self.step)
    def on_enter(self):
        super(Flag3D, self).on_enter()
        # hook on resize to override the default projection with a custom one.
        # cocos2d's default projection is also a 3d projection, but since this
        # is a demo, we show how to customize our own on_cocos_resize method.
        director.push_handlers(self.on_cocos_resize)
        # the layer is on "stage"
        self.elapsed = 0
    def on_cocos_resize(self, width, height):
        # change to our custom projection
        glViewport(0, 0, int(0.9*width), height)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluPerspective(90, 1.0 * width / height, 0.1, 400.0)
        glMatrixMode(GL_MODELVIEW)
    def draw(self):
        super(Flag3D, self).draw()
        glLoadIdentity()
        # center the image
        glTranslatef(-320, -240, -320.0)
        # enable texture
        glEnable(self.texture.target)
        glBindTexture(self.texture.target, self.texture.id)
        # draw the vertex array
        self.vertex_list.draw(GL_TRIANGLES)
        # disable the texture
        glDisable(self.texture.target)
    def step(self, dt):
        # move the z vertices with the sin(x+y) function
        # to simulate a 3D flag effect
        self.elapsed += dt
        amplitud = 32
        for i in range(0, self.grid_size.x + 1):
            for j in range(0, self.grid_size.y + 1):
                x, y, z = self.get_vertex(i, j)
                z = (math.sin(self.elapsed * math.pi * 2 + (y + x) * .01) * amplitud)
                self.set_vertex(i, j, (x, y, z))
    def METHOD_NAME(self):
        # generate the vertex array with the correct values
        # size of the texture (power of 2)
        w = float(self.image.width)//self.texture.tex_coords[3]
        h = float(self.image.height)//self.texture.tex_coords[7]
        index_points = []
        vertex_points_idx = []
        texture_points_idx = []
        # generate 2 empty lists (filled with -1 placeholders):
        # vertex_list:
        # texex_list:
        for x in range(0, self.grid_size.x + 1):
            for y in range(0, self.grid_size.y + 1):
                vertex_points_idx += [-1, -1, -1]
                texture_points_idx += [-1, -1]
        # since we are using vertex_list_indexed we must calculate
        # the index points
        for x in range(0, self.grid_size.x):
            for y in range(0, self.grid_size.y):
                x1 = x * self.x_step
                x2 = x1 + self.x_step
                y1 = y * self.y_step
                y2 = y1 + self.y_step
                # d <-- c
                #       ^
                #       |
                # a --> b
                a = x * (self.grid_size.y+1) + y
                b = (x+1) * (self.grid_size.y+1) + y
                c = (x+1) * (self.grid_size.y+1) + (y+1)
                d = x * (self.grid_size.y+1) + (y+1)
                # we are generating 2 triangles: a-b-d, b-c-d
                # (and not 1 quad, to prevent concave quads
                # although this example can work OK with quads)
                index_points += [a, b, d, b, c, d]
                l1 = (a * 3, b * 3, c * 3, d * 3)
                l2 = (Point3(x1, y1, 0), Point3(x2, y1, 0), Point3(x2, y2, 0), Point3(x1, y2, 0))
                # populate the vertex list
                for i in range(len(l1)):
                    vertex_points_idx[l1[i]] = l2[i].x
                    vertex_points_idx[l1[i] + 1] = l2[i].y
                    vertex_points_idx[l1[i] + 2] = l2[i].z
                tex1 = (a * 2, b * 2, c * 2, d * 2)
                tex2 = (Point2(x1, y1), Point2(x2, y1), Point2(x2, y2), Point2(x1, y2))
                # populate the texel list (normalized to texture size)
                for i in range(len(l1)):
                    texture_points_idx[tex1[i]] = tex2[i].x / w
                    texture_points_idx[tex1[i] + 1] = tex2[i].y / h
        return (index_points, vertex_points_idx, texture_points_idx)
    def set_vertex(self, x, y, v):
        '''Set a vertex point to a certain value

        :Parameters:
            `x` : int
                x-vertex
            `y` : int
                y-vertex
            `v` : (int, int, int)
                tuple value for the vertex
        '''
        idx = (x * (self.grid_size.y+1) + y) * 3
        self.vertex_list.vertices[idx] = v[0]
        self.vertex_list.vertices[idx+1] = v[1]
        self.vertex_list.vertices[idx+2] = v[2]
    def get_vertex(self, x, y):
        '''Get the current vertex point value

        :Parameters:
            `x` : int
                x-vertex
            `y` : int
                y-vertex

        :rtype: (int,int,int)
        '''
        idx = (x * (self.grid_size.y + 1) + y) * 3
        x = self.vertex_list.vertices[idx]
        y = self.vertex_list.vertices[idx + 1]
        z = self.vertex_list.vertices[idx + 2]
        return (x, y, z)
if __name__ == '__main__':
    director.init()
    # enable depth test so the waving grid occludes itself correctly
    director.set_depth_test()
    s = cocos.scene.Scene()
    s.add(Flag3D())
    director.run(s)
7,238 | get max iterations | """This provides a native realtime MPC and SSID framework to DART, utilizing the trajectory package to solve."""
from __future__ import annotations
import nimblephysics_libs._nimblephysics.realtime
import typing
import nimblephysics_libs._nimblephysics.simulation
import nimblephysics_libs._nimblephysics.trajectory
import numpy
_Shape = typing.Tuple[int, ...]
__all__ = [
"MPC",
"MPCLocal",
"MPCRemote",
"Ticker"
]
class MPC():
    """Type stub: abstract interface of a realtime model-predictive controller."""
    def getControlForce(self, now: int) -> numpy.ndarray[numpy.float64, _Shape[m, 1]]: ...
    def getControlForceNow(self) -> numpy.ndarray[numpy.float64, _Shape[m, 1]]: ...
    def getRemainingPlanBufferMillis(self) -> int: ...
    def recordGroundTruthState(self, time: int, pos: numpy.ndarray[numpy.float64, _Shape[m, 1]], vel: numpy.ndarray[numpy.float64, _Shape[m, 1]], mass: numpy.ndarray[numpy.float64, _Shape[m, 1]]) -> None: ...
    def recordGroundTruthStateNow(self, pos: numpy.ndarray[numpy.float64, _Shape[m, 1]], vel: numpy.ndarray[numpy.float64, _Shape[m, 1]], mass: numpy.ndarray[numpy.float64, _Shape[m, 1]]) -> None: ...
    def registerReplaningListener(self, replanListener: typing.Callable[[int, nimblephysics_libs._nimblephysics.trajectory.TrajectoryRollout, int], None]) -> None: ...
    def start(self) -> None: ...
    def stop(self) -> None: ...
    pass
class MPCLocal(MPC):
    """Type stub: MPC implementation that optimizes in-process and can also serve over gRPC."""
    def __init__(self, world: nimblephysics_libs._nimblephysics.simulation.World, loss: nimblephysics_libs._nimblephysics.trajectory.LossFn, planningHorizonMillis: int) -> None: ...
    def adjustPerformance(self, lastOptimizationTimeMillis: int) -> None: ...
    def getCurrentSolution(self) -> nimblephysics_libs._nimblephysics.trajectory.Solution: ...
    def METHOD_NAME(self) -> int: ...
    def getOptimizer(self) -> nimblephysics_libs._nimblephysics.trajectory.Optimizer: ...
    def getProblem(self) -> nimblephysics_libs._nimblephysics.trajectory.Problem: ...
    def getRemainingPlanBufferMillis(self) -> int: ...
    def optimizePlan(self, now: int) -> None: ...
    def recordGroundTruthState(self, time: int, pos: numpy.ndarray[numpy.float64, _Shape[m, 1]], vel: numpy.ndarray[numpy.float64, _Shape[m, 1]], mass: numpy.ndarray[numpy.float64, _Shape[m, 1]]) -> None: ...
    def recordGroundTruthStateNow(self, pos: numpy.ndarray[numpy.float64, _Shape[m, 1]], vel: numpy.ndarray[numpy.float64, _Shape[m, 1]], mass: numpy.ndarray[numpy.float64, _Shape[m, 1]]) -> None: ...
    def registerReplaningListener(self, replanListener: typing.Callable[[int, nimblephysics_libs._nimblephysics.trajectory.TrajectoryRollout, int], None]) -> None: ...
    def serve(self, port: int) -> None:
        """
        A blocking call - this starts a gRPC server that clients can connect to to get MPC computations done remotely
        """
    def setEnableLineSearch(self, enabled: bool) -> None: ...
    def setEnableOptimizationGuards(self, enabled: bool) -> None: ...
    def setLoss(self, loss: nimblephysics_libs._nimblephysics.trajectory.LossFn) -> None: ...
    def setMaxIterations(self, maxIterations: int) -> None: ...
    def setOptimizer(self, optimizer: nimblephysics_libs._nimblephysics.trajectory.Optimizer) -> None: ...
    def setProblem(self, problem: nimblephysics_libs._nimblephysics.trajectory.Problem) -> None: ...
    def setRecordIterations(self, enabled: bool) -> None: ...
    def setSilent(self, silent: bool) -> None: ...
    def start(self) -> None: ...
    def stop(self) -> None: ...
    pass
class MPCRemote(MPC):
    """Type stub: MPC client that forwards planning to a remote (or wrapped local) server."""
    @typing.overload
    def __init__(self, host: str, port: int, dofs: int, steps: int, millisPerStep: int) -> None: ...
    @typing.overload
    def __init__(self, local: MPCLocal, ignored: int = 0) -> None: ...
    def getControlForce(self, now: int) -> numpy.ndarray[numpy.float64, _Shape[m, 1]]: ...
    def getRemainingPlanBufferMillis(self) -> int: ...
    def recordGroundTruthState(self, time: int, pos: numpy.ndarray[numpy.float64, _Shape[m, 1]], vel: numpy.ndarray[numpy.float64, _Shape[m, 1]], mass: numpy.ndarray[numpy.float64, _Shape[m, 1]]) -> None: ...
    def recordGroundTruthStateNow(self, pos: numpy.ndarray[numpy.float64, _Shape[m, 1]], vel: numpy.ndarray[numpy.float64, _Shape[m, 1]], mass: numpy.ndarray[numpy.float64, _Shape[m, 1]]) -> None: ...
    def registerReplaningListener(self, replanListener: typing.Callable[[int, nimblephysics_libs._nimblephysics.trajectory.TrajectoryRollout, int], None]) -> None: ...
    def start(self) -> None: ...
    def stop(self) -> None: ...
    pass
class Ticker():
    """Type stub: invokes registered listeners at a fixed wall-clock interval."""
    def __init__(self, secondsPerTick: float) -> None: ...
    def clear(self) -> None: ...
    def registerTickListener(self, listener: typing.Callable[[int], None]) -> None: ...
    def start(self) -> None: ...
    def stop(self) -> None: ...
    pass
7,239 | measure prusti time | """The benchmarking functionality of `x.py`."""
import argparse
import csv
import json
import os
import signal
import sys
import subprocess
import time
from reporting import (
report, error
)
from helper_functions import (
get_env, run_command, extract_test_compile_flags
)
def get_prusti_server_path_for_benchmark():
    """Locate the release build of ``prusti-server-driver`` next to x.py (Linux only)."""
    root = os.path.dirname(os.path.realpath(sys.argv[0]))
    if sys.platform in ("linux", "linux2"):
        return os.path.join(root, 'target', 'release', 'prusti-server-driver')
    # `error` reports the failure on unsupported platforms
    error("unsupported platform for benchmarks: {}", sys.platform)
def get_prusti_rustc_path_for_benchmark():
    """Locate the release build of ``prusti-rustc`` next to x.py (Linux only)."""
    root = os.path.dirname(os.path.realpath(sys.argv[0]))
    if sys.platform in ("linux", "linux2"):
        return os.path.join(root, 'target', 'release', 'prusti-rustc')
    # `error` reports the failure on unsupported platforms
    error("unsupported platform for benchmarks: {}", sys.platform)
def METHOD_NAME(log_file, input_path, env):
    """Compile *input_path* with prusti-rustc once and return the wall-clock seconds.

    The overflow-check setting is derived from the test's directory; the exact
    command, environment and timings are appended to *log_file*.
    """
    env = dict(env)  # copy so the caller's environment is not mutated
    prusti_rustc_exe = get_prusti_rustc_path_for_benchmark()
    # the directory encodes whether overflow checks are expected
    if input_path.startswith('prusti-tests/tests/verify/pass'):
        env['PRUSTI_CHECK_OVERFLOWS'] = 'false'
    elif input_path.startswith('prusti-tests/tests/verify_overflow/pass'):
        env['PRUSTI_CHECK_OVERFLOWS'] = 'true'
    else:
        error('file in an unsupported directory: {}', input_path)
    start_time = time.perf_counter()
    flags = extract_test_compile_flags(input_path)
    command = [prusti_rustc_exe,"--edition=2018", input_path] + flags
    run_command(command, env=env)
    end_time = time.perf_counter()
    elapsed = end_time - start_time
    # record everything needed to reproduce this measurement
    log_file.write(f"command={command}\n")
    log_file.write(f"env={env}\n")
    log_file.write(f"start_time={start_time}\n")
    log_file.write(f"end_time={end_time}\n")
    log_file.write(f"elapsed={elapsed}\n\n")
    log_file.flush()
    return elapsed
def parse_args(args):
    """Parse the benchmark sub-command's arguments.

    :param args: argument list (e.g. ``sys.argv[2:]``); no process exit unless
        an argument is invalid.
    :returns: an ``argparse.Namespace`` with the benchmark settings.
    """
    parser = argparse.ArgumentParser(description="Benchmark Prusti.")
    parser.add_argument(
        "--report-name-suffix",
        default='',
        help="what suffix to add to the report file names",
    )
    parser.add_argument(
        "--warmup-path",
        default="prusti-tests/tests/verify/pass/quick/fibonacci.rs",
        help="the file to use for warm-up",
    )
    parser.add_argument(
        "--warmup-iterations",
        type=int,
        default=6,
        help="how many iterations to use for a warm-up (default: 6)",
    )
    parser.add_argument(
        "--bench-iterations",
        type=int,
        default=10,
        help="how many iterations to use for a benchmarking (default: 10)",
    )
    parser.add_argument(
        "--prusti-server-port",
        type=int,
        default=12345,
        # fixed: the help text was copy-pasted from --bench-iterations
        help="on which port to run the Prusti server (default: 12345)",
    )
    parser.add_argument(
        "--report-dir",
        default="benchmark-output",
        help="to which directory to save the report",
    )
    parser.add_argument(
        "--benchmark-csv",
        default="benchmarked-files.csv",
        help="the CSV file containing the tests to be benchmarked",
    )
    parser.add_argument(
        "--log-file",
        default="benchmark-output/log",
        help="the file to which the log is dumped; note that it is overwritten!",
    )
    return parser.parse_args(args)
def run_benchmarks(args):
    """Run the benchmarks and report the time in a json file.

    Starts a prusti-server, warms it up, benchmarks every file listed in the
    benchmark CSV, and writes the per-file timings to a timestamped JSON
    report in the report directory.
    """
    args = parse_args(args)
    prusti_server_exe = get_prusti_server_path_for_benchmark()
    server_port = str(args.prusti_server_port)
    output_dir = args.report_dir
    os.makedirs(output_dir, exist_ok=True)
    results = {}
    env = get_env()
    # Caching would hide the verification cost we are trying to measure.
    env['PRUSTI_ENABLE_CACHE'] = 'false'
    report("Starting prusti-server ({})", prusti_server_exe)
    server_args = [prusti_server_exe, "--port", server_port]
    server_process = subprocess.Popen(server_args, env=env)
    time.sleep(2)
    # BUGFIX: compare to None with `is not` — poll() returns None while the
    # child is still running, and an exit code once it has terminated.
    if server_process.poll() is not None:
        raise RuntimeError('Could not start prusti-server')
    env["PRUSTI_SERVER_ADDRESS"] = "localhost:" + server_port
    try:
        with open(args.log_file, 'w') as log_file:
            report("Starting warmup of the server")
            for i in range(args.warmup_iterations):
                t = METHOD_NAME(log_file, args.warmup_path, env)
                report("warmup run {} took {}", i + 1, t)
            report("Finished warmup. Starting benchmark")
            with open(args.benchmark_csv) as csv_file:
                csv_reader = csv.reader(csv_file, delimiter=',')
                for row in csv_reader:
                    file_path = row[0]
                    results[file_path] = []
                    report("Starting to benchmark {}", file_path)
                    for i in range(args.bench_iterations):
                        t = METHOD_NAME(log_file, file_path, env)
                        results[file_path].append(t)
    finally:
        report("terminating prusti-server")
        server_process.send_signal(signal.SIGINT)
        server_process.wait()  # reap the child so no zombie is left behind
    json_result = json.dumps(results, indent=2)
    timestamp = time.time()
    output_file = os.path.join(
        output_dir, "benchmark" + args.report_name_suffix + str(timestamp) + ".json"
    )
    with open(output_file, "w") as outfile:
        outfile.write(json_result)
    report("Wrote results of benchmark to {}", output_file)
7,240 | test lt gte multiple | from datetime import datetime
from unittest.mock import Mock
from zoneinfo import ZoneInfo
from django.test.client import RequestFactory
from django.test.utils import override_settings
from rest_framework import fields, serializers
from kitsune.sumo import api_utils
from kitsune.sumo.tests import TestCase
@override_settings(
    WIKI_DEFAULT_LANGUAGE="en-US",
    SUMO_LANGUAGES=["en-US", "es"],
    NON_SUPPORTED_LOCALES={"es-es": "es", "de": None},
)
class TestLanguageNegotiation(TestCase):
    def test_it_works(self):
        """LocaleNegotiationMixin should pick the locale from Accept-Language."""
        mixin = api_utils.LocaleNegotiationMixin()
        # Attach a request whose Accept-Language header prefers Spanish.
        mixin.request = RequestFactory().get("/", HTTP_ACCEPT_LANGUAGE="es,en-US")
        self.assertEqual(mixin.get_locale(), "es")
class TestInequalityFilterBackend(TestCase):
    def setUp(self):
        self.request = Mock()
        self.view = Mock()
        self.backend = api_utils.InequalityFilterBackend()
        self.queryset = Mock()
        # Every .filter() call hands back the same mock so calls can chain.
        self.queryset.filter.return_value = self.queryset

    def test_gt_whitelist(self):
        """gt works, and non-whitelisted variables don't get filtered."""
        self.view.filterset_fields = ["x"]
        # Only `x` is whitelisted; `y` must be ignored by the backend.
        self.request.query_params = {"x__gt": 10, "y": 5}
        self.backend.filter_queryset(self.request, self.queryset, self.view)
        self.queryset.filter.assert_called_with(x__gt=10)

    def METHOD_NAME(self):
        """Multiple whitelisted fields with gte and lt each get filtered."""
        self.view.filterset_fields = ["x", "y"]
        self.request.query_params = {"x__gte": 10, "y__lt": 5}
        self.backend.filter_queryset(self.request, self.queryset, self.view)
        # Both fields are whitelisted, so each produces its own filter() call.
        expected_calls = [("filter", (), {"x__gte": 10}), ("filter", (), {"y__lt": 5})]
        self.assertEqual(self.queryset.method_calls, expected_calls)
class TestDateTimeUTCField(TestCase):
    def test_translation_of_nonnaive(self):
        """A timezone-aware datetime is rendered as a UTC ISO-8601 string."""
        pacific_dt = datetime(2014, 11, 12, 13, 49, 59, tzinfo=ZoneInfo("US/Pacific"))
        rendered = api_utils.DateTimeUTCField().to_representation(pacific_dt)
        self.assertEqual(rendered, "2014-11-12T21:49:59Z")

    # TODO: How can naive datetime conversion be tested?
class TestPermissionMod(TestCase):
    def test_follows_permissions(self):
        # These two locals are captured by the MockPermission closures below;
        # rebinding them in the loop changes what the permission class reports.
        allow = True
        allow_obj = True
        class MockPermission(object):
            def has_permission(self, *args):
                return allow
            def has_object_permission(self, *args):
                return allow_obj
        class MockSerializer(serializers.Serializer):
            foo = api_utils.PermissionMod(fields.ReadOnlyField, [MockPermission])()
        obj = Mock()
        obj.foo = "bar"
        # If either has_permission or has_object_permission returns False,
        # then the field should act as a write_only field. Otherwise it should
        # act as a read/write field .
        cases = [
            # allow, allow_obj, expected_val, expected_write_only
            (True, True, "bar", False),
            (True, False, None, True),
            (False, True, None, True),
            (False, False, None, True),
        ]
        for case in cases:
            allow, allow_obj, expected_val, expected_write_only = case
            # NOTE(review): expected_write_only is unpacked but never asserted,
            # so the write_only half of the contract looks untested — confirm.
            serializer = MockSerializer(instance=obj)
            self.assertEqual(serializer.data.get("foo"), expected_val)
class TestJsonRenderer(TestCase):
    def test_it_works(self):
        """Plain dicts serialize to compact JSON bytes."""
        rendered = api_utils.JSONRenderer().render({"foo": "bar"})
        self.assertEqual(rendered, b'{"foo":"bar"}')

    def test_it_escapes_bracket_slash(self):
        """"</" is escaped — presumably so payloads embed safely in HTML."""
        rendered = api_utils.JSONRenderer().render({"xss": "</script>"})
        self.assertEqual(rendered, rb'{"xss":"<\/script>"}')
7,241 | settimeout | #!/usr/bin/env python
#############################################################################
#
# Module contains an implementation of SONiC Platform Base API and
# provides the Watchdog information
#
#############################################################################
import fcntl
import os
import array
try:
from sonic_platform_base.watchdog_base import WatchdogBase
except ImportError as e:
raise ImportError(str(e) + "- required module not found")
""" ioctl constants """
IO_WRITE = 0x40000000
IO_READ = 0x80000000
IO_READ_WRITE = 0xC0000000
IO_SIZE_INT = 0x00040000
IO_SIZE_40 = 0x00280000
IO_TYPE_WATCHDOG = ord('W') << 8
WDR_INT = IO_READ | IO_SIZE_INT | IO_TYPE_WATCHDOG
WDR_40 = IO_READ | IO_SIZE_40 | IO_TYPE_WATCHDOG
WDWR_INT = IO_READ_WRITE | IO_SIZE_INT | IO_TYPE_WATCHDOG
""" Watchdog ioctl commands """
WDIOC_GETSUPPORT = 0 | WDR_40
WDIOC_GETSTATUS = 1 | WDR_INT
WDIOC_GETBOOTSTATUS = 2 | WDR_INT
WDIOC_GETTEMP = 3 | WDR_INT
WDIOC_SETOPTIONS = 4 | WDR_INT
WDIOC_KEEPALIVE = 5 | WDR_INT
WDIOC_SETTIMEOUT = 6 | WDWR_INT
WDIOC_GETTIMEOUT = 7 | WDR_INT
WDIOC_SETPRETIMEOUT = 8 | WDWR_INT
WDIOC_GETPRETIMEOUT = 9 | WDR_INT
WDIOC_GETTIMELEFT = 10 | WDR_INT
""" Watchdog status constants """
WDIOS_DISABLECARD = 0x0001
WDIOS_ENABLECARD = 0x0002
WDT_COMMON_ERROR = -1
WD_MAIN_IDENTITY = "iTCO_wdt"
WDT_SYSFS_PATH = "/sys/class/watchdog/"
class Watchdog(WatchdogBase):
    """Hardware watchdog driven through the kernel's /dev/watchdogN ioctl interface.

    The main (iTCO_wdt) watchdog device is located and opened lazily on the
    first call to arm().
    """

    def __init__(self):
        # File descriptor of the opened watchdog device; None until arm().
        self.watchdog = None
        # Software-tracked armed state.
        self.armed = False

    def _is_wd_main(self, dev):
        """Return True if sysfs identifies *dev* as the main (iTCO_wdt) watchdog."""
        identity = self._read_file(
            "{}/{}/identity".format(WDT_SYSFS_PATH, dev))
        return identity == WD_MAIN_IDENTITY

    def _get_wdt(self):
        """Locate and open the main watchdog device.

        Returns:
            Tuple (fd, device_name). Both elements are None when no main
            watchdog device exists; fd alone is None when it cannot be opened.
        """
        wdt_main_dev_list = [dev for dev in os.listdir(
            "/dev/") if dev.startswith("watchdog") and self._is_wd_main(dev)]
        if not wdt_main_dev_list:
            # BUGFIX: previously returned a bare None, which crashed the
            # two-value unpacking in _set_arm() with a TypeError.
            return None, None
        wdt_main_dev_name = wdt_main_dev_list[0]
        watchdog_device_path = "/dev/{}".format(wdt_main_dev_name)
        # BUGFIX: pre-bind so the name is defined even when os.open() raises
        # and the SystemExit handler below swallows it (was a NameError).
        watchdog = None
        try:
            watchdog = os.open(watchdog_device_path, os.O_RDWR)
        except (FileNotFoundError, IOError, OSError):
            watchdog = None
        except SystemExit:
            # Kept from the original code; intent unclear — treats a
            # SystemExit during open as "device unavailable".
            pass
        return watchdog, wdt_main_dev_name

    def _read_file(self, file_path):
        """Return the stripped contents of *file_path*, or WDT_COMMON_ERROR on I/O failure."""
        try:
            with open(file_path, "r") as fd:
                txt = fd.read()
        except IOError:
            return WDT_COMMON_ERROR
        return txt.strip()

    def _enable(self):
        """Turn on the watchdog timer."""
        req = array.array('h', [WDIOS_ENABLECARD])
        fcntl.ioctl(self.watchdog, WDIOC_SETOPTIONS, req, False)

    def _disable(self):
        """Turn off the watchdog timer; returns WDT_COMMON_ERROR if no device is open."""
        if self.watchdog is None:
            return WDT_COMMON_ERROR
        req = array.array('h', [WDIOS_DISABLECARD])
        fcntl.ioctl(self.watchdog, WDIOC_SETOPTIONS, req, False)

    def _keepalive(self):
        """Feed the watchdog so its countdown restarts."""
        fcntl.ioctl(self.watchdog, WDIOC_KEEPALIVE)

    def METHOD_NAME(self, seconds):
        """
        Set watchdog timer timeout
        @param seconds - timeout in seconds
        @return is the actual set timeout
        """
        req = array.array('I', [seconds])
        fcntl.ioctl(self.watchdog, WDIOC_SETTIMEOUT, req, True)
        # The driver may round the requested value; report what it chose.
        return int(req[0])

    def _gettimeout(self, timeout_path):
        """
        Get watchdog timeout
        @return watchdog timeout, or WDT_COMMON_ERROR if no device is open
        """
        if self.watchdog is None:
            return WDT_COMMON_ERROR
        req = array.array('I', [0])
        fcntl.ioctl(self.watchdog, WDIOC_GETTIMEOUT, req, True)
        return int(req[0])

    def _gettimeleft(self):
        """
        Get time left before watchdog timer expires
        @return time left in seconds
        """
        req = array.array('I', [0])
        fcntl.ioctl(self.watchdog, WDIOC_GETTIMELEFT, req, True)
        return int(req[0])

    def _set_arm(self):
        """Open the watchdog device and cache its sysfs paths and current timeout."""
        self.watchdog, self.wdt_main_dev_name = self._get_wdt()
        self.status_path = "/sys/class/watchdog/%s/status" % self.wdt_main_dev_name
        self.state_path = "/sys/class/watchdog/%s/state" % self.wdt_main_dev_name
        self.timeout_path = "/sys/class/watchdog/%s/timeout" % self.wdt_main_dev_name
        # Start from a known-disabled state; _disable()/_gettimeout() are
        # no-ops returning WDT_COMMON_ERROR when no device could be opened.
        self._disable()
        self.timeout = self._gettimeout(self.timeout_path)

    #################################################################

    def arm(self, seconds):
        """
        Arm the hardware watchdog with a timeout of <seconds> seconds.
        If the watchdog is currently armed, calling this function will
        simply reset the timer to the provided value. If the underlying
        hardware does not support the value provided in <seconds>, this
        method should arm the watchdog with the *next greater* available
        value.
        Returns:
            An integer specifying the *actual* number of seconds the watchdog
            was armed with. On failure returns -1.
        """
        if self.watchdog is None:
            self._set_arm()
        ret = WDT_COMMON_ERROR
        if seconds < 0 or self.watchdog is None:
            return ret
        try:
            if self.timeout != seconds:
                self.timeout = self.METHOD_NAME(seconds)
            if self.armed:
                self._keepalive()
            else:
                self._enable()
                self.armed = True
            ret = self.timeout
        except IOError:
            # Leave ret at WDT_COMMON_ERROR to signal failure.
            pass
        return ret

    def disarm(self):
        """
        Disarm the hardware watchdog
        Returns:
            A boolean, True if watchdog is disarmed successfully, False if not
        """
        disarmed = False
        if self.watchdog is None:
            return disarmed
        if self.is_armed():
            try:
                self._disable()
                self.armed = False
                disarmed = True
            except IOError:
                pass
        return disarmed

    def is_armed(self):
        """
        Retrieves the armed state of the hardware watchdog.
        Returns:
            A boolean, True if watchdog is armed, False if not
        """
        return self.armed

    def get_remaining_time(self):
        """
        If the watchdog is armed, retrieve the number of seconds remaining on
        the watchdog timer
        Returns:
            An integer specifying the number of seconds remaining on the
            watchdog timer. If the watchdog is not armed, returns -1.
        """
        timeleft = WDT_COMMON_ERROR
        if self.watchdog is None:
            return WDT_COMMON_ERROR
        if self.armed:
            try:
                timeleft = self._gettimeleft()
            except IOError:
                pass
        return timeleft

    def __del__(self):
        """
        Close watchdog
        """
        if self.watchdog is not None:
            os.close(self.watchdog)
7,242 | test query user with policy as string | # -*- coding: utf-8 -*-
import pytest
import time
from .test_base_class import TestBaseClass
from aerospike import exception as e
import aerospike
@pytest.mark.skip("client.admin_query_user() is deprecated")
class TestQueryUser(TestBaseClass):
    """Integration tests for client.admin_query_user() against a secured cluster.

    Each test runs against a live Aerospike cluster; setup creates a fresh
    'example-test' user and teardown removes it again.
    """
    pytestmark = pytest.mark.skipif(
        not TestBaseClass.auth_in_use(), reason="No user specified, may be not secured cluster."
    )
    def setup_method(self, method):
        """
        Setup method
        """
        config = TestBaseClass.get_connection_config()
        TestQueryUser.Me = self
        self.client = aerospike.client(config).connect(config["user"], config["password"])
        try:
            self.client.admin_drop_user("example-test")
            # presumably allows the change to propagate through the cluster — TODO confirm
            time.sleep(1)
        except e.InvalidUser:
            pass
        user = "example-test"
        password = "foo2"
        roles = ["read-write", "sys-admin", "read"]
        try:
            self.client.admin_create_user(user, password, roles)
            time.sleep(1)
        except e.UserExistsError:
            pass
        self.delete_users = []
    def teardown_method(self, method):
        """
        Teardown method
        """
        try:
            self.client.admin_drop_user("example-test")
            time.sleep(1)
        except e.InvalidUser:
            pass
        self.client.close()
    def test_query_user_without_any_parameters(self):
        with pytest.raises(TypeError):
            self.client.admin_query_user()
    def test_query_user_with_proper_parameters(self):
        user = "example-test"
        time.sleep(2)
        # Roles come back sorted, independent of creation order.
        user_details = self.client.admin_query_user(user)
        assert user_details == ["read", "read-write", "sys-admin"]
    def test_query_user_with_invalid_timeout_policy_value(self):
        # NOTE(review): this test (and the try/except tests below) passes
        # vacuously if no exception is raised — consider pytest.raises.
        policy = {"timeout": 0.1}
        user = "example-test"
        try:
            self.client.admin_query_user(user, policy)
        except e.ParamError as exception:
            assert exception.code == -2
            assert exception.msg == "timeout is invalid"
    def test_query_user_with_proper_timeout_policy_value(self):
        policy = {"timeout": 180000}
        user = "example-test"
        time.sleep(2)
        user_details = self.client.admin_query_user(user, policy)
        assert user_details == ["read", "read-write", "sys-admin"]
    def test_query_user_with_none_username(self):
        user = None
        try:
            self.client.admin_query_user(user)
        except e.ParamError as exception:
            assert exception.code == -2
            assert exception.msg == "Username should be a string"
    def test_query_user_with_empty_username(self):
        user = ""
        try:
            self.client.admin_query_user(user)
        except e.InvalidUser as exception:
            assert exception.code == 60
            assert exception.msg == "AEROSPIKE_INVALID_USER"
    def test_query_user_with_nonexistent_username(self):
        user = "non-existent"
        try:
            self.client.admin_query_user(user)
        except e.InvalidUser as exception:
            assert exception.code == 60
            assert exception.msg == "AEROSPIKE_INVALID_USER"
    def test_query_user_with_no_roles(self):
        user = "example-test"
        roles = ["sys-admin", "read", "read-write"]
        status = self.client.admin_revoke_roles(user, roles)
        assert status == 0
        time.sleep(2)
        user_details = self.client.admin_query_user(user)
        assert user_details == []
    def test_query_user_with_extra_argument(self):
        """
        Invoke query_user() with extra argument.
        """
        with pytest.raises(TypeError) as typeError:
            self.client.admin_query_user("foo", None, "")
        assert "admin_query_user() takes at most 2 arguments (3 given)" in str(typeError.value)
    def METHOD_NAME(self):
        """
        Invoke query_user() with policy as string
        """
        policy = ""
        try:
            self.client.admin_query_user("foo", policy)
        except e.AerospikeError as exception:
            assert exception.code == -2
            assert exception.msg == "policy must be a dict"
7,243 | print errors | '''Test runner and result class for the regression test suite.
'''
import functools
import io
import sys
import time
import traceback
import unittest
import xml.etree.ElementTree as ET
from datetime import datetime
class RegressionTestResult(unittest.TextTestResult):
    """TextTestResult variant that additionally records every test as an XML
    <testcase> element (retrievable via get_xml_element()) and prints a
    compact one-word status per test when verbose."""
    separator1 = '=' * 70 + '\n'
    separator2 = '-' * 70 + '\n'
    def __init__(self, stream, descriptions, verbosity):
        # Force the base class quiet (verbosity=0); this class does its own
        # per-test console output, gated on self.__verbose below.
        super().__init__(stream=stream, descriptions=descriptions, verbosity=0)
        self.buffer = True
        self.__suite = ET.Element('testsuite')
        # NOTE(review): datetime.utcnow() is deprecated in modern Python —
        # consider datetime.now(timezone.utc).
        self.__suite.set('start', datetime.utcnow().isoformat(' '))
        self.__e = None            # <testcase> element of the test in flight
        self.__start_time = None   # perf_counter() at startTest
        # NOTE(review): __results is never used in this class — dead state?
        self.__results = []
        self.__verbose = bool(verbosity)
    @classmethod
    def __getId(cls, test):
        """Best-effort stable identifier for *test* (id() result, attr, or str)."""
        try:
            test_id = test.id
        except AttributeError:
            return str(test)
        try:
            return test_id()
        except TypeError:
            return str(test_id)
        # NOTE(review): unreachable — both branches above return.
        return repr(test)
    def startTest(self, test):
        super().startTest(test)
        # Open a fresh <testcase> element and start the per-test clock.
        self.__e = e = ET.SubElement(self.__suite, 'testcase')
        self.__start_time = time.perf_counter()
        if self.__verbose:
            self.stream.write(f'{self.getDescription(test)} ... ')
            self.stream.flush()
    def _add_result(self, test, capture=False, **args):
        """Finalize the current <testcase>: name/status/result/time attributes,
        optionally captured stdout/stderr, plus one child element per extra
        keyword (dict values become attributes, '' keys become text)."""
        e = self.__e
        self.__e = None
        if e is None:
            return
        e.set('name', args.pop('name', self.__getId(test)))
        e.set('status', args.pop('status', 'run'))
        e.set('result', args.pop('result', 'completed'))
        if self.__start_time:
            e.set('time', f'{time.perf_counter() - self.__start_time:0.6f}')
        if capture:
            if self._stdout_buffer is not None:
                stdout = self._stdout_buffer.getvalue().rstrip()
                ET.SubElement(e, 'system-out').text = stdout
            if self._stderr_buffer is not None:
                stderr = self._stderr_buffer.getvalue().rstrip()
                ET.SubElement(e, 'system-err').text = stderr
        for k, v in args.items():
            if not k or not v:
                continue
            e2 = ET.SubElement(e, k)
            if hasattr(v, 'items'):
                for k2, v2 in v.items():
                    if k2:
                        e2.set(k2, str(v2))
                    else:
                        e2.text = str(v2)
            else:
                e2.text = str(v)
    def __write(self, c, word):
        # NOTE(review): the short code `c` is accepted but never printed;
        # only the long *word* is written in verbose mode.
        if self.__verbose:
            self.stream.write(f'{word}\n')
    @classmethod
    def __makeErrorDict(cls, err_type, err_value, err_tb):
        """Convert an exc_info triple into {'type', 'message', ''(traceback)}."""
        if isinstance(err_type, type):
            if err_type.__module__ == 'builtins':
                typename = err_type.__name__
            else:
                typename = f'{err_type.__module__}.{err_type.__name__}'
        else:
            typename = repr(err_type)
        msg = traceback.format_exception(err_type, err_value, None)
        tb = traceback.format_exception(err_type, err_value, err_tb)
        return {
            'type': typename,
            'message': ''.join(msg),
            '': ''.join(tb),
        }
    def addError(self, test, err):
        self._add_result(test, True, error=self.__makeErrorDict(*err))
        super().addError(test, err)
        self.__write('E', 'ERROR')
    def addExpectedFailure(self, test, err):
        self._add_result(test, True, output=self.__makeErrorDict(*err))
        super().addExpectedFailure(test, err)
        self.__write('x', 'expected failure')
    def addFailure(self, test, err):
        self._add_result(test, True, failure=self.__makeErrorDict(*err))
        super().addFailure(test, err)
        self.__write('F', 'FAIL')
    def addSkip(self, test, reason):
        self._add_result(test, skipped=reason)
        super().addSkip(test, reason)
        self.__write('S', f'skipped {reason!r}')
    def addSuccess(self, test):
        self._add_result(test)
        super().addSuccess(test)
        self.__write('.', 'ok')
    def addUnexpectedSuccess(self, test):
        self._add_result(test, outcome='UNEXPECTED_SUCCESS')
        super().addUnexpectedSuccess(test)
        self.__write('u', 'unexpected success')
    def METHOD_NAME(self):
        """Print the accumulated ERROR and FAIL details to the stream."""
        if self.__verbose:
            self.stream.write('\n')
        self.printErrorList('ERROR', self.errors)
        self.printErrorList('FAIL', self.failures)
    def printErrorList(self, flavor, errors):
        for test, err in errors:
            self.stream.write(self.separator1)
            self.stream.write(f'{flavor}: {self.getDescription(test)}\n')
            self.stream.write(self.separator2)
            self.stream.write('%s\n' % err)
    def get_xml_element(self):
        """Return the <testsuite> element with summary counts filled in."""
        e = self.__suite
        e.set('tests', str(self.testsRun))
        e.set('errors', str(len(self.errors)))
        e.set('failures', str(len(self.failures)))
        return e
class QuietRegressionTestRunner:
    """Runner that records results without per-test console output."""

    def __init__(self, stream, buffer=False):
        result = RegressionTestResult(stream, None, 0)
        result.buffer = buffer
        self.result = result

    def run(self, test):
        test(self.result)
        return self.result
def get_test_runner_class(verbosity, buffer=False):
    """Pick a runner factory: quiet -> QuietRegressionTestRunner, else TextTestRunner."""
    if not verbosity:
        return functools.partial(QuietRegressionTestRunner, buffer=buffer)
    return functools.partial(
        unittest.TextTestRunner,
        resultclass=RegressionTestResult,
        buffer=buffer,
        verbosity=verbosity,
    )
def get_test_runner(stream, verbosity, capture_output=False):
    """Build the appropriate runner class and instantiate it on *stream*."""
    runner_factory = get_test_runner_class(verbosity, capture_output)
    return runner_factory(stream)
if __name__ == '__main__':
    # Ad-hoc smoke test: run a tiny suite through the runner machinery and
    # dump the collected XML to stdout.
    class TestTests(unittest.TestCase):
        def test_pass(self):
            pass
        def test_pass_slow(self):
            time.sleep(1.0)
        def test_fail(self):
            print('stdout', file=sys.stdout)
            print('stderr', file=sys.stderr)
            self.fail('failure message')
        def test_error(self):
            print('stdout', file=sys.stdout)
            print('stderr', file=sys.stderr)
            raise RuntimeError('error message')
    suite = unittest.TestSuite()
    # NOTE(review): unittest.makeSuite() is deprecated in modern Python —
    # consider TestLoader().loadTestsFromTestCase(TestTests).
    suite.addTest(unittest.makeSuite(TestTests))
    stream = io.StringIO()
    runner_cls = get_test_runner_class(sum(a == '-v' for a in sys.argv))
    # NOTE(review): the runner writes to sys.stdout while `stream` stays
    # empty, so the "Output:" line below always prints nothing — presumably
    # the runner was meant to receive `stream`; confirm.
    runner = runner_cls(sys.stdout)
    result = runner.run(suite)
    print('Output:', stream.getvalue())
    print('XML: ', end='')
    for s in ET.tostringlist(result.get_xml_element()):
        print(s.decode(), end='')
    print()
7,244 | test if check runs exampl e08 without | #!/usr/bin/env python3
# -*- encoding: utf-8; py-indent-offset: 4 -*-
#
# Author: Linuxfabrik GmbH, Zurich, Switzerland
# Contact: info (at) linuxfabrik (dot) ch
# https://www.linuxfabrik.ch/
# License: The Unlicense, see LICENSE file.
# https://github.com/Linuxfabrik/monitoring-plugins/blob/main/CONTRIBUTING.rst
import sys
sys.path.append("..") # Adds higher directory to python modules path.
import unittest
from lib.globals import *
import lib.base
import lib.shell # pylint: disable=C0413
class TestCheck(unittest.TestCase):
    """End-to-end tests for the apache-httpd-status check, driven by recorded
    stdout fixtures. Each EXAMPLE* scenario resets the plugin's state DB and
    replays one or more fixture snapshots."""

    check = '../apache-httpd-status'

    # Fixture matrix:
    # 01          centos 7        ext status off
    # 02a, 02b    centos 7        ext status on
    # 03          centos 8        ext status off
    # 04a, 04b    centos 8        ext status off
    # 05          fedora server   ext status off
    # 06a, 06b    fedora server   ext status on

    def _reset_state(self):
        """Remove the plugin's on-disk state DB so each scenario starts cold."""
        lib.base.coe(lib.shell.shell_exec(
            'rm -f /tmp/linuxfabrik-monitoring-plugins-apache-httpd-status.db'))

    def _run(self, example):
        """Run the check against recorded fixture *example*; return (stdout, stderr, retc)."""
        return lib.base.coe(lib.shell.shell_exec(
            self.check + ' --test=stdout/' + example + ',,'))

    def _assert_run(self, example, fragment, expected_retc):
        """Run *example* and assert the stdout fragment, empty stderr and return code."""
        stdout, stderr, retc = self._run(example)
        self.assertIn(fragment, stdout)
        self.assertEqual(stderr, '')
        self.assertEqual(retc, expected_retc)

    def test_if_check_runs_EXAMPLE01(self):
        self._reset_state()
        self._assert_run('EXAMPLE01', '2/256 workers busy (0.8%; 0 "G"), 5 idle, 249 free', STATE_OK)

    def test_if_check_runs_EXAMPLE02(self):
        self._reset_state()
        # First run has no history yet, so the check only gathers data.
        self._assert_run('EXAMPLE02a', 'Waiting for more data (1).', STATE_OK)
        self._assert_run('EXAMPLE02b', '1/256 workers busy (0.4%; 0 "G"), 7 idle, 248 free; 65.0 accesses, 49.0KiB traffic; Up 8m 30s', STATE_OK)

    def test_if_check_runs_EXAMPLE03(self):
        self._reset_state()
        self._assert_run('EXAMPLE03', '15/400 workers busy (3.8%; 0 "G"), 85 idle, 300 free', STATE_OK)

    def test_if_check_runs_EXAMPLE04(self):
        self._reset_state()
        self._assert_run('EXAMPLE04a', 'Waiting for more data (1).', STATE_OK)
        self._assert_run('EXAMPLE04b', 'Waiting for more data (3).', STATE_OK)
        self._assert_run('EXAMPLE04c', '10.117.17.40: 13/400 workers busy (3.2%; 0 "G"), 87 idle, 300 free; 547.0 accesses, 11.9MiB traffic; Up ', STATE_OK)

    def test_if_check_runs_EXAMPLE05(self):
        self._reset_state()
        self._assert_run('EXAMPLE05', '1/400 workers busy (0.2%; 0 "G"), 74 idle, 325 free', STATE_OK)

    def test_if_check_runs_EXAMPLE06(self):
        self._reset_state()
        self._assert_run('EXAMPLE06a', 'Waiting for more data (1).', STATE_OK)
        self._assert_run('EXAMPLE06b', 'Waiting for more data (3).', STATE_OK)
        # High worker usage must push the check into WARN.
        self._assert_run('EXAMPLE06c', '192.168.122.97: 325/400 workers busy (81.2% [WARNING]; 11 "G"), 75 idle, 1 free; 78.7K accesses, 8.4GiB traffic; Up ', STATE_WARN)

    def test_if_check_runs_EXAMPLE07(self):
        self._reset_state()
        self._assert_run('EXAMPLE07a', 'Waiting for more data (1).', STATE_OK)
        self._assert_run('EXAMPLE07b', 'Waiting for more data (3).', STATE_OK)
        self._assert_run('EXAMPLE07c', '192.168.122.97: 327/400 workers busy (81.8% [WARNING]; 0 "G"), 73 idle, 0 free; 6.2K accesses, 732.9MiB traffic; Up ', STATE_WARN)

    def METHOD_NAME(self):
        self._reset_state()
        self._assert_run('EXAMPLE08a', 'Waiting for more data (1).', STATE_OK)
        self._assert_run('EXAMPLE08b', 'Waiting for more data (3).', STATE_OK)
        self._assert_run('EXAMPLE08c', '192.168.122.97: 1/400 workers busy (0.2%; 0 "G"), 99 idle, 300 free; 10.0K accesses, 976.6MiB traffic; Up ', STATE_OK)
if __name__ == '__main__':
unittest.main( |
7,245 | chassis api | """ This module provides interface to interact with the chassis of the DUT
via platform API remotely """
import json
import logging
import re
logger = logging.getLogger(__name__)
# Matches inband port names of the form "Ethernet-IB<digits>" (anchored at the end).
INBAND_PORT_REGEX = (r"(Ethernet-IB)(\d+)$")
def METHOD_NAME(conn, name, args=None):
    """POST a chassis platform-API call *name* over *conn* and return its 'res' payload."""
    call_args = [] if args is None else args
    payload = json.dumps({'args': call_args})
    conn.request('POST', '/platform/chassis/{}'.format(name), payload)
    response = conn.getresponse()
    result = json.loads(response.read())['res']
    logger.info('Executing chassis API: "{}", arguments: "{}", result: "{}"'.format(name, call_args, result))
    return result
#
# Methods inherited from DeviceBase class
#
# Each wrapper below is a thin 1:1 proxy: it forwards the call of the same
# name to the DUT over *conn* via METHOD_NAME and returns the decoded result.
def get_name(conn):
    return METHOD_NAME(conn, 'get_name')
def get_presence(conn):
    return METHOD_NAME(conn, 'get_presence')
def get_model(conn):
    return METHOD_NAME(conn, 'get_model')
def get_serial(conn):
    return METHOD_NAME(conn, 'get_serial')
def get_revision(conn):
    return METHOD_NAME(conn, 'get_revision')
def get_status(conn):
    return METHOD_NAME(conn, 'get_status')
def get_position_in_parent(conn):
    return METHOD_NAME(conn, 'get_position_in_parent')
def is_replaceable(conn):
    return METHOD_NAME(conn, 'is_replaceable')
#
# Methods defined in ChassisBase class
#
# NOTE: The get_change_event() method is not represented here because there is no reliable way
# to test this method in an automated fashion.
def get_base_mac(conn):
    return METHOD_NAME(conn, 'get_base_mac')
def get_system_eeprom_info(conn):
    return METHOD_NAME(conn, 'get_system_eeprom_info')
def get_reboot_cause(conn):
    return METHOD_NAME(conn, 'get_reboot_cause')
def get_num_components(conn):
    return METHOD_NAME(conn, 'get_num_components')
def get_all_components(conn):
    return METHOD_NAME(conn, 'get_all_components')
def get_component(conn, index):
    return METHOD_NAME(conn, 'get_component', [index])
def get_num_modules(conn):
    return METHOD_NAME(conn, 'get_num_modules')
def get_all_modules(conn):
    return METHOD_NAME(conn, 'get_all_modules')
def get_module(conn, index):
    return METHOD_NAME(conn, 'get_module', [index])
def get_module_index(conn, mod_name):
    return METHOD_NAME(conn, 'get_module_index', [mod_name])
def get_num_fans(conn):
    return METHOD_NAME(conn, 'get_num_fans')
def get_all_fans(conn):
    return METHOD_NAME(conn, 'get_all_fans')
def get_fan(conn, index):
    return METHOD_NAME(conn, 'get_fan', [index])
def get_num_fan_drawers(conn):
    return METHOD_NAME(conn, 'get_num_fan_drawers')
def get_all_fan_drawers(conn):
    return METHOD_NAME(conn, 'get_all_fan_drawers')
def get_fan_drawer(conn, index):
    return METHOD_NAME(conn, 'get_fan_drawer', [index])
def get_num_psus(conn):
    return METHOD_NAME(conn, 'get_num_psus')
def get_all_psus(conn):
    return METHOD_NAME(conn, 'get_all_psus')
def get_psu(conn, index):
    return METHOD_NAME(conn, 'get_psu', [index])
def get_num_thermals(conn):
    return METHOD_NAME(conn, 'get_num_thermals')
def get_all_thermals(conn):
    return METHOD_NAME(conn, 'get_all_thermals')
def get_thermal(conn, index):
    return METHOD_NAME(conn, 'get_thermal', [index])
def get_thermal_manager(conn):
    return METHOD_NAME(conn, 'get_thermal_manager')
def get_num_sfps(conn):
    return METHOD_NAME(conn, 'get_num_sfps')
def get_all_sfps(conn):
    return METHOD_NAME(conn, 'get_all_sfps')
def get_sfp(conn, index):
    return METHOD_NAME(conn, 'get_sfp', [index])
def set_status_led(conn, color):
    return METHOD_NAME(conn, 'set_status_led', [color])
def get_status_led(conn):
    return METHOD_NAME(conn, 'get_status_led')
def get_watchdog(conn):
    return METHOD_NAME(conn, 'get_watchdog')
def get_eeprom(conn):
    return METHOD_NAME(conn, 'get_eeprom')
def get_supervisor_slot(conn):
    return METHOD_NAME(conn, 'get_supervisor_slot')
def get_my_slot(conn):
    return METHOD_NAME(conn, 'get_my_slot')
def is_modular_chassis(conn):
    return METHOD_NAME(conn, 'is_modular_chassis')
def is_inband_port(port):
    """Return True if *port* names an inband port (e.g. "Ethernet-IB0")."""
    # Idiom: collapse the if/return True/return False into a single test.
    return re.match(INBAND_PORT_REGEX, port) is not None
7,246 | crit dice gen | import copy
from typing import Callable
import d20
import draconic
from d20.utils import TreeType
import aliasing.api.statblock
from cogs5e.models.sheet.statblock import StatBlock
def maybe_alias_statblock(target):
    """Returns the AliasStatBlock for the target if applicable."""
    if not isinstance(target, (StatBlock, str, type(None))):
        raise ValueError("target must be a statblock, str, or None")
    if isinstance(target, StatBlock):
        stats = target
    else:
        # Strings and None become a placeholder statblock named after the target.
        stats = StatBlock(name=target or "Target")
    return aliasing.api.statblock.AliasStatBlock(stats)
def upcast_scaled_dice(effect, autoctx, dice_ast):
    """Scales the dice of the cast to its appropriate amount (handling cantrip scaling and higher level addition)."""
    if effect.cantripScale:
        # Dice count steps up with caster level at the visible thresholds 5/11/17.
        level = autoctx.caster.spellbook.caster_level
        if level < 5:
            level_dice = 1
        elif level < 11:
            level_dice = 2
        elif level < 17:
            level_dice = 3
        else:
            level_dice = 4
        # An explicit "cantripdice" argument overrides the computed count.
        level_dice = autoctx.args.last("cantripdice", default=level_dice, type_=int)
        def mapper(node):
            # NOTE(review): returns None for non-Dice nodes; presumably
            # d20.utils.tree_map keeps the node in that case — confirm.
            if isinstance(node, d20.ast.Dice):
                node.num = level_dice
                return node
        dice_ast = d20.utils.tree_map(mapper, dice_ast)
    if effect.higher:
        # Look up the extra dice for the actual cast level and append them.
        higher = effect.higher.get(str(autoctx.get_cast_level()))
        if higher:
            higher_ast = d20.parse(higher)
            dice_ast.roll = d20.ast.BinOp(dice_ast.roll, "+", higher_ast.roll)
    return dice_ast
def mi_mapper(minimum: int) -> Callable[[d20.ast.Node], d20.ast.Node]:
    """Returns a function that maps Dice AST objects to OperatedDice with miX attached."""

    def attach_minimum(node: d20.ast.Node):
        if not isinstance(node, d20.ast.Dice):
            return node
        selector = d20.ast.SetSelector(None, int(minimum))
        return d20.ast.OperatedDice(node, d20.ast.SetOperator("mi", [selector]))

    return attach_minimum
def max_mapper(node: d20.ast.Node):
    """A function that maps Dice AST objects to OperatedDice that set their values to their maximum."""
    if not isinstance(node, d20.ast.Dice):
        return node
    pin_to_max = d20.ast.SetOperator("mi", [d20.ast.SetSelector(None, node.size)])
    return d20.ast.OperatedDice(node, pin_to_max)
def max_add_crit_mapper(node: d20.ast.Node):
    """A function that adds the maximum value of each Dice AST node as a literal.

    :return: A tuple containing the node and a bool for if the tree mapper should continue.
    :rtype: tuple(node, bool)
    """
    if isinstance(node, d20.ast.OperatedDice):
        dice = node.value
    elif isinstance(node, d20.ast.Dice):
        dice = node
    else:
        return node, True
    bonus = d20.ast.Literal(dice.num * dice.size)
    return d20.ast.BinOp(node, "+", bonus), False
def crit_mapper(node: d20.ast.Node):
    """A function that doubles the number of dice for each Dice AST node (standard crit rule)."""
    if not isinstance(node, d20.ast.Dice):
        return node
    return d20.ast.Dice(2 * node.num, node.size)
def double_dice_crit_mapper(node: d20.ast.Node):
    """A function that replaces each Dice AST node with itself multiplied by 2.

    :return: A tuple containing the node and a bool for if the tree mapper should continue.
    :rtype: tuple(node, bool)
    """
    if not isinstance(node, (d20.ast.OperatedDice, d20.ast.Dice)):
        return node, True
    return d20.ast.BinOp(node, "*", d20.ast.Literal(2)), False
def METHOD_NAME(dice_ast: d20.ast.Node, critdice: int):
    """A function that finds the size of left most Dice AST node and generates crit dice based on that, for
    crit types that double all original dice, but not any additional crit dice. By finding the left most dice,
    we do our best to ensure its based on the weapon/source and not any other additional bonuses.

    :param dice_ast: the root of the damage dice AST to inspect (not mutated)
    :param critdice: how many extra crit dice to generate
    :return: a new ``d20.ast.Dice`` of *critdice* dice of the leftmost die's size; implicitly
        ``None`` when the leftmost node is not a Dice node
        (NOTE(review): callers presumably handle the None case — confirm)
    """
    left = d20.utils.leftmost(dice_ast)
    # if we're at the bottom of the branch and it's the dice, add *critdice*
    if isinstance(left, d20.ast.Dice):
        return d20.ast.Dice(critdice, left.size)
def critdice_tree_update(dice_ast: d20.ast.Node, critdice: int):
    """
    Modifies the AST by adding *critdice* dice to any leftmost Dice, branching recursively on any Set.

    .. note::
        This mutates the AST, so it should be copied before calling to avoid mutating the cached AST.
    """
    left = dice_ast
    # walk down the leftmost spine of the tree
    while left.children:
        # if we encounter a set going down the left branch, branch and run recursively on all children
        if isinstance(left, d20.ast.NumberSet):
            for child in left.children:
                critdice_tree_update(child, critdice)
            return
        # otherwise continue down the left branch
        left = left.children[0]
    # if we're at the bottom of the branch and it's the dice, add *critdice*
    if isinstance(left, d20.ast.Dice):
        left.num += critdice
def stringify_intexpr(evaluator, expr):
    """
    For use in str builders - use the given evaluator to return the result of the intexpr, or nan if any exception is
    caught

    :rtype: int or float
    """
    # numeric literals pass straight through untouched
    if isinstance(expr, (int, float)):
        return expr
    try:
        value = evaluator.eval(str(expr))
        return int(value)
    except (TypeError, ValueError, draconic.DraconicException):
        return float("nan")
def target_hp_or_default(target, default):
    """Returns the target's hp if defined, otherwise default"""
    if not isinstance(target, StatBlock):
        return default
    return default if target.hp is None else target.hp
def tree_map_prefix(func: Callable[[TreeType], tuple[TreeType, bool]], node: TreeType) -> TreeType:
    """
    Returns a copy of the tree, with each node replaced with ``func(node)[0]``.

    :param func: A transformer function that takes a node and returns a tuple (replacement,
        continue_operations_on_children).
    :param node: The root of the tree to transform.
    :return: the transformed copy of the tree (the value ``func`` returned for the root).
    """
    # shallow-copy the node so the original tree is never mutated
    copied = copy.copy(node)
    operated, continue_operations_on_children = func(copied)
    if not continue_operations_on_children:
        # we still recurse on the children so that it satisfies the "returns a copy of the tree" property
        # so make the function a no-op
        func = lambda x: (x, True)  # noqa E731
    # NOTE(review): children are re-attached to *copied*, not *operated*; this presumably
    # relies on func wrapping *copied* (e.g. BinOp(copied, ...)) so the copied children
    # stay reachable from the returned node — confirm with callers.
    for i, child in enumerate(copied.children):
        copied.set_child(i, tree_map_prefix(func, child))
    return operated
7,247 | encode question | # # TODO: allow google to show the image
#
# from WolframGoogle import WolframGoogle
#
# google = WolframGoogle()
#
# r = google.get_result_from_google_directly('What is the molecular weight of Ch4')
# print(r)
import re
# HTML skeleton used to preview Google's visual answer box locally;
# %s is substituted with the extracted result fragment.
html_template = '''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/bootstrap@4.5.3/dist/css/bootstrap.min.css" integrity="sha384-TX8t27EcRE3e/ihU7zmQxVncDAy5uIKz4rEkgIXeMed4M0jlfIDPvg6uqKI2xXr2" crossorigin="anonymous">
<title>Test HTML for google visual results</title>
</head>
<style>
.kp-header { font-size:20px; font-family: arial; width: 600px }
.headings { font-size:20px; font-family: arial; width: 600px}
</style>
<body>
<span class="rounded">%s</span>
<br/>
</body>
</html>
'''
# Fix the "president of china problem"
# question = 'who is the president of china'
# question = 'where is the capital of china'
# question = 'what is the molecular weight of benzene'
question = 'what is the boiling point of water'
# question = 'what is the heat capacity of H2SO4'


def METHOD_NAME(q):
    """Encode a question for a Google search URL by joining its words with '+'.

    Note: spaces only — no percent-escaping of other characters.
    """
    return '+'.join(q.split(' '))


encoded_question = METHOD_NAME(question)
print('encoded question', encoded_question)
def get_wp_tabs_container(html):
    """Return the first div with id 'wp-tabs-container' from the soup, or '' if none exists."""
    tabs = html.find_all('div', id='wp-tabs-container')
    # print('id="wp-tabs-container"', tabs)
    return tabs[0] if tabs else ''
def replace_text(html):
    """Strip Google's 'Featured snippet from the web' label from every matching text node."""
    for node in html.find_all(text=re.compile('Featured snippet from the web')):
        node.replace_with(node.replace('Featured snippet from the web', ''))
    return html
def extract_key_components(html):
    """Find the result heading inside Google's answer box and re-tag it with our own CSS class.

    The heading is the first element carrying ``role="heading"``. Does nothing when no
    heading is present.
    """
    # find the key components, only extract the content of them. Discard all the wrappers
    # find the result heading, it is the first element with the role heading
    headings = html.find_all('div', role='heading')
    if len(headings) > 0:
        key_head = headings[0]
        # change its class to one of ours (the .headings rule defined in html_template).
        # BUG FIX: the original assigned the element itself (key_head['class'] = key_head)
        # instead of a class name.
        key_head['class'] = 'headings'
        print('the key head extracted', key_head)
def process_head_result(html):
    """Re-style the headings and key value inside Google's answer box fragment.

    Returns ``None`` when the fragment starts with 'People also ask' (meaning there is no
    direct answer and the content must come from elsewhere).
    NOTE(review): on the success path there is no explicit return, so the function also
    yields ``None`` after mutating *html* in place — the commented-out caller below
    assigns the result, which looks unintended; confirm before reuse.
    """
    # the first child's text tells us what kind of result box this is
    head = html.findChild().text.strip()
    # print(head)
    if 'People also ask' == head:  # It means we have a problem. The main content comes from the wp-content
        return None
    else:
        # re-tag every heading with our own CSS class
        headings = html.find_all('div', role='heading')
        for h in headings:
            # print(h)
            h['class'] = 'border headings'
        kp_header = html.find_all('div', class_='kp-header')
        if len(kp_header) > 0:
            kp_header = kp_header[0]
            kp_header['style'] = '{ font-size:20px; font-family: arial }'
            # the answer value sits two levels down inside the kp-header
            value = kp_header.findChild().findChild()
            value['style'] = '{ font-family: arial; font-size: 40px}'
            # =============== find the key image =================
            # NOTE(review): find_all('') matches nothing and the result is unused — dead code
            key_image = html.find_all('')
            # value.decompose()
            # remove this part first ...
            # find the key image if there is one, first remove it
# Build the Google search URL from the pre-encoded question.
url = 'https://www.google.com/search?q=%s' % encoded_question

from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from bs4 import BeautifulSoup

# TODO: hide the firefox browser
# driver = webdriver.Firefox()
# Run Firefox headless so no browser window appears.
options = Options()
options.add_argument('--headless')
driver = webdriver.Firefox(options=options)
driver.get(url)
# Grab the fully-rendered page HTML (after JS has run).
# NOTE(review): find_element_by_tag_name is the legacy Selenium 3 API,
# removed in Selenium 4 — confirm the pinned selenium version.
html = driver.find_element_by_tag_name('html').get_attribute('innerHTML')
# print(html)
soup = BeautifulSoup(html, 'html.parser')
# 'ifM9O' is the (obfuscated, fragile) class Google uses for the answer box container.
div_result = soup.find_all('div', class_='ifM9O')[0]
extract_key_components(div_result)
# tbs = get_wp_tabs_container(soup)
# soup = replace_text(soup)
#
# div_result = soup.find_all('div', class_='ifM9O')[0]
#
# html = process_head_result(div_result)
#
# # html_content = html_template % div_result.encode('utf-8').decode('utf-8')
# html_content = html_template % (div_result)
# with open('show.html', 'w', encoding='utf-8') as f:
#     f.write(html_content)
# images = soup.find_all('g-img')
# rst = ''
# for img in images:
#     print(img)
driver.quit( |
7,248 | send request | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, TYPE_CHECKING
from azure.core.rest import HttpRequest, HttpResponse
from azure.mgmt.core import ARMPipelineClient
from . import models as _models
from ._configuration import AgriFoodMgmtClientConfiguration
from ._serialization import Deserializer, Serializer
from .operations import (
ExtensionsOperations,
FarmBeatsExtensionsOperations,
FarmBeatsModelsOperations,
LocationsOperations,
Operations,
PrivateEndpointConnectionsOperations,
PrivateLinkResourcesOperations,
SolutionsDiscoverabilityOperations,
SolutionsOperations,
)
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class AgriFoodMgmtClient:  # pylint: disable=client-accepts-api-version-keyword,too-many-instance-attributes
    """APIs documentation for Azure AgFoodPlatform Resource Provider Service.

    :ivar extensions: ExtensionsOperations operations
    :vartype extensions: azure.mgmt.agrifood.operations.ExtensionsOperations
    :ivar farm_beats_extensions: FarmBeatsExtensionsOperations operations
    :vartype farm_beats_extensions: azure.mgmt.agrifood.operations.FarmBeatsExtensionsOperations
    :ivar farm_beats_models: FarmBeatsModelsOperations operations
    :vartype farm_beats_models: azure.mgmt.agrifood.operations.FarmBeatsModelsOperations
    :ivar locations: LocationsOperations operations
    :vartype locations: azure.mgmt.agrifood.operations.LocationsOperations
    :ivar operations: Operations operations
    :vartype operations: azure.mgmt.agrifood.operations.Operations
    :ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations
    :vartype private_endpoint_connections:
     azure.mgmt.agrifood.operations.PrivateEndpointConnectionsOperations
    :ivar private_link_resources: PrivateLinkResourcesOperations operations
    :vartype private_link_resources: azure.mgmt.agrifood.operations.PrivateLinkResourcesOperations
    :ivar solutions: SolutionsOperations operations
    :vartype solutions: azure.mgmt.agrifood.operations.SolutionsOperations
    :ivar solutions_discoverability: SolutionsDiscoverabilityOperations operations
    :vartype solutions_discoverability:
     azure.mgmt.agrifood.operations.SolutionsDiscoverabilityOperations
    :param credential: Credential needed for the client to connect to Azure. Required.
    :type credential: ~azure.core.credentials.TokenCredential
    :param solution_id: Solution Id of the solution. Required.
    :type solution_id: str
    :param subscription_id: The ID of the target subscription. The value must be an UUID. Required.
    :type subscription_id: str
    :param base_url: Service URL. Default value is "https://management.azure.com".
    :type base_url: str
    :keyword api_version: Api Version. Default value is "2021-09-01-preview". Note that overriding
     this default value may result in unsupported behavior.
    :paramtype api_version: str
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    """

    def __init__(
        self,
        credential: "TokenCredential",
        solution_id: str,
        subscription_id: str,
        base_url: str = "https://management.azure.com",
        **kwargs: Any
    ) -> None:
        # Build the shared configuration and ARM pipeline used by every operation group.
        self._config = AgriFoodMgmtClientConfiguration(
            credential=credential, solution_id=solution_id, subscription_id=subscription_id, **kwargs
        )
        self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)

        # One serializer/deserializer pair, fed with every model class in the package,
        # is shared by all operation groups below.
        client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)
        # validation is handled server-side for generated clients
        self._serialize.client_side_validation = False
        self.extensions = ExtensionsOperations(self._client, self._config, self._serialize, self._deserialize)
        self.farm_beats_extensions = FarmBeatsExtensionsOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.farm_beats_models = FarmBeatsModelsOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.locations = LocationsOperations(self._client, self._config, self._serialize, self._deserialize)
        self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
        self.private_endpoint_connections = PrivateEndpointConnectionsOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.private_link_resources = PrivateLinkResourcesOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.solutions = SolutionsOperations(self._client, self._config, self._serialize, self._deserialize)
        self.solutions_discoverability = SolutionsDiscoverabilityOperations(
            self._client, self._config, self._serialize, self._deserialize
        )

    def METHOD_NAME(self, request: HttpRequest, **kwargs: Any) -> HttpResponse:
        """Runs the network request through the client's chained policies.

        >>> from azure.core.rest import HttpRequest
        >>> request = HttpRequest("GET", "https://www.example.org/")
        <HttpRequest [GET], url: 'https://www.example.org/'>
        >>> response = client._send_request(request)
        <HttpResponse: 200 OK>

        For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request

        :param request: The network request you want to make. Required.
        :type request: ~azure.core.rest.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.rest.HttpResponse
        """
        # copy so the caller's request object is never mutated by URL formatting
        request_copy = deepcopy(request)
        request_copy.url = self._client.format_url(request_copy.url)
        return self._client.send_request(request_copy, **kwargs)

    def close(self) -> None:
        # Release the underlying pipeline/transport resources.
        self._client.close()

    def __enter__(self) -> "AgriFoodMgmtClient":
        self._client.__enter__()
        return self

    def __exit__(self, *exc_details) -> None:
        self._client.__exit__(*exc_details)
7,249 | set job os |
# Copyright 2016 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals, absolute_import
from ci import models
import re
def set_job_modules(job, output):
    """
    The output has the following format:
      Currently Loaded Modulefiles:
        1) module1
        2) module2
        ...
    OR
      Currently Loaded Modules:
        1) module1
        2) module2
        ...
    Records each loaded module on the job; records "None" when nothing was loaded.
    """
    # try both header variants emitted by the two module systems
    lines_match = None
    for header in ("Currently Loaded Modulefiles:", "Currently Loaded Modules:"):
        lines_match = re.search(r"(?<=^%s$)(\s+\d+\) (.*))+" % header, output, flags=re.MULTILINE)
        if lines_match:
            break

    if not lines_match:
        mod_obj, created = models.LoadedModule.objects.get_or_create(name="None")
        job.loaded_modules.add(mod_obj)
        return

    # Assume that the module names don't have whitespace. Then "split" will have the module
    # names alternating with the "\d+)" tokens, so take every other entry starting at 1.
    mod_list = lines_match.group(0).split()[1::2]
    for mod in mod_list:
        mod_obj, created = models.LoadedModule.objects.get_or_create(name=mod)
        job.loaded_modules.add(mod_obj)
    if not mod_list:
        mod_obj, created = models.LoadedModule.objects.get_or_create(name="None")
        job.loaded_modules.add(mod_obj)
def output_os_search(job, output, name_re, version_re, other_re):
    """
    Search the output for OS information.
    If all the OS information is found then update the job with
    the appropriate record.

    Returns:
        bool: True if the job OS was set, otherwise False
    """
    name_match = re.search(name_re, output, flags=re.MULTILINE)
    if name_match is None:
        return False
    version_match = re.search(version_re, output, flags=re.MULTILINE)
    other_match = re.search(other_re, output, flags=re.MULTILINE)
    if not (version_match and other_match):
        return False
    # all three pieces found: look up (or create) the matching OS record
    os_record, created = models.OSVersion.objects.get_or_create(
        name=name_match.group(1).strip(),
        version=version_match.group(1).strip(),
        other=other_match.group(1).strip(),
    )
    job.operating_system = os_record
    return True
def METHOD_NAME(job, output):
    """
    Goes through a series of possible OSes.
    If no match was found then set the job OS to "Other"
    """
    probes = (
        # output of "lsb_release -a" (Linux)
        (r"^Distributor ID:\s+(.+)$", r"^Release:\s+(.+)$", r"^Codename:\s+(.+)$"),
        # output of "systeminfo |grep '^OS'" (Windows)
        (r"^OS Name:\s+(.+)$", r"^OS Version:\s+(.+)$", r"^OS Configuration:\s+(.+)$"),
        # output of "sw_vers" (macOS)
        (r"^ProductName:\s+(.+)$", r"^ProductVersion:\s+(.+)$", r"^BuildVersion:\s+(.+)$"),
    )
    for name_re, version_re, other_re in probes:
        if output_os_search(job, output, name_re, version_re, other_re):
            return
    # No OS found
    os_record, created = models.OSVersion.objects.get_or_create(name="Other")
    job.operating_system = os_record
def set_job_stats(job):
    """Aggregate passed/failed/skipped test counts from every step result and store them on the job."""
    if not job.step_results.exists():
        return
    passed = 0
    failed = 0
    skipped = 0
    for s in job.step_results.all():
        # the raw output uses <br/> line breaks; normalize to newlines before matching
        output = "\n".join(s.clean_output().split("<br/>"))
        matches = re.findall(r'>(?P<passed>\d+) passed<.*, .*>(?P<skipped>\d+) skipped<.*, .*>(?P<failed>\d+) failed',
            output, flags=re.IGNORECASE)
        for match in matches:
            # groups are positional in pattern order: (passed, skipped, failed)
            passed += int(match[0])
            failed += int(match[2])
            skipped += int(match[1])
    # replace any previously recorded statistics
    job.test_stats.all().delete()
    if passed or failed or skipped:
        models.JobTestStatistics.objects.create(job=job, passed=passed, failed=failed, skipped=skipped)
def set_job_info(job):
    """
    Sets the modules and OS of the job by scanning the output of
    the first StepResult for the job. It is assumed that all steps
    will have the same modules and OS.
    """
    first_result = job.step_results.first()
    output = first_result.output if first_result else ""
    # reset previous info before re-scanning
    job.loaded_modules.clear()
    job.operating_system = None
    set_job_modules(job, output)
    METHOD_NAME(job, output)
    job.save()
    set_job_stats(job)
7,250 | delete prereq | #!/usr/bin/env python
import os
import sys
import json
import pprint
import argparse
import boto3
import misc
import certs
import thing
import policy
pp = pprint.PrettyPrinter(indent=4)
def check_aws_configuration():
    """Exit with an error message if the AWS CLI has never been configured (no profiles exist)."""
    session = boto3.session.Session()
    if not session._session._config['profiles']:
        print("AWS not configured. Please run `aws configure`.")
        sys.exit(1)
def _write_readonly_file(filename, contents):
    """Write *contents* to *filename*, then mark the file read-only (0444)."""
    with open(filename, 'w') as f:
        f.write(contents)
    os.chmod(os.path.abspath(filename), 0o444)


def prereq():
    """Create the AWS IoT prerequisites (thing, certificate, policy) described by configure.json.

    The certificate id, certificate PEM and private key PEM are saved to read-only local
    files named after the thing, for later use by update_credential_file()/cleanup.
    """
    with open('configure.json') as configure_file:
        json_text = json.load(configure_file)

    # Create a Thing
    thing_name = json_text['thing_name']
    thing_obj = thing.Thing(thing_name)
    # NOTE: resources are only provisioned when create() returns falsy
    # (i.e. the thing did not already exist) — preserved from the original.
    if not thing_obj.create():
        # Create a Certificate
        cert_obj = certs.Certificate()
        result = cert_obj.create()

        # Store the cert id, certificate PEM and private key PEM as read-only files.
        # (Originally triplicated open/write/chmod/close without context managers,
        # which leaked file handles on error.)
        _write_readonly_file(thing_name + '_cert_id_file.txt', result['certificateId'])
        _write_readonly_file(thing_name + '_cert_pem_file.pem', result['certificatePem'])
        _write_readonly_file(thing_name + '_private_key_pem_file.pem', result['keyPair']['PrivateKey'])

        # Create a Policy
        policy_document = misc.create_policy_document()
        policy_name = thing_name + '_amazon_freertos_policy'
        policy_obj = policy.Policy(policy_name, policy_document)
        policy_obj.create()

        # Attach certificate to Thing
        cert_obj.attach_thing(thing_name)
        # Attach policy to certificate
        cert_obj.attach_policy(policy_name)
def _read_required_file(filename):
    """Return the contents of *filename*; print a hint and exit if it does not exist."""
    try:
        with open(filename, 'r') as f:
            return f.read()
    except IOError:
        print("{} file not found. Run prerequisite step"
              .format(filename))
        sys.exit(1)


def update_credential_file():
    """Write the stored certificate and private key into the demo's 'demo_config.h'.

    Reads the PEM files produced by prereq(); exits with a hint if they are missing.
    """
    with open('configure.json') as configure_file:
        json_text = json.load(configure_file)

    source_dir = os.path.expanduser(json_text['FreeRTOS_source_dir'])
    thing_name = json_text['thing_name']

    # Read the certificate and private key saved by prereq().
    # (Originally duplicated try/open/except blocks without closing the files.)
    cert_pem = _read_required_file(thing_name + '_cert_pem_file.pem')
    private_key_pem = _read_required_file(thing_name + '_private_key_pem_file.pem')

    # Modify 'demo_config.h' file
    misc.write_client_credentials(
        source_dir,
        thing_name=thing_name,
        client_certificate_pem=cert_pem,
        client_private_key_pem=private_key_pem,
        cleanup=False)
def _force_remove(filename):
    """Make *filename* writable again and delete it; no-op when it does not exist."""
    if os.path.exists(filename):
        os.chmod(os.path.abspath(filename), 0o666)
        os.remove(filename)


def METHOD_NAME():
    """Delete the AWS IoT prerequisites (thing, certificate, policy) and their local files."""
    with open('configure.json') as configure_file:
        json_text = json.load(configure_file)

    # Delete Thing
    thing_name = json_text['thing_name']
    thing_obj = thing.Thing(thing_name)
    if thing_obj.exists():
        thing_obj.delete()

    # Delete certificate: its id was stored in a read-only file by prereq().
    cert_id_filename = thing_name + '_cert_id_file.txt'
    if os.path.exists(cert_id_filename):
        with open(cert_id_filename, 'r') as cert_id_file:
            cert_id = cert_id_file.read()
        certs.Certificate(cert_id).delete()
        _force_remove(cert_id_filename)

    # Delete cert_pem file and private_key_pem file
    _force_remove(thing_name + '_cert_pem_file.pem')
    _force_remove(thing_name + '_private_key_pem_file.pem')

    # Delete policy
    policy_name = thing_name + '_amazon_freertos_policy'
    policy_obj = policy.Policy(policy_name)
    if policy_obj.exists():
        policy_obj.delete()
def cleanup_creds():
    """Restore 'demo_config.h' to its credential-free state."""
    with open('configure.json') as config_file:
        config = json.load(config_file)
    source_dir = os.path.expanduser(config['FreeRTOS_source_dir'])
    # Cleanup 'demo_config.h' file
    misc.write_client_credentials(source_dir, cleanup=True)
def setup():
    """Provision all AWS IoT prerequisites, then write the credentials into the demo config."""
    prereq()
    update_credential_file()
    print("Setup Completed")
def cleanup():
    """Tear down the AWS IoT prerequisites and scrub credentials from the demo config."""
    METHOD_NAME()
    cleanup_creds()
    print("Cleanup Completed")
def list_certificates():
    """Pretty-print every IoT certificate registered in the AWS account."""
    client = boto3.client('iot')
    # Renamed from 'certs': the local shadowed the module-level `import certs`
    # used elsewhere in this script.
    certificates = client.list_certificates()['certificates']
    pp.pprint(certificates)
def list_things():
    """Pretty-print every IoT thing registered in the AWS account."""
    iot = boto3.client('iot')
    pp.pprint(iot.list_things()['things'])
def list_policies():
    """Pretty-print every IoT policy registered in the AWS account."""
    iot = boto3.client('iot')
    pp.pprint(iot.list_policies()['policies'])
if __name__ == "__main__":
arg_parser = argparse.ArgumentParser()
subparsers = arg_parser.add_subparsers(help='Available commands',
dest='command')
subparsers.add_parser('setup', help='Setup AWS IoT')
subparsers.add_parser('cleanup', help='Cleanup AWS IoT')
subparsers.add_parser('list_certificates', help='List certificates')
subparsers.add_parser('list_things', help='List things')
subparsers.add_parser('list_policies', help='List policies')
subparsers.add_parser('prereq', help='Setup prerequisites for AWS IoT')
subparsers.add_parser('update_creds', help='Update credential files')
subparsers.add_parser('delete_prereq', help='Delete prerequisites created')
subparsers.add_parser('cleanup_creds', help='Cleanup credential files')
args = arg_parser.parse_args()
check_aws_configuration()
if args.command == 'setup':
setup()
elif args.command == 'cleanup':
cleanup()
elif args.command == 'list_certificates':
list_certificates()
elif args.command == 'list_things':
list_things()
elif args.command == 'list_policies':
list_policies()
elif args.command == 'prereq':
prereq()
elif args.command == 'update_creds':
update_credential_file()
elif args.command == 'delete_prereq':
METHOD_NAME()
elif args.command == 'cleanup_creds':
cleanup_creds()
else:
print("Command does not exist")
sys.exit(0) |
7,251 | test gateway runtimes | import os
import time
import pytest
from jina.excepts import RuntimeFailToStart
from jina.orchestrate.pods import Pod
from jina.parsers import set_gateway_parser
from jina.serve.runtimes import asyncio as runtime_asyncio
from jina.serve.executors import BaseExecutor
from tests.helper import _generate_pod_args
@pytest.fixture()
def fake_env():
    """Fixture: export key_parent for the test, removing it again afterwards."""
    os.environ['key_parent'] = 'value3'
    yield
    os.environ.pop('key_parent', None)
class EnvChecker1(BaseExecutor):
    """Executor that asserts, at construction time, that both pod-level and inherited env vars are visible."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # pod/pod-specific (passed via --env)
        assert os.environ['key1'] == 'value1'
        assert os.environ['key2'] == 'value2'
        # inherit from parent process (set by the fake_env fixture)
        assert os.environ['key_parent'] == 'value3'
def test_pod_runtime_env_setting(fake_env):
    """--env vars must reach the pod's executor but never leak back into the parent process."""
    args = _generate_pod_args(
        [
            '--uses',
            'EnvChecker1',
            '--env',
            'key1=value1',
            '--env',
            'key2=value2',
        ]
    )
    # EnvChecker1 asserts the env inside the pod at construction time
    with Pod(args):
        pass

    # should not affect the main process
    assert 'key1' not in os.environ
    assert 'key2' not in os.environ
    assert 'key_parent' in os.environ
@pytest.mark.parametrize(
    'protocol, expected',
    [
        ('grpc', 'GRPCGateway'),
        ('websocket', 'WebSocketGateway'),
        ('http', 'HTTPGateway'),
    ],
)
def test_gateway_args(protocol, expected):
    """Each --protocol value must select the matching gateway class (no pod start needed)."""
    args = set_gateway_parser().parse_args(
        [
            '--host',
            'jina-custom-gateway',
            '--port',
            '23456',
            '--protocol',
            protocol,
        ]
    )
    p = Pod(args)
    assert p.args.uses == expected
@pytest.mark.parametrize(
    'protocol, expected',
    [
        ('grpc', 'GRPCGateway'),
        ('websocket', 'WebSocketGateway'),
        ('http', 'HTTPGateway'),
    ],
)
def METHOD_NAME(protocol, expected):
    """A gateway pod with a valid graph/addresses config must start and report the right gateway class."""
    args = set_gateway_parser().parse_args(
        [
            '--graph-description',
            '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}',
            '--deployments-addresses',
            '{"pod0": ["0.0.0.0:1234"]}',
            '--protocol',
            protocol,
        ]
    )
    # unlike test_gateway_args, this actually starts the pod
    with Pod(args) as p:
        assert p.args.uses == expected
class RaisingExecutor(BaseExecutor):
    """Executor that always fails during construction (used to test pod start failure)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        raise RuntimeError('intentional error')
def test_failing_executor():
    """An executor that raises in __init__ must surface as RuntimeFailToStart on pod start."""
    args = _generate_pod_args(
        [
            '--uses',
            'RaisingExecutor',
        ]
    )

    with pytest.raises(RuntimeFailToStart):
        with Pod(args):
            pass
@pytest.mark.parametrize(
    'protocol, expected',
    [
        ('grpc', 'GRPCGateway'),
        ('websocket', 'WebSocketGateway'),
        ('http', 'HTTPGateway'),
    ],
)
def test_failing_gateway_runtimes(protocol, expected):
    """A gateway given malformed deployments-addresses JSON must fail to start for every protocol."""
    args = set_gateway_parser().parse_args(
        [
            '--graph-description',
            '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}',
            '--deployments-addresses',
            # deliberately invalid JSON to trigger the startup failure
            '{_INVALIDJSONINTENTIONALLY_pod0": ["0.0.0.0:1234"]}',
            '--protocol',
            protocol,
        ]
    )

    with pytest.raises(RuntimeFailToStart):
        with Pod(args):
            pass
@pytest.mark.timeout(4)
def test_close_before_start(monkeypatch):
    """Closing a pod whose runtime is still in a slow __init__ must not hang (timeout guards this)."""

    class SlowFakeRuntime:
        def __init__(self, *args, **kwargs):
            # slower than the test timeout: close() must not wait for this
            time.sleep(5.0)

        def __enter__(self):
            pass

        def __exit__(self, exc_type, exc_val, exc_tb):
            pass

        def run_forever(self):
            pass

    monkeypatch.setattr(
        runtime_asyncio,
        'AsyncNewLoopRuntime',
        SlowFakeRuntime,
    )

    pod = Pod(_generate_pod_args(['--noblock-on-start']))
    pod.start()
    pod.is_signal_handlers_installed.set()
    pod.close()
@pytest.mark.timeout(4)
def test_close_before_start_slow_enter(monkeypatch):
    """Closing a pod whose runtime hangs in __enter__ must not hang (timeout guards this)."""

    class SlowFakeRuntime:
        def __init__(self, *args, **kwargs):
            pass

        def __enter__(self):
            # slower than the test timeout: close() must not wait for this
            time.sleep(5.0)

        def __exit__(self, exc_type, exc_val, exc_tb):
            pass

        def run_forever(self):
            pass

    monkeypatch.setattr(
        runtime_asyncio,
        'AsyncNewLoopRuntime',
        SlowFakeRuntime,
    )

    pod = Pod(_generate_pod_args(['--noblock-on-start']))
    pod.start()
    pod.is_signal_handlers_installed.set()
    pod.close()
7,252 | test create from local file | import uuid
from labelbox import parser
import pytest
from labelbox.schema.annotation_import import AnnotationImportState, MEAPredictionImport
from labelbox.data.serialization import NDJsonConverter
"""
- Here we only want to check that the uploads are calling the validation
- Then with unit tests we can check the types of errors raised
"""
def test_create_from_url(model_run_with_data_rows,
                         annotation_import_test_helpers):
    """Predictions can be imported into a model run from a public NDJSON URL."""
    name = str(uuid.uuid4())
    url = "https://storage.googleapis.com/labelbox-public-bucket/predictions_test_v2.ndjson"
    annotation_import = model_run_with_data_rows.add_predictions(
        name=name, predictions=url)

    assert annotation_import.model_run_id == model_run_with_data_rows.uid
    annotation_import_test_helpers.check_running_state(annotation_import, name,
                                                       url)
    annotation_import.wait_until_done()
def test_create_from_objects(model_run_with_data_rows, object_predictions,
                             annotation_import_test_helpers):
    """Predictions can be imported from an in-memory list of NDJSON dicts."""
    name = str(uuid.uuid4())

    annotation_import = model_run_with_data_rows.add_predictions(
        name=name, predictions=object_predictions)

    assert annotation_import.model_run_id == model_run_with_data_rows.uid
    annotation_import_test_helpers.check_running_state(annotation_import, name)
    # the uploaded file must round-trip the exact predictions we passed in
    annotation_import_test_helpers.assert_file_content(
        annotation_import.input_file_url, object_predictions)
    annotation_import.wait_until_done()
def test_create_from_objects_with_confidence(predictions_with_confidence,
                                             model_run_with_data_rows,
                                             annotation_import_test_helpers):
    """Confidence-scored predictions import cleanly and the run finishes with no errors."""
    name = str(uuid.uuid4())

    object_prediction_data_rows = [
        object_prediction["dataRow"]["id"]
        for object_prediction in predictions_with_confidence
    ]
    # MUST have all data rows in the model run
    model_run_with_data_rows.upsert_data_rows(
        data_row_ids=object_prediction_data_rows)
    annotation_import = model_run_with_data_rows.add_predictions(
        name=name, predictions=predictions_with_confidence)

    assert annotation_import.model_run_id == model_run_with_data_rows.uid
    annotation_import_test_helpers.check_running_state(annotation_import, name)
    annotation_import_test_helpers.assert_file_content(
        annotation_import.input_file_url, predictions_with_confidence)
    annotation_import.wait_until_done()

    assert annotation_import.state == AnnotationImportState.FINISHED
    annotation_import_test_helpers.download_and_assert_status(
        annotation_import.status_file_url)
def test_create_from_objects_all_project_labels(
        model_run_with_all_project_labels, object_predictions,
        annotation_import_test_helpers):
    """Importing predictions works on a model run seeded with all of the project's labels."""
    name = str(uuid.uuid4())

    annotation_import = model_run_with_all_project_labels.add_predictions(
        name=name, predictions=object_predictions)

    assert annotation_import.model_run_id == model_run_with_all_project_labels.uid
    annotation_import_test_helpers.check_running_state(annotation_import, name)
    annotation_import_test_helpers.assert_file_content(
        annotation_import.input_file_url, object_predictions)
    annotation_import.wait_until_done()
def test_model_run_project_labels(model_run_with_all_project_labels,
                                  model_run_predictions):
    """Labels exported from the model run must match the project labels that seeded it."""
    model_run = model_run_with_all_project_labels
    # TODO: Move to export_v2
    model_run_exported_labels = model_run.export_labels(download=True)
    labels_indexed_by_schema_id = {}

    for label in model_run_exported_labels:
        # assuming exported array of label 'objects' has only one label per data row... as usually is when there are no label revisions
        schema_id = label['Label']['objects'][0]['schemaId']
        labels_indexed_by_schema_id[schema_id] = label

    assert (len(
        labels_indexed_by_schema_id.keys())) == len(model_run_predictions)

    # making sure the labels are in this model run are all labels uploaded to the project
    # by comparing some 'immutable' attributes
    for expected_label in model_run_predictions:
        schema_id = expected_label['schemaId']
        actual_label = labels_indexed_by_schema_id[schema_id]
        assert actual_label['Label']['objects'][0]['title'] == expected_label[
            'name']
        assert actual_label['DataRow ID'] == expected_label['dataRow']['id']
def test_create_from_label_objects(model_run_with_data_rows, object_predictions,
                                   annotation_import_test_helpers):
    """Predictions can be imported as Label objects; serialization must round-trip through NDJSON."""
    name = str(uuid.uuid4())
    # convert raw NDJSON dicts into Label objects first
    predictions = list(NDJsonConverter.deserialize(object_predictions))

    annotation_import = model_run_with_data_rows.add_predictions(
        name=name, predictions=predictions)

    assert annotation_import.model_run_id == model_run_with_data_rows.uid
    annotation_import_test_helpers.check_running_state(annotation_import, name)
    # the uploaded file must equal the re-serialized form of the Label objects
    normalized_predictions = NDJsonConverter.serialize(predictions)
    annotation_import_test_helpers.assert_file_content(
        annotation_import.input_file_url, normalized_predictions)
    annotation_import.wait_until_done()
def METHOD_NAME(tmp_path, model_run_with_data_rows,
                object_predictions,
                annotation_import_test_helpers):
    """Predictions can be uploaded from an NDJSON file on disk."""
    name = str(uuid.uuid4())
    ndjson_path = tmp_path / f"{name}.ndjson"
    with ndjson_path.open("w") as ndjson_file:
        parser.dump(object_predictions, ndjson_file)
    annotation_import = model_run_with_data_rows.add_predictions(
        name=name, predictions=str(ndjson_path))
    assert annotation_import.model_run_id == model_run_with_data_rows.uid
    annotation_import_test_helpers.check_running_state(annotation_import, name)
    annotation_import_test_helpers.assert_file_content(
        annotation_import.input_file_url, object_predictions)
    annotation_import.wait_until_done()
def test_get(client, model_run_with_data_rows, annotation_import_test_helpers):
    """An existing prediction import can be looked up by name."""
    name = str(uuid.uuid4())
    url = "https://storage.googleapis.com/labelbox-public-bucket/predictions_test_v2.ndjson"
    model_run_with_data_rows.add_predictions(name=name, predictions=url)
    fetched_import = MEAPredictionImport.from_name(
        client, model_run_id=model_run_with_data_rows.uid, name=name)
    assert fetched_import.model_run_id == model_run_with_data_rows.uid
    annotation_import_test_helpers.check_running_state(fetched_import, name,
                                                       url)
    fetched_import.wait_until_done()
@pytest.mark.slow
def test_wait_till_done(model_run_predictions, model_run_with_data_rows):
    """Waiting for completion yields FINISHED state and matching status files."""
    name = str(uuid.uuid4())
    annotation_import = model_run_with_data_rows.add_predictions(
        name=name, predictions=model_run_predictions)
    assert len(annotation_import.inputs) == len(model_run_predictions)
    annotation_import.wait_until_done()
    assert annotation_import.state == AnnotationImportState.FINISHED
    # Check that the status files are being returned as expected.
    assert len(annotation_import.errors) == 0
    assert len(annotation_import.inputs) == len(model_run_predictions)
    input_uuids = {annot['uuid'] for annot in annotation_import.inputs}
    prediction_uuids = {pred['uuid'] for pred in model_run_predictions}
    assert input_uuids == prediction_uuids
    assert len(annotation_import.statuses) == len(model_run_predictions)
    for status in annotation_import.statuses:
        assert status['status'] == 'SUCCESS'
    status_uuids = {annot['uuid'] for annot in annotation_import.statuses}
    assert input_uuids == status_uuids
7,253 | project | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Jira issue tracker."""
import datetime
from urllib.parse import urljoin
from dateutil import parser
from clusterfuzz._internal.config import db_config
from libs.issue_management import issue_tracker
from libs.issue_management.jira.issue_tracker_manager import IssueTrackerManager
class Issue(issue_tracker.Issue):
    """Represents an issue."""

    def __init__(self, itm, jira_issue):
        # itm: the IssueTrackerManager used for all client operations.
        # jira_issue: the raw Jira issue object wrapped by this class.
        self.itm = itm
        self.jira_issue = jira_issue
        # LabelStores track additions/removals so that save() can replay only
        # the delta against the tracker.
        self._ccs = issue_tracker.LabelStore(self.itm.get_watchers(self.jira_issue))
        self._components = issue_tracker.LabelStore(
            self.jira_issue.fields.components)
        self._labels = issue_tracker.LabelStore(self.jira_issue.fields.labels)

    @property
    def issue_tracker(self):
        """The IssueTracker for this issue."""
        return IssueTracker(self.itm)

    @property
    def id(self):
        """The issue identifier."""
        return int(self.jira_issue.id)

    @property
    def key(self):
        """The issue key (e.g. FUZZ-123)."""
        return self.jira_issue.key

    @property
    def title(self):
        """The issue title."""
        return self.jira_issue.fields.summary

    @title.setter
    def title(self, new_title):
        self.jira_issue.fields.summary = new_title

    @property
    def reporter(self):
        """The issue reporter."""
        return self.jira_issue.fields.reporter

    @reporter.setter
    def reporter(self, new_reporter):
        self.jira_issue.fields.reporter = new_reporter

    @property
    def is_open(self):
        """Whether the issue is open."""
        # Jira sets a resolution only when an issue is closed.
        return self.jira_issue.fields.resolution is None

    @property
    def closed_time(self):
        """Resolution time as a naive local datetime.

        resolutiondate is parsed from its string form and round-tripped
        through a POSIX timestamp.
        """
        return datetime.datetime.fromtimestamp(
            parser.parse(self.jira_issue.fields.resolutiondate).timestamp())

    @property
    def status(self):
        """The issue status."""
        return self.jira_issue.fields.status

    @status.setter
    def status(self, new_status):
        self.jira_issue.fields.status = new_status

    @property
    def body(self):
        """The issue body."""
        return self.jira_issue.fields.description

    @body.setter
    def body(self, new_body):
        self.jira_issue.fields.description = new_body

    @property
    def assignee(self):
        """The issue assignee."""
        return self.jira_issue.fields.assignee

    @assignee.setter
    def assignee(self, new_assignee):
        self.jira_issue.fields.assignee = new_assignee

    @property
    def ccs(self):
        """The issue CC list."""
        return self._ccs

    @property
    def labels(self):
        """The issue labels list."""
        return self._labels

    @property
    def components(self):
        """The issue component list."""
        return self._components

    # FIXME: Add support for notify arguments
    def save(self, new_comment=None, notify=True):  # pylint: disable=unused-argument
        """Save the issue."""
        # add new comment to issue
        if new_comment:
            self.itm.client.add_comment(self.jira_issue, new_comment)
        # Replay tracked additions/removals for components, CCs and labels,
        # then reset tracking so a second save() does not re-apply them.
        for added in self._components.added:
            self.components.add(added)
        for removed in self._components.removed:
            self.components.remove(removed)
        self._components.reset_tracking()
        for added in self._ccs.added:
            self.ccs.add(added)
        for removed in self._ccs.removed:
            self.ccs.remove(removed)
        self._ccs.reset_tracking()
        for added in self._labels.added:
            self.labels.add(added)
        for removed in self._labels.removed:
            self.labels.remove(removed)
        self._labels.reset_tracking()
        self.itm.save(self)

    @property
    def actions(self):
        """Issue actions (none are supported here; always empty)."""
        return ()

    @property
    def merged_into(self):
        """Merge target (not tracked for Jira; always None)."""
class IssueTracker(issue_tracker.IssueTracker):
    """Issue tracker interface."""

    def __init__(self, itm):
        self._itm = itm

    @property
    def METHOD_NAME(self):
        """Name of the Jira project backing this tracker."""
        return self._itm.project_name

    def _build_jql(self, keywords, only_open):
        """Compose the JQL query shared by find_issues and find_issues_url."""
        jql = 'project = {project_name}' + _get_search_text(keywords)
        jql = jql.format(project_name=self._itm.project_name)
        if only_open:
            jql += ' AND resolution = Unresolved'
        return jql

    def new_issue(self):
        """Create a new, unsaved issue."""
        return Issue(self._itm, self._itm.create())

    def get_issue(self, issue_id):
        """Return the issue with the given ID, or None if it does not exist."""
        jira_issue = self._itm.get_issue(issue_id)
        return Issue(self._itm, jira_issue) if jira_issue else None

    def find_issues(self, keywords=None, only_open=False):
        """Find issues."""
        jql = self._build_jql(keywords, only_open)
        return [Issue(self._itm, issue) for issue in self._itm.get_issues(jql)]

    def issue_url(self, issue_id):
        """Return the issue URL with the given ID."""
        issue = self.get_issue(issue_id)
        if not issue:
            return None
        return urljoin(db_config.get().jira_url, f'/browse/{issue.key}')

    def find_issues_url(self, keywords=None, only_open=None):
        """Return the URL of a Jira search for the given keywords."""
        jql = self._build_jql(keywords, only_open)
        return urljoin(db_config.get().jira_url, f'/issues/?jql={jql}')
def _get_issue_tracker_manager_for_project(project_name):
    """Return jira issue tracker manager for the given project.

    Returns None when no issue tracker is configured (empty/missing name or
    the sentinel value 'disabled').
    """
    tracker_enabled = bool(project_name) and project_name != 'disabled'
    if not tracker_enabled:
        return None
    return IssueTrackerManager(project_name=project_name)
def get_issue_tracker(project_name, config):  # pylint: disable=unused-argument
    """Get the issue tracker for the project name."""
    itm = _get_issue_tracker_manager_for_project(project_name)
    return IssueTracker(itm) if itm is not None else None
def _get_search_text(keywords):
"""Get search text."""
jira_special_characters = '+-&|!(){}[]^~*?\\:'
search_text = ''
for keyword in keywords:
# Replace special characters with whitespace as they are not allowed and
# can't be searched for.
stripped_keyword = keyword
for special_character in jira_special_characters:
stripped_keyword = stripped_keyword.replace(special_character, ' ')
# coalesce multiple spaces into one.
stripped_keyword = ' '.join(stripped_keyword.split())
search_text += f' AND text ~ "{stripped_keyword}"'
return search_text |
7,254 | get next | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
from msrest import Serializer
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
    subscription_id: str,
    resource_group: str,
    integration_service_environment_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Assemble the GET request that lists integration service environment SKUs."""
    api_version = kwargs.pop('api_version', "2019-05-01")  # type: str
    accept = "application/json"

    # Fill the URL template with the serialized path arguments.
    template = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Logic/integrationServiceEnvironments/{integrationServiceEnvironmentName}/skus")  # pylint: disable=line-too-long
    url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        resourceGroup=_SERIALIZER.url("resource_group", resource_group, 'str'),
        integrationServiceEnvironmentName=_SERIALIZER.url(
            "integration_service_environment_name",
            integration_service_environment_name, 'str'),
    )

    # Query parameters.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers.
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
class IntegrationServiceEnvironmentSkusOperations(object):
    """IntegrationServiceEnvironmentSkusOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.logic.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    @distributed_trace
    def list(
        self,
        resource_group: str,
        integration_service_environment_name: str,
        **kwargs: Any
    ) -> Iterable["_models.IntegrationServiceEnvironmentSkuList"]:
        """Gets a list of integration service environment Skus.

        :param resource_group: The resource group.
        :type resource_group: str
        :param integration_service_environment_name: The integration service environment name.
        :type integration_service_environment_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either IntegrationServiceEnvironmentSkuList or the result
         of cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.logic.models.IntegrationServiceEnvironmentSkuList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        api_version = kwargs.pop('api_version', "2019-05-01")  # type: str
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.IntegrationServiceEnvironmentSkuList"]
        # Map HTTP error codes to exception types raised by map_error below.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        def prepare_request(next_link=None):
            # First page: build the request from the operation's URL template.
            if not next_link:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group=resource_group,
                    integration_service_environment_name=integration_service_environment_name,
                    api_version=api_version,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            # Subsequent pages: reuse the server-provided next_link as the URL.
            else:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group=resource_group,
                    integration_service_environment_name=integration_service_environment_name,
                    api_version=api_version,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and return (next_link, iterator of items).
            deserialized = self._deserialize("IntegrationServiceEnvironmentSkuList", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def METHOD_NAME(next_link=None):
            # Fetch one page; raise a mapped error on non-200 responses.
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(  # pylint: disable=protected-access
                request,
                stream=False,
                **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            METHOD_NAME, extract_data
        )
    list.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Logic/integrationServiceEnvironments/{integrationServiceEnvironmentName}/skus"}  # type: ignore
7,255 | test duplicate symbol | # Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/pylint-dev/pylint/blob/main/LICENSE
# Copyright (c) https://github.com/pylint-dev/pylint/blob/main/CONTRIBUTORS.txt
from __future__ import annotations
from collections.abc import ValuesView
from pathlib import Path
import pytest
from pylint.exceptions import InvalidMessageError, UnknownMessageError
from pylint.message.message_definition import MessageDefinition
from pylint.message.message_id_store import MessageIdStore
from pylint.testutils._run import _Run as Run
EMPTY_FILE = str(Path(__file__).parent.parent.resolve() / "regrtest_data" / "empty.py")
def test_len_str(msgid_store: MessageIdStore, msgids: dict[str, str]) -> None:
    """The store's length and string form reflect its registered messages."""
    assert len(msgid_store) == len(msgids)
    text = str(msgid_store)
    expected_fragments = (
        "MessageIdStore: [",
        " - W1234 (warning-symbol)",
        " - W1235 (warning-symbol-two)",
        " - C1234 (convention-symbol)",
        " - E1234 (error-symbol)",
        "]",
    )
    for fragment in expected_fragments:
        assert fragment in text
def test_get_message_ids(msgid_store: MessageIdStore, msgids: dict[str, str]) -> None:
    """We can get message id even with capitalization problem."""
    msgid = next(iter(msgids.keys()))
    # Lookup must succeed even when the msgid is lower-cased.
    assert msgid_store.get_active_msgids(msgid.lower()) == [msgid]
def test_get_message_ids_not_existing(empty_msgid_store: MessageIdStore) -> None:
    """Looking up an unknown msgid raises and names the offending id."""
    unknown_msgid = "W9876"
    with pytest.raises(UnknownMessageError) as error:
        empty_msgid_store.get_active_msgids(unknown_msgid)
    assert unknown_msgid in str(error.value)
def test_register_message_definitions(
    empty_msgid_store: MessageIdStore,
    message_definitions: ValuesView[MessageDefinition],
) -> None:
    """Registering definitions stores one entry per msgid plus old names."""
    expected_count = len(message_definitions)
    for definition in message_definitions:
        empty_msgid_store.register_message_definition(
            definition.msgid,
            definition.symbol,
            definition.old_names,
        )
        # Each legacy name occupies its own slot in the store.
        expected_count += len(definition.old_names)
    assert len(empty_msgid_store) == expected_count
def test_add_msgid_and_symbol(empty_msgid_store: MessageIdStore) -> None:
    """Active and legacy msgid/symbol pairs all resolve to the active msgid."""
    empty_msgid_store.add_msgid_and_symbol("E1235", "new-sckiil")
    empty_msgid_store.add_legacy_msgid_and_symbol("C1235", "old-sckiil", "E1235")
    assert len(empty_msgid_store) == 2
    # Both the active msgid and the legacy symbol map to the active msgid.
    for lookup in ("E1235", "old-sckiil"):
        assert empty_msgid_store.get_active_msgids(lookup) == ["E1235"]
    assert empty_msgid_store.get_symbol("C1235") == "old-sckiil"
    assert empty_msgid_store.get_symbol("E1235") == "new-sckiil"
    assert empty_msgid_store.get_msgid("old-sckiil") == "C1235"
    assert empty_msgid_store.get_msgid("new-sckiil") == "E1235"
    # Unknown ids and symbols raise.
    with pytest.raises(UnknownMessageError):
        empty_msgid_store.get_symbol("C1234")
    with pytest.raises(UnknownMessageError):
        empty_msgid_store.get_msgid("not-exist")
def METHOD_NAME(empty_msgid_store: MessageIdStore) -> None:
    """Reusing a msgid with a different symbol is rejected."""
    empty_msgid_store.add_msgid_and_symbol("W1234", "warning-symbol")
    with pytest.raises(InvalidMessageError) as error:
        empty_msgid_store.check_msgid_and_symbol("W1234", "other-symbol")
    expected = (
        "Message id 'W1234' cannot have both 'other-symbol' and "
        "'warning-symbol' as symbolic name."
    )
    assert expected in str(error.value)
def test_duplicate_msgid(msgid_store: MessageIdStore) -> None:
    """Reusing a symbol for a second msgid is rejected."""
    msgid_store.add_msgid_and_symbol("W1234", "warning-symbol")
    with pytest.raises(InvalidMessageError) as error:
        msgid_store.check_msgid_and_symbol("W1235", "warning-symbol")
    expected = "Message symbol 'warning-symbol' cannot be used for 'W1234' and 'W1235'"
    assert expected in str(error.value)
def test_exclusivity_of_msgids() -> None:
    """Test to see if all checkers have an exclusive message id prefix."""
    err_msg = (
        "{} has the same prefix ('{}') as the '{}' checker. Please make sure the prefix "
        "is unique for each checker. You can use 'script/get_unused_message_id_category.py' "
        "to get a unique id."
    )
    # Run the linter with every extension enabled so all checkers register
    # their messages.
    runner = Run(["--enable-all-extensions", EMPTY_FILE], exit=False)

    # Some pairs are hard-coded as they are pre-existing and non-exclusive,
    # and we don't want to rename them for backwards compatibility
    checker_id_pairs = {
        "00": ("main", "miscellaneous"),
        "01": (
            "basic",
            "refactoring",
            "consider_ternary_expression",
            "while_used",
            "docstyle",
            "deprecated_builtins",
        ),
        "02": ("classes", "refactoring", "multiple_types"),
        "03": ("classes", "format"),
        "04": ("imports", "spelling"),
        "05": ("consider-using-any-or-all", "miscellaneous"),
        "07": ("exceptions", "broad_try_clause", "overlap-except"),
        "12": ("design", "logging"),
        "17": ("async", "refactoring"),
        "20": ("compare-to-zero", "empty-comment", "magic-value"),
    }
    # msgid[1:3] is the two-digit checker prefix (e.g. "12" in "R1234").
    for msgid, definition in runner.linter.msgs_store._messages_definitions.items():
        if definition.shared:
            continue
        if msgid[1:3] in checker_id_pairs:
            # Known-shared prefix: the checker must be one of the exempted ones.
            assert (
                definition.checker_name in checker_id_pairs[msgid[1:3]]
            ), err_msg.format(msgid, msgid[1:3], checker_id_pairs[msgid[1:3]][0])
        else:
            # First checker seen with this prefix claims it.
            checker_id_pairs[msgid[1:3]] = (definition.checker_name,)
7,256 | callback regex1 | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2023
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
import asyncio
import re
import pytest
from telegram import (
Bot,
CallbackQuery,
Chat,
ChosenInlineResult,
InlineQuery,
Message,
PreCheckoutQuery,
ShippingQuery,
Update,
User,
)
from telegram.ext import CallbackContext, JobQueue, MessageHandler, filters
from telegram.ext.filters import MessageFilter
from tests.auxil.slots import mro_slots
# Stand-in message embedded in the parametrized "false" updates below.
message = Message(1, None, Chat(1, ""), from_user=User(1, "", False), text="Text")

# Update payloads that a MessageHandler must NOT handle, one per update type.
params = [
    {"callback_query": CallbackQuery(1, User(1, "", False), "chat", message=message)},
    {"inline_query": InlineQuery(1, User(1, "", False), "", "")},
    {"chosen_inline_result": ChosenInlineResult("id", User(1, "", False), "")},
    {"shipping_query": ShippingQuery("id", User(1, "", False), "", None)},
    {"pre_checkout_query": PreCheckoutQuery("id", User(1, "", False), "", 0, "")},
    {"callback_query": CallbackQuery(1, User(1, "", False), "chat")},
]

# Human-readable ids for the parametrization above (kept in the same order).
ids = (
    "callback_query",
    "inline_query",
    "chosen_inline_result",
    "shipping_query",
    "pre_checkout_query",
    "callback_query_without_message",
)
@pytest.fixture(scope="class", params=params, ids=ids)
def false_update(request):
    """An Update of a non-message type that MessageHandler must ignore."""
    return Update(update_id=1, **request.param)
@pytest.fixture(scope="class")
def message(bot):
    """A minimal, mutable Message bound to the test bot."""
    message = Message(1, None, Chat(1, ""), from_user=User(1, "", False))
    # Unfreeze so tests can mutate attributes such as text and chat.type.
    message._unfreeze()
    message.chat._unfreeze()
    message.set_bot(bot)
    return message
class TestMessageHandler:
    # Set by the callbacks below; inspected by tests after update processing.
    test_flag = False
    # Type returned by re.match, used to check context.matches entries.
    SRE_TYPE = type(re.match("", ""))

    def test_slot_behaviour(self):
        """Handler defines all declared slots and the MRO has no duplicates."""
        handler = MessageHandler(filters.ALL, self.callback)
        for attr in handler.__slots__:
            assert getattr(handler, attr, "err") != "err", f"got extra slot '{attr}'"
        assert len(mro_slots(handler)) == len(set(mro_slots(handler))), "duplicate slot"

    @pytest.fixture(autouse=True)
    def _reset(self):
        # Clear the flag before every test.
        self.test_flag = False

    async def callback(self, update, context):
        """Set test_flag iff the callback received a fully-populated context."""
        self.test_flag = (
            isinstance(context, CallbackContext)
            and isinstance(context.bot, Bot)
            and isinstance(update, Update)
            and isinstance(context.update_queue, asyncio.Queue)
            and isinstance(context.job_queue, JobQueue)
            and isinstance(context.chat_data, dict)
            and isinstance(context.bot_data, dict)
            and (
                # user_data exists for (edited_)message updates...
                (
                    isinstance(context.user_data, dict)
                    and (
                        isinstance(update.message, Message)
                        or isinstance(update.edited_message, Message)
                    )
                )
                # ...and is None for (edited_)channel_post updates.
                or (
                    context.user_data is None
                    and (
                        isinstance(update.channel_post, Message)
                        or isinstance(update.edited_channel_post, Message)
                    )
                )
            )
        )

    def METHOD_NAME(self, update, context):
        """Set test_flag iff context.matches holds exactly one regex match."""
        if context.matches:
            types = all(type(res) is self.SRE_TYPE for res in context.matches)
            num = len(context.matches) == 1
            self.test_flag = types and num

    def callback_regex2(self, update, context):
        """Set test_flag iff context.matches holds exactly two regex matches."""
        if context.matches:
            types = all(type(res) is self.SRE_TYPE for res in context.matches)
            num = len(context.matches) == 2
            self.test_flag = types and num

    def test_with_filter(self, message):
        """The handler only accepts updates passing its filter."""
        handler = MessageHandler(filters.ChatType.GROUP, self.callback)

        message.chat.type = "group"
        assert handler.check_update(Update(0, message))
        message.chat.type = "private"
        assert not handler.check_update(Update(0, message))

    def test_callback_query_with_filter(self, message):
        """A callback_query update is rejected before the filter even runs."""
        class TestFilter(filters.UpdateFilter):
            flag = False

            def filter(self, u):
                self.flag = True

        test_filter = TestFilter()

        handler = MessageHandler(test_filter, self.callback)
        update = Update(1, callback_query=CallbackQuery(1, None, None, message=message))

        assert update.effective_message
        assert not handler.check_update(update)
        # The filter must not have been invoked at all.
        assert not test_filter.flag

    def test_specific_filters(self, message):
        """Combined UpdateType filters select only the requested update kind."""
        f = (
            ~filters.UpdateType.MESSAGES
            & ~filters.UpdateType.CHANNEL_POST
            & filters.UpdateType.EDITED_CHANNEL_POST
        )
        handler = MessageHandler(f, self.callback)
        assert not handler.check_update(Update(0, edited_message=message))
        assert not handler.check_update(Update(0, message=message))
        assert not handler.check_update(Update(0, channel_post=message))
        assert handler.check_update(Update(0, edited_channel_post=message))

    def test_other_update_types(self, false_update):
        """Non-message updates and non-Update objects are rejected."""
        handler = MessageHandler(None, self.callback)
        assert not handler.check_update(false_update)
        assert not handler.check_update("string")

    def test_filters_returns_empty_dict(self):
        """A data filter returning an empty dict counts as no match."""
        class DataFilter(MessageFilter):
            data_filter = True

            def filter(self, msg: Message):
                return {}

        handler = MessageHandler(DataFilter(), self.callback)
        assert handler.check_update(Update(0, message)) is False

    async def test_context(self, app, message):
        """The callback context is populated for all four message update kinds."""
        handler = MessageHandler(
            None,
            self.callback,
        )
        app.add_handler(handler)

        async with app:
            await app.process_update(Update(0, message=message))
            assert self.test_flag
            self.test_flag = False
            await app.process_update(Update(0, edited_message=message))
            assert self.test_flag
            self.test_flag = False
            await app.process_update(Update(0, channel_post=message))
            assert self.test_flag
            self.test_flag = False
            await app.process_update(Update(0, edited_channel_post=message))
            assert self.test_flag

    async def test_context_regex(self, app, message):
        """A single Regex filter yields exactly one match object."""
        handler = MessageHandler(filters.Regex("one two"), self.METHOD_NAME)
        app.add_handler(handler)

        async with app:
            message.text = "not it"
            await app.process_update(Update(0, message))
            assert not self.test_flag

            message.text += " one two now it is"
            await app.process_update(Update(0, message))
            assert self.test_flag

    async def test_context_multiple_regex(self, app, message):
        """Two AND-ed Regex filters yield exactly two match objects."""
        handler = MessageHandler(filters.Regex("one") & filters.Regex("two"), self.callback_regex2)
        app.add_handler(handler)

        async with app:
            message.text = "not it"
            await app.process_update(Update(0, message))
            assert not self.test_flag

            message.text += " one two now it is"
            await app.process_update(Update(0, message))
            assert self.test_flag
7,257 | load design matrix | import os
import re
import pandas
from qtpy.QtWidgets import QCheckBox
from ert import LibresFacade
from ert.config import CancelPluginException, ErtPlugin
from ert.gui.ertwidgets.customdialog import CustomDialog
from ert.gui.ertwidgets.listeditbox import ListEditBox
from ert.gui.ertwidgets.models.path_model import PathModel
from ert.gui.ertwidgets.pathchooser import PathChooser
def METHOD_NAME(filename) -> pandas.DataFrame:
    """Load a whitespace-separated design matrix file.

    The first column is interpreted as the realization number, renamed to
    "Realization" and used as the index of the returned DataFrame.
    """
    # `delim_whitespace=True` is deprecated since pandas 2.1 and removed in
    # pandas 3; `sep=r"\s+"` is the documented equivalent.
    design_matrix = pandas.read_csv(filename, sep=r"\s+")
    design_matrix = design_matrix.rename(
        columns={design_matrix.columns[0]: "Realization"}
    )
    return design_matrix.set_index(["Realization"])
class CSVExportJob(ErtPlugin):
    """
    Export of summary, misfit, design matrix data and gen kw into a single CSV file.

    The script expects a single argument:

    output_file: this is the path to the file to output the CSV data to

    Optional arguments:

    case_list: a comma separated list of cases to export (no spaces allowed)
               if no list is provided the current case is exported
               a single * can be used to export all cases

    design_matrix: a path to a file containing the design matrix

    infer_iteration: If True the script will try to infer the iteration number
                     by looking at the suffix of the case name (i.e. default_2
                     = iteration 2). If False the script will use the ordering
                     of the case list: the first item will be iteration 0,
                     the second item will be iteration 1...

    The script also looks for default values for output path and design matrix
    path to present in the GUI. These can be specified with DATA_KW keyword in
    the config file:

    DATA_KW <CSV_OUTPUT_PATH> {some path}
    DATA_KW <DESIGN_MATRIX_PATH> {some path}
    """

    INFER_HELP = (
        "<html>"
        "If this is checked the iteration number will be inferred from the name i.e.:"
        "<ul>"
        "<li>case_name -> iteration: 0</li>"
        "<li>case_name_0 -> iteration: 0</li>"
        "<li>case_name_2 -> iteration: 2</li>"
        "<li>case_0, case_2, case_5 -> iterations: 0, 2, 5</li>"
        "</ul>"
        "Leave this unchecked to set iteration number to the order of the listed cases:"
        "<ul><li>case_0, case_2, case_5 -> iterations: 0, 1, 2</li></ul>"
        "<br/>"
        "</html>"
    )

    def getName(self):
        """Plugin display name."""
        return "CSV Export"

    def getDescription(self):
        """Plugin description shown in the GUI."""
        return (
            "Export GenKW, design matrix, misfit data "
            "and summary data into a single CSV file."
        )

    def inferIterationNumber(self, case_name):
        """Return the trailing _<number> of a case name, or 0 if absent."""
        pattern = re.compile("_([0-9]+$)")
        match = pattern.search(case_name)
        if match is not None:
            return int(match.group(1))
        return 0

    def run(  # pylint: disable=arguments-differ
        self,
        output_file,
        case_list=None,
        design_matrix_path=None,
        infer_iteration=True,
        drop_const_cols=False,
    ):
        """Export the selected cases to a single CSV file.

        Args:
            output_file: Path the CSV is written to.
            case_list: Comma-separated case names, '*' for all, or None for
                the default case.
            design_matrix_path: Optional path to a design matrix file joined
                into the export.
            infer_iteration: Accepted for API compatibility.
                NOTE(review): currently unused here — the iteration is taken
                from ensemble.iteration; confirm whether inference should
                still be wired in.
            drop_const_cols: If True, drop columns whose value never varies.

        Returns:
            A human-readable summary of what was exported.

        Raises:
            UserWarning: On unknown cases, empty cases, or a bad design
                matrix path.
        """
        cases = []
        facade = LibresFacade(self.ert())
        if case_list is not None:
            if case_list.strip() == "*":
                cases = self.getAllCaseList()
            else:
                cases = case_list.split(",")

        if case_list is None or len(cases) == 0:
            # Bug fix: this used to assign the *string* "default", which the
            # loop below would iterate character by character ("d", "e", ...).
            cases = ["default"]

        if design_matrix_path is not None:
            if not os.path.exists(design_matrix_path):
                raise UserWarning("The design matrix file does not exists!")

            if not os.path.isfile(design_matrix_path):
                raise UserWarning("The design matrix is not a file!")

        data = pandas.DataFrame()

        for case in cases:
            case = case.strip()

            try:
                ensemble = self.storage.get_ensemble_by_name(case)
            except KeyError as exc:
                raise UserWarning(f"The case '{case}' does not exist!") from exc

            if not ensemble.has_data:
                raise UserWarning(f"The case '{case}' does not have any data!")

            # Start from GenKW data and progressively join the other sources.
            case_data = facade.load_all_gen_kw_data(ensemble)

            if design_matrix_path is not None:
                design_matrix_data = METHOD_NAME(design_matrix_path)
                if not design_matrix_data.empty:
                    case_data = case_data.join(design_matrix_data, how="outer")

            misfit_data = facade.load_all_misfit_data(ensemble)
            if not misfit_data.empty:
                case_data = case_data.join(misfit_data, how="outer")

            summary_data = facade.load_all_summary_data(ensemble)
            if not summary_data.empty:
                case_data = case_data.join(summary_data, how="outer")
            else:
                # Keep a Date index level even when there is no summary data.
                case_data["Date"] = None
                case_data.set_index(["Date"], append=True, inplace=True)

            case_data["Iteration"] = ensemble.iteration
            case_data["Case"] = case
            case_data.set_index(["Case", "Iteration"], append=True, inplace=True)

            data = pandas.concat([data, case_data])

        data = data.reorder_levels(["Realization", "Iteration", "Date", "Case"])
        if drop_const_cols:
            data = data.loc[:, (data != data.iloc[0]).any()]

        data.to_csv(output_file)

        export_info = (
            f"Exported {len(data.index)} rows and {len(data.columns)} "
            f"columns to {output_file}."
        )
        return export_info

    def getArguments(self, parent=None):
        """Show the export dialog and return the arguments for run().

        Raises:
            CancelPluginException: If the user cancels the dialog.
        """
        description = "The CSV export requires some information before it starts:"
        dialog = CustomDialog("CSV Export", description, parent)

        default_csv_output_path = self.get_context_value(
            "<CSV_OUTPUT_PATH>", default="output.csv"
        )
        output_path_model = PathModel(default_csv_output_path)
        output_path_chooser = PathChooser(output_path_model)

        design_matrix_default = self.get_context_value(
            "<DESIGN_MATRIX_PATH>", default=""
        )
        design_matrix_path_model = PathModel(
            design_matrix_default, is_required=False, must_exist=True
        )
        design_matrix_path_chooser = PathChooser(design_matrix_path_model)

        list_edit = ListEditBox(self.getAllCaseList())

        infer_iteration_check = QCheckBox()
        infer_iteration_check.setChecked(True)
        infer_iteration_check.setToolTip(CSVExportJob.INFER_HELP)

        drop_const_columns_check = QCheckBox()
        drop_const_columns_check.setChecked(False)
        drop_const_columns_check.setToolTip(
            "If checked, exclude columns whose value is the same for every entry"
        )

        dialog.addLabeledOption("Output file path", output_path_chooser)
        dialog.addLabeledOption("Design matrix path", design_matrix_path_chooser)
        dialog.addLabeledOption("List of cases to export", list_edit)
        dialog.addLabeledOption("Infer iteration number", infer_iteration_check)
        dialog.addLabeledOption("Drop constant columns", drop_const_columns_check)

        dialog.addButtons()

        success = dialog.showAndTell()

        if success:
            design_matrix_path = design_matrix_path_model.getPath()
            if design_matrix_path.strip() == "":
                design_matrix_path = None

            case_list = ",".join(list_edit.getItems())

            return [
                output_path_model.getPath(),
                case_list,
                design_matrix_path,
                infer_iteration_check.isChecked(),
                drop_const_columns_check.isChecked(),
            ]

        raise CancelPluginException("User cancelled!")

    def get_context_value(self, name, default):
        """Return the DATA_KW context value for |name|, or |default|."""
        context = self.ert().get_context()
        if name in context:
            return context[name]
        return default

    def getAllCaseList(self):
        """Return the names of all ensembles that have data."""
        all_case_list = [case.name for case in self.storage.ensembles if case.has_data]
        return all_case_list
import asyncio
import tempfile
import contextlib
from http import HTTPStatus
import pathlib
from unittest.mock import patch
from typing import AsyncIterator, Dict
import aiohttp
from dffml import (
Record,
config,
Features,
Feature,
ModelContext,
Model,
Sources,
SourcesContext,
entrypoint,
)
from dffml.accuracy import AccuracyContext, AccuracyScorer
from dffml.source.memory import MemorySource
from dffml_service_http.cli import Server
from dffml_service_http.routes import DISALLOW_CACHING
@config
class FakeModelConfig:
    """Configuration for FakeModel; fields required by the Model interface."""
    # Where the model would persist itself (unused by the fake implementation).
    location: pathlib.Path
    features: Features
    predict: Feature
class FakeModelContext(ModelContext):
    """Minimal model context: records training data, emits canned predictions."""

    def __init__(self, parent):
        super().__init__(parent)
        # Maps record key -> Record seen during train().
        self.trained_on: Dict[str, Record] = {}

    async def train(self, sources: Sources):
        """Remember every record offered for training, keyed by record key."""
        async for rec in sources.records():
            self.trained_on[rec.key] = rec

    async def predict(self, sources: SourcesContext) -> AsyncIterator[Record]:
        """Predict 'Salary' as by_ten * 10 with the record key as confidence."""
        async for rec in sources.with_features(["by_ten"]):
            prediction = rec.feature("by_ten") * 10
            confidence = float(rec.key)
            rec.predicted("Salary", prediction, confidence)
            yield rec
@entrypoint("fake")
class FakeModel(Model):
CONTEXT = FakeModelContext
CONFIG = FakeModelConfig
@config
class FakeScorerConfig:
    """The fake scorer needs no configuration."""
    pass
class FakeScorerContext(AccuracyContext):
    """Scorer whose 'accuracy' is the sum of the integer record keys."""

    async def score(
        self, mctx: ModelContext, sources: SourcesContext, feature: Feature
    ):
        total = 0
        async for rec in sources.records():
            total += int(rec.key)
        return total
@entrypoint("fakescore")
class FakeScorer(AccuracyScorer):
CONFIG = FakeScorerConfig
CONTEXT = FakeScorerContext
class ServerRunner:
    """Drive a server coroutine for the duration of a test.

    ``begin``/``end`` are patched into the server class (see ``patch``) so
    the test can await server startup and request shutdown.
    """

    def __init__(self):
        # Server puts an item here once it is listening.
        self.begin = asyncio.Queue()
        # Setting this event asks the server to shut down.
        self.end = asyncio.Event()
        # Task wrapping the running server coroutine (set in start()).
        self.server_stopped = None

    async def start(self, coro):
        """Start *coro* as the server task; return the startup payload.

        Waits for whichever finishes first: server startup or server
        death, so a crash during startup surfaces immediately.
        """
        self.server_stopped = asyncio.create_task(coro)
        server_started = asyncio.create_task(self.begin.get())
        done, pending = await asyncio.wait(
            {self.server_stopped, server_started},
            return_when=asyncio.FIRST_COMPLETED,
        )
        # Raise issues if they happened
        for task in done:
            # This branch is only taken if tests fail
            if task is self.server_stopped: # pragma: no cov
                exception = task.exception()
                if exception is not None:
                    raise exception
        return server_started.result()

    async def stop(self):
        """Signal shutdown and wait for the server task to finish."""
        self.end.set()
        await self.server_stopped

    @classmethod
    @contextlib.asynccontextmanager
    async def patch(cls, server_cls):
        """Patch *server_cls* start/stop hooks onto a fresh runner, stop on exit."""
        self = cls()
        with patch.object(
            server_cls, "RUN_YIELD_START", new=self.begin
        ), patch.object(server_cls, "RUN_YIELD_FINISH", new=self.end):
            yield self
            await self.stop()
class ServerException(Exception):
    """Raised when the service responds with a non-200 status."""
    pass # pragma: no cov
class TestRoutesRunning:
    """Mixin that boots the HTTP service and provides request helpers.

    ``setUp``/``METHOD_NAME`` manage an ``AsyncExitStack`` holding the
    patched server and an ``aiohttp`` client session. Concrete test
    classes are expected to set ``num_records``, ``slabel``, ``mlabel``
    and ``alabel`` (assumption from usage here — confirm in subclasses).
    """

    async def setUp(self):
        self.exit_stack = contextlib.AsyncExitStack()
        await self.exit_stack.__aenter__()
        # Patch the server's start/stop hooks so we can await readiness.
        self.tserver = await self.exit_stack.enter_async_context(
            ServerRunner.patch(Server)
        )
        self.cli = Server(port=0, insecure=True)
        await self.tserver.start(self.cli.run())
        # Client session used by the get()/post() helpers below.
        self.session = await self.exit_stack.enter_async_context(
            aiohttp.ClientSession()
        )

    async def METHOD_NAME(self):
        """Tear down the server and the client session."""
        await self.exit_stack.__aexit__(None, None, None)

    @property
    def url(self):
        """Base URL of the running test server."""
        return f"http://{self.cli.addr}:{self.cli.port}"

    def check_allow_caching(self, r):
        """Verify every cache-disabling header is present with the right value."""
        for header, should_be in DISALLOW_CACHING.items():
            # Fix: idiomatic membership test ("not header in" -> "not in").
            if header not in r.headers:
                raise Exception(f"No cache header {header} not in {r.headers}")
            if r.headers[header] != should_be:
                raise Exception(
                    f"No cache header {header} should have been {should_be!r} but was {r.headers[header]!r}"
                )

    @contextlib.asynccontextmanager
    async def _request(self, method, path, *args, **kwargs):
        """Shared body of get()/post(): issue the request, validate headers
        and status, raise ServerException with the server's error on failure."""
        async with getattr(self.session, method)(
            self.url + path, *args, **kwargs
        ) as r:
            self.check_allow_caching(r)
            if r.status != HTTPStatus.OK:
                raise ServerException((await r.json())["error"])
            yield r

    @contextlib.asynccontextmanager
    async def get(self, path):
        """GET *path* on the test server."""
        async with self._request("get", path) as r:
            yield r

    @contextlib.asynccontextmanager
    async def post(self, path, *args, **kwargs):
        """POST to *path* on the test server."""
        async with self._request("post", path, *args, **kwargs) as r:
            yield r

    @contextlib.asynccontextmanager
    async def _add_memory_source(self):
        """Register an in-memory source holding ``num_records`` records."""
        async with MemorySource(
            records=[
                Record(str(i), data={"features": {"by_ten": i * 10}})
                for i in range(0, self.num_records)
            ]
        ) as source:
            self.source = self.cli.app["sources"][self.slabel] = source
            async with source() as sctx:
                self.sctx = self.cli.app["source_contexts"][self.slabel] = sctx
                yield

    @contextlib.asynccontextmanager
    async def _add_fake_model(self):
        """Register a FakeModel (and its context) under ``mlabel``."""
        with tempfile.TemporaryDirectory() as tempdir:
            async with FakeModel(
                FakeModelConfig(
                    location=tempdir,
                    features=Features(Feature("by_ten")),
                    predict=Feature("by_ten"),
                )
            ) as model:
                self.model = self.cli.app["models"][self.mlabel] = model
                async with model() as mctx:
                    self.mctx = self.cli.app["model_contexts"][
                        self.mlabel
                    ] = mctx
                    yield

    @contextlib.asynccontextmanager
    async def _add_fake_scorer(self):
        """Register a FakeScorer (and its context) under ``alabel``."""
        async with FakeScorer(FakeScorerConfig()) as scorer:
            self.scorer = self.cli.app["scorers"][self.alabel] = scorer
            async with scorer() as actx:
                self.actx = self.cli.app["scorer_contexts"][self.alabel] = actx
                yield
# Copyright © 2022 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Unit Tests and the helper routines."""
import copy
from tests import EPOCH_DATETIME
def create_filing(token=None, json_filing=None, business_id=None,
                  filing_date=EPOCH_DATETIME, transaction_id: str = None):
    """Return a test filing populated from the supplied optional fields."""
    from legal_api.models import Filing

    filing = Filing()
    filing.filing_date = filing_date
    # Payment token is stringified; the rest are assigned verbatim.
    if token:
        filing.payment_token = str(token)
    optional_fields = (
        ('filing_json', json_filing),
        ('business_id', business_id),
        ('transaction_id', transaction_id),
    )
    for attr, value in optional_fields:
        if value:
            setattr(filing, attr, value)
    return filing
def METHOD_NAME(identifier, legal_type=None, legal_name=None):
    """Return a test business with a default business office attached."""
    from legal_api.models import Business

    business = Business()
    business.identifier = identifier
    business.legal_type = legal_type
    business.legal_name = legal_name
    business.offices.append(create_business_address())
    return business
def create_business_address(office_type='businessOffice'):
    """Create an office holding one delivery and one mailing address."""
    from legal_api.models import Address, Office

    office = Office(office_type=office_type)
    for addr_kind in (Address.DELIVERY, Address.MAILING):
        office.addresses.append(create_office(addr_kind))
    return office
def create_office(type):  # noqa: A002 — parameter shadows builtin; kept for API compatibility
    """Create a fixed test address of the given address *type*."""
    from legal_api.models import Address

    return Address(city='Test City',
                   street='Test Street',
                   postal_code='T3S3T3',
                   country='CA',
                   region='BC',
                   address_type=type)
def _party_address(address_json):
    """Build an Address from a filing-JSON address block (shared by
    mailing and delivery addresses — previously duplicated inline)."""
    from legal_api.models import Address

    return Address(
        street=address_json['streetAddress'],
        city=address_json['addressCity'],
        country='CA',
        postal_code=address_json['postalCode'],
        region=address_json['addressRegion'],
        delivery_instructions=address_json.get('deliveryInstructions', '').upper()
    )


def create_party(party_json):
    """Create a party from its filing JSON, with optional addresses."""
    from legal_api.models import Party

    new_party = Party(
        first_name=party_json['officer'].get('firstName', '').upper(),
        last_name=party_json['officer'].get('lastName', '').upper(),
        middle_initial=party_json['officer'].get('middleInitial', '').upper(),
        title=party_json.get('title', '').upper(),
        organization_name=party_json['officer'].get('organizationName', '').upper(),
        email=party_json['officer'].get('email'),
        identifier=party_json['officer'].get('identifier'),
        tax_id=party_json['officer'].get('taxId'),
        party_type=party_json['officer'].get('partyType')
    )
    if party_json.get('mailingAddress'):
        new_party.mailing_address = _party_address(party_json['mailingAddress'])
    if party_json.get('deliveryAddress'):
        new_party.delivery_address = _party_address(party_json['deliveryAddress'])
    return new_party
def create_party_role(business, party, roles, appointment_date=EPOCH_DATETIME):
    """Append a PartyRole to *business* for each role in *roles*."""
    from legal_api.models import PartyRole

    business.party_roles.extend(
        PartyRole(role=role,
                  party=party,
                  appointment_date=appointment_date,
                  cessation_date=None)
        for role in roles
    )
def create_registration_data(legal_type, identifier='FM1234567', tax_id=None):
    """Persist a business + registration filing for tests.

    Returns ``(filing_id, business_id)``. For legal type 'SP' the party
    is a person proprietor; otherwise an organization partner.
    """
    person_json = {
        'officer': {
            'id': 2,
            'firstName': 'Peter',
            'lastName': 'Griffin',
            'middleName': '',
            'partyType': 'person'
        },
        'mailingAddress': {
            'streetAddress': 'mailing_address - address line one',
            'streetAddressAdditional': '',
            'addressCity': 'mailing_address city',
            'addressCountry': 'CA',
            'postalCode': 'H0H0H0',
            'addressRegion': 'BC'
        }
    }
    # Organization variant shares the mailing address but swaps the officer.
    org_json = copy.deepcopy(person_json)
    org_json['officer'] = {
        'id': 2,
        'organizationName': 'Xyz Inc.',
        'identifier': 'BC1234567',
        'taxId': '123456789',
        'email': 'peter@email.com',
        'partyType': 'organization'
    }

    business = METHOD_NAME(identifier,
                           legal_type=legal_type,
                           legal_name='test-reg-' + legal_type)
    if tax_id:
        business.tax_id = tax_id

    registration_filing_json = {
        'filing': {
            'header': {
                'name': 'registration'
            },
            'registration': {
            }
        }
    }
    filing = create_filing(json_filing=registration_filing_json)

    is_sole_prop = legal_type == 'SP'
    party = create_party(person_json if is_sole_prop else org_json)
    create_party_role(business, party, ['proprietor' if is_sole_prop else 'partner'])

    # Persist the business first so the filing can reference its id.
    business.save()
    filing.business_id = business.id
    filing.save()
    return filing.id, business.id
#!/usr/bin/env python
#
# Copyright 2008,2010,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr, gr_unittest, filter, blocks
import cmath
import math
def fir_filter(x, taps, decim=1):
    """Pure-Python direct-form FIR filter with optional decimation.

    Reference implementation used to check the C++ blocks: the input is
    zero-padded on the left by ``len(taps) - 1`` samples so output k
    depends only on inputs up to index k.
    """
    n_taps = len(taps)
    padded = [0] * (n_taps - 1) + list(x)
    out = []
    for start in range(0, len(x), decim):
        out.append(sum(taps[n_taps - 1 - j] * padded[start + j]
                       for j in range(n_taps)))
    return out
def sig_source_s(samp_rate, freq, amp, N):
    """Generate N samples of an integer-valued sine wave.

    Fix: honour the ``amp`` argument — the amplitude was previously
    hard-coded to 100, silently ignoring the parameter.
    """
    t = [float(x) / samp_rate for x in range(N)]
    y = [int(amp * math.sin(2. * math.pi * freq * x)) for x in t]
    return y
def sig_source_c(samp_rate, freq, amp, N):
    """Generate N samples of a complex exponential of amplitude ``amp``.

    Fix: honour the ``amp`` argument (previously ignored; unit amplitude
    was hard-coded). All existing callers pass amp=1, for which the
    output is unchanged.
    """
    t = [float(x) / samp_rate for x in range(N)]
    y = [amp * (math.cos(2. * math.pi * freq * x) +
                1j * math.sin(2. * math.pi * freq * x)) for x in t]
    return y
def METHOD_NAME(lo, data):
    """Element-wise product of *lo* and *data* (complex mixing)."""
    product = []
    for carrier, sample in zip(lo, data):
        product.append(carrier * sample)
    return product
class test_freq_xlating_filter(gr_unittest.TestCase):
    """QA for the freq_xlating_fft_filter_ccc block.

    Each test compares the block's output against a pure-Python
    reference: mix the input down with a complex LO, then FIR-filter.
    Fix: the four test bodies were byte-identical apart from the
    decimation factor — consolidated into ``_run_and_check``.
    """

    def setUp(self):
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def generate_ccf_source(self):
        """Complex input tone with real low-pass taps."""
        self.fs = fs = 1
        self.fc = fc = 0.3
        self.bw = bw = 0.1
        self.taps = filter.firdes.low_pass(1, fs, bw, bw / 4)
        times = list(range(1024))
        self.src_data = [
            cmath.exp(-2j * cmath.pi * fc / fs * (t / 100.0)) for t in times]

    def generate_ccc_source(self):
        """Complex input tone with complex band-pass taps."""
        self.fs = fs = 1
        self.fc = fc = 0.3
        self.bw = bw = 0.1
        self.taps = filter.firdes.complex_band_pass(
            1, fs, -bw / 2, bw / 2, bw / 4)
        times = list(range(1024))
        self.src_data = [
            cmath.exp(-2j * cmath.pi * fc / fs * (t / 100.0)) for t in times]

    def assert_fft_ok(self, expected_result, result_data):
        # The block may emit fewer samples than the reference; compare
        # only the overlapping prefix.
        expected_result = expected_result[:len(result_data)]
        self.assertComplexTuplesAlmostEqual(expected_result, result_data,
                                            places=5)

    def _run_and_check(self, decim):
        """Run the block at *decim* and compare against the Python reference.

        Requires one of the generate_*_source() methods to have been
        called first to set fs/fc/taps/src_data.
        """
        lo = sig_source_c(self.fs, -self.fc, 1, len(self.src_data))
        despun = METHOD_NAME(lo, self.src_data)
        expected_data = fir_filter(despun, self.taps, decim)
        src = blocks.vector_source_c(self.src_data)
        op = filter.freq_xlating_fft_filter_ccc(
            decim, self.taps, self.fc, self.fs)
        dst = blocks.vector_sink_c()
        self.tb.connect(src, op, dst)
        self.tb.run()
        self.assert_fft_ok(expected_data, dst.data())

    def test_fft_filter_ccf_001(self):
        self.generate_ccf_source()
        self._run_and_check(decim=1)

    def test_fft_filter_ccf_002(self):
        self.generate_ccf_source()
        self._run_and_check(decim=4)

    def test_fft_filter_ccc_001(self):
        self.generate_ccc_source()
        self._run_and_check(decim=1)

    def test_fft_filter_ccc_002(self):
        self.generate_ccc_source()
        self._run_and_check(decim=4)
if __name__ == '__main__':
    # Run this QA suite under GNU Radio's unittest wrapper.
    gr_unittest.run(test_freq_xlating_filter)
#!/usr/bin/env python3
import random
import unittest
import numpy as np
import cereal.messaging as messaging
from cereal import log
from common.params import Params
from selfdrive.locationd.calibrationd import Calibrator, INPUTS_NEEDED, INPUTS_WANTED, BLOCK_SIZE, MIN_SPEED_FILTER, MAX_YAW_RATE_FILTER, SMOOTH_CYCLES, HEIGHT_INIT
class TestCalibrationd(unittest.TestCase):
    """Unit tests for locationd's Calibrator.

    Fix: six near-identical feeding loops (v_ego + camera odometry for
    BLOCK_SIZE * INPUTS_WANTED cycles) consolidated into ``_drive``.
    """

    def _drive(self, c, v_ego, trans, rot=(0.0, 0.0, 0.0),
               trans_std=(1e-3, 1e-3, 1e-3), height_std=(1e-3, 1e-3, 1e-3)):
        """Feed BLOCK_SIZE * INPUTS_WANTED identical samples into *c*."""
        for _ in range(BLOCK_SIZE * INPUTS_WANTED):
            c.handle_v_ego(v_ego)
            c.handle_cam_odom(list(trans),
                              list(rot),
                              [0.0, 0.0, 0.0],
                              list(trans_std),
                              [0.0, 0.0, HEIGHT_INIT.item()],
                              list(height_std))

    def METHOD_NAME(self):
        """Calibration persisted in Params is restored on construction."""
        msg = messaging.new_message('liveCalibration')
        msg.liveCalibration.validBlocks = random.randint(1, 10)
        msg.liveCalibration.rpyCalib = [random.random() for _ in range(3)]
        msg.liveCalibration.height = [random.random() for _ in range(1)]
        Params().put("CalibrationParams", msg.to_bytes())
        c = Calibrator(param_put=True)
        np.testing.assert_allclose(msg.liveCalibration.rpyCalib, c.rpy)
        np.testing.assert_allclose(msg.liveCalibration.height, c.height)
        self.assertEqual(msg.liveCalibration.validBlocks, c.valid_blocks)

    def test_calibration_basics(self):
        """Clean straight-line driving fills all wanted blocks with zero rpy."""
        c = Calibrator(param_put=False)
        self._drive(c, MIN_SPEED_FILTER + 1, [MIN_SPEED_FILTER + 1, 0.0, 0.0])
        self.assertEqual(c.valid_blocks, INPUTS_WANTED)
        np.testing.assert_allclose(c.rpy, np.zeros(3))
        np.testing.assert_allclose(c.height, HEIGHT_INIT)
        c.reset()

    def test_calibration_low_speed_reject(self):
        """Samples are rejected when either ego or odometry speed is too low."""
        c = Calibrator(param_put=False)
        self._drive(c, MIN_SPEED_FILTER - 1, [MIN_SPEED_FILTER + 1, 0.0, 0.0])
        self._drive(c, MIN_SPEED_FILTER + 1, [MIN_SPEED_FILTER - 1, 0.0, 0.0])
        self.assertEqual(c.valid_blocks, 0)
        np.testing.assert_allclose(c.rpy, np.zeros(3))
        np.testing.assert_allclose(c.height, HEIGHT_INIT)

    def test_calibration_yaw_rate_reject(self):
        """Samples taken while yawing at the filter limit are rejected."""
        c = Calibrator(param_put=False)
        self._drive(c, MIN_SPEED_FILTER + 1, [MIN_SPEED_FILTER + 1, 0.0, 0.0],
                    rot=(0.0, 0.0, MAX_YAW_RATE_FILTER))
        self.assertEqual(c.valid_blocks, 0)
        np.testing.assert_allclose(c.rpy, np.zeros(3))
        np.testing.assert_allclose(c.height, HEIGHT_INIT)

    def test_calibration_speed_std_reject(self):
        """High translation-std samples only fill the needed (not wanted) blocks."""
        c = Calibrator(param_put=False)
        self._drive(c, MIN_SPEED_FILTER + 1, [MIN_SPEED_FILTER + 1, 0.0, 0.0],
                    trans_std=(1e3, 1e3, 1e3))
        self.assertEqual(c.valid_blocks, INPUTS_NEEDED)
        np.testing.assert_allclose(c.rpy, np.zeros(3))

    def test_calibration_speed_std_height_reject(self):
        """High height-std samples only fill the needed (not wanted) blocks."""
        c = Calibrator(param_put=False)
        self._drive(c, MIN_SPEED_FILTER + 1, [MIN_SPEED_FILTER + 1, 0.0, 0.0],
                    height_std=(1e3, 1e3, 1e3))
        self.assertEqual(c.valid_blocks, INPUTS_NEEDED)
        np.testing.assert_allclose(c.rpy, np.zeros(3))

    def test_calibration_auto_reset(self):
        """A persistent yaw offset triggers recalibration with smooth blending."""
        c = Calibrator(param_put=False)
        self._drive(c, MIN_SPEED_FILTER + 1, [MIN_SPEED_FILTER + 1, 0.0, 0.0])
        self.assertEqual(c.valid_blocks, INPUTS_WANTED)
        np.testing.assert_allclose(c.rpy, [0.0, 0.0, 0.0])
        old_rpy_weight_prev = 0.0
        for _ in range(BLOCK_SIZE + 10):
            # The old calibration's weight must decay gradually.
            self.assertLess(old_rpy_weight_prev - c.old_rpy_weight, 1/SMOOTH_CYCLES + 1e-3)
            old_rpy_weight_prev = c.old_rpy_weight
            c.handle_v_ego(MIN_SPEED_FILTER + 1)
            c.handle_cam_odom([MIN_SPEED_FILTER + 1, -0.05 * MIN_SPEED_FILTER, 0.0],
                              [0.0, 0.0, 0.0],
                              [0.0, 0.0, 0.0],
                              [1e-3, 1e-3, 1e-3],
                              [0.0, 0.0, HEIGHT_INIT.item()],
                              [1e-3, 1e-3, 1e-3])
        self.assertEqual(c.valid_blocks, 1)
        self.assertEqual(c.cal_status, log.LiveCalibrationData.Status.recalibrating)
        np.testing.assert_allclose(c.rpy, [0.0, 0.0, -0.05], atol=1e-2)
if __name__ == "__main__":
unittest.main() |
# Copyright (c) 2015 VMware, Inc.
# Copyright (c) 2015 Hewlett Packard Enterprise
#
# SPDX-License-Identifier: Apache-2.0
import collections
import tempfile
from unittest import mock
import testtools
import bandit
from bandit.core import config
from bandit.core import docs_utils
from bandit.core import issue
from bandit.core import manager
from bandit.formatters import text as b_text
class TextFormatterTests(testtools.TestCase):
    """Tests for bandit's plain-text report formatter."""

    def METHOD_NAME(self):
        super().METHOD_NAME()

    @mock.patch("bandit.core.issue.Issue.get_code")
    def test_output_issue(self, get_code):
        """_output_issue_str renders one issue, honoring show_code/show_lineno."""
        issue = _get_issue_instance()
        get_code.return_value = "DDDDDDD"
        indent_val = "CCCCCCC"

        def _template(_issue, _indent_val, _code):
            # Mirrors the formatter's expected line-by-line layout.
            return_val = [
                "{}>> Issue: [{}:{}] {}".format(
                    _indent_val, _issue.test_id, _issue.test, _issue.text
                ),
                "{} Severity: {} Confidence: {}".format(
                    _indent_val,
                    _issue.severity.capitalize(),
                    _issue.confidence.capitalize(),
                ),
                f"{_indent_val} CWE: {_issue.cwe}",
                f"{_indent_val} More Info: "
                f"{docs_utils.get_url(_issue.test_id)}",
                "{} Location: {}:{}:{}".format(
                    _indent_val, _issue.fname, _issue.lineno, _issue.col_offset
                ),
            ]
            if _code:
                return_val.append(f"{_indent_val}{_code}")
            return "\n".join(return_val)

        # Default: code snippet is included.
        issue_text = b_text._output_issue_str(issue, indent_val)
        expected_return = _template(issue, indent_val, "DDDDDDD")
        self.assertEqual(expected_return, issue_text)
        # show_code=False: snippet omitted.
        issue_text = b_text._output_issue_str(
            issue, indent_val, show_code=False
        )
        expected_return = _template(issue, indent_val, "")
        self.assertEqual(expected_return, issue_text)
        # show_lineno=False: location fields rendered empty.
        issue.lineno = ""
        issue.col_offset = ""
        issue_text = b_text._output_issue_str(
            issue, indent_val, show_lineno=False
        )
        expected_return = _template(issue, indent_val, "DDDDDDD")
        self.assertEqual(expected_return, issue_text)

    @mock.patch("bandit.core.manager.BanditManager.get_issue_list")
    def test_no_issues(self, get_issue_list):
        """An empty issue list produces the 'No issues identified.' message."""
        conf = config.BanditConfig()
        self.manager = manager.BanditManager(conf, "file")
        # NOTE(review): tmp_fd is never closed or removed — fd leak; consider os.close(tmp_fd).
        (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
        self.manager.out_file = self.tmp_fname
        get_issue_list.return_value = collections.OrderedDict()
        with open(self.tmp_fname, "w") as tmp_file:
            b_text.report(
                self.manager, tmp_file, bandit.LOW, bandit.LOW, lines=5
            )
        with open(self.tmp_fname) as f:
            data = f.read()
            self.assertIn("No issues identified.", data)

    @mock.patch("bandit.core.manager.BanditManager.get_issue_list")
    def test_report_nobaseline(self, get_issue_list):
        """Full report (no baseline): issues, metrics, skips all rendered."""
        conf = config.BanditConfig()
        self.manager = manager.BanditManager(conf, "file")
        (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
        self.manager.out_file = self.tmp_fname
        self.manager.verbose = True
        self.manager.files_list = ["binding.py"]
        self.manager.scores = [
            {"SEVERITY": [0, 0, 0, 1], "CONFIDENCE": [0, 0, 0, 1]}
        ]
        self.manager.skipped = [("abc.py", "File is bad")]
        self.manager.excluded_files = ["def.py"]
        issue_a = _get_issue_instance()
        issue_b = _get_issue_instance()
        get_issue_list.return_value = [issue_a, issue_b]
        self.manager.metrics.data["_totals"] = {
            "loc": 1000,
            "nosec": 50,
            "skipped_tests": 0,
        }
        for category in ["SEVERITY", "CONFIDENCE"]:
            for level in ["UNDEFINED", "LOW", "MEDIUM", "HIGH"]:
                self.manager.metrics.data["_totals"][f"{category}.{level}"] = 1
        # Validate that we're outputting the correct issues
        output_str_fn = "bandit.formatters.text._output_issue_str"
        with mock.patch(output_str_fn) as output_str:
            output_str.return_value = "ISSUE_OUTPUT_TEXT"
            with open(self.tmp_fname, "w") as tmp_file:
                b_text.report(
                    self.manager, tmp_file, bandit.LOW, bandit.LOW, lines=5
                )
            calls = [
                mock.call(issue_a, "", lines=5),
                mock.call(issue_b, "", lines=5),
            ]
            output_str.assert_has_calls(calls, any_order=True)
        # Validate that we're outputting all of the expected fields and the
        # correct values
        with open(self.tmp_fname, "w") as tmp_file:
            b_text.report(
                self.manager, tmp_file, bandit.LOW, bandit.LOW, lines=5
            )
        with open(self.tmp_fname) as f:
            data = f.read()
            expected_items = [
                "Run started",
                "Files in scope (1)",
                "binding.py (score: ",
                "CONFIDENCE: 1",
                "SEVERITY: 1",
                f"CWE: {str(issue.Cwe(issue.Cwe.MULTIPLE_BINDS))}",
                "Files excluded (1):",
                "def.py",
                "Undefined: 1",
                "Low: 1",
                "Medium: 1",
                "High: 1",
                "Total lines skipped ",
                "(#nosec): 50",
                "Total potential issues skipped due to specifically being ",
                "disabled (e.g., #nosec BXXX): 0",
                "Total issues (by severity)",
                "Total issues (by confidence)",
                "Files skipped (1)",
                "abc.py (File is bad)",
            ]
            for item in expected_items:
                self.assertIn(item, data)

    @mock.patch("bandit.core.manager.BanditManager.get_issue_list")
    def test_report_baseline(self, get_issue_list):
        """Baseline mode: candidates indented under their original issue."""
        conf = config.BanditConfig()
        self.manager = manager.BanditManager(conf, "file")
        (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
        self.manager.out_file = self.tmp_fname
        issue_a = _get_issue_instance()
        issue_b = _get_issue_instance()
        issue_x = _get_issue_instance()
        issue_x.fname = "x"
        issue_y = _get_issue_instance()
        issue_y.fname = "y"
        issue_z = _get_issue_instance()
        issue_z.fname = "z"
        get_issue_list.return_value = collections.OrderedDict(
            [(issue_a, [issue_x]), (issue_b, [issue_y, issue_z])]
        )
        # Validate that we're outputting the correct issues
        indent_val = " " * 10
        output_str_fn = "bandit.formatters.text._output_issue_str"
        with mock.patch(output_str_fn) as output_str:
            output_str.return_value = "ISSUE_OUTPUT_TEXT"
            with open(self.tmp_fname, "w") as tmp_file:
                b_text.report(
                    self.manager, tmp_file, bandit.LOW, bandit.LOW, lines=5
                )
            calls = [
                mock.call(issue_a, "", lines=5),
                mock.call(issue_b, "", show_code=False, show_lineno=False),
                mock.call(issue_y, indent_val, lines=5),
                mock.call(issue_z, indent_val, lines=5),
            ]
            output_str.assert_has_calls(calls, any_order=True)
def _get_issue_instance(
    severity=bandit.MEDIUM,
    cwe=issue.Cwe.MULTIPLE_BINDS,
    confidence=bandit.MEDIUM,
):
    """Build a canned bandit Issue for the formatter tests."""
    test_issue = issue.Issue(severity, cwe, confidence, "Test issue")
    test_issue.fname = "code.py"
    test_issue.test = "bandit_plugin"
    test_issue.lineno = 1
    return test_issue
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetServiceEndpointResult',
'AwaitableGetServiceEndpointResult',
'get_service_endpoint',
'get_service_endpoint_output',
]
# NOTE: Pulumi-generated output type — keep structural edits to the generator.
@pulumi.output_type
class GetServiceEndpointResult:
    """
    ServiceEndpoint resource details.
    """
    def __init__(__self__, id=None, location=None, name=None, properties=None, system_data=None, tags=None, type=None):
        # Shallow type validation only; values are stored via pulumi.set.
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if properties and not isinstance(properties, dict):
            raise TypeError("Expected argument 'properties' to be a dict")
        pulumi.set(__self__, "properties", properties)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def location(self) -> str:
        """
        The geo-location where the resource lives
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def properties(self) -> 'outputs.ServiceEndpointResourceResponseProperties':
        """
        ServiceEndpoint resource properties.
        """
        return pulumi.get(self, "properties")

    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> 'outputs.SystemDataResponse':
        """
        Metadata pertaining to creation and last modification of the resource.
        """
        return pulumi.get(self, "system_data")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
class AwaitableGetServiceEndpointResult(GetServiceEndpointResult):
    """Awaitable wrapper: lets callers ``await`` the already-resolved result."""
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetServiceEndpointResult(
            id=self.id,
            location=self.location,
            name=self.name,
            properties=self.properties,
            system_data=self.system_data,
            tags=self.tags,
            type=self.type)
def get_service_endpoint(account_name: Optional[str] = None,
                         resource_group_name: Optional[str] = None,
                         service_endpoint_name: Optional[str] = None,
                         opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetServiceEndpointResult:
    """
    Returns ServiceEndpoint resources for a given name.
    Azure REST API version: 2022-02-01.

    :param str account_name: The name of the RecommendationsService Account resource.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str service_endpoint_name: The name of the ServiceEndpoint resource.
    """
    __args__ = dict()
    __args__['accountName'] = account_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['serviceEndpointName'] = service_endpoint_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # Synchronous provider invoke; the token routes to the Azure Native provider.
    __ret__ = pulumi.runtime.invoke('azure-native:recommendationsservice:getServiceEndpoint', __args__, opts=opts, typ=GetServiceEndpointResult).value
    return AwaitableGetServiceEndpointResult(
        id=pulumi.get(__ret__, 'id'),
        location=pulumi.get(__ret__, 'location'),
        name=pulumi.get(__ret__, 'name'),
        properties=pulumi.get(__ret__, 'properties'),
        system_data=pulumi.get(__ret__, 'system_data'),
        tags=pulumi.get(__ret__, 'tags'),
        type=pulumi.get(__ret__, 'type'))
# Output-typed variant: the decorator lifts get_service_endpoint so its
# arguments may be pulumi Inputs; the body is intentionally `...`.
@_utilities.lift_output_func(get_service_endpoint)
def METHOD_NAME(account_name: Optional[pulumi.Input[str]] = None,
                resource_group_name: Optional[pulumi.Input[str]] = None,
                service_endpoint_name: Optional[pulumi.Input[str]] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetServiceEndpointResult]:
    """
    Returns ServiceEndpoint resources for a given name.
    Azure REST API version: 2022-02-01.

    :param str account_name: The name of the RecommendationsService Account resource.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str service_endpoint_name: The name of the ServiceEndpoint resource.
    """
    ...
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetIotHubResourceResult',
'AwaitableGetIotHubResourceResult',
'get_iot_hub_resource',
'get_iot_hub_resource_output',
]
# NOTE: Pulumi-generated output type — keep structural edits to the generator.
@pulumi.output_type
class GetIotHubResourceResult:
    """
    The description of the IoT hub.
    """
    def __init__(__self__, etag=None, id=None, identity=None, location=None, name=None, properties=None, sku=None, system_data=None, tags=None, type=None):
        # Shallow type validation only; values are stored via pulumi.set.
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if identity and not isinstance(identity, dict):
            raise TypeError("Expected argument 'identity' to be a dict")
        pulumi.set(__self__, "identity", identity)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if properties and not isinstance(properties, dict):
            raise TypeError("Expected argument 'properties' to be a dict")
        pulumi.set(__self__, "properties", properties)
        if sku and not isinstance(sku, dict):
            raise TypeError("Expected argument 'sku' to be a dict")
        pulumi.set(__self__, "sku", sku)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """
        The Etag field is *not* required. If it is provided in the response body, it must also be provided as a header per the normal ETag convention.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The resource identifier.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def identity(self) -> Optional['outputs.ArmIdentityResponse']:
        """
        The managed identities for the IotHub.
        """
        return pulumi.get(self, "identity")

    @property
    @pulumi.getter
    def location(self) -> str:
        """
        The resource location.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def properties(self) -> 'outputs.IotHubPropertiesResponse':
        """
        IotHub properties
        """
        return pulumi.get(self, "properties")

    @property
    @pulumi.getter
    def sku(self) -> 'outputs.IotHubSkuInfoResponse':
        """
        IotHub SKU info
        """
        return pulumi.get(self, "sku")

    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> 'outputs.SystemDataResponse':
        """
        The system meta data relating to this resource.
        """
        return pulumi.get(self, "system_data")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        The resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The resource type.
        """
        return pulumi.get(self, "type")
class AwaitableGetIotHubResourceResult(GetIotHubResourceResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        """Make the result awaitable; resolves immediately to a plain result."""
        # The unreachable `yield` marks this method as a generator, which is
        # all the await protocol needs; it never actually suspends.
        if False:
            yield self
        field_names = ('etag', 'id', 'identity', 'location', 'name',
                       'properties', 'sku', 'system_data', 'tags', 'type')
        return GetIotHubResourceResult(
            **{field: getattr(self, field) for field in field_names})
def METHOD_NAME(resource_group_name: Optional[str] = None,
                resource_name: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetIotHubResourceResult:
    """
    Get the non-security related metadata of an IoT hub.
    :param str resource_group_name: The name of the resource group that contains the IoT hub.
    :param str resource_name: The name of the IoT hub.
    """
    # Wire-format argument names expected by the provider.
    __args__ = {
        'resourceGroupName': resource_group_name,
        'resourceName': resource_name,
    }
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:devices/v20221115preview:getIotHubResource', __args__, opts=opts, typ=GetIotHubResourceResult).value
    field_names = ('etag', 'id', 'identity', 'location', 'name',
                   'properties', 'sku', 'system_data', 'tags', 'type')
    return AwaitableGetIotHubResourceResult(
        **{field: pulumi.get(__ret__, field) for field in field_names})
@_utilities.lift_output_func(METHOD_NAME)
def get_iot_hub_resource_output(resource_group_name: Optional[pulumi.Input[str]] = None,
                                resource_name: Optional[pulumi.Input[str]] = None,
                                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetIotHubResourceResult]:
    """
    Get the non-security related metadata of an IoT hub.

    Output-typed variant of the plain invoke: accepts pulumi Inputs and
    returns a pulumi.Output wrapping the same result.

    :param str resource_group_name: The name of the resource group that contains the IoT hub.
    :param str resource_name: The name of the IoT hub.
    """
    # Body intentionally empty: @_utilities.lift_output_func supplies the
    # implementation by delegating to the wrapped plain invoke function.
    ...
7,265 | test elfr register event source w | # Impacket - Collection of Python classes for working with network protocols.
#
# Copyright (C) 2023 Fortra. All rights reserved.
#
# This software is provided under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Tested so far:
# (h)ElfrOpenBELW
# (h)ElfrOpenELW
# (h)ElfrRegisterEventSourceW
# (h)ElfrReadELW
# (h)ElfrClearELFW
# (h)ElfrBackupELFW
# ElfrReportEventW
# hElfrNumberOfRecords
# hElfrOldestRecordNumber
# Not yet:
# ElfrCloseEL
#
from __future__ import division
from __future__ import print_function
import pytest
import unittest
from six import assertRaisesRegex
from tests.dcerpc import DCERPCTests
from impacket.dcerpc.v5 import even
from impacket.dcerpc.v5.dtypes import NULL
from impacket.dcerpc.v5.rpcrt import DCERPCException, RPC_C_AUTHN_LEVEL_PKT_PRIVACY
class RRPTests(DCERPCTests):
    """Remote tests for the MS-EVEN (classic EventLog) DCERPC interface.

    Connects to the target's eventlog named pipe with packet-privacy
    authentication.  Both raw request structures and their h-prefixed helper
    wrappers are exercised; privileged or invalid operations are asserted to
    fail with the specific NT status the server is expected to return.
    """
    iface_uuid = even.MSRPC_UUID_EVEN
    string_binding = r"ncacn_np:{0.machine}[\PIPE\eventlog]"
    authn = True
    authn_level = RPC_C_AUTHN_LEVEL_PKT_PRIVACY
    def test_ElfrOpenBELW(self):
        # Opening a backup log file that does not exist on the target.
        dce, rpctransport = self.connect()
        request = even.ElfrOpenBELW()
        request['UNCServerName'] = NULL
        request['BackupFileName'] = '\\??\\BETO'
        request['MajorVersion'] = 1
        request['MinorVersion'] = 1
        with assertRaisesRegex(self, DCERPCException, "STATUS_OBJECT_NAME_NOT_FOUND"):
            dce.request(request)
    def test_hElfrOpenBELW(self):
        # Helper-wrapper variant of the previous test.
        dce, rpctransport = self.connect()
        with assertRaisesRegex(self, DCERPCException, "STATUS_OBJECT_NAME_NOT_FOUND"):
            even.hElfrOpenBELW(dce, '\\??\\BETO')
    def test_ElfrOpenELW(self):
        # Opening the live 'Security' log is expected to succeed.
        dce, rpctransport = self.connect()
        request = even.ElfrOpenELW()
        request['UNCServerName'] = NULL
        request['ModuleName'] = 'Security'
        request['RegModuleName'] = ''
        request['MajorVersion'] = 1
        request['MinorVersion'] = 1
        resp = dce.request(request)
        resp.dump()
    def test_hElfrOpenELW(self):
        dce, rpctransport = self.connect()
        resp = even.hElfrOpenELW(dce, 'Security', '')
        resp.dump()
    def METHOD_NAME(self):
        # Masked name: raw ElfrRegisterEventSourceW request; registering an
        # event source against 'Security' is expected to be denied.
        dce, rpctransport = self.connect()
        request = even.ElfrRegisterEventSourceW()
        request['UNCServerName'] = NULL
        request['ModuleName'] = 'Security'
        request['RegModuleName'] = ''
        request['MajorVersion'] = 1
        request['MinorVersion'] = 1
        with assertRaisesRegex(self, DCERPCException, "STATUS_ACCESS_DENIED"):
            dce.request(request)
    def test_hElfrRegisterEventSourceW(self):
        dce, rpctransport = self.connect()
        with assertRaisesRegex(self, DCERPCException, "STATUS_ACCESS_DENIED"):
            even.hElfrRegisterEventSourceW(dce, 'Security', '')
    def test_ElfrReadELW(self):
        # Sequential forward read of up to MAX_BATCH_BUFF bytes of records.
        dce, rpctransport = self.connect()
        resp = even.hElfrOpenELW(dce, 'Security', '')
        resp.dump()
        request = even.ElfrReadELW()
        request['LogHandle'] = resp['LogHandle']
        request['ReadFlags'] = even.EVENTLOG_SEQUENTIAL_READ | even.EVENTLOG_FORWARDS_READ
        request['RecordOffset'] = 0
        request['NumberOfBytesToRead'] = even.MAX_BATCH_BUFF
        resp = dce.request(request)
        resp.dump()
    def test_hElfrReadELW(self):
        dce, rpctransport = self.connect()
        resp = even.hElfrOpenELW(dce, 'Security', '')
        resp.dump()
        resp = even.hElfrReadELW(dce, resp['LogHandle'],
                                 even.EVENTLOG_SEQUENTIAL_READ | even.EVENTLOG_FORWARDS_READ,
                                 0, even.MAX_BATCH_BUFF)
        resp.dump()
    def test_ElfrClearELFW(self):
        # Clearing with an NT-path backup name that is invalid for the server.
        dce, rpctransport = self.connect()
        resp = even.hElfrOpenELW(dce, 'Security', '')
        resp.dump()
        request = even.ElfrClearELFW()
        request['LogHandle'] = resp['LogHandle']
        request['BackupFileName'] = '\\??\\c:\\beto2'
        with assertRaisesRegex(self, DCERPCException, "STATUS_OBJECT_NAME_INVALID"):
            dce.request(request)
    def test_hElfrClearELFW(self):
        dce, rpctransport = self.connect()
        resp = even.hElfrOpenELW(dce, 'Security', '')
        resp.dump()
        with assertRaisesRegex(self, DCERPCException, "STATUS_OBJECT_NAME_INVALID"):
            even.hElfrClearELFW(dce, resp['LogHandle'], '\\??\\c:\\beto2')
    def test_ElfrBackupELFW(self):
        dce, rpctransport = self.connect()
        resp = even.hElfrOpenELW(dce, 'Security', '')
        resp.dump()
        request = even.ElfrBackupELFW()
        request['LogHandle'] = resp['LogHandle']
        request['BackupFileName'] = '\\??\\c:\\beto2'
        with assertRaisesRegex(self, DCERPCException, "STATUS_OBJECT_NAME_INVALID"):
            dce.request(request)
    def test_hElfrBackupELFW(self):
        dce, rpctransport = self.connect()
        resp = even.hElfrOpenELW(dce, 'Security', '')
        resp.dump()
        with assertRaisesRegex(self, DCERPCException, "STATUS_OBJECT_NAME_INVALID"):
            even.hElfrBackupELFW(dce, resp['LogHandle'], '\\??\\c:\\beto2')
    def test_ElfrReportEventW(self):
        # Writing into the Security log requires privileges the test account
        # is not expected to hold, so the report must be denied.
        dce, rpctransport = self.connect()
        resp = even.hElfrOpenELW(dce, 'Security', '')
        resp.dump()
        request = even.ElfrReportEventW()
        request['LogHandle'] = resp['LogHandle']
        request['Time'] = 5000000
        request['EventType'] = even.EVENTLOG_ERROR_TYPE
        request['EventCategory'] = 0
        request['EventID'] = 7037
        request['ComputerName'] = 'MYCOMPUTER!'
        request['NumStrings'] = 1
        request['DataSize'] = 0
        request['UserSID'].fromCanonical('S-1-2-5-21')
        nn = even.PRPC_UNICODE_STRING()
        nn['Data'] = 'HOLA BETUSSS'
        request['Strings'].append(nn)
        request['Data'] = NULL
        request['Flags'] = 0
        request['RecordNumber'] = NULL
        request['TimeWritten'] = NULL
        with assertRaisesRegex(self, DCERPCException, "STATUS_ACCESS_DENIED"):
            dce.request(request)
    def test_hElfrNumberOfRecords(self):
        dce, rpctransport = self.connect()
        resp = even.hElfrOpenELW(dce, 'Security', '')
        resp.dump()
        resp = even.hElfrNumberOfRecords(dce, resp['LogHandle'])
        resp.dump()
    def test_hElfrOldestRecordNumber(self):
        dce, rpctransport = self.connect()
        resp = even.hElfrOpenELW(dce, 'Security', '')
        resp.dump()
        resp = even.hElfrOldestRecordNumber(dce, resp['LogHandle'])
        resp.dump()
@pytest.mark.remote
class RRPTestsSMBTransport(RRPTests, unittest.TestCase):
    # Run the full eventlog suite over SMB with 32-bit NDR marshaling.
    transfer_syntax = DCERPCTests.TRANSFER_SYNTAX_NDR
@pytest.mark.remote
class RRPTestsSMBTransport64(RRPTests, unittest.TestCase):
    # Same suite negotiated with the NDR64 transfer syntax instead.
    transfer_syntax = DCERPCTests.TRANSFER_SYNTAX_NDR64
# Process command-line arguments.
if __name__ == "__main__":
    # Fix: removed a stray '|' delimiter fused onto this line, which made
    # the module unparseable.
    unittest.main(verbosity=1)
7,266 | do makecache | # -*- coding: utf-8 -*-
# Copyright (C) 2014-2018 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
import dnf.cli.commands.makecache as makecache
import dnf.pycomp
import tests.support
from tests.support import mock
class MakeCacheCommandTest(tests.support.DnfBaseTestCase):
    """Unit tests for `dnf makecache timer` decision logic.

    fill_sack is mocked out, so only the timer gating (disabled timer,
    recent refresh, battery power, metered connection) is exercised.

    Fixes: removed a stray '|' delimiter fused onto the last line (syntax
    error), and corrected the names of the mock.patch-injected parameters —
    stacked patches inject bottom-up, so the bottom-most patch
    (on_metered_connection) is the first injected argument, not on_ac_power.
    """
    REPOS = ['main']
    CLI = "mock"
    def setUp(self):
        super(MakeCacheCommandTest, self).setUp()
        for r in self.base.repos.values():
            r.basecachedir = self.base.conf.cachedir
    @staticmethod
    @mock.patch('dnf.Base.fill_sack', new=mock.MagicMock())
    def METHOD_NAME(cmd):
        # Run the command as `makecache timer` with repo syncing stubbed out.
        return tests.support.command_run(cmd, ['timer'])
    def assert_last_info(self, logger, msg):
        # The most recent info-level record must be exactly `msg`.
        self.assertEqual(logger.info.mock_calls[-1], mock.call(msg))
    @mock.patch('dnf.base.logger',
                new_callable=tests.support.mock_logger)
    @mock.patch('dnf.cli.commands._', dnf.pycomp.NullTranslations().ugettext)
    @mock.patch('dnf.util.on_ac_power', return_value=True)
    @mock.patch('dnf.util.on_metered_connection', return_value=False)
    def test_makecache_timer(self, _on_metered_connection, _on_ac_power, logger):
        cmd = makecache.MakeCacheCommand(self.cli)
        self.base.conf.metadata_timer_sync = 0
        self.assertFalse(self.METHOD_NAME(cmd))
        self.assert_last_info(logger, u'Metadata timer caching disabled.')
        self.base.conf.metadata_timer_sync = 5  # resync after 5 seconds
        self.base._repo_persistor.since_last_makecache = mock.Mock(return_value=3)
        self.assertFalse(self.METHOD_NAME(cmd))
        self.assert_last_info(logger, u'Metadata cache refreshed recently.')
    @mock.patch('dnf.base.logger',
                new_callable=tests.support.mock_logger)
    @mock.patch('dnf.cli.commands._', dnf.pycomp.NullTranslations().ugettext)
    @mock.patch('dnf.util.on_ac_power', return_value=False)
    @mock.patch('dnf.util.on_metered_connection', return_value=False)
    def test_makecache_timer_battery(self, _on_metered_connection, _on_ac_power, logger):
        cmd = makecache.MakeCacheCommand(self.cli)
        self.base.conf.metadata_timer_sync = 5
        self.assertFalse(self.METHOD_NAME(cmd))
        msg = u'Metadata timer caching disabled when running on a battery.'
        self.assert_last_info(logger, msg)
    @mock.patch('dnf.cli.commands._', dnf.pycomp.NullTranslations().ugettext)
    @mock.patch('dnf.util.on_ac_power', return_value=None)
    @mock.patch('dnf.util.on_metered_connection', return_value=False)
    def test_makecache_timer_battery2(self, _on_metered_connection, _on_ac_power):
        # Unknown power state (None) must not block the timer run.
        cmd = makecache.MakeCacheCommand(self.cli)
        self.base.conf.metadata_timer_sync = 5
        self.assertTrue(self.METHOD_NAME(cmd))
    @mock.patch('dnf.base.logger',
                new_callable=tests.support.mock_logger)
    @mock.patch('dnf.cli.commands._', dnf.pycomp.NullTranslations().ugettext)
    @mock.patch('dnf.util.on_ac_power', return_value=False)
    @mock.patch('dnf.util.on_metered_connection', return_value=True)
    def test_makecache_timer_metered(self, _on_metered_connection, _on_ac_power, logger):
        cmd = makecache.MakeCacheCommand(self.cli)
        self.base.conf.metadata_timer_sync = 5
        self.assertFalse(self.METHOD_NAME(cmd))
        msg = u'Metadata timer caching disabled when running on metered connection.'
        self.assert_last_info(logger, msg)
7,267 | test set single value | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.kernels.sparse_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
def _SparseToDense(sparse_indices, output_size, sparse_values,
                   default_value, validate_indices=True):
  """Build a dense tensor from sparse data and evaluate it.

  Feeds *sparse_indices* through an int32 placeholder so shape inference
  never sees a constant, then evaluates sparse_ops.sparse_to_dense in the
  current session.
  """
  indices_ph = array_ops.placeholder(dtypes.int32)
  dense = sparse_ops.sparse_to_dense(
      indices_ph,
      output_size,
      sparse_values,
      default_value=default_value,
      validate_indices=validate_indices)
  return dense.eval(feed_dict={indices_ph: sparse_indices})
class SparseToDenseTest(xla_test.XLATestCase):
  """Exercises sparse_to_dense through XLA for several dtypes, ranks and
  error conditions, comparing against NumPy-built expected arrays."""
  def testInt(self):
    with self.session(), self.test_scope():
      tf_ans = _SparseToDense([1, 3], [5], 1, 0)
      np_ans = np.array([0, 1, 0, 1, 0]).astype(np.int32)
      self.assertAllClose(np_ans, tf_ans)
  def testFloat(self):
    with self.session(), self.test_scope():
      tf_ans = _SparseToDense([1, 3], [5], 1.0, 0.0)
      np_ans = np.array([0, 1, 0, 1, 0]).astype(np.float32)
      self.assertAllClose(np_ans, tf_ans)
  def testSetValue(self):
    # Per-index values instead of a single scalar value.
    with self.session(), self.test_scope():
      tf_ans = _SparseToDense([1, 3], [5], [1, 2], -1)
      np_ans = np.array([-1, 1, -1, 2, -1]).astype(np.int32)
      self.assertAllClose(np_ans, tf_ans)
  def METHOD_NAME(self):
    # Masked name: single scalar value broadcast to every sparse index.
    with self.session(), self.test_scope():
      tf_ans = _SparseToDense([1, 3], [5], 1, -1)
      np_ans = np.array([-1, 1, -1, 1, -1]).astype(np.int32)
      self.assertAllClose(np_ans, tf_ans)
  def test2d(self):
    # pylint: disable=bad-whitespace
    with self.session(), self.test_scope():
      tf_ans = _SparseToDense([[1, 3], [2, 0]], [3, 4], 1, -1)
      np_ans = np.array([[-1, -1, -1, -1],
                         [-1, -1, -1, 1],
                         [ 1, -1, -1, -1]]).astype(np.int32)
      self.assertAllClose(np_ans, tf_ans)
  def testZeroDefault(self):
    with self.session():
      x = sparse_ops.sparse_to_dense(2, [4], 7).eval()
      self.assertAllEqual(x, [0, 0, 7, 0])
  def test3d(self):
    with self.session(), self.test_scope():
      tf_ans = _SparseToDense([[1, 3, 0], [2, 0, 1]], [3, 4, 2], 1, -1)
      np_ans = np.ones((3, 4, 2), dtype=np.int32) * -1
      np_ans[1, 3, 0] = 1
      np_ans[2, 0, 1] = 1
      self.assertAllClose(np_ans, tf_ans)
  def testDegenerateIndexMatrix(self):
    # Rank-2 indices whose rows are single-element vectors.
    with self.session(), self.test_scope():
      tf_ans = _SparseToDense([[2], [3], [4], [5], [6], [7], [8], [9]], [10],
                              [1, 2, 3, 4, 5, 6, 7, 8], -1)
      self.assertAllClose([-1, -1, 1, 2, 3, 4, 5, 6, 7, 8], tf_ans)
  def testBadShape(self):
    with self.session(), self.test_scope():
      with self.assertRaisesWithPredicateMatch(ValueError, "must be rank 1"):
        _SparseToDense([1, 3], [[5], [3]], 1, -1)
  def testBadValue(self):
    with self.session(), self.test_scope():
      with self.assertRaisesOpError(
          r"sparse_values has incorrect shape \[2,1\], "
          r"should be \[\] or \[2\]"):
        _SparseToDense([1, 3], [5], [[5], [3]], -1)
  def testBadNumValues(self):
    with self.session(), self.test_scope():
      with self.assertRaisesOpError(
          r"sparse_values has incorrect shape \[3\], should be \[\] or \[2\]"):
        _SparseToDense([1, 3], [5], [1, 2, 3], -1)
  def testBadDefault(self):
    with self.session(), self.test_scope():
      with self.assertRaisesOpError("default_value should be a scalar"):
        _SparseToDense([1, 3], [5], [1, 2], [0])
if __name__ == "__main__":
  # Fix: removed a stray '|' delimiter fused onto this line.
  test.main()
7,268 | managed by | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetConfigurationProfileHCRPAssignmentResult',
'AwaitableGetConfigurationProfileHCRPAssignmentResult',
'get_configuration_profile_hcrpassignment',
'get_configuration_profile_hcrpassignment_output',
]
@pulumi.output_type
class GetConfigurationProfileHCRPAssignmentResult:
    """
    Configuration profile assignment is an association between a VM and automanage profile configuration.
    """
    # NOTE: pulumi-generated output type. `METHOD_NAME` is a masked
    # identifier that stands in for the `managed_by` field (stored under
    # the "managed_by" key and exposed via the "managedBy" getter below).
    def __init__(__self__, id=None, METHOD_NAME=None, name=None, properties=None, system_data=None, type=None):
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'managed_by' to be a str")
        pulumi.set(__self__, "managed_by", METHOD_NAME)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if properties and not isinstance(properties, dict):
            raise TypeError("Expected argument 'properties' to be a dict")
        pulumi.set(__self__, "properties", properties)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="managedBy")
    def METHOD_NAME(self) -> str:
        """
        Azure resource id. Indicates if this resource is managed by another Azure resource.
        """
        return pulumi.get(self, "managed_by")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def properties(self) -> 'outputs.ConfigurationProfileAssignmentPropertiesResponse':
        """
        Properties of the configuration profile assignment.
        """
        return pulumi.get(self, "properties")
    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> 'outputs.SystemDataResponse':
        """
        Azure Resource Manager metadata containing createdBy and modifiedBy information.
        """
        return pulumi.get(self, "system_data")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
class AwaitableGetConfigurationProfileHCRPAssignmentResult(GetConfigurationProfileHCRPAssignmentResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        """Make the result awaitable; resolves immediately to a plain result."""
        # The unreachable `yield` turns this method into a generator, which
        # satisfies the await protocol without ever suspending.
        if False:
            yield self
        field_names = ('id', 'METHOD_NAME', 'name', 'properties',
                       'system_data', 'type')
        return GetConfigurationProfileHCRPAssignmentResult(
            **{field: getattr(self, field) for field in field_names})
def get_configuration_profile_hcrpassignment(configuration_profile_assignment_name: Optional[str] = None,
                                             machine_name: Optional[str] = None,
                                             resource_group_name: Optional[str] = None,
                                             opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetConfigurationProfileHCRPAssignmentResult:
    """
    Get information about a configuration profile assignment
    Azure REST API version: 2022-05-04.
    :param str configuration_profile_assignment_name: The configuration profile assignment name.
    :param str machine_name: The name of the Arc machine.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    # Wire-format argument names expected by the provider.
    __args__ = {
        'configurationProfileAssignmentName': configuration_profile_assignment_name,
        'machineName': machine_name,
        'resourceGroupName': resource_group_name,
    }
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:automanage:getConfigurationProfileHCRPAssignment', __args__, opts=opts, typ=GetConfigurationProfileHCRPAssignmentResult).value
    # Map constructor keyword -> key inside the raw invoke result.
    field_map = {'id': 'id', 'METHOD_NAME': 'managed_by', 'name': 'name',
                 'properties': 'properties', 'system_data': 'system_data',
                 'type': 'type'}
    return AwaitableGetConfigurationProfileHCRPAssignmentResult(
        **{kwarg: pulumi.get(__ret__, key) for kwarg, key in field_map.items()})
@_utilities.lift_output_func(get_configuration_profile_hcrpassignment)
def get_configuration_profile_hcrpassignment_output(configuration_profile_assignment_name: Optional[pulumi.Input[str]] = None,
                                                    machine_name: Optional[pulumi.Input[str]] = None,
                                                    resource_group_name: Optional[pulumi.Input[str]] = None,
                                                    opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetConfigurationProfileHCRPAssignmentResult]:
    """
    Get information about a configuration profile assignment
    Azure REST API version: 2022-05-04.

    Output-typed variant of the plain invoke: accepts pulumi Inputs and
    returns a pulumi.Output wrapping the same result.

    :param str configuration_profile_assignment_name: The configuration profile assignment name.
    :param str machine_name: The name of the Arc machine.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    # Body intentionally empty: @_utilities.lift_output_func supplies the
    # implementation by delegating to the wrapped plain invoke function.
    ...
7,269 | post init | from abc import ABC, abstractmethod
from contextlib import nullcontext
from typing import Callable, Dict, List, Optional, Tuple, Union
import torch
import torch.nn as nn
from coati.experience_buffer import ExperienceBuffer
from torch.optim import Optimizer
from torch.utils.data import DataLoader
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from colossalai.booster import Booster
from colossalai.booster.plugin import Plugin
from .sampler import DistributedSampler
# Accepted argument shapes for Strategy.prepare / Booster.boost: a bare
# model, a (model, optimizer) pair, or a kwargs dict for booster.boost().
_BoostArgSpec = Union[nn.Module, Tuple[nn.Module, Optimizer], Dict]
class Strategy(ABC):
    """
    Base class for training strategies.

    A strategy owns the distributed setup and a colossalai Booster; concrete
    subclasses decide how processes are initialized and how dataloaders and
    checkpoints behave.

    Fix: removed a stray '|' delimiter fused onto the final line, which made
    the module unparseable.
    """
    def __init__(self, plugin_initializer: Callable[..., Optional[Plugin]] = lambda: None) -> None:
        super().__init__()
        # NOTE: dist must be initialized before Booster
        self.setup_distributed()
        self.plugin = plugin_initializer()
        self.booster = Booster(plugin=self.plugin)
        self.METHOD_NAME()
    @abstractmethod
    def METHOD_NAME(self) -> None:
        """Hook invoked at the end of __init__, after the Booster exists."""
        pass
    def backward(self, loss: torch.Tensor, model: nn.Module, optimizer: Optimizer, **kwargs) -> None:
        """Run the backward pass through the Booster."""
        self.booster.backward(loss, optimizer)
    def optimizer_step(self, optimizer: Optimizer, **kwargs) -> None:
        """Apply one optimizer update step."""
        optimizer.step()
    @abstractmethod
    def setup_distributed(self) -> None:
        """Initialize the distributed process group (runs before Booster creation)."""
        pass
    @abstractmethod
    def setup_dataloader(self, data_buffer: ExperienceBuffer, pin_memory: bool = False) -> DataLoader:
        """Build a DataLoader over the experience buffer."""
        pass
    def model_init_context(self):
        """Context manager active while models are constructed; no-op by default."""
        return nullcontext()
    def prepare(self, *boost_args: _BoostArgSpec) -> Union[List[_BoostArgSpec], _BoostArgSpec]:
        """Prepare [model | (model, optimizer) | Dict] based on each strategy.
        NOTE: the keys of Dict must be a subset of `self.booster.boost`'s arguments.
        Example::
            >>> # e.g., include lr_scheduler
            >>> result_dict = strategy.prepare(dict(model=model, lr_scheduler=lr_scheduler))
            >>> # when fine-tuning actor and critic
            >>> (actor, actor_optim), (critic, critic_optim), reward_model, initial_model = strategy.prepare((actor, actor_optim), (critic, critic_optim), reward_model, initial_model)
            >>> # or when training reward model
            >>> (reward_model, reward_model_optim) = strategy.prepare((reward_model, reward_model_optim))
            >>> # or just inference
            >>> actor, critic = strategy.prepare(actor, critic)
        Returns:
            Union[List[_BoostArgSpec], _BoostArgSpec]: [model | (model, optimizer) | Dict] in the original order.
        """
        rets = []
        for arg in boost_args:
            if isinstance(arg, nn.Module):
                model, *_ = self.booster.boost(arg)
                rets.append(model)
            elif isinstance(arg, tuple):
                try:
                    model, optimizer = arg
                except ValueError:
                    raise RuntimeError(f'Expect (model, optimizer) pair, got a tuple with size "{len(arg)}"')
                model, optimizer, *_ = self.booster.boost(model=model, optimizer=optimizer)
                rets.append((model, optimizer))
            elif isinstance(arg, Dict):
                model, optimizer, criterion, dataloader, lr_scheduler = self.booster.boost(**arg)
                boost_result = dict(model=model,
                                    optimizer=optimizer,
                                    criterion=criterion,
                                    dataloader=dataloader,
                                    lr_scheduler=lr_scheduler)
                # remove None values
                boost_result = {key: value for key, value in boost_result.items() if value is not None}
                rets.append(boost_result)
            else:
                raise RuntimeError(f'Type {type(arg)} is not supported')
        return rets[0] if len(rets) == 1 else rets
    @staticmethod
    def unwrap_model(model: nn.Module) -> nn.Module:
        """Get the unwrapped model from a wrapped model made by Strategy.prepare.
        Args:
            model (nn.Module): the model to unwrap
        Returns:
            nn.Module: the original model
        """
        return model
    def save_model(self, model: nn.Module, path: str, only_rank0: bool = True, **kwargs) -> None:
        """Save model weights; shards the checkpoint unless only_rank0 is True."""
        self.booster.save_model(model, path, shard=not only_rank0, **kwargs)
    def load_model(self, model: nn.Module, path: str, strict: bool = True) -> None:
        """Load model weights from `path`."""
        self.booster.load_model(model, path, strict)
    def save_optimizer(self, optimizer: Optimizer, path: str, only_rank0: bool = False, **kwargs) -> None:
        """Save optimizer state; shards the checkpoint unless only_rank0 is True."""
        self.booster.save_optimizer(optimizer, path, shard=not only_rank0, **kwargs)
    def load_optimizer(self, optimizer: Optimizer, path: str) -> None:
        """Load optimizer state from `path`."""
        self.booster.load_optimizer(optimizer, path)
    def setup_sampler(self, dataset) -> DistributedSampler:
        # FIXME(cwher): this is only invoked in train_on_ray, not tested after adapt Boost API.
        return DistributedSampler(dataset, 1, 0)
    @abstractmethod
    def save_pretrained(self,
                        model: nn.Module,
                        path: str,
                        only_rank0: bool = True,
                        tokenizer: Optional[PreTrainedTokenizerBase] = None) -> None:
        """Save in HuggingFace `save_pretrained` format."""
        pass
    @abstractmethod
    def get_model_state_dict_shard(self, model: nn.Module, **config):
        """Yield shards of the model state dict for streaming transfer."""
        pass
7,270 | test verify agent status | import json
import unittest
from test import DongTaiTestCase
from test.apiserver.test_agent_base import REGISTER_JSON, gzipdata
from rest_framework.test import APITestCase
from dongtai_common.models.user import User
class MyTestCase(DongTaiTestCase):
    """Ad-hoc integration tests for DongTai engine Celery tasks.

    NOTE(review): many tests reference hard-coded database ids
    (e.g. method ids 110/657160, project 1252) — presumably fixtures from a
    specific environment; verify they exist before relying on these tests.
    """
    def test_something(self):
        # Deliberately failing placeholder (True != False).
        self.assertEqual(True, False)
    def test_vul_recheck(self):
        from dongtai_engine.tasks import vul_recheck
        vul_recheck()
    def test_search_vul_from_replay_method_pool(self):
        from dongtai_engine.tasks import search_vul_from_replay_method_pool
        method_id = 110
        search_vul_from_replay_method_pool(method_id)
    def test_search_vul_from_method_pool(self):
        method_pool_id = 657160
        from dongtai_engine.tasks import search_vul_from_method_pool
        search_vul_from_method_pool(method_pool_id)
    def test_update_agent_status(self):
        from dongtai_engine.tasks import update_agent_status
        update_agent_status()
    def METHOD_NAME(self):
        # Masked name: re-checks liveness of agents currently marked stopped
        # and flips is_running/is_core_running for any that respond.
        import time
        from dongtai_common.models.agent import IastAgent
        from dongtai_engine.tasks import is_alive
        timestamp = int(time.time())
        stopped_agents = IastAgent.objects.values("id").filter(is_running=0)
        is_running_agents = []
        for agent in stopped_agents:
            agent_id = agent["id"]
            if is_alive(agent_id=agent_id, timestamp=timestamp):
                is_running_agents.append(agent_id)
            else:
                continue
        if is_running_agents:
            IastAgent.objects.filter(id__in=is_running_agents).update(is_running=1, is_core_running=1)
    def test_update_sca(self):
        from dongtai_engine.tasks import update_one_sca
        update_one_sca(
            2379,
            "/Users/xxx/spring-boot/2.3.2.RELEASE/org.springframework:spring-beans.jar",
            "a4bb5ffad5564e4a0e25955e3a40b1c6158385b2",
            "org.springframework:spring-beans.jar",
            "SHA-1",
        )
    def test_http_header(self):
        # Collects the set of HTTP header names seen across all stored raw
        # requests of one project, parsing each with a minimal local parser.
        from dongtai_common.models.agent import IastAgent
        agents = IastAgent.objects.filter(bind_project_id=1252).values("id")
        from dongtai_common.models.agent_method_pool import MethodPool
        method_pools = MethodPool.objects.filter(agent_id__in=agents).values("req_header_fs")
        from http.server import BaseHTTPRequestHandler
        class HttpRequest(BaseHTTPRequestHandler):
            # Reuses BaseHTTPRequestHandler's parser on an in-memory request
            # instead of a socket.
            def __init__(self, raw_request):
                self.body = None
                self.uri = None
                self.params = None
                from io import BytesIO
                self.rfile = BytesIO(raw_request.encode())
                self.raw_requestline = self.rfile.readline()
                self.error_code = self.error_message = None
                self.parse_request()
                self.parse_path()
                self.parse_body()
                self._cookie_keys = set()
            @property
            def cookie_keys(self):
                return self._cookie_keys
            def init_cookie_keys(self):
                # NOTE(review): reads header "cookies"; the standard header
                # name is "Cookie" — confirm against the stored requests.
                cookies = self.headers.get("cookies").split(";")
                for cookie in cookies:
                    self._cookie_keys.add(cookie.strip().split("=")[0])
            def parse_body(self):
                # Everything left after the headers is the body.
                if self.body is None:
                    self.body = self.rfile.read().decode("utf-8")
                return self.body
            def parse_path(self):
                # Split "/uri?a=1&b=2" into uri and query-string parts.
                items = self.path.split("?")
                self.uri = items[0]
                self.params = "?".join(items[1:])
        project_headers = set()
        project_cookies = set()
        for method_pool in method_pools:
            try:
                request = HttpRequest(method_pool["req_header_fs"])
                project_headers = project_headers | set(request.headers.keys())
            except BaseException:
                # Malformed stored requests are skipped on purpose.
                pass
        print(project_headers)
        print(project_cookies)
class MultiUserTestCase(APITestCase):
    """Checks update_agent_status across agents registered by two users."""
    def test_update_agent_status_multi_user(self):
        from dongtai_engine.tasks import update_agent_status
        user1 = User.objects.filter(pk=1).get()
        assert user1 is not None
        self.client.force_authenticate(user=user1)
        self.register_agent(name="test1")
        User(id=2, username="test", phone="123456789").save()
        user2 = User.objects.filter(pk=2).get()
        assert user2 is not None
        self.client.force_authenticate(user=user2)
        self.register_agent(name="test2")
        update_agent_status()
    def register_agent(self, **kwargs):
        # NOTE(review): register_data aliases the module-level REGISTER_JSON,
        # so update(kwargs) mutates the shared dict; gzipdata(REGISTER_JSON)
        # therefore does see the overrides, but state leaks between calls.
        register_data = REGISTER_JSON
        register_data.update(kwargs)
        data = gzipdata(REGISTER_JSON)
        response = self.client.post(
            "http://testserver/api/v1/agent/register",
            data=data,
            HTTP_CONTENT_ENCODING="gzip",
            content_type="application/json",
        )
        return json.loads(response.content)["data"]
if __name__ == "__main__":
    # Fix: removed a stray '|' delimiter fused onto this line.
    unittest.main()
7,271 | repr frozenset | """Redo the builtin repr() (representation) but with limits on most sizes."""
__all__ = ["Repr", "repr", "recursive_repr"]
import builtins
from itertools import islice
from _thread import get_ident
def recursive_repr(fillvalue='...'):
    'Decorator to make a repr function return fillvalue for a recursive call'
    def decorating_function(user_function):
        # Keys currently being rendered, per (object id, thread id) so that
        # concurrent threads do not suppress each other's reprs.
        active = set()
        def wrapper(self):
            marker = id(self), get_ident()
            if marker in active:
                return fillvalue
            active.add(marker)
            try:
                return user_function(self)
            finally:
                active.discard(marker)
        # Can't use functools.wraps() here because of bootstrap issues
        for attr in ('__module__', '__doc__', '__name__', '__qualname__'):
            setattr(wrapper, attr, getattr(user_function, attr))
        wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
        return wrapper
    return decorating_function
class Repr:
    """Produce size-limited object representations.

    Each ``max*`` attribute bounds how much of the corresponding type is
    shown before eliding with '...'. ``repr1`` dispatches to a
    ``repr_<typename>`` method when one exists.
    """

    def __init__(self):
        self.maxlevel = 6       # max nesting depth for containers
        self.maxtuple = 6
        self.maxlist = 6
        self.maxarray = 5
        self.maxdict = 4
        self.maxset = 6
        self.maxfrozenset = 6
        self.maxdeque = 6
        self.maxstring = 30     # max repr length for str
        self.maxlong = 40       # max repr length for int
        self.maxother = 30      # max repr length for everything else

    def repr(self, x):
        """Return the size-limited representation of *x*."""
        return self.repr1(x, self.maxlevel)

    def repr1(self, x, level):
        """Dispatch on type name; fall back to repr_instance."""
        typename = type(x).__name__
        if ' ' in typename:
            # e.g. "method descriptor" -> "method_descriptor"
            parts = typename.split()
            typename = '_'.join(parts)
        if hasattr(self, 'repr_' + typename):
            return getattr(self, 'repr_' + typename)(x, level)
        else:
            return self.repr_instance(x, level)

    def _repr_iterable(self, x, level, left, right, maxiter, trail=''):
        """Shared helper: repr at most *maxiter* items between delimiters."""
        n = len(x)
        if level <= 0 and n:
            s = '...'
        else:
            newlevel = level - 1
            repr1 = self.repr1
            pieces = [repr1(elem, newlevel) for elem in islice(x, maxiter)]
            if n > maxiter: pieces.append('...')
            s = ', '.join(pieces)
            # trail gives "(1,)" its trailing comma for 1-tuples
            if n == 1 and trail: right = trail + right
        return '%s%s%s' % (left, s, right)

    def repr_tuple(self, x, level):
        return self._repr_iterable(x, level, '(', ')', self.maxtuple, ',')

    def repr_list(self, x, level):
        return self._repr_iterable(x, level, '[', ']', self.maxlist)

    def repr_array(self, x, level):
        if not x:
            return "array('%s')" % x.typecode
        header = "array('%s', [" % x.typecode
        return self._repr_iterable(x, level, header, '])', self.maxarray)

    def repr_set(self, x, level):
        if not x:
            return 'set()'
        x = _possibly_sorted(x)
        return self._repr_iterable(x, level, '{', '}', self.maxset)

    def repr_frozenset(self, x, level):
        # Renamed from the METHOD_NAME placeholder: repr1 dispatches on
        # 'repr_' + typename, so this must be named repr_frozenset.
        if not x:
            return 'frozenset()'
        x = _possibly_sorted(x)
        return self._repr_iterable(x, level, 'frozenset({', '})',
                                   self.maxfrozenset)

    def repr_deque(self, x, level):
        return self._repr_iterable(x, level, 'deque([', '])', self.maxdeque)

    def repr_dict(self, x, level):
        n = len(x)
        if n == 0: return '{}'
        if level <= 0: return '{...}'
        newlevel = level - 1
        repr1 = self.repr1
        pieces = []
        for key in islice(_possibly_sorted(x), self.maxdict):
            keyrepr = repr1(key, newlevel)
            valrepr = repr1(x[key], newlevel)
            pieces.append('%s: %s' % (keyrepr, valrepr))
        if n > self.maxdict: pieces.append('...')
        s = ', '.join(pieces)
        return '{%s}' % (s,)

    def repr_str(self, x, level):
        s = builtins.repr(x[:self.maxstring])
        if len(s) > self.maxstring:
            # Keep roughly equal-sized head and tail around the ellipsis.
            i = max(0, (self.maxstring-3)//2)
            j = max(0, self.maxstring-3-i)
            s = builtins.repr(x[:i] + x[len(x)-j:])
            s = s[:i] + '...' + s[len(s)-j:]
        return s

    def repr_int(self, x, level):
        s = builtins.repr(x) # XXX Hope this isn't too slow...
        if len(s) > self.maxlong:
            i = max(0, (self.maxlong-3)//2)
            j = max(0, self.maxlong-3-i)
            s = s[:i] + '...' + s[len(s)-j:]
        return s

    def repr_instance(self, x, level):
        try:
            s = builtins.repr(x)
        # Bugs in x.__repr__() can cause arbitrary
        # exceptions -- then make up something
        except Exception:
            return '<%s instance at %#x>' % (x.__class__.__name__, id(x))
        if len(s) > self.maxother:
            i = max(0, (self.maxother-3)//2)
            j = max(0, self.maxother-3-i)
            s = s[:i] + '...' + s[len(s)-j:]
        return s
def _possibly_sorted(x):
# Since not all sequences of items can be sorted and comparison
# functions may raise arbitrary exceptions, return an unsorted
# sequence in that case.
try:
return sorted(x)
except Exception:
return list(x)
# Module-level instance shared by the convenience function below.
aRepr = Repr()
# Drop-in replacement for builtins.repr() that applies aRepr's size limits.
repr = aRepr.repr
7,272 | test serialisation | ##########################################################################
#
# Copyright (c) 2020, Cinesite VFX Ltd. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
import IECoreScene
import Gaffer
import GafferTest
import GafferScene
import GafferSceneTest
import GafferArnold
class ArnoldColorManagerTest( GafferSceneTest.SceneTestCase ) :

	def test( self ) :

		# Loading

		colorManager = GafferArnold.ArnoldColorManager()
		colorManager.loadColorManager( "color_manager_ocio" )

		for name, plugType, defaultValue in [
			( "config", Gaffer.StringPlug, "" ),
			( "color_space_narrow", Gaffer.StringPlug, "" ),
			( "color_space_linear", Gaffer.StringPlug, "" ),
		] :
			# Bug fix : assert on the plug named by the loop variable. The
			# original asserted the literal "config" on every iteration.
			self.assertIn( name, colorManager["parameters"] )
			self.assertIsInstance( colorManager["parameters"][name], plugType )
			self.assertEqual( colorManager["parameters"][name].getValue(), defaultValue )
			self.assertEqual( colorManager["parameters"][name].defaultValue(), defaultValue )

		# Affects

		options = GafferScene.StandardOptions()
		colorManager["in"].setInput( options["out"] )

		cs = GafferTest.CapturingSlot( colorManager.plugDirtiedSignal() )
		options["enabled"].setValue( False )
		self.assertEqual(
			{ x[0] for x in cs if not x[0].getName().startswith( "__" ) },
			{ colorManager["in"]["globals"], colorManager["in"], colorManager["out"]["globals"], colorManager["out"] }
		)

		del cs[:]
		colorManager["parameters"]["color_space_linear"].setValue( "linear" )
		self.assertEqual(
			{ x[0] for x in cs if not x[0].getName().startswith( "__" ) },
			{ colorManager["parameters"]["color_space_linear"], colorManager["parameters"], colorManager["out"]["globals"], colorManager["out"] }
		)

		# Compute

		g = colorManager["out"].globals()
		self.assertIn( "option:ai:color_manager", g )

		cm = g["option:ai:color_manager"]
		self.assertIsInstance( cm, IECoreScene.ShaderNetwork )
		self.assertEqual( cm.outputShader().name, "color_manager_ocio" )
		self.assertEqual( cm.outputShader().type, "ai:color_manager" )
		self.assertEqual(
			cm.outputShader().parameters,
			IECore.CompoundData( {
				"color_space_linear" : "linear",
				"color_space_narrow" : "",
				"config" : "",
			} )
		)

		colorManager["enabled"].setValue( False )
		self.assertNotIn( "ai:color_manager", colorManager["out"].globals() )

	# Renamed from METHOD_NAME so unittest discovery picks it up.
	def testSerialisation( self ) :

		s = Gaffer.ScriptNode()
		s["m"] = GafferArnold.ArnoldColorManager()
		s["m"].loadColorManager( "color_manager_ocio" )
		s["m"]["parameters"]["color_space_narrow"].setValue( "narrow" )
		s["m"]["parameters"]["color_space_linear"].setValue( "linear" )

		s2 = Gaffer.ScriptNode()
		s2.execute( s.serialise() )

		self.assertIn( "m", s2 )
		self.assertIsInstance( s2["m"], GafferArnold.ArnoldColorManager )
		self.assertEqual( s2["m"]["parameters"].keys(), s["m"]["parameters"].keys() )
		self.assertEqual( s2["m"]["parameters"]["color_space_narrow"].getValue(), "narrow" )
		self.assertEqual( s2["m"]["parameters"]["color_space_linear"].getValue(), "linear" )
		self.assertEqual( s2["m"]["out"].globals(), s["m"]["out"].globals() )
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
7,273 | execute base | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017-2021 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2-Clause
"""
Core execution - this is the special sauce of the default operation
"""
import logging
from tern.classes.notice import Notice
from tern.classes.package import Package
from tern.report import content
from tern.report import formats
from tern.report import errors
from tern.utils import constants
from tern.analyze import common as com
from tern.analyze.default.command_lib import command_lib
from tern.analyze.default import collect
from tern.analyze.default import bundle
from tern.analyze.default import default_common as dcom
# global logger
logger = logging.getLogger(constants.logger_name)
class Prereqs:
    """Bag of prerequisites for the default analysis routines.

    Instantiate, fill in the attributes, then hand the object to the
    execution functions in this module.
    """

    def __init__(self):
        # Shell/path prerequisites default to the empty string:
        # fs_shell: container filesystem shell; host_shell: host shell;
        # binary: container filesystem indicator file.
        self.fs_shell = self.host_shell = self.binary = ''
        # layer_workdir: WORKDIR path; host_path: layer rootfs path on host.
        self.layer_workdir = self.host_path = ''
        # Object-valued prerequisites stay None until supplied:
        # envs: environment variables to set; listing: current listing.
        self.envs = None
        self.listing = None
def execute_base(layer_obj, prereqs):
    """Given an ImageLayer object, find packages installed in the layer
    using the default method.
        1. Use command_lib's base to look up the binary to see if there
           is a method to retrieve the metadata
        2. If there is, invoke the scripts in a chroot environment and
           process the results
        3. Add the results to the ImageLayer object
    It is assumed that the filesystem is prepped for execution by mounting
    the filesystem in the working directory and /proc, /sys and /dev device
    nodes are mounted"""
    # Renamed from the METHOD_NAME placeholder to execute_base, matching
    # the execute_snippets sibling below.
    # Add notices to this layer object
    origin_layer = 'Layer {}'.format(layer_obj.layer_index)
    # find the binary listing
    listing = command_lib.get_base_listing(prereqs.binary)
    if listing:
        # put generic notice about how package metadata is collected
        snippet_msg = formats.invoke_for_base.format(binary=prereqs.binary)
        layer_obj.origins.add_notice_to_origins(
            origin_layer, Notice(snippet_msg, 'info'))
        # get list of metadata by invoking scripts in chroot
        logger.debug("Collecting metadata for image layer...")
        pkg_dict, invoke_msg, warnings = collect.collect_list_metadata(
            listing, prereqs)
        # more processing for debian copyrights to get licenses
        if listing.get("pkg_format") == "deb":
            logger.debug("Processing Debian copyrights...")
            pkg_dict["pkg_licenses"] = com.get_deb_package_licenses(
                pkg_dict["copyrights"])
        # add any errors and warnings to the layer's origins object
        if invoke_msg:
            logger.error(
                "Script invocation error. Unable to collect some metadata")
            layer_obj.origins.add_notice_to_origins(
                origin_layer, Notice(invoke_msg, 'error'))
        if warnings:
            logger.warning("Some metadata may be missing")
        # bundle the results into Package objects
        bundle.fill_pkg_results(layer_obj, pkg_dict, listing.get("pkg_format"))
        # remove extra FileData objects from the layer
        com.remove_duplicate_layer_files(layer_obj)
    # if there is no listing add a notice
    else:
        layer_obj.origins.add_notice_to_origins(
            origin_layer, Notice(errors.no_listing_for_base_key.format(
                listing_key=prereqs.binary), 'error'))
def execute_snippets(layer_obj, command_obj, prereqs):
    """Given in ImageLayer object, shell and binary to look up, find packages
    installed in the layer using the default method:
    For snippets, we will get the packages installed by the command"""
    # set up a notice origin for the layer
    origin_layer = 'Layer {}'.format(layer_obj.layer_index)
    # find packages for the command
    cmd_msg = (formats.invoke_for_snippets + '\n' +
               content.print_package_invoke(command_obj.name))
    layer_obj.origins.add_notice_to_origins(origin_layer, Notice(
        cmd_msg, 'info'))
    # NOTE(review): `filter` resolves to the builtin here, which has no
    # `get_installed_package_names` attribute -- this looks like a missing
    # module import; confirm against upstream before relying on this path.
    pkg_list = filter.get_installed_package_names(command_obj)
    # collect all the dependencies for each package name
    all_pkgs = []
    for pkg_name in pkg_list:
        pkg_invoke = command_lib.check_for_unique_package(
            prereqs.listing, pkg_name)
        deps, deps_msg = com.get_package_dependencies(
            pkg_invoke, pkg_name, prereqs.fs_shell)
        if deps_msg:
            logger.warning(deps_msg)
            layer_obj.origins.add_notice_to_origins(
                origin_layer, Notice(deps_msg, 'error'))
        all_pkgs.append(pkg_name)
        all_pkgs.extend(deps)
    unique_pkgs = list(set(all_pkgs))
    # get package metadata for each package name
    # NOTE(review): `pkg_invoke` below is the value left over from the *last*
    # iteration of the loop above (and unbound when pkg_list is empty) --
    # verify this reuse is intentional.
    for pkg_name in unique_pkgs:
        pkg = Package(pkg_name)
        dcom.fill_package_metadata(pkg, pkg_invoke, prereqs.fs_shell,
                                   layer_obj.get_layer_workdir(), prereqs.envs)
        layer_obj.add_package(pkg)
7,274 | get schema | from django.core.exceptions import ValidationError
from django.urls import reverse
from django.db.models import Q
from rest_access_policy import PermittedPkRelatedField
from rest_framework import serializers
from mathesar.api.db.permissions.query_table import QueryTableAccessPolicy
from mathesar.api.exceptions.mixins import MathesarErrorMessageMixin
from mathesar.api.exceptions.validation_exceptions.exceptions import DuplicateUIQueryInSchemaAPIException
from mathesar.models.base import Table
from mathesar.models.query import UIQuery
class BaseQuerySerializer(MathesarErrorMessageMixin, serializers.ModelSerializer):
    """Base serializer for UIQuery with a derived read-only `schema` field."""

    schema = serializers.SerializerMethodField('get_schema')
    base_table = PermittedPkRelatedField(
        access_policy=QueryTableAccessPolicy,
        queryset=Table.current_objects.all()
    )

    class Meta:
        model = UIQuery
        fields = ['schema', 'initial_columns', 'transformations', 'base_table', 'display_names']

    def get_schema(self, uiquery):
        # Renamed from the METHOD_NAME placeholder: the SerializerMethodField
        # above is declared with method name 'get_schema'.
        """Return the id of the base table's schema, or None if unset."""
        base_table = uiquery.base_table
        if base_table:
            return base_table.schema.id
        return None

    def validate(self, attrs):
        """Reject unexpected request fields, then enforce name uniqueness."""
        unexpected_fields = set(self.initial_data) - set(self.fields)
        if unexpected_fields:
            raise ValidationError(f"Unexpected field(s): {unexpected_fields}")
        self._validate_uniqueness(attrs)
        return attrs

    def _validate_uniqueness(self, attrs):
        """
        Uniqueness is only defined when both name and base_table are defined.

        Would be nice to define this in terms of Django's UniqueConstraint, but that doesn't seem
        possible, due to schema being a child property of base_table.
        """
        name = attrs.get('name')
        if name:
            base_table = attrs.get('base_table')
            if base_table:
                schema = base_table.schema
                is_duplicate_q = self._get_is_duplicate_q(name, schema)
                duplicates = UIQuery.objects.filter(is_duplicate_q)
                if duplicates.exists():
                    raise DuplicateUIQueryInSchemaAPIException(field='name')

    def _get_is_duplicate_q(self, name, schema):
        """Build a Q matching other UIQuerys with this name in this schema."""
        has_same_name_q = Q(name=name)
        has_same_schema_q = Q(base_table__schema=schema)
        is_duplicate_q = has_same_name_q & has_same_schema_q
        is_update = self.instance is not None
        if is_update:
            # If this is an update, filter self out of found duplicates
            id = self.instance.id
            is_not_this_instance_q = ~Q(id=id)
            is_duplicate_q = is_duplicate_q & is_not_this_instance_q
        return is_duplicate_q
class QuerySerializer(BaseQuerySerializer):
    """Full UIQuery serializer adding absolute URLs for related endpoints."""

    results_url = serializers.SerializerMethodField('get_results_url')
    records_url = serializers.SerializerMethodField('get_records_url')
    columns_url = serializers.SerializerMethodField('get_columns_url')

    class Meta:
        model = UIQuery
        fields = '__all__'

    def _build_url(self, obj, viewname):
        """Return the absolute URL for *viewname*, or None.

        URLs are only available for an existing persisted UIQuery (the
        serializer may also be handed unsaved instances during validation).
        """
        if isinstance(obj, UIQuery) and obj.pk is not None:
            request = self.context['request']
            return request.build_absolute_uri(reverse(viewname, kwargs={'pk': obj.pk}))
        return None

    def get_records_url(self, obj):
        return self._build_url(obj, 'query-records')

    def get_columns_url(self, obj):
        return self._build_url(obj, 'query-columns')

    def get_results_url(self, obj):
        return self._build_url(obj, 'query-results')
7,275 | is draggable | from meerk40t.core.node.node import Node
class RootNode(Node):
    """
    RootNode is one of the few directly declarable node-types and serves as the base type for all Node classes.

    The notifications are shallow. They refer *only* to the node in question, not to any children or parents.
    """

    def __init__(self, context, **kwargs):
        _ = context._
        super().__init__(type="root", **kwargs)
        self._root = self
        self.context = context
        # Objects notified by the notify_* methods below.
        self.listeners = []
        # The three standard top-level branches of the tree.
        self.add(type="branch ops", label=_("Operations"))
        self.add(type="branch elems", label=_("Elements"))
        self.add(type="branch reg", label=_("Regmarks"))

    def __repr__(self):
        return f"RootNode({str(self.context)})"

    def __copy__(self):
        return RootNode(self.context)

    def is_draggable(self):
        # Renamed from the METHOD_NAME placeholder; the root node itself
        # can never be dragged in the tree.
        return False

    def listen(self, listener):
        """Register *listener* for tree notifications."""
        self.listeners.append(listener)

    def unlisten(self, listener):
        """Unregister a previously registered *listener*."""
        self.listeners.remove(listener)

    def notify_created(self, node=None, **kwargs):
        if node is None:
            node = self
        for listen in self.listeners:
            if hasattr(listen, "node_created"):
                listen.node_created(node, **kwargs)

    def notify_destroyed(self, node=None, **kwargs):
        if node is None:
            node = self
        for listen in self.listeners:
            if hasattr(listen, "node_destroyed"):
                listen.node_destroyed(node, **kwargs)

    def notify_attached(self, node=None, **kwargs):
        if node is None:
            node = self
        for listen in self.listeners:
            if hasattr(listen, "node_attached"):
                listen.node_attached(node, **kwargs)

    def notify_detached(self, node=None, **kwargs):
        if node is None:
            node = self
        for listen in self.listeners:
            if hasattr(listen, "node_detached"):
                listen.node_detached(node, **kwargs)

    def notify_changed(self, node=None, **kwargs):
        if node is None:
            node = self
        for listen in self.listeners:
            if hasattr(listen, "node_changed"):
                listen.node_changed(node, **kwargs)

    def notify_selected(self, node=None, **kwargs):
        if node is None:
            node = self
        for listen in self.listeners:
            if hasattr(listen, "selected"):
                listen.selected(node, **kwargs)

    def notify_emphasized(self, node=None, **kwargs):
        if node is None:
            node = self
        for listen in self.listeners:
            if hasattr(listen, "emphasized"):
                listen.emphasized(node, **kwargs)

    def notify_targeted(self, node=None, **kwargs):
        if node is None:
            node = self
        for listen in self.listeners:
            if hasattr(listen, "targeted"):
                listen.targeted(node, **kwargs)

    def notify_highlighted(self, node=None, **kwargs):
        if node is None:
            node = self
        for listen in self.listeners:
            if hasattr(listen, "highlighted"):
                listen.highlighted(node, **kwargs)

    def notify_modified(self, node=None, **kwargs):
        """
        Notifies any listeners that a value in the tree has been changed such that the matrix or other property
        values have changed. But that the underlying data object itself remains intact.
        @param node: node that was modified.
        @param kwargs:
        @return:
        """
        if node is None:
            node = self
        # Cached bounds are stale after a modification.
        self._bounds = None
        for listen in self.listeners:
            if hasattr(listen, "modified"):
                listen.modified(node, **kwargs)

    def notify_translated(self, node=None, dx=0, dy=0, **kwargs):
        """
        Notifies any listeners that a value in the tree has been changed such that the matrix or other property
        values have changed. But that the underlying data object itself remains intact.
        @param node: node that was modified.
        @param dx: translation change for node
        @param dy: translation change for node
        @param kwargs:
        @return:
        """
        if node is None:
            node = self
        if self._bounds is not None:
            # Translate cached bounds in place rather than invalidating them.
            self._bounds = [
                self._bounds[0] + dx,
                self._bounds[1] + dy,
                self._bounds[2] + dx,
                self._bounds[3] + dy,
            ]
        for listen in self.listeners:
            if hasattr(listen, "translated"):
                listen.translated(node, dx=dx, dy=dy)  # , **kwargs)

    def notify_scaled(self, node=None, sx=1, sy=1, ox=0, oy=0, **kwargs):
        """
        Notifies any listeners that a value in the tree has been changed such that the matrix or other property
        values have changed. But that the underlying data object itself remains intact.
        @param node: node that was modified.
        @param sx: scale_x value
        @param sy: scale_y value
        @param ox: offset_x value
        @param oy: offset_y value
        @param kwargs:
        @return:
        """
        if node is None:
            node = self
        if self._bounds is not None:
            # Scale cached bounds around (ox, oy); negative scales may swap
            # corners, hence the min/max normalization below.
            x0, y0, x1, y1 = self._bounds
            if sx != 1.0:
                d1 = x0 - ox
                d2 = x1 - ox
                x0 = ox + sx * d1
                x1 = ox + sx * d2
            if sy != 1.0:
                d1 = y0 - oy
                d2 = y1 - oy
                y0 = oy + sy * d1
                y1 = oy + sy * d2
            self._bounds = [min(x0, x1), min(y0, y1), max(x0, x1), max(y0, y1)]
        for listen in self.listeners:
            if hasattr(listen, "scaled"):
                listen.scaled(node, sx=sx, sy=sy, ox=ox, oy=oy)  # , **kwargs)

    def notify_altered(self, node=None, **kwargs):
        """
        Notifies any listeners that a value in the tree has had its underlying data fundamentally changed and while
        this may not be reflected by the properties any assumptions about the content of this node are no longer
        valid.
        @param node:
        @param kwargs:
        @return:
        """
        if node is None:
            node = self
        for listen in self.listeners:
            if hasattr(listen, "altered"):
                listen.altered(node, **kwargs)

    def notify_expand(self, node=None, **kwargs):
        if node is None:
            node = self
        for listen in self.listeners:
            if hasattr(listen, "expand"):
                listen.expand(node, **kwargs)

    def notify_collapse(self, node=None, **kwargs):
        if node is None:
            node = self
        for listen in self.listeners:
            if hasattr(listen, "collapse"):
                listen.collapse(node, **kwargs)

    def notify_reorder(self, node=None, **kwargs):
        if node is None:
            node = self
        for listen in self.listeners:
            if hasattr(listen, "reorder"):
                listen.reorder(node, **kwargs)

    def notify_update(self, node=None, **kwargs):
        if node is None:
            node = self
        for listen in self.listeners:
            if hasattr(listen, "update"):
                listen.update(node, **kwargs)

    def notify_focus(self, node=None, **kwargs):
        if node is None:
            node = self
        for listen in self.listeners:
            if hasattr(listen, "focus"):
                listen.focus(node, **kwargs)
7,276 | run | #
# Copyright (c) 2008--2016 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import os
from config_common import handler_base, utils, cfg_exceptions
from config_common.rhn_log import log_debug, log_error, die
class Handler(handler_base.HandlerBase):
    """Handler that uploads local directory trees into config channels."""

    _usage_options = "[options] [ config_channel ... ]"
    _options_table = handler_base.HandlerBase._options_table + [
        handler_base.HandlerBase._option_class(
            '-t', '--topdir',
            action="store",
            default="./",
            help="Directory all the file paths are relative to",
        ),
        handler_base.HandlerBase._option_class(
            '-c', '--channel',
            action='store',
            default=None,
            help="List of channels the config info will be uploaded into. Channels delimited by ','.\nExample: --channel=foo,bar,baz",
        ),
        handler_base.HandlerBase._option_class(
            '', '--disable-selinux',
            action='store_true',
            default=None,
            help="Don't upload SELinux contexts",
        ),
    ]

    def run(self):
        # Renamed from the METHOD_NAME placeholder: the handler framework's
        # entry point after option parsing.
        """Validate --topdir and upload the requested channel directories."""
        log_debug(2)
        #5/12/05 wregglej - 149034 changed r into a instance variable
        self.r = self.repository

        topdir = self.options.topdir
        if not topdir:
            die(7, "--topdir not specified")
        if not os.path.isdir(topdir):
            die(8, "--topdir specified, but `%s' not a directory" %
                topdir)
        topdir = utils.normalize_path(topdir)

        #5/12/05 wregglej - 149034 allowing the channel name and the directory name to vary independently.
        if not self.options.channel is None:
            #Get the list of channels with leading and trailing whitespace removed.
            channels = [x.strip() for x in self.options.channel.split(',') if x]
            #Get the list of directories to upload. At this point it's the list of arguments.
            dirs = self.args
        elif not self.args:
            #If we get to this point, then --channel wasn't used and nothing was included as arguments.
            #Assumes that the directories in topdir are the ones we want to upload, and since no channels were
            #specified that each directory is it's own channel.
            channels = os.listdir(topdir)
            dirs = None
            print("No config channels specified, using %s" % channels)
        else:
            #At this point, --channel wasn't used but there was something included as an argument.
            #The name of the channel is assumed to be the same as the name of the directory.
            channels = self.args
            dirs = None

        #If dirs isn't None, then each directory needs to be uploaded into each channel.
        if dirs:
            for channel in channels:
                for directory in dirs:
                    self.upload_config_channel(topdir, channel, directory)
        #If dirs is None, then each channel is it's own channel.
        else:
            for channel in channels:
                self.upload_config_channel(topdir, channel, channel)

    def upload_config_channel(self, topdir, channel, directory_name):
        """Upload every file under topdir/directory_name into *channel*."""
        if not self.r.config_channel_exists(channel):
            die(6, "Error: config channel %s does not exist" % channel)
        if self.options.disable_selinux:
            selinux_ctx = ''
        else:
            selinux_ctx = None
        print("Using config channel %s" % channel)
        channel_dir = utils.join_path(topdir, directory_name)
        if not os.path.exists(channel_dir):
            die(6, "Error: channel directory %s does not exist" % channel_dir)
        flist = list_files_recursive(channel_dir)
        for (dirname, filenames) in flist:
            assert dirname.startswith(channel_dir)
            remote_dirname = dirname[len(channel_dir):]
            for f in filenames:
                local_file = utils.join_path(dirname, f)
                remote_file = utils.join_path(remote_dirname, f)
                print("Uploading %s from %s" % (remote_file, local_file))
                # Bug fix: the original read the exception via
                # sys.exc_info() but never imported sys (NameError on the
                # error path); `except ... as e` is the py3 idiom.
                try:
                    self.r.put_file(channel, remote_file, local_file, is_first_revision=0,
                                    selinux_ctx=selinux_ctx)
                except cfg_exceptions.RepositoryFilePushError as e:
                    log_error(e)
def is_file_or_link(dirname, basename):
    """Return True if *basename* inside *dirname* is a regular file or a symlink."""
    path = os.path.join(dirname, basename)
    return os.path.isfile(path) or os.path.islink(path)


def list_files_recursive(d):
    """Walk directory *d* and return [(dirname, [file_or_link_names]), ...].

    The name lists are materialized (the original paired each dirname with a
    one-shot lazy ``filter`` iterator, which could only be consumed once).
    """
    file_list = []
    for dirname, dirs, names in os.walk(d):
        file_list.append(
            (dirname, [n for n in names if is_file_or_link(dirname, n)]))
    return file_list
7,277 | authenticate | """
Classes that override default django-oauth-toolkit behavior
"""
from datetime import datetime, timedelta
from django.contrib.auth import authenticate, get_user_model
from django.db.models.signals import pre_save
from django.dispatch import receiver
from oauth2_provider.models import AccessToken
from oauth2_provider.oauth2_validators import OAuth2Validator
from oauth2_provider.scopes import get_scopes_backend
from pytz import utc
from ..models import RestrictedApplication
# pylint: disable=W0223
@receiver(pre_save, sender=AccessToken)
def on_access_token_presave(sender, instance, *args, **kwargs):  # pylint: disable=unused-argument
    """
    Mark AccessTokens as expired for 'restricted applications' if required.
    """
    # Force the expiry to the epoch so the token is unusable immediately;
    # RestrictedApplication decides whether this policy applies.
    if RestrictedApplication.should_expire_access_token(instance.application):
        instance.expires = datetime(1970, 1, 1, tzinfo=utc)
class EdxOAuth2Validator(OAuth2Validator):
    """
    Validator class that implements edX-specific custom behavior:

        * It allows users to log in with their email or username.
        * It does not require users to be active before logging in.
    """

    def validate_user(self, username, password, client, request, *args, **kwargs):
        """
        Authenticate users, but allow inactive users (with u.is_active == False)
        to authenticate.
        """
        user = self._authenticate(username=username, password=password)
        if user is not None:
            request.user = user
            return True
        return False

    def _authenticate(self, username, password):
        # Renamed from the METHOD_NAME placeholder; leading underscore marks
        # it as an internal helper and distinguishes it from Django's
        # module-level authenticate() used below.
        """
        Authenticate the user, allowing the user to identify themselves either
        by username or email
        """
        authenticated_user = authenticate(username=username, password=password)
        if authenticated_user is None:
            UserModel = get_user_model()  # pylint: disable=invalid-name
            try:
                email_user = UserModel.objects.get(email=username)
            except UserModel.DoesNotExist:
                authenticated_user = None
            else:
                # Retry with the username that belongs to the given email.
                authenticated_user = authenticate(username=email_user.username, password=password)
        return authenticated_user

    def save_bearer_token(self, token, request, *args, **kwargs):
        """
        Ensure that access tokens issued via client credentials grant are
        associated with the owner of the ``Application``.

        Also, update the `expires_in` value in the token response for
        RestrictedApplications.
        """
        grant_type = request.grant_type
        user = request.user

        if grant_type == 'client_credentials':
            # Temporarily remove the grant type to avoid triggering the super method's code that removes request.user.
            request.grant_type = None

            # Ensure the tokens get associated with the correct user since DOT does not normally
            # associate access tokens issued with the client_credentials grant to users.
            request.user = request.client.user

        super().save_bearer_token(token, request, *args, **kwargs)

        is_restricted_client = self._update_token_expiry_if_restricted_client(token, request.client)
        if not is_restricted_client:
            self._update_token_expiry_if_overridden_in_request(token, request)

        # Restore the original request attributes
        request.grant_type = grant_type
        request.user = user

    def validate_scopes(self, client_id, scopes, client, request, *args, **kwargs):
        """
        Ensure required scopes are permitted (as specified in the settings file)
        """
        available_scopes = get_scopes_backend().get_available_scopes(application=client, request=request)
        return set(scopes).issubset(set(available_scopes))

    def _update_token_expiry_if_restricted_client(self, token, client):
        """
        Update the token's expires_in value if the given client is a
        RestrictedApplication and return whether the given client is restricted.
        """
        # Since RestrictedApplications override the DOT defined expiry such that
        # access_tokens are always expired, re-read the token from the database
        # and calculate expires_in (in seconds) from the database value. This
        # value should be a negative value, meaning that it is already expired.
        if RestrictedApplication.should_expire_access_token(client):
            access_token = AccessToken.objects.get(token=token['access_token'])
            expires_in = (access_token.expires - _get_utc_now()).total_seconds()
            assert expires_in < 0
            token['expires_in'] = expires_in
            return True

    def _update_token_expiry_if_overridden_in_request(self, token, request):
        """
        Update the token's expires_in value if the request specifies an
        expiration value and update the expires value on the stored AccessToken
        object.

        This is needed since DOT's save_bearer_token method always uses
        the dot_settings.ACCESS_TOKEN_EXPIRE_SECONDS value instead of applying
        the requesting expiration value.
        """
        expires_in = getattr(request, 'expires_in', None)
        if expires_in:
            access_token = AccessToken.objects.get(token=token['access_token'])
            access_token.expires = _get_utc_now() + timedelta(seconds=expires_in)
            access_token.save()
            token['expires_in'] = expires_in
def _get_utc_now():
    """
    Return current time in UTC (timezone-aware).
    """
    # datetime.now(tz) yields the same aware value as the deprecated
    # utcnow().replace(tzinfo=utc) idiom.
    return datetime.now(utc)
7,278 | test futures | import concurrent.futures
import threading
import time
import numpy as np
import pytest
import strax
# Base timeout (seconds) used for mailbox operations in these tests.
SHORT_TIMEOUT = 0.1
# Generous timeout used where a test must comfortably outlast SHORT_TIMEOUT work.
LONG_TIMEOUT = 5 * SHORT_TIMEOUT
def reader(source, reader_sleeps=0, name=''):
    """Consume *source*, sleeping *reader_sleeps* seconds per item.

    Returns the list of items in the order they were received.
    """
    collected = []
    for item in source:
        print(f"Reader {name} got {item}, sleeping for {reader_sleeps}")
        time.sleep(reader_sleeps)
        print(f"Reader {name} awoke")
        collected.append(item)
    return collected
def mailbox_tester(messages,
                   numbers=None,
                   lazy=False,
                   reader_sleeps=0.,
                   max_messages=100,
                   expected_result=None,
                   timeout=SHORT_TIMEOUT,
                   result_timeout=LONG_TIMEOUT):
    """Send *messages* through a strax.Mailbox and verify reader output.

    Two reader threads subscribe; each must end up with the messages
    ordered by msg_number. NOTE(review): behaviour is sensitive to the
    Mailbox's blocking/timeout semantics, so the code is kept unchanged.
    """
    if numbers is None:
        numbers = np.arange(len(messages))
    if expected_result is None:
        # Default expectation: messages sorted by their msg_number.
        messages = np.asarray(messages)
        expected_result = messages[np.argsort(numbers)]
    mb = strax.Mailbox(max_messages=max_messages,
                       timeout=timeout,
                       lazy=lazy)
    n_readers = 2
    with concurrent.futures.ThreadPoolExecutor() as tp:
        futures = [tp.submit(reader,
                             source=mb.subscribe(),
                             reader_sleeps=reader_sleeps)
                   for _ in range(n_readers)]
        for i, _ in enumerate(messages):
            mb.send(messages[i], msg_number=numbers[i])
            print(f"Sent message {i}. Now {len(mb._mailbox)} ms in mailbox.")
        mb.close()
        # Results must be equal
        for f in futures:
            np.testing.assert_equal(f.result(timeout=result_timeout),
                                    expected_result)
def test_highlevel():
    """Test highlevel mailbox API"""
    for lazy in [False, True]:
        n_threads_start = len(threading.enumerate())
        print(f"Lazy mode: {lazy}")
        mb = strax.Mailbox(lazy=lazy)
        mb.add_sender(iter(list(range(10))))

        def test_reader(source):
            # Stash the received items on the function object for inspection.
            test_reader.got = r = []
            for s in source:
                r.append(s)

        mb.add_reader(test_reader)
        mb.start()
        time.sleep(SHORT_TIMEOUT)
        assert hasattr(test_reader, 'got')
        assert test_reader.got == list(range(10))
        mb.cleanup()

        # Idiom fix: `not t.is_alive()` replaces the obscure boolean XOR
        # `True ^ t.is_alive()` (same value, far clearer intent).
        threads = [f'{t.name} is dead: {not t.is_alive()}'
                   for t in threading.enumerate()]
        assert len(threads) == n_threads_start, (
            f"Not all threads died. \n Threads running are:{threads}")
def test_result_timeout():
    """Test that our mailbox tester actually times out.

    (if not, the other tests might hang indefinitely if something is broken)
    """
    # msg_number 0 is never sent, so readers can never finish; the future's
    # result() must therefore raise TimeoutError within result_timeout.
    with pytest.raises(concurrent.futures.TimeoutError):
        mailbox_tester([0, 1],
                       numbers=[1, 2],
                       timeout=2 * LONG_TIMEOUT)
def test_read_timeout():
    """Subscribers time out if we cannot read for too long"""
    # Message number 0 never arrives, so readers wait past the mailbox timeout.
    with pytest.raises(strax.MailboxReadTimeout):
        mailbox_tester([0, 1], numbers=[1, 2])
def test_write_timeout():
    """Writers time out if we cannot write for too long"""
    # With capacity 1 and slow readers, the mailbox stays full and the
    # sender must eventually give up.
    with pytest.raises(strax.MailboxFullTimeout):
        mailbox_tester([0, 1, 2, 3, 4],
                       max_messages=1,
                       reader_sleeps=LONG_TIMEOUT)
def test_reversed():
    """Mailbox sorts messages properly"""
    # Messages are sent with descending msg_numbers; readers must still
    # receive them in ascending msg_number order.
    mailbox_tester(np.arange(10),
                   numbers=np.arange(10)[::-1])
def test_deadlock_regression():
    """A reader thread may start after the first message is processed"""
    # Test cannot run in lazy mode, cannot send without active subscriber
    mb = strax.Mailbox(timeout=SHORT_TIMEOUT)
    mb.send(0)

    # Both subscriptions are created now, but the second thread only
    # starts consuming after the first message has already been handled.
    readers = [
        threading.Thread(target=reader,
                         kwargs=dict(
                             source=mb.subscribe(),
                             name=str(i)))
        for i in range(2)
    ]
    readers[0].start()
    time.sleep(SHORT_TIMEOUT)
    readers[1].start()
    mb.send(1)
    mb.close()

    # Both readers must terminate; a deadlock would leave them alive.
    for t in readers:
        t.join(SHORT_TIMEOUT)
        assert not t.is_alive()
def test_close_protection():
    """Cannot send messages to a closed mailbox"""
    mb = strax.Mailbox()
    mb.close()
    # Sending after close must raise, not silently drop the message.
    with pytest.raises(strax.MailBoxAlreadyClosed):
        mb.send(0)
def test_valid_msg_number():
    """Message numbers are non-negative integers"""
    mb = strax.Mailbox()
    # Negative numbers and non-integers are both rejected.
    with pytest.raises(strax.InvalidMessageNumber):
        mb.send(0, msg_number=-1)
    with pytest.raises(strax.InvalidMessageNumber):
        mb.send(0, msg_number='???')
# Task for in the next test, must be global since we're using ProcessPool
# (which must pickle)
def _task(i):
    """Sleep briefly in a worker process, then echo *i* back."""
    time.sleep(SHORT_TIMEOUT)
    return i
def METHOD_NAME():
    """Mailbox awaits futures before passing them to readers."""
    # Timeouts are longer for this example,
    # since they involve creating subprocesses.
    # Use a context manager so the process pool is always shut down
    # (the original leaked the executor, leaving worker processes behind).
    with concurrent.futures.ProcessPoolExecutor() as exc:
        futures = [exc.submit(_task, i) for i in range(3)]
        mailbox_tester(futures,
                       expected_result=[0, 1, 2],
                       result_timeout=5 * LONG_TIMEOUT,
                       timeout=5 * LONG_TIMEOUT)
7,279 | test list pipelines | #!/usr/bin/env python
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import time
from tests.unit import unittest
from boto.datapipeline import layer1
class TestDataPipeline(unittest.TestCase):
    """Integration tests for the AWS Data Pipeline layer1 connection."""

    datapipeline = True

    def setUp(self):
        self.connection = layer1.DataPipelineConnection()
        # Minimal pipeline definition: a worker-group default, an hourly
        # schedule, and one shell-command activity referencing both.
        self.sample_pipeline_objects = [
            {'fields': [
                {'key': 'workerGroup', 'stringValue': 'MyworkerGroup'}],
             'id': 'Default',
             'name': 'Default'},
            {'fields': [
                {'key': 'startDateTime', 'stringValue': '2012-09-25T17:00:00'},
                {'key': 'type', 'stringValue': 'Schedule'},
                {'key': 'period', 'stringValue': '1 hour'},
                {'key': 'endDateTime', 'stringValue': '2012-09-25T18:00:00'}],
             'id': 'Schedule',
             'name': 'Schedule'},
            {'fields': [
                {'key': 'type', 'stringValue': 'ShellCommandActivity'},
                {'key': 'command', 'stringValue': 'echo hello'},
                {'key': 'parent', 'refValue': 'Default'},
                {'key': 'schedule', 'refValue': 'Schedule'}],
             'id': 'SayHello',
             'name': 'SayHello'}
        ]
        self.connection.auth_service_name = 'datapipeline'

    def create_pipeline(self, name, unique_id, description=None):
        """Create a pipeline and register its deletion as test cleanup."""
        response = self.connection.create_pipeline(name, unique_id,
                                                   description)
        pipeline_id = response['pipelineId']
        self.addCleanup(self.connection.delete_pipeline, pipeline_id)
        return pipeline_id

    def get_pipeline_state(self, pipeline_id):
        """Return the pipeline's @pipelineState, or None if absent."""
        response = self.connection.describe_pipelines([pipeline_id])
        for attr in response['pipelineDescriptionList'][0]['fields']:
            if attr['key'] == '@pipelineState':
                return attr['stringValue']

    def test_can_create_and_delete_a_pipeline(self):
        response = self.connection.create_pipeline('name', 'unique_id',
                                                   'description')
        self.connection.delete_pipeline(response['pipelineId'])

    def test_validate_pipeline(self):
        pipeline_id = self.create_pipeline('name2', 'unique_id2')
        self.connection.validate_pipeline_definition(
            self.sample_pipeline_objects, pipeline_id)

    def test_put_pipeline_definition(self):
        pipeline_id = self.create_pipeline('name3', 'unique_id3')
        self.connection.put_pipeline_definition(self.sample_pipeline_objects,
                                                pipeline_id)
        # We should now be able to get the pipeline definition and see
        # that it matches what we put.
        response = self.connection.get_pipeline_definition(pipeline_id)
        objects = response['pipelineObjects']
        self.assertEqual(len(objects), 3)
        self.assertEqual(objects[0]['id'], 'Default')
        self.assertEqual(objects[0]['name'], 'Default')
        self.assertEqual(objects[0]['fields'],
                         [{'key': 'workerGroup', 'stringValue': 'MyworkerGroup'}])

    def test_activate_pipeline(self):
        pipeline_id = self.create_pipeline('name4', 'unique_id4')
        self.connection.put_pipeline_definition(self.sample_pipeline_objects,
                                                pipeline_id)
        self.connection.activate_pipeline(pipeline_id)
        # Poll up to 10 times for the pipeline to reach SCHEDULED.
        attempts = 0
        state = self.get_pipeline_state(pipeline_id)
        while state != 'SCHEDULED' and attempts < 10:
            time.sleep(10)
            attempts += 1
            state = self.get_pipeline_state(pipeline_id)
        # BUGFIX: the original checked `attempts > 10`, which can never be
        # true when the loop condition is `attempts < 10`, so the test
        # could never fail on a pipeline that was never scheduled.
        if state != 'SCHEDULED':
            self.fail("Pipeline did not become scheduled "
                      "after 10 attempts.")
        objects = self.connection.describe_objects(['Default'], pipeline_id)
        field = objects['pipelineObjects'][0]['fields'][0]
        self.assertDictEqual(field, {'stringValue': 'COMPONENT', 'key': '@sphere'})

    def METHOD_NAME(self):
        pipeline_id = self.create_pipeline('name5', 'unique_id5')
        pipeline_id_list = [p['id'] for p in
                            self.connection.list_pipelines()['pipelineIdList']]
        # assertIn gives a clearer failure message than assertTrue(x in y).
        self.assertIn(pipeline_id, pipeline_id_list)
if __name__ == '__main__':
unittest.main() |
7,280 | get post dominators | from typing import Optional
import logging
import networkx
from ..utils.graph import compute_dominance_frontier, PostDominators, TemporaryNode
from . import Analysis
_l = logging.getLogger(name=__name__)
class CDG(Analysis):
    """
    Implements a control dependence graph.
    """

    def __init__(self, cfg, start=None, no_construct=False):
        """
        Constructor.

        :param cfg: The control flow graph upon which this control dependence graph will build
        :param start: The starting point to begin constructing the control dependence graph
        :param no_construct: Skip the construction step. Only used in unit-testing.
        """
        self._start = start if start is not None else self.project.entry
        self._cfg = cfg
        self._ancestor = None
        self._semi = None
        # Post-dominator tree of the (acyclic) CFG, built by _pd_construct().
        self._post_dom: Optional[networkx.DiGraph] = None
        # The control dependence graph itself; edge x -> y means y is
        # control dependent on x.
        self._graph: Optional[networkx.DiGraph] = None
        # CFG with ContainerNodes unwrapped, used for dominance frontiers.
        self._normalized_cfg = None

        if not no_construct:
            if self._cfg is None:
                # This leads to import cycles otherwise
                # pylint: disable=import-outside-toplevel
                from angr.analyses.cfg.cfg_emulated import CFGEmulated
                self._cfg = self.project.analyses[CFGEmulated].prep()()
            # FIXME: We should not use get_any_irsb in such a real setting...
            self._entry = self._cfg.model.get_any_node(self._start)
            self._construct()

    #
    # Properties
    #

    @property
    def graph(self):
        # The constructed control dependence graph (networkx.DiGraph).
        return self._graph

    #
    # Public methods
    #

    def METHOD_NAME(self):
        """
        Return the post-dom tree
        """
        return self._post_dom

    def get_dependants(self, run):
        """
        Return a list of nodes that are control dependent on the given node in the control dependence graph
        """
        if run in self._graph.nodes():
            return list(self._graph.successors(run))
        else:
            return []

    def get_guardians(self, run):
        """
        Return a list of nodes on whom the specific node is control dependent in the control dependence graph
        """
        if run in self._graph.nodes():
            return list(self._graph.predecessors(run))
        else:
            return []

    #
    # Private methods
    #

    def _construct(self):
        """
        Construct a control dependence graph.

        This implementation is based on figure 6 of paper An Efficient Method of Computing Static Single Assignment
        Form by Ron Cytron, etc.
        """
        if not self._cfg._model.ident.startswith("CFGEmulated"):
            raise ValueError("CDG is only supported by CFGEmulated.")

        self._acyclic_cfg = self._cfg.copy()
        # TODO: Cycle-removing is not needed - confirm it later
        # The CFG we use should be acyclic!
        # self._acyclic_cfg.remove_cycles()

        # Pre-process the acyclic CFG
        self._pre_process_cfg()

        # Construct post-dominator tree
        self._pd_construct()

        self._graph: networkx.DiGraph = networkx.DiGraph()

        # Construct the reversed dominance frontier mapping
        rdf = compute_dominance_frontier(self._normalized_cfg, self._post_dom)

        # A node y is control dependent on every node x in y's reversed
        # dominance frontier.
        for y in self._cfg.graph.nodes():
            if y not in rdf:
                continue
            for x in rdf[y]:
                self._graph.add_edge(x, y)

        # self._post_process()

    def _pre_process_cfg(self):
        """
        Pre-process the acyclic CFG.
        - Change all FakeRet edges to normal edges when necessary (e.g. the normal/expected return edge does not exist)
        """
        for _, dst, data in self._acyclic_cfg.graph.edges(data=True):
            if "jumpkind" in data and data["jumpkind"] == "Ijk_FakeRet":
                all_edges_to_dst = self._acyclic_cfg.graph.in_edges([dst], data=True)
                if not any((s, d) for s, d, da in all_edges_to_dst if da["jumpkind"] != "Ijk_FakeRet"):
                    # All in edges are FakeRets
                    # Change them to a normal edge
                    for _, _, data_ in all_edges_to_dst:
                        data_["jumpkind"] = "Ijk_Boring"

    def _post_process(self):
        """
        There are cases where a loop has two overlapping loop headers thanks
        to the way VEX is dealing with continuous instructions. As we were
        breaking the connection between the second loop header and its
        successor, we shall restore them in our CDG.
        """
        # TODO: Verify its correctness
        loop_back_edges = self._cfg.get_loop_back_edges()
        for b1, b2 in loop_back_edges:
            self._graph.add_edge(b1, b2)

    #
    # Post-dominator tree related
    #

    def _pd_construct(self):
        # Build the post-dominator tree over the acyclic CFG, then unwrap
        # the ContainerNode wrappers for downstream dominance computation.
        pdoms = PostDominators(self._acyclic_cfg, self._entry, successors_func=self._pd_graph_successors)
        self._post_dom = pdoms.post_dom

        self._pd_post_process(self._acyclic_cfg)

        # Create the normalized_cfg without the annoying ContainerNodes
        self._normalized_cfg = networkx.DiGraph()
        for src, dst in pdoms.prepared_graph.edges():
            self._normalized_cfg.add_edge(src.obj, dst.obj)

    @staticmethod
    def _pd_graph_successors(graph, node):
        if type(node) is TemporaryNode:
            # This is for testing
            successors = graph.graph.successors(node)
        else:
            # Real CFGNode!
            successors = graph.model.get_successors(node)
        return successors

    def _pd_post_process(self, cfg):
        """
        Take care of those loop headers/tails where we manually broke their
        connection to the next BBL
        """
        loop_back_edges = self._cfg.get_loop_back_edges()

        for b1, b2 in loop_back_edges:
            # The edge between b1 and b2 is manually broken
            # The post dominator of b1 should be b2 (or not?)
            successors = list(self._pd_graph_successors(cfg, b1))

            if len(successors) == 0:
                if b2 in self._post_dom:
                    self._post_dom.add_edge(b1, b2)
                else:
                    _l.debug("%s is not in post dominator dict.", b2)
from angr.analyses import AnalysesHub
AnalysesHub.register_default("CDG", CDG) |
7,281 | update data | # ***************************************************************************
# * *
# * Copyright (c) 2013-2015 - Juergen Riegel <FreeCAD@juergen-riegel.net> *
# * Copyright (c) 2017-2018 Oliver Oxtoby (CSIR) <ooxtoby@csir.co.za> *
# * Copyright (c) 2017-2018 Alfred Bogaers (CSIR) <abogaers@csir.co.za> *
# * Copyright (c) 2017-2018 Johan Heyns (CSIR) <jheyns@csir.co.za> *
# * Copyright (c) 2019-2022 Oliver Oxtoby <oliveroxtoby@gmail.com> *
# * *
# * This program is free software: you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License as *
# * published by the Free Software Foundation, either version 3 of the *
# * License, or (at your option) any later version. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Lesser General Public License for more details. *
# * *
# * You should have received a copy of the GNU Lesser General Public *
# * License along with this program. If not, *
# * see <https://www.gnu.org/licenses/>. *
# * *
# ***************************************************************************
import os
import FreeCAD
if FreeCAD.GuiUp:
import FreeCADGui
from CfdOF import CfdTools
from CfdOF.CfdTools import addObjectProperty
from CfdOF.Solve import TaskPanelCfdFluidProperties
def makeCfdFluidMaterial(name):
    """Create a CFD fluid-material object named *name* in the active document
    and return it. Attaches the CfdMaterial proxy (with default fluid
    properties) and, in GUI mode, its view provider."""
    obj = FreeCAD.ActiveDocument.addObject("App::MaterialObjectPython", name)

    CfdMaterial(obj)  # Include default fluid properties
    if FreeCAD.GuiUp:
        ViewProviderCfdFluidMaterial(obj.ViewObject)
    return obj
class CommandCfdFluidMaterial:
    """GUI command that adds fluid properties to the active CFD analysis,
    or opens the existing material object for editing."""

    def GetResources(self):
        # Metadata consumed by FreeCAD's command framework (toolbar/menu).
        icon_path = os.path.join(CfdTools.getModulePath(), "Gui", "Icons", "material.svg")
        return {
            'Pixmap': icon_path,
            'MenuText': 'Add fluid properties',
            'ToolTip': 'Add fluid properties'}

    def IsActive(self):
        # Enabled only when an analysis is active.
        return CfdTools.getActiveAnalysis() is not None

    def Activated(self):
        FreeCAD.Console.PrintMessage("Set fluid properties \n")
        FreeCAD.ActiveDocument.openTransaction("Set CfdFluidMaterialProperty")
        FreeCADGui.doCommand("from CfdOF import CfdTools")
        FreeCADGui.doCommand("from CfdOF.Solve import CfdFluidMaterial")
        editing_existing = False
        analysis_object = CfdTools.getActiveAnalysis()
        if analysis_object is None:
            CfdTools.cfdErrorBox("No active analysis object found")
            return False
        physics_model = CfdTools.getPhysicsModel(analysis_object)
        # Single-phase analyses get exactly one material: if one already
        # exists in the analysis group, edit it instead of adding another.
        if not physics_model or physics_model.Phase == 'Single':
            members = analysis_object.Group
            for i in members:
                if isinstance(i.Proxy, CfdMaterial):
                    FreeCADGui.activeDocument().setEdit(i.Name)
                    editing_existing = True
        if not editing_existing:
            FreeCADGui.doCommand(
                "CfdTools.getActiveAnalysis().addObject(CfdFluidMaterial.makeCfdFluidMaterial('FluidProperties'))")
            FreeCADGui.ActiveDocument.setEdit(FreeCAD.ActiveDocument.ActiveObject.Name)
class CfdMaterial:
    """ CFD material properties object. Compatible with FreeCAD material object. """

    def __init__(self, obj):
        obj.Proxy = self
        self.Type = "CfdMaterial"
        self.initProperties(obj)

    def initProperties(self, obj):
        """Create or refresh the document-object properties and ensure the
        Material property map contains usable defaults."""
        # Not currently used, but required for parent class
        addObjectProperty(obj, "References", [], "App::PropertyLinkSubListGlobal", "Material", "List of material shapes")

        # Compatibility with FEM material object
        if addObjectProperty(
                obj, "Category", ["Solid", "Fluid"], "App::PropertyEnumeration", "Material", "Type of material"):
            obj.Category = "Fluid"

        # 'Material' PropertyMap already present in MaterialObjectPython
        if not obj.Material:
            mats, name_path_list = CfdTools.importMaterials()
            # Load a default to initialise the values for each type
            # (mats appears to be keyed by path, i.e. element [1] of each
            # (name, path) entry in name_path_list — TODO confirm against
            # CfdTools.importMaterials; note the comprehension variable
            # `np` shadows the common numpy alias, though numpy is not
            # imported in this module.)
            obj.Material = mats[name_path_list[[np[0] for np in name_path_list].index('AirIsothermal')][1]]
        elif not obj.Material.get('Type'):
            # Older saved files may lack the 'Type' key; default it.
            mat = obj.Material
            mat['Type'] = 'Isothermal'
            obj.Material = mat

    def onDocumentRestored(self, obj):
        # Re-create any properties missing from files saved by older versions.
        self.initProperties(obj)

    def execute(self, obj):
        return
class _CfdMaterial:
    """ Backward compatibility for old class name when loading from file """

    def onDocumentRestored(self, obj):
        # Re-bind the restored object to the current proxy class.
        CfdMaterial(obj)
class ViewProviderCfdFluidMaterial:
    """View provider (GUI side) for the CFD fluid material object: icon,
    task-panel editing, and change tracking."""

    def __init__(self, vobj):
        vobj.Proxy = self
        self.taskd = None

    def getIcon(self):
        icon_path = os.path.join(CfdTools.getModulePath(), "Gui", "Icons", "material.svg")
        return icon_path

    def attach(self, vobj):
        self.ViewObject = vobj
        self.Object = vobj.Object

    def METHOD_NAME(self, obj, prop):
        # Flag the parent analysis for a case rewrite whenever material data
        # changes, unless the analysis is still being loaded from file.
        analysis_obj = CfdTools.getParentAnalysisObject(obj)
        if analysis_obj and not analysis_obj.Proxy.loading:
            analysis_obj.NeedsCaseRewrite = True

    def onChanged(self, vobj, prop):
        return

    def setEdit(self, vobj, mode):
        """Open the fluid-properties task panel; return True on success."""
        analysis_object = CfdTools.getParentAnalysisObject(self.Object)
        if analysis_object is None:
            CfdTools.cfdErrorBox("No parent analysis object found")
            return False
        physics_model = CfdTools.getPhysicsModel(analysis_object)
        if not physics_model:
            CfdTools.cfdErrorBox("Analysis object must have a physics object")
            return False
        import importlib
        # Reload so task-panel code edits are picked up without restarting.
        importlib.reload(TaskPanelCfdFluidProperties)
        self.taskd = TaskPanelCfdFluidProperties.TaskPanelCfdFluidProperties(self.Object, physics_model)
        self.taskd.obj = vobj.Object
        FreeCADGui.Control.showDialog(self.taskd)
        return True

    def doubleClicked(self, vobj):
        doc = FreeCADGui.getDocument(vobj.Object.Document)
        if not doc.getInEdit():
            doc.setEdit(vobj.Object.Name)
        else:
            FreeCAD.Console.PrintError('Task dialog already open\n')
            FreeCADGui.Control.showTaskView()
        return True

    def unsetEdit(self, vobj, mode):
        if self.taskd:
            self.taskd.closing()
            self.taskd = None
        FreeCADGui.Control.closeDialog()

    def __getstate__(self):
        # Nothing to serialize for this view provider.
        return None

    def __setstate__(self, state):
        return None
class _ViewProviderCfdFluidMaterial:
    """ Backward compatibility for old class name when loading from file """

    def attach(self, vobj):
        # Swap in the current view-provider class on restore.
        new_proxy = ViewProviderCfdFluidMaterial(vobj)
        new_proxy.attach(vobj)

    def __getstate__(self):
        return None

    def __setstate__(self, state):
        # Removed a stray trailing "|" token that made this line invalid.
        return None
7,282 | test broadcast tensors | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import torch as ori_torch
import numpy as np
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.automated_test_util import *
# Elementwise binary operators exercised by the broadcast tests below;
# each test picks one at random per run.
binary_ops = [
    torch.add,
    torch.sub,
    torch.mul,
    torch.div,
    torch.min,
    torch.minimum,
    torch.max,
    torch.maximum,
    torch.fmod,
    torch.pow,
    torch.eq,
    torch.ne,
    torch.gt,
    torch.ge,
    torch.lt,
    torch.le,
    torch.logical_and,
    torch.logical_or,
    torch.logical_xor,
]
@flow.unittest.skip_unless_1n1d()
class TestBroadcastOps(flow.unittest.TestCase):
    """Randomized broadcast tests: each case applies a random binary op to
    broadcast-compatible shapes and lets @autotest compare oneflow against
    pytorch."""

    @autotest(n=5, auto_backward=False)
    def test_broadcast_elementwise(test_case):
        # Broadcasting along multiple size-1 axes of a 4-D tensor.
        op_idx = random(low=0, high=len(binary_ops)).to(int).value()
        op = binary_ops[op_idx]
        device = random_device()
        x = random_tensor(ndim=4, dim0=2, dim1=2, dim2=3, dim3=4).to(device)
        y = random_tensor(ndim=4, dim0=1, dim1=2, dim2=3, dim3=1).to(device)
        out = op(x, y)
        return out

    @autotest(n=5, auto_backward=False)
    def test_broadcast_matrix_row(test_case):
        # Lower-rank operand broadcast across the leading dimension.
        op_idx = random(low=0, high=len(binary_ops)).to(int).value()
        op = binary_ops[op_idx]
        device = random_device()
        x = random_tensor(ndim=3, dim0=2, dim1=2, dim2=3).to(device)
        y = random_tensor(ndim=2, dim0=2, dim1=3).to(device)
        out = op(x, y)
        return out

    @autotest(n=5, auto_backward=False)
    def test_broadcast_matrix_col(test_case):
        # Trailing size-1 dimension broadcast.
        op_idx = random(low=0, high=len(binary_ops)).to(int).value()
        op = binary_ops[op_idx]
        device = random_device()
        x = random_tensor(ndim=3, dim0=2, dim1=2, dim2=3).to(device)
        y = random_tensor(ndim=3, dim0=2, dim1=2, dim2=1).to(device)
        out = op(x, y)
        return out

    @autotest(n=5, auto_backward=False)
    def test_cpu_scalar_tensor_auto_cast(test_case):
        def check_output(test_case, output):
            # Compare device and values between the oneflow and pytorch
            # results produced by the autotest dual tensor.
            of_res = output.oneflow
            torch_res = output.pytorch
            # NOTE: torch's device has no device index bug oneflow has.
            # e.g. torch gets "cpu" but oneflow gets "cpu:0"
            test_case.assertTrue(str(torch_res.device) in str(of_res.device))
            test_case.assertTrue(
                np.allclose(of_res.numpy(), torch_res.detach().cpu().numpy())
            )

        op_idx = random(low=0, high=len(binary_ops)).to(int).value()
        op = binary_ops[op_idx]
        device = random_device()
        x = torch.tensor(1.0)
        y = random_tensor(ndim=2, dim0=2, dim1=2).to(device)
        # Check both argument orders: scalar-op-tensor and tensor-op-scalar.
        out = op(x, y)
        check_output(test_case, out)
        out = op(y, x)
        check_output(test_case, out)

    @autotest(n=30, auto_backward=False)
    def test_broadcast_scalar(test_case):
        # Python scalar broadcast against a tensor.
        op_idx = random(low=0, high=len(binary_ops)).to(int).value()
        op = binary_ops[op_idx]
        device = random_device()
        x = random_tensor(ndim=3, dim0=2, dim1=2, dim2=3).to(device)
        out = op(x, 1)
        return out

    # The @profile cases below benchmark representative broadcast shapes.
    @profile(torch.add)
    def profile_broadcast_matrix_row(test_case):
        input0 = torch.ones(256, 1024)
        input1 = torch.ones(1024)
        torch.add(input0, input1)

    @profile(torch.add)
    def profile_broadcast_matrix_col(test_case):
        input0 = torch.ones(1024, 256)
        input1 = torch.ones(1024, 1)
        torch.add(input0, input1)

    @profile(torch.add)
    def profile_broadcast_elementwise(test_case):
        input0 = torch.ones(256, 1024)
        input1 = torch.ones(256, 1024)
        torch.add(input0, input1)

    @profile(torch.add)
    def profile_broadcast_scalar(test_case):
        input0 = torch.ones(256, 1024)
        torch.add(input0, 1)

    @profile(torch.add)
    def profile_broadcast_general(test_case):
        input0 = torch.ones(2, 64, 8, 16, 16, 4)
        input1 = torch.ones(64, 8, 1, 16, 1)
        torch.add(input0, input1)
@flow.unittest.skip_unless_1n1d()
class TestBroadcastOpsOther(flow.unittest.TestCase):
    """Tests for broadcast utility functions (shapes/tensors/to)."""

    def test_broadcast_shapes(test_case):
        shapes = (2,), (3, 1), (1, 1, 1)
        # BUGFIX: the original used assertTrue(a, b), which treats the
        # torch result as a failure *message* and never compares anything;
        # assertEqual actually checks oneflow against torch.
        test_case.assertEqual(
            flow.broadcast_shapes(*shapes), ori_torch.broadcast_shapes(*shapes),
        )

    @autotest(n=3)
    def METHOD_NAME(test_case):
        device = random_device()
        x = random_tensor(ndim=2, dim0=1, dim1=4).to(device=device)
        y = random_tensor(ndim=2, dim0=3, dim1=1).to(device=device)
        return torch.broadcast_tensors(x, y)

    def test_broadcast_to(test_case):
        # see flow.expand, because broadcast_to is an alias of flow.expand
        pass
if __name__ == "__main__":
unittest.main() |
7,283 | test ec2 no instances | from unittest import mock
from boto3 import resource, session
from moto import mock_ec2
from prowler.providers.aws.lib.audit_info.models import AWS_Audit_Info
from prowler.providers.aws.services.ssm.ssm_service import ManagedInstance
from prowler.providers.common.models import Audit_Metadata
AWS_REGION = "us-east-1"
EXAMPLE_AMI_ID = "ami-12c6146b"
AWS_ACCOUNT_NUMBER = "123456789012"
class Test_ec2_instance_managed_by_ssm_test:
    """Unit tests for the ec2_instance_managed_by_ssm prowler check."""

    def set_mocked_audit_info(self):
        """Build a minimal AWS_Audit_Info for a mocked audit session."""
        audit_info = AWS_Audit_Info(
            session_config=None,
            original_session=None,
            audit_session=session.Session(
                profile_name=None,
                botocore_session=None,
            ),
            audited_account=AWS_ACCOUNT_NUMBER,
            audited_account_arn=f"arn:aws:iam::{AWS_ACCOUNT_NUMBER}:root",
            audited_user_id=None,
            audited_partition="aws",
            audited_identity_arn=None,
            profile=None,
            profile_region=None,
            credentials=None,
            assumed_role_info=None,
            audited_regions=["us-east-1", "eu-west-1"],
            organizations_metadata=None,
            audit_resources=None,
            mfa_enabled=False,
            audit_metadata=Audit_Metadata(
                services_scanned=0,
                expected_checks=[],
                completed_checks=0,
                audit_progress=0,
            ),
        )
        return audit_info

    @mock_ec2
    def METHOD_NAME(self):
        """With no EC2 instances the check must produce no findings."""
        from prowler.providers.aws.services.ec2.ec2_service import EC2

        current_audit_info = self.set_mocked_audit_info()
        # BUGFIX: instantiate MagicMock. The original assigned the class
        # itself (`mock.MagicMock` without parentheses), so setting
        # `managed_instances` mutated the shared MagicMock class and leaked
        # state between tests.
        ssm_client = mock.MagicMock()
        ssm_client.managed_instances = {}
        with mock.patch(
            "prowler.providers.aws.lib.audit_info.audit_info.current_audit_info",
            new=current_audit_info,
        ), mock.patch(
            "prowler.providers.aws.services.ssm.ssm_service.SSM",
            new=ssm_client,
        ), mock.patch(
            "prowler.providers.aws.services.ssm.ssm_client.ssm_client",
            new=ssm_client,
        ), mock.patch(
            "prowler.providers.aws.services.ec2.ec2_instance_managed_by_ssm.ec2_instance_managed_by_ssm.ec2_client",
            new=EC2(current_audit_info),
        ):
            # Test Check
            from prowler.providers.aws.services.ec2.ec2_instance_managed_by_ssm.ec2_instance_managed_by_ssm import (
                ec2_instance_managed_by_ssm,
            )

            check = ec2_instance_managed_by_ssm()
            result = check.execute()

            assert len(result) == 0

    @mock_ec2
    def test_ec2_instance_managed_by_ssm_non_compliance_instance(self):
        """An instance absent from SSM managed_instances must FAIL."""
        ec2 = resource("ec2", region_name=AWS_REGION)
        instance = ec2.create_instances(
            ImageId=EXAMPLE_AMI_ID,
            MinCount=1,
            MaxCount=1,
            UserData="This is some user_data",
        )[0]

        # BUGFIX: MagicMock() instance instead of the class; the original
        # also assigned this twice — the duplicate is removed.
        ssm_client = mock.MagicMock()
        ssm_client.managed_instances = {}

        from prowler.providers.aws.services.ec2.ec2_service import EC2

        current_audit_info = self.set_mocked_audit_info()
        with mock.patch(
            "prowler.providers.aws.lib.audit_info.audit_info.current_audit_info",
            new=current_audit_info,
        ), mock.patch(
            "prowler.providers.aws.services.ssm.ssm_service.SSM",
            new=ssm_client,
        ), mock.patch(
            "prowler.providers.aws.services.ssm.ssm_client.ssm_client",
            new=ssm_client,
        ), mock.patch(
            "prowler.providers.aws.services.ec2.ec2_instance_managed_by_ssm.ec2_instance_managed_by_ssm.ec2_client",
            new=EC2(current_audit_info),
        ):
            # Test Check
            from prowler.providers.aws.services.ec2.ec2_instance_managed_by_ssm.ec2_instance_managed_by_ssm import (
                ec2_instance_managed_by_ssm,
            )

            check = ec2_instance_managed_by_ssm()
            result = check.execute()

            assert len(result) == 1
            assert result[0].status == "FAIL"
            assert result[0].region == AWS_REGION
            assert result[0].resource_tags is None
            assert (
                result[0].status_extended
                == f"EC2 Instance {instance.id} is not managed by Systems Manager."
            )
            assert result[0].resource_id == instance.id

    @mock_ec2
    def test_ec2_instance_managed_by_ssm_compliance_instance(self):
        """An instance present in SSM managed_instances must PASS."""
        ec2 = resource("ec2", region_name=AWS_REGION)
        instance = ec2.create_instances(
            ImageId=EXAMPLE_AMI_ID,
            MinCount=1,
            MaxCount=1,
            UserData="This is some user_data",
        )[0]

        # BUGFIX: MagicMock() instance instead of the class (see above).
        ssm_client = mock.MagicMock()
        ssm_client.managed_instances = {
            instance.id: ManagedInstance(
                arn=f"arn:aws:ec2:{AWS_REGION}:{AWS_ACCOUNT_NUMBER}:instance/{instance.id}",
                id=instance.id,
                region=AWS_REGION,
            )
        }

        from prowler.providers.aws.services.ec2.ec2_service import EC2

        current_audit_info = self.set_mocked_audit_info()
        with mock.patch(
            "prowler.providers.aws.lib.audit_info.audit_info.current_audit_info",
            new=current_audit_info,
        ), mock.patch(
            "prowler.providers.aws.services.ssm.ssm_service.SSM",
            new=ssm_client,
        ), mock.patch(
            "prowler.providers.aws.services.ssm.ssm_client.ssm_client",
            new=ssm_client,
        ), mock.patch(
            "prowler.providers.aws.services.ec2.ec2_instance_managed_by_ssm.ec2_instance_managed_by_ssm.ec2_client",
            new=EC2(current_audit_info),
        ):
            # Test Check
            from prowler.providers.aws.services.ec2.ec2_instance_managed_by_ssm.ec2_instance_managed_by_ssm import (
                ec2_instance_managed_by_ssm,
            )

            check = ec2_instance_managed_by_ssm()
            result = check.execute()

            assert len(result) == 1
            assert result[0].status == "PASS"
            assert result[0].region == AWS_REGION
            assert result[0].resource_tags is None
            assert (
                result[0].status_extended
                == f"EC2 Instance {instance.id} is managed by Systems Manager."
            )
            assert result[0].resource_id == instance.id
7,284 | get pauli string | """
QAOA with finite measurement shot noise
"""
from functools import partial
import numpy as np
from scipy import optimize
import networkx as nx
import optax
import cotengra as ctg
import tensorcircuit as tc
from tensorcircuit import experimental as E
from tensorcircuit.applications.graphdata import maxcut_solution_bruteforce
K = tc.set_backend("jax")
# note this script only supports jax backend
opt_ctg = ctg.ReusableHyperOptimizer(
methods=["greedy", "kahypar"],
parallel="ray",
minimize="combo",
max_time=10,
max_repeats=128,
progbar=True,
)
tc.set_contractor("custom", optimizer=opt_ctg, preprocessing=True)
def get_graph(n, d, weights=None):
    """Return a random d-regular graph on n nodes, optionally edge-weighted.

    :param n: number of nodes.
    :param d: degree of every node (n*d must be even).
    :param weights: optional sequence of d*n/2 edge weights, assigned in
        edge-iteration order; edges stay unweighted when None.
    """
    g = nx.random_regular_graph(d, n)
    if weights is not None:
        # zip pairs each edge with its weight, replacing the manual counter.
        for (u, v), w in zip(g.edges, weights):
            g[u][v]["weight"] = w
    return g
def get_exact_maxcut_loss(g):
    """Return the exact minimal loss of the max-cut Hamiltonian of *g*.

    With H = sum_ij w_ij Z_i Z_j, a cut edge contributes -w and an uncut
    edge +w, so the optimum is total_weight - 2 * max_cut_value.
    """
    cut, _ = maxcut_solution_bruteforce(g)
    totalw = 0
    for e in g.edges:
        totalw += g[e[0]][e[1]].get("weight", 1)
    loss = totalw - 2 * cut
    return loss
def METHOD_NAME(g):
    """Build the weighted ZZ Pauli strings for every edge of graph *g*.

    :return: (pauli_strings, weights) where each Pauli string is a list with
        3 (= Z) at the two endpoint qubits of an edge and 0 elsewhere, and
        each weight is the edge's "weight" attribute (default 1).
    """
    num_qubits = len(g.nodes)
    strings = []
    weights = []
    for a, b in g.edges:
        ps = [0] * num_qubits
        ps[a] = ps[b] = 3
        strings.append(ps)
        weights.append(g[a][b].get("weight", 1))
    return strings, weights
def generate_circuit(param, g, n, nlayers):
    """Build the QAOA ansatz circuit for graph *g* on *n* qubits.

    :param param: array of shape [nlayers, 2]; param[j] holds the
        (gamma, beta) angles of layer j.
    """
    # construct the circuit ansatz
    c = tc.Circuit(n)
    # Uniform superposition as the QAOA initial state.
    for i in range(n):
        c.H(i)
    for j in range(nlayers):
        c = tc.templates.blocks.QAOA_block(c, g, param[j, 0], param[j, 1])
    return c
def ps2z(psi):
    """Return the qubit indices carrying a Z (encoded as 3) in Pauli string *psi*.

    e.g. ps2z([3, 0, 3, 1]) == [0, 2]

    (The original comment described the unrelated ps2xyz helper; QUBO
    Hamiltonians here contain no X or Y, so only Z positions matter.)
    """
    return [i for i, p in enumerate(psi) if p == 3]
rkey = K.get_random_state(42)
def main_benchmark_suite(n, nlayers, d=3, init=None):
    """Run the full QAOA max-cut benchmark on a random d-regular graph.

    Compares: (0) exact diagonalization, (1.x) optimization with exact
    expectations (gradient-free Nelder-Mead and gradient-based Adam), and
    (2.x) optimization with finite measurement-shot noise (gradient-free
    and parameter-shift gradient-based).

    :param n: number of graph nodes / qubits.
    :param nlayers: number of QAOA layers.
    :param d: regular-graph degree.
    :param init: optional initial [nlayers, 2] parameter array.
    """
    g = get_graph(n, d, weights=np.random.uniform(size=[int(d * n / 2)]))
    loss_exact = get_exact_maxcut_loss(g)
    print("exact minimal loss by max cut bruteforce: ", loss_exact)
    pss, ws = METHOD_NAME(g)
    if init is None:
        init = np.random.normal(scale=0.1, size=[nlayers, 2])

    @partial(K.jit, static_argnums=(2))
    def exp_val(param, key, shots=10000):
        # expectation with shot noise
        # ps, w: H = \sum_i w_i ps_i
        # describing the system Hamiltonian as a weighted sum of Pauli string
        c = generate_circuit(param, g, n, nlayers)
        loss = 0
        s = c.state()
        mc = tc.quantum.measurement_counts(
            s,
            counts=shots,
            format="sample_bin",
            random_generator=key,
            jittable=True,
            is_prob=False,
        )
        for ps, w in zip(pss, ws):
            loss += w * tc.quantum.correlation_from_samples(ps2z(ps), mc, c._nqubits)
        return K.real(loss)

    @K.jit
    def exp_val_analytical(param):
        # Exact (infinite-shot) expectation of the same Hamiltonian.
        c = generate_circuit(param, g, n, nlayers)
        loss = 0
        for ps, w in zip(pss, ws):
            loss += w * c.expectation_ps(z=ps2z(ps))
        return K.real(loss)

    # 0. Exact result double check
    hm = tc.quantum.PauliStringSum2COO(
        K.convert_to_tensor(pss), K.convert_to_tensor(ws), numpy=True
    )
    hm = K.to_dense(hm)
    e, _ = np.linalg.eigh(hm)
    print("exact minimal loss via eigenstate: ", e[0])

    # 1.1 QAOA with numerically exact expectation: gradient free
    print("QAOA without shot noise")
    exp_val_analytical_sp = tc.interfaces.scipy_interface(
        exp_val_analytical, shape=[nlayers, 2], gradient=False
    )
    r = optimize.minimize(
        exp_val_analytical_sp,
        init,
        method="Nelder-Mead",
        options={"maxiter": 5000},
    )
    print(r)
    print("double check the value?: ", exp_val_analytical_sp(r["x"]))
    # cobyla seems to have issue to given consistent x and cobyla

    # 1.2 QAOA with numerically exact expectation: gradient based
    exponential_decay_scheduler = optax.exponential_decay(
        init_value=1e-2, transition_steps=500, decay_rate=0.9
    )
    opt = K.optimizer(optax.adam(exponential_decay_scheduler))
    param = init  # zeros stall the gradient
    param = tc.array_to_tensor(init, dtype=tc.rdtypestr)
    exp_val_grad_analytical = K.jit(K.value_and_grad(exp_val_analytical))
    for i in range(1000):
        e, gs = exp_val_grad_analytical(param)
        param = opt.update(gs, param)
        if i % 100 == 99:
            print(e)
    print("QAOA energy after gradient descent:", e)

    # 2.1 QAOA with finite shot noise: gradient free
    print("QAOA with shot noise")

    def exp_val_wrapper(param):
        global rkey
        rkey, skey = K.random_split(rkey)
        # maintain stateless randomness in scipy optimize interface
        return exp_val(param, skey)

    exp_val_sp = tc.interfaces.scipy_interface(
        exp_val_wrapper, shape=[nlayers, 2], gradient=False
    )
    r = optimize.minimize(
        exp_val_sp,
        init,
        method="Nelder-Mead",
        options={"maxiter": 5000},
    )
    print(r)
    # the real energy position after optimization
    print("converged as: ", exp_val_analytical_sp(r["x"]))

    # 2.2 QAOA with finite shot noise: gradient based
    exponential_decay_scheduler = optax.exponential_decay(
        init_value=1e-2, transition_steps=500, decay_rate=0.9
    )
    opt = K.optimizer(optax.adam(exponential_decay_scheduler))
    param = tc.array_to_tensor(init, dtype=tc.rdtypestr)
    exp_grad = E.parameter_shift_grad_v2(
        exp_val, argnums=0, random_argnums=1, shifts=(0.001, 0.002)
    )
    # parameter shift doesn't directly apply in QAOA case
    # NOTE(review): this assignment makes `rkey` local to this function,
    # shadowing the module-level rkey used by exp_val_wrapper — confirm
    # this separation is intentional.
    rkey = K.get_random_state(42)
    for i in range(1000):
        rkey, skey = K.random_split(rkey)
        gs = exp_grad(param, skey)
        param = opt.update(gs, param)
        if i % 100 == 99:
            rkey, skey = K.random_split(rkey)
            print(exp_val(param, skey))
    # the real energy position after optimization
    print("converged as:", exp_val_analytical(param))
if __name__ == "__main__":
main_benchmark_suite(8, 4) |
7,285 | validate mock ran with noop | from unittest.mock import MagicMock, patch
from django.test import RequestFactory, override_settings
from sentry.middleware.integrations.classifications import (
BaseClassification,
IntegrationClassification,
PluginClassification,
)
from sentry.middleware.integrations.integration_control import IntegrationControlMiddleware
from sentry.middleware.integrations.parsers.plugin import PluginRequestParser
from sentry.middleware.integrations.parsers.slack import SlackRequestParser
from sentry.silo import SiloMode
from sentry.testutils.cases import TestCase
@patch.object(
    IntegrationControlMiddleware,
    "classifications",
    [IntegrationClassification, PluginClassification],
)
class IntegrationControlMiddlewareTest(TestCase):
    """Tests for IntegrationControlMiddleware routing across silo modes.

    The middleware should only operate in CONTROL silo mode and should try
    each registered classification in order until one claims the request.
    """

    # Shared class-level fixtures; the @patch.object decorators below wrap
    # methods on these exact instances.
    get_response = MagicMock()
    middleware = IntegrationControlMiddleware(get_response=get_response)
    integration_cls = IntegrationClassification(response_handler=get_response)
    plugin_cls = PluginClassification(response_handler=get_response)

    def setUp(self):
        self.factory = RequestFactory()

    def METHOD_NAME(self, request, mock):
        """Run the middleware on `request` and assert that `mock` was hit and
        the middleware fell through to the no-op (pass-through) response."""
        # Ensure mock runs when middleware is called
        mock.reset_mock()
        response = self.middleware(request)
        assert mock.called
        # Ensure noop response
        assert response == self.get_response()

    @override_settings(SILO_MODE=SiloMode.MONOLITH)
    @patch.object(
        IntegrationControlMiddleware,
        "_should_operate",
        wraps=middleware._should_operate,
    )
    def test_inactive_on_monolith(self, mock_should_operate):
        """Middleware must not operate in MONOLITH mode."""
        request = self.factory.post("/extensions/slack/webhook/")
        assert mock_should_operate(request) is False
        self.METHOD_NAME(request, mock_should_operate)

    @override_settings(SILO_MODE=SiloMode.REGION)
    @patch.object(
        IntegrationControlMiddleware,
        "_should_operate",
        wraps=middleware._should_operate,
    )
    def test_inactive_on_region_silo(self, mock_should_operate):
        """Middleware must not operate in REGION mode."""
        request = self.factory.post("/extensions/slack/webhook/")
        assert mock_should_operate(request) is False
        self.METHOD_NAME(request, mock_should_operate)

    @override_settings(SILO_MODE=SiloMode.CONTROL)
    @patch.object(IntegrationClassification, "should_operate", wraps=integration_cls.should_operate)
    @patch.object(PluginClassification, "should_operate", wraps=plugin_cls.should_operate)
    def test_attempts_all_classifications(self, mock_plugin_operate, mock_integration_operate):
        """Every registered classification is consulted for an unmatched path."""
        class NewClassification(BaseClassification):
            pass

        self.middleware.register_classifications(classifications=[NewClassification])
        with patch.object(
            NewClassification, "should_operate", return_value=True
        ) as mock_new_should_operate, patch.object(
            NewClassification, "get_response"
        ) as mock_new_get_response:
            self.middleware(self.factory.post("/"))
            assert mock_integration_operate.called
            assert mock_plugin_operate.called
            assert mock_new_should_operate.called
            assert mock_new_get_response.called

    @override_settings(SILO_MODE=SiloMode.CONTROL)
    @patch.object(IntegrationClassification, "should_operate", wraps=integration_cls.should_operate)
    @patch.object(PluginClassification, "should_operate", wraps=plugin_cls.should_operate)
    def test_attempts_ordered_classifications(self, mock_plugin_operate, mock_integration_operate):
        """A match on an earlier classification short-circuits later ones."""
        self.middleware(self.factory.post("/extensions/slack/webhook/"))
        assert mock_integration_operate.called
        assert not mock_plugin_operate.called

    @override_settings(SILO_MODE=SiloMode.CONTROL)
    @patch.object(SlackRequestParser, "get_response")
    def test_returns_parser_get_response_integration(self, mock_parser_get_response):
        """Integration routes return the parser's response unchanged."""
        result = {"ok": True}
        mock_parser_get_response.return_value = result
        response = self.middleware(self.factory.post("/extensions/slack/webhook/"))
        assert result == response

    @override_settings(SILO_MODE=SiloMode.CONTROL)
    @patch.object(PluginRequestParser, "get_response")
    def test_returns_parser_get_response_plugin(self, mock_parser_get_response):
        """Plugin routes return the parser's response unchanged."""
        result = {"ok": True}
        mock_parser_get_response.return_value = result
        response = self.middleware(self.factory.post("/plugins/bitbucket/organizations/1/webhook/"))
        assert result == response
7,286 | test lexx decorators | # coding: utf-8
import sys
import abipy.data as abidata
import abipy.abilab as abilab
import abipy.abio.decorators as ideco
from abipy.core.testing import AbipyTest
from abipy.abio.factories import *
class DecoratorTest(AbipyTest):
    """Tests for the AbinitInput decorators in abipy.abio.decorators.

    Each decorator must return a NEW input; tearDown verifies the originals
    built in setUp are left untouched.
    """

    def setUp(self):
        # Si ebands
        si_structure = abilab.Structure.from_file(abidata.cif_file("si.cif"))
        self.si_ebands = ebands_input(si_structure, abidata.pseudos("14si.pspnc"), ecut=2, kppa=10)
        # Reference input string. Used to test if decorators do not change the initial Input.
        self.si_ebands_inpstr = str(self.si_ebands)
        # NiO bands with PAW
        nio_structure = abidata.structure_from_ucell("NiO")
        self.nio_ebands = ebands_input(nio_structure, abidata.pseudos("28ni.paw", "8o.2.paw"),
                                       ecut=2, pawecutdg=4, kppa=10)
        self.nio_ebands_inpstr = str(self.nio_ebands)

    def tearDown(self):
        """Testing if initial inputs are unchanged."""
        assert all(not inp.decorators for inp in self.si_ebands)
        assert self.si_ebands_inpstr == str(self.si_ebands)
        assert all(not inp.decorators for inp in self.nio_ebands)
        assert self.nio_ebands_inpstr == str(self.nio_ebands)

    def validate_inp(self, inp, ndec=1):
        """Run abivalidate on each dataset of `inp`; raise on invalid input."""
        # Hack needed because ecut is not in the pseudos.
        inp.set_vars(ecut=3)
        #v = inp.validate()
        #if v.retcode != 0:
        #    raise RuntimeError(v.err)
        #else:
        #    print("Valid input!")
        # Test validity of individual datasets.
        for dtset in inp.split_datasets():
            v = dtset.abivalidate()
            #assert dtset.decorators == inp.decorators
            #assert len(dtset.decorators) == ndec
            if v.retcode != 0:
                raise RuntimeError("Wrong input. See {0}".format(v))
            else:
                print("Valid input!")

    def test_spin_decorator(self):
        """Testing spin decorator."""
        spinor_deco = ideco.SpinDecorator("spinor")
        self.assert_msonable(spinor_deco)
        print(spinor_deco)
        new_inp = spinor_deco(self.si_ebands)
        print(new_inp)
        # kptopt is set to 4 if non-collinear magnetism and kptopt == 3 is not specified.
        for dt in new_inp:
            assert dt["nsppol"] == 1 and dt["nspinor"] == 2 and dt["kptopt"] == 4
        #self.validate_inp(new_inp)
        # kptopt should not be changes if it's set to 3 and non-collinear magnetism
        inp_with_kpt3 = self.si_ebands.deepcopy()
        inp_with_kpt3.kptopt = 3
        # FIXME: Here there's a bug because get should check the global variables!
        #for dt in spinor_deco(inp_with_kpt3):
        #    assert dt["nsppol"] == 1 and dt["nspinor"] == 2 and dt["kptopt"] == 3

    def test_smearing_decorator(self):
        """Testing electronic smearing decorator."""
        smearing_deco = ideco.SmearingDecorator("fermi_dirac:0.1 eV")
        self.assert_msonable(smearing_deco)
        new_inp = smearing_deco(self.si_ebands)
        self.validate_inp(new_inp)

    def test_xcdecorator(self):
        """Testing XCdecorator."""
        xc_deco = ideco.XcDecorator(17)
        self.assert_msonable(xc_deco)
        new_inp = xc_deco(self.si_ebands)
        self.validate_inp(new_inp)

    def test_ldau_decorators(self):
        """Testing LdaUDecorator."""
        symbols_luj = dict(Ni=dict(l=2, u=5.0, j=0.5))
        ldau_deco = ideco.LdaUDecorator(symbols_luj, usepawu=1, unit="eV")
        self.assert_msonable(ldau_deco)
        new_inp = ldau_deco(self.nio_ebands)
        new_inp.set_vars(chkprim=0, ecut=3, pawecutdg=3)
        print(new_inp)
        self.validate_inp(new_inp)
        #assert 0
        # LDA+U only if PAW
        with self.assertRaises(ldau_deco.Error):
            ldau_deco(self.si_ebands)

    def METHOD_NAME(self):
        """Testing LexxDecorator."""
        lexx_deco = ideco.LexxDecorator({"Ni": 2})
        self.assert_msonable(lexx_deco)
        new_inp = lexx_deco(self.nio_ebands)
        new_inp.set_vars(chkprim=0, ecut=3, pawecutdg=3)
        print(new_inp)
        self.validate_inp(new_inp)
        #assert 0

    def test_new_with_decorators(self):
        """Testing AbinitInput.new_with_decorators."""
        spinor_deco = ideco.SpinDecorator("spinor")
        smearing_deco = ideco.SmearingDecorator("nosmearing")
        new_inp = self.si_ebands.new_with_decorators(spinor_deco)
        new_inp = self.si_ebands.new_with_decorators([spinor_deco, smearing_deco])
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    import unittest
    unittest.main()
7,287 | test load metadata from file | import json
import os
from copy import deepcopy
from pathlib import Path
from typing import Dict, Union
from pynwb.ophys import ImagingPlane, TwoPhotonSeries
from neuroconv.utils import (
dict_deep_update,
fill_defaults,
get_schema_from_hdmf_class,
get_schema_from_method_signature,
load_dict_from_file,
)
def compare_dicts(a: dict, b: dict):
    """Assert two (possibly nested) dicts are equal, ignoring ordering.

    Both values are normalized with ``sort_item`` and serialized with an
    indented ``json.dumps`` before comparison, so a failing assert shows a
    readable diff.
    """
    normalized_a = json.dumps(sort_item(a), indent=2)
    normalized_b = json.dumps(sort_item(b), indent=2)
    assert normalized_a == normalized_b
def compare_dicts_2(a: dict, b: dict):
    """Assert two nested dicts are equal up to ordering (compact JSON dump)."""
    assert json.dumps(sort_item(a)) == json.dumps(sort_item(b))
def sort_item(item):
    """Recursively normalize a JSON-like structure for order-insensitive comparison.

    Lists are sorted by string representation, dict keys are sorted, and the
    normalization is applied to all nested values. Scalars pass through.
    """
    if isinstance(item, dict):
        return {key: sort_item(item[key]) for key in sorted(item)}
    if isinstance(item, list):
        return [sort_item(element) for element in sorted(item, key=str)]
    return item
def test_get_schema_from_method_signature():
    """The JSON schema derived from A.__init__'s annotations matches the
    hand-written expected schema (types, defaults and required list)."""
    class A:
        def __init__(self, a: int, b: float, c: Union[Path, str], d: bool, e: str = "hi", f: Dict[str, str] = None):
            pass

    schema = get_schema_from_method_signature(A.__init__)
    correct_schema = dict(
        additionalProperties=False,
        properties=dict(
            a=dict(type="number"),
            b=dict(type="number"),
            c=dict(type="string"),
            d=dict(type="boolean"),
            e=dict(default="hi", type="string"),
            f=dict(type="object", additionalProperties={"^.*$": dict(type="string")}),
        ),
        required=[
            "a",
            "b",
            "c",
            "d",
        ],
        type="object",
    )
    assert schema == correct_schema
def test_dict_deep_update_1():
    """Flat dicts with immutable values: the second dict wins on shared keys."""
    # 1. test the updating of two dicts with all keys and values as immutable elements
    a1 = dict(a=1, b="hello", c=23)
    b1 = dict(a=3, b="goodbye", d="compare")
    result1 = dict_deep_update(a1, b1)
    correct_result = dict(a=3, b="goodbye", c=23, d="compare")
    compare_dicts(result1, correct_result)
def test_dict_deep_update_2():
    """Nested dict values are merged recursively, not replaced wholesale."""
    # 2. test dict update with values as dictionaries themselves
    a1 = dict(a=1, b="hello", c=23)
    b1 = dict(a=3, b="goodbye", d="compare")
    a2 = dict(a=1, c=a1)
    b2 = dict(a=3, b="compare", c=b1)
    result2 = dict_deep_update(a2, b2)
    correct_result = dict(a=3, b="compare", c=dict_deep_update(a1, b1))
    compare_dicts(result2, correct_result)
def test_dict_deep_update_3():
    """List-valued keys: lists are appended (optionally de-duplicated) or
    overwritten depending on remove_repeats / append_list flags."""
    # 3.1 test merge of dicts with a key's value as a list of int/str
    a1 = dict(a=1, b="hello", c=23)
    b1 = dict(a=3, b="goodbye", d="compare")
    a2 = dict(a=1, c=a1)
    b2 = dict(a=3, b="compare", c=b1)
    a3 = dict(a2, ls1=[1, 2, "test"])
    b3 = dict(b2, ls1=[3, 1, "test2"], ls3=[2, 3, "test4"])
    # test whether repeated values are not removed
    result3_1 = dict_deep_update(a3, b3, remove_repeats=False)
    correct_result = dict(dict_deep_update(a2, b2), ls1=[1, 1, 2, 3, "test", "test2"], ls3=[2, 3, "test4"])
    compare_dicts(result3_1, correct_result)
    # test removing repeats
    result3_1 = dict_deep_update(a3, b3)
    correct_result = dict(dict_deep_update(a2, b2), ls1=[1, 2, 3, "test", "test2"], ls3=[2, 3, "test4"])
    compare_dicts(result3_1, correct_result)
    # 3.2 test without append: in this case ls1 would be overwritten
    result3_2 = dict_deep_update(a3, b3, append_list=False)
    correct_result = dict(dict_deep_update(a2, b2), ls1=b3["ls1"], ls3=[2, 3, "test4"])
    compare_dicts(result3_2, correct_result)
def test_dict_deep_update_4():
    """Lists of dicts: entries sharing compare_key are deep-merged; if the
    compare_key is missing the new entries are simply appended."""
    # 4. case of dicts with key's values as a list of dicts.
    a1 = dict(a=1, b="hello", c=23)
    b1 = dict(a=3, b="goodbye", d="compare")
    a2 = dict(a=1, c=a1)
    b2 = dict(a=3, b="compare", c=b1)
    a3 = dict(a2, ls1=[1, 2, "test"])
    b3 = dict(b2, ls1=[3, 1, "test2"])
    c1 = dict(a1, b="world", e="string")
    a4 = dict(deepcopy(a3), ls1=[a1, b1])
    b4 = dict(b3, ls1=[c1])
    # compare key is common in both: if the compare key is found in any of the dicts
    # in the list then those dicts are dict_deep_updated.
    result4 = dict_deep_update(a4, b4, compare_key="a")
    correct_result = dict(dict_deep_update(a3, b3), ls1=[dict_deep_update(a1, c1), b1])
    compare_dicts(result4, correct_result)
    # compare key missing: if compare key is missing then the list is appended always
    result4 = dict_deep_update(a4, b4, compare_key="b")
    correct_result = dict(dict_deep_update(a3, b3), ls1=[a1, c1, b1])
    compare_dicts(result4, correct_result)
def test_fill_defaults():
    """fill_defaults injects the given default values into the matching
    schema properties in place, leaving other properties untouched."""
    schema = dict(
        additionalProperties=False,
        properties=dict(
            a=dict(type="number"),
            b=dict(type="number"),
            c=dict(type="string"),
            d=dict(type="boolean"),
            e=dict(default="hi", type="string"),
        ),
        required=[
            "a",
            "b",
            "c",
            "d",
        ],
        type="object",
    )
    defaults = dict(a=3, c="bye", e="new")
    fill_defaults(schema, defaults)
    correct_new_schema = dict(
        additionalProperties=False,
        properties=dict(
            a=dict(type="number", default=3),
            b=dict(type="number"),
            c=dict(type="string", default="bye"),
            d=dict(type="boolean"),
            e=dict(default="new", type="string"),
        ),
        required=[
            "a",
            "b",
            "c",
            "d",
        ],
        type="object",
    )
    compare_dicts(schema, correct_new_schema)
def METHOD_NAME():
    """Loading the same metadata from YAML and from JSON yields the same dict
    as the in-code reference m0 (order-insensitive comparison)."""
    m0 = dict(
        NWBFile=dict(
            experimenter="Mr Tester",
            identifier="abc123",
            institution="My University",
            lab="My lab",
            session_description="testing conversion tools software",
            session_start_time="2020-04-15T10:00:00+00:00",
        ),
        Subject=dict(
            description="ADDME",
            sex="M",
            species="ADDME",
            subject_id="sid000",
            weight="10g",
            date_of_birth="2020-04-07T00:15:00+00:00",
        ),
        Ecephys=dict(
            Device=[dict(name="device_ecephys")],
            ElectricalSeries=[
                dict(description="ADDME", name="ElectricalSeries", rate=10.0, starting_time=0.0, conversion=1.0)
            ],
            ElectrodeGroup=[
                dict(description="ADDME", device="device_ecephys", location="ADDME", name="ElectrodeGroup")
            ],
        ),
    )
    # Fixture files live next to this test module.
    yaml_file_path = os.path.join(os.path.dirname(__file__), "metadata_tests.yaml")
    json_file_path = os.path.join(os.path.dirname(__file__), "metadata_tests.json")
    m1 = load_dict_from_file(file_path=yaml_file_path)
    compare_dicts_2(m0, m1)
    m2 = load_dict_from_file(file_path=json_file_path)
    compare_dicts_2(m0, m2)
def test_get_schema_from_ImagingPlane_array_type():
    """Array-typed hdmf fields (origin_coords, grid_spacing) appear in the schema."""
    imaging_plane_schema = get_schema_from_hdmf_class(ImagingPlane)
    assert "origin_coords" in imaging_plane_schema["properties"]
    assert "grid_spacing" in imaging_plane_schema["properties"]
def test_get_schema_from_TwoPhotonSeries_array_type():
    """Bulk-data fields (data, timestamps, external_file) are excluded from the schema."""
    two_photon_series_schema = get_schema_from_hdmf_class(TwoPhotonSeries)
    assert "data" not in two_photon_series_schema["properties"]
    assert "timestamps" not in two_photon_series_schema["properties"]
    assert "external_file" not in two_photon_series_schema["properties"]
7,288 | get model version | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetModelVersionResult',
'AwaitableGetModelVersionResult',
'get_model_version',
'get_model_version_output',
]
@pulumi.output_type
class GetModelVersionResult:
    """
    Azure Resource Manager resource envelope.

    Generated output wrapper: each constructor argument is type-checked and
    stored via pulumi.set; properties read back via pulumi.get.
    """
    def __init__(__self__, id=None, model_version_properties=None, name=None, system_data=None, type=None):
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if model_version_properties and not isinstance(model_version_properties, dict):
            raise TypeError("Expected argument 'model_version_properties' to be a dict")
        pulumi.set(__self__, "model_version_properties", model_version_properties)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="modelVersionProperties")
    def model_version_properties(self) -> 'outputs.ModelVersionResponse':
        """
        [Required] Additional attributes of the entity.
        """
        return pulumi.get(self, "model_version_properties")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> 'outputs.SystemDataResponse':
        """
        Azure Resource Manager metadata containing createdBy and modifiedBy information.
        """
        return pulumi.get(self, "system_data")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
class AwaitableGetModelVersionResult(GetModelVersionResult):
    """Awaitable variant of GetModelVersionResult; __await__ yields nothing and
    resolves immediately to a plain GetModelVersionResult copy."""
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetModelVersionResult(
            id=self.id,
            model_version_properties=self.model_version_properties,
            name=self.name,
            system_data=self.system_data,
            type=self.type)
def METHOD_NAME(name: Optional[str] = None,
                resource_group_name: Optional[str] = None,
                version: Optional[str] = None,
                workspace_name: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetModelVersionResult:
    """
    Azure Resource Manager resource envelope.


    :param str name: Container name. This is case-sensitive.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str version: Version identifier. This is case-sensitive.
    :param str workspace_name: Name of Azure Machine Learning workspace.
    :return: the model version resource fetched via the pulumi invoke API.
    """
    __args__ = dict()
    __args__['name'] = name
    __args__['resourceGroupName'] = resource_group_name
    __args__['version'] = version
    __args__['workspaceName'] = workspace_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:machinelearningservices/v20230601preview:getModelVersion', __args__, opts=opts, typ=GetModelVersionResult).value

    return AwaitableGetModelVersionResult(
        id=pulumi.get(__ret__, 'id'),
        model_version_properties=pulumi.get(__ret__, 'model_version_properties'),
        name=pulumi.get(__ret__, 'name'),
        system_data=pulumi.get(__ret__, 'system_data'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(METHOD_NAME)
def get_model_version_output(name: Optional[pulumi.Input[str]] = None,
                             resource_group_name: Optional[pulumi.Input[str]] = None,
                             version: Optional[pulumi.Input[str]] = None,
                             workspace_name: Optional[pulumi.Input[str]] = None,
                             opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetModelVersionResult]:
    """
    Azure Resource Manager resource envelope.

    Output-lifted variant of the plain getter; the body is supplied by the
    lift_output_func decorator.

    :param str name: Container name. This is case-sensitive.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str version: Version identifier. This is case-sensitive.
    :param str workspace_name: Name of Azure Machine Learning workspace.
    """
    ...
7,289 | data actions | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'PermissionResponse',
]
@pulumi.output_type
class PermissionResponse(dict):
    """
    Role definition permissions.

    Dict-backed output type: raw (camelCase) key access emits a warning that
    points at the snake_case property getter instead.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to the snake_case property names exposed here.
        suggest = None
        if key == "conditionVersion":
            suggest = "condition_version"
        elif key == "dataActions":
            suggest = "data_actions"
        elif key == "notActions":
            suggest = "not_actions"
        elif key == "notDataActions":
            suggest = "not_data_actions"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in PermissionResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        PermissionResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        PermissionResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 condition: str,
                 condition_version: str,
                 actions: Optional[Sequence[str]] = None,
                 METHOD_NAME: Optional[Sequence[str]] = None,
                 not_actions: Optional[Sequence[str]] = None,
                 not_data_actions: Optional[Sequence[str]] = None):
        """
        Role definition permissions.
        :param str condition: The conditions on the role definition. This limits the resources it can be assigned to. e.g.: @Resource[Microsoft.Storage/storageAccounts/blobServices/containers:ContainerName] StringEqualsIgnoreCase 'foo_storage_container'
        :param str condition_version: Version of the condition. Currently the only accepted value is '2.0'
        :param Sequence[str] actions: Allowed actions.
        :param Sequence[str] data_actions: Allowed Data actions.
        :param Sequence[str] not_actions: Denied actions.
        :param Sequence[str] not_data_actions: Denied Data actions.
        """
        pulumi.set(__self__, "condition", condition)
        pulumi.set(__self__, "condition_version", condition_version)
        if actions is not None:
            pulumi.set(__self__, "actions", actions)
        if METHOD_NAME is not None:
            pulumi.set(__self__, "data_actions", METHOD_NAME)
        if not_actions is not None:
            pulumi.set(__self__, "not_actions", not_actions)
        if not_data_actions is not None:
            pulumi.set(__self__, "not_data_actions", not_data_actions)

    @property
    @pulumi.getter
    def condition(self) -> str:
        """
        The conditions on the role definition. This limits the resources it can be assigned to. e.g.: @Resource[Microsoft.Storage/storageAccounts/blobServices/containers:ContainerName] StringEqualsIgnoreCase 'foo_storage_container'
        """
        return pulumi.get(self, "condition")

    @property
    @pulumi.getter(name="conditionVersion")
    def condition_version(self) -> str:
        """
        Version of the condition. Currently the only accepted value is '2.0'
        """
        return pulumi.get(self, "condition_version")

    @property
    @pulumi.getter
    def actions(self) -> Optional[Sequence[str]]:
        """
        Allowed actions.
        """
        return pulumi.get(self, "actions")

    @property
    @pulumi.getter(name="dataActions")
    def METHOD_NAME(self) -> Optional[Sequence[str]]:
        """
        Allowed Data actions.
        """
        return pulumi.get(self, "data_actions")

    @property
    @pulumi.getter(name="notActions")
    def not_actions(self) -> Optional[Sequence[str]]:
        """
        Denied actions.
        """
        return pulumi.get(self, "not_actions")

    @property
    @pulumi.getter(name="notDataActions")
    def not_data_actions(self) -> Optional[Sequence[str]]:
        """
        Denied Data actions.
        """
        return pulumi.get(self, "not_data_actions")
7,290 | landmark pred | import numpy as np
def clip_boxes(boxes, im_shape):
"""
Clip boxes to image boundaries.
:param boxes: [N, 4* num_classes]
:param im_shape: tuple of 2
:return: [N, 4* num_classes]
"""
# x1 >= 0
boxes[:, 0::4] = np.maximum(np.minimum(boxes[:, 0::4], im_shape[1] - 1), 0)
# y1 >= 0
boxes[:, 1::4] = np.maximum(np.minimum(boxes[:, 1::4], im_shape[0] - 1), 0)
# x2 < im_shape[1]
boxes[:, 2::4] = np.maximum(np.minimum(boxes[:, 2::4], im_shape[1] - 1), 0)
# y2 < im_shape[0]
boxes[:, 3::4] = np.maximum(np.minimum(boxes[:, 3::4], im_shape[0] - 1), 0)
return boxes
# def bbox_overlaps(boxes, query_boxes):
# return bbox_overlaps_cython(boxes, query_boxes)
def bbox_overlaps(boxes, query_boxes):
    # Thin dispatch wrapper: the Cython implementation is disabled above,
    # so always use the pure-Python fallback.
    return bbox_overlaps_py(boxes, query_boxes)
def bbox_overlaps_py(boxes, query_boxes):
    """
    Determine IoU overlaps between boxes and query_boxes.
    :param boxes: n * 4 bounding boxes (x1, y1, x2, y2), inclusive coordinates
    :param query_boxes: k * 4 bounding boxes
    :return: overlaps: n * k array of IoU values in [0, 1]
    """
    n_ = boxes.shape[0]
    k_ = query_boxes.shape[0]
    # BUGFIX: the `np.float` alias was removed in NumPy 1.24; use float64.
    overlaps = np.zeros((n_, k_), dtype=np.float64)
    for k in range(k_):
        # +1 throughout because box coordinates are inclusive pixel indices.
        query_box_area = (query_boxes[k, 2] - query_boxes[k, 0] + 1) * (query_boxes[k, 3] - query_boxes[k, 1] + 1)
        for n in range(n_):
            iw = min(boxes[n, 2], query_boxes[k, 2]) - max(boxes[n, 0], query_boxes[k, 0]) + 1
            if iw > 0:
                ih = min(boxes[n, 3], query_boxes[k, 3]) - max(boxes[n, 1], query_boxes[k, 1]) + 1
                if ih > 0:
                    box_area = (boxes[n, 2] - boxes[n, 0] + 1) * (boxes[n, 3] - boxes[n, 1] + 1)
                    all_area = float(box_area + query_box_area - iw * ih)
                    overlaps[n, k] = iw * ih / all_area
    return overlaps
def nonlinear_transform(ex_rois, gt_rois):
    """
    Compute bounding box regression targets from ex_rois to gt_rois.
    :param ex_rois: [N, 4] (x1, y1, x2, y2)
    :param gt_rois: [N, >=4] ground-truth boxes
    :return: [N, 4] targets (dx, dy, dw, dh)
    """
    assert ex_rois.shape[0] == gt_rois.shape[0], 'inconsistent rois number'

    def _whctrs(rois):
        # widths, heights and centres of (x1, y1, x2, y2) rois (inclusive coords)
        w = rois[:, 2] - rois[:, 0] + 1.0
        h = rois[:, 3] - rois[:, 1] + 1.0
        cx = rois[:, 0] + 0.5 * (w - 1.0)
        cy = rois[:, 1] + 0.5 * (h - 1.0)
        return w, h, cx, cy

    ex_w, ex_h, ex_cx, ex_cy = _whctrs(ex_rois)
    gt_w, gt_h, gt_cx, gt_cy = _whctrs(gt_rois)

    # Centre offsets are normalized by the example size; sizes in log space.
    dx = (gt_cx - ex_cx) / (ex_w + 1e-14)
    dy = (gt_cy - ex_cy) / (ex_h + 1e-14)
    dw = np.log(gt_w / ex_w)
    dh = np.log(gt_h / ex_h)

    return np.vstack((dx, dy, dw, dh)).transpose()
def landmark_transform(ex_rois, gt_rois):
    """
    Compute landmark regression targets relative to the centres of ex_rois.
    :param ex_rois: [N, 4] (x1, y1, x2, y2)
    :param gt_rois: [N, num_points, C] landmark coordinates (x, y, visibility, ...)
    :return: [N, num_points * (C - 1)] normalized targets (visibility column dropped)
    """
    assert ex_rois.shape[0] == gt_rois.shape[0], 'inconsistent rois number'
    widths = ex_rois[:, 2] - ex_rois[:, 0] + 1.0
    heights = ex_rois[:, 3] - ex_rois[:, 1] + 1.0
    ctr_x = ex_rois[:, 0] + 0.5 * (widths - 1.0)
    ctr_y = ex_rois[:, 1] + 0.5 * (heights - 1.0)

    columns = []
    for point in range(gt_rois.shape[1]):
        for coord in range(gt_rois.shape[2]):
            if coord == 2:
                # occlusion/visibility channel is not regressed
                continue
            if coord == 0:  # x: offset from centre, normalized by width
                columns.append((gt_rois[:, point, coord] - ctr_x) / (widths + 1e-14))
            elif coord == 1:  # y: offset from centre, normalized by height
                columns.append((gt_rois[:, point, coord] - ctr_y) / (heights + 1e-14))
            else:  # any extra channel is passed through unchanged
                columns.append(gt_rois[:, point, coord])
    return np.vstack(columns).transpose()
def nonlinear_pred(boxes, box_deltas):
    """
    Transform the set of class-agnostic boxes into class-specific boxes
    by applying the predicted offsets (box_deltas).
    :param boxes: !important [N, 4] (x1, y1, x2, y2)
    :param box_deltas: [N, 4 * num_classes] (dx, dy, dw, dh) per class
    :return: [N, 4 * num_classes] predicted boxes
    """
    if boxes.shape[0] == 0:
        return np.zeros((0, box_deltas.shape[1]))

    # BUGFIX: the `np.float` alias was removed in NumPy 1.24; use float64.
    boxes = boxes.astype(np.float64, copy=False)
    widths = boxes[:, 2] - boxes[:, 0] + 1.0
    heights = boxes[:, 3] - boxes[:, 1] + 1.0
    ctr_x = boxes[:, 0] + 0.5 * (widths - 1.0)
    ctr_y = boxes[:, 1] + 0.5 * (heights - 1.0)

    dx = box_deltas[:, 0::4]
    dy = box_deltas[:, 1::4]
    dw = box_deltas[:, 2::4]
    dh = box_deltas[:, 3::4]

    # Invert the encoding of nonlinear_transform: centre offsets are scaled by
    # the box size, sizes are decoded from log space.
    pred_ctr_x = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis]
    pred_ctr_y = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis]
    pred_w = np.exp(dw) * widths[:, np.newaxis]
    pred_h = np.exp(dh) * heights[:, np.newaxis]

    pred_boxes = np.zeros(box_deltas.shape)
    # x1
    pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * (pred_w - 1.0)
    # y1
    pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * (pred_h - 1.0)
    # x2
    pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * (pred_w - 1.0)
    # y2
    pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * (pred_h - 1.0)

    return pred_boxes
def METHOD_NAME(boxes, landmark_deltas):
    """
    Apply predicted landmark offsets to boxes.
    :param boxes: [N, 4] (x1, y1, x2, y2)
    :param landmark_deltas: [N, 2 * num_points] alternating (x, y) offsets
    :return: [N, 2 * num_points] absolute landmark coordinates
    """
    if boxes.shape[0] == 0:
        return np.zeros((0, landmark_deltas.shape[1]))
    # BUGFIX: the `np.float` alias was removed in NumPy 1.24; use float64.
    boxes = boxes.astype(np.float64, copy=False)
    widths = boxes[:, 2] - boxes[:, 0] + 1.0
    heights = boxes[:, 3] - boxes[:, 1] + 1.0
    ctr_x = boxes[:, 0] + 0.5 * (widths - 1.0)
    ctr_y = boxes[:, 1] + 0.5 * (heights - 1.0)
    preds = []
    for i in range(landmark_deltas.shape[1]):
        # even columns are x offsets (scaled by width), odd are y (by height)
        if i % 2 == 0:
            pred = (landmark_deltas[:, i] * widths + ctr_x)
        else:
            pred = (landmark_deltas[:, i] * heights + ctr_y)
        preds.append(pred)
    preds = np.vstack(preds).transpose()
    return preds
def iou_transform(ex_rois, gt_rois):
    """ return bbox targets, IoU loss uses gt_rois as gt """
    # IoU loss regresses the ground-truth boxes directly; no encoding needed.
    assert ex_rois.shape[0] == gt_rois.shape[0], 'inconsistent rois number'
    return gt_rois
def iou_pred(boxes, box_deltas):
    """
    Transform the set of class-agnostic boxes into class-specific boxes
    by applying the predicted offsets (box_deltas). For the IoU
    parameterization the deltas are plain per-corner offsets.
    :param boxes: !important [N, 4] (x1, y1, x2, y2)
    :param box_deltas: [N, 4 * num_classes]
    :return: [N, 4 * num_classes] predicted boxes
    """
    if boxes.shape[0] == 0:
        return np.zeros((0, box_deltas.shape[1]))

    # BUGFIX: the `np.float` alias was removed in NumPy 1.24; use float64.
    boxes = boxes.astype(np.float64, copy=False)
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]

    dx1 = box_deltas[:, 0::4]
    dy1 = box_deltas[:, 1::4]
    dx2 = box_deltas[:, 2::4]
    dy2 = box_deltas[:, 3::4]

    pred_boxes = np.zeros(box_deltas.shape)
    # x1
    pred_boxes[:, 0::4] = dx1 + x1[:, np.newaxis]
    # y1
    pred_boxes[:, 1::4] = dy1 + y1[:, np.newaxis]
    # x2
    pred_boxes[:, 2::4] = dx2 + x2[:, np.newaxis]
    # y2
    pred_boxes[:, 3::4] = dy2 + y2[:, np.newaxis]

    return pred_boxes
# Public aliases: select the nonlinear (log-space) parameterization as the
# default bbox encode/decode pair (the IoU variants above are opt-in).
bbox_transform = nonlinear_transform
bbox_pred = nonlinear_pred
7,291 | should stop | import asyncio
import time
from typing import Any, Awaitable, Callable, Optional
from .. import background_tasks, globals # pylint: disable=redefined-builtin
from ..binding import BindableProperty
from ..slot import Slot
class Timer:
    # Bindable so other element properties can be bound to the timer's state.
    active = BindableProperty()
    interval = BindableProperty()

    def __init__(self,
                 interval: float,
                 callback: Callable[..., Any], *,
                 active: bool = True,
                 once: bool = False,
                 ) -> None:
        """Timer

        One major drive behind the creation of NiceGUI was the necessity to have a simple approach to update the interface in regular intervals,
        for example to show a graph with incoming measurements.
        A timer will execute a callback repeatedly with a given interval.

        :param interval: the interval in which the timer is called (can be changed during runtime)
        :param callback: function or coroutine to execute when interval elapses
        :param active: whether the callback should be executed or not (can be changed during runtime)
        :param once: whether the callback is only executed once after a delay specified by `interval` (default: `False`)
        """
        self.interval = interval
        self.callback: Optional[Callable[..., Any]] = callback
        self.active = active
        # Slot captured at construction time so the callback runs in the
        # element context the timer was created in.
        self.slot: Optional[Slot] = globals.get_slot()
        self._is_canceled: bool = False  # set by cancel(); checked by the run loops
        coroutine = self._run_once if once else self._run_in_loop
        if globals.state == globals.State.STARTED:
            background_tasks.create(coroutine(), name=str(callback))
        else:
            # App not running yet: defer the task until startup.
            globals.app.on_startup(coroutine)

    def activate(self) -> None:
        """Activate the timer."""
        assert not self._is_canceled, 'Cannot activate a canceled timer'
        self.active = True

    def deactivate(self) -> None:
        """Deactivate the timer."""
        self.active = False

    def cancel(self) -> None:
        """Cancel the timer."""
        self._is_canceled = True

    async def _run_once(self) -> None:
        """Sleep for one interval, invoke the callback once, then clean up."""
        try:
            if not await self._connected():
                return
            assert self.slot is not None
            with self.slot:
                await asyncio.sleep(self.interval)
                if self.active and not self.METHOD_NAME():
                    await self._invoke_callback()
        finally:
            self._cleanup()

    async def _run_in_loop(self) -> None:
        """Invoke the callback repeatedly until the timer should stop."""
        try:
            if not await self._connected():
                return
            assert self.slot is not None
            with self.slot:
                while not self.METHOD_NAME():
                    try:
                        start = time.time()
                        if self.active:
                            await self._invoke_callback()
                        # Subtract the callback's own runtime to keep a steady cadence.
                        dt = time.time() - start
                        await asyncio.sleep(self.interval - dt)
                    except asyncio.CancelledError:
                        break
                    except Exception as e:
                        globals.handle_exception(e)
                        await asyncio.sleep(self.interval)
        finally:
            self._cleanup()

    async def _invoke_callback(self) -> None:
        """Call the (possibly async) callback, routing errors to the global handler."""
        try:
            assert self.callback is not None
            result = self.callback()
            if isinstance(result, Awaitable):
                await result
        except Exception as e:
            globals.handle_exception(e)

    async def _connected(self, timeout: float = 60.0) -> bool:
        """Wait for the client connection before the timer callback can be allowed to manipulate the state.

        See https://github.com/zauberzeug/nicegui/issues/206 for details.
        Returns True if the client is connected, False if the client is not connected and the timer should be cancelled.
        """
        assert self.slot is not None
        if self.slot.parent.client.shared:
            return True

        # ignore served pages which do not reconnect to backend (e.g. monitoring requests, scrapers etc.)
        try:
            await self.slot.parent.client.connected(timeout=timeout)
            return True
        except TimeoutError:
            globals.log.error(f'Timer cancelled because client is not connected after {timeout} seconds')
            return False

    def METHOD_NAME(self) -> bool:
        """Whether the timer loop should terminate (element deleted, client gone,
        timer canceled, or the app shutting down)."""
        assert self.slot is not None
        return (
            self.slot.parent.is_deleted or
            self.slot.parent.client.id not in globals.clients or
            self._is_canceled or
            globals.state in {globals.State.STOPPING, globals.State.STOPPED}
        )

    def _cleanup(self) -> None:
        # Drop references so the element tree and callback can be garbage-collected.
        self.slot = None
        self.callback = None
7,292 | test load correct file structure | import torchvision.transforms as transforms
import pytest
from pathlib import Path
from common.dataset import loader_from_zipped, errorMessage
from common.constants import UNZIPPED_DIR_NAME
from filecmp import dircmp
import torch
import os
# Relative paths (under tests/) to the zip fixtures exercised below.
# NOTE(review): capital ".Zip" extension — confirm the fixture file name matches.
different_folder = "zip_files/different_folders.Zip"
empty_folder = "zip_files/empty.zip"
double_zipped = "zip_files/double_zipped.zip"
not_zip = "zip_files/not_zip"
num_classes = "zip_files/num_classes.zip"
# Prefix paths with "tests" unless pytest is already running from the tests/ dir.
dir_in = "" if (os.getcwd()).split("\\")[-1].split("/")[-1] == "tests" else "tests"
@pytest.mark.parametrize(
    "filepath,expected",
    [
        (not_zip, errorMessage.CHECK_FILE_STRUCTURE.value),
        (different_folder, errorMessage.CHECK_FILE_STRUCTURE.value),
    ],
)
def test_invalid_file_structure(filepath, expected):
    """Loading a malformed archive must raise ValueError with the structure message."""
    full_path = os.path.join(dir_in, filepath)
    print("os.cwd(): ", os.getcwd())
    print("dir_in: ", dir_in)
    print("filepath: ", full_path)
    with pytest.raises(ValueError) as excinfo:
        loader_from_zipped(full_path)
    assert str(excinfo.value) == expected
@pytest.mark.parametrize(
    "filepath, relative_output_path",
    [(double_zipped, f"{UNZIPPED_DIR_NAME}/input/double_zipped")],
)
def METHOD_NAME(filepath, relative_output_path):
    """Unzipping a valid archive must reproduce the expected directory tree.

    Fix: the original wrapped everything in ``try/except Exception: assert
    False``, which discarded the real error and traceback.  Failures now
    propagate so pytest reports the actual cause.
    """
    expected_filename = filepath.split("/")[-1]
    archive_path = os.path.join(dir_in, "zip_files", expected_filename)
    loader_from_zipped(
        archive_path,
        train_transform=[
            transforms.GaussianBlur(kernel_size=3),
            transforms.ToTensor(),
        ],
    )
    expected_dirname = expected_filename.replace(".zip", "")
    expected_root = os.path.join(dir_in, "expected")
    dcmp = dircmp(relative_output_path, expected_root + "/" + expected_dirname)
    # identical trees -> no files differ between actual and expected output
    assert len(dcmp.diff_files) == 0
@pytest.mark.parametrize(
    "train_transform, valid_transform, filepath",
    [
        (None, [transforms.GaussianBlur(kernel_size=3)], "double_zipped.zip"),
        (
            [
                transforms.GaussianBlur(kernel_size=3),
                transforms.RandomHorizontalFlip(p=0.4),
                transforms.ToTensor(),
            ],
            None,
            double_zipped,
        ),
        (
            [transforms.RandomHorizontalFlip(p=0.9), transforms.ToTensor()],
            [
                transforms.RandomVerticalFlip(p=0.3),
                transforms.GaussianBlur(kernel_size=3),
                transforms.ToTensor(),
            ],
            double_zipped,
        )
        # ([transforms.Normalize(0, 1), transforms.ToTensor()], None, double_zipped)
    ],
)
def check_diff_transforms(train_transform, valid_transform, filepath):
    """Verify train and validation loaders apply different transform pipelines.

    NOTE(review): the name lacks the ``test_`` prefix, so pytest does not
    collect it — confirm whether this is intentional.
    """
    filepath = os.path.join(dir_in, filepath)
    # NOTE(review): the keyword here is ``test_transform`` although the value
    # feeds the validation loader — confirm against loader_from_zipped's API.
    train_loader, valid_loader = loader_from_zipped(
        filepath, test_transform=valid_transform, train_transform=train_transform
    )
    # grab the first sample from each loader
    for data, index in train_loader:
        if index == 0:
            train_data_val = data
            print(train_data_val)
            break
    for data, index in valid_loader:
        if index == 0:
            valid_data_val = data
            break
    # NOTE(review): if index 0 is never yielded these locals stay unbound and
    # this raises UnboundLocalError rather than a clean assertion failure.
    assert not torch.equal(train_data_val, valid_data_val)
@pytest.mark.parametrize(
    "filepath, tensor_array",
    [
        (double_zipped, [transforms.Normalize(0, 1)]),  ## Applying Tensor only
        (
            double_zipped,
            [transforms.RandomVerticalFlip(p=0.3), transforms.ToTensor()],
        ),
        (
            double_zipped,
            [
                transforms.ToTensor(),
                transforms.RandomChoice(transforms=[transforms.Resize((256, 256))]),
            ],
        ),
    ],
)
def check_ordered_transforms(filepath, tensor_array):
    """Transform lists in an invalid order must raise the transform error.

    NOTE(review): the name lacks the ``test_`` prefix, so pytest does not
    collect it — confirm whether this is intentional.
    NOTE(review): unlike the other tests, ``filepath`` is not joined with
    ``dir_in`` here — verify this works when run outside the tests/ dir.
    """
    with pytest.raises(ValueError) as e:
        train_loader, valid_loader = loader_from_zipped(
            filepath, train_transform=tensor_array
        )
    assert str(e.value) == errorMessage.CHECK_TRANSFORM.value
7,293 | test create type | from textwrap import dedent
import pytest
import strawberry
from strawberry.annotation import StrawberryAnnotation
from strawberry.field import StrawberryField
from strawberry.tools import create_type
from strawberry.type import get_object_definition
def METHOD_NAME():
    """create_type builds an output type from a single resolver field."""
    @strawberry.field
    def name() -> str:
        return "foo"

    MyType = create_type("MyType", [name], description="This is a description")
    definition = get_object_definition(MyType, strict=True)

    assert definition.name == "MyType"
    assert definition.description == "This is a description"
    assert definition.is_input is False

    assert len(definition.fields) == 1
    field = definition.fields[0]
    assert field.python_name == "name"
    assert field.graphql_name is None
    assert field.type == str
def test_create_type_extend_and_directives():
    """create_type honours the ``extend`` and ``directives`` arguments."""
    @strawberry.field
    def name() -> str:
        return "foo"

    MyType = create_type(
        "MyType",
        [name],
        description="This is a description",
        extend=True,
        directives=[object()],
    )
    definition = get_object_definition(MyType, strict=True)

    assert definition.name == "MyType"
    assert definition.description == "This is a description"
    assert definition.is_input is False
    assert definition.extend is True
    assert len(list(definition.directives)) == 1

    assert len(definition.fields) == 1
    field = definition.fields[0]
    assert field.python_name == "name"
    assert field.graphql_name is None
    assert field.type == str
def test_create_input_type():
    """create_type with ``is_input=True`` produces an input type."""
    name = StrawberryField(
        python_name="name", type_annotation=StrawberryAnnotation(str)
    )

    MyType = create_type(
        "MyType", [name], is_input=True, description="This is a description"
    )
    definition = get_object_definition(MyType, strict=True)

    assert definition.name == "MyType"
    assert definition.description == "This is a description"
    assert definition.is_input

    assert len(definition.fields) == 1
    field = definition.fields[0]
    assert field.python_name == "name"
    assert field.graphql_name is None
    assert field.type == str
def test_create_interface_type():
    """create_type with ``is_interface=True`` produces an interface type."""
    name = StrawberryField(
        python_name="name", type_annotation=StrawberryAnnotation(str)
    )

    MyType = create_type(
        "MyType", [name], is_interface=True, description="This is a description"
    )
    definition = get_object_definition(MyType, strict=True)

    assert definition.name == "MyType"
    assert definition.description == "This is a description"
    assert definition.is_input is False
    assert definition.is_interface

    assert len(definition.fields) == 1
    field = definition.fields[0]
    assert field.python_name == "name"
    assert field.graphql_name is None
    assert field.type == str
def test_create_variable_type():
    """Fields built from resolvers keep the explicit GraphQL name."""
    def get_name() -> str:
        return "foo"

    name = strawberry.field(name="name", resolver=get_name)
    MyType = create_type("MyType", [name])
    definition = get_object_definition(MyType, strict=True)

    assert len(definition.fields) == 1
    field = definition.fields[0]
    assert field.python_name == "get_name"
    assert field.graphql_name == "name"
    assert field.type == str
def test_create_type_empty_list():
    """An empty field list is rejected with ValueError."""
    no_fields = []
    with pytest.raises(ValueError):
        create_type("MyType", no_fields)
def test_create_type_field_no_name():
    """A field with neither resolver nor name is rejected with ValueError."""
    nameless = strawberry.field()
    with pytest.raises(ValueError):
        create_type("MyType", [nameless])
def test_create_type_field_invalid():
    """Objects that are not StrawberryField instances are rejected."""
    with pytest.raises(TypeError):
        create_type("MyType", [strawberry.type()])
def test_create_mutation_type():
    """Mutations created from resolvers keep their python name."""
    @strawberry.type
    class User:
        username: str

    @strawberry.mutation
    def make_user(info, username: str) -> User:
        return User(username=username)

    Mutation = create_type("Mutation", [make_user])
    definition = get_object_definition(Mutation, strict=True)

    assert len(definition.fields) == 1
    field = definition.fields[0]
    assert field.python_name == "make_user"
    assert field.graphql_name is None
    assert field.type == User
def test_create_mutation_type_with_params():
    """Mutation name/description overrides survive create_type."""
    @strawberry.type
    class User:
        username: str

    @strawberry.mutation(name="makeNewUser", description="Make a new user")
    def make_user(info, username: str) -> User:
        return User(username=username)

    Mutation = create_type("Mutation", [make_user])
    definition = get_object_definition(Mutation, strict=True)

    assert len(definition.fields) == 1
    field = definition.fields[0]
    assert field.python_name == "make_user"
    assert field.graphql_name == "makeNewUser"
    assert field.type == User
    assert field.description == "Make a new user"
def test_create_schema():
    """End-to-end: a Query built via create_type yields a working schema."""
    @strawberry.type
    class User:
        id: strawberry.ID

    @strawberry.field
    def get_user_by_id(info, id: strawberry.ID) -> User:
        return User(id=id)

    Query = create_type("Query", [get_user_by_id])

    schema = strawberry.Schema(query=Query)

    # expected SDL; dedent+strip below makes the comparison tolerant of the
    # common leading indentation of this literal
    sdl = """
    type Query {
      getUserById(id: ID!): User!
    }

    type User {
      id: ID!
    }
    """

    assert dedent(sdl).strip() == str(schema)

    result = schema.execute_sync(
        """
        {
            getUserById(id: "TEST") {
                id
            }
        }
        """
    )
    assert not result.errors
    assert result.data == {"getUserById": {"id": "TEST"}}
7,294 | iq to bytes | from collections import OrderedDict
from multiprocessing import Array
from multiprocessing.connection import Connection
import numpy as np
from urh.dev.native.Device import Device
from urh.dev.native.lib import usrp
class USRP(Device):
    """Device backend for Ettus USRP SDRs.

    All hardware interaction is delegated to the native ``usrp`` extension
    module; samples travel as interleaved float32 I/Q pairs.
    """

    DEVICE_METHODS = Device.DEVICE_METHODS.copy()
    DEVICE_METHODS.update({"SET_SUBDEVICE": "set_subdevice", Device.Command.SET_ANTENNA_INDEX.name: "set_antenna"})

    # number of samples transferred per synchronous receive/send call
    SYNC_RX_CHUNK_SIZE = 16384
    SYNC_TX_CHUNK_SIZE = 16384 * 2

    CONTINUOUS_TX_CHUNK_SIZE = -1  # take everything from queue

    DEVICE_LIB = usrp
    ASYNCHRONOUS = False
    DATA_TYPE = np.float32

    @classmethod
    def get_device_list(cls):
        """Return identifiers of all detected USRP devices."""
        return usrp.find_devices("")

    @classmethod
    def adapt_num_read_samples_to_sample_rate(cls, sample_rate):
        """Scale the RX chunk size with the sample rate (16384 per MS/s)."""
        cls.SYNC_RX_CHUNK_SIZE = 16384 * int(sample_rate / 1e6)

    @classmethod
    def setup_device(cls, ctrl_connection: Connection, device_identifier):
        """Open the device and report the outcome over *ctrl_connection*.

        :return: True when the native open call returned 0.
        """
        ret = usrp.open(device_identifier)
        if device_identifier:
            ctrl_connection.send("OPEN ({}):{}".format(device_identifier, ret))
        else:
            ctrl_connection.send("OPEN:" + str(ret))
        success = ret == 0
        if success:
            device_repr = usrp.get_device_representation()
            ctrl_connection.send(device_repr)
        else:
            ctrl_connection.send(usrp.get_last_error())
        return success

    @classmethod
    def init_device(cls, ctrl_connection: Connection, is_tx: bool, parameters: OrderedDict):
        """Set TX/RX direction, then run the generic parameter initialization."""
        usrp.set_tx(is_tx)
        success = super().init_device(ctrl_connection, is_tx, parameters)
        if success:
            ctrl_connection.send("Current antenna is {} (possible antennas: {})".format(usrp.get_antenna(),
                                                                                       ", ".join(usrp.get_antennas())))
        return success

    @classmethod
    def shutdown_device(cls, ctrl_connection, is_tx: bool):
        """Stop and destroy the stream, close the device, report the result."""
        usrp.stop_stream()
        usrp.destroy_stream()
        ret = usrp.close()
        ctrl_connection.send("CLOSE:" + str(ret))
        return True

    @classmethod
    def prepare_sync_receive(cls, ctrl_connection: Connection):
        """Create the RX stream and start it with the configured chunk size."""
        ctrl_connection.send("Initializing stream...")
        usrp.setup_stream()
        return usrp.start_stream(cls.SYNC_RX_CHUNK_SIZE)

    @classmethod
    def receive_sync(cls, data_conn: Connection):
        """Read one RX chunk from the native stream into *data_conn*."""
        usrp.recv_stream(data_conn, cls.SYNC_RX_CHUNK_SIZE)

    @classmethod
    def prepare_sync_send(cls, ctrl_connection: Connection):
        """Create and start the TX stream, reporting the result code."""
        ctrl_connection.send("Initializing stream...")
        usrp.setup_stream()
        ret = usrp.start_stream(0)
        ctrl_connection.send("Initialize stream:{0}".format(ret))
        return ret

    @classmethod
    def send_sync(cls, data):
        """Write *data* to the TX stream."""
        usrp.send_stream(data)

    def __init__(self, center_freq, sample_rate, bandwidth, gain, if_gain=1, baseband_gain=1,
                 resume_on_full_receive_buffer=False):
        super().__init__(center_freq=center_freq, sample_rate=sample_rate, bandwidth=bandwidth,
                         gain=gain, if_gain=if_gain, baseband_gain=baseband_gain,
                         resume_on_full_receive_buffer=resume_on_full_receive_buffer)
        self.success = 0
        # maps native error codes to user-facing messages
        self.error_codes = {4711: "Antenna index not supported on this device"}
        self.subdevice = ""

    def set_device_gain(self, gain):
        # the UI exposes gain in percent; the native lib expects a 0..1 fraction
        super().set_device_gain(gain * 0.01)

    @property
    def has_multi_device_support(self):
        return True

    @property
    def device_parameters(self):
        """Ordered parameter set replayed to the device process on startup."""
        return OrderedDict([
            ("SET_SUBDEVICE", self.subdevice),
            (self.Command.SET_ANTENNA_INDEX.name, self.antenna_index),
            (self.Command.SET_FREQUENCY.name, self.frequency),
            (self.Command.SET_SAMPLE_RATE.name, self.sample_rate),
            (self.Command.SET_BANDWIDTH.name, self.bandwidth),
            (self.Command.SET_RF_GAIN.name, self.gain * 0.01),
            ("identifier", self.device_serial),
        ])

    @staticmethod
    def bytes_to_iq(buffer):
        """Interpret raw float32 bytes as interleaved I/Q pairs, shape (n, 2)."""
        return np.frombuffer(buffer, dtype=np.float32).reshape((-1, 2), order="C")

    @staticmethod
    def METHOD_NAME(samples: np.ndarray):
        """Flatten (n, 2) float32 samples into a shared multiprocessing Array."""
        arr = Array("f", 2 * len(samples), lock=False)
        numpy_view = np.frombuffer(arr, dtype=np.float32)
        numpy_view[:] = samples.flatten(order="C")
        return arr
7,295 | prepare | """Embedded workers for integration tests."""
import logging
import os
import threading
from contextlib import contextmanager
from typing import Any, Iterable, Union # noqa
import celery.worker.consumer # noqa
from celery import Celery, worker # noqa
from celery.result import _set_task_join_will_block, allow_join_result
from celery.utils.dispatch import Signal
from celery.utils.nodenames import anon_nodename
# Log level used by embedded test workers (overridable via the environment).
WORKER_LOGLEVEL = os.environ.get('WORKER_LOGLEVEL', 'error')

# Signals fired around the embedded worker lifecycle so tests can hook in.
test_worker_starting = Signal(
    name='test_worker_starting',
    providing_args={},
)
test_worker_started = Signal(
    name='test_worker_started',
    providing_args={'worker', 'consumer'},
)
test_worker_stopped = Signal(
    name='test_worker_stopped',
    providing_args={'worker'},
)
class TestWorkController(worker.WorkController):
    """Worker that can synchronize on being fully started."""

    # queue used to forward log records from prefork child processes
    logger_queue = None

    def __init__(self, *args, **kwargs):
        # type: (*Any, **Any) -> None
        self._on_started = threading.Event()

        super().__init__(*args, **kwargs)

        if self.pool_cls.__module__.split('.')[-1] == 'prefork':
            from billiard import Queue
            self.logger_queue = Queue()
            self.pid = os.getpid()

            try:
                from tblib import pickling_support
                pickling_support.install()
            except ImportError:
                pass

            # collect logs from forked process.
            # XXX: those logs will appear twice in the live log
            # NOTE(review): uses ``logging.handlers`` although only ``logging``
            # is imported at the top of this module — confirm it is in scope.
            self.queue_listener = logging.handlers.QueueListener(self.logger_queue, logging.getLogger())
            self.queue_listener.start()

    class QueueHandler(logging.handlers.QueueHandler):
        """Queue handler that tags records so they are not re-forwarded."""

        def METHOD_NAME(self, record):
            # mark the record as having traveled through the queue already
            record.from_queue = True
            # Keep origin record.
            return record

        def handleError(self, record):
            if logging.raiseExceptions:
                raise

    def start(self):
        """Attach the child-process log forwarder (if any), then start."""
        if self.logger_queue:
            handler = self.QueueHandler(self.logger_queue)
            # forward only records from other processes that have not already
            # passed through the queue
            handler.addFilter(lambda r: r.process != self.pid and not getattr(r, 'from_queue', False))
            logger = logging.getLogger()
            logger.addHandler(handler)
        return super().start()

    def on_consumer_ready(self, consumer):
        # type: (celery.worker.consumer.Consumer) -> None
        """Callback called when the Consumer blueprint is fully started."""
        self._on_started.set()
        test_worker_started.send(
            sender=self.app, worker=self, consumer=consumer)

    def ensure_started(self):
        # type: () -> None
        """Wait for worker to be fully up and running.

        Warning:
            Worker must be started within a thread for this to work,
            or it will block forever.
        """
        self._on_started.wait()
@contextmanager
def start_worker(
    app,  # type: Celery
    concurrency=1,  # type: int
    pool='solo',  # type: str
    loglevel=WORKER_LOGLEVEL,  # type: Union[str, int]
    logfile=None,  # type: str
    perform_ping_check=True,  # type: bool
    ping_task_timeout=10.0,  # type: float
    shutdown_timeout=10.0,  # type: float
    **kwargs  # type: Any
):
    # type: (...) -> Iterable
    """Start embedded worker.

    When ``perform_ping_check`` is True a ``celery.ping`` round-trip is
    performed before yielding, proving the worker is responsive.

    Yields:
        celery.app.worker.Worker: worker instance.
    """
    test_worker_starting.send(sender=app)

    worker = None
    try:
        with _start_worker_thread(app,
                                  concurrency=concurrency,
                                  pool=pool,
                                  loglevel=loglevel,
                                  logfile=logfile,
                                  perform_ping_check=perform_ping_check,
                                  shutdown_timeout=shutdown_timeout,
                                  **kwargs) as worker:
            if perform_ping_check:
                from .tasks import ping
                with allow_join_result():
                    assert ping.delay().get(timeout=ping_task_timeout) == 'pong'

            yield worker
    finally:
        # fired even when startup failed; worker is None in that case
        test_worker_stopped.send(sender=app, worker=worker)
@contextmanager
def _start_worker_thread(app,
                         concurrency=1,
                         pool='solo',
                         loglevel=WORKER_LOGLEVEL,
                         logfile=None,
                         WorkController=TestWorkController,
                         perform_ping_check=True,
                         shutdown_timeout=10.0,
                         **kwargs):
    # type: (Celery, int, str, Union[str, int], str, Any, **Any) -> Iterable
    """Start Celery worker in a thread.

    Yields:
        celery.worker.Worker: worker instance.
    """
    setup_app_for_worker(app, loglevel, logfile)
    if perform_ping_check:
        assert 'celery.ping' in app.tasks
    # Make sure we can connect to the broker
    # NOTE(review): ``queue_declare`` is accessed but not called; accessing
    # ``default_channel`` alone forces the connection — confirm the missing
    # call parentheses are intentional.
    with app.connection(hostname=os.environ.get('TEST_BROKER')) as conn:
        conn.default_channel.queue_declare

    worker = WorkController(
        app=app,
        concurrency=concurrency,
        hostname=anon_nodename(),
        pool=pool,
        loglevel=loglevel,
        logfile=logfile,
        # not allowed to override TestWorkController.on_consumer_ready
        ready_callback=None,
        without_heartbeat=kwargs.pop("without_heartbeat", True),
        without_mingle=True,
        without_gossip=True,
        **kwargs)

    t = threading.Thread(target=worker.start, daemon=True)
    t.start()
    worker.ensure_started()
    _set_task_join_will_block(False)

    try:
        yield worker
    finally:
        from celery.worker import state
        # ask the worker loop to terminate, then wait for the thread to exit
        state.should_terminate = 0
        t.join(shutdown_timeout)
        if t.is_alive():
            raise RuntimeError(
                "Worker thread failed to exit within the allocated timeout. "
                "Consider raising `shutdown_timeout` if your tasks take longer "
                "to execute."
            )
        state.should_terminate = None
@contextmanager
def _start_worker_process(app,
                          concurrency=1,
                          pool='solo',
                          loglevel=WORKER_LOGLEVEL,
                          logfile=None,
                          **kwargs):
    # type (Celery, int, str, Union[int, str], str, **Any) -> Iterable
    """Start worker in separate process.

    NOTE(review): concurrency/pool/loglevel/logfile/kwargs are accepted but
    never used here — confirm whether they should be forwarded to the cluster.

    Yields:
        celery.app.worker.Worker: worker instance.
    """
    from celery.apps.multi import Cluster, Node

    app.set_current()
    cluster = Cluster([Node('testworker1@%h')])
    cluster.start()
    try:
        yield
    finally:
        cluster.stopwait()
def setup_app_for_worker(app, loglevel, logfile) -> None:
    # type: (Celery, Union[str, int], str) -> None
    """Setup the app to be used for starting an embedded worker."""
    app.finalize()
    app.set_current()
    app.set_default()
    # clear the "already configured" flag so logging is (re)set up for this app
    type(app.log)._setup = False
    app.log.setup(loglevel=loglevel, logfile=logfile)
7,296 | configure s3 access | # This file is part of the Open Data Cube, see https://opendatacube.org for more information
#
# Copyright (c) 2015-2023 ODC Contributors
# SPDX-License-Identifier: Apache-2.0
""" rasterio environment management tools
"""
import threading
from types import SimpleNamespace
import rasterio # type: ignore[import]
from rasterio.session import AWSSession, DummySession # type: ignore[import]
import rasterio.env # type: ignore[import]
from datacube.utils.generic import thread_local_cache
# Protects replacement of the module-wide default configuration below.
_CFG_LOCK = threading.Lock()
# Default rio/GDAL configuration; ``epoch`` increments on every change so
# per-thread state can detect that it is stale.
_CFG = SimpleNamespace(aws=None, cloud_defaults=False, kwargs={}, epoch=0)

# Config keys whose values are masked by get_rio_env(sanitize=True).
SECRET_KEYS = ("AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN")
def _sanitize(opts, keys):
return {k: (v if k not in keys else "xx..xx") for k, v in opts.items()}
def _state(purge=False):
    """Return this thread's rasterio-environment state record.

    The record has two fields:
      .env    None | rasterio.Env  -- currently activated environment
      .epoch  -1 | +Int            -- _CFG epoch it was configured from

    :param purge: when True, remove the cached record from the thread cache.
    """
    return thread_local_cache(
        "__rio_state__", SimpleNamespace(env=None, epoch=-1), purge=purge
    )
def get_rio_env(sanitize=True):
    """Return the GDAL config options rasterio has applied on this thread.

    :param sanitize: when True, mask values of known secret keys
    """
    env = rasterio.env.local._env  # pylint: disable=protected-access
    if env is None:
        return {}
    options = env.get_config_options()
    return _sanitize(options, SECRET_KEYS) if sanitize else options
def deactivate_rio_env():
    """Exit this thread's rasterio environment, if one was ever activated."""
    previous = _state(purge=True)
    if previous.env is not None:
        previous.env.__exit__(None, None, None)
def activate_rio_env(aws=None, cloud_defaults=False, **kwargs):
    """ Inject activated rasterio.Env into current thread.

    This de-activates previously setup environment.

    :param aws: Dictionary of options for rasterio.session.AWSSession
                OR 'auto' -- session = rasterio.session.AWSSession()

    :param cloud_defaults: When True inject settings for reading COGs
    :param **kwargs: Passed on to rasterio.Env(..) constructor
    """
    session = DummySession()

    if aws is not None:
        if not (aws == "auto" or isinstance(aws, dict)):
            raise ValueError('Only support: None|"auto"|{..} for `aws` parameter')

        aws = {} if aws == "auto" else dict(**aws)
        region_name = aws.get("region_name", "auto")

        if region_name == "auto":
            # local import so boto machinery is only loaded when needed
            from datacube.utils.aws import auto_find_region

            try:
                aws["region_name"] = auto_find_region()
            except ValueError as e:
                # only treat it as error if it was requested by user
                if "region_name" in aws:
                    raise e

        session = AWSSession(**aws)

    opts = (
        dict(
            GDAL_DISABLE_READDIR_ON_OPEN="EMPTY_DIR",
            GDAL_HTTP_MAX_RETRY="10",
            GDAL_HTTP_RETRY_DELAY="0.5",
        )
        if cloud_defaults
        else {}
    )

    opts.update(**kwargs)

    state = _state()
    # exit any previously-activated environment before entering the new one
    if state.env is not None:
        state.env.__exit__(None, None, None)

    env = rasterio.Env(session=session, **opts)

    # enter without `with` so the env stays active until deactivated
    env.__enter__()
    state.env = env
    # epoch -1 marks a manually-activated env, distinct from _CFG epochs
    state.epoch = -1

    return get_rio_env()
def activate_from_config():
    """Re-activate this thread's rasterio env if the default config changed.

    Returns the new env options when reconfiguration happened, else None.
    """
    cfg = _CFG
    state = _state()
    if cfg.epoch == state.epoch:
        return None
    opts = activate_rio_env(
        aws=cfg.aws, cloud_defaults=cfg.cloud_defaults, **cfg.kwargs
    )
    state.epoch = cfg.epoch
    return opts
def set_default_rio_config(aws=None, cloud_defaults=False, **kwargs):
    """ Store default rasterio/GDAL configuration for later activation.

    Doesn't actually activate one, just stores configuration for future
    use from IO threads.

    :param aws: Dictionary of options for rasterio.session.AWSSession
                OR 'auto' -- session = rasterio.session.AWSSession()

    :param cloud_defaults: When True inject settings for reading COGs
    :param **kwargs: Passed on to rasterio.Env(..) constructor
    """
    global _CFG  # pylint: disable=global-statement

    with _CFG_LOCK:
        # bump the epoch so every thread notices the new configuration
        next_epoch = _CFG.epoch + 1
        _CFG = SimpleNamespace(
            aws=aws,
            cloud_defaults=cloud_defaults,
            kwargs=kwargs,
            epoch=next_epoch,
        )
def METHOD_NAME(
    profile=None,
    region_name="auto",
    aws_unsigned=False,
    requester_pays=False,
    cloud_defaults=True,
    client=None,
    **gdal_opts
):
    """Deprecated shim.

    Use :meth:`datacube.utils.aws.configure_s3_access` instead.
    """
    from datacube.utils.aws import METHOD_NAME as _configure

    return _configure(
        profile=profile,
        region_name=region_name,
        aws_unsigned=aws_unsigned,
        requester_pays=requester_pays,
        cloud_defaults=cloud_defaults,
        client=client,
        **gdal_opts
    )
7,297 | flush | from __future__ import annotations
import json
import queue
import typing as t
import threading
from pathlib import Path
from collections import defaultdict
from starwhale.consts import VERSION_PREFIX_CNT
from starwhale.utils.fs import (
ensure_dir,
ensure_file,
blake2b_content,
BLAKE2B_SIGNATURE_ALGO,
)
from starwhale.utils.error import NoSupportError
from starwhale.core.dataset.type import Link, BaseArtifact
from starwhale.api._impl.data_store import TableWriter, LocalDataStore
from .base import (
TrackRecord,
ParamsRecord,
MetricsRecord,
ArtifactsRecord,
MetricsTabularRow,
ArtifactsTabularRow,
HandleQueueElementType,
)
class HandlerThread(threading.Thread):
    """Daemon thread that consumes TrackRecords from a queue and persists them.

    Metrics and artifacts go into per-source datastore tables; params are
    accumulated in memory and dumped to JSON files on flush.
    """

    def __init__(
        self,
        workdir: t.Union[Path, str],
        handle_queue: queue.Queue[HandleQueueElementType],
        sync_queue: t.Optional[queue.Queue[t.Any]] = None,
    ) -> None:
        super().__init__(name="HandlerThread")
        self.handle_queue = handle_queue
        self.sync_queue = sync_queue
        # last exception raised inside run(); surfaced again by close()
        self._run_exception: t.Optional[Exception] = None

        self._workdir = Path(workdir)
        self._data_store = LocalDataStore(str(self._workdir))
        self._table_writers: t.Dict[str, TableWriter] = {}
        self._params: t.Dict[str, t.Dict] = defaultdict(dict)
        # TODO: support non-in-memory artifacts auto-incr counter with datastore
        self._artifacts_counter: t.Dict = defaultdict(int)

        self.daemon = True

    def _raise_run_exception(self) -> None:
        """Re-raise any exception captured by run() as a ThreadError."""
        if self._run_exception is not None:
            raise threading.ThreadError(
                f"HandlerThread raise exception: {self._run_exception}"
            )

    def METHOD_NAME(self) -> None:
        """Persist params and flush every table writer and the datastore."""
        self._dump_params()

        for writer in self._table_writers.values():
            writer.METHOD_NAME()

        self._data_store.dump()

    def close(self) -> None:
        """Signal shutdown, wait for the thread, flush, then surface errors."""
        # None is the shutdown sentinel consumed by run()
        self.handle_queue.put(None)
        self.join()
        self.METHOD_NAME()
        for writer in self._table_writers.values():
            writer.close()
        self._raise_run_exception()

    def _handle_metrics(self, record: MetricsRecord) -> None:
        """Write one metrics record into its per-source table (keyed by step)."""
        row = MetricsTabularRow.from_record(record)
        table_name = f"{record.typ.value}/{record.source.value}"
        self._update_table(table_name, row.asdict(), "__step")

    def _handle_artifacts(self, record: ArtifactsRecord) -> None:
        """Store artifact payloads content-addressed on disk, index in a table."""
        table_name = f"{record.typ.value}/{record.source.value}"
        store_dir = self._workdir / record.typ.value / "_files"

        def _convert_data_to_link(data: BaseArtifact) -> Link:
            # content-address the artifact bytes so identical data is stored once
            raw_content = data.to_bytes()
            sign_name = blake2b_content(raw_content)
            fpath = (
                store_dir
                / BLAKE2B_SIGNATURE_ALGO
                / sign_name[:VERSION_PREFIX_CNT]
                / sign_name
            )
            ensure_file(fpath, raw_content, parents=True)
            return Link(
                uri=sign_name,
                offset=0,
                size=len(raw_content),
                use_plain_type=True,
                data_type=data,
            )

        for k, v in record.data.items():
            if isinstance(v, Link):
                data = v
            elif isinstance(v, BaseArtifact):
                data = _convert_data_to_link(v)
            else:
                raise NoSupportError(
                    f"artifact only accepts BaseArtifact or Link type: {v}"
                )

            row = ArtifactsTabularRow(
                name=k,
                index=self._artifacts_counter[k],
                created_at=record.clock_time,
                data=data,
                link_wrapper=not isinstance(v, Link),
            )
            self._update_table(table_name, row.asdict(), "__key")
            self._artifacts_counter[k] += 1

    def _handle_params(self, record: ParamsRecord) -> None:
        """Merge params into the in-memory per-source mapping (dumped on flush)."""
        self._params[record.source.value].update(record.data)

    def _dump_params(self) -> None:
        """Write each non-empty per-source params mapping to params/<source>.json."""
        params_dir = self._workdir / "params"
        ensure_dir(params_dir)

        for k, v in self._params.items():
            if not v:
                continue
            ensure_file(params_dir / f"{k}.json", json.dumps(v, separators=(",", ":")))

    def _dispatch(self, record: TrackRecord) -> None:
        """Route a record to the handler matching its concrete type."""
        if isinstance(record, MetricsRecord):
            self._handle_metrics(record)
        elif isinstance(record, ParamsRecord):
            self._handle_params(record)
        elif isinstance(record, ArtifactsRecord):
            self._handle_artifacts(record)
        else:
            raise NoSupportError(f"no support to handle {record}({type(record)})")

    def _update_table(self, table_name: str, data: t.Dict, key_column: str) -> None:
        """Insert *data* into *table_name*, creating the writer lazily."""
        if table_name not in self._table_writers:
            self._table_writers[table_name] = TableWriter(
                table_name=table_name,
                key_column=key_column,
                data_store=self._data_store,
                run_exceptions_limits=10,
            )

        writer = self._table_writers[table_name]
        writer.insert(data)

    def run(self) -> None:
        """Consume records until the None sentinel arrives on an empty queue."""
        try:
            while True:
                record = self.handle_queue.get(block=True, timeout=None)
                if record is None:
                    # drain any records still queued behind the sentinel
                    if self.handle_queue.qsize() > 0:
                        continue
                    else:
                        break  # pragma: no cover

                self._dispatch(record)
        except Exception as e:
            # remember the failure so close() can re-raise it in the caller
            self._run_exception = e
            raise
7,298 | check and get bufsize | #!/usr/bin/env python3
# Copyright (C) 2017 Alexandre Abadie <alexandre.abadie@inria.fr>
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
import sys
import os
from testrunner import run
EXPECTED_HELP = (
'Command Description',
'---------------------------------------',
'bufsize Get the shell\'s buffer size',
'start_test starts a test',
'end_test ends a test',
'echo prints the input command',
'empty print nothing on command',
'app_metadata Returns application metadata',
'pm interact with layered PM subsystem',
'ps Prints information about running threads.',
'reboot Reboot the node',
'version Prints current RIOT_VERSION',
'xfa_test1 xfa test command 1',
'xfa_test2 xfa test command 2'
)
EXPECTED_PS = (
'\tpid | state Q | pri',
r'\t \d | running Q | 7'
)
RIOT_TERMINAL = os.environ.get('RIOT_TERMINAL')
CLEANTERMS = {"socat"}
TESTRUNNER_SHELL_SKIP_REBOOT = bool(int(os.environ.get('TESTRUNNER_SHELL_SKIP_REBOOT') or 0))
# In native we are directly executing the binary (no terminal program). We must
# therefore use Ctrl-V (DLE or "data link escape") before Ctrl-C to send a
# literal ETX instead of SIGINT.
# When using a board it is also a problem because we are using a "user friendly"
# terminal that interprets Ctrl-C. So until we have rawterm we must also use
# ctrl-v in boards.
DLE = '\x16'
CONTROL_C = DLE+'\x03'
CONTROL_D = DLE+'\x04'
PROMPT = '> '
CMDS = (
('start_test', '[TEST_START]'),
# test empty line input
('\n', PROMPT),
# test simple word separation
('echo a string', '"echo""a""string"'),
('echo multiple spaces between argv', '"echo""multiple""spaces""between""argv"'),
('echo \t tabs\t\t processed \t\tlike\t \t\tspaces', '"echo""tabs""processed""like""spaces"'),
# test unknown commands
('unknown_command', 'shell: command not found: unknown_command'),
# test leading/trailing BLANK
(' echo leading spaces', '"echo""leading""spaces"'),
('\t\t\t\t\techo leading tabs', '"echo""leading""tabs"'),
('echo trailing spaces ', '"echo""trailing""spaces"'),
('echo trailing tabs\t\t\t\t\t', '"echo""trailing""tabs"'),
# test backspace
('hello-willy\b\b\b\borld', 'shell: command not found: hello-world'),
('\b\b\b\becho', '"echo"'),
# test escaping
('echo \\\'', '"echo""\'"'),
('echo \\"', '"echo""""'),
('echo escaped\\ space', '"echo""escaped space"'),
('echo escape within \'\\s\\i\\n\\g\\l\\e\\q\\u\\o\\t\\e\'', '"echo""escape""within""singlequote"'),
('echo escape within "\\d\\o\\u\\b\\l\\e\\q\\u\\o\\t\\e"', '"echo""escape""within""doublequote"'),
("""echo "t\e st" "\\"" '\\'' a\ b""", '"echo""te st"""""\'""a b"'), # noqa: W605
# test correct quoting
('echo "hello"world', '"echo""helloworld"'),
('echo hel"lowo"rld', '"echo""helloworld"'),
('echo hello"world"', '"echo""helloworld"'),
('echo quoted space " "', '"echo""quoted""space"" "'),
('echo abc"def\'ghijk"lmn', '"echo""abcdef\'ghijklmn"'),
('echo abc\'def"ghijk\'lmn', '"echo""abcdef"ghijklmn"'),
('echo "\'" \'"\'', '"echo""\'""""'),
# test incorrect quoting
('echo a\\', 'shell: incorrect quoting'),
('echo "', 'shell: incorrect quoting'),
('echo \'', 'shell: incorrect quoting'),
('echo abcdef"ghijklmn', 'shell: incorrect quoting'),
('echo abcdef\'ghijklmn', 'shell: incorrect quoting'),
# test default commands
('ps', EXPECTED_PS),
('help', EXPECTED_HELP),
# test commands added to shell_commands_xfa
('xfa_test1', '[XFA TEST 1 OK]'),
('xfa_test2', '[XFA TEST 2 OK]'),
# test reboot
('reboot', 'test_shell.'),
('end_test', '[TEST_END]'),
)
CMDS_CLEANTERM = {
(CONTROL_C, PROMPT),
}
CMDS_REGEX = {'ps'}
BOARD = os.environ['BOARD']
# there's an issue with some boards' serial that causes lost characters.
LINE_EXCEEDED_BLACKLIST = {
# There is an issue with nrf52dk when the Virtual COM port is connected
# and sending more than 64 bytes over UART. If no terminal is connected
# to the Virtual COM and interfacing directly to the nrf52832 UART pins
# the issue is not present. See issue #10639 on GitHub.
'nrf52dk',
'z1',
}
def print_error(message):
    """Print *message* to stdout wrapped in ANSI red."""
    red = '\033[91m'
    reset = '\033[0m'
    print(f"{red}{message}{reset}")
def check_cmd(child, cmd, expected):
    """Send *cmd* to the shell and verify every expected output line.

    Lines are matched literally unless *cmd* is listed in CMDS_REGEX.
    """
    exact = cmd not in CMDS_REGEX
    child.expect(PROMPT)
    child.sendline(cmd)
    for line in expected:
        if exact:
            child.expect_exact(line)
        else:
            child.expect(line)
def check_startup(child):
    """Send Ctrl-C and wait for a prompt to confirm the shell is responsive."""
    child.sendline(CONTROL_C)
    child.expect_exact(PROMPT)
def METHOD_NAME(child):
    """Query the shell's line-buffer size via `bufsize` and return it as int."""
    child.sendline('bufsize')
    child.expect('([0-9]+)\r\n')
    bufsize = int(child.match.group(1))
    return bufsize
def check_line_exceeded(child, longline):
    """Verify over-long input is rejected (skipped on boards with lossy UART)."""
    if BOARD in LINE_EXCEEDED_BLACKLIST:
        print_error('test case "check_line_exceeded" blacklisted, SKIP')
        return

    child.sendline(longline)
    child.expect('shell: maximum line length exceeded')
def check_line_canceling(child):
    """Verify Ctrl-C cancels the current input line without executing it."""
    child.expect(PROMPT)
    child.sendline('garbage1234' + CONTROL_C)
    # only the echoed (canceled) input should come back, never command output
    garbage_expected = 'garbage1234\r\r\n'
    garbage_received = child.read(len(garbage_expected))

    assert garbage_expected == garbage_received
def check_erase_long_line(child, longline):
    """Type a buffer-filling line, erase it with backspaces, then run `echo`."""
    # FIXME: this only works on native, due to #10634 combined with socat
    # insisting in line-buffering the terminal.

    if BOARD == 'native':
        longline_erased = longline + "\b"*len(longline) + "echo"
        child.sendline(longline_erased)
        child.expect_exact('"echo"')
def check_control_d(child):
    """Verify Ctrl-D exits the shell and that it respawns where expected."""
    # The current shell instance was initiated by shell_run_once(). The shell will exit.
    child.sendline(CONTROL_D)
    child.expect_exact('shell exited')

    # The current shell instance was initiated by shell_run(). The shell will respawn
    # automatically except on native. On native, RIOT is shut down completely,
    # therefore exclude this part.
    if BOARD != 'native':
        child.sendline(CONTROL_D)
        child.expect_exact(PROMPT)
def testfunc(child):
    """Run the full interactive shell test sequence against *child*."""
    if BOARD == 'native':
        # avoid sending an extra empty line on native.
        child.crlf = '\n'
    bufsize = METHOD_NAME(child)
    longline = "_" * (bufsize - len("verylong")) + "verylong"
    check_line_exceeded(child, longline)
    # line canceling and the cleanterm command set both need a terminal
    # that does not mangle control characters.
    have_cleanterm = RIOT_TERMINAL in CLEANTERMS
    if have_cleanterm:
        check_line_canceling(child)
    else:
        print("skipping check_line_canceling()")
    check_erase_long_line(child, longline)
    check_control_d(child)
    # loop other defined commands and expected output
    for command, expected_output in CMDS:
        if command == "reboot" and TESTRUNNER_SHELL_SKIP_REBOOT:
            continue
        check_cmd(child, command, expected_output)
    if have_cleanterm:
        for command, expected_output in CMDS_CLEANTERM:
            check_cmd(child, command, expected_output)
    else:
        print("skipping cleanterm tests")
# Entry point: hand testfunc to the test runner's run() helper and exit
# with its status code.
if __name__ == "__main__":
    sys.exit(run(testfunc))
7,299 | expect sqlalchemy deprecated | from __future__ import annotations
import contextlib
import re
import sys
from typing import Any
from typing import Dict
from sqlalchemy import exc as sa_exc
from sqlalchemy.engine import default
from sqlalchemy.testing.assertions import _expect_warnings
from sqlalchemy.testing.assertions import eq_ # noqa
from sqlalchemy.testing.assertions import is_ # noqa
from sqlalchemy.testing.assertions import is_false # noqa
from sqlalchemy.testing.assertions import is_not_ # noqa
from sqlalchemy.testing.assertions import is_true # noqa
from sqlalchemy.testing.assertions import ne_ # noqa
from sqlalchemy.util import decorator
from ..util import sqla_compat
def _assert_proper_exception_context(exception):
"""assert that any exception we're catching does not have a __context__
without a __cause__, and that __suppress_context__ is never set.
Python 3 will report nested as exceptions as "during the handling of
error X, error Y occurred". That's not what we want to do. we want
these exceptions in a cause chain.
"""
if (
exception.__context__ is not exception.__cause__
and not exception.__suppress_context__
):
assert False, (
"Exception %r was correctly raised but did not set a cause, "
"within context %r as its cause."
% (exception, exception.__context__)
)
def assert_raises(except_cls, callable_, *args, **kw):
    """Assert that ``callable_(*args, **kw)`` raises *except_cls*.

    Also verifies the raised exception has a proper ``__cause__`` chain
    (``check_context=True``).  Returns the caught exception.
    """
    return _assert_raises(except_cls, callable_, args, kw, check_context=True)


def assert_raises_context_ok(except_cls, callable_, *args, **kw):
    """Like ``assert_raises`` but without the exception-context check."""
    return _assert_raises(except_cls, callable_, args, kw)


def assert_raises_message(except_cls, msg, callable_, *args, **kwargs):
    """Assert the call raises *except_cls* with a message matching *msg*.

    *msg* is a regular expression searched against ``str(exception)``.
    The exception-context check is also performed.  Returns the exception.
    """
    return _assert_raises(
        except_cls, callable_, args, kwargs, msg=msg, check_context=True
    )


def assert_raises_message_context_ok(
    except_cls, msg, callable_, *args, **kwargs
):
    """Like ``assert_raises_message`` but without the context check."""
    return _assert_raises(except_cls, callable_, args, kwargs, msg=msg)
def _assert_raises(
    except_cls, callable_, args, kwargs, msg=None, check_context=False
):
    """Invoke *callable_* under ``_expect_raises`` and return the exception."""
    with _expect_raises(except_cls, msg, check_context) as holder:
        callable_(*args, **kwargs)
    return holder.error
class _ErrorContainer:
    """Mutable holder used by ``_expect_raises`` to hand the caught
    exception back to the caller of the context manager."""

    # the exception caught inside the context manager, or None
    error: Any = None
@contextlib.contextmanager
def _expect_raises(except_cls, msg=None, check_context=False):
    """Context manager asserting the enclosed block raises *except_cls*.

    :param except_cls: exception class (or tuple) the block must raise.
    :param msg: optional regular expression searched against ``str(err)``.
    :param check_context: when True and no exception is already being
        handled, verify the raised exception has a proper cause chain via
        ``_assert_proper_exception_context``.

    Yields an ``_ErrorContainer`` whose ``error`` attribute receives the
    caught exception.  Raises AssertionError if nothing was raised.
    """
    ec = _ErrorContainer()
    if check_context:
        # non-None when we are already inside an exception handler; the
        # cause-chain check is skipped in that case.
        are_we_already_in_a_traceback = sys.exc_info()[0]
    try:
        yield ec
        success = False
    except except_cls as err:
        ec.error = err
        success = True
        if msg is not None:
            assert re.search(msg, str(err), re.UNICODE), f"{msg} !~ {err}"
        if check_context and not are_we_already_in_a_traceback:
            _assert_proper_exception_context(err)
        print(str(err).encode("utf-8"))
    # assert outside the block so it works for AssertionError too !
    assert success, "Callable did not raise an exception"
def expect_raises(except_cls, check_context=True):
    """Context-manager form of ``assert_raises``."""
    return _expect_raises(except_cls, check_context=check_context)


def expect_raises_message(except_cls, msg, check_context=True):
    """Context-manager form of ``assert_raises_message``; *msg* is a regex."""
    return _expect_raises(except_cls, msg=msg, check_context=check_context)
def eq_ignore_whitespace(a, b, msg=None):
    """Assert *a* equals *b* after normalizing whitespace.

    Leading whitespace and newlines are stripped and runs of spaces are
    collapsed to a single space before comparison.
    """
    def _normalize(text):
        text = re.sub(r"^\s+?|\n", "", text)
        return re.sub(r" {2,}", " ", text)

    left = _normalize(a)
    right = _normalize(b)
    assert left == right, msg or "%r != %r" % (left, right)
_dialect_mods: Dict[Any, Any] = {}
def _get_dialect(name):
    """Return a dialect instance for *name*.

    ``None`` or ``"default"`` yields a plain ``DefaultDialect``; otherwise
    the dialect is resolved from a URL built for *name*, with test-suite
    specific flags applied for postgresql and mssql.
    """
    if name in (None, "default"):
        return default.DefaultDialect()
    dialect = sqla_compat._create_url(name).get_dialect()()
    if name == "postgresql":
        dialect.implicit_returning = True
    elif name == "mssql":
        dialect.legacy_schema_aliasing = False
    return dialect
def expect_warnings(*messages, **kw):
    """Context manager which expects one or more warnings.

    With no arguments, squelches all SAWarnings emitted via
    sqlalchemy.util.warn and sqlalchemy.util.warn_limited. Otherwise
    pass string expressions that will match selected warnings via regex;
    all non-matching warnings are sent through.

    The expect version **asserts** that the warnings were in fact seen.

    Note that the test suite sets SAWarning warnings to raise exceptions.
    """
    # messages is forwarded as a single tuple, matching the
    # _expect_warnings(exc_cls, messages, ...) signature.
    return _expect_warnings(Warning, messages, **kw)
def emits_python_deprecation_warning(*messages):
    """Decorator form of expect_warnings() for Python DeprecationWarning.

    Note that emits_warning does **not** assert that the warnings
    were in fact seen.
    """

    @decorator
    def decorate(fn, *args, **kw):
        # Pass ``messages`` as one tuple argument.  The previous call used
        # ``*messages``, which bound only the first message to the
        # ``messages`` parameter of _expect_warnings and spilled any
        # further messages into its subsequent positional parameters
        # (and raised TypeError when called with no messages at all).
        # This now matches how expect_warnings() passes the tuple.
        with _expect_warnings(DeprecationWarning, messages, assert_=False):
            return fn(*args, **kw)

    return decorate
def METHOD_NAME(*messages, **kw):
    """Expect SQLAlchemy deprecation warnings matching *messages* regexes."""
    return _expect_warnings(sa_exc.SADeprecationWarning, messages, **kw)


def expect_sqlalchemy_deprecated_20(*messages, **kw):
    """Expect SQLAlchemy 2.0-removal warnings (RemovedIn20Warning)."""
    return _expect_warnings(sa_exc.RemovedIn20Warning, messages, **kw)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.