id int64 0 300k | label stringlengths 1 74 ⌀ | text stringlengths 4k 8k |
|---|---|---|
4,600 | arc create job file factory | # coding: utf-8
"""
ARC remote workflow implementation. See http://www.nordugrid.org/arc/ce.
"""
__all__ = ["ARCWorkflow"]
import os
from abc import abstractmethod
from collections import OrderedDict
from law.workflow.remote import BaseRemoteWorkflow, BaseRemoteWorkflowProxy
from law.job.base import JobArguments, JobInputFile, DeprecatedInputFiles
from law.task.proxy import ProxyCommand
from law.target.file import get_path
from law.parameter import CSVParameter
from law.util import law_src_path, merge_dicts, DotDict
from law.logger import get_logger
from law.contrib.arc.job import ARCJobManager, ARCJobFileFactory
logger = get_logger(__name__)
class ARCWorkflowProxy(BaseRemoteWorkflowProxy):
    """
    Workflow proxy that submits branch tasks as jobs to ARC computing elements.
    """

    # identifier under which this proxy is registered / selected
    workflow_type = "arc"

    def __init__(self, *args, **kwargs):
        super(ARCWorkflowProxy, self).__init__(*args, **kwargs)

        # check if there is at least one ce
        if not self.task.arc_ce:
            raise Exception("please set at least one arc computing element (--arc-ce)")

    def create_job_manager(self, **kwargs):
        # hook: the task decides which job manager implementation to use
        return self.task.arc_create_job_manager(**kwargs)

    def create_job_file_factory(self, **kwargs):
        # hook: the task decides which job file factory implementation to use
        return self.task.METHOD_NAME(**kwargs)

    def create_job_file(self, job_num, branches):
        """
        Create the ARC job description file for job *job_num* covering the branch
        numbers in *branches*, and return a dict with keys "job" (the created job
        file) and "log" (the absolute custom log file URI, or None).
        """
        task = self.task

        # the file postfix is pythonic range made from branches, e.g. [0, 1, 2, 4] -> "_0To5"
        postfix = "_{}To{}".format(branches[0], branches[-1] + 1)

        # create the config
        c = self.job_file_factory.get_config()
        c.input_files = DeprecatedInputFiles()
        c.output_files = []
        c.render_variables = {}
        c.custom_content = []

        # get the actual wrapper file that will be executed by the remote job
        wrapper_file = task.arc_wrapper_file()
        law_job_file = task.arc_job_file()
        if wrapper_file and get_path(wrapper_file) != get_path(law_job_file):
            c.input_files["executable_file"] = wrapper_file
            c.executable = wrapper_file
        else:
            c.executable = law_job_file
        c.input_files["job_file"] = law_job_file

        # collect task parameters that must not be forwarded to the remote invocation
        exclude_args = (
            task.exclude_params_branch |
            task.exclude_params_workflow |
            task.exclude_params_remote_workflow |
            task.exclude_params_arc_workflow |
            {"workflow"}
        )
        proxy_cmd = ProxyCommand(
            task.as_branch(branches[0]),
            exclude_task_args=exclude_args,
            exclude_global_args=["workers", "local-scheduler"],
        )
        if task.arc_use_local_scheduler():
            proxy_cmd.add_arg("--local-scheduler", "True", overwrite=True)
        for key, value in OrderedDict(task.arc_cmdline_args()).items():
            proxy_cmd.add_arg(key, value, overwrite=True)

        # job script arguments
        job_args = JobArguments(
            task_cls=task.__class__,
            task_params=proxy_cmd.build(skip_run=True),
            branches=branches,
            workers=task.job_workers,
            auto_retry=False,
            dashboard_data=self.dashboard.remote_hook_data(
                job_num, self.job_data.attempts.get(job_num, 0)),
        )
        c.arguments = job_args.join()

        # add the bootstrap file
        bootstrap_file = task.arc_bootstrap_file()
        if bootstrap_file:
            c.input_files["bootstrap_file"] = bootstrap_file

        # add the stageout file
        stageout_file = task.arc_stageout_file()
        if stageout_file:
            c.input_files["stageout_file"] = stageout_file

        # does the dashboard have a hook file?
        dashboard_file = self.dashboard.remote_hook_file()
        if dashboard_file:
            c.input_files["dashboard_file"] = dashboard_file

        # log files; when transfer_logs is set, stdout and stderr share one file
        c.log = None
        c.stdout = None
        c.stderr = None
        if task.transfer_logs:
            log_file = "stdall.txt"
            c.stdout = log_file
            c.stderr = log_file
            c.custom_log_file = log_file

        # meta infos
        c.job_name = "{}{}".format(task.live_task_id, postfix)
        c.output_uri = task.arc_output_uri()

        # task hook to amend the config before the job file is written
        c = task.arc_job_config(c, job_num, branches)

        # build the job file and get the sanitized config
        job_file, config = self.job_file_factory(postfix=postfix, **c.__dict__)

        # determine the custom log file uri if set
        abs_log_file = None
        if config.custom_log_file:
            abs_log_file = os.path.join(config.output_uri, config.custom_log_file)

        # return job and log files
        return {"job": job_file, "log": abs_log_file}

    def destination_info(self):
        # extend the generic destination info with the configured computing element(s)
        info = super(ARCWorkflowProxy, self).destination_info()
        info["ce"] = "ce: {}".format(",".join(self.task.arc_ce))
        info = self.task.arc_destination_info(info)
        return info
class ARCWorkflow(BaseRemoteWorkflow):
    """
    Remote workflow mixin that submits jobs to ARC computing elements. The
    ``arc_*`` methods below are hooks that subclasses can override to customize
    submission; only the output directory and bootstrap file are mandatory.
    """

    workflow_proxy_cls = ARCWorkflowProxy

    # optional decorators applied around the remote run, and default kwargs for
    # the job manager / job file factory (None means "no defaults")
    arc_workflow_run_decorators = None
    arc_job_manager_defaults = None
    arc_job_file_factory_defaults = None

    arc_ce = CSVParameter(
        default=(),
        significant=False,
        description="target arc computing element(s); default: empty",
    )

    # task attributes forwarded as kwargs to job manager calls, per operation
    arc_job_kwargs = []
    arc_job_kwargs_submit = ["arc_ce"]
    arc_job_kwargs_cancel = None
    arc_job_kwargs_cleanup = None
    arc_job_kwargs_query = None

    exclude_params_branch = {"arc_ce"}
    exclude_params_arc_workflow = set()

    exclude_index = True

    @abstractmethod
    def arc_output_directory(self):
        # target directory where job outputs are stored; must be implemented
        return None

    @abstractmethod
    def arc_bootstrap_file(self):
        # file executed on the remote node before the actual job payload; must be implemented
        return None

    def arc_wrapper_file(self):
        # optional wrapper executable around the law job file
        return None

    def arc_job_file(self):
        # the generic law job script shipped with the law package
        return JobInputFile(law_src_path("job", "law_job.sh"))

    def arc_stageout_file(self):
        # optional file executed after the job payload for stage-out
        return None

    def arc_workflow_requires(self):
        # additional workflow-level requirements
        return DotDict()

    def arc_output_postfix(self):
        return ""

    def arc_output_uri(self):
        return self.arc_output_directory().uri()

    def arc_job_manager_cls(self):
        return ARCJobManager

    def arc_create_job_manager(self, **kwargs):
        # job manager config priority: kwargs > class defaults
        kwargs = merge_dicts(self.arc_job_manager_defaults, kwargs)
        return self.arc_job_manager_cls()(**kwargs)

    def arc_job_file_factory_cls(self):
        return ARCJobFileFactory

    def METHOD_NAME(self, **kwargs):
        # job file factory config priority: kwargs > class defaults
        kwargs = merge_dicts({}, self.arc_job_file_factory_defaults, kwargs)
        return self.arc_job_file_factory_cls()(**kwargs)

    def arc_job_config(self, config, job_num, branches):
        # hook to amend the job config right before the job file is written
        return config

    def arc_check_job_completeness(self):
        return False

    def arc_check_job_completeness_delay(self):
        return 0.0

    def arc_use_local_scheduler(self):
        return True

    def arc_cmdline_args(self):
        # extra command line arguments added to the remote task invocation
        return {}

    def arc_destination_info(self, info):
        # hook to extend the destination info shown in status messages
        return info
4,601 | test not found | import time
from http.client import BadStatusLine
from unittest import mock
from urllib.error import HTTPError, URLError
from ssl import CertificateError
from socket import timeout, error
import pytest
from django.utils.timezone import now
from datetime import timedelta
from django.core.management import call_command
from mirrors.models import CheckLocation, MirrorLog
from mirrors.tests.conftest import HOSTNAME, PROTOCOL
def mocked_request(urlopen, Request, return_value):
    """Configure the patched ``Request``/``urlopen`` mocks to simulate a
    successful fetch whose ``read()`` returns *return_value*."""
    Request.get_host.return_value = HOSTNAME
    Request.type.return_value = PROTOCOL
    urlopen.return_value.read.return_value = return_value
def mocked_request_exception(urlopen, Request, excp):
    """Configure the patched ``Request``/``urlopen`` mocks so that calling
    ``read()`` raises *excp*."""
    Request.get_host.return_value = HOSTNAME
    Request.type.return_value = PROTOCOL
    urlopen.return_value.read.side_effect = excp
@mock.patch('urllib.request.Request')
@mock.patch('urllib.request.urlopen')
def test_invalid(urlopen, Request, mirrorurl):
    """A response body that is not an epoch timestamp is logged as a failed check."""
    mocked_request(urlopen, Request, 'data')
    call_command('mirrorcheck')
    mirrorlog = MirrorLog.objects.first()
    assert mirrorlog.error != ''
    assert not mirrorlog.is_success
@mock.patch('urllib.request.Request')
@mock.patch('urllib.request.urlopen')
def test_valid(urlopen, Request, mirrorurl):
    """A response containing the current epoch time is logged as a successful check."""
    mocked_request(urlopen, Request, str(int(time.time())))
    call_command('mirrorcheck')
    mirrorlog = MirrorLog.objects.first()
    assert mirrorlog.error == ''
    assert mirrorlog.is_success
@mock.patch('urllib.request.Request')
@mock.patch('urllib.request.urlopen')
def test_valid_olddate(urlopen, Request, mirrorurl):
    """A 600-day-old log entry does not survive a check run; only one log remains."""
    mocked_request(urlopen, Request, str(int(time.time())))
    date = now() - timedelta(days=600)
    MirrorLog.objects.create(url=mirrorurl, check_time=date)
    call_command('mirrorcheck')
    assert len(MirrorLog.objects.all()) == 1
@mock.patch('urllib.request.Request')
@mock.patch('urllib.request.urlopen')
def METHOD_NAME(urlopen, Request, mirrorurl):
    """An HTTPError (404) raised during the fetch is stored verbatim as the error."""
    excp = HTTPError('https://archlinux.org/404.txt', 404, 'Not Found', '', None)
    mocked_request_exception(urlopen, Request, excp)
    call_command('mirrorcheck')
    mirrorlog = MirrorLog.objects.first()
    assert mirrorlog.error == str(excp)
    assert not mirrorlog.is_success
@mock.patch('urllib.request.Request')
@mock.patch('urllib.request.urlopen')
def test_not_found_variant(urlopen, Request, mirrorurl):
    """A BadStatusLine is reported via the generic 'Exception in processing' message."""
    excp = BadStatusLine('')
    mocked_request_exception(urlopen, Request, excp)
    call_command('mirrorcheck')
    mirrorlog = MirrorLog.objects.first()
    assert 'Exception in processing' in mirrorlog.error
    assert not mirrorlog.is_success
@mock.patch('urllib.request.Request')
@mock.patch('urllib.request.urlopen')
def test_cert_error(urlopen, Request, mirrorurl):
    """SSL certificate errors are surfaced in the stored error text."""
    excp = CertificateError('certificate error')
    mocked_request_exception(urlopen, Request, excp)
    call_command('mirrorcheck')
    mirrorlog = MirrorLog.objects.first()
    assert 'certificate error' in mirrorlog.error
    assert not mirrorlog.is_success
@mock.patch('urllib.request.Request')
@mock.patch('urllib.request.urlopen')
def test_general_httpexception(urlopen, Request, mirrorurl):
    """A URLError's reason string is included in the stored error text."""
    excp = URLError('550 No such file', '550.txt')
    mocked_request_exception(urlopen, Request, excp)
    call_command('mirrorcheck')
    mirrorlog = MirrorLog.objects.first()
    assert excp.reason in mirrorlog.error
    assert not mirrorlog.is_success
@mock.patch('urllib.request.Request')
@mock.patch('urllib.request.urlopen')
def test_socket_timeout(urlopen, Request, mirrorurl):
    """Socket timeouts are reported as 'Connection timed out.'."""
    excp = timeout('timeout')
    mocked_request_exception(urlopen, Request, excp)
    call_command('mirrorcheck')
    mirrorlog = MirrorLog.objects.first()
    assert 'Connection timed out.' in mirrorlog.error
    assert not mirrorlog.is_success
@mock.patch('urllib.request.Request')
@mock.patch('urllib.request.urlopen')
def test_socket_error(urlopen, Request, mirrorurl):
    """Generic socket errors are stored in stringified form."""
    excp = error('error')
    mocked_request_exception(urlopen, Request, excp)
    call_command('mirrorcheck')
    mirrorlog = MirrorLog.objects.first()
    assert str(excp) in mirrorlog.error
    assert not mirrorlog.is_success
def test_checklocation_fail(db):
    """Passing a check location id that does not exist raises DoesNotExist."""
    with pytest.raises(CheckLocation.DoesNotExist) as e:
        call_command('mirrorcheck', '-l', '1')
    assert 'CheckLocation matching query does not exist.' == str(e.value)
def test_checklocation_model(checklocation):
    """With a check location configured, the command logs progress via logger.info."""
    with mock.patch('mirrors.management.commands.mirrorcheck.logger') as logger:
        call_command('mirrorcheck', '-l', '1')
        logger.info.assert_called()
4,602 | handler | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
# NOTE: generated by aaz-dev-tools — keep manual edits limited to comments.
@register_command(
    "mobile-network slice list",
)
class List(AAZCommand):
    """List all slices in the mobile network.

    :example: List slice by resource group
        az mobile-network slice list --mobile-network-name mobile-network-name -g rg
    """

    # resource metadata consumed by the aaz runtime
    _aaz_info = {
        "version": "2022-11-01",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.mobilenetwork/mobilenetworks/{}/slices", "2022-11-01"],
        ]
    }

    def METHOD_NAME(self, command_args):
        # command entry point; returns a pager over the list operation
        super().METHOD_NAME(command_args)
        return self.build_paging(self._execute_operations, self._output)

    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        # build (and cache) the CLI argument schema
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""
        _args_schema = cls._args_schema
        _args_schema.mobile_network_name = AAZStrArg(
            options=["--mobile-network-name"],
            help="The name of the mobile network.",
            required=True,
            fmt=AAZStrArgFormat(
                pattern="^[a-zA-Z0-9][a-zA-Z0-9_-]*$",
                max_length=64,
            ),
        )
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        return cls._args_schema

    def _execute_operations(self):
        self.pre_operations()
        self.SlicesListByMobileNetwork(ctx=self.ctx)()
        self.post_operations()

    @register_callback
    def pre_operations(self):
        pass

    @register_callback
    def post_operations(self):
        pass

    def _output(self, *args, **kwargs):
        # deserialize the accumulated page content plus the paging link
        result = self.deserialize_output(self.ctx.vars.instance.value, client_flatten=True)
        next_link = self.deserialize_output(self.ctx.vars.instance.next_link)
        return result, next_link

    class SlicesListByMobileNetwork(AAZHttpOperation):
        # HTTP operation: GET .../mobileNetworks/{name}/slices
        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            if session.http_response.status_code in [200]:
                return self.on_200(session)
            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileNetwork/mobileNetworks/{mobileNetworkName}/slices",
                **self.url_parameters
            )

        @property
        def method(self):
            return "GET"

        @property
        def error_format(self):
            return "MgmtErrorFormat"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "mobileNetworkName", self.ctx.args.mobile_network_name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2022-11-01",
                    required=True,
                ),
            }
            return parameters

        @property
        def header_parameters(self):
            parameters = {
                **self.serialize_header_param(
                    "Accept", "application/json",
                ),
            }
            return parameters

        def on_200(self, session):
            data = self.deserialize_http_content(session)
            self.ctx.set_var(
                "instance",
                data,
                schema_builder=self._build_schema_on_200
            )

        _schema_on_200 = None

        @classmethod
        def _build_schema_on_200(cls):
            # build (and cache) the deserialization schema for 200 responses
            if cls._schema_on_200 is not None:
                return cls._schema_on_200

            cls._schema_on_200 = AAZObjectType()

            _schema_on_200 = cls._schema_on_200
            _schema_on_200.next_link = AAZStrType(
                serialized_name="nextLink",
                flags={"read_only": True},
            )
            _schema_on_200.value = AAZListType()

            value = cls._schema_on_200.value
            value.Element = AAZObjectType()

            _element = cls._schema_on_200.value.Element
            _element.id = AAZStrType(
                flags={"read_only": True},
            )
            _element.location = AAZStrType(
                flags={"required": True},
            )
            _element.name = AAZStrType(
                flags={"read_only": True},
            )
            _element.properties = AAZObjectType(
                flags={"required": True, "client_flatten": True},
            )
            _element.system_data = AAZObjectType(
                serialized_name="systemData",
                flags={"read_only": True},
            )
            _element.tags = AAZDictType()
            _element.type = AAZStrType(
                flags={"read_only": True},
            )

            properties = cls._schema_on_200.value.Element.properties
            properties.description = AAZStrType()
            properties.provisioning_state = AAZStrType(
                serialized_name="provisioningState",
                flags={"read_only": True},
            )
            properties.snssai = AAZObjectType(
                flags={"required": True},
            )

            snssai = cls._schema_on_200.value.Element.properties.snssai
            snssai.sd = AAZStrType()
            snssai.sst = AAZIntType(
                flags={"required": True},
            )

            system_data = cls._schema_on_200.value.Element.system_data
            system_data.created_at = AAZStrType(
                serialized_name="createdAt",
            )
            system_data.created_by = AAZStrType(
                serialized_name="createdBy",
            )
            system_data.created_by_type = AAZStrType(
                serialized_name="createdByType",
            )
            system_data.last_modified_at = AAZStrType(
                serialized_name="lastModifiedAt",
            )
            system_data.last_modified_by = AAZStrType(
                serialized_name="lastModifiedBy",
            )
            system_data.last_modified_by_type = AAZStrType(
                serialized_name="lastModifiedByType",
            )

            tags = cls._schema_on_200.value.Element.tags
            tags.Element = AAZStrType()

            return cls._schema_on_200
class _ListHelper:
    """Helper class for List"""


# only the command class is part of the module's public API
__all__ = ["List"]
4,603 | layout | from conan import ConanFile
from conan.tools.apple import fix_apple_shared_install_name
from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout
from conan.tools.env import VirtualBuildEnv
from conan.tools.files import chdir, copy, get, rename, replace_in_file, rm, rmdir
from conan.tools.gnu import Autotools, AutotoolsToolchain
from conan.tools.METHOD_NAME import basic_layout
from conan.tools.microsoft import is_msvc, NMakeToolchain
from conan.tools.scm import Version
import os
required_conan_version = ">=1.55.0"
class LibFDKAACConan(ConanFile):
    """Conan recipe for the Fraunhofer FDK AAC codec library (fdk-aac)."""

    name = "libfdk_aac"
    url = "https://github.com/conan-io/conan-center-index"
    description = "A standalone library of the Fraunhofer FDK AAC code from Android"
    license = "https://github.com/mstorsjo/fdk-aac/blob/master/NOTICE"
    homepage = "https://sourceforge.net/projects/opencore-amr/"
    topics = ("multimedia", "audio", "fraunhofer", "aac", "decoder", "encoding", "decoding")
    package_type = "library"
    settings = "os", "arch", "compiler", "build_type"
    options = {
        "shared": [True, False],
        "fPIC": [True, False],
    }
    default_options = {
        "shared": False,
        "fPIC": True,
    }

    @property
    def _settings_build(self):
        # settings of the build machine (conan v1 fallback to host settings)
        return getattr(self, "settings_build", self.settings)

    @property
    def _use_cmake(self):
        # upstream provides a CMake build from version 2.0.2 onwards
        return Version(self.version) >= "2.0.2"

    def config_options(self):
        if self.settings.os == "Windows":
            # fPIC has no meaning on Windows
            del self.options.fPIC

    def configure(self):
        if self.options.shared:
            # fPIC is implied when building a shared library
            self.options.rm_safe("fPIC")

    def METHOD_NAME(self):
        # choose the folder layout matching the build system in use
        if self._use_cmake:
            cmake_layout(self, src_folder="src")
        else:
            basic_layout(self, src_folder="src")

    def build_requirements(self):
        # autotools builds need libtool; on Windows additionally a bash (msys2)
        if not self._use_cmake and not is_msvc(self):
            self.tool_requires("libtool/2.4.7")
            if self._settings_build.os == "Windows":
                self.win_bash = True
                if not self.conf.get("tools.microsoft.bash:path", check_type=str):
                    self.tool_requires("msys2/cci.latest")

    def source(self):
        get(self, **self.conan_data["sources"][self.version], strip_root=True)

    def generate(self):
        if self._use_cmake:
            tc = CMakeToolchain(self)
            tc.variables["BUILD_PROGRAMS"] = False
            tc.variables["FDK_AAC_INSTALL_CMAKE_CONFIG_MODULE"] = False
            tc.variables["FDK_AAC_INSTALL_PKGCONFIG_MODULE"] = False
            tc.generate()
        elif is_msvc(self):
            tc = NMakeToolchain(self)
            tc.generate()
        else:
            env = VirtualBuildEnv(self)
            env.generate()
            tc = AutotoolsToolchain(self)
            tc.generate()

    def build(self):
        if self._use_cmake:
            cmake = CMake(self)
            cmake.configure()
            cmake.build()
        elif is_msvc(self):
            # patch the vendored nmake makefile before building
            makefile_vc = os.path.join(self.source_folder, "Makefile.vc")
            replace_in_file(self, makefile_vc, "CFLAGS = /nologo /W3 /Ox /MT", "CFLAGS = /nologo")
            replace_in_file(self, makefile_vc, "MKDIR_FLAGS = -p", "MKDIR_FLAGS =")
            # Build either shared or static, and don't build utility (it always depends on static lib)
            replace_in_file(self, makefile_vc, "copy $(PROGS) $(bindir)", "")
            replace_in_file(self, makefile_vc, "copy $(LIB_DEF) $(libdir)", "")
            if self.options.shared:
                replace_in_file(
                    self, makefile_vc,
                    "all: $(LIB_DEF) $(STATIC_LIB) $(SHARED_LIB) $(IMP_LIB) $(PROGS)",
                    "all: $(LIB_DEF) $(SHARED_LIB) $(IMP_LIB)",
                )
                replace_in_file(self, makefile_vc, "copy $(STATIC_LIB) $(libdir)", "")
            else:
                replace_in_file(
                    self, makefile_vc,
                    "all: $(LIB_DEF) $(STATIC_LIB) $(SHARED_LIB) $(IMP_LIB) $(PROGS)",
                    "all: $(STATIC_LIB)",
                )
                replace_in_file(self, makefile_vc, "copy $(IMP_LIB) $(libdir)", "")
                replace_in_file(self, makefile_vc, "copy $(SHARED_LIB) $(bindir)", "")
            with chdir(self, self.source_folder):
                self.run("nmake -f Makefile.vc")
        else:
            autotools = Autotools(self)
            autotools.autoreconf()
            if self.settings.os == "Android" and self._settings_build.os == "Windows":
                # remove escape for quotation marks, to make ndk on windows happy
                replace_in_file(
                    self, os.path.join(self.source_folder, "configure"),
                    "s/[ `~#$^&*(){}\\\\|;'\\\''\"<>?]/\\\\&/g", "s/[ `~#$^&*(){}\\\\|;<>?]/\\\\&/g",
                )
            autotools.configure()
            autotools.make()

    def package(self):
        copy(self, "NOTICE", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
        if self._use_cmake:
            cmake = CMake(self)
            cmake.install()
        elif is_msvc(self):
            with chdir(self, self.source_folder):
                self.run(f"nmake -f Makefile.vc prefix=\"{self.package_folder}\" install")
            if self.options.shared:
                # normalize the import library name to fdk-aac.lib
                rename(self, os.path.join(self.package_folder, "lib", "fdk-aac.dll.lib"),
                       os.path.join(self.package_folder, "lib", "fdk-aac.lib"))
        else:
            autotools = Autotools(self)
            autotools.install()
            rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig"))
            rm(self, "*.la", os.path.join(self.package_folder, "lib"))
            fix_apple_shared_install_name(self)

    def package_info(self):
        self.cpp_info.set_property("cmake_file_name", "fdk-aac")
        self.cpp_info.set_property("cmake_target_name", "FDK-AAC::fdk-aac")
        self.cpp_info.set_property("pkg_config_name", "fdk-aac")
        # TODO: back to global scope in conan v2 once cmake_find_package_* generators removed
        self.cpp_info.components["fdk-aac"].libs = ["fdk-aac"]
        if self.settings.os in ["Linux", "FreeBSD", "Android"]:
            self.cpp_info.components["fdk-aac"].system_libs.append("m")

        # TODO: to remove in conan v2 once cmake_find_package_* generators removed
        self.cpp_info.filenames["cmake_find_package"] = "fdk-aac"
        self.cpp_info.filenames["cmake_find_package_multi"] = "fdk-aac"
        self.cpp_info.names["cmake_find_package"] = "FDK-AAC"
        self.cpp_info.names["cmake_find_package_multi"] = "FDK-AAC"
        self.cpp_info.components["fdk-aac"].names["cmake_find_package"] = "fdk-aac"
        self.cpp_info.components["fdk-aac"].names["cmake_find_package_multi"] = "fdk-aac"
        self.cpp_info.components["fdk-aac"].set_property("cmake_target_name", "FDK-AAC::fdk-aac")
4,604 | one hot encoding | """Implements the k-nearest neighbors (kNN) classifier"""
from typing import Callable
import heat as ht
from heat.core.dndarray import DNDarray
class KNeighborsClassifier(ht.BaseEstimator, ht.ClassificationMixin):
    """
    Implementation of the k-nearest-neighbors Algorithm [1].

    This algorithm predicts labels to data vectors by using an labeled training dataset as reference. The input vector
    to be predicted is compared to the training vectors by calculating the Euclidean distance between each of them. A
    majority vote of the k-nearest, i.e. closest or smallest distanced, training vectors labels is selected as
    predicted class.

    Parameters
    ----------
    n_neighbors : int, optional, default: 5
        Number of neighbours to consider when choosing label.
    effective_metric_ : Callable, optional
        The distance function used to identify the nearest neighbors, defaults to the Euclidean distance.

    References
    --------
    [1] T. Cover and P. Hart, "Nearest Neighbor Pattern Classification," in IEEE Transactions on Information Theory,
    vol. 13, no. 1, pp. 21-27, January 1967, doi: 10.1109/TIT.1967.1053964.
    """

    def __init__(self, n_neighbors: int = 5, effective_metric_: Callable = None):
        self.n_neighbors = n_neighbors
        # fall back to pairwise (Euclidean) distances when no metric is given
        self.effective_metric_ = (
            effective_metric_ if effective_metric_ is not None else ht.spatial.cdist
        )
        # init declaration to appease flake
        self.x = None
        self.y = None
        self.n_samples_fit_ = -1
        self.outputs_2d_ = True
        self.classes_ = None

    @staticmethod
    def METHOD_NAME(x: DNDarray) -> DNDarray:
        """
        One-hot-encodes the passed vector or single-column matrix.

        Parameters
        ----------
        x : DNDarray
            The data to be encoded.
        """
        n_samples = x.shape[0]
        # number of classes is the largest label value plus one
        n_features = ht.max(x).item() + 1
        one_hot = ht.zeros((n_samples, n_features), split=x.split, device=x.device, comm=x.comm)
        # set the column matching each (process-local) label entry to one
        one_hot.lloc[range(one_hot.lshape[0]), x.larray] = 1
        return one_hot

    def fit(self, x: DNDarray, y: DNDarray):
        """
        Fit the k-nearest neighbors classifier from the training dataset.

        Parameters
        ----------
        x : DNDarray
            Labeled training vectors used for comparison in predictions, Shape=(n_samples, n_features).
        y : DNDarray
            Corresponding labels for the training feature vectors. Must have the same number of samples as ``x``.
            Shape=(n_samples) if integral labels or Shape=(n_samples, n_classes) if one-hot-encoded.

        Raises
        ------
        TypeError
            If ``x`` or ``y`` are not DNDarrays.
        ValueError
            If ``x`` and ``y`` shapes mismatch or are not two-dimensional matrices.

        Examples
        --------
        >>> samples = ht.rand(10, 3)
        >>> labels = ht.random.randint(0, 2, (10,))
        >>> knn = KNeighborsClassifier(n_neighbors=1)
        >>> knn.fit(samples, labels)
        """
        # check for type consistency
        if not isinstance(x, DNDarray) or not isinstance(y, DNDarray):
            raise TypeError(f"x and y must be DNDarrays but were {type(x)} {type(y)}")

        # ensure that x is a two-dimensional matrix
        if len(x.shape) != 2:
            raise ValueError(f"x must be two-dimensional, but was {len(x.shape)}")
        self.x = x
        self.n_samples_fit_ = x.shape[0]

        # ensure that x and y have the same number of samples
        if x.shape[0] != y.shape[0]:
            raise ValueError(
                f"Number of samples x and y samples mismatch, got {x.shape[0]}, {y.shape[0]}"
            )

        # checks the labels for correct dimensionality and encode one-hot
        if len(y.shape) == 1:
            self.y = self.METHOD_NAME(y)
            self.outputs_2d_ = False
        elif len(y.shape) == 2:
            self.y = y
            self.outputs_2d_ = True
        else:
            raise ValueError(f"y needs to be one- or two-dimensional, but was {len(y.shape)}")

    def predict(self, x: DNDarray) -> DNDarray:
        """
        Predict the class labels for the provided data.

        Parameters
        ----------
        x : DNDarray
            The test samples.
        """
        # distances between each test sample and every training sample
        distances = self.effective_metric_(x, self.x)
        # indices of the k smallest distances per test sample
        _, indices = ht.topk(distances, self.n_neighbors, largest=False)
        # gather the one-hot labels of the neighbors and sum the votes per class
        predictions = self.y[indices.flatten()]
        predictions.balance_()
        predictions = ht.reshape(predictions, (indices.gshape + (self.y.gshape[1],)))
        predictions = ht.sum(predictions, axis=1)
        # majority vote: class with the highest accumulated count
        self.classes_ = ht.argmax(predictions, axis=1)
        return self.classes_
4,605 | get private link scoped resource output | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetPrivateLinkScopedResourceResult',
'AwaitableGetPrivateLinkScopedResourceResult',
'get_private_link_scoped_resource',
'get_private_link_scoped_resource_output',
]
@pulumi.output_type
class GetPrivateLinkScopedResourceResult:
    """
    A private link scoped resource
    """
    def __init__(__self__, id=None, linked_resource_id=None, name=None, provisioning_state=None, type=None):
        # defensive type checks: values arrive straight from the RPC layer
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if linked_resource_id and not isinstance(linked_resource_id, str):
            raise TypeError("Expected argument 'linked_resource_id' to be a str")
        pulumi.set(__self__, "linked_resource_id", linked_resource_id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="linkedResourceId")
    def linked_resource_id(self) -> Optional[str]:
        """
        The resource id of the scoped Azure monitor resource.
        """
        return pulumi.get(self, "linked_resource_id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        State of the private endpoint connection.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
class AwaitableGetPrivateLinkScopedResourceResult(GetPrivateLinkScopedResourceResult):
    """Awaitable variant of the result so it can be used with ``await``."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # unreachable yield makes this a generator, as required by __await__
        if False:
            yield self
        return GetPrivateLinkScopedResourceResult(
            id=self.id,
            linked_resource_id=self.linked_resource_id,
            name=self.name,
            provisioning_state=self.provisioning_state,
            type=self.type)
def get_private_link_scoped_resource(name: Optional[str] = None,
                                     resource_group_name: Optional[str] = None,
                                     scope_name: Optional[str] = None,
                                     opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPrivateLinkScopedResourceResult:
    """
    Gets a scoped resource in a private link scope.
    Azure REST API version: 2020-08-15-preview.


    :param str name: The name of the scoped resource object.
    :param str resource_group_name: The name of the resource group.
    :param str scope_name: The name of the Azure Arc PrivateLinkScope resource.
    """
    __args__ = dict()
    __args__['name'] = name
    __args__['resourceGroupName'] = resource_group_name
    __args__['scopeName'] = scope_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # synchronous invoke against the azure-native provider
    __ret__ = pulumi.runtime.invoke('azure-native:hybridcompute:getPrivateLinkScopedResource', __args__, opts=opts, typ=GetPrivateLinkScopedResourceResult).value

    return AwaitableGetPrivateLinkScopedResourceResult(
        id=pulumi.get(__ret__, 'id'),
        linked_resource_id=pulumi.get(__ret__, 'linked_resource_id'),
        name=pulumi.get(__ret__, 'name'),
        provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
        type=pulumi.get(__ret__, 'type'))
# Output-typed variant of get_private_link_scoped_resource; the lifted function
# accepts pulumi Inputs and returns a pulumi Output, so the body is elided.
@_utilities.lift_output_func(get_private_link_scoped_resource)
def METHOD_NAME(name: Optional[pulumi.Input[str]] = None,
                resource_group_name: Optional[pulumi.Input[str]] = None,
                scope_name: Optional[pulumi.Input[str]] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPrivateLinkScopedResourceResult]:
    """
    Gets a scoped resource in a private link scope.
    Azure REST API version: 2020-08-15-preview.


    :param str name: The name of the scoped resource object.
    :param str resource_group_name: The name of the resource group.
    :param str scope_name: The name of the Azure Arc PrivateLinkScope resource.
    """
    ...
4,606 | test examples security secure boot key revoke | from __future__ import print_function
import os
import struct
import zlib
from io import BytesIO
import ttfw_idf
# To prepare a runner for these tests,
# 1. Connect an FPGA with C3 image
# 2. Use a COM port for programming and export it as ESPPORT
# e.g export ESPPORT=/dev/ttyUSB0
# 3. Use another COM port for resetting efuses and connect its DTR pin to efuse reset pin on the FPGA board.
# Export it as EFUSEPORT
# e.g export EFUSEPORT=/dev/ttyUSB1
# 4. Run these tests
def corrupt_signature(signed_bootloader, seed=0, corrupt_sig=True, corrupt_crc=False, corrupt_single_block=None):
    # type: (bytes, int, bool, bool, int) -> bytes
    """Return *signed_bootloader* with its trailing 4096-byte signature sector
    corrupted. With corrupt_single_block=None all three signature blocks are
    corrupted, otherwise only the block with that index."""
    image, signature = signed_bootloader[:-4096], signed_bootloader[-4096:]
    originals = [signature[i * 1216:(i + 1) * 1216] for i in range(3)]
    corrupted = [corrupt_sig_block(blk, seed, corrupt_sig, corrupt_crc) for blk in originals]
    chosen = []
    for idx in range(3):
        if corrupt_single_block is None or corrupt_single_block == idx:
            chosen.append(corrupted[idx])
        else:
            chosen.append(originals[idx])
    return image + b''.join(chosen) + signature[3648:]
def corrupt_sig_block(sig_block, seed=0, corrupt_sig=True, corrupt_crc=False):
    # type: (bytes, int, bool, bool) -> bytes
    """Corrupt a single 1216-byte secure boot signature block.

    Blocks starting with the 0xff padding byte are returned untouched; valid
    blocks (magic 0xe7) get a deterministic, seed-dependent byte flip in the
    signature and/or an invalid CRC field."""
    assert len(sig_block) == 1216
    magic = sig_block[0]
    assert magic in [0xe7, 0xff]
    if magic != 0xe7:
        return sig_block  # not valid
    # block layout: data | signature | crc32 | padding
    data = sig_block[:812]
    sig = sig_block[812:1196]
    new_sig = sig
    crc = sig_block[1196:1200]
    padding = sig_block[1200:1216]
    if corrupt_sig:
        idx = seed % len(sig)
        # seed-derived non-zero xor mask
        delta = (zlib.crc32(bytes(seed)) & 0xFF) or 1
        new_sig = sig[:idx] + bytes([sig[idx] ^ delta]) + sig[idx + 1:]
        assert new_sig != sig
    if corrupt_crc:
        # deliberately wrong checksum
        crc = struct.pack('<I', zlib.crc32(crc))
    else:
        # keep the crc consistent so only the signature itself is invalid
        crc = struct.pack('<I', zlib.crc32(data + new_sig) & 0xffffffff)
    result = data + new_sig + crc + padding
    assert len(result) == len(sig_block)
    return result
def dut_start_secure_app(dut):  # type: (ttfw_idf.IDFDUT) -> None
    """Reset efuses, flash the signed bootloader at offset 0x0 and boot."""
    dut.reset_efuses()
    bl_path = os.path.join(dut.app.binary_path, 'bootloader/bootloader.bin')
    with open(bl_path, 'rb') as bootloader:
        dut.write_flash_data([(0x0, bootloader)], None, True, False)
    dut.start_app()
# Test secure boot flow.
# Correctly signed bootloader + correctly signed app should work
@ttfw_idf.idf_custom_test(env_tag='Example_Secure_Boot', target=['esp32c3fpga', 'esp32s3fpga'], ignore=True)
def test_examples_security_secure_boot(env, _):  # type: (ttfw_idf.TinyFW.Env, None) -> None
    """Happy path: a correctly signed bootloader + app must boot with secure boot on."""
    dut = env.get_dut(
        'secure_boot',
        'tools/test_apps/security/secure_boot',
        efuse_reset_port=os.getenv('EFUSEPORT'),
    )
    dut_start_secure_app(dut)
    dut.expect('Secure Boot is enabled', timeout=2)
# Test efuse key index and key block combination.
# Any key index can be written to any key block and should work
@ttfw_idf.idf_custom_test(env_tag='Example_Secure_Boot', target=['esp32c3fpga', 'esp32s3fpga'], ignore=True)
def test_examples_security_secure_boot_key_combo(env, _):  # type: (ttfw_idf.TinyFW.Env, None) -> None
    """Every (key index, efuse key block) pairing must yield a bootable image."""
    dut = env.get_dut(
        'secure_boot',
        'tools/test_apps/security/secure_boot',
        efuse_reset_port=os.getenv('EFUSEPORT'),
    )
    dut_start_secure_app(dut)
    for key_index in range(3):
        for key_block in range(6):
            dut.reset_efuses()
            dut.secure_boot_burn_en_bit()
            dut.secure_boot_burn_digest('test_rsa_3072_key.pem', key_index, key_block)
            dut.reset()
            dut.expect('Secure Boot is enabled', timeout=2)
# Test secure boot key revoke.
# If a key is revoked, bootloader signed with that key should fail verification
@ttfw_idf.idf_custom_test(env_tag='Example_Secure_Boot', target=['esp32c3fpga', 'esp32s3fpga'], ignore=True)
def METHOD_NAME(env, _):  # type: (ttfw_idf.TinyFW.Env, None) -> None
    """A bootloader signed with a revoked key must fail signature verification."""
    dut = env.get_dut(
        'secure_boot',
        'tools/test_apps/security/secure_boot',
        efuse_reset_port=os.getenv('EFUSEPORT'),
    )
    dut_start_secure_app(dut)
    for key_index in range(3):
        dut.reset_efuses()
        dut.secure_boot_burn_en_bit()
        dut.secure_boot_burn_digest('test_rsa_3072_key.pem', key_index, 0)
        # Revoke the very key the digest was burned with.
        dut.burn_efuse('SECURE_BOOT_KEY_REVOKE%d' % key_index, 1)
        dut.reset()
        dut.expect('secure boot verification failed', timeout=2)
# Test bootloader signature corruption.
# Corrupt one byte at a time of bootloader signature and test that the verification fails
@ttfw_idf.idf_custom_test(env_tag='Example_Secure_Boot', target=['esp32c3fpga', 'esp32s3fpga'], ignore=True)
def test_examples_security_secure_boot_corrupt_bl_sig(env, _):  # type: (ttfw_idf.TinyFW.Env, None) -> None
    """Flash 384 bootloader images, each with one signature byte flipped,
    and check that every one of them fails the signature check.
    """
    efuse_port = os.getenv('EFUSEPORT')
    dut = env.get_dut('secure_boot', 'tools/test_apps/security/secure_boot', efuse_reset_port=efuse_port)
    dut.reset_efuses()
    dut.secure_boot_burn_en_bit()
    dut.secure_boot_burn_digest('test_rsa_3072_key.pem', 0, 0)
    bootloader_bin = os.path.join(dut.app.binary_path, 'bootloader/bootloader.bin')
    with open(bootloader_bin, 'rb') as f:
        signed_bl = f.read()
    # One seed per signature byte (the RSA-3072 signature is 384 bytes).
    seeds = range(0, 384)
    max_seed = max(seeds)
    for seed in seeds:
        print('Case %d / %d' % (seed, max_seed))
        corrupt_bl = corrupt_signature(signed_bl, seed=seed)
        dut.write_flash_data([(0x0, BytesIO(corrupt_bl))])
        dut.expect('Signature Check Failed', timeout=2)
# Test app signature corruption.
# Corrupt app signature, one byte at a time, and test that the verification fails
@ttfw_idf.idf_custom_test(env_tag='Example_Secure_Boot', target=['esp32c3fpga', 'esp32s3fpga'], ignore=True)
def test_examples_security_secure_boot_corrupt_app_sig(env, _):  # type: (ttfw_idf.TinyFW.Env, None) -> None
    """Corrupt the app signature one byte at a time (then the CRC) and
    check that verification fails in each case.
    """
    efuse_port = os.getenv('EFUSEPORT')
    dut = env.get_dut('secure_boot', 'tools/test_apps/security/secure_boot', efuse_reset_port=efuse_port)
    dut_start_secure_app(dut)
    dut.reset_efuses()
    dut.secure_boot_burn_en_bit()
    dut.secure_boot_burn_digest('test_rsa_3072_key.pem', 0, 0)
    app_bin = os.path.join(dut.app.binary_path, 'secure_boot.bin')
    with open(app_bin, 'rb') as f:
        signed_app = f.read()
    # One seed per signature byte (the RSA-3072 signature is 384 bytes).
    seeds = range(0, 384)
    max_seed = max(seeds)
    for seed in seeds:
        print('Case %d / %d' % (seed, max_seed))
        corrupt_app = corrupt_signature(signed_app, seed=seed)
        # The app partition lives at 0x20000.
        dut.write_flash_data([(0x20000, BytesIO(corrupt_app))])
        dut.expect('Signature Check Failed', timeout=2)
        dut.expect('image valid, signature bad', timeout=2)
    print('Testing invalid CRC...')
    # Valid signature but invalid CRC
    dut.reset_efuses()
    dut.secure_boot_burn_en_bit()
    dut.secure_boot_burn_digest('test_rsa_3072_key.pem', 0, 0)
    corrupt_app = corrupt_signature(signed_app, corrupt_sig=False, corrupt_crc=True)
    dut.write_flash_data([(0x20000, BytesIO(corrupt_app))])
    dut.expect('Sig block 0 invalid: Stored CRC ends', timeout=2)
    dut.expect('Secure boot signature verification failed', timeout=2)
    dut.expect('No bootable app partitions in the partition table', timeout=2)
if __name__ == '__main__':
    # NOTE(review): the test functions are declared as f(env, _) but invoked
    # here with no arguments; presumably the idf_custom_test decorator
    # returns a wrapper that builds the test environment itself -- confirm
    # against ttfw_idf before relying on direct invocation.
    test_examples_security_secure_boot()
    test_examples_security_secure_boot_key_combo()
    METHOD_NAME()
    test_examples_security_secure_boot_corrupt_bl_sig()
    test_examples_security_secure_boot_corrupt_app_sig()
4,607 | info | # (C) Copyright 2005-2023 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
""" Trait definition for a wxPython-based Kiva font.
"""
from pyface.font import Font as PyfaceFont
from traits.api import DefaultValue, TraitError, TraitType, NoDefaultSpecified
import kiva.constants as kc
from kiva.fonttools.font import Font, FontParseError, simple_parser
#: Expected attributes on the Font class. Used to snapshot a validated Font
#: into keyword arguments for the trait's (factory, args, kwargs) default.
font_attrs = [
    'face_name', 'size', 'family', 'weight', 'style', 'underline', 'encoding',
]

#: Mapping from Pyface Font generic family names to corresponding constants.
#: Both the CSS-style generic names and the legacy wx-style aliases map to
#: the same Kiva family constant.
pyface_family_to_kiva_family = {
    'default': kc.DEFAULT,
    'fantasy': kc.DECORATIVE,
    'decorative': kc.DECORATIVE,
    'serif': kc.ROMAN,
    'roman': kc.ROMAN,
    'cursive': kc.SCRIPT,
    'script': kc.SCRIPT,
    'sans-serif': kc.SWISS,
    'swiss': kc.SWISS,
    'monospace': kc.MODERN,
    'modern': kc.MODERN,
    'typewriter': kc.TELETYPE,
    'teletype': kc.TELETYPE,
}
def pyface_font_to_font(font):
    """Convert a Pyface font to an equivalent Kiva Font.

    This ignores stretch and some options like small caps and strikethrough
    as the Kiva font object can't represent these at the moment.

    Parameters
    ----------
    font : Pyface Font instance
        The font to convert.

    Returns
    -------
    font : Kiva Font instance
        The resulting Kiva Font object.
    """
    # The first entry in the family list is the preferred concrete face.
    face_name = font.family[0]
    # The first recognised generic family decides the Kiva family constant.
    family = kc.DEFAULT
    for candidate in font.family:
        kiva_family = pyface_family_to_kiva_family.get(candidate)
        if kiva_family is not None:
            family = kiva_family
            break
    is_italic = (font.style != 'normal')
    return Font(
        face_name,
        int(font.size),
        family,
        font.weight_,
        kc.ITALIC if is_italic else kc.NORMAL,
        'underline' in font.decorations,
    )
class KivaFont(TraitType):
    """ A Trait which casts strings to a Kiva Font value.
    """

    #: The default value should be a tuple (factory, args, kwargs)
    default_value_type = DefaultValue.callable_and_args

    #: The parser to use when converting text to keyword args. This should
    #: accept a string and return a dictionary of Font class trait values (ie.
    #: "family", "size", "weight", etc.). If it can't parse the string, it
    #: should raise FontParseError.
    parser = None

    def __init__(self, default_value=None, *, parser=simple_parser, **metadata): # noqa: E501
        # Assign the parser before computing the default, because
        # _get_default_value -> validate may need it to parse a string.
        self.parser = parser
        default_value = self._get_default_value(default_value)
        super().__init__(default_value, **metadata)

    def validate(self, object, name, value):
        """Coerce *value* to a Kiva Font or raise via self.error()."""
        if isinstance(value, Font):
            return value
        if isinstance(value, PyfaceFont):
            return pyface_font_to_font(value)
        if isinstance(value, str):
            try:
                return Font(**self.parser(value))
            except FontParseError:
                self.error(object, name, value)
        # Any other type is invalid.
        self.error(object, name, value)

    def METHOD_NAME(self):
        """Return a user-readable description of the accepted values."""
        return (
            "a Kiva Font, a Pyface Font, or a string describing a font"
        )

    def get_editor(self, trait):
        # Imported lazily so the UI toolkit is only required when an editor
        # is actually requested.
        from enable.trait_defs.ui.kiva_font_editor import KivaFontEditor
        return KivaFontEditor()

    def clone(self, default_value=NoDefaultSpecified, **metadata):
        # Need to override clone due to Traits issue #1629
        new = super().clone(NoDefaultSpecified, **metadata)
        if default_value is not NoDefaultSpecified:
            new.default_value = self._get_default_value(default_value)
            new.default_value_type = DefaultValue.callable_and_args
        return new

    def _get_default_value(self, default_value):
        """Construct a default value suitable for callable_and_args."""
        if default_value is not None:
            try:
                font = self.validate(None, None, default_value)
            except TraitError:
                raise ValueError(
                    f"expected {self.METHOD_NAME()}, but got {default_value!r}"
                )
            # Snapshot the font's attributes so each trait default is a fresh
            # Font built from the same arguments (fonts are mutable).
            klass = font.__class__
            kwargs = {attr: getattr(font, attr) for attr in font_attrs}
        else:
            klass = Font
            kwargs = {}
        return (klass, (), kwargs)
4,608 | test run sequential catch tests | import sys
sys.path.insert(0, '../common_python')
import tools
import pytest
import os, re
import subprocess as sp
# BVE this should work for now, but needs to be cleaned up more
def hack_find_spack_build_dir(basedir):
    """Locate the LBANN build directory.

    Honors the LBANN_BUILD_DIR environment variable when set (and
    non-empty); otherwise scans ``<basedir>/builds`` and returns the
    ``build`` subdirectory of the first ``lbann_*`` entry found.
    """
    env_dir = os.getenv('LBANN_BUILD_DIR', default=None)
    if env_dir:
        return env_dir
    builds_root = basedir + '/builds'
    with os.scandir(builds_root) as entries:
        for candidate in entries:
            if candidate.is_dir() and re.match(r'lbann_.*', candidate.name):
                return candidate.path + '/build'
def get_system_seq_launch(cluster):
    """Return the single-node, single-task launcher prefix for *cluster*."""
    launchers = {
        'lassen': ['lrun', '-1', '--smpiargs=\"-disable_gpu_hooks\"'],
        'ray': ['lrun', '-1', '--smpiargs=\"-disable_gpu_hooks\"'],
        'tioga': ['flux mini run', '-N1', '-n1'],
        'corona': ['flux mini run', '-N1', '-n1'],
    }
    # Anything else is assumed to be a plain Slurm cluster.
    return launchers.get(cluster, ['srun', '-N1', '-n1', '--mpibind=off'])
def get_system_mpi_launch(cluster):
    """Return the two-node MPI launcher prefix for *cluster*."""
    if cluster in ('lassen', 'ray'):
        return ['jsrun', '-n2', '-r1', '-a4', '-c', 'ALL_CPUS', '-g', 'ALL_GPUS', '-d', 'packed', '-b', 'packed:10']
    if cluster == 'pascal':
        return ['srun', '-N2', '--ntasks-per-node=2', '--mpibind=off']
    if cluster in ('tioga', 'corona'):
        return ['flux mini run', '-N2', '-n2', '-g1', '-o gpu-affinity=per-task', '-o cpu-affinity=per-task']
    # Default (e.g. Catalyst): plain Slurm launch.
    return ['srun', '-N2', '--ntasks-per-node=4']
# Notice that these tests will automatically skip if the executable
# doesn't exist. Since we do not save the testing executable as a
# GitLab CI artifact on Catalyst, Corona, or Pascal, this should only
# run on Ray and Lassen in GitLab CI testing pipelines.
def METHOD_NAME(cluster, dirname):
    """Run the sequential Catch2 unit tests and fail on a non-zero exit code.

    Skips unless running on lassen (the only machine whose CI pipeline keeps
    the testing executable) or when the executable cannot be found.
    """
    if cluster != 'lassen':
        message = f'{os.path.basename(__file__)} is only required on lassen due to limitations of CI testing'
        print('Skip - ' + message)
        pytest.skip(message)
    output_dir = os.path.join(dirname, 'ci_test', 'unit_tests')
    build_dir = hack_find_spack_build_dir(dirname)
    seq_catch_exe = os.path.join(build_dir, 'unit_test', 'seq-catch-tests')
    if not os.path.exists(seq_catch_exe):
        print('Skip - executable not found')
        pytest.skip('executable not found')
    # Run the sequential tests
    seq_launch = get_system_seq_launch(cluster)
    # Catch2 writes a JUnit-format XML report named after the cluster.
    seq_output_file_name = 'seq_catch_tests_output-%s.xml' % (cluster)
    seq_output_file = os.path.join(output_dir, seq_output_file_name)
    seq_catch_args = [seq_catch_exe, '-r', 'junit', '-o', seq_output_file]
    output = sp.run(seq_launch + seq_catch_args, cwd=build_dir)
    if output.returncode != 0:
        raise AssertionError('return_code={%d}' % output.returncode)
def test_run_parallel_catch_tests(cluster, dirname):
    """Run the MPI-parallel Catch2 unit tests and fail on a non-zero exit.

    Skips unless running on lassen or when the executable is missing.
    """
    if cluster != 'lassen':
        message = f'{os.path.basename(__file__)} is only required on lassen due to limitations of CI testing'
        print('Skip - ' + message)
        pytest.skip(message)
    output_dir = os.path.join(dirname, 'ci_test', 'unit_tests')
    build_dir = hack_find_spack_build_dir(dirname)
    exe = os.path.join(build_dir, 'unit_test', 'mpi-catch-tests')
    if not os.path.exists(exe):
        print('Skip - executable not found')
        pytest.skip('executable not found')
    # Catch2 expands %r/%s in the report name to MPI rank/size.
    launcher = get_system_mpi_launch(cluster)
    report_name = 'mpi_catch_tests_output-%s-rank=%%r-size=%%s.xml' % (cluster)
    report_path = os.path.join(output_dir, report_name)
    result = sp.run(launcher + [exe, '-r', 'junit', '-o', report_path], cwd=build_dir)
    if result.returncode != 0:
        raise AssertionError('return_code={%d}' % result.returncode)
def test_run_parallel_filesystem_catch_tests(cluster, dirname):
    """Run only the "[filesystem]"-tagged MPI Catch2 tests.

    Same skip conditions as the other catch-test runners: lassen only, and
    the mpi-catch-tests executable must exist in the build tree.
    """
    if cluster != 'lassen':
        message = f'{os.path.basename(__file__)} is only required on lassen due to limitations of CI testing'
        print('Skip - ' + message)
        pytest.skip(message)
    output_dir = os.path.join(dirname, 'ci_test', 'unit_tests')
    build_dir = hack_find_spack_build_dir(dirname)
    mpi_catch_exe = os.path.join(build_dir, 'unit_test', 'mpi-catch-tests')
    if not os.path.exists(mpi_catch_exe):
        print('Skip - executable not found')
        pytest.skip('executable not found')
    # Run the parallel tests
    # Catch2 expands %r/%s in the report name to MPI rank/size.
    mpi_launch = get_system_mpi_launch(cluster)
    mpi_output_file_name = 'mpi_filesystem_catch_tests_output-%s-rank=%%r-size=%%s.xml' % (cluster)
    mpi_output_file = os.path.join(output_dir, mpi_output_file_name)
    # The quoted "[filesystem]" tag filter selects only filesystem tests.
    mpi_catch_args = [mpi_catch_exe, '"[filesystem]"', '-r', 'junit', '-o', mpi_output_file]
    output = sp.run(mpi_launch + mpi_catch_args, cwd=build_dir)
    if output.returncode != 0:
        raise AssertionError('return_code={%d}' % output.returncode)
4,609 | check shutdown | import re
import threading
import asyncio
from aiohttp import web
from openpype.lib import Logger
from .cors_middleware import cors_middleware
class WebServerManager:
    """Owns the aiohttp application and the thread that serves it."""

    def __init__(self, port=None, host=None):
        self._log = None
        # Fall back to defaults when no explicit endpoint is given.
        self.port = port or 8079
        self.host = host or "localhost"
        self.client = None
        self.handlers = {}
        self.on_stop_callbacks = []
        middlewares = [
            cors_middleware(
                origins=[re.compile(r"^https?\:\/\/localhost")]
            )
        ]
        self.app = web.Application(middlewares=middlewares)
        # add route with multiple methods for single "external app"
        self.webserver_thread = WebServerThread(self)

    @property
    def log(self):
        """Lazily created logger named after the class."""
        if self._log is None:
            self._log = Logger.get_logger(self.__class__.__name__)
        return self._log

    @property
    def url(self):
        """Base URL the server listens on."""
        return "http://{}:{}".format(self.host, self.port)

    def add_route(self, *args, **kwargs):
        """Register a route on the underlying aiohttp router."""
        self.app.router.add_route(*args, **kwargs)

    def add_static(self, *args, **kwargs):
        """Register a static-files route on the underlying aiohttp router."""
        self.app.router.add_static(*args, **kwargs)

    def start_server(self):
        """Start the server thread unless it is already alive."""
        thread = self.webserver_thread
        if thread and not thread.is_alive():
            thread.start()

    def stop_server(self):
        """Ask the server thread to shut down (no-op when not running)."""
        if not self.is_running:
            return
        try:
            self.log.debug("Stopping Web server")
            self.webserver_thread.is_running = False
            self.webserver_thread.stop()
        except Exception:
            self.log.warning(
                "Error has happened during Killing Web server",
                exc_info=True
            )

    @property
    def is_running(self):
        """True while the server thread reports itself as running."""
        thread = self.webserver_thread
        return bool(thread) and thread.is_running

    def thread_stopped(self):
        """Invoked by the thread on exit; fires registered stop callbacks."""
        for callback in self.on_stop_callbacks:
            callback()
class WebServerThread(threading.Thread):
    """ Listener for requests in thread."""
    def __init__(self, manager):
        self._log = None
        super(WebServerThread, self).__init__()
        # Flag polled by METHOD_NAME; clearing it triggers shutdown.
        self.is_running = False
        self.manager = manager
        self.loop = None
        self.runner = None
        self.site = None
        # Awaitables queued by other code; drained before each shutdown poll.
        self.tasks = []
    @property
    def log(self):
        if self._log is None:
            self._log = Logger.get_logger(self.__class__.__name__)
        return self._log
    @property
    def port(self):
        # Delegated to the owning manager.
        return self.manager.port
    @property
    def host(self):
        # Delegated to the owning manager.
        return self.manager.host
    def run(self):
        """Thread entry point: run the event loop until shutdown completes."""
        self.is_running = True
        try:
            self.log.info("Starting WebServer server")
            self.loop = asyncio.new_event_loop() # create new loop for thread
            asyncio.set_event_loop(self.loop)
            self.loop.run_until_complete(self.start_server())
            self.log.debug(
                "Running Web server on URL: \"localhost:{}\"".format(self.port)
            )
            # The watchdog coroutine eventually calls loop.stop(), which
            # makes run_forever() return.
            asyncio.ensure_future(self.METHOD_NAME(), loop=self.loop)
            self.loop.run_forever()
        except Exception:
            self.log.warning(
                "Web Server service has failed", exc_info=True
            )
        finally:
            self.loop.close() # optional
            self.is_running = False
            self.manager.thread_stopped()
            self.log.info("Web server stopped")
    async def start_server(self):
        """ Starts runner and TCPsite """
        self.runner = web.AppRunner(self.manager.app)
        await self.runner.setup()
        self.site = web.TCPSite(self.runner, self.host, self.port)
        await self.site.start()
    def stop(self):
        """Sets is_running flag to false, 'check_shutdown' shuts server down"""
        self.is_running = False
    async def METHOD_NAME(self):
        """ Future that is running and checks if server should be running
            periodically.
        """
        while self.is_running:
            # Drain any externally queued tasks before sleeping again.
            while self.tasks:
                task = self.tasks.pop(0)
                self.log.debug("waiting for task {}".format(task))
                await task
                # NOTE(review): this logs the bound method object, not its
                # value -- presumably task.result() was intended; confirm.
                self.log.debug("returned value {}".format(task.result))
            await asyncio.sleep(0.5)
        # is_running was cleared: tear everything down in order --
        # site first, then runner, then any still-pending tasks.
        self.log.debug("Starting shutdown")
        await self.site.stop()
        self.log.debug("Site stopped")
        await self.runner.cleanup()
        self.log.debug("Runner stopped")
        tasks = [
            task
            for task in asyncio.all_tasks()
            if task is not asyncio.current_task()
        ]
        list(map(lambda task: task.cancel(), tasks)) # cancel all the tasks
        results = await asyncio.gather(*tasks, return_exceptions=True)
        self.log.debug(
            f'Finished awaiting cancelled tasks, results: {results}...'
        )
        await self.loop.shutdown_asyncgens()
        # to really make sure everything else has time to stop
        await asyncio.sleep(0.07)
        # Stops run_forever() in run(), letting the thread exit.
        self.loop.stop()
4,610 | demo | #!/usr/bin/env python3
from __future__ import division
from builtins import range
import copy
import math
from asciimatics.effects import Cycle, Print, Stars
from asciimatics.renderers import SpeechBubble, FigletText, Box
from asciimatics.scene import Scene
from asciimatics.screen import Screen
from asciimatics.sprites import Arrow, Plot, Sam
from asciimatics.paths import Path
from asciimatics.exceptions import ResizeScreenError
import sys
def _speak(screen, text, pos, start):
    """Return a Print effect showing *text* in a speech bubble near *pos*.

    The bubble appears at frame *start* and is removed 50 frames later.
    """
    bubble = SpeechBubble(text, "L", uni=screen.unicode_aware)
    return Print(
        screen,
        bubble,
        x=pos[0] + 4,
        y=pos[1] - 4,
        colour=Screen.COLOUR_CYAN,
        clear=True,
        start_frame=start,
        stop_frame=start + 50,
    )
def METHOD_NAME(screen):
    """Build the seven tutorial Scenes and play them on *screen*.

    screen.play() raises ResizeScreenError on terminal resize; the caller
    catches it and restarts the demo.
    """
    scenes = []
    centre = (screen.width // 2, screen.height // 2)
    podium = (8, 5)
    # Scene 1.
    path = Path()
    path.jump_to(-20, centre[1])
    path.move_straight_to(centre[0], centre[1], 10)
    path.wait(30)
    path.move_straight_to(podium[0], podium[1], 10)
    path.wait(100)
    effects = [
        Arrow(screen, path, colour=Screen.COLOUR_GREEN),
        _speak(screen, "WELCOME TO ASCIIMATICS", centre, 30),
        _speak(screen, "My name is Aristotle Arrow.", podium, 110),
        _speak(screen,
               "I'm here to help you learn ASCIImatics.", podium, 180),
    ]
    scenes.append(Scene(effects))
    # Scene 2.
    path = Path()
    path.jump_to(podium[0], podium[1])
    effects = [
        Arrow(screen, path, colour=Screen.COLOUR_GREEN),
        _speak(screen, "Let's start with the Screen...", podium, 10),
        _speak(screen, "This is your Screen object.", podium, 80),
        Print(screen,
              Box(screen.width, screen.height, uni=screen.unicode_aware),
              0, 0, start_frame=90),
        _speak(screen, "It lets you play a Scene like this one I'm in.",
               podium, 150),
        _speak(screen, "A Scene contains one or more Effects.", podium, 220),
        _speak(screen, "Like me - I'm a Sprite!", podium, 290),
        _speak(screen, "Or these Stars.", podium, 360),
        _speak(screen, "As you can see, the Screen handles them both at once.",
               podium, 430),
        _speak(screen, "It can handle as many Effects as you like.",
               podium, 500),
        _speak(screen, "Please press <SPACE> now.", podium, 570),
        Stars(screen, (screen.width + screen.height) // 2, start_frame=360)
    ]
    scenes.append(Scene(effects, -1))
    # Scene 3.
    path = Path()
    path.jump_to(podium[0], podium[1])
    effects = [
        Arrow(screen, path, colour=Screen.COLOUR_GREEN),
        _speak(screen, "This is a new Scene.", podium, 10),
        _speak(screen, "The Screen stops all Effects and clears itself between "
               "Scenes.",
               podium, 70),
        _speak(screen, "That's why you can't see the Stars now.", podium, 130),
        _speak(screen, "(Though you can override that if you need to.)", podium,
               200),
        _speak(screen, "Please press <SPACE> now.", podium, 270),
    ]
    scenes.append(Scene(effects, -1))
    # Scene 4.
    path = Path()
    path.jump_to(podium[0], podium[1])
    effects = [
        Arrow(screen, path, colour=Screen.COLOUR_GREEN),
        _speak(screen, "So, how do you design your animation?", podium, 10),
        _speak(screen, "1) Decide on your cinematic flow of Scenes.", podium,
               80),
        _speak(screen, "2) Create the Effects in each Scene.", podium, 150),
        _speak(screen, "3) Pass the Scenes to the Screen to play.", podium,
               220),
        _speak(screen, "It really is that easy!", podium, 290),
        _speak(screen, "Just look at this sample code.", podium, 360),
        _speak(screen, "Please press <SPACE> now.", podium, 430),
    ]
    scenes.append(Scene(effects, -1))
    # Scene 5.
    path = Path()
    path.jump_to(podium[0], podium[1])
    effects = [
        Arrow(screen, path, colour=Screen.COLOUR_GREEN),
        _speak(screen, "There are various effects you can use. For "
               "example...",
               podium, 10),
        Cycle(screen,
              FigletText("Colour cycling"),
              centre[1] - 5,
              start_frame=100),
        Cycle(screen,
              FigletText("using Figlet"),
              centre[1] + 1,
              start_frame=100),
        _speak(screen, "Look in the effects module for more...",
               podium, 290),
        _speak(screen, "Please press <SPACE> now.", podium, 360),
    ]
    scenes.append(Scene(effects, -1))
    # Scene 6.
    path = Path()
    path.jump_to(podium[0], podium[1])
    # Sample a full circle (11 points) for the Sprite path demonstration.
    curve_path = []
    for i in range(0, 11):
        curve_path.append(
            (centre[0] + (screen.width / 4 * math.sin(i * math.pi / 5)),
             centre[1] - (screen.height / 4 * math.cos(i * math.pi / 5))))
    path2 = Path()
    path2.jump_to(centre[0], centre[1] - screen.height // 4)
    path2.move_round_to(curve_path, 60)
    effects = [
        Arrow(screen, path, colour=Screen.COLOUR_GREEN),
        _speak(screen, "Sprites (like me) are also an Effect.", podium, 10),
        _speak(screen, "We take a pre-defined Path to follow.", podium, 80),
        _speak(screen, "Like this one...", podium, 150),
        Plot(screen, path2, colour=Screen.COLOUR_BLUE, start_frame=160,
             stop_frame=300),
        _speak(screen, "My friend Sam will now follow it...", podium, 320),
        Sam(screen, copy.copy(path2), start_frame=380),
        _speak(screen, "Please press <SPACE> now.", podium, 420),
    ]
    scenes.append(Scene(effects, -1))
    # Scene 7.
    path = Path()
    path.jump_to(podium[0], podium[1])
    path.wait(60)
    path.move_straight_to(-5, podium[1], 20)
    path.wait(300)
    effects = [
        Arrow(screen, path, colour=Screen.COLOUR_GREEN),
        _speak(screen, "Goodbye!", podium, 10),
        Cycle(screen,
              FigletText("THE END!"),
              centre[1] - 4,
              start_frame=100),
        Print(screen, SpeechBubble("Press X to exit"), centre[1] + 6,
              start_frame=150)
    ]
    scenes.append(Scene(effects, 500))
    screen.play(scenes, stop_on_resize=True)
if __name__ == "__main__":
    # Replay the demo whenever the terminal is resized; any other
    # exception propagates and ends the program.
    while True:
        try:
            Screen.wrapper(METHOD_NAME)
            sys.exit(0)
        except ResizeScreenError:
            pass
4,611 | get model | # /usr/bin/env python3.8
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
import pytest
import numpy as np
from aimet_common import cost_calculator
from aimet_common.defs import CostMetric
from aimet_tensorflow.keras.layer_database import *
from aimet_tensorflow.keras.svd_spiltter import WeightSvdModuleSplitter
from aimet_tensorflow.keras.svd_pruner import WeightSvdPruner
from aimet_tensorflow.keras.utils import pymo_utils
import aimet_common.libpymo as pymo
def METHOD_NAME(model_type="Sequential"):
    """Build a small Conv-Conv-Flatten-Dense Keras model for the tests.

    Parameters
    ----------
    model_type : str
        Either "Sequential" or "Functional"; selects which Keras API is
        used to construct the otherwise identical architecture.
    """
    # Fresh graph/session per model so the tests don't interfere.
    tf.keras.backend.clear_session()
    if model_type == "Functional":
        inp = tf.keras.Input((28, 28, 3))
        x = tf.keras.layers.Conv2D(32, 3, strides=(2, 2), name='conv1', padding='same')(inp)
        x = tf.keras.layers.Conv2D(64, 5, name='conv2', padding='same')(x)
        x = tf.keras.layers.Flatten()(x)
        out = tf.keras.layers.Dense(10, name='linear')(x)
        return tf.keras.Model(inp, out)
    elif model_type == "Sequential":
        layers = [
            tf.keras.layers.Conv2D(32, 3, strides=(2, 2), name='conv1', padding='same', input_shape=(28, 28, 3)),
            tf.keras.layers.Conv2D(64, 5, name='conv2', padding='same'),
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(10, name='linear'),
        ]
        return tf.keras.Sequential(layers)
def _get_layers(model, model_type="Sequential"):
    """Return the model's layers with consistent indexing across APIs."""
    # Functional models expose the Input layer as model.layers[0]; the
    # Sequential API does not, so drop it to keep indices aligned.
    if model_type == "Sequential":
        return model.layers
    if model_type == "Functional":
        return model.layers[1:]
class TestWeightSvdLayerSplitandSVDPrunner:
    """Tests for weight-SVD layer splitting and the weight-SVD pruner."""

    @pytest.mark.parametrize("model_type", ["Sequential", "Functional"])
    @pytest.mark.parametrize("rank", [12, 20])
    @pytest.mark.parametrize("cost_metric", [CostMetric.mac, CostMetric.memory])
    def test_split_layer(self, model_type, rank, cost_metric):
        """
        test the output after and before the split_module call
        """
        model = METHOD_NAME(model_type)
        # Split the second conv layer (index 1 after Input-layer adjustment).
        orig_conv_op = _get_layers(model, model_type)[1]
        org_conv_op_shape = orig_conv_op.output_shape
        layer1 = Layer(orig_conv_op, orig_conv_op.name, output_shape=org_conv_op_shape)
        svd_lib_ref = pymo.GetSVDInstance()
        pymo_utils.PymoSvdUtils.configure_layers_in_pymo_svd([layer1], cost_metric, svd_lib_ref, pymo.TYPE_SINGLE)
        split_conv_op1, split_conv_op2 = WeightSvdModuleSplitter.split_module(model, layer1.module, rank, svd_lib_ref)
        # Splitting must not change the layer's output shape.
        split_conv_output = split_conv_op2.output_shape
        assert org_conv_op_shape == split_conv_output
        # check the bias value after split.
        assert len(split_conv_op2.get_weights()) == len(orig_conv_op.get_weights())
        if len(orig_conv_op.get_weights()) > 1:
            orig_bias_out = orig_conv_op.get_weights()[1]
            split_bias_out = split_conv_op2.get_weights()[1]
            assert np.allclose(orig_bias_out, split_bias_out, atol=1e-4)
        # First split conv op should also have bias
        assert len(split_conv_op1.get_weights()) == 2
        # Length of the bias of first conv split should be equal to rank
        assert len(split_conv_op1.get_weights()[1]) == rank

    @pytest.mark.parametrize("model_type", ["Sequential", "Functional"])
    @pytest.mark.parametrize("cost_metric", [CostMetric.mac, CostMetric.memory])
    def test_split_layer_with_stride(self, model_type, cost_metric):
        """
        test the conv2d split after and before split_module call with stride
        """
        model = METHOD_NAME(model_type)
        # The first conv layer has strides=(2, 2).
        orig_conv_op = _get_layers(model, model_type)[0]
        org_conv_op_shape = orig_conv_op.output_shape
        layer1 = Layer(orig_conv_op, orig_conv_op.name, output_shape=org_conv_op_shape)
        # Derive the rank from a 50% compression ratio instead of fixing it.
        rank = cost_calculator.WeightSvdCostCalculator.calculate_rank_given_comp_ratio(layer1, 0.5, cost_metric)
        svd_lib_ref = pymo.GetSVDInstance()
        pymo_utils.PymoSvdUtils.configure_layers_in_pymo_svd([layer1], cost_metric, svd_lib_ref, pymo.TYPE_SINGLE)
        split_conv_op1, split_conv_op2 = WeightSvdModuleSplitter.split_module(model, layer1.module, rank, svd_lib_ref)
        # Splitting must not change the layer's output shape.
        split_conv_output = split_conv_op2.output_shape
        assert org_conv_op_shape == split_conv_output
        # check the bias value after split.
        assert len(split_conv_op2.get_weights()) == len(orig_conv_op.get_weights())
        if len(orig_conv_op.get_weights()) > 1:
            orig_bias_out = orig_conv_op.get_weights()[1]
            split_bias_out = split_conv_op2.get_weights()[1]
            assert np.allclose(orig_bias_out, split_bias_out, atol=1e-4)
        # First split conv op should also have bias
        assert len(split_conv_op1.get_weights()) == 2
        # Length of the bias of first conv split should be equal to rank
        assert len(split_conv_op1.get_weights()[1]) == rank

    @pytest.mark.parametrize("model_type", ["Sequential", "Functional"])
    @pytest.mark.parametrize("cmp_ratio", [0.4, 0.75])
    @pytest.mark.parametrize("cost_metric", [CostMetric.mac, CostMetric.memory])
    @pytest.mark.parametrize("layer_index", [1, 3]) # 2 points to conv and 4 points to FC
    def test_perform_svd_and_split_layer(self, model_type, cmp_ratio, cost_metric, layer_index):
        """Pruning a layer must replace it with the two split layers."""
        model = METHOD_NAME(model_type)
        layer_db = LayerDatabase(model)
        # Copy the db
        comp_layer_db = copy.deepcopy(layer_db)
        layer = comp_layer_db.find_layer_by_name(_get_layers(model, model_type)[layer_index].name)
        org_count = len(list(comp_layer_db._compressible_layers.values()))
        splitter = WeightSvdPruner()
        splitter._prune_layer(layer_db, comp_layer_db, layer, 0.5, cost_metric)
        # orginal layer will be replaced by the two new layers
        assert layer not in list(comp_layer_db._compressible_layers.values())
        after_split_count = len(list(comp_layer_db._compressible_layers.values()))
        assert (org_count + 1) == after_split_count
|
4,612 | status handler | import time
from machine import Pin, ADC, UART
from picographics import PicoGraphics, DISPLAY_ENVIRO_PLUS
from pimoroni import RGBLED, Button
from breakout_bme68x import BreakoutBME68X, STATUS_HEATER_STABLE
from pimoroni_i2c import PimoroniI2C
from breakout_ltr559 import BreakoutLTR559
from pms5003 import PMS5003
import umqtt.simple
import WIFI_CONFIG
from network_manager import NetworkManager
import uasyncio
"""
This example reads from all the sensors on Enviro+.
(plus the optional particulate sensor)
Posts results via MQTT.
"""
# change this to adjust temperature compensation
# (degrees C subtracted or applied to the raw BME688 reading)
TEMPERATURE_OFFSET = 3
# MQTT broker settings
# NOTE: the placeholder strings below must be replaced with real broker
# credentials before deploying.
CLIENT_ID = "EnviroPlus"
SERVER_ADDRESS = "broker_IP_goes_here"
MQTT_USERNAME = "broker_username_goes_here"
MQTT_PASSWORD = "broker_password_goes_here"
UPDATE_INTERVAL = 60 # how often to post MQTT data, in seconds
def METHOD_NAME(mode, status, ip):
    """Network-manager status callback: draw wifi connection progress on the display.

    status is None while connecting, True on success, False on failure.
    mode is unused here but is part of the NetworkManager callback signature
    (presumably the wifi mode — verify against network_manager.client).
    """
    display.set_pen(BLACK)
    display.clear()
    display.set_pen(WHITE)
    display.text("Network: {}".format(WIFI_CONFIG.SSID), 10, 10, scale=2)
    status_text = "Connecting..."
    if status is not None:
        if status:
            status_text = "Connection successful!"
        else:
            status_text = "Connection failed!"
    display.text(status_text, 10, 30, scale=2)
    display.text("IP: {}".format(ip), 10, 60, scale=2)
    display.update()
# set up wifi; METHOD_NAME is invoked with progress updates while connecting
network_manager = NetworkManager(WIFI_CONFIG.COUNTRY, METHOD_NAME=METHOD_NAME)

# set up the display
display = PicoGraphics(display=DISPLAY_ENVIRO_PLUS)
display.set_backlight(1.0)

# set up the LED (red until the first successful sensor/MQTT cycle)
led = RGBLED(6, 7, 10, invert=True)
led.set_rgb(255, 0, 0)

# set up the buttons
button_a = Button(12, invert=True)
button_b = Button(13, invert=True)

# set up the Pico W's I2C
PINS_BREAKOUT_GARDEN = {"sda": 4, "scl": 5}
i2c = PimoroniI2C(**PINS_BREAKOUT_GARDEN)

# set up BME688 and LTR559 sensors
bme = BreakoutBME68X(i2c, address=0x77)
ltr = BreakoutLTR559(i2c)

# set up analog channel for microphone
mic = ADC(Pin(26))

# configure the PMS5003 for Enviro+
pms5003 = PMS5003(
    uart=UART(1, tx=Pin(8), rx=Pin(9), baudrate=9600),
    pin_enable=Pin(3),
    pin_reset=Pin(2),
    mode="active"
)

# sets up MQTT
mqtt_client = umqtt.simple.MQTTClient(client_id=CLIENT_ID, server=SERVER_ADDRESS, user=MQTT_USERNAME, password=MQTT_PASSWORD, keepalive=30)

# some constants we'll use for drawing
WHITE = display.create_pen(255, 255, 255)
BLACK = display.create_pen(0, 0, 0)
RED = display.create_pen(255, 0, 0)
GREEN = display.create_pen(0, 255, 0)
WIDTH, HEIGHT = display.get_bounds()
display.set_font("bitmap8")

# some other variables we'll use to keep track of stuff
current_time = 0
mqtt_time = 0
mqtt_success = False
# placeholder status text shown on screen until the first MQTT attempt happens
e = "Wait a minute"

# connect to wifi (blocks until the connection attempt completes)
uasyncio.get_event_loop().run_until_complete(network_manager.client(WIFI_CONFIG.SSID, WIFI_CONFIG.PSK))
# Main loop: read all sensors, publish over MQTT at most once per
# UPDATE_INTERVAL, and keep the on-screen status up to date.
while True:
    # read BME688
    temperature, pressure, humidity, gas, status, _, _ = bme.read()
    heater = "Stable" if status & STATUS_HEATER_STABLE else "Unstable"
    # correct temperature and humidity using an offset
    corrected_temperature = temperature - TEMPERATURE_OFFSET
    dewpoint = temperature - ((100 - humidity) / 5)
    corrected_humidity = 100 - (5 * (corrected_temperature - dewpoint))
    # read LTR559 — get_reading() can return None, so only index into the
    # result when a reading was actually produced (previously this indexed
    # unconditionally and would crash before the None check below)
    ltr_reading = ltr.get_reading()
    if ltr_reading is not None:
        lux = ltr_reading[BreakoutLTR559.LUX]
        prox = ltr_reading[BreakoutLTR559.PROXIMITY]
    # read mic
    mic_reading = mic.read_u16()
    # read particle sensor
    particulate_reading = pms5003.read()
    if heater == "Stable" and ltr_reading is not None:
        led.set_rgb(0, 0, 0)
        current_time = time.ticks_ms()
        if (current_time - mqtt_time) / 1000 >= UPDATE_INTERVAL:
            # then do an MQTT
            try:
                mqtt_client.connect()
                mqtt_client.publish(topic="EnviroTemperature", msg=str(corrected_temperature))
                mqtt_client.publish(topic="EnviroHumidity", msg=str(corrected_humidity))
                mqtt_client.publish(topic="EnviroPressure", msg=str(pressure / 100))
                mqtt_client.publish(topic="EnviroGas", msg=str(gas))
                mqtt_client.publish(topic="EnviroLux", msg=str(lux))
                mqtt_client.publish(topic="EnviroMic", msg=str(mic_reading))
                mqtt_client.publish(topic="EnviroParticulates1_0", msg=str(particulate_reading.pm_ug_per_m3(1.0)))
                mqtt_client.publish(topic="EnviroParticulates2_5", msg=str(particulate_reading.pm_ug_per_m3(2.5)))
                mqtt_client.publish(topic="EnviroParticulates10", msg=str(particulate_reading.pm_ug_per_m3(10)))
                mqtt_client.disconnect()
                mqtt_success = True
                mqtt_time = time.ticks_ms()
                led.set_rgb(0, 50, 0)
            except Exception as exc:
                # Keep the message in a plain string: the name bound by
                # "except ... as" is unbound when the handler exits, and
                # display.text() below needs a str rather than an Exception.
                e = str(exc)
                print(e)
                mqtt_success = False
                led.set_rgb(255, 0, 0)
    else:
        # light up the LED red if there's a problem with MQTT or sensor readings
        led.set_rgb(255, 0, 0)
    # turn off the backlight with A and turn it back on with B
    # things run a bit hotter when screen is on, so we're applying a different temperature offset
    if button_a.is_pressed:
        display.set_backlight(1.0)
        TEMPERATURE_OFFSET = 5
        time.sleep(0.5)
    elif button_b.is_pressed:
        display.set_backlight(0)
        TEMPERATURE_OFFSET = 3
        time.sleep(0.5)
    # draw some stuff on the screen
    display.set_pen(BLACK)
    display.clear()
    display.set_pen(WHITE)
    display.text("Posting Enviro+ sensor data via MQTT", 10, 10, WIDTH, scale=3)
    if mqtt_success is True:
        current_time = time.ticks_ms()
        display.set_pen(GREEN)
        display.text(f"Last MQTTed {(current_time - mqtt_time) / 1000:.0f} seconds ago", 10, 130, WIDTH, scale=3)
    else:
        display.set_pen(RED)
        display.text(e, 10, 130, WIDTH, scale=3)
    display.update()
    time.sleep(1.0)
# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import json
import textwrap
from pathlib import Path
from typing import Optional
from pants.backend.terraform.dependencies import TerraformInitRequest, TerraformInitResponse
from pants.backend.terraform.goals.deploy import DeployTerraformFieldSet
from pants.backend.terraform.testutil import (
StandardDeployment,
rule_runner_with_auto_approve,
standard_deployment,
)
from pants.engine.fs import DigestContents, FileContent
from pants.engine.internals.native_engine import Address
from pants.testutil.rule_runner import RuleRunner
# Re-export the shared fixtures under the names pytest resolves in this module.
rule_runner = rule_runner_with_auto_approve
standard_deployment = standard_deployment
def _do_init_terraform(
    rule_runner: RuleRunner, standard_deployment: StandardDeployment, initialise_backend: bool
) -> DigestContents:
    """Run `terraform init` for the given deployment and return the initialised files.

    Writes the deployment's files into the rule runner's workspace, requests a
    TerraformInitResponse for its field set, and materialises the resulting
    digest so the tests can inspect file contents.
    """
    rule_runner.write_files(standard_deployment.files)
    target = rule_runner.get_target(standard_deployment.target)
    field_set = DeployTerraformFieldSet.create(target)
    result = rule_runner.request(
        TerraformInitResponse,
        [
            TerraformInitRequest(
                field_set.root_module,
                field_set.backend_config,
                field_set.dependencies,
                initialise_backend=initialise_backend,
            )
        ],
    )
    # Materialise the digest so callers can assert on actual file contents.
    initialised_files = rule_runner.request(DigestContents, [result.sources_and_deps])
    assert isinstance(initialised_files, DigestContents)
    return initialised_files
def find_file(files: DigestContents, pattern: str) -> Optional[FileContent]:
    """Return the first file whose path matches *pattern* (glob), or None."""
    for candidate in files:
        if Path(candidate.path).match(pattern):
            return candidate
    return None
def test_init_terraform(rule_runner: RuleRunner, standard_deployment: StandardDeployment) -> None:
    """Test for the happy path of initialising Terraform with a backend config."""
    initialised_files = _do_init_terraform(
        rule_runner, standard_deployment, initialise_backend=True
    )
    # Assert uses backend by checking that the overrides in the backend file are present in the local stub state file
    stub_tfstate_raw = find_file(initialised_files, "src/tf/.terraform/terraform.tfstate")
    assert stub_tfstate_raw
    stub_tfstate = json.loads(stub_tfstate_raw.content)
    assert stub_tfstate["backend"]["config"]["path"] == str(standard_deployment.state_file)
    # Assert dependencies are initialised by checking for the dependency itself
    assert find_file(
        initialised_files,
        ".terraform/providers/registry.terraform.io/hashicorp/null/*/*/terraform-provider-null*",
    ), "Did not find expected provider"
    # Assert lockfile is included (message previously said "provider" — copy-paste fix)
    assert find_file(initialised_files, ".terraform.lock.hcl"), "Did not find expected lockfile"
def test_init_terraform_without_backends(
    rule_runner: RuleRunner, standard_deployment: StandardDeployment
) -> None:
    """Initialising without a backend fetches providers but writes no state file."""
    initialised_files = _do_init_terraform(
        rule_runner, standard_deployment, initialise_backend=False
    )
    # Not initialising the backend means that ./.terraform/.terraform.tfstate will not be present
    assert not find_file(
        initialised_files, "**/*.tfstate"
    ), "Terraform state file should not be present if the request was to not initialise the backend"
    # The dependencies should still be present
    assert find_file(
        initialised_files,
        ".terraform/providers/registry.terraform.io/hashicorp/null/*/*/terraform-provider-null*",
    ), "Did not find expected provider"
def METHOD_NAME(rule_runner: RuleRunner, tmpdir) -> None:
    """Initialising a deployment whose root module references an in-repo module pulls it in."""
    deployment_files = {
        "src/tf/deployment/BUILD": textwrap.dedent(
            """\
            terraform_deployment(name="root", root_module=":mod")
            terraform_module(name="mod")
            """
        ),
        "src/tf/deployment/main.tf": textwrap.dedent(
            """\
            module "mod0" {
                source = "../module/"
            }
            """
        ),
    }
    module_files = {
        "src/tf/module/BUILD": "terraform_module()",
        "src/tf/module/main.tf": 'resource "null_resource" "dep" {}',
    }
    deployment = StandardDeployment(
        {**deployment_files, **module_files},
        Path(str(tmpdir.mkdir(".terraform").join("state.json"))),
        Address("src/tf/deployment", target_name="root"),
    )
    initialised_files = _do_init_terraform(rule_runner, deployment, initialise_backend=True)
    # Assert that our module got included in the module.json
    assert initialised_files
    modules_file_raw = find_file(initialised_files, ".terraform/modules/modules.json")
    assert modules_file_raw
    modules_file = json.loads(modules_file_raw.content)
    assert any(
        module for module in modules_file["Modules"] if module["Key"] == "mod0"
    ), "Did not find our module in modules.json"
    # Assert that the module was explored as part of init
    assert find_file(
        initialised_files,
        ".terraform/providers/registry.terraform.io/hashicorp/null/*/*/terraform-provider-null*",
    ), "Did not find expected provider contained in module, did we successfully include it in the files passed to `init`?"
import io
import sys
from pathlib import Path
from uuid import uuid4
import pendulum
import pytest
from _pytest.capture import CaptureFixture
from prefect.utilities.dockerutils import (
ImageBuilder,
PushError,
push_image,
silence_docker_warnings,
)
from prefect.utilities.slugify import slugify
# The docker SDK emits noisy warnings at import time; import it silenced.
with silence_docker_warnings():
    from docker import DockerClient
    from docker.errors import NotFound

# Every test in this module requires a running Docker service.
pytestmark = pytest.mark.service("docker")
@pytest.fixture
def contexts() -> Path:
    """Directory containing the Docker build contexts used by these tests."""
    return Path(__file__).parent / "contexts"
@pytest.fixture(scope="module")
def howdy(docker: DockerClient, worker_id: str) -> str:
    """Build a busybox-based image with a unique greeting and return its image ID."""
    # Give the image something completely unique so that we know it will generate a
    # new image each time
    message = f"hello from the registry, {str(uuid4())}!"
    with ImageBuilder("busybox") as image:
        image.add_line(f"LABEL io.prefect.test-worker {worker_id}")
        image.add_line(f'ENTRYPOINT [ "echo", "{message}" ]')
        image_id = image.build()
    # Sanity check: running the image echoes the unique greeting
    greeting = docker.containers.run(image_id, remove=True).decode().strip()
    assert greeting == message
    # Give the image a unique tag for this run so we can confirm it is only untagged
    # but not removed by the process of pushing it to the registry
    test_run_tag = str(uuid4())
    docker.images.get(image_id).tag(test_run_tag)
    return image_id
def test_pushing_to_registry(docker: DockerClient, registry: str, howdy: str):
    """Pushing produces a timestamp-slug-prefixed registry tag that runs correctly."""
    tag_prefix = slugify(pendulum.now("utc").isoformat())[:20]
    registry_tag = push_image(howdy, registry, "howdy")
    assert registry_tag.startswith(f"localhost:5555/howdy:{tag_prefix}")
    greeting = docker.containers.run(registry_tag, remove=True).decode().strip()
    assert greeting.startswith("hello from the registry")
def test_pushing_to_registry_with_tag(docker: DockerClient, registry: str, howdy: str):
    """An explicit tag= overrides the generated timestamp tag."""
    registry_tag = push_image(howdy, registry, "howdy", tag="my-tag")
    assert registry_tag.startswith("localhost:5555/howdy:my-tag")
    greeting = docker.containers.run(registry_tag, remove=True).decode().strip()
    assert greeting.startswith("hello from the registry")
def METHOD_NAME(docker: DockerClient, registry: str, howdy: str):
    """An owner/repository image name is preserved in the pushed registry tag."""
    tag_prefix = slugify(pendulum.now("utc").isoformat())[:20]
    registry_tag = push_image(howdy, registry, "prefecthq/howdy")
    assert registry_tag.startswith(f"localhost:5555/prefecthq/howdy:{tag_prefix}")
    greeting = docker.containers.run(registry_tag, remove=True).decode().strip()
    assert greeting.startswith("hello from the registry")
def test_does_not_leave_registry_tag_locally(
    docker: DockerClient, registry: str, howdy: str
):
    """After pushing, the registry-qualified tag must not remain on the local daemon."""
    tag_prefix = slugify(pendulum.now("utc").isoformat())[:20]
    registry_tag = push_image(howdy, registry, "howdy")
    assert registry_tag.startswith(f"localhost:5555/howdy:{tag_prefix}")
    with pytest.raises(NotFound):
        docker.images.get(registry_tag)
def test_registry_error(howdy: str):
    """Pushing to an unreachable registry raises PushError."""
    with pytest.raises(PushError, match="lookup.+nowhere"):
        push_image(howdy, "http://nowhere:5678", "howdy")
def test_streams_nowhere_by_default(howdy: str, registry: str, capsys: CaptureFixture):
    """Without stream_progress_to, push_image writes nothing to stdout/stderr."""
    push_image(howdy, registry, "howdy")
    captured = capsys.readouterr()
    assert not captured.err
    assert not captured.out
def test_streams_progress_to_stdout(howdy: str, registry: str, capsys: CaptureFixture):
    """stream_progress_to=sys.stdout emits docker push progress on stdout only."""
    push_image(howdy, registry, "howdy", stream_progress_to=sys.stdout)
    captured = capsys.readouterr()
    assert not captured.err
    output = captured.out
    # spot check a few things we should expect to find in the output
    assert "push refers to repository" in output
    assert "\nPreparing" in output
    assert "\nPushing [" in output or "\nLayer already exists" in output
def test_streams_progress_to_given_stream(howdy: str, registry: str):
    """An arbitrary text stream can receive the docker push progress output."""
    my_stream = io.StringIO()
    push_image(howdy, registry, "howdy", stream_progress_to=my_stream)
    output = my_stream.getvalue()
    # spot check a few things we should expect to find in the output
    assert "push refers to repository" in output
    assert "\nPreparing" in output
    assert "\nPushing [" in output or "\nLayer already exists" in output
# This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis/
#
# Copyright the Hypothesis Authors.
# Individual contributors are listed in AUTHORS.rst and the git log.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
import math
import struct
from sys import float_info
from typing import Callable, Optional, SupportsFloat
# Format codes for (int, float) sized types, used for byte-wise casts.
# See https://docs.python.org/3/library/struct.html#format-characters
# Keyed by bit width; "!" forces big-endian so layouts are platform-independent.
STRUCT_FORMATS = {
    16: ("!H", "!e"),
    32: ("!I", "!f"),
    64: ("!Q", "!d"),
}
def reinterpret_bits(x, from_, to):
    """Bit-cast *x*: pack it with struct format *from_*, unpack as format *to*."""
    packed = struct.pack(from_, x)
    (result,) = struct.unpack(to, packed)
    return result
def float_of(x, width):
    """Return float(x) rounded to the nearest value representable at *width* bits."""
    assert width in (16, 32, 64)
    value = float(x)
    if width == 64:
        return value
    # Round-trip through the narrower wire format to apply its rounding.
    fmt = "!f" if width == 32 else "!e"
    return reinterpret_bits(value, fmt, fmt)
def is_negative(x: SupportsFloat) -> bool:
    """True iff the sign bit of *x* is set — distinguishes -0.0 from 0.0."""
    try:
        sign = math.copysign(1.0, x)
    except TypeError:
        raise TypeError(
            f"Expected float but got {x!r} of type {type(x).__name__}"
        ) from None
    return sign < 0
def count_between_floats(x, y, width=64):
    """Count the representable floats in the closed interval [x, y] (requires x <= y)."""
    assert x <= y
    if not is_negative(x):
        # Both endpoints non-negative: bit patterns increase with value.
        assert not is_negative(y)
        return float_to_int(y, width) - float_to_int(x, width) + 1
    if is_negative(y):
        # Both endpoints negative: bit patterns decrease as value increases.
        return float_to_int(x, width) - float_to_int(y, width) + 1
    # The interval straddles zero: count each signed half separately
    # (both halves include a zero, matching the original double-count of ±0).
    return count_between_floats(x, -0.0, width) + count_between_floats(0.0, y, width)
def float_to_int(value, width=64):
    """Bit-cast a float of *width* bits to its raw unsigned-integer representation."""
    int_fmt, float_fmt = STRUCT_FORMATS[width]
    return reinterpret_bits(value, float_fmt, int_fmt)
def METHOD_NAME(value, width=64):
    """Bit-cast an unsigned integer to the *width*-bit float with those raw bits."""
    int_fmt, float_fmt = STRUCT_FORMATS[width]
    return reinterpret_bits(value, int_fmt, float_fmt)
def next_up(value, width=64):
    """Return the first float larger than finite `val` - IEEE 754's `nextUp`.

    From https://stackoverflow.com/a/10426033, with thanks to Mark Dickinson.
    """
    assert isinstance(value, float), f"{value!r} of type {type(value)}"
    if math.isnan(value) or (math.isinf(value) and value > 0):
        # NaN and +inf have no successor.
        return value
    if value == 0.0 and is_negative(value):
        # nextUp(-0.0) is +0.0.
        return 0.0
    fmt_int, fmt_flt = STRUCT_FORMATS[width]
    # Note: n is signed; float_to_int returns unsigned
    fmt_int = fmt_int.lower()
    n = reinterpret_bits(value, fmt_flt, fmt_int)
    # In the signed view, positive floats order by increasing pattern while the
    # sign-magnitude layout makes negative floats step toward zero when the
    # pattern decreases — so +1/-1 both move one ULP upward.
    if n >= 0:
        n += 1
    else:
        n -= 1
    return reinterpret_bits(n, fmt_int, fmt_flt)
def next_down(value, width=64):
    """Return the first float smaller than *value* — IEEE 754's nextDown."""
    # nextDown(x) == -nextUp(-x): mirror across zero.
    mirrored = next_up(-value, width)
    return -mirrored
def next_down_normal(value, width, allow_subnormal):
    """next_down, optionally snapping over the subnormal range."""
    candidate = next_down(value, width)
    if allow_subnormal or candidate == 0 or abs(candidate) >= width_smallest_normals[width]:
        return candidate
    # Candidate landed in the subnormal gap: step past it, away from zero for
    # negatives and onto +0.0 for positives.
    return 0.0 if candidate > 0 else -width_smallest_normals[width]
def next_up_normal(value, width, allow_subnormal):
    """next_up, optionally snapping over the subnormal range."""
    mirrored = next_down_normal(-value, width, allow_subnormal)
    return -mirrored
# Smallest positive non-zero numbers that is fully representable by an
# IEEE-754 float, calculated with the width's associated minimum exponent.
# Values from https://en.wikipedia.org/wiki/IEEE_754#Basic_and_interchange_formats
width_smallest_normals = {
    16: 2 ** -(2 ** (5 - 1) - 2),
    32: 2 ** -(2 ** (8 - 1) - 2),
    64: 2 ** -(2 ** (11 - 1) - 2),
}
# Sanity check against the platform's double-precision minimum normal.
assert width_smallest_normals[64] == float_info.min
def make_float_clamper(
    min_float: float = 0.0,
    max_float: float = math.inf,
    *,
    allow_zero: bool = False,  # Allows +0.0 (even if minfloat > 0)
) -> Optional[Callable[[float], float]]:
    """
    Return a function that clamps positive floats into the given bounds.

    Returns None when no values are allowed (min > max and zero is not allowed).
    """
    if max_float < min_float:
        if not allow_zero:
            return None
        # Zero is the only value the clamper may produce.
        min_float = max_float = 0.0

    span = min(max_float - min_float, float_info.max)
    mantissa_mask = (1 << 52) - 1

    def float_clamper(float_val: float) -> float:
        if min_float <= float_val <= max_float:
            return float_val
        if allow_zero and float_val == 0.0:
            return float_val
        # Out of bounds: resample deterministically within the allowed range,
        # treating the mantissa bits as a fraction in [0, 1].
        fraction = (float_to_int(float_val) & mantissa_mask) / mantissa_mask
        resampled = min_float + span * fraction
        # Guard against floating point arithmetic drifting past the bounds.
        return min(max_float, max(min_float, resampled))

    return float_clamper
"""
time_freq_provider
==================
Autogenerated DPF operator classes.
"""
from warnings import warn
from ansys.dpf.core.dpf_operator import Operator
from ansys.dpf.core.inputs import Input, _Inputs
from ansys.dpf.core.outputs import Output, _Outputs
from ansys.dpf.core.operators.specification import PinSpecification, Specification
class time_freq_provider(Operator):
    """Reads the time/frequency support from the results files contained in
    the streams or data sources.

    Parameters
    ----------
    streams_container : StreamsContainer, optional
        Streams (result file container) (optional)
    data_sources : DataSources
        If the stream is null, retrieves the file
        path from the data sources.

    Examples
    --------
    >>> from ansys.dpf import core as dpf

    >>> # Instantiate operator
    >>> op = dpf.operators.metadata.time_freq_provider()

    >>> # Make input connections
    >>> my_streams_container = dpf.StreamsContainer()
    >>> op.inputs.streams_container.connect(my_streams_container)
    >>> my_data_sources = dpf.DataSources()
    >>> op.inputs.data_sources.connect(my_data_sources)

    >>> # Instantiate operator and connect inputs in one line
    >>> op = dpf.operators.metadata.time_freq_provider(
    ...     streams_container=my_streams_container,
    ...     data_sources=my_data_sources,
    ... )

    >>> # Get output data
    >>> result_time_freq_support = op.outputs.time_freq_support()
    """

    # NOTE: autogenerated operator wrapper — change the generator, not this file.
    def __init__(
        self, streams_container=None, data_sources=None, config=None, server=None
    ):
        super().__init__(
            name="time_freq_support_provider", config=config, server=server
        )
        self._inputs = InputsTimeFreqProvider(self)
        self._outputs = OutputsTimeFreqProvider(self)
        if streams_container is not None:
            self.inputs.streams_container.connect(streams_container)
        if data_sources is not None:
            self.inputs.data_sources.connect(data_sources)

    @staticmethod
    def METHOD_NAME():
        """Describe the operator's input/output pin layout for the DPF runtime."""
        description = """Reads the time/frequency support from the results files contained in
            the streams or data sources."""
        spec = Specification(
            description=description,
            map_input_pin_spec={
                3: PinSpecification(
                    name="streams_container",
                    type_names=["streams_container"],
                    optional=True,
                    document="""Streams (result file container) (optional)""",
                ),
                4: PinSpecification(
                    name="data_sources",
                    type_names=["data_sources"],
                    optional=False,
                    document="""If the stream is null, retrieves the file
            path from the data sources.""",
                ),
            },
            map_output_pin_spec={
                0: PinSpecification(
                    name="time_freq_support",
                    type_names=["time_freq_support"],
                    optional=False,
                    document="""""",
                ),
            },
        )
        return spec

    @staticmethod
    def default_config(server=None):
        """Returns the default config of the operator.

        This config can then be changed to the user needs and be used to
        instantiate the operator. The Configuration allows to customize
        how the operation will be processed by the operator.

        Parameters
        ----------
        server : server.DPFServer, optional
            Server with channel connected to the remote or local instance. When
            ``None``, attempts to use the global server.
        """
        return Operator.default_config(name="time_freq_support_provider", server=server)

    @property
    def inputs(self):
        """Enables to connect inputs to the operator

        Returns
        --------
        inputs : InputsTimeFreqProvider
        """
        return super().inputs

    @property
    def outputs(self):
        """Enables to get outputs of the operator by evaluating it

        Returns
        --------
        outputs : OutputsTimeFreqProvider
        """
        return super().outputs
class InputsTimeFreqProvider(_Inputs):
    """Intermediate class used to connect user inputs to
    time_freq_provider operator.

    Examples
    --------
    >>> from ansys.dpf import core as dpf
    >>> op = dpf.operators.metadata.time_freq_provider()
    >>> my_streams_container = dpf.StreamsContainer()
    >>> op.inputs.streams_container.connect(my_streams_container)
    >>> my_data_sources = dpf.DataSources()
    >>> op.inputs.data_sources.connect(my_data_sources)
    """

    def __init__(self, op: Operator):
        super().__init__(time_freq_provider.METHOD_NAME().inputs, op)
        # Pin 3: optional streams container; pin 4: data sources fallback.
        self._streams_container = Input(
            time_freq_provider.METHOD_NAME().input_pin(3), 3, op, -1
        )
        self._inputs.append(self._streams_container)
        self._data_sources = Input(time_freq_provider.METHOD_NAME().input_pin(4), 4, op, -1)
        self._inputs.append(self._data_sources)

    @property
    def streams_container(self):
        """Allows to connect streams_container input to the operator.

        Streams (result file container) (optional)

        Parameters
        ----------
        my_streams_container : StreamsContainer

        Examples
        --------
        >>> from ansys.dpf import core as dpf
        >>> op = dpf.operators.metadata.time_freq_provider()
        >>> op.inputs.streams_container.connect(my_streams_container)
        >>> # or
        >>> op.inputs.streams_container(my_streams_container)
        """
        return self._streams_container

    @property
    def data_sources(self):
        """Allows to connect data_sources input to the operator.

        If the stream is null, retrieves the file
        path from the data sources.

        Parameters
        ----------
        my_data_sources : DataSources

        Examples
        --------
        >>> from ansys.dpf import core as dpf
        >>> op = dpf.operators.metadata.time_freq_provider()
        >>> op.inputs.data_sources.connect(my_data_sources)
        >>> # or
        >>> op.inputs.data_sources(my_data_sources)
        """
        return self._data_sources
class OutputsTimeFreqProvider(_Outputs):
    """Intermediate class used to get outputs from
    time_freq_provider operator.

    Examples
    --------
    >>> from ansys.dpf import core as dpf
    >>> op = dpf.operators.metadata.time_freq_provider()
    >>> # Connect inputs : op.inputs. ...
    >>> result_time_freq_support = op.outputs.time_freq_support()
    """

    def __init__(self, op: Operator):
        super().__init__(time_freq_provider.METHOD_NAME().outputs, op)
        # Pin 0 carries the resulting time/frequency support.
        self._time_freq_support = Output(
            time_freq_provider.METHOD_NAME().output_pin(0), 0, op
        )
        self._outputs.append(self._time_freq_support)

    @property
    def time_freq_support(self):
        """Allows to get time_freq_support output of the operator

        Returns
        ----------
        my_time_freq_support : TimeFreqSupport

        Examples
        --------
        >>> from ansys.dpf import core as dpf
        >>> op = dpf.operators.metadata.time_freq_provider()
        >>> # Connect inputs : op.inputs. ...
        >>> result_time_freq_support = op.outputs.time_freq_support()
        """  # noqa: E501
        return self._time_freq_support
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.bigquery_migration_v2alpha import gapic_version as package_version
from google.cloud.bigquery_migration_v2alpha.types import translation_service
# Default user-agent metadata attached to outgoing API requests.
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=package_version.__version__
)
class SqlTranslationServiceTransport(abc.ABC):
    """Abstract transport class for SqlTranslationService."""

    # OAuth scopes requested when none are supplied by the caller.
    AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)

    DEFAULT_HOST: str = "bigquerymigration.googleapis.com"

    def __init__(
        self,
        *,
        host: str = DEFAULT_HOST,
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        api_audience: Optional[str] = None,
        **kwargs,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        """
        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
        self._scopes = scopes

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive"
            )

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
            )
        elif credentials is None:
            credentials, _ = google.auth.default(
                **scopes_kwargs, quota_project_id=quota_project_id
            )
            # Don't apply audience if the credentials file passed from user.
            if hasattr(credentials, "with_gdch_audience"):
                credentials = credentials.with_gdch_audience(
                    api_audience if api_audience else host
                )

        # If the credentials are service account credentials, then always try to use self signed JWT.
        if (
            always_use_jwt_access
            and isinstance(credentials, service_account.Credentials)
            and hasattr(service_account.Credentials, "with_always_use_jwt_access")
        ):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ":" not in host:
            host += ":443"
        self._host = host

    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods.
        self._wrapped_methods = {
            self.translate_query: gapic_v1.method.wrap_method(
                self.translate_query,
                default_timeout=None,
                client_info=client_info,
            ),
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
            Only call this method if the transport is NOT shared
            with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def translate_query(
        self,
    ) -> Callable[
        [translation_service.TranslateQueryRequest],
        Union[
            translation_service.TranslateQueryResponse,
            Awaitable[translation_service.TranslateQueryResponse],
        ],
    ]:
        # Implemented by concrete transports (sync returns the response,
        # async transports return an awaitable).
        raise NotImplementedError()

    @property
    def METHOD_NAME(self) -> str:
        # Transport kind identifier supplied by concrete subclasses.
        raise NotImplementedError()
__all__ = ("SqlTranslationServiceTransport",)
# coding=utf-8
"""Provider code for HDTorrents."""
from __future__ import unicode_literals
import logging
import re
from medusa import tv
from medusa.bs4_parser import BS4Parser
from medusa.helper.common import (
convert_size,
try_int,
)
from medusa.helper.exceptions import AuthException
from medusa.logger.adapters.style import BraceAdapter
from medusa.providers.torrent.torrent_provider import TorrentProvider
from requests.compat import urljoin
from requests.utils import dict_from_cookiejar
# Module logger; BraceAdapter allows "{}"-style lazy formatting in log calls.
log = BraceAdapter(logging.getLogger(__name__))
log.logger.addHandler(logging.NullHandler())
class HDTorrentsProvider(TorrentProvider):
"""HDTorrents Torrent provider."""
def __init__(self):
    """Initialize the class."""
    super(HDTorrentsProvider, self).__init__('HDTorrents')

    # Credentials (set by configuration; required before searching)
    self.username = None
    self.password = None

    # URLs
    self.url = 'https://hd-torrents.org/'
    self.urls = {
        'login': urljoin(self.url, 'login.php'),
        'search': urljoin(self.url, 'torrents.php'),
    }

    # Proper Strings
    self.proper_strings = ['PROPER', 'REPACK', 'REAL', 'RERIP']

    # Miscellaneous Options
    # When truthy, searches are limited to freeleech torrents (see search()).
    self.freeleech = None

    # Cache results for at least 30 minutes between RSS refreshes
    self.cache = tv.Cache(self, min_time=30)
def search(self, search_strings, age=0, ep_obj=None, **kwargs):
    """
    Search a provider and parse the results.

    :param search_strings: A dict with mode (key) and the search value (value)
    :param age: Not used
    :param ep_obj: Not used
    :returns: A list of search results (structure)
    """
    results = []
    if not self.METHOD_NAME():
        # Login failed or credentials missing: nothing to search.
        return results

    # Search Params
    search_params = {
        'search': '',
        # active=5 presumably restricts to freeleech, 1 to all active — site-specific
        'active': 5 if self.freeleech else 1,
        'options': 0,
        # category[...] values are the site's category ids for the TV sections
        'category[0]': 59,
        'category[1]': 60,
        'category[2]': 30,
        'category[3]': 38,
        'category[4]': 65,
    }

    for mode in search_strings:
        log.debug('Search mode: {0}', mode)

        for search_string in search_strings[mode]:
            if mode != 'RSS':
                search_params['search'] = search_string
                log.debug('Search string: {search}',
                          {'search': search_string})

            response = self.session.get(self.urls['search'], params=search_params)
            if not response or not response.text:
                log.debug('No data returned from provider')
                continue

            # Search result page contains some invalid html that prevents html parser from returning all data.
            # We cut everything before the table that contains the data we are interested in thus eliminating
            # the invalid html portions
            try:
                index = response.text.index('<TABLE class="mainblockcontenttt"')
            except ValueError:
                log.debug('Could not find table of torrents mainblockcontenttt')
                continue

            results += self.parse(response.text[index:], mode)

    return results
def parse(self, data, mode):
"""
Parse search results for items.
:param data: The raw response from a search
:param mode: The current mode used to search, e.g. RSS
:return: A list of items found
"""
# Units
units = ['B', 'KIB', 'MIB', 'GIB', 'TIB', 'PIB']
items = []
with BS4Parser(data, 'html5lib') as html:
torrent_table = html.find('table', class_='mainblockcontenttt')
torrent_rows = torrent_table('tr') if torrent_table else []
if not torrent_rows or torrent_rows[2].find('td', class_='lista'):
log.debug('Data returned from provider does not contain any torrents')
return items
# Cat., Active, Filename, Dl, Wl, Added, Size, Uploader, S, L, C
labels = [label.a.get_text(strip=True) if label.a else label.get_text(strip=True) for label in
torrent_rows[0]('td')]
# Skip column headers
for row in torrent_rows[1:]:
try:
cells = row.findChildren('td')[:len(labels)]
if len(cells) < len(labels):
continue
title = cells[labels.index('Filename')].a
title = title.get_text(strip=True) if title else None
link = cells[labels.index('Dl')].a
link = link.get('href') if link else None
download_url = urljoin(self.url, link) if link else None
if not all([title, download_url]):
continue
seeders = try_int(cells[labels.index('S')].get_text(strip=True))
leechers = try_int(cells[labels.index('L')].get_text(strip=True))
# Filter unseeded torrent
if seeders < self.minseed:
if mode != 'RSS':
log.debug("Discarding torrent because it doesn't meet the"
' minimum seeders: {0}. Seeders: {1}',
title, seeders)
continue
torrent_size = cells[labels.index('Size')].get_text()
size = convert_size(torrent_size, units=units) or -1
pubdate_raw = cells[labels.index('Added')].get_text()
pubdate = self.parse_pubdate(pubdate_raw, dayfirst=True)
item = {
'title': title,
'link': download_url,
'size': size,
'seeders': seeders,
'leechers': leechers,
'pubdate': pubdate,
}
if mode != 'RSS':
log.debug('Found result: {0} with {1} seeders and {2} leechers',
title, seeders, leechers)
items.append(item)
except (AttributeError, TypeError, KeyError, ValueError, IndexError):
log.exception('Failed parsing provider.')
return items
def METHOD_NAME(self):
"""Login method used for logging in before doing search and torrent downloads."""
if any(dict_from_cookiejar(self.session.cookies).values()):
return True
login_params = {
'uid': self.username,
'pwd': self.password,
'submit': 'Confirm',
}
response = self.session.post(self.urls['login'], data=login_params)
if not response or not response.text:
log.warning('Unable to connect to provider')
return False
if re.search('You need cookies enabled to log in.', response.text):
log.warning('Invalid username or password. Check your settings')
return False
return True
def _check_auth(self):
if not self.username or not self.password:
raise AuthException('Your authentication credentials for {0} are missing,'
' check your config.'.format(self.name))
return True
provider = HDTorrentsProvider() |
4,619 | check rst data | """distutils.command.check
Implements the Distutils 'check' command.
"""
from distutils.core import Command
from distutils.errors import DistutilsSetupError
try:
# docutils is installed
from docutils.utils import Reporter
from docutils.parsers.rst import Parser
from docutils import frontend
from docutils import nodes
from io import StringIO
    class SilentReporter(Reporter):
        """docutils reporter that records system messages in memory
        instead of writing them to a stream."""

        def __init__(self, source, report_level, halt_level, stream=None,
                     debug=0, encoding='ascii', error_handler='replace'):
            # Collected (level, message, children, kwargs) tuples.
            self.messages = []
            Reporter.__init__(self, source, report_level, halt_level, stream,
                              debug, encoding, error_handler)

        def system_message(self, level, message, *children, **kwargs):
            # Record the message, but still return a proper docutils node
            # so parsing can continue as usual.
            self.messages.append((level, message, children, kwargs))
            return nodes.system_message(message, level=level,
                                        type=self.levels[level],
                                        *children, **kwargs)
HAS_DOCUTILS = True
except Exception:
# Catch all exceptions because exceptions besides ImportError probably
# indicate that docutils is not ported to Py3k.
HAS_DOCUTILS = False
class check(Command):
    """This command checks the meta-data of the package.
    """
    description = ("perform some checks on the package")
    # (long option, short option, help text) triples understood by distutils.
    user_options = [('metadata', 'm', 'Verify meta-data'),
                    ('restructuredtext', 'r',
                     ('Checks if long string meta-data syntax '
                      'are reStructuredText-compliant')),
                    ('strict', 's',
                     'Will exit with an error if a check fails')]
    # Flag options (no argument expected).
    boolean_options = ['metadata', 'restructuredtext', 'strict']
def initialize_options(self):
"""Sets default values for options."""
self.restructuredtext = 0
self.metadata = 1
self.strict = 0
self._warnings = 0
    def finalize_options(self):
        """No option post-processing is needed for this command."""
        pass
    def warn(self, msg):
        """Counts the number of warnings that occurs."""
        # Track the warning count so that strict mode can fail the build
        # later (see run()); then defer to Command.warn for output.
        self._warnings += 1
        return Command.warn(self, msg)
    def run(self):
        """Runs the command."""
        # perform the various tests
        if self.metadata:
            self.check_metadata()
        if self.restructuredtext:
            if HAS_DOCUTILS:
                self.check_restructuredtext()
            elif self.strict:
                # docutils missing is only a hard error in strict mode;
                # otherwise the reST check is silently skipped.
                raise DistutilsSetupError('The docutils package is needed.')
        # let's raise an error in strict mode, if we have at least
        # one warning
        if self.strict and self._warnings > 0:
            raise DistutilsSetupError('Please correct your package.')
def check_metadata(self):
"""Ensures that all required elements of meta-data are supplied.
name, version, URL, (author and author_email) or
(maintainer and maintainer_email)).
Warns if any are missing.
"""
metadata = self.distribution.metadata
missing = []
for attr in ('name', 'version', 'url'):
if not (hasattr(metadata, attr) and getattr(metadata, attr)):
missing.append(attr)
if missing:
self.warn("missing required meta-data: %s" % ', '.join(missing))
if metadata.author:
if not metadata.author_email:
self.warn("missing meta-data: if 'author' supplied, " +
"'author_email' must be supplied too")
elif metadata.maintainer:
if not metadata.maintainer_email:
self.warn("missing meta-data: if 'maintainer' supplied, " +
"'maintainer_email' must be supplied too")
else:
self.warn("missing meta-data: either (author and author_email) " +
"or (maintainer and maintainer_email) " +
"must be supplied")
    def check_restructuredtext(self):
        """Checks if the long string fields are reST-compliant."""
        data = self.distribution.get_long_description()
        # Each entry is (level, message, children, kwargs); attach the
        # source line number when the parser recorded one.
        for warning in self.METHOD_NAME(data):
            line = warning[-1].get('line')
            if line is None:
                warning = warning[1]
            else:
                warning = '%s (line %s)' % (warning[1], line)
            self.warn(warning)
def METHOD_NAME(self, data):
"""Returns warnings when the provided data doesn't compile."""
source_path = StringIO()
parser = Parser()
settings = frontend.OptionParser(components=(Parser,)).get_default_values()
settings.tab_width = 4
settings.pep_references = None
settings.rfc_references = None
reporter = SilentReporter(source_path,
settings.report_level,
settings.halt_level,
stream=settings.warning_stream,
debug=settings.debug,
encoding=settings.error_encoding,
error_handler=settings.error_encoding_error_handler)
document = nodes.document(settings, reporter, source=source_path)
document.note_source(source_path, -1)
try:
parser.parse(data, document)
except AttributeError as e:
reporter.messages.append(
(-1, 'Could not finish the parsing: %s.' % e, '', {}))
return reporter.messages |
4,620 | replicate | import torch
from torch.nn.modules import Module
from torch.nn.parallel.scatter_gather import gather
from torch.nn.parallel.METHOD_NAME import METHOD_NAME
from torch.nn.parallel.parallel_apply import parallel_apply
from .scatter_gather import scatter_kwargs
class _DataParallel(Module):
    r"""Implements data parallelism at the module level.
    This container parallelizes the application of the given module by
    splitting the input across the specified devices by chunking in the batch
    dimension. In the forward pass, the module is replicated on each device,
    and each replica handles a portion of the input. During the backwards
    pass, gradients from each replica are summed into the original module.
    The batch size should be larger than the number of GPUs used. It should
    also be an integer multiple of the number of GPUs so that each chunk is the
    same size (so that each GPU processes the same number of samples).
    See also: :ref:`cuda-nn-dataparallel-instead`
    Arbitrary positional and keyword inputs are allowed to be passed into
    DataParallel EXCEPT Tensors. All variables will be scattered on dim
    specified (default 0). Primitive types will be broadcasted, but all
    other types will be a shallow copy and can be corrupted if written to in
    the model's forward pass.
    Args:
        module: module to be parallelized
        device_ids: CUDA devices (default: all devices)
        output_device: device location of output (default: device_ids[0])
    Example::
        >>> net = torch.nn.DataParallel(model, device_ids=[0, 1, 2])
        >>> output = net(input_var)
    """

    # TODO: update notes/cuda.rst when this class handles 8+ GPUs well

    def __init__(
        self, module, device_ids=None, output_device=None, dim=0, chunk_sizes=None,
    ):
        super(_DataParallel, self).__init__()
        # CPU-only fallback: keep the wrapped module and run it directly
        # (see forward(), which short-circuits on empty device_ids).
        if not torch.cuda.is_available():
            self.module = module
            self.device_ids = []
            return
        if device_ids is None:
            device_ids = list(range(torch.cuda.device_count()))
        if output_device is None:
            output_device = device_ids[0]
        self.dim = dim
        self.module = module
        self.device_ids = device_ids
        # Per-device batch chunk sizes forwarded to scatter(); None means
        # an even split across devices.
        self.chunk_sizes = chunk_sizes
        self.output_device = output_device
        # Single GPU: move the module there once; no replication needed.
        if len(self.device_ids) == 1:
            self.module.cuda(device_ids[0])

    def forward(self, *inputs, **kwargs):
        # No CUDA devices configured: run the module in-place.
        if not self.device_ids:
            return self.module(*inputs, **kwargs)
        inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids, self.chunk_sizes)
        if len(self.device_ids) == 1:
            return self.module(*inputs[0], **kwargs[0])
        # Replicate only onto as many devices as there are input chunks.
        replicas = self.METHOD_NAME(self.module, self.device_ids[: len(inputs)])
        outputs = self.parallel_apply(replicas, inputs, kwargs)
        return self.gather(outputs, self.output_device)

    def METHOD_NAME(self, module, device_ids):
        return METHOD_NAME(module, device_ids)

    def scatter(self, inputs, kwargs, device_ids, chunk_sizes):
        return scatter_kwargs(
            inputs, kwargs, device_ids, dim=self.dim, chunk_sizes=self.chunk_sizes,
        )

    def parallel_apply(self, replicas, inputs, kwargs):
        return parallel_apply(
            replicas, inputs, kwargs, self.device_ids[: len(replicas)]
        )

    def gather(self, outputs, output_device):
        return gather(outputs, output_device, dim=self.dim)
def data_parallel(
    module, inputs, device_ids=None, output_device=None, dim=0, module_kwargs=None,
):
    r"""Evaluates module(input) in parallel across the GPUs given in device_ids.
    This is the functional version of the DataParallel module.
    Args:
        module: the module to evaluate in parallel
        inputs: inputs to the module
        device_ids: GPU ids on which to replicate module
        output_device: GPU location of the output Use -1 to indicate the CPU.
            (default: device_ids[0])
    Returns:
        a Variable containing the result of module(input) located on
        output_device
    """
    if not isinstance(inputs, tuple):
        inputs = (inputs,)

    if device_ids is None:
        device_ids = list(range(torch.cuda.device_count()))

    if output_device is None:
        output_device = device_ids[0]

    inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim)
    # Single device: evaluate directly, no replication/gather round-trip.
    if len(device_ids) == 1:
        return module(*inputs[0], **module_kwargs[0])
    # Only replicate onto as many devices as there are input chunks.
    used_device_ids = device_ids[: len(inputs)]
    replicas = METHOD_NAME(module, used_device_ids)
    outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids)
    return gather(outputs, output_device, dim)
def DataParallel(module, device_ids=None, output_device=None, dim=0, chunk_sizes=None):
    """Wrap *module* for data parallelism, honouring custom chunk sizes.

    Falls back to the stock ``torch.nn.DataParallel`` when ``chunk_sizes``
    is omitted or when every chunk has the same size (the default even
    scattering is then equivalent); otherwise returns the local
    ``_DataParallel`` variant that scatters inputs by the requested sizes.
    """
    if chunk_sizes is None:
        return torch.nn.DataParallel(module, device_ids, output_device, dim)
    # All chunks equal means the custom scattering would match the default
    # even split (the original flag-setting loop lacked an early exit).
    standard_size = all(size == chunk_sizes[0] for size in chunk_sizes)
    if standard_size:
        return torch.nn.DataParallel(module, device_ids, output_device, dim)
    return _DataParallel(module, device_ids, output_device, dim, chunk_sizes)
4,621 | from json | # coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\
The version of the OpenAPI document: 1.0.0
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
""" # noqa: E501
from __future__ import annotations
from inspect import getfullargspec
import json
import pprint
import re # noqa: F401
from typing import Any, List, Optional
from pydantic import BaseModel, Field, StrictStr, ValidationError, validator
from petstore_api.models.basque_pig import BasquePig
from petstore_api.models.danish_pig import DanishPig
from typing import Union, Any, List, TYPE_CHECKING
from pydantic import StrictStr, Field
PIG_ONE_OF_SCHEMAS = ["BasquePig", "DanishPig"]
class Pig(BaseModel):
    """
    Pig
    """
    # data type: BasquePig
    oneof_schema_1_validator: Optional[BasquePig] = None
    # data type: DanishPig
    oneof_schema_2_validator: Optional[DanishPig] = None
    if TYPE_CHECKING:
        actual_instance: Union[BasquePig, DanishPig]
    else:
        actual_instance: Any
    one_of_schemas: List[str] = Field(PIG_ONE_OF_SCHEMAS, const=True)

    class Config:
        validate_assignment = True

    discriminator_value_class_map = {
    }

    def __init__(self, *args, **kwargs) -> None:
        """Accept either one positional payload (the oneOf instance) or
        keyword arguments, never both."""
        if args:
            if len(args) > 1:
                raise ValueError("If a position argument is used, only 1 is allowed to set `actual_instance`")
            if kwargs:
                raise ValueError("If a position argument is used, keyword arguments cannot be used.")
            super().__init__(actual_instance=args[0])
        else:
            super().__init__(**kwargs)

    @validator('actual_instance')
    def actual_instance_must_validate_oneof(cls, v):
        # oneOf semantics: exactly one schema must match the assigned value.
        instance = Pig.construct()
        error_messages = []
        match = 0
        # validate data type: BasquePig
        if not isinstance(v, BasquePig):
            error_messages.append(f"Error! Input type `{type(v)}` is not `BasquePig`")
        else:
            match += 1
        # validate data type: DanishPig
        if not isinstance(v, DanishPig):
            error_messages.append(f"Error! Input type `{type(v)}` is not `DanishPig`")
        else:
            match += 1
        if match > 1:
            # more than 1 match
            raise ValueError("Multiple matches found when setting `actual_instance` in Pig with oneOf schemas: BasquePig, DanishPig. Details: " + ", ".join(error_messages))
        elif match == 0:
            # no match
            raise ValueError("No match found when setting `actual_instance` in Pig with oneOf schemas: BasquePig, DanishPig. Details: " + ", ".join(error_messages))
        else:
            return v

    @classmethod
    def from_dict(cls, obj: dict) -> Pig:
        # Round-trip through JSON so dict input shares the string code path.
        return cls.METHOD_NAME(json.dumps(obj))

    @classmethod
    def METHOD_NAME(cls, json_str: str) -> Pig:
        """Returns the object represented by the json string"""
        instance = Pig.construct()
        error_messages = []
        match = 0

        # deserialize data into BasquePig
        try:
            instance.actual_instance = BasquePig.METHOD_NAME(json_str)
            match += 1
        except (ValidationError, ValueError) as e:
            error_messages.append(str(e))
        # deserialize data into DanishPig
        try:
            instance.actual_instance = DanishPig.METHOD_NAME(json_str)
            match += 1
        except (ValidationError, ValueError) as e:
            error_messages.append(str(e))

        if match > 1:
            # more than 1 match
            raise ValueError("Multiple matches found when deserializing the JSON string into Pig with oneOf schemas: BasquePig, DanishPig. Details: " + ", ".join(error_messages))
        elif match == 0:
            # no match
            raise ValueError("No match found when deserializing the JSON string into Pig with oneOf schemas: BasquePig, DanishPig. Details: " + ", ".join(error_messages))
        else:
            return instance

    def to_json(self) -> str:
        """Returns the JSON representation of the actual instance"""
        if self.actual_instance is None:
            return "null"

        to_json = getattr(self.actual_instance, "to_json", None)
        if callable(to_json):
            return self.actual_instance.to_json()
        else:
            return json.dumps(self.actual_instance)

    def to_dict(self) -> dict:
        """Returns the dict representation of the actual instance"""
        if self.actual_instance is None:
            return None

        to_dict = getattr(self.actual_instance, "to_dict", None)
        if callable(to_dict):
            return self.actual_instance.to_dict()
        else:
            # primitive type
            return self.actual_instance

    def to_str(self) -> str:
        """Returns the string representation of the actual instance"""
        return pprint.pformat(self.dict())
|
4,622 | get user cache dir | import os.path
import re
import subprocess
import sys
from dataclasses import dataclass
from logging import getLogger
from typing import List, Optional, Set, Tuple
import pkg_resources
logger = getLogger(__name__)
@dataclass
class ParsedWheelFilename:
    """Components of a wheel file name
    (``{project}-{version}[-{build}]-{python}-{abi}-{platform}.whl``)."""

    project: str
    version: str
    # Optional build tag; None when the filename has no build segment.
    build: Optional[str]
    # Compound tags are split on ".", e.g. "cp39.cp310" -> ["cp39", "cp310"].
    # Annotated with typing.List for consistency with the rest of this
    # module, which imports and uses List/Optional/Set/Tuple from typing.
    python_tags: List[str]
    abi_tags: List[str]
    platform_tags: List[str]
def parse_wheel_filename(filename: str) -> ParsedWheelFilename:
    """Split a wheel file name into its components.

    Adapted from https://github.com/jwodder/wheel-filename/blob/1568eb2f1726425588550067f09f5c0fde6c9652/src/wheel_filename/__init__.py

    Raises ``ValueError`` when the basename is not a valid wheel name.
    """
    tag = r"[\w\d]+"  # python, abi and platform tags share one alphabet
    wheel_re = re.compile(
        r"(?P<project>[A-Za-z0-9](?:[A-Za-z0-9._]*[A-Za-z0-9])?)"
        r"-(?P<version>[A-Za-z0-9_.!+]+)"
        r"(?:-(?P<build>[0-9][\w\d.]*))?"
        rf"-(?P<python_tags>{tag}(?:\.{tag})*)"
        rf"-(?P<abi_tags>{tag}(?:\.{tag})*)"
        rf"-(?P<platform_tags>{tag}(?:\.{tag})*)"
        r"\.[Ww][Hh][Ll]"
    )
    basename = os.path.basename(os.fsdecode(filename))
    match = wheel_re.fullmatch(basename)
    if match is None:
        raise ValueError(f"Unexpected wheel filename {basename}")

    def tags(group: str) -> List[str]:
        # Compound tag groups are "."-separated within the filename.
        return match.group(group).split(".")

    return ParsedWheelFilename(
        project=match.group("project"),
        version=match.group("version"),
        build=match.group("build"),
        python_tags=tags("python_tags"),
        abi_tags=tags("abi_tags"),
        platform_tags=tags("platform_tags"),
    )
def create_dist_info_version_name(dist_name: str, version: str) -> str:
    """Return the ``{name}-{version}`` prefix of a .dist-info directory,
    with name and version escaped as the binary-distribution spec requires."""
    # https://packaging.python.org/en/latest/specifications/binary-distribution-format/#escaping-and-unicode
    # https://peps.python.org/pep-0440/
    safe_name = pkg_resources.safe_name(dist_name).replace("-", "_")
    safe_version = pkg_resources.safe_version(version)
    return f"{safe_name}-{safe_version}"
def get_windows_folder(ID: int) -> str:
    """Resolve a Windows special-folder path for the given CSIDL id.

    Only usable on Windows; raises ``AssertionError`` elsewhere.
    """
    # http://stackoverflow.com/a/3859336/261181
    # http://www.installmate.com/support/im9/using/symbols/functions/csidls.htm
    if sys.platform == "win32":
        # Imported lazily: ctypes.wintypes only exists on Windows.
        import ctypes.wintypes

        SHGFP_TYPE_CURRENT = 0
        buf = ctypes.create_unicode_buffer(ctypes.wintypes.MAX_PATH)
        ctypes.windll.shell32.SHGetFolderPathW(0, ID, 0, SHGFP_TYPE_CURRENT, buf)
        assert buf.value
        return buf.value
    else:
        raise AssertionError("Meant to be used only on Windows")
def get_windows_roaming_appdata_dir() -> str:
    # CSIDL 26 = CSIDL_APPDATA (roaming application data).
    return get_windows_folder(26)
def get_windows_local_appdata_dir() -> str:
    # CSIDL 28 = CSIDL_LOCAL_APPDATA (non-roaming application data).
    return get_windows_folder(28)
def METHOD_NAME() -> str:
    """Return the per-user cache directory for the current platform."""
    if sys.platform == "win32":
        # The original wrapped this single path in os.path.join(), which is
        # a no-op with one argument; call the helper directly.
        return get_windows_local_appdata_dir()
    elif sys.platform == "darwin":
        return os.path.expanduser("~/Library/Caches")
    else:
        # Linux/BSD: honour the XDG base-directory specification.
        return os.getenv("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
def get_base_executable():
    """Return the interpreter of the base installation that this
    (possibly virtual) environment derives from."""
    # Not inside a venv: the running interpreter is already the base one.
    if sys.exec_prefix == sys.base_exec_prefix:
        return sys.executable

    if sys.platform == "win32":
        # On Windows the base interpreter usually sits next to the venv
        # one, under the base prefix, with the same file name.
        guess = sys.base_exec_prefix + "\\" + os.path.basename(sys.executable)
        if os.path.isfile(guess):
            return guess

    # POSIX venvs typically symlink the interpreter to the base one.
    if os.path.islink(sys.executable):
        return os.path.realpath(sys.executable)

    raise RuntimeError("Don't know how to locate base executable")
def get_venv_executable(path: str) -> str:
    """Return the path of the Python interpreter inside the venv at *path*."""
    # Windows keeps interpreters under Scripts\, POSIX under bin/.
    if sys.platform == "win32":
        subdir, exe = "Scripts", "python.exe"
    else:
        subdir, exe = "bin", "python3"
    return os.path.join(path, subdir, exe)
def get_venv_site_packages_path(venv_path: str) -> str:
    """Ask the venv's own interpreter for its site-packages directory."""
    logger.debug("Querying site packages path for %s", venv_path)
    args = [
        get_venv_executable(venv_path),
        "-c",
        "import site; print([p for p in site.getsitepackages() if 'site-packages' in p][0])",
    ]
    result = subprocess.check_output(
        args,
        executable=args[0],
        text=True,
        stdin=subprocess.DEVNULL,
    ).strip()
    # Sanity check: the reported path must live strictly inside the venv.
    assert result.startswith(venv_path) and result != venv_path
    logger.debug("Got site packages path %s", result)
    return result
def parse_meta_dir_name(name: str) -> Tuple[str, str]:
    """Split a ``<dist>-<version>.dist-info`` directory name into its parts."""
    suffix = ".dist-info"
    assert name.endswith(suffix)
    stem = name[: -len(suffix)]
    dist, version = stem.split("-")
    return dist, version
def parse_dist_file_name(file_name: str) -> Tuple[str, str, str]:
    """Split a distribution file name into ``(project, version, suffix)``.

    Handles ``.whl``, ``.zip`` and ``.tar.gz`` files; the result is
    lower-cased.  Raises ``AssertionError`` for any other extension.
    """
    file_name = file_name.lower()

    if file_name.endswith(".whl"):
        parsed = parse_wheel_filename(file_name)
        return parsed.project, parsed.version, ".whl"

    for suffix in [".zip", ".tar.gz"]:
        if file_name.endswith(suffix):
            file_name = file_name[: -len(suffix)]
            break
    else:
        raise AssertionError("Unexpected file name " + file_name)

    # dist name and version is separated by the dash, but both parts can also contain dashes...
    if file_name.count("-") == 1:
        dist_name, version = file_name.split("-")
        return dist_name, version, suffix

    # assuming dashes in the version part have digit on their left and letter
    # on their right, neutralise those before locating the right-most dash
    collapsed = re.sub(r"(\d)-([a-zA-Z])", r"\1_\2", file_name)
    dist_name = collapsed.rsplit("-", maxsplit=1)[0]
    version = file_name[len(dist_name) + 1 :]
    return dist_name, version, suffix
def starts_with_continuation_byte(data: bytes) -> bool:
    """Return True when *data* is non-empty and begins with a UTF-8
    continuation byte."""
    return len(data) > 0 and is_continuation_byte(data[0])
def is_continuation_byte(byte: int) -> bool:
    """Return True for UTF-8 continuation bytes (bit pattern ``10xxxxxx``)."""
    high_bits = byte & 0xC0  # keep only the two most-significant bits
    return high_bits == 0x80
def custom_normalize_dist_name(name: str) -> str:
    """Normalize a distribution name (PEP 503 style, except separators
    become underscores instead of dashes)."""
    # https://peps.python.org/pep-0503/#normalized-names
    return pkg_resources.safe_name(name).lower().replace("-", "_").replace(".", "_")
def list_volumes(skip_letters: Optional[Set[str]] = None) -> List[str]:
skip_letters = skip_letters or set()
"Adapted from https://github.com/ntoll/uflash/blob/master/uflash.py"
if sys.platform == "win32":
import ctypes
#
# In certain circumstances, volumes are allocated to USB
# storage devices which cause a Windows popup to raise if their
# volume contains no media. Wrapping the check in SetErrorMode
# with SEM_FAILCRITICALERRORS (1) prevents this popup.
#
old_mode = ctypes.windll.kernel32.SetErrorMode(1) # @UndefinedVariable
try:
volumes = []
for disk in "ABCDEFGHIJKLMNOPQRSTUVWXYZ":
if disk in skip_letters:
continue
path = "{}:\\".format(disk)
if os.path.exists(path):
volumes.append(path)
return volumes
finally:
ctypes.windll.kernel32.SetErrorMode(old_mode) # @UndefinedVariable
else:
# 'posix' means we're on Linux or OSX (Mac).
# Call the unix "mount" command to list the mounted volumes.
mount_output = subprocess.check_output(["mount"], stdin=subprocess.DEVNULL).splitlines()
return [x.split()[2].decode("utf-8") for x in mount_output] |
4,623 | error format | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "cosmosdb postgres cluster delete",
    is_preview=True,
    confirmation="Are you sure you want to perform this operation?",
)
class Delete(AAZCommand):
    """Delete a cluster together with servers in it.

    :example: Delete the cluster
        az cosmosdb postgres cluster delete -n "test-cluster" -g "testGroup" --subscription "ffffffff-ffff-ffff-ffff-ffffffffffff"
    """

    _aaz_info = {
        "version": "2022-11-08",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.dbforpostgresql/servergroupsv2/{}", "2022-11-08"],
        ]
    }

    AZ_SUPPORT_NO_WAIT = True

    def _handler(self, command_args):
        # Long-running operation: return an LRO poller with no result payload.
        super()._handler(command_args)
        return self.build_lro_poller(self._execute_operations, None)

    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        # The argument schema is cached on the class; build it only once.
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""

        _args_schema = cls._args_schema
        _args_schema.cluster_name = AAZStrArg(
            options=["-n", "--name", "--cluster-name"],
            help="The name of the cluster.",
            required=True,
            id_part="name",
            fmt=AAZStrArgFormat(
                pattern="^(?![0-9]+$)(?!-)[a-z0-9-]{3,40}(?<!-)$",
                max_length=40,
                min_length=3,
            ),
        )
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        return cls._args_schema

    def _execute_operations(self):
        self.pre_operations()
        yield self.ClustersDelete(ctx=self.ctx)()
        self.post_operations()

    @register_callback
    def pre_operations(self):
        pass

    @register_callback
    def post_operations(self):
        pass

    class ClustersDelete(AAZHttpOperation):
        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            # 202, 200 and 204 all start LRO polling (204 with the no-body
            # callback); any other status is handled as an error.
            if session.http_response.status_code in [202]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_200,
                    self.on_error,
                    lro_options={"final-state-via": "location"},
                    path_format_arguments=self.url_parameters,
                )
            if session.http_response.status_code in [200]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_200,
                    self.on_error,
                    lro_options={"final-state-via": "location"},
                    path_format_arguments=self.url_parameters,
                )
            if session.http_response.status_code in [204]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_204,
                    self.on_error,
                    lro_options={"final-state-via": "location"},
                    path_format_arguments=self.url_parameters,
                )

            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/serverGroupsv2/{clusterName}",
                **self.url_parameters
            )

        @property
        def method(self):
            return "DELETE"

        @property
        def METHOD_NAME(self):
            return "MgmtErrorFormat"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "clusterName", self.ctx.args.cluster_name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2022-11-08",
                    required=True,
                ),
            }
            return parameters

        def on_200(self, session):
            # A successful delete returns no body to deserialize.
            pass

        def on_204(self, session):
            pass
class _DeleteHelper:
    """Helper class for Delete"""
    # Intentionally empty: no shared serialization helpers are generated
    # for this command.
__all__ = ["Delete"] |
4,624 | test validate rbtree null root parent | # Copyright (c) Meta Platforms, Inc. and affiliates.
# SPDX-License-Identifier: LGPL-2.1-or-later
import collections
from drgn import NULL
from drgn.helpers import ValidationError
from drgn.helpers.linux.rbtree import (
RB_EMPTY_NODE,
RB_EMPTY_ROOT,
rb_find,
rb_first,
rb_last,
rb_next,
rb_parent,
rb_prev,
rbtree_inorder_for_each,
rbtree_inorder_for_each_entry,
validate_rbtree,
validate_rbtree_inorder_for_each_entry,
)
from tests.linux_kernel import LinuxKernelTestCase, skip_unless_have_test_kmod
@skip_unless_have_test_kmod
class TestRbtree(LinuxKernelTestCase):
@classmethod
def setUpClass(cls):
cls.root = cls.prog["drgn_test_rb_root"].address_of_()
cls.entries = cls.prog["drgn_test_rb_entries"]
cls.num_entries = 4
cls.empty_root = cls.prog["drgn_test_empty_rb_root"].address_of_()
def node(self, n):
return self.entries[n].node.address_of_()
def entry(self, n):
return self.entries[n].address_of_()
def test_RB_EMPTY_ROOT(self):
self.assertTrue(RB_EMPTY_ROOT(self.empty_root))
self.assertFalse(RB_EMPTY_ROOT(self.root))
def test_RB_EMPTY_NODE(self):
self.assertTrue(
RB_EMPTY_NODE(self.prog["drgn_test_empty_rb_node"].address_of_())
)
self.assertFalse(RB_EMPTY_NODE(self.node(0)))
def test_rb_parent(self):
if self.root.rb_node.rb_left:
self.assertEqual(rb_parent(self.root.rb_node.rb_left), self.root.rb_node)
if self.root.rb_node.rb_right:
self.assertEqual(rb_parent(self.root.rb_node.rb_right), self.root.rb_node)
def test_rb_first(self):
self.assertEqual(rb_first(self.root), self.node(0))
def test_rb_last(self):
self.assertEqual(rb_last(self.root), self.node(self.num_entries - 1))
def test_rb_next(self):
for i in range(self.num_entries - 1):
self.assertEqual(rb_next(self.node(i)), self.node(i + 1))
self.assertEqual(
rb_next(self.node(self.num_entries - 1)),
NULL(self.prog, "struct rb_node *"),
)
def test_rb_prev(self):
for i in range(1, self.num_entries):
self.assertEqual(rb_prev(self.node(i)), self.node(i - 1))
self.assertEqual(rb_prev(self.node(0)), NULL(self.prog, "struct rb_node *"))
def test_rbtree_inorder_for_each(self):
self.assertEqual(
list(rbtree_inorder_for_each(self.root)),
[self.node(i) for i in range(self.num_entries)],
)
def test_rbtree_inorder_for_each_entry(self):
self.assertEqual(
list(
rbtree_inorder_for_each_entry(
"struct drgn_test_rb_entry", self.root, "node"
)
),
[self.entry(i) for i in range(self.num_entries)],
)
def test_rb_find(self):
def cmp(key, obj):
value = obj.value.value_()
return key - value
for i in range(self.num_entries):
self.assertEqual(
rb_find("struct drgn_test_rb_entry", self.root, "node", i, cmp),
self.entry(i),
)
self.assertEqual(
rb_find(
"struct drgn_test_rb_entry", self.root, "node", self.num_entries, cmp
),
NULL(self.prog, "struct drgn_test_rb_entry *"),
)
@staticmethod
def cmp_entries(a, b):
return a.value.value_() - b.value.value_()
def test_validate_rbtree_success(self):
for root, allow_equal in (
(self.root, False),
(self.empty_root, False),
(self.prog["drgn_test_rbtree_with_equal"].address_of_(), True),
):
validate_rbtree(
"struct drgn_test_rb_entry", root, "node", self.cmp_entries, allow_equal
)
self.assertEqual(
list(
validate_rbtree_inorder_for_each_entry(
"struct drgn_test_rb_entry",
root,
"node",
self.cmp_entries,
allow_equal,
)
),
list(
rbtree_inorder_for_each_entry(
"struct drgn_test_rb_entry", root, "node"
)
),
)
def assert_validation_error(self, regex, name):
self.assertRaisesRegex(
ValidationError,
regex,
validate_rbtree,
"struct drgn_test_rb_entry",
self.prog[name].address_of_(),
"node",
self.cmp_entries,
False,
)
self.assertRaisesRegex(
ValidationError,
regex,
collections.deque,
validate_rbtree_inorder_for_each_entry(
"struct drgn_test_rb_entry",
self.prog[name].address_of_(),
"node",
self.cmp_entries,
False,
),
0,
)
def test_validate_rbtree_has_equal(self):
self.assert_validation_error("compares equal", "drgn_test_rbtree_with_equal")
def test_validate_rbtree_out_of_order(self):
self.assert_validation_error(
"compares (greater|less) than", "drgn_test_rbtree_out_of_order"
)
def METHOD_NAME(self):
self.assert_validation_error(
"root node .* has parent", "drgn_test_rbtree_with_bad_root_parent"
)
def test_validate_rbtree_red_root(self):
self.assert_validation_error(
"root node .* is red", "drgn_test_rbtree_with_red_root"
)
def test_validate_rbtree_inconsistent_parents(self):
self.assert_validation_error(
"rb_parent", "drgn_test_rbtree_with_inconsistent_parents"
)
def test_validate_rbtree_red_violation(self):
self.assert_validation_error(
"red node .* has red child", "drgn_test_rbtree_with_red_violation"
)
def test_validate_rbtree_black_violation(self):
self.assert_validation_error(
"unequal black heights", "drgn_test_rbtree_with_black_violation"
) |
4,625 | handler | import asyncio
import time
from datetime import datetime
from unittest.mock import AsyncMock, patch
import pytest
from aiogram import Bot, flags
from aiogram.dispatcher.event.METHOD_NAME import HandlerObject
from aiogram.types import Chat, Message, User
from aiogram.utils.chat_action import ChatActionMiddleware, ChatActionSender
from tests.mocked_bot import MockedBot
class TestChatActionSender:
    async def test_wait(self, bot: Bot, loop: asyncio.BaseEventLoop):
        # Setting the close event before waiting must make _wait return
        # well before the requested one-second timeout.
        sender = ChatActionSender.typing(bot=bot, chat_id=42)
        loop.call_soon(sender._close_event.set)
        start = time.monotonic()
        await sender._wait(1)
        assert time.monotonic() - start < 1

    @pytest.mark.parametrize(
        "action",
        [
            "typing",
            "upload_photo",
            "record_video",
            "upload_video",
            "record_voice",
            "upload_voice",
            "upload_document",
            "choose_sticker",
            "find_location",
            "record_video_note",
            "upload_video_note",
        ],
    )
    async def test_factory(self, action: str, bot: MockedBot):
        # Every named factory must produce a sender configured for that action.
        sender_factory = getattr(ChatActionSender, action)
        sender = sender_factory(chat_id=42, bot=bot)
        assert isinstance(sender, ChatActionSender)
        assert sender.action == action
        assert sender.chat_id == 42
        assert sender.bot is bot

    async def test_worker(self, bot: Bot):
        with patch(
            "aiogram.client.bot.Bot.send_chat_action",
            new_callable=AsyncMock,
        ) as mocked_send_chat_action:
            async with ChatActionSender.typing(
                bot=bot, chat_id=42, interval=0.01, initial_sleep=0
            ):
                await asyncio.sleep(0.1)
            # With a 0.01s interval across a 0.1s window the action must
            # have been re-sent more than once.
            assert mocked_send_chat_action.await_count > 1
            mocked_send_chat_action.assert_awaited_with(
                action="typing",
                chat_id=42,
                message_thread_id=None,
            )

    async def test_contextmanager(self, bot: MockedBot):
        sender: ChatActionSender = ChatActionSender.typing(bot=bot, chat_id=42)
        assert not sender.running
        await sender._stop()  # nothing
        async with sender:
            assert sender.running
            assert not sender._close_event.is_set()
            # Starting an already-running sender must raise.
            with pytest.raises(RuntimeError):
                await sender._run()
        assert not sender.running
class TestChatActionMiddleware:
    """Unit tests for ChatActionMiddleware dispatch of the chat_action flag.

    FIX: removed a stray ``|`` extraction artifact that trailed the final
    statement and broke the file's syntax.
    """

    @pytest.mark.parametrize(
        "value",
        [
            None,
            "sticker",
            {"action": "upload_photo"},
            {"interval": 1, "initial_sleep": 0.5},
        ],
    )
    async def test_call_default(self, value, bot: Bot):
        """The sender runs only for Message events, in every flag form.

        ``value`` covers the supported flag spellings: bare decorator,
        action string, and keyword-dict configurations.
        """
        async def METHOD_NAME(event, data):
            return "OK"
        if value is None:
            handler1 = flags.chat_action(METHOD_NAME)
        else:
            handler1 = flags.chat_action(value)(METHOD_NAME)
        middleware = ChatActionMiddleware()
        with patch(
            "aiogram.utils.chat_action.ChatActionSender._run",
            new_callable=AsyncMock,
        ) as mocked_run, patch(
            "aiogram.utils.chat_action.ChatActionSender._stop",
            new_callable=AsyncMock,
        ) as mocked_stop:
            data = {"handler": HandlerObject(callback=handler1), "bot": bot}
            message = Message(
                chat=Chat(id=42, type="private", title="Test"),
                from_user=User(id=42, is_bot=False, first_name="Test"),
                date=datetime.now(),
                message_id=42,
            )
            # Non-Message event: the handler runs but no sender is started.
            result = await middleware(METHOD_NAME=handler1, event=None, data=data)
            assert result == "OK"
            mocked_run.assert_not_awaited()
            mocked_stop.assert_not_awaited()
            # Message event: the sender must be started and stopped around it.
            result = await middleware(
                METHOD_NAME=handler1,
                event=message,
                data=data,
            )
            assert result == "OK"
            mocked_run.assert_awaited()
            mocked_stop.assert_awaited()
"""
Tests for the `TypeChecker`-based type interface.
The actual correctness of the type checking is handled in
`test_jsonschema_test_suite`; these tests check that TypeChecker
functions correctly at a more granular level.
"""
from collections import namedtuple
from unittest import TestCase
from jsonschema import ValidationError, _keywords
from jsonschema._types import TypeChecker
from jsonschema.exceptions import UndefinedTypeCheck, UnknownType
from jsonschema.validators import Draft202012Validator, extend
def equals_2(checker, instance):
    """Type-checker predicate used by these tests: passes iff instance == 2.

    The ``checker`` argument is part of the TypeChecker callback protocol
    and is deliberately ignored.
    """
    is_two = instance == 2
    return is_two
def is_namedtuple(instance):
    """Return a truthy value iff *instance* looks like a namedtuple.

    Mirrors the original contract exactly: non-tuples give False, plain
    tuples give None (falsy), and namedtuples give their ``_fields``.
    """
    if not isinstance(instance, tuple):
        return False
    return getattr(instance, "_fields", None)
def is_object_or_named_tuple(checker, instance):
    """Accept draft 2020-12 "object" instances as well as namedtuples."""
    checker_2020 = Draft202012Validator.TYPE_CHECKER
    return True if checker_2020.is_type(instance, "object") else is_namedtuple(instance)
class TestTypeChecker(TestCase):
    """Granular tests of the TypeChecker container itself."""

    def METHOD_NAME(self):
        """is_type() delegates to the registered predicate for the type."""
        checker = TypeChecker({"two": equals_2})
        self.assertEqual(
            (
                checker.is_type(instance=2, type="two"),
                checker.is_type(instance="bar", type="two"),
            ),
            (True, False),
        )
    def test_is_unknown_type(self):
        """Unknown types raise UndefinedTypeCheck with a helpful message."""
        with self.assertRaises(UndefinedTypeCheck) as e:
            TypeChecker().is_type(4, "foobar")
        self.assertIn(
            "'foobar' is unknown to this type checker",
            str(e.exception),
        )
        # The implementation detail (a KeyError lookup miss) must not leak
        # into the user-visible traceback.
        self.assertTrue(
            e.exception.__suppress_context__,
            msg="Expected the internal KeyError to be hidden.",
        )
    def test_checks_can_be_added_at_init(self):
        """Constructor-provided checks equal post-hoc redefine()d ones."""
        checker = TypeChecker({"two": equals_2})
        self.assertEqual(checker, TypeChecker().redefine("two", equals_2))
    def test_redefine_existing_type(self):
        """redefine() on an existing name replaces its predicate."""
        self.assertEqual(
            TypeChecker().redefine("two", object()).redefine("two", equals_2),
            TypeChecker().redefine("two", equals_2),
        )
    def test_remove(self):
        """remove() returns a checker without the named type."""
        self.assertEqual(
            TypeChecker({"two": equals_2}).remove("two"),
            TypeChecker(),
        )
    def test_remove_unknown_type(self):
        """Removing an unregistered type raises UndefinedTypeCheck."""
        with self.assertRaises(UndefinedTypeCheck) as context:
            TypeChecker().remove("foobar")
        self.assertIn("foobar", str(context.exception))
    def test_redefine_many(self):
        """redefine_many() is equivalent to chained redefine() calls."""
        self.assertEqual(
            TypeChecker().redefine_many({"foo": int, "bar": str}),
            TypeChecker().redefine("foo", int).redefine("bar", str),
        )
    def test_remove_multiple(self):
        """remove() accepts several names at once."""
        self.assertEqual(
            TypeChecker({"foo": int, "bar": str}).remove("foo", "bar"),
            TypeChecker(),
        )
    def test_type_check_can_raise_key_error(self):
        """
        Make sure no one writes:
            try:
                self._type_checkers[type](...)
            except KeyError:
        ignoring the fact that the function itself can raise that.
        """
        error = KeyError("Stuff")
        def raises_keyerror(checker, instance):
            raise error
        # The exact KeyError raised by the predicate must propagate
        # unchanged, not be swallowed as "unknown type".
        with self.assertRaises(KeyError) as context:
            TypeChecker({"foo": raises_keyerror}).is_type(4, "foo")
        self.assertIs(context.exception, error)
    def test_repr(self):
        """repr() lists the registered type names, sorted."""
        checker = TypeChecker({"foo": is_namedtuple, "bar": is_namedtuple})
        self.assertEqual(repr(checker), "<TypeChecker types={'bar', 'foo'}>")
class TestCustomTypes(TestCase):
    """End-to-end tests: extending a validator's TypeChecker changes which
    instances pass "type" validation.

    FIX: removed a stray ``|`` extraction artifact that trailed the final
    statement and broke the file's syntax.
    """

    def test_simple_type_can_be_extended(self):
        """Redefining "integer" can make numeric strings validate."""
        def int_or_str_int(checker, instance):
            if not isinstance(instance, (int, str)):
                return False
            try:
                int(instance)
            except ValueError:
                return False
            return True
        CustomValidator = extend(
            Draft202012Validator,
            type_checker=Draft202012Validator.TYPE_CHECKER.redefine(
                "integer", int_or_str_int,
            ),
        )
        validator = CustomValidator({"type": "integer"})
        validator.validate(4)
        validator.validate("4")
        with self.assertRaises(ValidationError):
            validator.validate(4.4)
        with self.assertRaises(ValidationError):
            validator.validate("foo")
    def test_object_can_be_extended(self):
        """A namedtuple can be made to satisfy {"type": "object"}."""
        schema = {"type": "object"}
        Point = namedtuple("Point", ["x", "y"])
        type_checker = Draft202012Validator.TYPE_CHECKER.redefine(
            "object", is_object_or_named_tuple,
        )
        CustomValidator = extend(
            Draft202012Validator,
            type_checker=type_checker,
        )
        validator = CustomValidator(schema)
        validator.validate(Point(x=4, y=5))
    def test_object_extensions_require_custom_validators(self):
        """Extending only the type check is not enough for object keywords
        like "required" — those validators still expect real mappings."""
        schema = {"type": "object", "required": ["x"]}
        type_checker = Draft202012Validator.TYPE_CHECKER.redefine(
            "object", is_object_or_named_tuple,
        )
        CustomValidator = extend(
            Draft202012Validator,
            type_checker=type_checker,
        )
        validator = CustomValidator(schema)
        Point = namedtuple("Point", ["x", "y"])
        # Cannot handle required
        with self.assertRaises(ValidationError):
            validator.validate(Point(x=4, y=5))
    def test_object_extensions_can_handle_custom_validators(self):
        """Pairing the type check with coercing keyword validators makes
        namedtuples fully validate as objects."""
        schema = {
            "type": "object",
            "required": ["x"],
            "properties": {"x": {"type": "integer"}},
        }
        type_checker = Draft202012Validator.TYPE_CHECKER.redefine(
            "object", is_object_or_named_tuple,
        )
        def coerce_named_tuple(fn):
            # Wrap a keyword validator so namedtuples are seen as dicts.
            def coerced(validator, value, instance, schema):
                if is_namedtuple(instance):
                    instance = instance._asdict()
                return fn(validator, value, instance, schema)
            return coerced
        required = coerce_named_tuple(_keywords.required)
        properties = coerce_named_tuple(_keywords.properties)
        CustomValidator = extend(
            Draft202012Validator,
            type_checker=type_checker,
            validators={"required": required, "properties": properties},
        )
        validator = CustomValidator(schema)
        Point = namedtuple("Point", ["x", "y"])
        # Can now process required and properties
        validator.validate(Point(x=4, y=5))
        with self.assertRaises(ValidationError):
            validator.validate(Point(x="not an integer", y=5))
        # As well as still handle objects.
        validator.validate({"x": 4, "y": 5})
        with self.assertRaises(ValidationError):
            validator.validate({"x": "not an integer", "y": 5})
    def test_unknown_type(self):
        """Validating against a type with no registered check raises
        UnknownType (a schema error, not an instance error)."""
        with self.assertRaises(UnknownType) as e:
            Draft202012Validator({}).is_type(12, "some unknown type")
        self.assertIn("'some unknown type'", str(e.exception))
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, spdiags, triu, tril, find, hstack, eye
from scipy.sparse.linalg import cg, inv, dsolve
from scipy.linalg import norm
from pyamg import *
class TriRadiusRatio():
    """Triangle-mesh smoother driven by the radius-ratio quality measure.

    For a triangle with edge lengths l0, l1, l2, perimeter p and signed
    area A, the quality used here is q = p*l0*l1*l2 / (16*A**2), which is
    1 for an equilateral triangle and grows as the triangle degenerates.
    Interior ("free") nodes are moved iteratively to reduce this measure;
    boundary nodes are held fixed.

    Fixes applied: removed the NumPy aliases ``np.bool``/``np.float``
    (deleted in NumPy 1.24); corrected the index ordering bug in
    ``get_iterate_matrix`` (see its docstring).
    """

    def __init__(self, mesh):
        # Mesh must provide entity()/ds accessors (fealpy-style interface).
        self.mesh = mesh

    def get_free_node_info(self):
        """Return a boolean mask, True for interior nodes allowed to move."""
        NN = self.mesh.number_of_nodes()
        isBdNode = self.mesh.ds.boundary_node_flag()
        # np.bool was removed in NumPy 1.24; builtin bool is the same dtype.
        isFreeNode = np.ones((NN, ), dtype=bool)
        isFreeNode[isBdNode] = False
        return isFreeNode

    def get_quality(self):
        """Per-cell radius-ratio quality p*q/(16*area**2) as a 1-D array."""
        node = self.mesh.entity('node')
        cell = self.mesh.entity('cell')
        NC = self.mesh.number_of_cells()
        localEdge = self.mesh.ds.local_edge()
        v = [node[cell[:, j], :] - node[cell[:, i], :] for i, j in localEdge]
        l2 = np.zeros((NC, 3))
        for i in range(3):
            l2[:, i] = np.sum(v[i]**2, axis=1)
        l = np.sqrt(l2)
        p = l.sum(axis=1)       # perimeter
        q = l.prod(axis=1)      # product of the edge lengths
        area = np.cross(v[2], -v[1])/2
        quality = p*q/(16*area**2)
        return quality

    def get_iterate_matrix(self):
        """Assemble the sparse iteration matrices (A, B).

        BUG FIX: ``val`` is laid out in NC-sized blocks (all A_ii, then all
        A_ij, ...), but the original row/column arrays were built with
        einsum/swapaxes whose ``.flat`` order is per-cell, so each value was
        paired with the wrong (row, col) index.  I and J are now
        concatenated in the same block order as ``val``, identical to the
        assembly in METHOD_NAME below.  Duplicate (row, col) pairs are
        summed by the csr_matrix constructor, as intended.
        """
        node = self.mesh.entity('node')
        cell = self.mesh.entity('cell')
        NN = self.mesh.number_of_nodes()
        NC = self.mesh.number_of_cells()
        localEdge = self.mesh.ds.local_edge()
        v = [node[cell[:, j], :] - node[cell[:, i], :] for i, j in localEdge]
        l2 = np.zeros((NC, 3))
        for i in range(3):
            l2[:, i] = np.sum(v[i]**2, axis=1)
        l = np.sqrt(l2)
        p = l.sum(axis=1)
        q = l.prod(axis=1)
        area = np.cross(v[2], -v[1])/2
        mu = p*q/(16*area**2)
        c = mu[:, None]*(1/(p[:, None]*l) + 1/l2)
        val = np.concatenate((
            c[:, [1, 2]].sum(axis=1), -c[:, 2], -c[:, 1],
            -c[:, 2], c[:, [0, 2]].sum(axis=1), -c[:, 0],
            -c[:, 1], -c[:, 0], c[:, [0, 1]].sum(axis=1)))
        idxi, idxj, idxk = cell[:, 0], cell[:, 1], cell[:, 2]
        I = np.concatenate((
            idxi, idxi, idxi,
            idxj, idxj, idxj,
            idxk, idxk, idxk))
        J = np.concatenate((idxi, idxj, idxk))
        J = np.concatenate((J, J, J))
        A = csr_matrix((val, (I, J)), shape=(NN, NN))
        cn = mu/area
        val = np.concatenate((-cn, cn, cn, -cn, -cn, cn))
        I = np.concatenate((idxi, idxi, idxj, idxj, idxk, idxk))
        J = np.concatenate((idxj, idxk, idxi, idxk, idxi, idxj))
        B = csr_matrix((val, (I, J)), shape=(NN, NN))
        return (A, B)

    def METHOD_NAME(self):
        """Assemble (A, B) for one smoothing step from explicit edge
        vectors (same matrices as get_iterate_matrix; kept for
        compatibility with existing callers)."""
        NC = self.mesh.number_of_cells()
        NN = self.mesh.number_of_nodes()
        node = self.mesh.entity('node')
        cell = self.mesh.entity('cell')
        idxi = cell[:, 0]
        idxj = cell[:, 1]
        idxk = cell[:, 2]
        v0 = node[idxk] - node[idxj]
        v1 = node[idxi] - node[idxk]
        v2 = node[idxj] - node[idxi]
        # Signed area as an (NC, 1) column so mu/c broadcast per edge.
        area = 0.5*(-v2[:, [0]]*v1[:, [1]] + v2[:, [1]]*v1[:, [0]])
        # np.float was removed in NumPy 1.24; np.float64 is the same dtype.
        l2 = np.zeros((NC, 3), dtype=np.float64)
        l2[:, 0] = np.sum(v0**2, axis=1)
        l2[:, 1] = np.sum(v1**2, axis=1)
        l2[:, 2] = np.sum(v2**2, axis=1)
        l = np.sqrt(l2)
        p = l.sum(axis=1, keepdims=True)
        q = l.prod(axis=1, keepdims=True)
        mu = p*q/(16*area**2)
        c = mu*(1/(p*l) + 1/l2)
        val = np.concatenate((
            c[:, [1, 2]].sum(axis=1), -c[:, 2], -c[:, 1],
            -c[:, 2], c[:, [0, 2]].sum(axis=1), -c[:, 0],
            -c[:, 1], -c[:, 0], c[:, [0, 1]].sum(axis=1)))
        I = np.concatenate((
            idxi, idxi, idxi,
            idxj, idxj, idxj,
            idxk, idxk, idxk))
        J = np.concatenate((idxi, idxj, idxk))
        J = np.concatenate((J, J, J))
        A = csr_matrix((val, (I, J)), shape=(NN, NN))
        cn = (mu/area).ravel()
        val = np.concatenate((-cn, cn, cn, -cn, -cn, cn))
        I = np.concatenate((idxi, idxi, idxj, idxj, idxk, idxk))
        J = np.concatenate((idxj, idxk, idxi, idxk, idxi, idxj))
        B = csr_matrix((val, (I, J)), shape=(NN, NN))
        return (A, B)

    def iterate_solver(self, maxit=100):
        """Run *maxit* Jacobi smoothing sweeps, printing quality statistics.

        ``maxit`` generalizes the previously hard-coded 100 iterations while
        keeping the default behavior unchanged.
        """
        node = self.mesh.entity('node')
        isFreeNode = self.get_free_node_info()
        for i in range(maxit):
            A, B = self.METHOD_NAME()
            self.Jacobi(node, A, B, isFreeNode)
            # Alternative update schemes: BlockJacobi / BlockGauss below.
            q = self.get_quality()
            minq = np.min(q)
            avgq = np.mean(q)
            print('minq=', minq, 'avgq=', avgq)

    def Jacobi(self, node, A, B, isFreeNode):
        """One damped Jacobi update of the free nodes (modifies node in place)."""
        NN = self.mesh.number_of_nodes()
        D = spdiags(1.0/A.diagonal(), 0, NN, NN)
        C = -(triu(A, 1) + tril(A, -1))
        X = D*(C*node[:, 0] - B*node[:, 1])
        Y = D*(B*node[:, 0] + C*node[:, 1])
        p = np.zeros((NN, 2))
        p[isFreeNode, 0] = X[isFreeNode] - node[isFreeNode, 0]
        p[isFreeNode, 1] = Y[isFreeNode] - node[isFreeNode, 1]
        # Damped step; the 100/NN factor is an empirical step size.
        node += 100*p/NN

    def BlockJacobi(self, node, A, B, isFreeNode):
        """Solve the x/y blocks with CG, holding boundary nodes fixed."""
        NN = self.mesh.number_of_nodes()
        isBdNode = np.logical_not(isFreeNode)
        newNode = np.zeros((NN, 2), dtype=np.float64)
        newNode[isBdNode, :] = node[isBdNode, :]
        b = -B*node[:, 1] - A*newNode[:, 0]
        # NOTE(review): scipy renamed cg's ``tol`` to ``rtol`` (``tol``
        # removed in SciPy 1.14); update once the pinned SciPy allows.
        newNode[isFreeNode, 0], info = cg(A[np.ix_(isFreeNode, isFreeNode)],
                b[isFreeNode], x0=node[isFreeNode, 0], tol=1e-6)
        b = B*node[:, 0] - A*newNode[:, 1]
        newNode[isFreeNode, 1], info = cg(A[np.ix_(isFreeNode, isFreeNode)],
                b[isFreeNode], x0=node[isFreeNode, 1], tol=1e-6)
        node[isFreeNode, :] = newNode[isFreeNode, :]
        return

    def BlockGauss(self, node, A, B, isFreeNode):
        """Like BlockJacobi but with an AMG-preconditioned CG solve."""
        NN = self.mesh.number_of_nodes()
        isBdNode = np.logical_not(isFreeNode)
        newNode = np.zeros((NN, 2), dtype=np.float64)
        newNode[isBdNode, :] = node[np.ix_(isBdNode, [0, 1])]
        ml = smoothed_aggregation_solver(A[np.ix_(isFreeNode, isFreeNode)])
        M = ml.aspreconditioner(cycle='W')
        b = -B*node[:, 1] - A*newNode[:, 0]
        node[isFreeNode, 0], info = cg(A[np.ix_(isFreeNode, isFreeNode)],
                b[isFreeNode], x0=node[isFreeNode, 0], tol=1e-8, M=M)
        b = B*node[:, 0] - A*newNode[:, 1]
        node[isFreeNode, 1], info = cg(A[np.ix_(isFreeNode, isFreeNode)],
                b[isFreeNode], x0=node[isFreeNode, 1], tol=1e-8, M=M)
        return
'''
'''
# Copyright 2017 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import chainer.functions as F
import numpy
import pytest
import torch
from espnet.nets.pytorch_backend.e2e_asr import pad_list
from espnet.nets.pytorch_backend.nets_utils import th_accuracy
@pytest.mark.parametrize("ctc_type", ["builtin", "gtnctc", "cudnnctc"])
@pytest.mark.parametrize(
    "in_length,out_length", [([11, 17, 15], [4, 2, 3]), ([4], [1])]
)
def METHOD_NAME(in_length, out_length, ctc_type):
    """Cross-check PyTorch-based CTC losses against chainer's reference.

    Builds random batched predictions/targets, computes CTC loss with the
    selected backend, and asserts it matches chainer's
    connectionist_temporal_classification within 5% relative tolerance.
    """
    if ctc_type == "builtin" or ctc_type == "cudnnctc":
        _ctcloss_sum = torch.nn.CTCLoss(reduction="sum")
        def torch_ctcloss(th_pred, th_target, th_ilen, th_olen):
            # torch.nn.CTCLoss expects log-probabilities.
            th_pred = th_pred.log_softmax(2)
            loss = _ctcloss_sum(th_pred, th_target, th_ilen, th_olen)
            # Batch-size average
            loss = loss / th_pred.size(1)
            return loss
    elif ctc_type == "gtnctc":
        # Skip this parametrization entirely if gtn is not installed.
        pytest.importorskip("gtn")
        from espnet.nets.pytorch_backend.gtn_ctc import GTNCTCLossFunction
        _ctcloss_sum = GTNCTCLossFunction.apply
        def torch_ctcloss(th_pred, th_target, th_ilen, th_olen):
            targets = [t.tolist() for t in th_target]
            log_probs = torch.nn.functional.log_softmax(th_pred, dim=2)
            loss = _ctcloss_sum(log_probs, targets, th_ilen, 0, "none")
            return loss
    n_out = 7
    input_length = numpy.array(in_length, dtype=numpy.int32)
    label_length = numpy.array(out_length, dtype=numpy.int32)
    # Random per-utterance frame posteriors and label sequences.
    np_pred = [
        numpy.random.rand(il, n_out).astype(numpy.float32) for il in input_length
    ]
    np_target = [
        numpy.random.randint(0, n_out, size=ol, dtype=numpy.int32)
        for ol in label_length
    ]
    # NOTE: np_pred[i] seems to be transposed and used axis=-1 in e2e_asr.py
    ch_pred = F.separate(F.pad_sequence(np_pred), axis=-2)
    ch_target = F.pad_sequence(np_target, padding=-1)
    ch_loss = F.connectionist_temporal_classification(
        ch_pred, ch_target, 0, input_length, label_length
    ).data
    # torch side: (T, B, n_out) layout for the builtin/cudnn losses.
    th_pred = pad_list([torch.from_numpy(x) for x in np_pred], 0.0).transpose(0, 1)
    if ctc_type == "gtnctc":
        # gtn implementation expects targets as list
        th_target = np_target
        # keep as B x T x H for gtn
        th_pred = th_pred.transpose(0, 1)
    else:
        th_target = torch.from_numpy(numpy.concatenate(np_target))
    th_ilen = torch.from_numpy(input_length)
    th_olen = torch.from_numpy(label_length)
    th_loss = torch_ctcloss(th_pred, th_target, th_ilen, th_olen).numpy()
    numpy.testing.assert_allclose(th_loss, ch_loss, 0.05)
def test_attn_loss():
    """Torch cross_entropy must match chainer softmax_cross_entropy on the
    attention-decoder targets (labels followed by <eos>, padding ignored)."""
    n_out = 7
    _eos = n_out - 1
    n_batch = 3
    label_length = numpy.array([4, 2, 3], dtype=numpy.int32)
    np_pred = numpy.random.rand(n_batch, max(label_length) + 1, n_out).astype(
        numpy.float32
    )
    # NOTE: 0 is only used for CTC, never appeared in attn target
    np_target = [
        numpy.random.randint(1, n_out - 1, size=ol, dtype=numpy.int32)
        for ol in label_length
    ]
    eos = numpy.array([_eos], "i")
    ys_out = [F.concat([y, eos], axis=0) for y in np_target]
    # padding for ys with -1
    # pys: utt x olen
    # NOTE: -1 is default ignore index for chainer
    pad_ys_out = F.pad_sequence(ys_out, padding=-1)
    y_all = F.reshape(np_pred, (n_batch * (max(label_length) + 1), n_out))
    ch_loss = F.softmax_cross_entropy(y_all, F.concat(pad_ys_out, axis=0))
    # NOTE: this index 0 is only for CTC not attn. so it can be ignored
    # unfortunately, torch cross_entropy does not accept out-of-bound ids
    th_ignore = 0
    th_pred = torch.from_numpy(y_all.data)
    # Pad torch targets with the ignore index instead of chainer's -1.
    th_target = pad_list([torch.from_numpy(t.data).long() for t in ys_out], th_ignore)
    th_loss = torch.nn.functional.cross_entropy(
        th_pred,
        th_target.view(-1),
        ignore_index=th_ignore,
        reduction="mean",
    )
    print(ch_loss)
    print(th_loss)
    # NOTE: chainer's default setting are normalized by batch-size
    loss_data = float(th_loss)
    numpy.testing.assert_allclose(loss_data, ch_loss.data, 0.05)
def test_train_acc():
    """th_accuracy must match chainer F.accuracy on padded decoder targets.

    FIX: removed a stray ``|`` extraction artifact that trailed the final
    assertion and broke the file's syntax.
    """
    n_out = 7
    _eos = n_out - 1
    n_batch = 3
    label_length = numpy.array([4, 2, 3], dtype=numpy.int32)
    np_pred = numpy.random.rand(n_batch, max(label_length) + 1, n_out).astype(
        numpy.float32
    )
    # NOTE: 0 is only used for CTC, never appeared in attn target
    np_target = [
        numpy.random.randint(1, n_out - 1, size=ol, dtype=numpy.int32)
        for ol in label_length
    ]
    eos = numpy.array([_eos], "i")
    ys_out = [F.concat([y, eos], axis=0) for y in np_target]
    # padding for ys with -1
    # pys: utt x olen
    # NOTE: -1 is default ignore index for chainer
    pad_ys_out = F.pad_sequence(ys_out, padding=-1)
    y_all = F.reshape(np_pred, (n_batch * (max(label_length) + 1), n_out))
    ch_acc = F.accuracy(y_all, F.concat(pad_ys_out, axis=0), ignore_label=-1)
    # NOTE: this index 0 is only for CTC not attn. so it can be ignored
    # unfortunately, torch cross_entropy does not accept out-of-bound ids
    th_ignore = 0
    th_pred = torch.from_numpy(y_all.data)
    th_ys = [torch.from_numpy(numpy.append(t, eos)).long() for t in np_target]
    th_target = pad_list(th_ys, th_ignore)
    th_acc = th_accuracy(th_pred, th_target, th_ignore)
    numpy.testing.assert_allclose(ch_acc.data, th_acc)
#
# Part of p5: A Python package based on Processing
# Copyright (C) 2017-2019 Abhik Pal
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import xml.etree.ElementTree as etree
import math
import re
import numpy as np
from . import Color
from ..sketch.Vispy2DRenderer.shape import PShape
from . import primitives
from . import transforms
from ..pmath import matrix
from .constants import ROUND
# Fallback values used when an SVG element supplies neither a presentation
# attribute nor an inline ``style`` declaration for a property.
default_values = {  # default values of SVG attributes
    "stroke-width": 1,
    "stroke-color": Color(0, 0, 0),  # black
    "stroke-join": 0,
    "stroke-cap": 1,
    "stroke": "none",
    "fill": "none",
}
def get_style(element, style):
    """Resolve an SVG styling property for *element*.

    Resolution order: a direct presentation attribute on the element, then
    the inline ``style="key:value;..."`` attribute, then the module-level
    ``default_values``.  Returns None for unknown styles with no default.

    BUG FIX: the inline-style lookup used a substring test
    (``style in declaration``), so asking for "stroke" on an element whose
    style string started with "stroke-width:2px" returned "2px".  The key
    is now compared exactly.
    """
    # Direct presentation attribute (e.g. <rect stroke="red">) wins.
    if element.get(style):
        return element.get(style)
    values = element.get("style")
    if values:
        for declaration in values.split(";"):
            if ":" not in declaration:
                continue
            key, _, value = declaration.partition(":")
            if key.strip() != style:
                continue
            value = value.strip()
            if style == "stroke" or style == "fill":
                return value
            if style == "stroke-width":
                return int(value.replace("px", ""))
            if style == "stroke-opacity":
                return float(value)
            # Other inline properties fall through to the defaults, matching
            # the original behavior.
    if style in default_values.keys():
        return default_values[style]
    else:
        return None
def METHOD_NAME(element):
    """Convert an SVG <rect> element into a four-vertex PShape."""
    x = float(element.get("x"))
    y = float(element.get("y"))
    w = float(element.get("width"))
    h = float(element.get("height"))
    # Corners in drawing order: top-left, top-right, bottom-right, bottom-left.
    corners = [(x, y), (x + w, y), (x + w, y + h), (x, y + h)]
    return PShape(
        vertices=corners,
        children=[],
        fill_color=Color(get_style(element, "fill")),
        stroke_weight=get_style(element, "stroke-width"),
        stroke_color=Color(get_style(element, "stroke")),
        stroke_cap=get_style(element, "stroke-cap"),
        stroke_join=default_values["stroke-join"],
    )
def parse_circle(element):
    """Convert an SVG <circle> element into a full-circle Arc primitive.

    FIX: removed the unused local ``stroke_cap`` — its value was computed
    from the element but never passed on (ROUND is always used).
    """
    cx = float(element.get("cx"))
    cy = float(element.get("cy"))
    r = float(element.get("r"))
    fill = Color(get_style(element, "fill"))
    stroke_weight = get_style(element, "stroke-width")
    stroke = Color(get_style(element, "stroke"))
    # NOTE(review): half-extents (r/2) mirror parse_ellipse's rx/2, ry/2 —
    # confirm primitives.Arc expects halves rather than full radii.
    return primitives.Arc(
        (cx, cy),
        (r / 2, r / 2),
        0,
        2 * math.pi,
        "CHORD",
        fill_color=fill,
        stroke_weight=stroke_weight,
        stroke_color=stroke,
        stroke_cap=ROUND,
    )
def parse_line(element):
    """Convert an SVG <line> element into a two-vertex PShape."""
    start = (float(element.get("x1")), float(element.get("y1")))
    end = (float(element.get("x2")), float(element.get("y2")))
    return PShape(
        vertices=[start, end],
        fill_color=Color(get_style(element, "fill")),
        stroke_weight=get_style(element, "stroke-width"),
        stroke_color=Color(get_style(element, "stroke")),
        stroke_cap=get_style(element, "stroke-cap"),
        stroke_join=default_values["stroke-join"],
    )
def parse_ellipse(element):
    """Convert an SVG <ellipse> element into a full-ellipse Arc primitive.

    BUG FIX: the centre's y coordinate was read from the "cx" attribute,
    collapsing every ellipse onto the diagonal cy == cx.  It now reads
    "cy".  Also removed the unused local ``stroke_cap`` (ROUND is always
    passed, matching parse_circle).
    """
    cx = float(element.get("cx"))
    cy = float(element.get("cy"))
    rx = float(element.get("rx"))
    ry = float(element.get("ry"))
    fill = Color(get_style(element, "fill"))
    stroke_weight = get_style(element, "stroke-width")
    stroke = Color(get_style(element, "stroke"))
    return primitives.Arc(
        (cx, cy),
        (rx / 2, ry / 2),
        0,
        2 * math.pi,
        "CHORD",
        fill_color=fill,
        stroke_weight=stroke_weight,
        stroke_color=stroke,
        stroke_cap=ROUND,
        stroke_join=default_values["stroke-join"],
    )
# Dispatch table: SVG tag name -> converter producing a PShape/Arc.
parser_function = {
    # tag: parser
    "rect": METHOD_NAME,
    "circle": parse_circle,
    "line": parse_line,
    "ellipse": parse_ellipse,
    # "path": parse_path  -- <path> support not implemented yet
}
def load_shape(filename):
    """
    Loads the given .svg file and converts it into
    PShape object.

    :param filename: link to .svg file
    :type filename: str

    :raises TypeError: if the file's root element is not an SVG document.
    """
    tree = etree.parse(filename)
    root = tree.getroot()
    if root.tag != "{http://www.w3.org/2000/svg}svg":
        # BUG FIX: the message used logging-style lazy arguments, so the
        # filename was attached as a second exception arg instead of being
        # interpolated into the message.
        raise TypeError("file %s does not seem to be a valid SVG file" % filename)
    # FIX: dropped the unused width/height locals read from the root element.
    return transform_shape(parser(root))
def transform_shape(element):
    """Recursively bake each shape's own transform matrix into it.

    Applies ``element._transform_matrix`` to *element*, then walks every
    descendant depth-first, and finally returns the (mutated) element.
    """
    element.apply_transform_matrix(element._transform_matrix)
    for descendant in element.children:
        transform_shape(descendant)
    return element
def parser(element):
    """Recursively convert an SVG element tree into a PShape hierarchy.

    Known leaf tags are dispatched through ``parser_function``; <g> groups
    recurse; any other tag is skipped.  A ``transform`` attribute of the
    form ``op(a, b) ...`` (scale/translate only) is accumulated into a 4x4
    matrix and handed to ``shape.transform_matrix``.
    """
    shape = PShape([(0, 0)], children=[])
    transform = element.get("transform")
    transform_matrix = np.identity(4)
    if transform:
        # Groups are (op-name, first argument, remaining arguments), e.g.
        # "translate(10, 20) scale(2 3)".
        properties = re.findall(r"(\w+)\(([\w\.]+)[,\s]*([^(]*)\)", transform)
        for p in properties:
            if p[0] == "scale":
                # NOTE(review): a one-argument scale(s) leaves p[2] == "" and
                # float("") raises ValueError — confirm inputs always use the
                # two-argument form.
                mat = matrix.scale_transform(float(p[1]), float(p[2]))
                transform_matrix = transform_matrix.dot(mat)
            elif p[0] == "translate":
                mat = matrix.translation_matrix(float(p[1]), float(p[2]))
                transform_matrix = transform_matrix.dot(mat)
        shape.transform_matrix(transform_matrix)
    for e in element:
        # Strip the SVG namespace so tags compare by local name.
        tag = e.tag.replace("{http://www.w3.org/2000/svg}", "")
        if tag in parser_function.keys():
            shape.add_child(parser_function[tag](e))
        elif tag == "g":
            shape.add_child(parser(e))
        else:
            continue
    return shape
def shape(shape, x=0, y=0):
    """
    Draws shapes to the display window

    :param shape: shape object
    :type shape: PShape

    :param x: x-coordinate of the shape
    :type x: float

    :param y: y-coordinate of the shape
    :type y: float
    """
    # FIX: removed a stray ``|`` extraction artifact that trailed the final
    # statement and broke the file's syntax.
    # Translate inside push/pop so the caller's matrix stack is untouched.
    with transforms.push_matrix():
        transforms.translate(x, y)
        primitives.draw_shape(shape)
#!/usr/bin/env python3
import linuxcnc, hal
import sys, os
# Set up pins
#
# This HAL component mirrors motion's digital/analog I/O so the MDI tests
# below can both drive inputs and observe outputs.
h = hal.component("test-ui")
h.newpin('d_out', hal.HAL_BIT, hal.HAL_IN) # pin for reading digital out
h.newpin('a_out', hal.HAL_FLOAT, hal.HAL_IN) # pin for reading analog out
h.newpin('d_in', hal.HAL_BIT, hal.HAL_OUT) # pin for setting digital in
h.newpin('a_in', hal.HAL_FLOAT, hal.HAL_OUT) # pin for setting analog in
h.ready() # mark the component as 'ready'
os.system('halcmd source postgui.hal') # Net above pins to motion I/O pins
# Initialization
c = linuxcnc.command()  # command channel: issues MDI commands / state changes
s = linuxcnc.stat()     # status channel: polled for motion I/O state
# Bring the machine out of E-stop, power it on, and enter MDI mode.
c.state(linuxcnc.STATE_ESTOP_RESET)
c.state(linuxcnc.STATE_ON)
c.mode(linuxcnc.MODE_MDI)
###################################################
# M62-M65 digital out
def do_dout(on=True, mpos=None):
    """Exercise one digital-output M-code and print before/after state.

    on:   True turns the output on (M62/M64), False off (M63/M65).
    mpos: when given, the synchronized-with-motion codes M62/M63 are used
          and a G0 move to X=mpos makes the queued output take effect;
          when None, the immediate codes M64/M65 are used.
    """
    # Set up command
    #
    # for sanity, specify output 1, changed to 0 in remap
    # code arithmetic: 64 (+1 for "off") (-2 when motion-synchronized).
    code = 64 + int(not on) - 2*int(mpos is not None)
    cmd = 'M%d P1' % code
    # Print pre-test values
    s.poll() # Update status channel
    c.mdi('(print,cmd: "%s")' % cmd)
    c.mdi('(print, M%d remapping pre: motion dout0=%d; hal d_out=%d)' %
          (code, s.dout[0], h['d_out']))
    # Run test command
    c.mdi(cmd)
    if mpos is not None:
        c.mdi('G0 X%.2f' % mpos)
    c.wait_complete()
    # Print post-test values
    s.poll() # Update status channel
    c.mdi('(print, M%d remapping post: motion dout0=%d; hal d_out=%d)' %
          (code, s.dout[0], h['d_out']))
# M62/M63 test: toggle DIO w/motion & verify
# Each toggle moves to a new X so the queued output is actually flushed.
c.mdi('(print,----Testing M62/M63 digital output w/motion----)')
do_dout(on=True, mpos=1.0) # M62
do_dout(on=False, mpos=2.0) # M63
do_dout(on=True, mpos=3.0) # M62
do_dout(on=False, mpos=4.0) # M63
# M64/M65 test: toggle DIO & verify
c.mdi('(print,----Testing M64/M65 digital output, immediate----)')
do_dout(on=True) # M64
do_dout(on=False) # M65
do_dout(on=True) # M64
do_dout(on=False) # M65
###################################################
# M66 wait on input
# These tests don't exercise the L- and Q-words, despite partial
# plumbing being there
def METHOD_NAME(inp, d_in=True, wait_mode=None, timeout=None):
    """Drive an input pin to *inp*, issue M66 to read it, and print state.

    d_in:      True reads the digital input (P-word), False the analog
               input (E-word).
    wait_mode: optional L-word (wait condition); omitted when None.
    timeout:   optional Q-word (wait timeout); omitted when None.
    """
    # Set up M66 command
    cmd = 'M66 %(ad_in)s%(wait_mode)s%(timeout)s' % dict(
        # for sanity, specify input 1, changed to 0 in remap
        ad_in = ('P1' if d_in else 'E1'),
        wait_mode = (' L%d' % wait_mode if wait_mode is not None else ''),
        timeout = (' Q%d' % timeout if timeout is not None else ''),
    )
    # Set input pin
    if d_in:
        h['d_in'] = inp
    else:
        h['a_in'] = inp
    # Print pre-test values
    s.poll() # Update status channel
    c.mdi('(print,cmd: "%s"; input: %.4f)' % (cmd, inp))
    c.mdi('(print, M66 remapping pre: 5399=#5399; 100=#100; '
          'ain0=%.2f; din0=%d)' % (s.ain[0], s.din[0]))
    # Run test command and wait for it
    c.mdi(cmd)
    c.wait_complete()
    # Print post-test values
    s.poll() # Update status channel
    c.mdi('(print, M66 remapping post: 5399=#5399; 100=#100; '
          'ain0=%.2f; din0=%d)' % (s.ain[0], s.din[0]))
# Run test cases
#
# Digital input
c.mdi('(print,----Testing M66 digital input----)')
METHOD_NAME(1, d_in=True)
METHOD_NAME(0, d_in=True)
# Analog input
# wait_mode=0 requests an immediate read of the analog pin (no waiting).
c.mdi('(print,----Testing M66 analog input----)')
METHOD_NAME(42.13, d_in=False, wait_mode=0)
METHOD_NAME(-13.42, d_in=False, wait_mode=0)
###################################################
# M67-M68 analog out
def do_aout(out_val, mpos=None):
    """Exercise one analog-output M-code and print before/after state.

    out_val: value to write to analog output 0 (via the Q-word).
    mpos:    when given, the synchronized-with-motion M67 is used and a
             G0 move to X=mpos flushes the queued output; when None, the
             immediate M68 is used.
    """
    # Set up command
    #
    # for sanity, specify output 1, changed to 0 in remap
    code = 67 + int(mpos is None)
    cmd = 'M%d E1 Q%.2f' % (code, out_val)
    # Print pre-test values
    s.poll() # Update status channel
    c.mdi('(print,cmd: "%s")' % cmd)
    c.mdi('(print, M%d remapping pre: motion aout0=%.2f; hal a_out=%.2f)' %
          (code, s.aout[0], h['a_out']))
    # Run test command
    c.mdi(cmd)
    if mpos is not None:
        c.mdi('G0 X%.2f' % mpos)
    c.wait_complete()
    # Print post-test values
    s.poll() # Update status channel
    c.mdi('(print, M%d remapping post: motion aout0=%.2f; hal a_out=%.2f)' %
          (code, s.aout[0], h['a_out']))
# M67 test: set AIO w/motion & verify
c.mdi('(print,----Testing M67 analog output w/motion----)')
do_aout(42.13, mpos=-1.0)
do_aout(-13.42, mpos=-2.0)
do_aout(0.0, mpos=-3.0)
# M68 test: set AIO (immed.) & verify
c.mdi('(print,----Testing M68 analog output, immediate----)')
do_aout(42.13)
do_aout(-13.42)
do_aout(0.0)
###################################################
# Shutdown
# Drain any queued MDI commands before exiting cleanly.
c.wait_complete()
sys.exit(0)
|
from typing import TYPE_CHECKING, Dict, Optional
import networkx
from angr import SIM_PROCEDURES
from angr.code_location import ExternalCodeLocation
from PySide6.QtCore import QSize
from PySide6.QtWidgets import QHBoxLayout
from angrmanagement.ui.widgets.qdep_graph import QDependencyGraph
from angrmanagement.ui.widgets.qdepgraph_block import QDepGraphBlock
from .view import BaseView
if TYPE_CHECKING:
from angr.knowledge_plugins.key_definitions.atoms import Atom
from angr.knowledge_plugins.key_definitions.definition import Definition
class DependencyView(BaseView):
    """
    Dependency-analysis view: renders the closure graphs computed for a
    sink atom/instruction as one merged QDependencyGraph widget.

    Fixes: the leaf-to-sink edge is now only added for definitions that
    survived conversion (previously a filtered-out definition with
    out-degree 0 added a ``None`` node/edge to the graph); the
    ``_graph_widget`` annotation is Optional; removed a stray trailing
    ``|`` extraction artifact and a dangling commented-out method stub.
    """

    def __init__(self, workspace, instance, default_docking_position, *args, **kwargs):
        super().__init__("dependencies", workspace, instance, default_docking_position, *args, **kwargs)
        self.base_caption = "Dependencies"
        # UI widgets
        self._graph_widget: Optional[QDependencyGraph] = None
        # data
        self.sink_atom: Optional["Atom"] = None
        self.sink_ins_addr: Optional[int] = None
        self.closures: Optional[Dict["Definition", networkx.DiGraph]] = None
        self._graph: Optional[networkx.DiGraph] = None
        self.hovered_block: Optional[QDepGraphBlock] = None
        self.METHOD_NAME()
        self._register_events()

    def hover_enter_block(self, block: QDepGraphBlock):
        """Record the hovered block, notify the widget, and redraw."""
        self.hovered_block = block
        if self._graph_widget is not None:
            self._graph_widget.on_block_hovered(block)
        self.redraw_graph()

    def hover_leave_block(self):
        """Clear the hover highlight and redraw."""
        self.hovered_block = None
        self.redraw_graph()

    def on_screen_changed(self):
        # DPI/screen change: the widget must recompute its layout metrics.
        if self._graph_widget is not None:
            self._graph_widget.refresh()

    def reload(self):
        """Re-generate the merged closure graph and hand it to the widget."""
        if self._graph_widget is None:
            return
        # No closures to display: clear the widget and bail out.
        if not self.closures:
            self._graph = None
            self._graph_widget.graph = None
            self._graph_widget.request_relayout()
            return
        self._graph = self._create_ui_graph()
        self._graph_widget.graph = self._graph

    def redraw_graph(self):
        if self._graph_widget is not None:
            self._graph_widget.viewport().update()

    def sizeHint(self):
        return QSize(400, 800)

    def METHOD_NAME(self):
        """Build the dependency-graph widget and lay it out edge-to-edge."""
        self._graph_widget = QDependencyGraph(self.instance, self)
        hlayout = QHBoxLayout()
        hlayout.addWidget(self._graph_widget)
        hlayout.setContentsMargins(0, 0, 0, 0)
        self.setLayout(hlayout)

    def _register_events(self):
        self.workspace.current_screen.am_subscribe(self.on_screen_changed)

    def _convert_node(
        self, node: "Definition", converted: Dict["Definition", QDepGraphBlock]
    ) -> Optional[QDepGraphBlock]:
        """Map a Definition to its (cached) UI block.

        Returns None for definitions that should not be displayed: external
        code locations and unresolvable-jump/call stub hooks.
        """
        if node in converted:
            return converted[node]
        # skip external
        if isinstance(node.codeloc, ExternalCodeLocation):
            return None
        if self.instance.project.is_hooked(node.codeloc.block_addr):
            hook = self.instance.project.hooked_by(node.codeloc.block_addr)
            if isinstance(
                hook,
                (SIM_PROCEDURES["stubs"]["UnresolvableJumpTarget"], SIM_PROCEDURES["stubs"]["UnresolvableCallTarget"]),
            ):
                return None
        new_node = QDepGraphBlock(False, self, definition=node, addr=node.codeloc.ins_addr)
        converted[node] = new_node
        return new_node

    def _create_ui_graph(self) -> networkx.DiGraph:
        """Merge all closure graphs into a single UI graph rooted at the sink."""
        g = networkx.DiGraph()
        source_node = QDepGraphBlock(False, self, atom=self.sink_atom, addr=self.sink_ins_addr)
        g.add_node(source_node)
        all_graphs = networkx.compose_all(self.closures.values())
        converted: Dict["Definition", QDepGraphBlock] = {}
        for node_ in all_graphs.nodes:
            node = self._convert_node(node_, converted)
            if node is not None:
                g.add_node(node)
                # this is a hack - we only want our sink as the only root of the dependency tree
                # TODO: Figure out why
                # BUG FIX: guarded by "node is not None" so filtered-out
                # definitions no longer add a None node/edge to the graph.
                if all_graphs.out_degree[node_] == 0:
                    g.add_edge(node, source_node)
        for src_, dst_ in all_graphs.edges:
            src = self._convert_node(src_, converted)
            dst = self._convert_node(dst_, converted)
            if src is not None and dst is not None:
                g.add_edge(src, dst)
        return g
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetTargetResult',
'AwaitableGetTargetResult',
'get_target',
'get_target_output',
]
@pulumi.output_type
class GetTargetResult:
"""
Model that represents a Target resource.
"""
def __init__(__self__, METHOD_NAME=None, location=None, name=None, properties=None, system_data=None, type=None):
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", METHOD_NAME)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Location of the target resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> Any:
"""
The properties of the target resource.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
The system metadata of the target resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetTargetResult(GetTargetResult):
    """Awaitable wrapper around GetTargetResult: ``__await__`` never actually
    suspends (the ``if False: yield`` only marks it as a generator) and
    returns a plain GetTargetResult with the same field values."""
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetTargetResult(
            METHOD_NAME=self.METHOD_NAME,
            location=self.location,
            name=self.name,
            properties=self.properties,
            system_data=self.system_data,
            type=self.type)
def get_target(parent_provider_namespace: Optional[str] = None,
               parent_resource_name: Optional[str] = None,
               parent_resource_type: Optional[str] = None,
               resource_group_name: Optional[str] = None,
               target_name: Optional[str] = None,
               opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetTargetResult:
    """
    Get a Target resource that extends a tracked regional resource.
    Azure REST API version: 2023-04-15-preview.

    :param str parent_provider_namespace: String that represents a resource provider namespace.
    :param str parent_resource_name: String that represents a resource name.
    :param str parent_resource_type: String that represents a resource type.
    :param str resource_group_name: String that represents an Azure resource group.
    :param str target_name: String that represents a Target resource name.
    :return: an awaitable result describing the Target resource.
    """
    # Marshal the arguments into the wire-format names expected by the
    # azure-native provider, then perform the synchronous invoke.
    __args__ = dict()
    __args__['parentProviderNamespace'] = parent_provider_namespace
    __args__['parentResourceName'] = parent_resource_name
    __args__['parentResourceType'] = parent_resource_type
    __args__['resourceGroupName'] = resource_group_name
    __args__['targetName'] = target_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:chaos:getTarget', __args__, opts=opts, typ=GetTargetResult).value

    return AwaitableGetTargetResult(
        METHOD_NAME=pulumi.get(__ret__, 'id'),
        location=pulumi.get(__ret__, 'location'),
        name=pulumi.get(__ret__, 'name'),
        properties=pulumi.get(__ret__, 'properties'),
        system_data=pulumi.get(__ret__, 'system_data'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_target)
def get_target_output(parent_provider_namespace: Optional[pulumi.Input[str]] = None,
                      parent_resource_name: Optional[pulumi.Input[str]] = None,
                      parent_resource_type: Optional[pulumi.Input[str]] = None,
                      resource_group_name: Optional[pulumi.Input[str]] = None,
                      target_name: Optional[pulumi.Input[str]] = None,
                      opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetTargetResult]:
    """
    Get a Target resource that extends a tracked regional resource.
    Azure REST API version: 2023-04-15-preview.

    Output-typed variant of :func:`get_target`; the body is intentionally
    empty (``...``) because ``lift_output_func`` supplies the implementation.

    :param str parent_provider_namespace: String that represents a resource provider namespace.
    :param str parent_resource_name: String that represents a resource name.
    :param str parent_resource_type: String that represents a resource type.
    :param str resource_group_name: String that represents an Azure resource group.
    :param str target_name: String that represents a Target resource name.
    """
    ...
4,633 | imag | import sys
from collections.abc import Callable
from decimal import Decimal
from numbers import Integral, Rational, Real
from typing import Any, overload
from typing_extensions import Literal, Self, SupportsIndex, TypeAlias
_ComparableNum: TypeAlias = int | float | Decimal | Real
if sys.version_info >= (3, 9):
    __all__ = ["Fraction"]
else:
    __all__ = ["Fraction", "gcd"]
    # fractions.gcd was removed in Python 3.9 (math.gcd replaces it), so the
    # overloads are only declared for older interpreters. The result is a
    # plain int only when both operands are plain ints.
    @overload
    def gcd(a: int, b: int) -> int: ...
    @overload
    def gcd(a: Integral, b: int) -> Integral: ...
    @overload
    def gcd(a: int, b: Integral) -> Integral: ...
    @overload
    def gcd(a: Integral, b: Integral) -> Integral: ...
class Fraction(Rational):
    """Type stub for :class:`fractions.Fraction`.

    NOTE: most methods name their first parameter ``a`` (not ``self``) to
    mirror the CPython implementation, so keyword-argument calls type-check
    exactly as they behave at runtime.
    """
    @overload
    def __new__(cls, numerator: int | Rational = 0, denominator: int | Rational | None = None) -> Self: ...
    @overload
    def __new__(cls, __value: float | Decimal | str) -> Self: ...
    @classmethod
    def from_float(cls, f: float) -> Self: ...
    @classmethod
    def from_decimal(cls, dec: Decimal) -> Self: ...
    def limit_denominator(self, max_denominator: int = 1000000) -> Fraction: ...
    if sys.version_info >= (3, 8):
        def as_integer_ratio(self) -> tuple[int, int]: ...
    if sys.version_info >= (3, 12):
        def is_integer(self) -> bool: ...

    @property
    def numerator(a) -> int: ...
    @property
    def denominator(a) -> int: ...
    # Arithmetic: combining with int/Fraction stays exact (Fraction); float
    # or complex operands promote the result accordingly.
    @overload
    def __add__(a, b: int | Fraction) -> Fraction: ...
    @overload
    def __add__(a, b: float) -> float: ...
    @overload
    def __add__(a, b: complex) -> complex: ...
    @overload
    def __radd__(b, a: int | Fraction) -> Fraction: ...
    @overload
    def __radd__(b, a: float) -> float: ...
    @overload
    def __radd__(b, a: complex) -> complex: ...
    @overload
    def __sub__(a, b: int | Fraction) -> Fraction: ...
    @overload
    def __sub__(a, b: float) -> float: ...
    @overload
    def __sub__(a, b: complex) -> complex: ...
    @overload
    def __rsub__(b, a: int | Fraction) -> Fraction: ...
    @overload
    def __rsub__(b, a: float) -> float: ...
    @overload
    def __rsub__(b, a: complex) -> complex: ...
    @overload
    def __mul__(a, b: int | Fraction) -> Fraction: ...
    @overload
    def __mul__(a, b: float) -> float: ...
    @overload
    def __mul__(a, b: complex) -> complex: ...
    @overload
    def __rmul__(b, a: int | Fraction) -> Fraction: ...
    @overload
    def __rmul__(b, a: float) -> float: ...
    @overload
    def __rmul__(b, a: complex) -> complex: ...
    @overload
    def __truediv__(a, b: int | Fraction) -> Fraction: ...
    @overload
    def __truediv__(a, b: float) -> float: ...
    @overload
    def __truediv__(a, b: complex) -> complex: ...
    @overload
    def __rtruediv__(b, a: int | Fraction) -> Fraction: ...
    @overload
    def __rtruediv__(b, a: float) -> float: ...
    @overload
    def __rtruediv__(b, a: complex) -> complex: ...
    # Floor division truncates to int for exact operands.
    @overload
    def __floordiv__(a, b: int | Fraction) -> int: ...
    @overload
    def __floordiv__(a, b: float) -> float: ...
    @overload
    def __rfloordiv__(b, a: int | Fraction) -> int: ...
    @overload
    def __rfloordiv__(b, a: float) -> float: ...
    @overload
    def __mod__(a, b: int | Fraction) -> Fraction: ...
    @overload
    def __mod__(a, b: float) -> float: ...
    @overload
    def __rmod__(b, a: int | Fraction) -> Fraction: ...
    @overload
    def __rmod__(b, a: float) -> float: ...
    if sys.version_info >= (3, 8):
        @overload
        def __divmod__(a, b: int | Fraction) -> tuple[int, Fraction]: ...
        @overload
        def __divmod__(a, b: float) -> tuple[float, Fraction]: ...
        @overload
        def __rdivmod__(a, b: int | Fraction) -> tuple[int, Fraction]: ...
        @overload
        def __rdivmod__(a, b: float) -> tuple[float, Fraction]: ...
    else:
        @overload
        def __divmod__(self, other: int | Fraction) -> tuple[int, Fraction]: ...
        @overload
        def __divmod__(self, other: float) -> tuple[float, Fraction]: ...
        @overload
        def __rdivmod__(self, other: int | Fraction) -> tuple[int, Fraction]: ...
        @overload
        def __rdivmod__(self, other: float) -> tuple[float, Fraction]: ...

    @overload
    def __pow__(a, b: int) -> Fraction: ...
    @overload
    def __pow__(a, b: float | Fraction) -> float: ...
    @overload
    def __pow__(a, b: complex) -> complex: ...
    @overload
    def __rpow__(b, a: float | Fraction) -> float: ...
    @overload
    def __rpow__(b, a: complex) -> complex: ...
    def __pos__(a) -> Fraction: ...
    def __neg__(a) -> Fraction: ...
    def __abs__(a) -> Fraction: ...
    def __trunc__(a) -> int: ...
    def __floor__(a) -> int: ...
    def __ceil__(a) -> int: ...
    @overload
    def __round__(self, ndigits: None = None) -> int: ...
    @overload
    def __round__(self, ndigits: int) -> Fraction: ...
    def __hash__(self) -> int: ...
    def __eq__(a, b: object) -> bool: ...
    def __lt__(a, b: _ComparableNum) -> bool: ...
    def __gt__(a, b: _ComparableNum) -> bool: ...
    def __le__(a, b: _ComparableNum) -> bool: ...
    def __ge__(a, b: _ComparableNum) -> bool: ...
    def __bool__(a) -> bool: ...
    def __copy__(self) -> Self: ...
    def __deepcopy__(self, memo: Any) -> Self: ...
    if sys.version_info >= (3, 11):
        def __int__(a, _index: Callable[[SupportsIndex], int] = ...) -> int: ...
    # Not actually defined within fractions.py, but provides more useful
    # overrides
    @property
    def real(self) -> Fraction: ...
    @property
    def METHOD_NAME(self) -> Literal[0]: ...
    def conjugate(self) -> Fraction: ...
4,634 | get old to new bnf codes | """
Update the prescribing data in a SQLite file using the `bnf_map` table in
BigQuery which maps old BNF codes to their current versions
"""
import logging
import os.path
import sqlite3
from gcutils.bigquery import Client
from matrixstore.matrix_ops import sparse_matrix, finalise_matrix, is_integer
from matrixstore.serializer import deserialize, serialize_compressed
logger = logging.getLogger(__name__)
def update_bnf_map(sqlite_path):
    """Remap prescribing rows in the SQLite file at *sqlite_path* so that data
    stored under superseded BNF codes moves to the current codes listed in
    BigQuery's ``bnf_map`` table, then drop rows left with no prescribing."""
    if not os.path.exists(sqlite_path):
        raise RuntimeError("No SQLite file at: {}".format(sqlite_path))
    conn = sqlite3.connect(sqlite_path)
    # We manage transactions ourselves (savepoints inside the per-code move),
    # so switch off the sqlite3 module's implicit transaction handling.
    conn.isolation_level = None
    cursor = conn.cursor()
    code_pairs = METHOD_NAME(Client("hscic"))
    for old_code, new_code in code_pairs:
        move_values_from_old_code_to_new(cursor, old_code, new_code)
    # Only once every code has been remapped do we know which presentations
    # ended up with no prescribing data, so the cleanup has to happen last.
    delete_presentations_with_no_prescribing(cursor)
    conn.commit()
    conn.close()
def METHOD_NAME(bigquery_connection):
    """Fetch (former_bnf_code, current_bnf_code) pairs from BigQuery."""
    query = "SELECT former_bnf_code, current_bnf_code FROM {hscic}.bnf_map"
    rows = bigquery_connection.query(query).rows
    logger.info("Applying %s BNF code updates", len(rows))
    return rows
def move_values_from_old_code_to_new(cursor, old_code, new_code):
    """
    Move prescribing data stored under `old_code` to `new_code`

    If we have data stored under both the old and new codes then sum them.
    """
    old_values = get_values_for_bnf_code(cursor, old_code)
    # If there's no prescribing under the old code then there's nothing to do
    if not old_values:
        return
    new_values = get_values_for_bnf_code(cursor, new_code)
    if not new_values:
        logger.info(
            "Moving prescribing data from %s to %s (new code had no existing data)",
            old_code,
            new_code,
        )
        new_values = old_values
    else:
        logger.info(
            "Merging prescribing data for %s with %s (both codes have existing data)",
            old_code,
            new_code,
        )
        new_values = sum_rows([new_values, old_values])
    # We want saving the new value and deleting the old to be an atomic
    # operation. We use savepoints for this which are equivalent to
    # transactions except they're allowed to nest so it doesn't matter if
    # we're already inside a transaction when we get here.
    cursor.execute("SAVEPOINT bnf_code_update")
    # Ensure a row exists for the new code before updating it in place.
    cursor.execute(
        "INSERT OR IGNORE INTO presentation (bnf_code) VALUES (?)", [new_code]
    )
    cursor.execute(
        """
        UPDATE presentation SET items=?, quantity=?, actual_cost=?, net_cost=?
        WHERE bnf_code=?
        """,
        format_values_for_sqlite(new_values) + [new_code],
    )
    cursor.execute("DELETE FROM presentation WHERE bnf_code=?", [old_code])
    cursor.execute("RELEASE bnf_code_update")
def get_values_for_bnf_code(cursor, code):
    """
    Return a list of matrices which are the prescribing values for the supplied
    BNF code, or None if there is no prescribing data
    """
    rows = cursor.execute(
        """
        SELECT items, quantity, actual_cost, net_cost
        FROM presentation
        WHERE bnf_code=? AND items IS NOT NULL
        """,
        [code],
    ).fetchall()
    if not rows:
        return None
    # At most one row per BNF code; deserialize each serialized matrix column.
    return [deserialize(value) for value in rows[0]]
def sum_rows(rows):
    """
    Accepts multiple rows of matrices and sums the matrices in each column
    """
    # Seed one sparse accumulator per column; each column keeps the
    # integer/float-ness of the corresponding matrix in the first row.
    accumulators = [
        sparse_matrix(column.shape, integer=is_integer(column))
        for column in rows[0]
    ]
    for row in rows:
        for accumulator, matrix in zip(accumulators, row):
            accumulator += matrix
    return [finalise_matrix(accumulator) for accumulator in accumulators]
def format_values_for_sqlite(row):
    """
    Accepts a list of matrices and formats them ready for insertion into SQLite
    """
    return list(map(serialize_compressed, row))
def delete_presentations_with_no_prescribing(cursor):
    """Remove presentation rows whose `items` column was never populated."""
    sql = "DELETE FROM presentation WHERE items IS NULL"
    cursor.execute(sql)
4,635 | get value handle | #!/usr/bin/env python3
import struct
BLUETOOTH_BASE_UUID = bytes ([ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x80, 0x00, 0x00, 0x80, 0x5F, 0x9B, 0x34, 0xFB ]);
def hex_string(bytes):
    """Render an iterable of byte values as space-separated two-digit hex."""
    return " ".join("%02x" % value for value in bytes)
def unpack24(data):
    """Decode a 3-byte little-endian unsigned integer from *data*."""
    (high,) = struct.unpack("<H", data[1:])
    return (high << 8) | data[0]
class BD_ADDR(object):
    """Bluetooth device address, stored big endian.

    Accepts either a "aa:bb:cc:dd:ee:ff" string or a bytes/bytearray value.
    """
    # addr stored in big endian
    def __init__(self, addr):
        if isinstance(addr, str):
            parts = addr.split(':')
            if len(parts) != 6:
                # BUG FIX: previously this silently returned, leaving the
                # instance without an `addr` attribute and deferring the
                # failure to the first use. Fail loudly at construction.
                raise ValueError("invalid BD_ADDR string: %r" % (addr,))
            self.addr = bytes(int(part, 16) for part in parts)
        elif isinstance(addr, (bytes, bytearray)):
            self.addr = bytes(addr)
        else:
            # BUG FIX: previously printed a diagnostic and left the instance
            # half-constructed; raise instead.
            raise TypeError("cannot use %r as bdaddr" % (addr,))

    def get_bytes(self):
        """Return the address in little-endian (HCI wire) order."""
        data = bytearray(self.addr)
        data.reverse()
        return data

    def __repr__(self):
        return ":".join([('%02x' % a) for a in self.addr])
class BT_UUID(object):
    """128-bit Bluetooth UUID, stored big endian.

    Accepts a 16-byte bytes/bytearray value, a 32-bit integer (expanded with
    the Bluetooth base UUID), or a "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
    string.
    """
    # uuid stored in big endian
    def __init__(self, uuid):
        global BLUETOOTH_BASE_UUID
        if isinstance(uuid, (bytes, bytearray)):
            self.uuid = bytes(uuid)
        elif isinstance(uuid, int):
            self.uuid = struct.pack(">I", uuid) + BLUETOOTH_BASE_UUID[4:]
        elif isinstance(uuid, str):
            parts = uuid.split('-')
            if len(parts) != 5:
                # BUG FIX: previously this silently returned, leaving the
                # instance without a `uuid` attribute.
                raise ValueError("invalid UUID string: %r" % (uuid,))
            # BUG FIX: the old code did int(a, 16) per *character*, producing
            # 32 nibble values instead of the 16 bytes of the UUID, which
            # broke get_uuid32() and __repr__ for string-constructed UUIDs.
            self.uuid = bytes.fromhex(uuid.replace('-', ''))
        else:
            raise TypeError("cannot use %r as UUID" % (uuid,))

    def get_uuid32(self):
        """Return the 32-bit short form, or 0 if not based on the base UUID."""
        global BLUETOOTH_BASE_UUID
        result = 0
        if self.uuid[4:] == BLUETOOTH_BASE_UUID[4:]:
            (result,) = struct.unpack(">I", self.uuid[:4])
        return result

    def get_bytes(self):
        """Return the UUID in little-endian (wire) order."""
        data = bytearray(self.uuid)
        data.reverse()
        return data

    def __repr__(self):
        return "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x" % (
            self.uuid[0], self.uuid[1], self.uuid[2], self.uuid[3], self.uuid[4], self.uuid[5], self.uuid[6], self.uuid[7],
            self.uuid[8], self.uuid[9], self.uuid[10], self.uuid[11], self.uuid[12], self.uuid[13], self.uuid[14], self.uuid[15]);
class GATTCharacteristic(object):
    """Parsed GATT characteristic record.

    Wire layout (little endian):
        uint16_t start_handle;
        uint16_t value_handle;
        uint16_t end_handle;
        uint16_t properties;
        uint8_t  uuid128[16];
    """
    def __init__(self, data):
        self.data = data

    def get_bytes(self):
        """Return the raw record bytes."""
        return self.data

    def get_start_handle(self):
        (result, ) = struct.unpack('<H', self.data[0:2])
        return result

    def METHOD_NAME(self):
        # Value handle (bytes 2..3).
        (result, ) = struct.unpack('<H', self.data[2:4])
        return result

    def get_end_handle(self):
        # BUG FIX: this accessor was misspelled `get_ned_handle`, so the
        # `get_end_handle` call in __repr__ raised AttributeError.
        (result, ) = struct.unpack('<H', self.data[4:6])
        return result

    # Backwards-compatible alias for the old misspelled name.
    get_ned_handle = get_end_handle

    def get_properties(self):
        (result, ) = struct.unpack('<H', self.data[6:8])
        return result

    def get_uuid(self):
        return BT_UUID(self.data[8:])

    def __repr__(self):
        return "GATTCharacteristic [start_handle={start_handle}, value_handle={value_handle}, end_handle={end_handle}, get_uuid={uuid}]".format(
            start_handle=self.get_start_handle(), value_handle=self.METHOD_NAME(), end_handle=self.get_end_handle(), uuid=self.get_uuid())
class GATTCharacteristicDescriptor(object):
    """Parsed GATT characteristic descriptor record.

    Wire layout (little endian):
        uint16_t handle;
        uint8_t  uuid128[16];
    """
    def __init__(self, data):
        self.data = data

    def get_bytes(self):
        """Return the raw record bytes."""
        return self.data

    def get_handle(self):
        (handle,) = struct.unpack('<H', self.data[0:2])
        return handle

    def get_uuid(self):
        return BT_UUID(self.data[2:])

    def __repr__(self):
        return "GATTCharacteristicDescriptor [handle={handle}, get_uuid={uuid}]".format(
            handle=self.get_handle(), uuid=self.get_uuid())
class GATTService(object):
    """Parsed GATT service record.

    Wire layout (little endian):
        uint16_t start_group_handle;
        uint16_t end_group_handle;
        uint8_t  uuid128[16];
    """
    def __init__(self, data):
        self.data = data

    def get_bytes(self):
        """Return the raw record bytes."""
        return self.data

    def get_start_group_handle(self):
        (result, ) = struct.unpack('<H', self.data[0:2])
        return result

    def get_end_group_handle(self):
        (result, ) = struct.unpack('<H', self.data[2:4])
        return result

    def get_uuid(self):
        return BT_UUID(self.data[4:])

    def __repr__(self):
        # BUG FIX: the format string contained a stray "[" before
        # end_group_handle.
        return "GattService [start_group_handle={start_group_handle}, end_group_handle={end_group_handle}, get_uuid={uuid}]".format(
            start_group_handle=self.get_start_group_handle(), end_group_handle=self.get_end_group_handle(), uuid=self.get_uuid())
class Packet(object):
    """A BTstack transport packet: a type tag, a channel, and raw payload."""
    HCI_COMMAND_PACKET = 1
    HCI_EVENT_PACKET = 4
    L2CAP_DATA_PACKET = 6
    RFCOMM_DATA_PACKET = 7

    def __init__(self, packet_type, channel, payload):
        self.packet_type = packet_type
        self.channel = channel
        self.payload = payload

    def get_packet_type(self):
        return self.packet_type

    def get_channel(self):
        return self.channel

    def get_payload(self):
        return self.payload

    def __repr__(self):
        # BUG FIX: the kwargs were written as set literals ({self.get_...()}),
        # so the fields rendered as e.g. "{6}" instead of "6".
        return "Packet type {packet_type}, channel {channel}, payload {payload}".format(
            packet_type=self.get_packet_type(),
            channel=self.get_channel(),
            payload=hex_string(self.get_payload()))
class Event(Packet):
    """An HCI event packet; byte 0 of the payload is the event code."""
    def __init__(self, payload):
        # BUG FIX: events were tagged HCI_COMMAND_PACKET (1); an event is an
        # HCI_EVENT_PACKET (4), matching the type constants defined on Packet.
        self.packet_type = Packet.HCI_EVENT_PACKET
        self.channel = 0
        self.payload = payload

    def get_event_type(self):
        return self.payload[0]

    def __repr__(self):
        # BUG FIX: kwargs were set literals, rendering e.g. "{4}" (see Packet).
        return "Event type {event_type}, payload {payload}".format(
            event_type=self.get_event_type(),
            payload=hex_string(self.get_payload()))
class BTstackEventState(Packet):
    """BTstack daemon state-changed event."""
    # NOTE(review): packet_type is set to HCI_COMMAND_PACKET here, as in the
    # original; like Event above, this looks as if it should be an event
    # packet type — confirm against the BTstack daemon protocol before
    # changing it.
    def __init__(self, payload):
        self.packet_type = Packet.HCI_COMMAND_PACKET
        self.channel = 0
        self.payload = payload

    def get_state(self):
        # Byte 2 of the payload carries the state value (presumably after
        # event code and length bytes — TODO confirm layout).
        return self.payload[2]
|
4,636 | test check is legal port false | import datetime
import pytest
# For tests
from pymilvus import *
from pymilvus import MilvusException
from pymilvus.client.check import (
check_pass_param,
)
from pymilvus.client.utils import (
mkts_from_unixtime,
mkts_from_datetime,
mkts_from_hybridts,
hybridts_to_unixtime
)
from pymilvus.client import get_commit
from pymilvus.client.check import (
is_legal_address,
is_legal_host,
is_legal_port,
)
class TestChecks:
    """Tests for the is_legal_* connection-parameter validators."""
    @pytest.mark.parametrize("valid_address", [
        "localhost:19530",
        "example.com:19530"
    ])
    def test_check_is_legal_address_true(self, valid_address):
        valid = is_legal_address(valid_address)
        assert valid is True

    # Addresses must be "<host>:<numeric port>".
    @pytest.mark.parametrize("invalid_address", [
        "-1",
        "localhost",
        ":19530",
        "localhost:localhost",
    ])
    def test_check_is_legal_address_false(self, invalid_address):
        valid = is_legal_address(invalid_address)
        assert valid is False

    @pytest.mark.parametrize("valid_host", [
        "localhost",
        "example.com"
    ])
    def test_check_is_legal_host_true(self, valid_host):
        valid = is_legal_host(valid_host)
        assert valid is True

    # Non-string values (including a function object) must be rejected.
    @pytest.mark.parametrize("invalid_host", [
        -1,
        1.0,
        "",
        is_legal_address,
    ])
    def test_check_is_legal_host_false(self, invalid_host):
        valid = is_legal_host(invalid_host)
        assert valid is False

    # Ports may be numeric strings or ints.
    @pytest.mark.parametrize("valid_port", [
        "19530",
        "222",
        123,
    ])
    def test_check_is_legal_port_true(self, valid_port):
        valid = is_legal_port(valid_port)
        assert valid is True

    # Masked name (test_check_is_legal_port_false): non-numeric values are
    # rejected as ports.
    @pytest.mark.parametrize("invalid_port", [
        is_legal_address,
        "abc",
        0.3,
    ])
    def METHOD_NAME(self, invalid_port):
        valid = is_legal_port(invalid_port)
        assert valid is False
class TestCheckPassParam:
    """Tests for check_pass_param's handling of search_data payloads."""
    def test_check_pass_param_valid(self):
        # Nested Python lists and numpy float32 arrays are both accepted.
        a = [[i * j for i in range(20)] for j in range(20)]
        check_pass_param(search_data=a)
        import numpy as np
        a = np.float32([[1, 2, 3, 4], [1, 2, 3, 4]])
        check_pass_param(search_data=a)

    def test_check_param_invalid(self):
        with pytest.raises(Exception):
            # NOTE: the set literal around a list is unhashable, so this line
            # itself raises TypeError — still satisfying pytest.raises.
            a = {[i * j for i in range(20) for j in range(20)]}
            check_pass_param(search_data=a)
        with pytest.raises(Exception):
            a = [{i * j for i in range(40)} for j in range(40)]
            check_pass_param(search_data=a)
class TestGenTS:
    """Round-trip tests for the hybrid-timestamp helpers: advancing a hybrid
    ts by an offset must agree whichever representation the offset is applied
    to (hybrid ts, unix time, or datetime)."""
    def test_mkts1(self):
        # Offset expressed in milliseconds.
        ts = 426152581543231492
        msecs = 1000
        timestamp = hybridts_to_unixtime(ts)
        t1 = mkts_from_hybridts(ts, milliseconds=msecs)
        t2 = mkts_from_unixtime(timestamp, msecs)
        timestamp1 = hybridts_to_unixtime(t1)
        timestamp2 = hybridts_to_unixtime(t2)
        assert timestamp1 == timestamp2
        dtime = datetime.datetime.fromtimestamp(timestamp)
        t3 = mkts_from_datetime(dtime, milliseconds=msecs)
        timestamp3 = hybridts_to_unixtime(t3)
        assert timestamp1 == timestamp3

    def test_mkts2(self):
        # Same invariant with the offset expressed as a timedelta.
        ts = 426152581543231492
        delta = datetime.timedelta(milliseconds=1000)
        timestamp = hybridts_to_unixtime(ts)
        t1 = mkts_from_hybridts(ts, delta=delta)
        t2 = mkts_from_unixtime(timestamp, delta=delta)
        timestamp1 = hybridts_to_unixtime(t1)
        timestamp2 = hybridts_to_unixtime(t2)
        assert timestamp1 == timestamp2
        dtime = datetime.datetime.fromtimestamp(timestamp)
        t3 = mkts_from_datetime(dtime, delta=delta)
        timestamp3 = hybridts_to_unixtime(t3)
        assert timestamp1 == timestamp3
class TestGetCommit:
    """Tests for get_commit's version-to-commit lookup and the version regex."""
    def test_get_commit(self):
        # Dev builds map to a short hash; release builds (second arg False)
        # map to the full hash.
        s = get_commit("2.0.0rc9.dev22")
        assert s == "290d76f"
        s = get_commit("2.0.0rc8", False)
        assert s == "c9f015a04058638a28e1d2a5b265147cda0b0a23"

    def test_version_re(self):
        import re
        # major.minor.patch with optional rcN and optional .devN suffixes.
        version_info = r'((\d+)\.(\d+)\.(\d+))((rc)(\d+))?(\.dev(\d+))?'
        p = re.compile(version_info)
        versions = [
            '2.0.0',
            '2.0.0rc3',
            '2.0.0rc4.dev8',
            '2.0.0rc4.dev22',
            '2.0.'
        ]
        # '2.0.' does not match (no patch digits), so it is skipped by the
        # None guard below.
        for v in versions:
            rv = p.match(v)
            if rv is not None:
                assert rv.group() == v
                print(f"group {rv.group()}")
                print(f"group {rv.groups()}")
4,637 | test hybrid recurrent ppo | import attr
import pytest
from mlagents.trainers.tests.simple_test_envs import (
SimpleEnvironment,
MemoryEnvironment,
)
from mlagents.trainers.settings import NetworkSettings
from mlagents.trainers.tests.dummy_config import ppo_dummy_config, sac_dummy_config
from mlagents.trainers.tests.check_env_trains import check_environment_trains
BRAIN_NAME = "1D"
PPO_TORCH_CONFIG = ppo_dummy_config()
SAC_TORCH_CONFIG = sac_dummy_config()
@pytest.mark.slow
@pytest.mark.parametrize("action_size", [(1, 1), (2, 2), (1, 2), (2, 1)])
def test_hybrid_ppo(action_size):
    """PPO must solve SimpleEnvironment for every (continuous, discrete)
    action-size mix."""
    env = SimpleEnvironment([BRAIN_NAME], action_sizes=action_size, step_size=0.8)
    new_network_settings = attr.evolve(PPO_TORCH_CONFIG.network_settings)
    new_hyperparams = attr.evolve(
        PPO_TORCH_CONFIG.hyperparameters,
        batch_size=64,
        buffer_size=1024,
        learning_rate=1e-3,
    )
    config = attr.evolve(
        PPO_TORCH_CONFIG,
        hyperparameters=new_hyperparams,
        network_settings=new_network_settings,
        max_steps=10000,
    )
    check_environment_trains(env, {BRAIN_NAME: config}, success_threshold=0.9)
@pytest.mark.slow
@pytest.mark.parametrize("num_visual,training_seed", [(1, 1336), (2, 1338)])
def test_hybrid_visual_ppo(num_visual, training_seed):
    """PPO with visual-only observations and hybrid actions; the seed is
    pinned per observation count for reproducibility."""
    env = SimpleEnvironment(
        [BRAIN_NAME], num_visual=num_visual, num_vector=0, action_sizes=(1, 1)
    )
    new_hyperparams = attr.evolve(
        PPO_TORCH_CONFIG.hyperparameters,
        batch_size=64,
        buffer_size=1024,
        learning_rate=1e-4,
    )
    config = attr.evolve(
        PPO_TORCH_CONFIG, hyperparameters=new_hyperparams, max_steps=8000
    )
    check_environment_trains(env, {BRAIN_NAME: config}, training_seed=training_seed)
@pytest.mark.slow
def METHOD_NAME():
    """Masked name (test_hybrid_recurrent_ppo): PPO with a recurrent network
    (16-unit memory) must solve the memory environment with hybrid actions."""
    env = MemoryEnvironment([BRAIN_NAME], action_sizes=(1, 1), step_size=0.5)
    new_network_settings = attr.evolve(
        PPO_TORCH_CONFIG.network_settings,
        memory=NetworkSettings.MemorySettings(memory_size=16),
    )
    new_hyperparams = attr.evolve(
        PPO_TORCH_CONFIG.hyperparameters,
        learning_rate=1.0e-3,
        batch_size=64,
        buffer_size=512,
    )
    config = attr.evolve(
        PPO_TORCH_CONFIG,
        hyperparameters=new_hyperparams,
        network_settings=new_network_settings,
        max_steps=5000,
    )
    check_environment_trains(env, {BRAIN_NAME: config}, success_threshold=0.9)
@pytest.mark.slow
@pytest.mark.parametrize("action_size", [(1, 1), (2, 2), (1, 2), (2, 1)])
def test_hybrid_sac(action_size):
    """SAC must solve SimpleEnvironment for every (continuous, discrete)
    action-size mix."""
    env = SimpleEnvironment([BRAIN_NAME], action_sizes=action_size, step_size=0.8)
    new_hyperparams = attr.evolve(
        SAC_TORCH_CONFIG.hyperparameters,
        buffer_size=50000,
        batch_size=256,
        buffer_init_steps=0,
    )
    config = attr.evolve(
        SAC_TORCH_CONFIG, hyperparameters=new_hyperparams, max_steps=4000
    )
    check_environment_trains(env, {BRAIN_NAME: config}, success_threshold=0.9)
@pytest.mark.slow
@pytest.mark.parametrize("num_visual,training_seed", [(1, 1337), (2, 1338)])
def test_hybrid_visual_sac(num_visual, training_seed):
    """SAC with visual-only observations and hybrid actions; the seed is
    pinned per observation count for reproducibility."""
    env = SimpleEnvironment(
        [BRAIN_NAME], num_visual=num_visual, num_vector=0, action_sizes=(1, 1)
    )
    new_hyperparams = attr.evolve(
        SAC_TORCH_CONFIG.hyperparameters,
        buffer_size=50000,
        batch_size=128,
        learning_rate=3.0e-4,
    )
    config = attr.evolve(
        SAC_TORCH_CONFIG, hyperparameters=new_hyperparams, max_steps=3000
    )
    check_environment_trains(env, {BRAIN_NAME: config}, training_seed=training_seed)
@pytest.mark.slow
def test_hybrid_recurrent_sac():
    """SAC with a recurrent network (16-unit memory, sequence length 16) must
    solve the memory environment with hybrid actions."""
    env = MemoryEnvironment([BRAIN_NAME], action_sizes=(1, 1), step_size=0.5)
    new_networksettings = attr.evolve(
        SAC_TORCH_CONFIG.network_settings,
        memory=NetworkSettings.MemorySettings(memory_size=16, sequence_length=16),
    )
    new_hyperparams = attr.evolve(
        SAC_TORCH_CONFIG.hyperparameters,
        batch_size=256,
        learning_rate=3e-4,
        buffer_init_steps=1000,
        steps_per_update=2,
    )
    config = attr.evolve(
        SAC_TORCH_CONFIG,
        hyperparameters=new_hyperparams,
        network_settings=new_networksettings,
        max_steps=4000,
    )
    check_environment_trains(env, {BRAIN_NAME: config}, training_seed=1212)
4,638 | test adaptive avg pool 2d grad grad | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.automated_test_util import *
def _check_equal(test_case, lhs, rhs, name="", rtol=1e-5, atol=1e-5):
is_equal = np.allclose(
lhs.detach().cpu().numpy(),
rhs.detach().cpu().numpy(),
rtol=rtol,
atol=atol,
equal_nan=True,
)
test_case.assertTrue(is_equal, f"{name} is not equal" if name else "")
def _test_avg_pool_grad_grad_impl(test_case, placement, ndim):
    """Compare oneflow vs PyTorch first- and second-order gradients of
    AvgPool{ndim}d under a random global placement/SBP."""
    x_shape = [8, 8] + [5] * ndim
    m = eval(f"torch.nn.AvgPool{ndim}d")(kernel_size=random(2, 5).to(int))
    x = random_tensor(len(x_shape), *x_shape).to_global(
        placement=placement, sbp=random_sbp(placement, max_dim=2)
    )
    y = m(x)
    _check_equal(test_case, y.pytorch, y.oneflow, "y")

    init_grad_y = random_tensor(len(y.oneflow.shape), *y.oneflow.shape).to_global(
        placement=placement, sbp=random_sbp(placement, max_dim=2)
    )
    init_grad_x = random_tensor(len(x.oneflow.shape), *x.oneflow.shape).to_global(
        placement=placement, sbp=random_sbp(placement, max_dim=2)
    )
    # First derivative dy/dx, then differentiate dx w.r.t. both x and grad_y
    # to obtain the second-order gradients.
    dx = torch.autograd.grad(y, x, init_grad_y, True, True)[0]
    _check_equal(test_case, dx.pytorch, dx.oneflow, "dx")

    ddx_ddy = torch.autograd.grad(dx, [x, init_grad_y], init_grad_x, True, True)
    ddx, ddy = ddx_ddy[0], ddx_ddy[1]
    _check_equal(test_case, ddx.pytorch, ddx.oneflow, "ddx")
    _check_equal(test_case, ddy.pytorch, ddy.oneflow, "ddy")
def _test_max_pool_grad_grad_impl(test_case, placement, ndim):
    """Compare oneflow vs PyTorch first- and second-order gradients of
    MaxPool{ndim}d under a random global placement/SBP.
    (Mirrors _test_avg_pool_grad_grad_impl with a different module.)"""
    x_shape = [8, 8] + [5] * ndim
    m = eval(f"torch.nn.MaxPool{ndim}d")(kernel_size=random(2, 5).to(int))
    x = random_tensor(len(x_shape), *x_shape).to_global(
        placement=placement, sbp=random_sbp(placement, max_dim=2)
    )
    y = m(x)
    _check_equal(test_case, y.pytorch, y.oneflow, "y")

    init_grad_y = random_tensor(len(y.oneflow.shape), *y.oneflow.shape).to_global(
        placement=placement, sbp=random_sbp(placement, max_dim=2)
    )
    init_grad_x = random_tensor(len(x.oneflow.shape), *x.oneflow.shape).to_global(
        placement=placement, sbp=random_sbp(placement, max_dim=2)
    )
    # First derivative dy/dx, then second-order grads w.r.t. x and grad_y.
    dx = torch.autograd.grad(y, x, init_grad_y, True, True)[0]
    _check_equal(test_case, dx.pytorch, dx.oneflow, "dx")

    ddx_ddy = torch.autograd.grad(dx, [x, init_grad_y], init_grad_x, True, True)
    ddx, ddy = ddx_ddy[0], ddx_ddy[1]
    _check_equal(test_case, ddx.pytorch, ddx.oneflow, "ddx")
    _check_equal(test_case, ddy.pytorch, ddy.oneflow, "ddy")
def _test_adaptive_pool_grad_grad_impl(test_case, placement, ndim, mode):
    """Compare oneflow vs PyTorch first- and second-order gradients of
    Adaptive{Avg|Max}Pool{ndim}d; *mode* selects "avg" or "max"."""
    x_shape = [8, 8] + [5] * ndim
    m = eval(f"torch.nn.Adaptive{mode.title()}Pool{ndim}d")(
        output_size=random(2, 5).to(int)
    )
    x = random_tensor(len(x_shape), *x_shape).to_global(
        placement=placement, sbp=random_sbp(placement, max_dim=2)
    )
    y = m(x)
    _check_equal(test_case, y.pytorch, y.oneflow, "y")

    init_grad_y = random_tensor(len(y.oneflow.shape), *y.oneflow.shape).to_global(
        placement=placement, sbp=random_sbp(placement, max_dim=2)
    )
    init_grad_x = random_tensor(len(x.oneflow.shape), *x.oneflow.shape).to_global(
        placement=placement, sbp=random_sbp(placement, max_dim=2)
    )
    # First derivative dy/dx, then second-order grads w.r.t. x and grad_y.
    dx = torch.autograd.grad(y, x, init_grad_y, True, True)[0]
    _check_equal(test_case, dx.pytorch, dx.oneflow, "dx")

    ddx_ddy = torch.autograd.grad(dx, [x, init_grad_y], init_grad_x, True, True)
    ddx, ddy = ddx_ddy[0], ddx_ddy[1]
    _check_equal(test_case, ddx.pytorch, ddx.oneflow, "ddx")
    _check_equal(test_case, ddy.pytorch, ddy.oneflow, "ddy")
@flow.unittest.skip_unless_1n1d()
class TestGlobalPoolHigherDerivative(flow.unittest.TestCase):
    """Second-derivative consistency tests for max/avg/adaptive pooling across
    all global placements."""
    @globaltest
    def test_max_pool_1d_grad_grad(test_case):
        for placement in all_placement():
            _test_max_pool_grad_grad_impl(test_case, placement, 1)

    @globaltest
    def test_max_pool_2d_grad_grad(test_case):
        for placement in all_placement():
            _test_max_pool_grad_grad_impl(test_case, placement, 2)

    @globaltest
    def test_max_pool_3d_grad_grad(test_case):
        for placement in all_placement():
            _test_max_pool_grad_grad_impl(test_case, placement, 3)

    @globaltest
    def test_avg_pool_1d_grad_grad(test_case):
        for placement in all_placement():
            _test_avg_pool_grad_grad_impl(test_case, placement, ndim=1)

    @globaltest
    def test_avg_pool_2d_grad_grad(test_case):
        for placement in all_placement():
            _test_avg_pool_grad_grad_impl(test_case, placement, ndim=2)

    @globaltest
    def test_avg_pool_3d_grad_grad(test_case):
        for placement in all_placement():
            _test_avg_pool_grad_grad_impl(test_case, placement, ndim=3)

    @globaltest
    def test_adaptive_avg_pool_1d_grad_grad(test_case):
        for placement in all_placement():
            _test_adaptive_pool_grad_grad_impl(test_case, placement, ndim=1, mode="avg")

    # Masked name (test_adaptive_avg_pool_2d_grad_grad).
    @globaltest
    def METHOD_NAME(test_case):
        for placement in all_placement():
            _test_adaptive_pool_grad_grad_impl(test_case, placement, ndim=2, mode="avg")

    @globaltest
    def test_adaptive_avg_pool_3d_grad_grad(test_case):
        for placement in all_placement():
            _test_adaptive_pool_grad_grad_impl(test_case, placement, ndim=3, mode="avg")
4,639 | is synonym | """ cheminformatics related functions """
import functools
import logging
import ipywidgets as widgets
import matchms
import numpy as np
from matchms.filtering.load_adducts import load_adducts_dict
from rdkit import Chem
from metatlas.interfaces.compounds import structure_cleaning as cleaning
logger = logging.getLogger(__name__)
@functools.lru_cache
def get_parent_mass(precursor_mz: float, adduct: str) -> float:
    """Returns the mass of the input molecule that would result in the supplied precursor_mz and adduct"""
    # Build a metadata-only (empty-peaks) spectrum so matchms'
    # add_parent_mass filter performs the adduct arithmetic for us.
    dummy = matchms.Spectrum(
        mz=np.array([]), intensities=np.array([]), metadata={"precursor_mz": precursor_mz, "adduct": adduct}
    )
    updated = matchms.filtering.add_parent_mass(dummy)
    return updated.metadata["parent_mass"]
@functools.lru_cache
def get_precursor_mz(parent_mass: float, adduct: str) -> float:
    """For an input molecule with parent_mass that generates adduct, return the resulting precursor_mz.

    Raises:
        KeyError: if *adduct* is not in matchms' known adducts table.
    """
    adducts = load_adducts_dict()
    if adduct not in adducts:
        # BUG FIX: the '%s' placeholder was never interpolated, so the error
        # message did not name the offending adduct.
        raise KeyError("Adduct '%s' is not supported" % adduct)
    multiplier = adducts[adduct]["mass_multiplier"]
    correction_mass = adducts[adduct]["correction_mass"]
    return (parent_mass + correction_mass) / multiplier
@functools.lru_cache
def is_positive_mode(adduct: str) -> bool:
    """Returns True if the MS mode for an adduct is positive.

    Raises:
        KeyError: if *adduct* is not in matchms' known adducts table.
    """
    adducts = load_adducts_dict()
    if adduct not in adducts:
        # BUG FIX: the '%s' placeholder was never interpolated, so the error
        # message did not name the offending adduct.
        raise KeyError("Adduct '%s' is not supported" % adduct)
    return adducts[adduct]["ionmode"] == "positive"
@functools.lru_cache
def is_valid_inchi_pair(test_inchi: str, test_inchi_key: str) -> bool:
    """True if test_inchi is a valid InChI whose derived InChI key equals test_inchi_key."""
    # Reject malformed InChI strings up front; key derivation is meaningless then.
    if not matchms.metadata_utils.is_valid_inchi(test_inchi):
        return False
    derived_key = Chem.inchi.InchiToInchiKey(test_inchi)
    return derived_key == test_inchi_key
@functools.lru_cache
def is_valid_inchi_smiles_pair(test_inchi: str, test_smiles: str) -> bool:
    """
    True if test_inchi and test_smiles have the same structure.
    """
    # Parse the InChI first; an unparsable InChI short-circuits to False
    # without ever touching the SMILES.
    parsed_inchi = Chem.inchi.MolFromInchi(test_inchi)
    if parsed_inchi is None:
        return False
    parsed_smiles = Chem.MolFromSmiles(test_smiles)
    if parsed_smiles is None:
        return False
    return are_equal(parsed_inchi, parsed_smiles)
@functools.lru_cache
def inchi_to_smiles(inchi: str) -> str:
    """Convert an InChI string to a SMILES string.

    Raises:
        ValueError: if the input is not a valid InChI.
    """
    mol = Chem.inchi.MolFromInchi(inchi)
    if mol is None:
        # Bug fix: the original fed None straight into MolToSmiles, which raises
        # inside RDKit before the intended ValueError could ever be reached.
        raise ValueError(f"'{inchi}' is not a valid Inchi")
    return Chem.MolToSmiles(mol)
@functools.lru_cache
def smiles_to_inchi(smiles: str) -> str:
    """Convert a SMILES string to an InChI string.

    Raises:
        ValueError: if the input is not a valid SMILES.
    """
    mol = Chem.MolFromSmiles(smiles)
    if mol is None:
        # Check validity before serializing; MolToInchi on None would raise in RDKit.
        raise ValueError(f"'{smiles}' is not a valid smiles")
    # Bug fix: the original called Chem.inchi.MolFromInchi(...) on the Mol,
    # i.e. it tried to PARSE a molecule object as an InChI string; the correct
    # direction is MolToInchi (Mol -> InChI string).
    return Chem.inchi.MolToInchi(mol)
@functools.lru_cache
def inchi_or_smiles_to_molecule(molecule_id: str) -> Chem.rdchem.Mol:
    """Convert an InChI or SMILES string to an rdkit Mol."""
    # Try InChI first, then fall back to SMILES -- same precedence as before.
    mol = Chem.inchi.MolFromInchi(molecule_id)
    if mol is None:
        mol = Chem.MolFromSmiles(molecule_id)
    if mol is None:
        raise ValueError(f"'{molecule_id}' is not a valid Inchi or smiles")
    return mol
@functools.lru_cache
def inchi_or_smiles_to_inchi(molecule_id: str) -> str:
    """Inchi or smiles string to Inchi string"""
    # (Docstring corrected: the original said "to smiles" but this converts to InChI.)
    out = Chem.inchi.MolToInchi(inchi_or_smiles_to_molecule(molecule_id))
    if out is None:
        raise ValueError(f"'{molecule_id}' is not a valid Inchi or smiles")
    return out
@functools.lru_cache
def inchi_or_smiles_to_smiles(molecule_id: str) -> str:
    """Inchi or smiles string to smiles string"""
    # inchi_or_smiles_to_molecule raises ValueError on unparsable input, so the
    # guard below only covers a failed Mol -> SMILES serialization.
    out = Chem.MolToSmiles(inchi_or_smiles_to_molecule(molecule_id))
    if out is None:
        raise ValueError(f"'{molecule_id}' is not a valid Inchi or smiles")
    return out
@functools.lru_cache
def normalize_molecule(mol: Chem.rdchem.Mol) -> Chem.rdchem.Mol:
    """Removes salt and neutralizes charges.

    NOTE(review): despite the annotation, this returns None when desalting or
    neutralization fails -- callers should be prepared for that.
    """
    desalted, _ = cleaning.desalt(mol)
    if desalted is None:
        return None
    # NeutraliseCharges may itself yield None; pass that through unchanged.
    neutralized, _ = cleaning.NeutraliseCharges(desalted)
    return neutralized
@functools.lru_cache
def are_equal(molecule1: Chem.rdchem.Mol, molecule2: Chem.rdchem.Mol) -> bool:
    """True if both molecules are substructures of each other"""
    # Mutual substructure matching is RDKit's standard graph-equality test.
    return molecule1.HasSubstructMatch(molecule2) and molecule2.HasSubstructMatch(molecule1)
@functools.lru_cache
def METHOD_NAME(name: str, synonym_string: str) -> bool:
    """
    Inputs:
        name: string to check for within synonym_string
        synonym_string: string with /// between names
    Returns True if case insensitive match of name to full name in synonym_string
    """
    wanted = name.lower()
    return any(wanted == candidate.lower() for candidate in synonym_string.split("///"))
@functools.lru_cache
def valid_adduct(value: str) -> bool:
    """
    True if the value is an adduct listed supported by the matchms package
    This is not a comprehensive list, so it will return False for some uncommon adducts
    """
    # Membership test against matchms' bundled adduct table.
    adducts = load_adducts_dict()
    return value in adducts
def mol_to_image(mol: Chem.rdchem.Mol, **kwargs) -> widgets.Image:
    """Generate an ipywidgets.Image of a molecule from an rdkit Mol.

    kwargs are forwarded to the widgets.Image constructor (e.g. width/height).
    """
    # NOTE(review): this relies on Chem.Draw being available; rdkit normally
    # requires an explicit `from rdkit.Chem import Draw` -- confirm the module
    # is imported somewhere before this is called.
    d2d = Chem.Draw.MolDraw2DSVG(300, 300)
    d2d.DrawMolecule(mol)
    d2d.FinishDrawing()
    text = d2d.GetDrawingText()
    return widgets.Image(value=text.encode("utf-8"), format="svg+xml", **kwargs)
4,640 | system data | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetNotificationHubResult',
'AwaitableGetNotificationHubResult',
'get_notification_hub',
'get_notification_hub_output',
]
@pulumi.output_type
class GetNotificationHubResult:
    """
    Notification Hub Resource.
    """
    # NOTE: generated code -- METHOD_NAME is the systemData field accessor;
    # the constructor validates shallow types only and stores values via pulumi.set.
    def __init__(__self__, id=None, location=None, name=None, properties=None, sku=None, METHOD_NAME=None, tags=None, type=None):
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if properties and not isinstance(properties, dict):
            raise TypeError("Expected argument 'properties' to be a dict")
        pulumi.set(__self__, "properties", properties)
        if sku and not isinstance(sku, dict):
            raise TypeError("Expected argument 'sku' to be a dict")
        pulumi.set(__self__, "sku", sku)
        if METHOD_NAME and not isinstance(METHOD_NAME, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", METHOD_NAME)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource. E.g. "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}"
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def location(self) -> str:
        """
        The geo-location where the resource lives
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def properties(self) -> 'outputs.NotificationHubPropertiesResponse':
        """
        NotificationHub properties.
        """
        return pulumi.get(self, "properties")
    @property
    @pulumi.getter
    def sku(self) -> Optional['outputs.SkuResponse']:
        """
        The Sku description for a namespace
        """
        return pulumi.get(self, "sku")
    @property
    @pulumi.getter(name="systemData")
    def METHOD_NAME(self) -> 'outputs.SystemDataResponse':
        """
        Azure Resource Manager metadata containing createdBy and modifiedBy information.
        """
        return pulumi.get(self, "system_data")
    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
class AwaitableGetNotificationHubResult(GetNotificationHubResult):
    # Awaitable wrapper so the result can be used in async code; awaiting it
    # simply yields a plain GetNotificationHubResult copy.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetNotificationHubResult(
            id=self.id,
            location=self.location,
            name=self.name,
            properties=self.properties,
            sku=self.sku,
            METHOD_NAME=self.METHOD_NAME,
            tags=self.tags,
            type=self.type)
def get_notification_hub(namespace_name: Optional[str] = None,
                         notification_hub_name: Optional[str] = None,
                         resource_group_name: Optional[str] = None,
                         opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNotificationHubResult:
    """
    Notification Hub Resource.
    Azure REST API version: 2023-01-01-preview.
    :param str namespace_name: Namespace name
    :param str notification_hub_name: Notification Hub name
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    # Marshal arguments into the camelCase names the provider RPC expects.
    __args__ = dict()
    __args__['namespaceName'] = namespace_name
    __args__['notificationHubName'] = notification_hub_name
    __args__['resourceGroupName'] = resource_group_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:notificationhubs:getNotificationHub', __args__, opts=opts, typ=GetNotificationHubResult).value
    return AwaitableGetNotificationHubResult(
        id=pulumi.get(__ret__, 'id'),
        location=pulumi.get(__ret__, 'location'),
        name=pulumi.get(__ret__, 'name'),
        properties=pulumi.get(__ret__, 'properties'),
        sku=pulumi.get(__ret__, 'sku'),
        METHOD_NAME=pulumi.get(__ret__, 'system_data'),
        tags=pulumi.get(__ret__, 'tags'),
        type=pulumi.get(__ret__, 'type'))
# Output-typed variant: lift_output_func wraps get_notification_hub so it
# accepts pulumi Inputs and returns a pulumi Output.
@_utilities.lift_output_func(get_notification_hub)
def get_notification_hub_output(namespace_name: Optional[pulumi.Input[str]] = None,
                                notification_hub_name: Optional[pulumi.Input[str]] = None,
                                resource_group_name: Optional[pulumi.Input[str]] = None,
                                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetNotificationHubResult]:
    """
    Notification Hub Resource.
    Azure REST API version: 2023-01-01-preview.
    :param str namespace_name: Namespace name
    :param str notification_hub_name: Notification Hub name
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    ...
4,641 | sanitize general | """pytest configuration
Extends output capture as needed by pybind11: ignore constructors, optional unordered lines.
Adds docstring and exceptions message sanitizers: ignore Python 2 vs 3 differences.
"""
import pytest
import textwrap
import difflib
import re
import sys
import contextlib
import platform
import gc
# Patterns used to normalize Python-2 artifacts out of captured text:
_unicode_marker = re.compile(r'u(\'[^\']*\')')  # u'...' literals -> '...'
_long_marker = re.compile(r'([0-9])L')  # long-integer suffix: 42L -> 42
_hexadecimal = re.compile(r'0x[0-9a-fA-F]+')  # addresses/ids vary per run
def _strip_and_dedent(s):
"""For triple-quote strings"""
return textwrap.dedent(s.lstrip('\n').rstrip())
def _split_and_sort(s):
"""For output which does not require specific line order"""
return sorted(_strip_and_dedent(s).splitlines())
def _make_explanation(a, b):
"""Explanation for a failed assert -- the a and b arguments are List[str]"""
return ["--- actual / +++ expected"] + [line.strip('\n') for line in difflib.ndiff(a, b)]
class Output(object):
    """Basic output post-processing and comparison"""
    def __init__(self, string):
        # Raw captured text; `explanation` is filled on a failed comparison and
        # consumed by pytest_assertrepr_compare below.
        self.string = string
        self.explanation = []
    def __str__(self):
        return self.string
    def __eq__(self, other):
        # Ignore constructor/destructor output which is prefixed with "###"
        a = [line for line in self.string.strip().splitlines() if not line.startswith("###")]
        b = _strip_and_dedent(other).splitlines()
        if a == b:
            return True
        else:
            # Record a line diff so the assertion failure shows a readable explanation.
            self.explanation = _make_explanation(a, b)
            return False
class Unordered(Output):
    """Custom comparison for output without strict line ordering"""
    def __eq__(self, other):
        # Compare sorted line lists so ordering differences don't fail the test.
        a = _split_and_sort(self.string)
        b = _split_and_sort(other)
        if a == b:
            return True
        else:
            self.explanation = _make_explanation(a, b)
            return False
class Capture(object):
    """Context manager wrapping pytest's capfd/capsys: collects stdout/stderr
    produced inside the with-block and offers comparison helpers."""
    def __init__(self, capfd):
        self.capfd = capfd
        self.out = ""
        self.err = ""
    def __enter__(self):
        # Drain anything captured before the block so only the block's output counts.
        self.capfd.readouterr()
        return self
    def __exit__(self, *args):
        self.out, self.err = self.capfd.readouterr()
    def __eq__(self, other):
        # Delegate to Output's comparison (ignores "###" lines, dedents expected).
        a = Output(self.out)
        b = other
        if a == b:
            return True
        else:
            self.explanation = a.explanation
            return False
    def __str__(self):
        return self.out
    def __contains__(self, item):
        return item in self.out
    @property
    def unordered(self):
        # Comparison view that ignores line ordering.
        return Unordered(self.out)
    @property
    def stderr(self):
        # Comparison view over captured stderr instead of stdout.
        return Output(self.err)
@pytest.fixture
def capture(capsys):
    """Extended `capsys` with context manager and custom equality operators"""
    return Capture(capsys)
class SanitizedString(object):
    """Callable comparison helper: sanitizes a value with the supplied function,
    then compares against a dedented expected string."""
    def __init__(self, sanitizer):
        self.sanitizer = sanitizer
        self.string = ""
        self.explanation = []
    def __call__(self, thing):
        # Sanitize and return self so the result can be compared directly.
        self.string = self.sanitizer(thing)
        return self
    def __eq__(self, other):
        a = self.string
        b = _strip_and_dedent(other)
        if a == b:
            return True
        else:
            self.explanation = _make_explanation(a.splitlines(), b.splitlines())
            return False
def METHOD_NAME(s):
    """Common sanitization shared by docstring and message comparisons:
    shortens module prefixes and strips Python-2 literal artifacts."""
    text = s.strip().replace("pybind11_tests.", "m.").replace("unicode", "str")
    # Apply the long-suffix fix first, then the u'' fix -- same order as before.
    text = _unicode_marker.sub(r"\1", _long_marker.sub(r"\1", text))
    return text
def _sanitize_docstring(thing):
    # Pull the docstring off the object and run the shared sanitizer over it.
    s = thing.__doc__
    s = METHOD_NAME(s)
    return s
@pytest.fixture
def doc():
    """Sanitize docstrings and add custom failure explanation"""
    return SanitizedString(_sanitize_docstring)
def _sanitize_message(thing):
    # Stringify (e.g. an exception), sanitize, and blank out hex addresses,
    # which differ from run to run.
    s = str(thing)
    s = METHOD_NAME(s)
    s = _hexadecimal.sub("0", s)
    return s
@pytest.fixture
def msg():
    """Sanitize messages and add custom failure explanation"""
    return SanitizedString(_sanitize_message)
# noinspection PyUnusedLocal
def pytest_assertrepr_compare(op, left, right):
    """Hook to insert custom failure explanation"""
    # Output/Capture/SanitizedString stash their diff in `.explanation`;
    # surface it as the assertion failure text.
    if hasattr(left, 'explanation'):
        return left.explanation
def suppress(exception):
    """Suppress the desired exception inside a with-block.

    Delegates to the standard library's contextlib.suppress, which implements
    exactly the hand-rolled try/except-pass generator this replaces.
    """
    return contextlib.suppress(exception)
def gc_collect():
    """Run the garbage collector twice (needed when running
    reference counting tests with PyPy)."""
    for _ in range(2):
        gc.collect()
def pytest_configure():
    """Add import suppression and test requirements to `pytest` namespace.

    Optional dependencies (numpy, scipy, pybind11's eigen test module) are
    probed here once so individual tests can declare skip markers.
    """
    try:
        import numpy as np
    except ImportError:
        np = None
    try:
        import scipy
    except ImportError:
        scipy = None
    try:
        from pybind11_tests.eigen import have_eigen
    except ImportError:
        have_eigen = False
    pypy = platform.python_implementation() == "PyPy"
    skipif = pytest.mark.skipif
    pytest.suppress = suppress
    pytest.requires_numpy = skipif(not np, reason="numpy is not installed")
    # Bug fix: this marker previously tested `not np`, so tests requiring scipy
    # were skipped (or not) based on numpy's availability instead of scipy's.
    pytest.requires_scipy = skipif(not scipy, reason="scipy is not installed")
    pytest.requires_eigen_and_numpy = skipif(not have_eigen or not np,
                                             reason="eigen and/or numpy are not installed")
    pytest.requires_eigen_and_scipy = skipif(
        not have_eigen or not scipy, reason="eigen and/or scipy are not installed")
    pytest.unsupported_on_pypy = skipif(pypy, reason="unsupported on PyPy")
    pytest.unsupported_on_py2 = skipif(sys.version_info.major < 3,
                                       reason="unsupported on Python 2.x")
    pytest.gc_collect = gc_collect
def _test_import_pybind11():
    """Early diagnostic for test module initialization errors
    When there is an error during initialization, the first import will report the
    real error while all subsequent imports will report nonsense. This import test
    is done early (in the pytest configuration file, before any tests) in order to
    avoid the noise of having all tests fail with identical error messages.
    Any possible exception is caught here and reported manually *without* the stack
    trace. This further reduces noise since the trace would only show pytest internals
    which are not useful for debugging pybind11 module issues.
    """
    # noinspection PyBroadException
    try:
        import pybind11_tests  # noqa: F401 imported but unused
    except Exception as e:
        # Report concisely and abort the whole pytest run.
        print("Failed to import pybind11_tests from pytest:")
        print("  {}: {}".format(type(e).__name__, e))
        sys.exit(1)
# Run the diagnostic at conftest import time, before any test is collected.
_test_import_pybind11()
4,642 | test invalid get | # Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import re
from dataclasses import dataclass
from typing import Callable
import pytest
from pants.engine.internals.selectors import Get, MultiGet
from pants.engine.unions import union
class AClass:
    # Plain marker type used as a Get output type in the tests below.
    pass
@dataclass(frozen=True)
class BClass:
    # Hashable, constructible marker type used as a Get input in the tests below.
    pass
def test_create_get() -> None:
    # All four Get construction syntaxes produce equivalent metadata.
    get1 = Get(AClass)
    assert get1.output_type is AClass
    assert get1.input_types == []
    assert get1.inputs == []
    get2 = Get(AClass, int, 42)
    assert get2.output_type is AClass
    assert get2.input_types == [int]
    assert get2.inputs == [42]
    # Also test the equivalence of the 1-arg and 2-arg versions.
    get3 = Get(AClass, int(42))
    assert get2.output_type == get3.output_type
    assert get2.input_types == get3.input_types
    assert get2.inputs == get3.inputs
    # And finally the multiple parameter syntax.
    get4 = Get(AClass, {42: int, "hello": str})
    assert get4.output_type is AClass
    assert get4.input_types == [int, str]
    assert get4.inputs == [42, "hello"]
def assert_invalid_get(create_get: Callable[[], Get], *, expected: str) -> None:
    # Helper: constructing the Get must raise TypeError with exactly `expected`.
    with pytest.raises(TypeError) as exc:
        create_get()
    assert str(exc.value) == expected
def METHOD_NAME() -> None:
    # Each malformed Get construction must fail with its exact error message.
    # Bad output type.
    assert_invalid_get(
        lambda: Get(1, str, "bob"),  # type: ignore[call-overload, no-any-return]
        expected=(
            "Invalid Get. The first argument (the output type) must be a type, but given "
            f"`1` with type {int}."
        ),
    )
    # Bad second argument.
    assert_invalid_get(
        lambda: Get(AClass, BClass),
        expected=(
            "Invalid Get. Because you are using the shorthand form "
            "Get(OutputType, InputType(constructor args)), the second argument should be "
            f"a constructor call, rather than a type, but given {BClass}."
        ),
    )
    assert_invalid_get(
        lambda: Get(AClass, 1, BClass),
        expected=(
            "Invalid Get. Because you are using the longhand form Get(OutputType, InputType, "
            "input), the second argument must be a type, but given `1` of type "
            f"{int}."
        ),
    )
    # Bad third argument.
    assert_invalid_get(
        lambda: Get(AClass, BClass, BClass),
        expected=(
            "Invalid Get. Because you are using the longhand form Get(OutputType, InputType, "
            "input), the third argument should be an object, rather than a type, but given "
            f"{BClass}."
        ),
    )
def test_invalid_get_input_does_not_match_type() -> None:
    # A concrete input type mismatching the declared type is rejected eagerly.
    assert_invalid_get(
        lambda: Get(AClass, str, 1),
        expected=(
            f"Invalid Get. The third argument `1` must have the exact same type as the "
            f"second argument, {str}, but had the type {int}."
        ),
    )
    # However, if the `input_type` is a `@union`, then we do not eagerly validate.
    @union
    class UnionBase:
        pass
    union_get = Get(AClass, UnionBase, 1)
    assert union_get.input_types == [UnionBase]
    assert union_get.inputs == [1]
def test_multiget_invalid_types() -> None:
    # MultiGet rejects non-Get positional arguments with a descriptive TypeError.
    with pytest.raises(
        expected_exception=TypeError,
        match=re.escape("Unexpected MultiGet argument types: Get(AClass, BClass, BClass()), 'bob'"),
    ):
        next(MultiGet(Get(AClass, BClass()), "bob").__await__())  # type: ignore[call-overload]
def test_multiget_invalid_Nones() -> None:
    # MultiGet rejects None arguments with a descriptive ValueError.
    with pytest.raises(
        expected_exception=ValueError,
        match=re.escape("Unexpected MultiGet None arguments: None, Get(AClass, BClass, BClass())"),
    ):
        next(
            MultiGet(None, Get(AClass, BClass()), None, None).__await__()  # type: ignore[call-overload]
        )
# N.B.: MultiGet takes either:
#   1. One homogenous Get collection.
#   2. Up to 10 homogeneous or heterogeneous Gets
#   3. 11 or more homogenous Gets.
#
# Here we test that the runtime actually accepts 11 or more Gets. This is really just a regression
# test that checks MultiGet retains a trailing *args slot.
@pytest.mark.parametrize("count", list(range(1, 20)))
def test_homogenous(count) -> None:
    # Awaiting a MultiGet of N identical Gets yields those same N Gets back.
    gets = tuple(Get(AClass, BClass()) for _ in range(count))
    assert gets == next(MultiGet(*gets).__await__())
4,643 | register to server | """ Within this module is defined the class from which all other accounting types are defined
"""
import datetime
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Base.Client import Client
from DIRAC.AccountingSystem.Client.DataStoreClient import gDataStoreClient
class BaseAccountingType:
    """Base class for accounting record types.

    Concrete subclasses fill in definitionKeyFields / definitionAccountingFields
    and call checkType() before the record is used.
    """
    def __init__(self):
        # Names of key fields (populated from definitionKeyFields by checkType).
        self.keyFieldsList = []
        # Names of numeric value fields (populated from definitionAccountingFields).
        self.valueFieldsList = []
        # Current values, positionally aligned with fieldsList.
        self.valuesList = []
        # keyFieldsList + valueFieldsList, in that order.
        self.fieldsList = []
        self.startTime = 0
        self.endTime = 0
        # Data older than this many seconds may be deleted; 0 means keep forever.
        self.dataTimespan = 0
        # (max record age in seconds, bucket granularity in seconds) pairs.
        self.bucketsLength = [
            (86400 * 8, 3600),  # <1w+1 = 1h
            (15552000, 86400),  # >1w+1d <6m = 1d
            (31104000, 604800),  # >6m = 1w
        ]
        self.definitionKeyFields = []
        self.definitionAccountingFields = []
    def checkType(self):
        """
        Check that everything is defined
        """
        if not self.definitionKeyFields:
            raise Exception("definitionKeyFields has to be filled prior to utilization")
        if not self.definitionAccountingFields:
            raise Exception("definitionAccountingFields has to be filled prior to utilization")
        # Definitions are (name, type) pairs; only the names are kept here.
        for key in self.definitionKeyFields:
            self.keyFieldsList.append(key[0])
        for value in self.definitionAccountingFields:
            self.valueFieldsList.append(value[0])
        self.fieldsList = []
        self.fieldsList.extend(self.keyFieldsList)
        self.fieldsList.extend(self.valueFieldsList)
        # (Re)size the value slots to match the full field list.
        if len(self.valuesList) != len(self.fieldsList):
            self.valuesList = [None] * len(self.fieldsList)
    def getDataTimespan(self):
        """
        Get the data timespan for the time. Data older than dataTimespan will be deleted
        """
        return self.dataTimespan
    def setStartTime(self, startTime=False):
        """
        Give a start time for the report
        By default use now
        """
        if not startTime:
            self.startTime = datetime.datetime.utcnow()
        else:
            self.startTime = startTime
    def setEndTime(self, endTime=False):
        """
        Give a end time for the report
        By default use now
        """
        if not endTime:
            self.endTime = datetime.datetime.utcnow()
        else:
            self.endTime = endTime
    def setNowAsStartAndEndTime(self):
        """
        Set current time as start and end time of the report
        """
        self.startTime = datetime.datetime.utcnow()
        self.endTime = self.startTime
    def setValueByKey(self, key, value):
        """
        Add value for key
        """
        if key not in self.fieldsList:
            return S_ERROR(f"Key {key} is not defined")
        keyPos = self.fieldsList.index(key)
        self.valuesList[keyPos] = value
        return S_OK()
    def setValuesFromDict(self, dataDict):
        """
        Set values from key-value dictionary
        """
        # Validate every key first so a partial update never happens.
        errKeys = []
        for key in dataDict:
            if key not in self.fieldsList:
                errKeys.append(key)
        if errKeys:
            return S_ERROR(f"Key(s) {', '.join(errKeys)} are not valid")
        for key in dataDict:
            self.setValueByKey(key, dataDict[key])
        return S_OK()
    def getValue(self, key):
        # Returns S_OK(value) or S_ERROR for unknown keys / unset positions.
        try:
            return S_OK(self.valuesList[self.fieldsList.index(key)])
        except IndexError:
            return S_ERROR(f"{key} does not have a value")
        except ValueError:
            return S_ERROR(f"{key} is not a valid key")
    def checkValues(self):
        """
        Check that all values are defined and valid
        """
        errorList = []
        for i in range(len(self.valuesList)):
            key = self.fieldsList[i]
            if self.valuesList[i] is None:
                errorList.append(f"no value for {key}")
            # Accounting (non-key) fields must be numeric.
            if key in self.valueFieldsList and not isinstance(self.valuesList[i], (int, float)):
                errorList.append(f"value for key {key} is not numerical type")
        if errorList:
            return S_ERROR(f"Invalid values: {', '.join(errorList)}")
        if not self.startTime:
            return S_ERROR("Start time has not been defined")
        if not isinstance(self.startTime, datetime.datetime):
            return S_ERROR("Start time is not a datetime object")
        if not self.endTime:
            return S_ERROR("End time has not been defined")
        if not isinstance(self.endTime, datetime.datetime):
            return S_ERROR("End time is not a datetime object")
        return self.checkRecord()
    def checkRecord(self):
        """To be overwritten by child class"""
        return S_OK()
    def getDefinition(self):
        """
        Get a tuple containing type definition
        """
        return (self.__class__.__name__, self.definitionKeyFields, self.definitionAccountingFields, self.bucketsLength)
    def getValues(self):
        """
        Get a tuple containing report values
        """
        return (self.__class__.__name__, self.startTime, self.endTime, self.valuesList)
    def getContents(self):
        """
        Get the contents
        """
        cD = {}
        if self.startTime:
            # NOTE(review): "starTime" looks like a typo for "startTime", but
            # consumers may already rely on this key -- confirm before renaming.
            cD["starTime"] = self.startTime
        if self.endTime:
            cD["endTime"] = self.endTime
        # Only set (truthy) values are exported.
        for iPos in range(len(self.fieldsList)):
            if self.valuesList[iPos]:
                cD[self.fieldsList[iPos]] = self.valuesList[iPos]
        return cD
    def METHOD_NAME(self):
        """
        Register type in server
        """
        rpcClient = Client(url="Accounting/DataStore")
        return rpcClient.registerType(*self.getDefinition())
    def commit(self):
        """
        Commit register to server
        """
        retVal = gDataStoreClient.addRegister(self)
        if not retVal["OK"]:
            return retVal
        return gDataStoreClient.commit()
    def delayedCommit(self):
        """
        Commit register to the server. Delayed commit allows to speed up
        the operation as more registers will be sent at once.
        """
        retVal = gDataStoreClient.addRegister(self)
        if not retVal["OK"]:
            return retVal
        return gDataStoreClient.delayedCommit()
    def remove(self):
        """
        Remove a register from server
        """
        return gDataStoreClient.remove(self)
4,644 | main | #!/usr/bin/env python3
############################################################
# Program is part of MintPy #
# Copyright (c) 2013, Zhang Yunjun, Heresh Fattahi #
# Author: Antonio Valentino, Zhang Yunjun, Aug 2022 #
############################################################
import sys
from mintpy.utils.arg_utils import create_argument_parser
##################################################################################################
EXAMPLE = """example:
multilook.py velocity.h5 -r 15 -a 15
multilook.py srtm30m.dem -x 10 -y 10 -o srtm300m.dem
multilook.py filt_fine.int -r 2 -a 2 -o filt_fine_mli.int
# support GDAL VRT file from ISCE2 as input
multilook.py lat.rdr.full.vrt lon.rdr.full.vrt -x 9 -y 3
# --off-file option: use as reference to adjust for the irregular size from isce2 dense offsets
multilook.py lat.rdr.full.vrt -x 128 -y 64 -o lat.rdr.mli --off-file dense_offsets.bil
multilook.py ../../geom_reference/lat.rdr.full -x 300 -y 100 -o lat.rdr --off-file offset.bip
"""
def create_parser(subparsers=None):
    """Build the command line parser for multilook.py (standalone or as a sub-command)."""
    synopsis = 'Multilook the input file'
    epilog = EXAMPLE
    name = __name__.split('.')[-1]
    parser = create_argument_parser(
        name, synopsis=synopsis, description=synopsis, epilog=epilog, subparsers=subparsers)
    # basic
    parser.add_argument('file', nargs='+', help='File(s) to multilook')
    parser.add_argument('-r','--range','-x', dest='lks_x', type=int, default=1,
                        help='number of multilooking in range  /x direction (default: %(default)s).')
    parser.add_argument('-a','--azimuth','-y', dest='lks_y', type=int, default=1,
                        help='number of multilooking in azimuth/y direction (default: %(default)s).')
    parser.add_argument('-o', '--outfile',
                        help='Output file name. Disabled when more than 1 input files')
    parser.add_argument('-m','--method', dest='method', type=str, default='mean', choices=['mean', 'median', 'nearest'],
                        help='downsampling method (default: %(default)s) \n'
                             'e.g. nearest for geometry, average for observations')
    # offset
    ampcor = parser.add_argument_group('Ampcor options', 'Ampcor options for dense offsets to account for the extra margin')
    ampcor.add_argument('--search','--search-win', dest='search_win', type=int, nargs=2, metavar=('X','Y'),
                        help='Ampcor (half) search window in (width, height) in pixel, e.g. 20 x 20.')
    ampcor.add_argument('--xcorr','--xcorr-win', dest='xcorr_win', type=int, nargs=2, metavar=('X','Y'),
                        help='Ampcor cross-correlation window in (width, height) in pixel e.g. 32 x 32.')
    ampcor.add_argument('--margin', dest='margin', type=int, default=0,
                        help='Ampcor margin offset (default: %(default)s).')
    ampcor.add_argument('--off-file', dest='off_file', type=str,
                        help='Ampcor offset file as reference for the size.')
    return parser
def cmd_line_parse(iargs=None):
    """Parse and validate command line arguments; returns the argparse namespace."""
    # parse
    parser = create_parser()
    inps = parser.parse_args(args=iargs)
    # import
    from mintpy.utils import utils1 as ut
    # check: input file list (expand wildcards etc.)
    inps.file = ut.get_file_list(inps.file)
    # check: -x/y options (num of multilooks) -- 1/1 would be a no-op
    if inps.lks_x == 1 and inps.lks_y == 1:
        raise SystemExit('ERROR: no multilooking specified: lks_x/y=1!')
    # check: -o / --outfile (output file name) -- ambiguous with multiple inputs
    if len(inps.file) > 1 and inps.outfile:
        inps.outfile = None
        print('more than one file is input, disable custom output filename.')
    return inps
##################################################################################################
def METHOD_NAME(iargs=None):
    """Entry point: multilook every input file with the requested parameters."""
    # parse
    inps = cmd_line_parse(iargs)
    # import (deferred so --help stays fast)
    from mintpy.multilook import multilook_file
    # run
    for infile in inps.file:
        multilook_file(
            infile,
            lks_y=inps.lks_y,
            lks_x=inps.lks_x,
            outfile=inps.outfile,
            method=inps.method,
            search_win=inps.search_win,
            xcorr_win=inps.xcorr_win,
            margin=inps.margin,
            off_file=inps.off_file,
        )
    print('Done.')
###################################################################################################
if __name__ == '__main__':
    METHOD_NAME(sys.argv[1:])
4,645 | get corpus directory | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Builtin fuzzer."""
# NOTE: This module is deprecated and will be replaced with
# lib.clusterfuzz.fuzz.engine.
import os
import random
import sys
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.bot import testcase_manager
from clusterfuzz._internal.bot.fuzzers import engine_common
from clusterfuzz._internal.bot.fuzzers import utils as fuzzers_utils
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.system import environment
from clusterfuzz._internal.system import shell
class BuiltinFuzzerResult:
  """Result of running a builtin fuzzer."""

  def __init__(self, output, corpus_directory=None):
    # Human-readable output of the run (also carries "metadata::" lines).
    self.output = output
    # Directory holding the corpus, or None if not applicable.
    self.corpus_directory = corpus_directory
class BuiltinFuzzerError(Exception):
  """Exception that should be thrown when there is an issue preventing a builtin
  fuzzer from running, or if there is a very unusual exception encountered
  during a run."""
class BuiltinFuzzer:
  """Builtin fuzzer."""

  def run(self, input_directory, output_directory, no_of_files):
    # Subclasses generate `no_of_files` testcases and return a BuiltinFuzzerResult.
    raise NotImplementedError

  @property
  def fuzzer_directory(self):
    # Directory of the concrete subclass's module file.
    return os.path.abspath(
        os.path.dirname(sys.modules[self.__module__].__file__))
def METHOD_NAME(input_directory, project_qualified_name):
  """Get (and create if needed) the corpus directory for a fuzz target.

  On a trusted host the path is rebased to the untrusted worker's root and
  created remotely; otherwise it is created locally.
  """
  corpus_directory = os.path.join(input_directory, project_qualified_name)
  # Consolidated: the original checked is_trusted_host() twice and imported
  # file_host twice; the host type does not change between the two steps.
  if environment.is_trusted_host():
    from clusterfuzz._internal.bot.untrusted_runner import file_host
    corpus_directory = file_host.rebase_to_worker_root(corpus_directory)
    # Create corpus directory if it does not exist already.
    file_host.create_directory(corpus_directory, create_intermediates=True)
  else:
    shell.create_directory(corpus_directory)
  return corpus_directory
class EngineFuzzer(BuiltinFuzzer):
    """Builtin fuzzer for fuzzing engines such as libFuzzer."""

    def generate_arguments(self, fuzzer_path):
        """Generate arguments for the given fuzzer."""
        raise NotImplementedError

    def _get_fuzzer_binary_name_and_path(self):
        """Returns the fuzzer binary name and its path."""
        # Fuchsia doesn't use file paths to call fuzzers, just the name of the
        # fuzzer, so we set both from FUZZ_TARGET here.
        if environment.platform() == 'FUCHSIA':
            fuzzer_binary_name = fuzzer_path = environment.get_value('FUZZ_TARGET')
            return fuzzer_binary_name, fuzzer_path
        build_directory = environment.get_value('BUILD_DIR')
        if not build_directory:
            raise BuiltinFuzzerError('BUILD_DIR environment variable is not set.')
        fuzzers = fuzzers_utils.get_fuzz_targets(build_directory)
        if not fuzzers:
            raise BuiltinFuzzerError(
                'No fuzzer binaries found in |BUILD_DIR| directory.')
        fuzzer_binary_name = environment.get_value('FUZZ_TARGET')
        if fuzzer_binary_name:
            # An explicit target was requested: resolve it to a build path.
            fuzzer_path = _get_fuzzer_path(fuzzers, fuzzer_binary_name)
        else:
            # No explicit target: pick one at random.
            fuzzer_path = random.SystemRandom().choice(fuzzers)
            fuzzer_binary_name = os.path.basename(fuzzer_path)
        return fuzzer_binary_name, fuzzer_path

    def run(self, input_directory, output_directory, no_of_files):
        """Run the fuzzer to generate testcases.

        Writes one placeholder testcase file plus one flags file per testcase
        into *output_directory* and returns a :class:`BuiltinFuzzerResult`
        carrying metadata lines for the fuzz task to parse.
        """
        fuzzer_binary_name, fuzzer_path = self._get_fuzzer_binary_name_and_path()
        project_qualified_name = data_types.fuzz_target_project_qualified_name(
            utils.current_project(), fuzzer_binary_name)
        arguments = self.generate_arguments(fuzzer_path)
        corpus_directory = METHOD_NAME(input_directory,
                                       project_qualified_name)
        # Create fuzz testcases.
        for i in range(no_of_files):
            # Contents of testcase file don't matter at this point. Need to
            # create something non-null so that it is not ignored.
            testcase_file_path = os.path.join(
                output_directory, '%s%d' % (testcase_manager.FUZZ_PREFIX, i))
            utils.write_data_to_file(' ', testcase_file_path)
            # Write the flags file containing command line for running launcher
            # script.
            flags_file_path = os.path.join(
                output_directory, '%s%d' % (testcase_manager.FLAGS_PREFIX, i))
            flags = ['%TESTCASE%', fuzzer_binary_name]
            if arguments:
                flags.append(arguments)
            flags_file_content = ' '.join(flags)
            utils.write_data_to_file(flags_file_content, flags_file_path)
        output = 'Generated %d testcase for fuzzer %s.\n' % (no_of_files,
                                                             fuzzer_binary_name)
        output += 'metadata::fuzzer_binary_name: %s\n' % fuzzer_binary_name
        issue_owners = engine_common.get_issue_owners(fuzzer_path)
        if issue_owners:
            output += 'metadata::issue_owners: %s\n' % ','.join(issue_owners)
        issue_labels = engine_common.get_issue_labels(fuzzer_path)
        if issue_labels:
            output += 'metadata::issue_labels: %s\n' % ','.join(issue_labels)
        issue_components = engine_common.get_issue_components(fuzzer_path)
        if issue_components:
            output += 'metadata::issue_components: %s\n' % ','.join(issue_components)
        # Update *SAN_OPTIONS in current environment from .options file. This
        # environment is used in fuzz task later for deriving the environment
        # string in |get_environment_settings_as_string| and embedding this as
        # part of stacktrace.
        engine_common.process_sanitizer_options_overrides(fuzzer_path)
        return BuiltinFuzzerResult(output=output, corpus_directory=corpus_directory)
def _get_fuzzer_path(target_list, fuzzer_name):
    """Return the path in *target_list* whose basename is |fuzzer_name|'s
    platform-specific executable filename.

    :raise BuiltinFuzzerError: when no entry matches.
    """
    wanted = environment.get_executable_filename(fuzzer_name)
    match = next(
        (candidate for candidate in target_list
         if os.path.basename(candidate) == wanted),
        None)
    if match is None:
        raise BuiltinFuzzerError('Failed to find chosen target ' + fuzzer_name)
    return match
# /*##########################################################################
#
# Copyright (c) 2017-2018 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""
This module provides the :class:`SceneWidget` content and parameters model.
"""
__authors__ = ["T. Vincent"]
__license__ = "MIT"
__date__ = "11/01/2018"
import weakref
from ... import qt
from .core import BaseRow
from .items import Settings, nodeFromItem
def visitQAbstractItemModel(model, parent=qt.QModelIndex()):
    """Generator over all indices of *model* beneath *parent*.

    Rows are visited top to bottom; within a row, cells are yielded left to
    right.  A parent is yielded before its children, and only the first
    column of a row is descended into.

    :param QAbstractItemModel model: The model to visit
    :param QModelIndex parent:
        Index from which to start visiting the model.
        Default: start from the root
    """
    assert isinstance(model, qt.QAbstractItemModel)
    assert isinstance(parent, qt.QModelIndex)
    assert parent.model() is model or not parent.isValid()

    for row in range(model.rowCount(parent)):
        # Yield every cell of this row, left to right.
        for column in range(model.columnCount(parent)):
            yield model.index(row, column, parent)
        # Recurse through the children hanging off the first column.
        first = model.index(row, 0, parent)
        for descendant in visitQAbstractItemModel(model, first):
            yield descendant
class Root(BaseRow):
    """Root node of :class:`SceneWidget` parameters.

    It has two children:

    - Settings
    - Scene group
    """

    def __init__(self, model, sceneWidget):
        super(Root, self).__init__()
        # Weak reference: the model must not keep the widget alive.
        self._sceneWidget = weakref.ref(sceneWidget)
        self.setParent(model)  # Needed for Root

    def children(self):
        # Report no children once the widget has been garbage collected,
        # so stale rows are never exposed through the model.
        sceneWidget = self._sceneWidget()
        if sceneWidget is None:
            return ()
        else:
            return super(Root, self).children()
class SceneModel(qt.QAbstractItemModel):
    """Model of a :class:`SceneWidget`.

    :param SceneWidget parent: The SceneWidget this model represents.
    """

    def __init__(self, parent):
        # Weak reference so the model does not keep the widget alive.
        self._sceneWidget = weakref.ref(parent)
        super(SceneModel, self).__init__(parent)
        # The hidden root row holds the two top-level rows: the widget
        # settings and the scene group node.
        self._root = Root(self, parent)
        self._root.addRow(Settings(parent))
        self._root.addRow(nodeFromItem(parent.getSceneGroup()))

    def sceneWidget(self):
        """Returns the :class:`SceneWidget` this model represents.

        In case the widget has already been deleted, it returns None

        :rtype: SceneWidget
        """
        return self._sceneWidget()

    def METHOD_NAME(self, index):
        """Returns the corresponding :class:`Node` or :class:`Item3D`.

        An invalid index maps to the hidden root row.

        :param QModelIndex index:
        :rtype: Node or Item3D
        """
        return index.internalPointer() if index.isValid() else self._root

    def index(self, row, column, parent=qt.QModelIndex()):
        """See :meth:`QAbstractItemModel.index`"""
        if column >= self.columnCount(parent) or row >= self.rowCount(parent):
            return qt.QModelIndex()
        item = self.METHOD_NAME(parent)
        # The row object itself is stored as the index's internal pointer.
        return self.createIndex(row, column, item.children()[row])

    def parent(self, index):
        """See :meth:`QAbstractItemModel.parent`"""
        if not index.isValid():
            return qt.QModelIndex()
        item = self.METHOD_NAME(index)
        parent = item.parent()
        ancestor = parent.parent()
        if ancestor is not self:  # root node
            children = ancestor.children()
            row = children.index(parent)
            return self.createIndex(row, 0, parent)
        # Direct child of the root: its model parent is the invalid index.
        return qt.QModelIndex()

    def rowCount(self, parent=qt.QModelIndex()):
        """See :meth:`QAbstractItemModel.rowCount`"""
        item = self.METHOD_NAME(parent)
        return item.rowCount()

    def columnCount(self, parent=qt.QModelIndex()):
        """See :meth:`QAbstractItemModel.columnCount`"""
        item = self.METHOD_NAME(parent)
        return item.columnCount()

    def data(self, index, role=qt.Qt.DisplayRole):
        """See :meth:`QAbstractItemModel.data`"""
        item = self.METHOD_NAME(index)
        column = index.column()
        return item.data(column, role)

    def setData(self, index, value, role=qt.Qt.EditRole):
        """See :meth:`QAbstractItemModel.setData`"""
        item = self.METHOD_NAME(index)
        column = index.column()
        if item.setData(column, value, role):
            self.dataChanged.emit(index, index)
            return True
        return False

    def flags(self, index):
        """See :meth:`QAbstractItemModel.flags`"""
        item = self.METHOD_NAME(index)
        column = index.column()
        return item.flags(column)

    def headerData(self, section, orientation, role=qt.Qt.DisplayRole):
        """See :meth:`QAbstractItemModel.headerData`"""
        if orientation == qt.Qt.Horizontal and role == qt.Qt.DisplayRole:
            return 'Item' if section == 0 else 'Value'
        else:
            return None
import warnings
import pytest
from sqlalchemy.exc import SAWarning
from ichnaea.models.content import (
decode_datamap_grid,
encode_datamap_grid,
DataMap,
RegionStat,
Stat,
StatKey,
)
from ichnaea import util
class TestDataMapCodec(object):
    """Tests for the 8-byte (lat, lon) grid codec.

    Per the asserts below, the all-zero encoding corresponds to the
    south-west corner (-90000, -180000); ``scale=True`` returns degrees
    instead of scaled integers; ``codec="base64"`` round-trips the same
    payload in base64 form.
    """

    def test_decode_datamap_grid(self):
        assert decode_datamap_grid(b"\x00\x00\x00\x00\x00\x00\x00\x00") == (
            -90000,
            -180000,
        )
        assert decode_datamap_grid(b"AAAAAAAAAAA=", codec="base64") == (-90000, -180000)
        assert decode_datamap_grid(b"\x00\x01_\x90\x00\x02\xbf ") == (0, 0)
        assert decode_datamap_grid(b"AAFfkAACvyA=", codec="base64") == (0, 0)
        assert decode_datamap_grid(b"\x00\x02\xbf \x00\x05~@") == (90000, 180000)
        assert decode_datamap_grid(b"\x00\x02\xbf \x00\x05~@", scale=True) == (
            90.0,
            180.0,
        )
        assert decode_datamap_grid(b"AAK/IAAFfkA=", codec="base64") == (90000, 180000)
        assert decode_datamap_grid(b"AAK/IAAFfkA=", scale=True, codec="base64") == (
            90.0,
            180.0,
        )

    def METHOD_NAME(self):
        """Encoding is the exact inverse of the decode cases above."""
        assert (
            encode_datamap_grid(-90000, -180000) == b"\x00\x00\x00\x00\x00\x00\x00\x00"
        )
        assert encode_datamap_grid(-90000, -180000, codec="base64") == b"AAAAAAAAAAA="
        assert encode_datamap_grid(0, 0) == b"\x00\x01_\x90\x00\x02\xbf "
        assert encode_datamap_grid(0, 0, codec="base64") == b"AAFfkAACvyA="
        assert (
            encode_datamap_grid(90.0, 180.0, scale=True) == b"\x00\x02\xbf \x00\x05~@"
        )
        assert encode_datamap_grid(90000, 180000) == b"\x00\x02\xbf \x00\x05~@"
        assert encode_datamap_grid(90000, 180000, codec="base64") == b"AAK/IAAFfkA="
class TestDataMap(object):
    """Tests for the sharded :class:`DataMap` model."""

    def test_fields(self, session):
        """A grid row round-trips through the database unchanged."""
        today = util.utcnow().date()
        lat = 12345
        lon = -23456
        model = DataMap.shard_model(lat, lon)
        session.add(model(grid=(lat, lon), created=today, modified=today))
        session.flush()
        result = session.query(model).first()
        assert result.grid == (lat, lon)
        assert result.created == today
        assert result.modified == today

    def test_scale(self):
        """Degrees are scaled to integers (rounded to 1/1000 degree)."""
        assert DataMap.scale(-1.12345678, 2.23456789) == (-1123, 2235)

    def test_shard_id(self):
        """Shard is chosen per quadrant; boundaries per the cases below."""
        assert DataMap.shard_id(None, None) is None
        assert DataMap.shard_id(85000, 180000) == "ne"
        assert DataMap.shard_id(36000, 5000) == "ne"
        assert DataMap.shard_id(35999, 5000) == "se"
        assert DataMap.shard_id(-85000, 180000) == "se"
        assert DataMap.shard_id(85000, -180000) == "nw"
        assert DataMap.shard_id(36000, 4999) == "nw"
        assert DataMap.shard_id(35999, 4999) == "sw"
        assert DataMap.shard_id(-85000, -180000) == "sw"

    def test_grid_bytes(self, session):
        """A raw encoded grid value is decoded back to a (lat, lon) tuple."""
        lat = 12000
        lon = 34000
        grid = encode_datamap_grid(lat, lon)
        model = DataMap.shard_model(lat, lon)
        session.add(model(grid=grid))
        session.flush()
        result = session.query(model).first()
        assert result.grid == (lat, lon)

    def test_grid_none(self, session):
        """A NULL grid is rejected at flush time."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", SAWarning)
            session.add(DataMap.shard_model(0, 0)(grid=None))
            with pytest.raises(Exception):
                session.flush()

    def test_grid_length(self, session):
        """A grid of the wrong byte length is rejected at flush time."""
        session.add(DataMap.shard_model(0, 9)(grid=b"\x00" * 9))
        with pytest.raises(Exception):
            session.flush()

    def test_grid_list(self, session):
        """A list (instead of tuple/bytes) grid is rejected at flush time."""
        lat = 1000
        lon = -2000
        session.add(DataMap.shard_model(lat, lon)(grid=[lat, lon]))
        with pytest.raises(Exception):
            session.flush()
class TestRegionStat(object):
    """Round-trip persistence test for :class:`RegionStat`."""

    def test_fields(self, session):
        session.add(RegionStat(region="GB", gsm=1, wcdma=2, lte=3, blue=4, wifi=5))
        session.flush()
        stored = session.query(RegionStat).first()
        expected = {
            "region": "GB",
            "gsm": 1,
            "wcdma": 2,
            "lte": 3,
            "blue": 4,
            "wifi": 5,
        }
        for name, value in expected.items():
            assert getattr(stored, name) == value
class TestStat(object):
    """Round-trip persistence tests for :class:`Stat`."""

    def test_fields(self, session):
        today = util.utcnow().date()
        session.add(Stat(key=StatKey.cell, time=today, value=13))
        session.flush()
        stored = session.query(Stat).first()
        assert stored.key == StatKey.cell
        assert stored.time == today
        assert stored.value == 13

    def test_enum(self, session):
        # The key column stores the enum; its integer value and name must
        # survive the round trip as well.
        today = util.utcnow().date()
        session.add(Stat(key=StatKey.cell, time=today, value=13))
        session.flush()
        stored = session.query(Stat).first()
        assert stored.key == StatKey.cell
        assert int(stored.key) == 1
        assert stored.key.name == "cell"
# Copyright (c) 2022. The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details
from functools import partial
import pytest
from sqlalchemy import select, func, text
from sqlalchemy.exc import IntegrityError, DataError
from pycroft.model.finance import (
Transaction,
IllegalTransactionError,
Split,
Account,
BankAccount,
)
from tests.factories import AccountFactory, UserFactory
from tests.factories.finance import BankAccountFactory, BankAccountActivityFactory
@pytest.fixture
def author(session):
    """User acting as the author of test transactions."""
    return UserFactory()

@pytest.fixture(scope='module')
def asset_account(module_session):
    """Shared ASSET account (module scope)."""
    return AccountFactory(type='ASSET')

@pytest.fixture(scope='module')
def revenue_account(module_session):
    """Shared REVENUE account (module scope)."""
    return AccountFactory(type='REVENUE')

@pytest.fixture(scope='module')
def liability_account(module_session):
    """Shared LIABILITY account (module scope)."""
    return AccountFactory(type='LIABILITY')

@pytest.fixture(name='t')
def transaction(author):
    """Fresh, not-yet-persisted transaction, available as fixture ``t``."""
    return Transaction(description='Transaction', author=author)

def build_split(t, account, amount):
    """Shorthand for a :class:`Split` of *amount* on *account* within *t*."""
    return Split(amount=amount, account=account, transaction=t)
def test_empty_t(session, t):
    """A transaction without any splits must be rejected."""
    with pytest.raises(IllegalTransactionError):
        with session.begin_nested():
            session.add(t)

def test_fail_on_unbalance(session, t, asset_account):
    """A single split cannot balance, so the flush must fail."""
    split = build_split(t, asset_account, 100)
    with pytest.raises(IllegalTransactionError):
        with session.begin_nested():
            session.add_all([t, split])
def test_insert_balanced(session, t, asset_account, revenue_account):
    """A transaction whose splits sum to zero must be accepted."""
    s1 = build_split(t, asset_account, 100)
    s2 = build_split(t, revenue_account, -100)
    try:
        with session.begin_nested():
            session.add_all([s1, s2])
    except IllegalTransactionError:
        # Fix: the previous failure message was the garbled
        # "Threw illegalterror".
        pytest.fail("IllegalTransactionError raised for a balanced transaction")
def METHOD_NAME(
    session, t, asset_account, revenue_account
):
    """Deleting a transaction must cascade to its splits."""
    s1 = build_split(t, asset_account, 100)
    s2 = build_split(t, revenue_account, -100)
    with session.begin_nested():
        session.add_all([t, s1, s2])
    with session.begin_nested():
        session.delete(t)  # should delete associated splits
    assert session.scalars(select(func.count(Split.id))).one() == 0

def test_fail_on_self_transaction(session, t, asset_account):
    """Both splits on the same account must violate a DB constraint."""
    s1 = build_split(t, asset_account, 100)
    s2 = build_split(t, asset_account, -100)
    with pytest.raises(IntegrityError):
        with session.begin_nested():
            session.add_all([t, s1, s2])
def test_fail_on_multiple_split_same_account(
    session, t, asset_account, revenue_account
):
    """Two splits on one account within one transaction must be rejected."""
    s1 = build_split(t, asset_account, 100)
    s2 = build_split(t, revenue_account, -50)
    s3 = build_split(t, revenue_account, -50)
    with pytest.raises(IntegrityError):
        with session.begin_nested():
            session.add_all([t, s1, s2, s3])

@pytest.fixture
def balanced_splits(session, t, asset_account, revenue_account):
    """Persist ``t`` with a balanced +100/-100 pair and return the splits."""
    s1 = build_split(t, asset_account, 100)
    s2 = build_split(t, revenue_account, -100)
    with session.begin_nested():
        session.add_all([t, s1, s2])
    return s1, s2
def test_unbalance_with_insert(
    session, t, balanced_splits, liability_account
):
    """Adding a third split that unbalances the sum must be rejected."""
    with pytest.raises(IllegalTransactionError), session.begin_nested():
        session.add(build_split(t, liability_account, 50))

def test_unbalance_with_update(session, balanced_splits):
    """Changing a split amount so the sum is non-zero must be rejected."""
    _, s2 = balanced_splits
    with pytest.raises(IllegalTransactionError):
        with session.begin_nested():
            s2.amount = -50

def test_unbalance_with_delete(session, t, balanced_splits):
    """Removing one split of a balanced pair must be rejected."""
    with pytest.raises(IllegalTransactionError):
        with session.begin_nested():
            t.splits.pop()
@pytest.fixture(name='immediate_trigger')
def immediate_activity_matches_split_trigger(session):
    """Make the activity/split consistency trigger fire at statement time
    instead of at commit, so violations surface inside the test."""
    session.execute(text(
        "SET CONSTRAINTS bank_account_activity_matches_referenced_split_trigger"
        " IMMEDIATE"
    ))
    yield None
    # Restore the default deferred behavior for subsequent tests.
    session.execute(text(
        "SET CONSTRAINTS bank_account_activity_matches_referenced_split_trigger"
        " DEFERRED"
    ))

@pytest.fixture
def bank_account() -> BankAccount:
    """Fresh bank account (with backing ledger account)."""
    return BankAccountFactory()

@pytest.fixture
def build_activity(bank_account, utcnow):
    """Factory for activities pre-bound to ``bank_account``/``utcnow``."""
    return partial(
        BankAccountActivityFactory.build,
        bank_account=bank_account, imported_at=utcnow,
    )
def test_correct(session, build_activity, bank_account, t, asset_account):
    """An activity referencing a matching split flushes cleanly."""
    s1 = build_split(t, asset_account, 100)
    s2 = build_split(t, bank_account.account, -100)
    a = build_activity(amount=-10, split=s2)
    session.add_all([t, s1, s2, a])
    session.flush()

def test_wrong_split_amount(
    session, immediate_trigger,
    build_activity, bank_account, t, asset_account
):
    """An activity whose amount differs from its split must be rejected."""
    s1 = build_split(t, asset_account, 100)
    s2 = build_split(t, bank_account.account, -100)
    a = build_activity(amount=-50, split=s2)
    with pytest.raises(IntegrityError):
        with session.begin_nested():
            session.add_all([t, s1, s2, a])

def test_wrong_split_account(
    session, immediate_trigger,
    build_activity, revenue_account, t, asset_account
):
    """An activity referencing a split on a foreign account must be rejected."""
    s1 = build_split(t, asset_account, 100)
    s2 = build_split(t, revenue_account, -100)
    a = build_activity(amount=-100, split=s2)
    with pytest.raises(IntegrityError):
        with session.begin_nested():
            session.add_all([t, s1, s2, a])
def test_create_account_type(session):
    """A known account type is accepted."""
    with session.begin_nested():
        session.add(Account(name="foo", type="USER_ASSET"))

def test_create_account_bad_type(session):
    """An unknown account type is rejected by the DB enum."""
    with pytest.raises(DataError), session.begin_nested():
        session.add(Account(name="foo", type="BadType"))
from django.test import TestCase
from nav.models.arnold import Justification
from nav.auditlog import find_modelname
from nav.auditlog.models import LogEntry
from nav.auditlog.utils import get_auditlog_entries
class AuditlogModelTestCase(TestCase):
    """Tests for :class:`LogEntry` creation and summary rendering."""

    def setUp(self):
        # This specific model is used because it is very simple
        self.justification = Justification.objects.create(name='testarossa')

    def METHOD_NAME(self):
        """str() of a LogEntry is its rendered summary."""
        LogEntry.add_log_entry(self.justification, u'str test', 'foo')
        l = LogEntry.objects.filter(verb='str test').get()
        self.assertEqual(str(l), 'foo')
        l.delete()

    def test_add_log_entry_bad_template(self):
        """A template with an unknown placeholder yields an error summary."""
        LogEntry.add_log_entry(
            self.justification, u'bad template test', u'this is a {bad} template'
        )
        l = LogEntry.objects.filter(verb='bad template test').get()
        self.assertEqual(l.summary, u'Error creating summary - see error log')
        l.delete()
        # # When on python3:
        # with self.assertLogs(level='ERROR') as log:
        #     # run body
        #     self.assertEqual(len(log.output), 1)
        #     self.assertEqual(len(log.records), 1)
        #     self.assertIn('KeyError when creating summary:', log.output[0])

    def test_add_log_entry_actor_only(self):
        """The {actor} placeholder is filled with the acting object."""
        LogEntry.add_log_entry(
            self.justification, u'actor test', u'actor "{actor}" only is tested'
        )
        l = LogEntry.objects.filter(verb='actor test').get()
        self.assertEqual(l.summary, u'actor "testarossa" only is tested')
        l.delete()

    def test_add_create_entry(self):
        LogEntry.add_create_entry(self.justification, self.justification)
        l = LogEntry.objects.filter(verb=u'create-justification').get()
        self.assertEqual(l.summary, u'testarossa created testarossa')
        l.delete()

    def test_add_delete_entry(self):
        LogEntry.add_delete_entry(self.justification, self.justification)
        l = LogEntry.objects.filter(verb=u'delete-justification').get()
        self.assertEqual(l.summary, u'testarossa deleted testarossa')
        l.delete()

    def test_compare_objects(self):
        """compare_objects logs one entry per changed field; fields listed
        in the censored set do not reveal old/new values."""
        j1 = Justification.objects.create(name='ferrari', description='Psst!')
        j2 = Justification.objects.create(name='lambo', description='Hush')
        LogEntry.compare_objects(
            self.justification, j1, j2, ('name', 'description'), ('description',)
        )
        l = LogEntry.objects.filter(verb=u'edit-justification-name').get()
        self.assertEqual(
            l.summary,
            u'testarossa edited lambo: name changed' u" from 'ferrari' to 'lambo'",
        )
        l.delete()
        l = LogEntry.objects.filter(verb=u'edit-justification-description').get()
        self.assertEqual(l.summary, u'testarossa edited lambo: description changed')
        l.delete()

    def test_addLog_entry_before(self):
        """The ``before`` value is stored stringified."""
        LogEntry.add_log_entry(self.justification, u'actor test', u'blbl', before=1)
        l = LogEntry.objects.filter(verb='actor test').get()
        self.assertEqual(l.before, u'1')
        l.delete()

    def test_find_name(self):
        # find_modelname returns the model's db_table name.
        name = find_modelname(self.justification)
        self.assertEqual(name, 'blocked_reason')
class AuditlogUtilsTestCase(TestCase):
    """Tests for :func:`get_auditlog_entries` filtering."""

    def setUp(self):
        # This specific model is used because it is very simple
        self.justification = Justification.objects.create(name='testarossa')

    def test_get_auditlog_entries(self):
        """Entries can be filtered by model name, subsystem and object pks."""
        modelname = 'blocked_reason'  # Justification's db_table
        j1 = Justification.objects.create(name='j1')
        j2 = Justification.objects.create(name='j2')
        LogEntry.add_create_entry(self.justification, j1)
        LogEntry.add_log_entry(
            self.justification,
            u'greet',
            u'{actor} greets {object}',
            object=j2,
            subsystem="hello",
        )
        LogEntry.add_log_entry(
            self.justification,
            u'deliver',
            u'{actor} delivers {object} to {target}',
            object=j1,
            target=j2,
            subsystem='delivery',
        )
        entries = get_auditlog_entries(modelname=modelname)
        self.assertEqual(entries.count(), 3)
        entries = get_auditlog_entries(modelname=modelname, subsystem='hello')
        self.assertEqual(entries.count(), 1)
        # j1 appears as object of the create entry and of the deliver entry.
        entries = get_auditlog_entries(modelname=modelname, pks=[j1.pk])
        self.assertEqual(entries.count(), 2)
import json
import logging
import time
from multiprocessing import Pool
from urllib import urlencode
import requests
import transaction
from AccessControl.SecurityManagement import newSecurityManager
from Acquisition import aq_parent
from BTrees.OOBTree import OOBTree
from castle.cms.indexing.hps import index_in_es
from castle.cms.social import COUNT_ANNOTATION_KEY
from castle.cms.utils import clear_object_cache, retriable
from plone.app.redirector.interfaces import IRedirectionStorage
from plone.registry.interfaces import IRegistry
from Products.CMFCore.utils import getToolByName
from Products.CMFPlone import defaultpage
from Products.CMFPlone.interfaces.siteroot import IPloneSiteRoot
from tendo import singleton
from zope.annotation.interfaces import IAnnotations
from zope.component import getUtility
from zope.component.hooks import setSite
USE_MULTIPROCESSING = True
MATOMO_TOKEN_AUTH = 'castle.matomo_token_auth'
MATOMO_BASE_URL = 'castle.matomo_base_url'
MATOMO_SITE_ID = 'castle.matomo_site_id'
logger = logging.getLogger('castle.cms')
def get_matomo_api_url(url):
    """Build the Matomo reporting-API URL returning outlink stats for *url*.

    Site id, base URL and auth token are read from the plone registry.
    """
    registry = getUtility(IRegistry)
    site_id = registry.get(MATOMO_SITE_ID, None)
    base_url = registry.get(MATOMO_BASE_URL, None)
    token_auth = registry.get(MATOMO_TOKEN_AUTH, None)
    params = {
        'module': 'API',
        'method': 'Actions.getOutlinks',
        'idSite': site_id,
        'period': 'year',
        'date': 'today',
        'format': 'json',
        'token_auth': token_auth,
        'expanded': '1',
        'filter_pattern_recursive': '',
        'u': url
    }
    return '{}/?{}'.format(
        base_url,
        urlencode(params)
    )
# Maps result keys to the Matomo outlink labels they are counted from.
_matomo_data_mapping = {
    'facebook_matomo': 'www.facebook.com',
    'twitter_matomo': 'twitter.com'
}

def get_matomo_url_data(urls):
    """Sum Matomo outlink visit counts for *urls*, keyed per social network."""
    data = {
        'facebook_matomo': 0,
        'twitter_matomo': 0
    }
    for url in urls:
        query_url = get_matomo_api_url(url)
        resp = requests.get(query_url, timeout=10).content
        datatable = json.loads(resp)
        for d in datatable:
            # Accumulate visits for the networks we track.
            for key, label in _matomo_data_mapping.items():
                if d['label'] == label:
                    data[key] += d['nb_visits']
    return data
# Counting strategies: either a per-URL JSONP endpoint ('url' template,
# with 'slash_matters' to also query the trailing-slash variant), or a
# 'generator' callable that computes counts for all URLs in one call.
COUNT_TYPES = {
    'pinterest': {
        'url': 'http://api.pinterest.com/v1/urls/count.json?callback=foobar&url=%s',
        'slash_matters': True
    },
    'matomo': {
        'generator': get_matomo_url_data,
    }
}
def _get_url_data(args):
    """Fetch the share count for one (config, url) pair.

    :param args: tuple of (service config dict, page URL).
    :return: integer count; 0 when the response is missing or unparsable.
    """
    config, url = args
    access_url = config['url'] % url
    resp = requests.get(access_url).content
    # The endpoint returns JSONP: ``foobar({...})``.  Strip the exact
    # wrapper.  The previous ``lstrip('foobar(')``/``rstrip(')')`` treated
    # their arguments as *character sets*, which can eat legitimate leading
    # or trailing JSON bytes.
    if resp.startswith('foobar('):
        resp = resp[len('foobar('):]
    if resp.endswith(')'):
        resp = resp[:-1]
    try:
        data = json.loads(resp)
    except ValueError:
        return 0
    if 'count' in data:
        return data['count']
    else:
        return 0
_req_pool = Pool(6)
def _get_urls_data(args):
    """Fetch count data for every URL using one service *config*.

    :param args: tuple of (service config dict, list of URLs).
    :return: list of per-request results (ints, or one dict for
        generator-style services).
    """
    results = []
    config, urls = args
    if config.get('generator'):
        # Generator-style services compute counts for all URLs at once.
        results.append(config['generator'](urls))
    else:
        req_urls = []
        for orig_url in urls:
            req_urls.append((config, orig_url))
            if config.get('slash_matters'):
                # Also query the trailing-slash variant of the URL.
                req_urls.append((config, orig_url.rstrip('/') + '/'))
        if USE_MULTIPROCESSING:
            results = _req_pool.map(_get_url_data, req_urls)
        else:
            for req_url in req_urls:
                results.append(_get_url_data(req_url))
    return results
_pool = Pool(processes=3)
def METHOD_NAME(urls, count_types):
    """Aggregate share counts for *urls* across every configured service."""
    totals = {}
    for type_, config in count_types.items():
        for result in _get_urls_data((config, urls)):
            if isinstance(result, dict):
                # manually setting keys on social data here
                for key, value in result.items():
                    totals[key] = totals.get(key, 0) + value
            else:
                totals[type_] = totals.get(type_, 0) + result
    return totals
def _merge_counts(one, two):
for key, count in two.items():
if key in one:
one[key] += count
else:
one[key] = count
return one
def _has_data(data):
found = False
for key, val in data.items():
if val > 0:
found = True
break
return found
def _count_diff(existing, new):
diff = False
for name, value in new.items():
if value != existing.get(name):
diff = True
break
return diff
@retriable(sync=True)
def get_social_counts(site, obj, site_url, count_types, count=0):
    """Fetch, store and index social share counts for one object.

    Counts are queried for every public URL variant of *obj* (canonical,
    '/view' for listed types, the container URL when *obj* is a default
    page, and all stored redirects), summed, and persisted as an
    annotation; the object is then re-indexed in Elasticsearch.
    """
    counts = {}
    site_path = '/'.join(site.getPhysicalPath())
    obj_path = '/'.join(obj.getPhysicalPath())
    rel_path = obj_path[len(site_path):].strip('/')
    print('Looking up ' + obj_path)
    urls = [site_url.rstrip('/') + '/' + rel_path]
    registry = getUtility(IRegistry)
    if obj.portal_type in registry.get('plone.types_use_view_action_in_listings', []):
        # These types are linked with an explicit /view suffix.
        urls.append(urls[0] + '/view')
    container = aq_parent(obj)
    if defaultpage.is_default_page(container, obj):
        # The object is also served from its container's URL.
        container_path = '/'.join(container.getPhysicalPath())
        rel_path = container_path[len(site_path):].strip('/')
        urls.append(site_url.rstrip('/') + '/' + rel_path)
    redirector = getUtility(IRedirectionStorage)
    for redirect in redirector.redirects(obj_path):
        # Old URLs that now redirect here may still carry share counts.
        rel_path = redirect[len(site_path):].strip('/')
        urls.append(site_url.rstrip('/') + '/' + rel_path)
    counts = METHOD_NAME(urls, count_types)
    if not _has_data(counts):
        return
    obj._p_jar.sync()
    annotations = IAnnotations(obj)
    existing = annotations.get(COUNT_ANNOTATION_KEY, OOBTree())
    if not _count_diff(existing, counts):
        # Nothing changed; avoid a needless commit and reindex.
        return
    # XXX check if value different first before transaction!
    existing.update(counts)
    annotations[COUNT_ANNOTATION_KEY] = existing
    transaction.commit()
    index_in_es(obj)
    if count % 200 == 0:
        # Periodically drop the object cache to bound memory usage.
        clear_object_cache(site)
def retrieve(site):
    """Walk all published content of *site* and refresh its social counts."""
    setSite(site)
    registry = getUtility(IRegistry)
    site_url = registry.get('plone.public_url')
    if not site_url:
        logger.info("No public URL is set; skipping site %s" % site)
        return
    # Which counting strategies should we use?
    count_types = COUNT_TYPES.copy()
    site_id = registry.get(MATOMO_SITE_ID, None)
    base_url = registry.get(MATOMO_BASE_URL, None)
    token_auth = registry.get(MATOMO_TOKEN_AUTH, None)
    if not site_id or not base_url or not token_auth:
        # Drop matomo-based strategies when credentials are incomplete.
        # Fix: the key in COUNT_TYPES is 'matomo', so the previous
        # ``ctype.endswith('_matomo')`` check never matched it and the
        # strategy ran against an unconfigured (None) base URL.
        for ctype in list(count_types.keys()):
            if ctype == 'matomo' or ctype.endswith('_matomo'):
                del count_types[ctype]
    catalog = getToolByName(site, 'portal_catalog')
    count = 0
    for brain in catalog(review_state='published'):
        # we ignore some types...
        if brain.portal_type in ('Image', 'Dashboard',):
            continue
        path = brain.getPath()
        count += 1
        try:
            obj = brain.getObject()
            get_social_counts(site, obj, site_url, count_types, count)
            logger.info('retrieved social stats for: %s' % path)
        except Exception:
            logger.warn('error getting social count totals for: %s' % path,
                        exc_info=True)
        # NOTE(review): throttles every item (2s) — confirm the sleep was
        # not intended to apply only after an error.
        time.sleep(2)
def run(app):
    """Entry point: refresh social counts for every Plone site in *app*.

    Acquires a process-wide singleton lock so two cron runs never overlap,
    then executes as the admin user.
    """
    singleton.SingleInstance('socialcounts')
    user = app.acl_users.getUser('admin')  # noqa
    newSecurityManager(None, user.__of__(app.acl_users))  # noqa
    for oid in app.objectIds():  # noqa
        obj = app[oid]  # noqa
        if IPloneSiteRoot.providedBy(obj):
            retrieve(obj)

if __name__ == '__main__':
    # ``app`` is injected by the Zope ``bin/instance run`` environment.
    run(app)  # noqa
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from devtools_testutils import (
AzureRecordedTestCase
)
from testcase import DocumentTranslationTest, Document
from azure.ai.translation.document import DocumentTranslationInput, TranslationTarget
class AsyncDocumentTranslationTest(DocumentTranslationTest, AzureRecordedTestCase):
    """Async helpers shared by the document-translation test suite."""

    async def METHOD_NAME(self, async_client, translation_inputs, total_docs_count, language=None):
        """Submit *translation_inputs*, wait for completion and validate
        that all *total_docs_count* documents succeeded.

        :return: the id of the finished translation operation.
        """
        # submit operation
        poller = await async_client.begin_translation(translation_inputs)
        assert poller.id is not None
        assert poller.details.id is not None
        # wait for result
        doc_statuses = await poller.result()
        # validate
        self._validate_translation_metadata(poller=poller, status='Succeeded', total=total_docs_count, succeeded=total_docs_count)
        async for doc in doc_statuses:
            self._validate_doc_status(doc, language)
        return poller.id

    # client helpers
    async def _begin_multiple_translations_async(self, async_client, operations_count, **kwargs):
        """Start *operations_count* separate translation operations, each on
        its own freshly-created source/target container pair.

        Keyword options: ``container_suffix``, ``variables``, ``wait``
        (default True: wait for each result), ``language`` (default "es"),
        ``docs_per_operation`` (default 2).

        :return: list of operation ids, one per submitted translation.
        """
        container_suffix = kwargs.pop('container_suffix', "")
        variables = kwargs.pop('variables', {})
        wait_for_operation = kwargs.pop('wait', True)
        language = kwargs.pop('language', "es")
        docs_per_operation = kwargs.pop('docs_per_operation', 2)
        result_ids = []
        for i in range(operations_count):
            # prepare containers and test data
            '''
            # note
            since we're only testing the client library
            we can use sync container calls in here
            no need for async container clients!
            '''
            blob_data = Document.create_dummy_docs(docs_per_operation)
            source_container_sas_url = self.create_source_container(data=blob_data, variables=variables, container_suffix=str(i)+container_suffix)
            target_container_sas_url = self.create_target_container(variables=variables, container_suffix=str(i)+container_suffix)
            # prepare translation inputs
            translation_inputs = [
                DocumentTranslationInput(
                    source_url=source_container_sas_url,
                    targets=[
                        TranslationTarget(
                            target_url=target_container_sas_url,
                            language=language
                        )
                    ]
                )
            ]
            # submit multiple operations
            poller = await async_client.begin_translation(translation_inputs)
            assert poller.id is not None
            if wait_for_operation:
                await poller.result()
            else:
                # Only wait until the service has accepted the operation.
                await poller.wait()
            result_ids.append(poller.id)
        return result_ids

    async def _begin_and_validate_translation_with_multiple_docs_async(self, async_client, docs_count, **kwargs):
        """Translate *docs_count* dummy documents in a single operation.

        Keyword options: ``variables``, ``wait`` (default False),
        ``language`` (default "es").

        :return: the poller for the submitted operation.
        """
        # get input params
        variables = kwargs.pop('variables', {})
        wait_for_operation = kwargs.pop('wait', False)
        language = kwargs.pop('language', "es")
        # prepare containers and test data
        blob_data = Document.create_dummy_docs(docs_count=docs_count)
        source_container_sas_url = self.create_source_container(data=blob_data, variables=variables)
        target_container_sas_url = self.create_target_container(variables=variables)
        # prepare translation inputs
        translation_inputs = [
            DocumentTranslationInput(
                source_url=source_container_sas_url,
                targets=[
                    TranslationTarget(
                        target_url=target_container_sas_url,
                        language=language
                    )
                ]
            )
        ]
        # submit operation
        poller = await async_client.begin_translation(translation_inputs)
        assert poller.id is not None
        # wait for result
        if wait_for_operation:
            result = await poller.result()
            async for doc in result:
                self._validate_doc_status(doc, "es")
        # validate
        self._validate_translation_metadata(poller=poller)
        return poller
# ../weapons/_base.py
"""Provides simplified weapon functionality."""
# =============================================================================
# >> IMPORTS
# =============================================================================
# Source.Python Imports
# Entities
from entities.entity import Entity
# Weapons
from _weapons._entity import WeaponMixin
from weapons.manager import weapon_manager
# =============================================================================
# >> GLOBAL VARIABLES
# =============================================================================
# Add all the global variables to __all__
__all__ = ('Weapon',
)
# =============================================================================
# >> CLASSES
# =============================================================================
class Weapon(WeaponMixin, Entity):
"""Allows easy usage of the weapon's attributes."""
def __init__(self, index, caching=True):
    """Initialize the object.

    :param int index:
        A valid weapon index.
    :param bool caching:
        Whether to lookup the cache for an existing instance or not.
    :raise ValueError:
        Raised if the index is invalid.
    """
    # NOTE(review): ``caching`` is accepted but not forwarded to either
    # base initializer here — presumably consumed by the instance cache
    # machinery; confirm against WeaponMixin/Entity.
    WeaponMixin.__init__(self, index)
    Entity.__init__(self, index)
def _validate_clip(self):
    """Test if the weapon has a clip.

    :raise ValueError:
        Raised when the weapon type has no clip per ``weapon_manager``
        data, or the entity reports ``-1`` for its clip.
    """
    if (
        self.classname in weapon_manager and
        weapon_manager[self.classname].clip is None
    ) or self._clip == -1:
        raise ValueError('Weapon does not have a clip.')
def get_clip(self):
"""Return the amount of ammo in the weapon's clip."""
self._validate_clip()
return self._clip
def set_clip(self, value):
"""Set the amount of ammo in the weapon's clip."""
self._validate_clip()
self._clip = value
# Set the "clip" property methods
clip = property(
get_clip, set_clip,
doc="""Property to get/set the weapon's clip.""")
def _validate_ammo(self):
"""Test if the weapon has a valid ammoprop and an owner."""
if (
self.classname in weapon_manager and
weapon_manager[self.classname].ammoprop is None
) or self.ammoprop == -1:
raise ValueError(
'Unable to get ammoprop for {weapon}'.format(
weapon=self.classname
)
)
player = self.owner
if player is None:
raise ValueError('Unable to get the owner of the weapon.')
return player
def get_ammo(self):
"""Return the amount of ammo the player has for the weapon."""
player = self._validate_ammo()
return player.get_property_int(
'{base}{prop:03d}'.format(
base=weapon_manager.ammoprop,
prop=self.ammoprop,
)
)
def set_ammo(self, value):
"""Set the player's ammo property for the weapon."""
player = self._validate_ammo()
player.set_property_int(
'{base}{prop:03d}'.format(
base=weapon_manager.ammoprop,
prop=self.ammoprop,
),
value,
)
# Set the "ammo" property methods
ammo = property(
get_ammo, set_ammo,
doc="""Property to get/set the weapon's ammo.""")
def _validate_secondary_fire_clip(self):
"""Test if the weapon has a secondary fire clip."""
if self._secondary_fire_clip == -1:
raise ValueError('Weapon does not have a secondary fire clip.')
def get_secondary_fire_clip(self):
"""Return the amount of ammo in the weapon's secondary fire clip."""
self._validate_secondary_fire_clip()
return self._secondary_fire_clip
def set_secondary_fire_clip(self, value):
"""Set the amount of ammo in the weapon's secondary fire clip."""
self._validate_secondary_fire_clip()
self._secondary_fire_clip = value
# Set the "secondary_fire_clip" property methods
secondary_fire_clip = property(
get_secondary_fire_clip, set_secondary_fire_clip,
doc="""Property to get/set the weapon's secondary fire clip.""")
def _validate_secondary_fire_ammo(self):
"""Test if the weapon has a valid secondary fire ammoprop and an owner."""
if self.secondary_fire_ammoprop == -1:
raise ValueError(
'Unable to get secondary fire ammoprop for {0}'.format(
self.classname))
player = self.owner
if player is None:
raise ValueError('Unable to get the owner of the weapon.')
return player
def METHOD_NAME(self):
"""Return the secondary fire ammo the player has for the weapon."""
player = self._validate_secondary_fire_ammo()
return player.get_property_int(
'{base}{prop:03d}'.format(
base=weapon_manager.ammoprop,
prop=self.secondary_fire_ammoprop,
)
)
def set_secondary_fire_ammo(self, value):
"""Set the player's secondary fire ammo property for the weapon."""
player = self._validate_secondary_fire_ammo()
player.set_property_int(
'{base}{prop:03d}'.format(
base=weapon_manager.ammoprop,
prop=self.secondary_fire_ammoprop,
),
value,
)
# Set the "secondary_fire_ammo" property methods
secondary_fire_ammo = property(
METHOD_NAME, set_secondary_fire_ammo,
doc="""Property to get/set the weapon's secondary fire ammo.""")
@property
def weapon_name(self):
"""Return the full class name of the weapon."""
return self.classname
def remove(self):
"""Remove the weapon."""
# Avoid a cyclic import
from players.entity import Player
owner = self.owner
if owner is not None and owner.is_player():
player = Player(owner.index)
player.drop_weapon(self)
super().remove() |
4,653 | test default value | # (C) Copyright 2005-2023 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
import unittest
from traits.api import (
Bytes, DefaultValue, Float, HasTraits, Instance, Int, List, Str,
TraitError, TraitType, Type, Union)
class CustomClass(HasTraits):
    """Simple HasTraits class used below to exercise Union with Instance traits."""

    # Integer trait used as the class's single attribute in the tests.
    value = Int
class CustomStrType(TraitType):
    """Custom TraitType used to test Union with user-defined trait types."""

    #: The default value type to use.
    default_value_type = DefaultValue.constant

    #: The default value.
    default_value = "a string value"

    def validate(self, obj, name, value):
        # NOTE(review): this tests against the *trait class* ``Str``, not the
        # builtin ``str``, so any value that is not a ``Str`` trait instance
        # is accepted here (including plain strings, which is what
        # test_union_user_defined_type relies on).  It looks like
        # ``isinstance(value, str)`` without the ``not`` may have been
        # intended — confirm before changing.
        if not isinstance(value, Str):
            return value
        self.error(obj, name, value)
class TestUnion(unittest.TestCase):
    """Tests for the Union trait type."""

    def test_union_incompatible_trait(self):
        """A non-trait argument to Union raises a descriptive ValueError."""
        with self.assertRaises(ValueError) as exception_context:
            Union(Str(), "none")
        self.assertEqual(
            str(exception_context.exception),
            "Union trait declaration expects a trait type or an instance of "
            "trait type or None, but got 'none' instead"
        )

    def test_list_trait_types(self):
        """Union of trait classes validates against each member in turn."""
        class TestClass(HasTraits):
            int_or_str_type = Union(Type, Int, Str)

        TestClass(int_or_str_type=3)
        TestClass(int_or_str_type="3.5")

        with self.assertRaises(TraitError):
            TestClass(int_or_str_type=3.5)
        with self.assertRaises(TraitError):
            TestClass(int_or_str_type=Int(3))

    def test_malformed_declaration(self):
        """Plain Python types and arbitrary values are rejected at declaration."""
        with self.assertRaises(ValueError):
            class TestClass(HasTraits):
                a = Union(int, Float)
            TestClass(a=2.4)

        with self.assertRaises(ValueError):
            class TestClass(HasTraits):
                a = Union([1, 2], Float)
            TestClass(a=2.4)

    def test_list_trait_instances(self):
        """Union of Instance traits accepts instances, not classes or values."""
        class TestClass(HasTraits):
            float_or_str_obj = Union(Instance(Float), Instance(Str))

        TestClass(float_or_str_obj=Float(3.5))
        TestClass(float_or_str_obj=Str("3.5"))

        with self.assertRaises(TraitError):
            TestClass(float_or_str_obj=Float)
        with self.assertRaises(TraitError):
            TestClass(float_or_str_obj=3.5)

    def test_union_with_none(self):
        """None may be used directly as a Union member."""
        class TestClass(HasTraits):
            int_or_none = Union(None, Int)

        TestClass(int_or_none=None)

    def test_union_unspecified_arguments(self):
        """An empty Union only accepts None."""
        class TestClass(HasTraits):
            none = Union()

        TestClass(none=None)

    def test_default_value(self):
        """The default comes from the first member unless default_value is set."""
        class TestClass(HasTraits):
            atr = Union(Int(3), Float(4.1), Str("Something"))

        self.assertEqual(TestClass().atr, 3)

        class TestClass(HasTraits):
            atr = Union(
                Int(3), Float(4.1), Str("Something"),
                default_value="XYZ",
            )

        self.assertEqual(TestClass().atr, "XYZ")

        class TestClass(HasTraits):
            atr = Union()

        self.assertEqual(TestClass().atr, None)

        class TestClass(HasTraits):
            atr = Union(None)

        self.assertEqual(TestClass().atr, None)

    def test_default_raise_error(self):
        # If 'default' is defined, it could be caused by migration from
        # ``Either``. Raise an error to aid migrations from ``Either``
        # to ``Union``
        with self.assertRaises(ValueError) as exception_context:
            Union(Int(), Float(), default=1.0)

        self.assertEqual(
            str(exception_context.exception),
            "Union default value should be set via 'default_value', not "
            "'default'."
        )

    def test_inner_traits(self):
        """inner_traits exposes the member traits in declaration order."""
        class TestClass(HasTraits):
            atr = Union(Float, Int, Str)

        obj = TestClass()
        t1, t2, t3 = obj.trait('atr').inner_traits
        self.assertEqual(type(t1.trait_type), Float)
        self.assertEqual(type(t2.trait_type), Int)
        self.assertEqual(type(t3.trait_type), Str)

    def test_union_user_defined_class(self):
        """Instance traits of user classes validate instances, not the class."""
        class TestClass(HasTraits):
            obj = Union(Instance(CustomClass), Int)

        TestClass(obj=CustomClass(value=5))
        TestClass(obj=5)

        with self.assertRaises(TraitError):
            TestClass(obj=CustomClass)

    def test_union_user_defined_type(self):
        """User-defined TraitType subclasses can be Union members."""
        class TestClass(HasTraits):
            type_value = Union(CustomStrType, Int)

        TestClass(type_value="new string")

    def test_notification(self):
        """Assignments to a Union trait fire change notifications."""
        class TestClass(HasTraits):
            union_attr = Union(Int)
            shadow_union_trait = None

            def _union_attr_changed(self, new):
                self.shadow_union_trait = new

        obj = TestClass(union_attr=-1)
        obj.union_attr = 1
        self.assertEqual(obj.shadow_union_trait, 1)

    def test_extending_union_trait(self):
        """Union subclasses may widen validation via an overridden validate."""
        class UnionAllowStr(Union):
            def validate(self, obj, name, value):
                if isinstance(value, str):
                    return value
                return super().validate(obj, name, value)

        class TestClass(HasTraits):
            s = UnionAllowStr(Int, Float)

        TestClass(s="sdf")

    def test_list_inside_union_default(self):
        """A List member supplies a validating trait list as the default."""
        class HasUnionWithList(HasTraits):
            foo = Union(List(Int), Str)

        has_union = HasUnionWithList()
        value = has_union.foo
        self.assertIsInstance(value, list)
        with self.assertRaises(TraitError):
            value.append("not an integer")

    def test_constant_default(self):
        # Exercise the branch where the default is constant.
        class HasUnionWithList(HasTraits):
            foo = Union(Int(23), Float)
            nested = Union(Union(Str(), Bytes()), Union(Int(), Float(), None))

        has_union = HasUnionWithList()
        value = has_union.foo
        self.assertEqual(value, 23)
        self.assertEqual(
            has_union.trait("foo").default_value(),
            (DefaultValue.constant, 23),
        )
        self.assertEqual(
            has_union.trait("nested").default_value(),
            (DefaultValue.constant, ""),
        )
4,654 | main instance | """Relay method calls to interfaces of multiple implementations.
A Relayer instance keeps track of multiple subclasses of an interface and forwards
method calls to one of the instances based on a priority list. Using this example:
* Metadata
- DmapMetadata
- MrpMetadata
- CompanionMetadata
- AirPlayMetadata
If all of these protocols were available, the general priority order would likely be
MrpMetadata, DmapMetadata, AirPlayMetadata, CompanionMetadata (the last two currently
do not support metadata). So the relayer would first and foremost call a method in
MrpMetadata, then DmapMetadata if no implementation exists, and so on. If no instance
provides an implementation, an `exceptions.NotSupportedError` is raised.
A code example:
relayer = Relayer(
interface.Metadata,
[MrpMetadata, DmapMetadata, CompanionMetadata, AirPlayMetadata]
)
relayer.register(MrpMetadata())
relayer.register(DmapMetadata())
relayer.register(CompanionMetadata())
relayer.register(AirPlayMetadata())
artwork = await relayer.relay("artwork")(width=640)
"""
from itertools import chain
from typing import Dict, Generic, List, Optional, Sequence, Type, TypeVar
from pyatv import exceptions
from pyatv.const import Protocol
T = TypeVar("T")
class Relayer(Generic[T]):
    """Relay method calls to instances based on priority."""

    def __init__(
        self, base_interface: Type[T], protocol_priority: List[Protocol]
    ) -> None:
        """Initialize a new Relayer instance."""
        self._base_interface = base_interface
        self._priorities = protocol_priority
        self._interfaces: Dict[Protocol, T] = {}
        # Non-empty only while a takeover is active; takes precedence over
        # the regular priority list everywhere below.
        self._takeover_protocol: List[Protocol] = []

    @property
    def count(self):
        """Return number of registered instances."""
        return len(self._interfaces)

    @property
    def main_instance(self) -> T:
        """Return main instance based on priority.

        :raises exceptions.NotSupportedError: if no registered instance
            matches any protocol in the priority list.
        """
        for priority in chain(self._takeover_protocol, self._priorities):
            if priority in self._interfaces:
                return self._interfaces[priority]

        raise exceptions.NotSupportedError()

    @property
    def main_protocol(self) -> Optional[Protocol]:
        """Return Protocol for main instance, or None if nothing registered."""
        for priority in chain(self._takeover_protocol, self._priorities):
            if priority in self._interfaces:
                return priority
        return None

    @property
    def instances(self) -> Sequence[T]:
        """Return all instances added to this relayer."""
        return list(self._interfaces.values())

    def register(self, instance: T, protocol: Protocol) -> None:
        """Register a new instance for an interface."""
        if protocol not in self._priorities:
            raise RuntimeError(f"{protocol} not in priority list")
        self._interfaces[protocol] = instance

    def get(self, protocol: Protocol) -> Optional[T]:
        """Return instance for protocol if available."""
        return self._interfaces.get(protocol)

    def relay(self, target: str, priority: Optional[List[Protocol]] = None):
        """Return method (or property value) of target instance based on priority."""
        instance = self._find_instance(
            target, chain(self._takeover_protocol, priority or self._priorities)
        )
        return getattr(instance, target)

    def _find_instance(self, target: str, priority):
        """Return the first instance (in priority order) overriding `target`."""
        for priority_iface in priority:
            interface = self._interfaces.get(priority_iface)

            # Interface defined in priority list but no instance for that interface
            # are just ignored as no implementation probably exists
            if not interface:
                continue

            # Trying to call a method not in the target interface
            relay_target = getattr(type(interface), target, None)
            if not relay_target:
                raise RuntimeError(f"{target} not in {priority_iface}")

            # Only pick the instance if it overrides the base interface's
            # implementation of `target`.
            if relay_target != getattr(self._base_interface, target):
                return interface

        # An existing method not implemented by any instance is "not supported"
        raise exceptions.NotSupportedError(f"{target} is not supported")

    def takeover(self, protocol: Protocol) -> None:
        """Temporary override priority list with a specific protocol."""
        if self._takeover_protocol:
            raise exceptions.InvalidStateError(
                f"{self._takeover_protocol[0]} has already done takeover"
            )
        self._takeover_protocol = [protocol]

    def release(self) -> None:
        """Release temporary takeover."""
        self._takeover_protocol = []
4,655 | stop tuning | import collections
import os
import re
import traceback
import tuned.exceptions
import tuned.logs
import tuned.plugins.exceptions
import tuned.consts as consts
from tuned.utils.global_config import GlobalConfig
from tuned.utils.commands import commands
log = tuned.logs.get()
__all__ = ["Manager"]
class Manager(object):
    """
    Manager creates plugin instances and keeps a track of them.
    """

    def __init__(self, plugins_repository, monitors_repository,
            def_instance_priority, hardware_inventory, config = None):
        super(Manager, self).__init__()
        self._plugins_repository = plugins_repository
        self._monitors_repository = monitors_repository
        self._def_instance_priority = def_instance_priority
        self._hardware_inventory = hardware_inventory
        self._instances = []
        self._plugins = []
        self._config = config or GlobalConfig()
        self._cmd = commands()

    @property
    def plugins(self):
        """Plugins that were successfully instantiated by create()."""
        return self._plugins

    @property
    def instances(self):
        """Plugin instances created by create(), in priority order."""
        return self._instances

    @property
    def plugins_repository(self):
        """Repository used to instantiate plugins."""
        return self._plugins_repository

    def _unit_matches_cpuinfo(self, unit):
        """Return True if the unit's cpuinfo regex matches this machine."""
        if unit.cpuinfo_regex is None:
            return True
        cpuinfo_string = self._config.get(consts.CFG_CPUINFO_STRING)
        if cpuinfo_string is None:
            cpuinfo_string = self._cmd.read_file("/proc/cpuinfo")
        return re.search(unit.cpuinfo_regex, cpuinfo_string,
                re.MULTILINE) is not None

    def _unit_matches_uname(self, unit):
        """Return True if the unit's uname regex matches this machine."""
        if unit.uname_regex is None:
            return True
        uname_string = self._config.get(consts.CFG_UNAME_STRING)
        if uname_string is None:
            uname_string = " ".join(os.uname())
        return re.search(unit.uname_regex, uname_string,
                re.MULTILINE) is not None

    def create(self, instances_config):
        """Create plugins and their instances from `instances_config`.

        Disabled instances and instances whose cpuinfo/uname regexes do
        not match this machine are skipped.  Instances are created in
        priority order and initialized before HW event processing starts.
        """
        instance_info_list = []
        for instance_name, instance_info in list(instances_config.items()):
            if not instance_info.enabled:
                log.debug("skipping disabled instance '%s'" % instance_name)
                continue
            if not self._unit_matches_cpuinfo(instance_info):
                log.debug("skipping instance '%s', cpuinfo does not match" % instance_name)
                continue
            if not self._unit_matches_uname(instance_info):
                log.debug("skipping instance '%s', uname does not match" % instance_name)
                continue

            instance_info.options.setdefault("priority", self._def_instance_priority)
            instance_info.options["priority"] = int(instance_info.options["priority"])
            instance_info_list.append(instance_info)

        instance_info_list.sort(key=lambda x: x.options["priority"])
        plugins_by_name = collections.OrderedDict()
        for instance_info in instance_info_list:
            # Priority only drives the ordering above; it is not a plugin option.
            instance_info.options.pop("priority")
            plugins_by_name[instance_info.type] = None

        for plugin_name, none in list(plugins_by_name.items()):
            try:
                plugin = self._plugins_repository.create(plugin_name)
                plugins_by_name[plugin_name] = plugin
                self._plugins.append(plugin)
            except tuned.plugins.exceptions.NotSupportedPluginException:
                log.info("skipping plugin '%s', not supported on your system" % plugin_name)
                continue
            except Exception as e:
                log.error("failed to initialize plugin %s" % plugin_name)
                log.exception(e)
                continue

        instances = []
        for instance_info in instance_info_list:
            plugin = plugins_by_name[instance_info.type]
            if plugin is None:
                # Plugin failed to initialize or is unsupported; skip its instances.
                continue
            log.debug("creating '%s' (%s)" % (instance_info.name, instance_info.type))
            new_instance = plugin.create_instance(instance_info.name, instance_info.devices, instance_info.devices_udev_regex, \
                    instance_info.script_pre, instance_info.script_post, instance_info.options)
            instances.append(new_instance)

        for instance in instances:
            instance.plugin.init_devices()
            instance.plugin.assign_free_devices(instance)
            instance.plugin.initialize_instance(instance)

        # At this point we should be able to start the HW events
        # monitoring/processing thread, without risking race conditions
        self._hardware_inventory.start_processing_events()

        self._instances.extend(instances)

    def _try_call(self, caller, exc_ret, f, *args, **kwargs):
        """Call f(*args, **kwargs); on any exception log it and return exc_ret."""
        try:
            return f(*args, **kwargs)
        except Exception as e:
            trace = traceback.format_exc()
            log.error("BUG: Unhandled exception in %s: %s"
                    % (caller, str(e)))
            log.error(trace)
            return exc_ret

    def destroy_all(self):
        """Destroy all instances and clean up all plugins."""
        for instance in self._instances:
            log.debug("destroying instance %s" % instance.name)
            self._try_call("destroy_all", None,
                    instance.plugin.destroy_instance,
                    instance)
        for plugin in self._plugins:
            log.debug("cleaning plugin '%s'" % plugin.name)
            self._try_call("destroy_all", None, plugin.cleanup)

        del self._plugins[:]
        del self._instances[:]

    def update_monitors(self):
        """Ask every registered monitor to refresh its data."""
        for monitor in self._monitors_repository.monitors:
            log.debug("updating monitor %s" % monitor)
            self._try_call("update_monitors", None, monitor.update)

    def start_tuning(self):
        """Apply tuning on all instances."""
        for instance in self._instances:
            self._try_call("start_tuning", None,
                    instance.apply_tuning)

    def verify_tuning(self, ignore_missing):
        """Verify tuning on all instances; return False if any check fails."""
        ret = True
        for instance in self._instances:
            res = self._try_call("verify_tuning", False,
                    instance.verify_tuning, ignore_missing)
            if res == False:
                ret = False
        return ret

    def update_tuning(self):
        """Re-apply tuning on all instances."""
        for instance in self._instances:
            self._try_call("update_tuning", None,
                    instance.update_tuning)

    # rollback parameter is a helper telling plugins whether soft or full
    # rollback is needed, e.g. for bootloader plugin we need grub.cfg
    # tuning to persist across reboots and restarts of the daemon, so in
    # this case the rollback is usually set to consts.ROLLBACK_SOFT,
    # but we also need to clean it all up when TuneD is disabled or the
    # profile is changed. In this case the rollback is set to
    # consts.ROLLBACK_FULL. In practice it means to remove all temporal
    # or helper files, unpatch third party config files, etc.
    def stop_tuning(self, rollback = consts.ROLLBACK_SOFT):
        """Unapply tuning on all instances, in reverse creation order."""
        self._hardware_inventory.stop_processing_events()
        for instance in reversed(self._instances):
            self._try_call("stop_tuning", None,
                    instance.unapply_tuning, rollback)
4,656 | test get valid primitives selected primitives | import pytest
from woodwork.column_schema import ColumnSchema
from featuretools.primitives import (
AggregationPrimitive,
Count,
Hour,
IsIn,
Not,
TimeSincePrevious,
TransformPrimitive,
)
from featuretools.synthesis.get_valid_primitives import get_valid_primitives
from featuretools.utils.gen_utils import Library
def test_get_valid_primitives_selected_primitives(es):
    """Selected primitive classes are filtered to those valid for the target."""
    agg_prims, trans_prims = get_valid_primitives(
        es,
        "log",
        selected_primitives=[Hour, Count],
    )
    assert set(agg_prims) == set([Count])
    assert set(trans_prims) == set([Hour])

    # With max_depth=1 on "products" neither selected primitive applies.
    agg_prims, trans_prims = get_valid_primitives(
        es,
        "products",
        selected_primitives=[Hour],
        max_depth=1,
    )
    assert set(agg_prims) == set()
    assert set(trans_prims) == set()
def test_get_valid_primitives_selected_primitives_strings(es):
    """Primitive names given as strings behave like the class-based selection."""
    aggs, transforms = get_valid_primitives(
        es,
        "log",
        selected_primitives=["hour", "count"],
    )
    assert set(aggs) == {Count}
    assert set(transforms) == {Hour}

    aggs, transforms = get_valid_primitives(
        es,
        "products",
        selected_primitives=["hour"],
        max_depth=1,
    )
    assert not aggs
    assert not transforms
def test_invalid_primitive(es):
    """Unknown names and non-primitive objects raise ValueError."""
    unknown_msg = "'foobar' is not a recognized primitive name"
    with pytest.raises(ValueError, match=unknown_msg):
        get_valid_primitives(
            es,
            target_dataframe_name="log",
            selected_primitives=["foobar"],
        )

    with pytest.raises(
        ValueError,
        match=(
            "Selected primitive <enum 'Library'> "
            "is not an AggregationPrimitive, TransformPrimitive, or str"
        ),
    ):
        get_valid_primitives(
            es,
            target_dataframe_name="log",
            selected_primitives=[Library],
        )
def test_primitive_compatibility(es):
    """TimeSincePrevious is only valid for pandas-backed entitysets."""
    _, transforms = get_valid_primitives(
        es,
        "customers",
        selected_primitives=[TimeSincePrevious],
    )
    expected_count = 1 if es.dataframe_type == Library.PANDAS else 0
    assert len(transforms) == expected_count
def test_get_valid_primitives_custom_primitives(pd_es):
    """Custom primitives are only usable when passed by class, not by name."""
    class ThreeMostCommonCat(AggregationPrimitive):
        # Custom aggregation: three most common categorical values.
        name = "n_most_common_categorical"
        input_types = [ColumnSchema(semantic_tags={"category"})]
        return_type = ColumnSchema(semantic_tags={"category"})
        number_output_features = 3
    class AddThree(TransformPrimitive):
        # Custom transform: sum of three numeric columns.
        name = "add_three"
        input_types = [
            ColumnSchema(semantic_tags="numeric"),
            ColumnSchema(semantic_tags="numeric"),
            ColumnSchema(semantic_tags="numeric"),
        ]
        return_type = ColumnSchema(semantic_tags="numeric")
        commutative = True
        compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
    # Without an explicit selection, custom primitives are not discovered.
    agg_prims, trans_prims = get_valid_primitives(pd_es, "log")
    assert ThreeMostCommonCat not in agg_prims
    assert AddThree not in trans_prims
    # Passing a custom primitive by *name* fails: names are resolved against
    # the registered primitive catalog only.
    with pytest.raises(
        ValueError,
        match="'add_three' is not a recognized primitive name",
    ):
        agg_prims, trans_prims = get_valid_primitives(
            pd_es,
            "log",
            2,
            [ThreeMostCommonCat, "add_three"],
        )
def test_get_valid_primitives_all_primitives(es):
    """With no selection, the default primitive set is considered."""
    aggs, transforms = get_valid_primitives(es, "customers")
    assert Count in aggs
    assert Hour in transforms
def test_get_valid_primitives_single_table(transform_es):
    """A single-dataframe entityset warns and yields no aggregation primitives."""
    msg = "Only one dataframe in entityset, changing max_depth to 1 since deeper features cannot be created"
    with pytest.warns(UserWarning, match=msg):
        aggs, transforms = get_valid_primitives(transform_es, "first")
    assert not aggs
    assert IsIn in transforms
def test_get_valid_primitives_with_dfs_kwargs(es):
    """Extra dfs keyword arguments are honored when validating primitives."""
    aggs, transforms = get_valid_primitives(
        es,
        "customers",
        selected_primitives=[Hour, Count, Not],
    )
    assert set(aggs) == {Count}
    assert set(transforms) == {Hour, Not}

    # Can use other dfs parameters and they get applied
    aggs, transforms = get_valid_primitives(
        es,
        "customers",
        selected_primitives=[Hour, Count, Not],
        ignore_columns={"customers": ["loves_ice_cream"]},
    )
    assert set(aggs) == {Count}
    assert set(transforms) == {Hour}

    aggs, transforms = get_valid_primitives(
        es,
        "products",
        selected_primitives=[Hour, Count],
        ignore_dataframes=["log"],
    )
    assert not aggs
    assert not transforms
4,657 | get os name | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
This file contains functions used for telemetry.
"""
import math
import os
import platform
import sys
import distro
import mozpack.path as mozpath
from .base import BuildEnvironmentNotFoundException
def cpu_brand_linux():
    """
    Read the CPU brand string out of /proc/cpuinfo on Linux.
    """
    with open("/proc/cpuinfo", "r") as cpuinfo:
        for line in cpuinfo:
            if not line.startswith("model name"):
                continue
            _, brand = line.split(": ", 1)
            return brand.rstrip()
    # not found?
    return None
def cpu_brand_windows():
    """
    Read the CPU brand string from the registry on Windows.
    """
    try:
        import _winreg
    except ImportError:
        import winreg as _winreg

    key_path = r"HARDWARE\DESCRIPTION\System\CentralProcessor\0"
    try:
        key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, key_path)
        brand, value_type = _winreg.QueryValueEx(key, "ProcessorNameString")
        if value_type == _winreg.REG_SZ:
            return brand
    except WindowsError:
        pass
    return None
def cpu_brand_mac():
    """
    Get the CPU brand string via sysctl on macos.

    Returns the decoded brand string, or None if the sysctl lookup fails.
    """
    import ctypes
    import ctypes.util

    libc = ctypes.cdll.LoadLibrary(ctypes.util.find_library("c"))
    # First, find the required buffer size.
    bufsize = ctypes.c_size_t(0)
    result = libc.sysctlbyname(
        b"machdep.cpu.brand_string", None, ctypes.byref(bufsize), None, 0
    )
    if result != 0:
        # sysctl lookup failed; no brand string available.
        return None
    bufsize.value += 1  # room for the trailing NUL
    buf = ctypes.create_string_buffer(bufsize.value)
    # Now actually get the value.
    result = libc.sysctlbyname(
        b"machdep.cpu.brand_string", buf, ctypes.byref(bufsize), None, 0
    )
    if result != 0:
        return None
    return buf.value.decode()
def get_cpu_brand():
    """
    Get the CPU brand string as returned by CPUID.
    """
    readers = {
        "Linux": cpu_brand_linux,
        "Windows": cpu_brand_windows,
        "Darwin": cpu_brand_mac,
    }
    reader = readers.get(platform.system(), lambda: None)
    return reader()
def get_os_name():
    """Return a normalized OS name: "linux", "windows", "macos", or "other"."""
    return {"Linux": "linux", "Windows": "windows", "Darwin": "macos"}.get(
        platform.system(), "other"
    )


# Alias kept so existing callers of the previous name keep working.
METHOD_NAME = get_os_name
def get_psutil_stats():
    """Return whether psutil exists and its associated stats.

    @returns (bool, int, int, int) whether psutil exists, the logical CPU count,
    physical CPU count, and total number of bytes of memory.
    """
    try:
        import psutil
    except ImportError:
        return False, None, None, None

    return (
        True,
        psutil.cpu_count(),
        psutil.cpu_count(logical=False),
        psutil.virtual_memory().total,
    )
def get_system_info():
    """
    Gather info to fill the `system` keys in the schema.
    """
    # Normalize OS names a bit, and bucket non-tier-1 platforms into "other".
    has_psutil, logical, physical, mem_bytes = get_psutil_stats()
    info = {"os": METHOD_NAME()}
    if has_psutil:
        # `total` on Linux is gathered from /proc/meminfo's `MemTotal`, which is
        # the total amount of physical memory minus some kernel usage, so round
        # up to the nearest GB to get a sensible answer.
        gib = 1024 * 1024 * 1024
        info["memory_gb"] = int(math.ceil(float(mem_bytes) / gib))
        info["logical_cores"] = logical
        if physical is not None:
            info["physical_cores"] = physical
    brand = get_cpu_brand()
    if brand is not None:
        info["cpu_brand"] = brand
    # TODO: drive_is_ssd, virtual_machine: https://bugzilla.mozilla.org/show_bug.cgi?id=1481613
    return info
def get_build_opts(substs):
    """
    Translate selected items from `substs` into `build_opts` keys in the schema.
    """
    selected = (
        # Selected substitutions.
        ("artifact", "MOZ_ARTIFACT_BUILDS", bool),
        ("debug", "MOZ_DEBUG", bool),
        ("opt", "MOZ_OPTIMIZE", bool),
        ("ccache", "CCACHE", bool),
        ("sccache", "MOZ_USING_SCCACHE", bool),
    )
    try:
        opts = {
            key: convert(substs.get(subst, None))
            for (key, subst, convert) in selected
        }
        compiler = substs.get("CC_TYPE", None)
        if compiler:
            opts["compiler"] = str(compiler)
        if substs.get("CXX_IS_ICECREAM", None):
            opts["icecream"] = True
        return opts
    except BuildEnvironmentNotFoundException:
        return {}
def get_build_attrs(attrs):
    """
    Extracts clobber and cpu usage info from command attributes.
    """
    res = {}
    clobber_flag = attrs.get("clobber")
    if clobber_flag:
        res["clobber"] = clobber_flag
    usage_info = attrs.get("usage")
    if usage_info and usage_info.get("cpu_percent"):
        res["cpu_percent"] = int(round(usage_info["cpu_percent"]))
    return res
def filter_args(command, argv, topsrcdir, topobjdir, cwd=None):
    """
    Given the full list of command-line arguments, remove anything up to and including `command`,
    and attempt to filter absolute pathnames out of any arguments after that.
    """
    if cwd is None:
        cwd = os.getcwd()

    # Each key is a pathname and the values are replacement sigils
    paths = {
        topsrcdir: "$topsrcdir/",
        topobjdir: "$topobjdir/",
        mozpath.normpath(os.path.expanduser("~")): "$HOME/",
        # This might override one of the existing entries, that's OK.
        # We don't use a sigil here because we treat all arguments as potentially relative
        # paths, so we'd like to get them back as they were specified.
        mozpath.normpath(cwd): "",
    }

    args = list(argv)
    try:
        # Keep only the arguments following the first occurrence of `command`.
        args = args[args.index(command) + 1:]
    except ValueError:
        # `command` not present: nothing to report.
        args = []

    def sanitize(arg):
        arg = mozpath.abspath(arg)
        base = mozpath.basedir(arg, paths.keys())
        if base:
            return paths[base] + mozpath.relpath(arg, base)
        # Best-effort.
        return "<path omitted>"

    return [sanitize(a) for a in args]
def get_distro_and_version():
    """Return a (distribution-name, version) pair for the current platform."""
    plat = sys.platform
    if plat.startswith("linux"):
        name, version, _ = distro.linux_distribution(full_distribution_name=False)
        return name, version
    if plat.startswith("darwin"):
        return "macos", platform.mac_ver()[0]
    if plat.startswith(("win32", "msys")):
        win = sys.getwindowsversion()
        return "windows", "%s.%s.%s" % (win.major, win.minor, win.build)
    return plat, ""
def get_shell_info():
    """Returns if the current shell was opened by vscode and if it's a SSH connection

    @returns (bool, bool): (TERM_PROGRAM mentions vscode, SSH_CLIENT is set)
    """
    # `"vscode" in ...` is already a bool; the original wrapped it in a
    # redundant `True if ... else False` ternary.
    return (
        "vscode" in os.getenv("TERM_PROGRAM", ""),
        bool(os.getenv("SSH_CLIENT", False)),
    )
def get_vscode_running():
    """Return if the vscode is currently running.

    Returns False when psutil is unavailable or process enumeration fails.
    """
    try:
        import psutil

        # Process names differ per platform:
        # On Windows we have "Code.exe"
        # On MacOS we have "Code Helper (Renderer)"
        # On Linux we have "code"
        vscode_names = ("Code.exe", "Code Helper (Renderer)", "code")
        for proc in psutil.process_iter():
            try:
                # BUG FIX: psutil.Process.name is a *method*; the previous
                # code compared the bound method object against strings,
                # which is always False, so vscode was never detected.
                if proc.name() in vscode_names:
                    return True
            except Exception:
                # may not be able to access process info for all processes
                continue
    except Exception:
        # On some platforms, sometimes, the generator throws an
        # exception preventing us to enumerate.
        return False
    return False
4,658 | load detection model | # Copyright (c) Facebook, Inc. and its affiliates.
# install `vqa-maskrcnn-benchmark` from
# https://github.com/ronghanghu/vqa-maskrcnn-benchmark-m4c
import argparse
import os
import sys
import cv2
import numpy as np
import torch
import tqdm
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.layers import nms
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.structures.image_list import to_image_list
from maskrcnn_benchmark.utils.model_serialization import load_state_dict
from PIL import Image
sys.path.append("/private/home/ronghanghu/workspace/vqa-maskrcnn-benchmark") # NoQA
def load_detection_model(yaml_file, yaml_ckpt):
    """Build the detection model from `yaml_file` and load `yaml_ckpt` weights.

    The checkpoint is loaded on CPU, then the model is moved to CUDA and put
    in eval mode.
    """
    cfg.merge_from_file(yaml_file)
    cfg.freeze()

    model = build_detection_model(cfg)
    checkpoint = torch.load(yaml_ckpt, map_location=torch.device("cpu"))
    load_state_dict(model, checkpoint.pop("model"))
    model.to("cuda")
    model.eval()
    return model


# Alias kept so existing callers of the previous name keep working.
METHOD_NAME = load_detection_model
def _image_transform(image_path):
    """Load an image into Detectron input format.

    Returns (img, im_scale): ``img`` is a CHW float32 BGR tensor with the
    per-channel means subtracted; ``im_scale`` is the resize factor applied
    (shorter side scaled to 800, capped so the longer side stays <= 1333).
    """
    img = Image.open(image_path)
    im = np.array(img).astype(np.float32)
    # handle a few corner cases
    if im.ndim == 2:  # gray => RGB
        im = np.tile(im[:, :, None], (1, 1, 3))
    if im.shape[2] > 3:  # RGBA => RGB
        im = im[:, :, :3]
    im = im[:, :, ::-1]  # RGB => BGR
    # Per-channel mean subtraction (BGR order, matching the flip above).
    im -= np.array([102.9801, 115.9465, 122.7717])
    im_shape = im.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])
    im_scale = float(800) / float(im_size_min)
    # Prevent the biggest axis from being more than max_size
    if np.round(im_scale * im_size_max) > 1333:
        im_scale = float(1333) / float(im_size_max)
    im = cv2.resize(
        im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR
    )
    img = torch.from_numpy(im).permute(2, 0, 1)
    return img, im_scale
def _process_feature_extraction(output, im_scales, feat_name="fc6"):
    """Select the top-100 proposals per image and return features and boxes.

    Per-class NMS (IoU threshold 0.5) is run on softmax-scored proposals;
    each proposal keeps the maximum class confidence that survived NMS, and
    the 100 highest-confidence proposals are selected per image.

    ``output[0]`` is expected to hold "proposals", "scores" and the pooled
    features under ``feat_name``.  Returned boxes are in the network's
    (resized) coordinate space; callers divide by the scale to undo it.
    """
    batch_size = len(output[0]["proposals"])
    n_boxes_per_image = [len(_) for _ in output[0]["proposals"]]
    score_list = output[0]["scores"].split(n_boxes_per_image)
    score_list = [torch.nn.functional.softmax(x, -1) for x in score_list]
    feats = output[0][feat_name].split(n_boxes_per_image)
    cur_device = score_list[0].device
    feat_list = []
    bbox_list = []
    for i in range(batch_size):
        # Rescale boxes back to original-image coordinates before NMS.
        dets = output[0]["proposals"][i].bbox / im_scales[i]
        scores = score_list[i]
        max_conf = torch.zeros(scores.shape[0]).to(cur_device)
        # Class index 0 is skipped — presumably the background class; confirm
        # against the detector's label mapping.
        for cls_ind in range(1, scores.shape[1]):
            cls_scores = scores[:, cls_ind]
            keep = nms(dets, cls_scores, 0.5)
            # Keep each surviving proposal's best class confidence so far.
            max_conf[keep] = torch.where(
                cls_scores[keep] > max_conf[keep], cls_scores[keep], max_conf[keep]
            )
        keep_boxes = torch.argsort(max_conf, descending=True)[:100]
        feat_list.append(feats[i][keep_boxes])
        bbox_list.append(output[0]["proposals"][i].bbox[keep_boxes])
    return feat_list, bbox_list
def extract_features(detection_model, image_path, input_boxes=None, feat_name="fc6"):
    """Run the detector on one image and return (features, boxes) as ndarrays.

    If ``input_boxes`` is given, features are pooled for exactly those boxes;
    otherwise the detector's own proposals are filtered via
    ``_process_feature_extraction``.  Returned boxes are divided by the
    resize scale, i.e. they are in original-image coordinates.
    """
    im, im_scale = _image_transform(image_path)
    if input_boxes is not None:
        if isinstance(input_boxes, np.ndarray):
            input_boxes = torch.from_numpy(input_boxes.copy())
        # NOTE(review): in-place scaling — if the caller passed a torch
        # tensor (not an ndarray, which is copied above), their tensor is
        # mutated here; confirm callers do not reuse it afterwards.
        input_boxes *= im_scale
    img_tensor, im_scales = [im], [im_scale]
    current_img_list = to_image_list(img_tensor, size_divisible=32)
    current_img_list = current_img_list.to("cuda")
    with torch.no_grad():
        output = detection_model(current_img_list, input_boxes=input_boxes)
    if input_boxes is None:
        feat_list, bbox_list = _process_feature_extraction(output, im_scales, feat_name)
        feat = feat_list[0].cpu().numpy()
        bbox = bbox_list[0].cpu().numpy() / im_scale
    else:
        feat = output[0][feat_name].cpu().numpy()
        bbox = output[0]["proposals"][0].bbox.cpu().numpy() / im_scale
    return feat, bbox
def main():
    """Extract Faster R-CNN features over OCR token boxes for every image
    listed in a TextVQA-style imdb, saving one feature .npy and one
    *_info.npy (boxes + tokens) file per image."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--detection_cfg",
        type=str,
        default="/private/home/ronghanghu/workspace/pythia/data/"
        + "frcn_feature_extraction/detectron_model.yaml",
        help="Detectron config file; download it from "
        + "https://dl.fbaipublicfiles.com/pythia/detectron_model/"
        + "detectron_model.yaml",
    )
    parser.add_argument(
        "--detection_model",
        type=str,
        default="/private/home/ronghanghu/workspace/pythia/data/"
        + "frcn_feature_extraction/detectron_model.pth",
        help="Detectron model file; download it"
        + " from https://dl.fbaipublicfiles.com/pythia/detectron_model/"
        + "detectron_model.pth",
    )
    parser.add_argument(
        "--imdb_file",
        type=str,
        default="/private/home/ronghanghu/workspace/pythia/data/"
        + "imdb/m4c_textvqa/imdb_train_ocr_en.npy",
        help="The imdb to extract features",
    )
    parser.add_argument(
        "--image_dir",
        type=str,
        default="/private/home/ronghanghu/workspace/DATASETS/TextVQA",
        help="The directory containing images",
    )
    parser.add_argument(
        "--save_dir",
        type=str,
        default="/private/home/ronghanghu/workspace/pythia/data/"
        + "m4c_textvqa_ocr_en_frcn_features_2/train_images",
        help="The directory to save extracted features",
    )
    args = parser.parse_args()
    DETECTION_YAML = args.detection_cfg
    DETECTION_CKPT = args.detection_model
    IMDB_FILE = args.imdb_file
    IMAGE_DIR = args.image_dir
    SAVE_DIR = args.save_dir
    # First imdb entry is a header record, hence the [1:].
    imdb = np.load(IMDB_FILE, allow_pickle=True)[1:]
    # keep only one entry per image_id
    image_id2info = {info["image_id"]: info for info in imdb}
    imdb = list(image_id2info[k] for k in sorted(image_id2info))
    detection_model = METHOD_NAME(DETECTION_YAML, DETECTION_CKPT)
    print("Faster R-CNN OCR features")
    print("\textracting from", IMDB_FILE)
    print("\tsaving to", SAVE_DIR)
    for _, info in enumerate(tqdm.tqdm(imdb)):
        image_path = os.path.join(IMAGE_DIR, info["image_path"])
        save_feat_path = os.path.join(SAVE_DIR, info["feature_path"])
        save_info_path = save_feat_path.replace(".npy", "_info.npy")
        os.makedirs(os.path.dirname(save_feat_path), exist_ok=True)
        w = info["image_width"]
        h = info["image_height"]
        # OCR boxes are stored normalized to [0, 1]; scale back to pixels.
        ocr_normalized_boxes = np.array(info["ocr_normalized_boxes"])
        ocr_boxes = ocr_normalized_boxes.reshape(-1, 4) * [w, h, w, h]
        ocr_tokens = info["ocr_tokens"]
        if len(ocr_boxes) > 0:
            extracted_feat, _ = extract_features(
                detection_model, image_path, input_boxes=ocr_boxes
            )
        else:
            # No OCR tokens for this image: save an empty (0, 2048) array
            # so downstream loaders still find a file.
            extracted_feat = np.zeros((0, 2048), np.float32)
        np.save(save_info_path, {"ocr_boxes": ocr_boxes, "ocr_tokens": ocr_tokens})
        np.save(save_feat_path, extracted_feat)
if __name__ == "__main__":
main() |
4,659 | validate pricing rules | # Copyright (c) 2019, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
import frappe
from frappe import _
from frappe.model.document import Document
# Fields copied verbatim from the Promotional Scheme document onto every
# generated Pricing Rule.
pricing_rule_fields = [
    "apply_on",
    "mixed_conditions",
    "is_cumulative",
    "other_item_code",
    "other_item_group",
    "apply_rule_on_other",
    "other_brand",
    "selling",
    "buying",
    "applicable_for",
    "valid_from",
    "valid_upto",
    "customer",
    "customer_group",
    "territory",
    "sales_partner",
    "campaign",
    "supplier",
    "supplier_group",
    "company",
    "currency",
    "apply_multiple_pricing_rules",
]
# Fields read from the discount-slab child rows; common to both slab types
# (see set_args, which maps min_amount/max_amount to min_amt/max_amt).
other_fields = [
    "min_qty",
    "max_qty",
    "min_amount",
    "max_amount",
    "priority",
    "warehouse",
    "threshold_percentage",
    "rule_description",
]
# Fields specific to price-discount slab rows.
price_discount_fields = [
    "rate_or_discount",
    "apply_discount_on",
    "apply_discount_on_rate",
    "rate",
    "discount_amount",
    "discount_percentage",
    "validate_applied_rule",
    "apply_multiple_pricing_rules",
]
# Fields specific to product (free item) discount slab rows.
product_discount_fields = [
    "free_item",
    "free_qty",
    "free_item_uom",
    "free_item_rate",
    "same_item",
    "is_recursive",
    "apply_multiple_pricing_rules",
]
class TransactionExists(frappe.ValidationError):
    """Raised when 'Applicable For' is changed while submitted transactions
    already reference the scheme's generated Pricing Rules."""

    pass
class PromotionalScheme(Document):
    """Controller for the Promotional Scheme doctype.

    A scheme is a template that fans out into one Pricing Rule per discount
    slab (and per 'Applicable For' value). The generated rules are kept in
    sync on every save and deleted together with the scheme.
    """
    def validate(self):
        if not self.selling and not self.buying:
            frappe.throw(_("Either 'Selling' or 'Buying' must be selected"), title=_("Mandatory"))
        if not (self.price_discount_slabs or self.product_discount_slabs):
            frappe.throw(_("Price or product discount slabs are required"))
        self.validate_applicable_for()
        self.METHOD_NAME()
    def validate_applicable_for(self):
        # If 'Applicable For' is set (e.g. "Customer Group"), the matching
        # link field (e.g. customer_group) must be filled in as well.
        if self.applicable_for:
            applicable_for = frappe.scrub(self.applicable_for)
            if not self.get(applicable_for):
                msg = f"The field {frappe.bold(self.applicable_for)} is required"
                frappe.throw(_(msg))
    def METHOD_NAME(self):
        # Guard against changing 'Applicable For' once generated Pricing
        # Rules are referenced by (non-cancelled) transactions; otherwise
        # drop the stale rules so on_update can regenerate them.
        if self.is_new():
            return
        transaction_exists = False
        docnames = []
        # If user has changed applicable for
        if self._doc_before_save.applicable_for == self.applicable_for:
            return
        docnames = frappe.get_all("Pricing Rule", filters={"promotional_scheme": self.name})
        for docname in docnames:
            if frappe.db.exists(
                "Pricing Rule Detail", {"pricing_rule": docname.name, "docstatus": ("<", 2)}
            ):
                # NOTE(review): this raises, so transaction_exists is never
                # set True; the cleanup below only runs when no transaction
                # was found for any rule.
                raise_for_transaction_exists(self.name)
        if docnames and not transaction_exists:
            for docname in docnames:
                frappe.delete_doc("Pricing Rule", docname.name)
    def on_update(self):
        # Fetch the Pricing Rules previously generated from this scheme so
        # update_pricing_rules can update them in place (oldest first).
        pricing_rules = (
            frappe.get_all(
                "Pricing Rule",
                fields=["promotional_scheme_id", "name", "creation"],
                filters={"promotional_scheme": self.name, "applicable_for": self.applicable_for},
                order_by="creation asc",
            )
            or {}
        )
        self.update_pricing_rules(pricing_rules)
    def update_pricing_rules(self, pricing_rules):
        # Map each slab row id to the names of the rules generated from it,
        # then create/refresh one Pricing Rule per slab row.
        rules = {}
        count = 0
        names = []
        for rule in pricing_rules:
            names.append(rule.name)
            rules[rule.get("promotional_scheme_id")] = names
        docs = get_pricing_rules(self, rules)
        for doc in docs:
            doc.run_method("validate")
            if doc.get("__islocal"):
                count += 1
                doc.insert()
            else:
                doc.save()
                frappe.msgprint(_("Pricing Rule {0} is updated").format(doc.name))
        if count:
            frappe.msgprint(_("New {0} pricing rules are created").format(count))
    def on_trash(self):
        # Cascade-delete the generated Pricing Rules with the scheme.
        for rule in frappe.get_all("Pricing Rule", {"promotional_scheme": self.name}):
            frappe.delete_doc("Pricing Rule", rule.name)
def raise_for_transaction_exists(name):
    """Abort the save: 'Applicable For' cannot change once transactions
    reference the scheme's Pricing Rules. Always raises TransactionExists."""
    msg = f"""You can't change the {frappe.bold(_('Applicable For'))}
        because transactions are present against the Promotional Scheme {frappe.bold(name)}. """
    msg += "Kindly disable this Promotional Scheme and create new for new Applicable For."
    frappe.throw(_(msg), TransactionExists)
def get_pricing_rules(doc, rules=None):
    """Collect Pricing Rule documents for every populated discount-slab
    child table of the promotional scheme *doc*.

    *rules* maps slab-row ids to names of previously generated Pricing
    Rules, letting the helpers update instead of recreate them.
    """
    rules = {} if rules is None else rules
    slab_tables = (
        ("price_discount_slabs", price_discount_fields),
        ("product_discount_slabs", product_discount_fields),
    )
    collected = []
    for table_name, table_fields in slab_tables:
        if not doc.get(table_name):
            continue
        collected.extend(_get_pricing_rules(doc, table_name, table_fields, rules))
    return collected
def _get_pricing_rules(doc, child_doc, discount_fields, rules=None):
    """Build one Pricing Rule per slab row of the *child_doc* table.

    Each row produces either a single rule, or one rule per value of the
    scheme's 'Applicable For' field (e.g. one per customer).  Rows whose id
    appears in *rules* reuse the previously generated Pricing Rule
    documents instead of creating new ones.

    Args:
        doc: the PromotionalScheme document.
        child_doc: child table fieldname ("price_discount_slabs" or
            "product_discount_slabs").
        discount_fields: slab fields to copy onto each rule.
        rules: mapping of slab-row id -> existing Pricing Rule names.

    Returns:
        list of (new or loaded) Pricing Rule documents, not yet saved.
    """
    if rules is None:
        rules = {}
    new_doc = []
    args = get_args_for_pricing_rule(doc)
    applicable_for = frappe.scrub(doc.get("applicable_for"))
    # Cleanup: the original iterated with enumerate() but never used the
    # index, so the plain iteration is equivalent.
    for d in doc.get(child_doc):
        if d.name in rules:
            # Row already has generated rules: update them in place.
            if not args.get(applicable_for):
                docname = get_pricing_rule_docname(d)
                pr = prepare_pricing_rule(args, doc, child_doc, discount_fields, d, docname)
                new_doc.append(pr)
            else:
                for applicable_for_value in args.get(applicable_for):
                    docname = get_pricing_rule_docname(d, applicable_for, applicable_for_value)
                    pr = prepare_pricing_rule(
                        args, doc, child_doc, discount_fields, d, docname, applicable_for, applicable_for_value
                    )
                    new_doc.append(pr)
        elif args.get(applicable_for):
            # New row with 'Applicable For' values: one rule per value.
            applicable_for_values = args.get(applicable_for) or []
            for applicable_for_value in applicable_for_values:
                pr = prepare_pricing_rule(
                    args,
                    doc,
                    child_doc,
                    discount_fields,
                    d,
                    applicable_for=applicable_for,
                    value=applicable_for_value,
                )
                new_doc.append(pr)
        else:
            # New row, no 'Applicable For': a single rule.
            pr = prepare_pricing_rule(args, doc, child_doc, discount_fields, d)
            new_doc.append(pr)
    return new_doc
def get_pricing_rule_docname(
    row: dict, applicable_for: str = None, applicable_for_value: str = None
) -> str:
    """Return the name of the Pricing Rule generated from slab *row*,
    optionally narrowed to one 'Applicable For' value; "" if none exists."""
    fields = ["promotional_scheme_id", "name"]
    filters = {"promotional_scheme_id": row.name}
    if applicable_for:
        fields.append(applicable_for)
        filters[applicable_for] = applicable_for_value
    docname = frappe.get_all("Pricing Rule", fields=fields, filters=filters)
    # Multiple matches are possible; the first one is taken.
    return docname[0].name if docname else ""
def prepare_pricing_rule(
    args, doc, child_doc, discount_fields, d, docname=None, applicable_for=None, value=None
):
    """Load (by *docname*) or create a Pricing Rule and populate it from
    the scheme-level *args* plus slab row *d*; returns the unsaved doc."""
    if docname:
        pr = frappe.get_doc("Pricing Rule", docname)
    else:
        pr = frappe.new_doc("Pricing Rule")
        pr.title = doc.name
    # Copy args so the per-value override does not leak between calls.
    temp_args = args.copy()
    if value:
        temp_args[applicable_for] = value
    return set_args(temp_args, pr, doc, child_doc, discount_fields, d)
def set_args(args, pr, doc, child_doc, discount_fields, child_doc_fields):
    """Fill Pricing Rule *pr* from scheme args, slab-row fields and the
    scheme's item/item-group/brand child tables; returns *pr*."""
    pr.update(args)
    for field in other_fields + discount_fields:
        target_field = field
        # Slab rows use min_amount/max_amount but Pricing Rule stores
        # them as min_amt/max_amt.
        if target_field in ["min_amount", "max_amount"]:
            target_field = "min_amt" if field == "min_amount" else "max_amt"
        pr.set(target_field, child_doc_fields.get(field))
    pr.promotional_scheme_id = child_doc_fields.name
    pr.promotional_scheme = doc.name
    # A disabled slab row disables its rule even if the scheme is enabled.
    pr.disable = child_doc_fields.disable if child_doc_fields.disable else doc.disable
    pr.price_or_product_discount = "Price" if child_doc == "price_discount_slabs" else "Product"
    for field in ["items", "item_groups", "brands"]:
        if doc.get(field):
            # Rebuild the child table from scratch on every sync.
            pr.set(field, [])
            apply_on = frappe.scrub(doc.get("apply_on"))
            for d in doc.get(field):
                pr.append(field, {apply_on: d.get(apply_on), "uom": d.uom})
    return pr
def get_args_for_pricing_rule(doc):
    """Collect scheme-level values to copy onto generated Pricing Rules.

    For the field matching 'Applicable For', the value is the *list* of
    values from the corresponding child table (one rule is later created
    per entry); every other field is copied as-is.
    """
    args = {"promotional_scheme": doc.name}
    applicable_for = frappe.scrub(doc.get("applicable_for"))
    for d in pricing_rule_fields:
        if d == applicable_for:
            items = []
            for applicable_for_values in doc.get(applicable_for):
                items.append(applicable_for_values.get(applicable_for))
            args[d] = items
        else:
            args[d] = doc.get(d)
    return args
4,660 | tear down | # ___________________________________________________________________________
#
# Pyomo: Python Optimization Modeling Objects
# Copyright (c) 2008-2022
# National Technology and Engineering Solutions of Sandia, LLC
# Under the terms of Contract DE-NA0003525 with National Technology and
# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
# rights in this software.
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
import json
import os
from os.path import join
from filecmp import cmp
import pyomo.common.unittest as unittest
import pyomo.common
from pyomo.common.fileutils import this_file_dir
from pyomo.common.tempfiles import TempfileManager
from pyomo.core import ConcreteModel
from pyomo.opt import ResultsFormat, SolverResults, SolverFactory
currdir = this_file_dir()
deleteFiles = True
old_ignore_time = None
def setUpModule():
    # Suppress timing output module-wide so test baselines compare stably;
    # the previous setting is saved and restored in tearDownModule.
    global old_ignore_time
    old_ignore_time = SolverResults.default_print_options.ignore_time
    SolverResults.default_print_options.ignore_time = True
def tearDownModule():
    # Restore the print option saved by setUpModule.
    SolverResults.default_print_options.ignore_time = old_ignore_time
# Set for real in mock_all.setUpClass once pyomo.environ is imported.
cplexamp_available = False
class mock_all(unittest.TestCase):
    """Exercises the ASL solver interface through a mock cplexamp solver.

    The subclass ``mip_all`` reruns the same tests against the real
    'cplexamp' executable when it is available.
    """
    @classmethod
    def setUpClass(cls):
        global cplexamp_available
        import pyomo.environ
        from pyomo.solvers.tests.solvers import test_solver_cases
        cplexamp_available = test_solver_cases('cplex', 'nl').available
    def setUp(self):
        self.do_setup(False)
    def do_setup(self, flag):
        # Push a tempfile context so files created by a test are removed
        # in METHOD_NAME (the tearDown hook).
        TempfileManager.push()
        if flag:
            if not cplexamp_available:
                self.skipTest("The 'cplexamp' command is not available")
            self.asl = SolverFactory('asl:cplexamp')
        else:
            self.asl = SolverFactory('_mock_asl:cplexamp')
    def METHOD_NAME(self):
        # Keep the temp files for failed tests when deleteFiles is False.
        TempfileManager.pop(remove=deleteFiles or self.currentTestPassed())
        self.asl = None
    def test_path(self):
        """Verify that the ASL path is what is expected"""
        # Bug fix: the original compared the type *object* against the
        # string 'ASL' (always False), so the assertion never ran.
        # Compare the type name instead.
        if type(self.asl).__name__ == 'ASL':
            self.assertEqual(
                self.asl.executable.split(os.sep)[-1],
                "ASL" + pyomo.common.executable_extension,
            )
    def test_solve4(self):
        """Test ASL - test4.nl"""
        _log = TempfileManager.create_tempfile(".test_solve4.log")
        _out = TempfileManager.create_tempfile(".test_solve4.txt")
        results = self.asl.solve(
            join(currdir, "test4.nl"), logfile=_log, suffixes=['.*']
        )
        results.write(filename=_out, times=False, format='json')
        _baseline = join(currdir, "test4_asl.txt")
        with open(_out, 'r') as out, open(_baseline, 'r') as txt:
            self.assertStructuredAlmostEqual(
                json.load(txt), json.load(out), abstol=1e-4, allow_second_superset=True
            )
    #
    # This test is disabled, but it's useful for interactively exercising
    # the option specifications of a solver
    #
    def Xtest_options(self):
        """Test ASL options behavior"""
        results = self.asl.solve(
            currdir + "bell3a.mps",
            logfile=currdir + "test_options.log",
            options="sec=0.1 foo=1 bar='a=b c=d' xx_zz=yy",
            suffixes=['.*'],
        )
        results.write(filename=currdir + "test_options.txt", times=False)
        _out, _log = join(currdir, "test_options.txt"), join(currdir, "test4_asl.txt")
        self.assertTrue(cmp(_out, _log), msg="Files %s and %s differ" % (_out, _log))
        # os.remove(currdir+"test4.sol")
        # os.remove(currdir+"test_solve4.log")
    def test_error1(self):
        """Bad results format"""
        try:
            model = ConcreteModel()
            results = self.asl.solve(model, format=ResultsFormat.sol, suffixes=['.*'])
            self.fail("test_error1")
        except ValueError:
            pass
    def test_error2(self):
        """Bad solve option"""
        try:
            model = ConcreteModel()
            results = self.asl.solve(model, foo="bar")
            self.fail("test_error2")
        except ValueError:
            pass
    def test_error3(self):
        """Bad solve option"""
        try:
            results = self.asl.solve(currdir + "model.py", foo="bar")
            self.fail("test_error3")
        except ValueError:
            pass
class mip_all(mock_all):
    # Rerun all mock_all tests against the real cplexamp executable;
    # do_setup(True) skips the tests when it is unavailable.
    def setUp(self):
        self.do_setup(True)
if __name__ == "__main__":
deleteFiles = False
unittest.main() |
4,661 | handler | #!/usr/bin/env python2
"""
posix_test.py: Tests for our posix_ module subset.
NOTE: There are more tests in Python-2.7.13/Lib/test/test_posix.py.
Notes on stripping posixmodule.c:
I left in:
- putenv, unsetenv: Isn't it simpler to use these than os.environ? I'm not
sure how it works.
- tcgetpgrp, tcsetpgrp, setsid, getsid: is this for job control?
- times: This is a builtin! It's like 'time' for the shell prosecs itself.
- symlink - useful for writing tools?
- waitpid - because we're using wait
- set*uid, etc. - for container tools?
- kill, killpg - would the kill builtin need these?
- getppid - I think for $PPID
- mkdir, rmdir() -- might be useful for tools
Other notes:
- The shell uses dup2 but not dup?
"""
from __future__ import print_function
import signal
import subprocess
import unittest
import posix_ # module under test
from mycpp.mylib import log
# Taken from build/oil-defs/.../posix_methods.def
FUNCS = [
"access",
"chdir",
"getcwd",
"listdir",
"lstat",
"readlink",
"stat",
"umask",
"uname",
"_exit",
"execv",
"execve",
"fork",
"geteuid",
"getpid",
"getuid",
"wait",
"open",
"close",
"dup2",
"read",
"write",
"fdopen",
"isatty",
"pipe",
"strerror",
"WIFSIGNALED",
"WIFEXITED",
"WEXITSTATUS",
"WTERMSIG",
# Additional names found by grepping
'X_OK',
'R_OK',
'W_OK',
'O_APPEND',
'O_CREAT',
'O_RDONLY',
'O_RDWR',
'O_TRUNC',
'O_WRONLY',
]
class PosixTest(unittest.TestCase):
    """Smoke tests for the posix_ extension module, plus interactive EINTR
    probes: the test* methods guarded by the EINTR_TEST environment
    variable deliberately block so a SIGTERM can interrupt the syscall."""
    def testFoo(self):
        print(posix_.getcwd())
        # Testing this because I removed a lot of #ifdef
        entries = posix_.listdir('.')
        self.assert_('doc' in entries)
    def testFunctionsExist(self):
        # Every name in FUNCS must survive the posixmodule.c stripping.
        for name in FUNCS:
            func = getattr(posix_, name)
            print(func)
    def testEmptyReadAndWrite(self):
        # Regression for bug where this would hang
        posix_.read(0, 0)
        posix_.write(1, '')
    def testRead(self):
        if posix_.environ.get('EINTR_TEST'):
            # Now we can do kill -TERM PID can get EINTR.
            # Or Ctrl-C for KeyboardInterrupt
            signal.signal(signal.SIGTERM, METHOD_NAME)
            log('Hanging on read in pid %d', posix_.getpid())
            posix_.read(0, 1)
    def testWait(self):
        if posix_.environ.get('EINTR_TEST'):
            # Now we can do kill -TERM PID can get EINTR.
            signal.signal(signal.SIGTERM, METHOD_NAME)
            p = subprocess.Popen(['sleep', '5'])
            log('started sleep pid %d', p.pid)
            log('Hanging on wait in pid %d', posix_.getpid())
            posix_.wait()
    def testWaitpid(self):
        if posix_.environ.get('EINTR_TEST'):
            # Now we can do kill -TERM PID can get EINTR.
            signal.signal(signal.SIGTERM, METHOD_NAME)
            p = subprocess.Popen(['sleep', '5'])
            log('started sleep pid %d', p.pid)
            log('Hanging on waitpid in pid %d', posix_.getpid())
            posix_.waitpid(-1, 0)
    def testWrite(self):
        if posix_.environ.get('EINTR_TEST'):
            signal.signal(signal.SIGTERM, METHOD_NAME)
            r, w = posix_.pipe()
            log('Hanging on write in pid %d', posix_.getpid())
            # 1 byte bigger than pipe size
            n = posix_.write(w, 'x'*65537)
            log('1: Wrote %d bytes', n)
            # write returns early when a signal interrupts it, and we read at least
            # one byte! We do NOT get EINTR>
            # On the second try, it didn't write anything, and we get EINTR!
            log('Second try (pid %d)', posix_.getpid())
            n = posix_.write(w, 'x'*65537)
            log('2: Wrote %d bytes', n)
    def testPrint(self):
        # Conclusion: print CAN raise IOError with EINTR.
        if posix_.environ.get('EINTR_TEST'):
            signal.signal(signal.SIGTERM, METHOD_NAME)
            r, w = posix_.pipe()
            log('Hanging on write in pid %d', posix_.getpid())
            f = posix_.fdopen(w, 'w')
            # 1 byte bigger than pipe size
            print('x'*65537, file=f)
            log('1: done')
            # write returns early when a signal interrupts it, and we read at least
            # one byte! We do NOT get EINTR>
            # On the second try, it didn't write anything, and we get EINTR!
            log('Second try (pid %d)', posix_.getpid())
            print('x'*65537, file=f)
            log('2: done')
def METHOD_NAME(x, y):
    # Signal handler with the (signum, frame) signature; it only logs the
    # delivery so the EINTR behaviour of the interrupted syscall is visible.
    log('Got signal %s %s', x, y)
if __name__ == '__main__':
unittest.main() |
4,662 | rclick menu | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
from bpy.props import BoolProperty, FloatProperty, EnumProperty
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode, list_match_func, numpy_list_match_modes, numpy_list_match_func
from sverchok.utils.sv_itertools import recurse_f_level_control
import numpy as np
def map_range(params, constant, matching_f):
    """Linearly remap values from [old_min, old_max] to [new_min, new_max].

    Args:
        params: (value, old_min, old_max, new_min, new_max) lists, matched
            in length by *matching_f* and then element-wise by the
            configured numpy match mode.
        constant: (clamp, auto_limits, match_mode, out_numpy) node options.
        matching_f: list-length matching function (from list_match_func).

    Returns:
        list of remapped per-object results (numpy arrays or plain lists,
        depending on out_numpy).
    """
    result = []
    clamp, auto_limits, match_mode, out_numpy = constant
    params = matching_f(params)
    numpy_match = numpy_list_match_func[match_mode]
    for props in zip(*params):
        np_props = [np.array(prop) for prop in props]
        val, old_min, old_max, new_min, new_max = numpy_match(np_props)
        if auto_limits:
            # Input range taken from the data itself; supplied old_min /
            # old_max sockets are ignored (and hidden in the UI).
            old_min = np.min(val)
            old_max = np.max(val)
        old_difference = old_max - old_min
        res = new_min + (val - old_min) * ((new_max - new_min)/(old_difference))
        # Degenerate input range (old_min == old_max) would produce
        # inf/NaN from the division above; pin those entries to old_min.
        if res.shape == old_min.shape:
            nan_mask = old_difference == 0
            res[nan_mask] = old_min[nan_mask]
        else:
            # old_min broadcast to a different shape (e.g. scalar limits).
            res[old_difference == 0] = old_min
        if clamp and not auto_limits:
            # Clamp handles inverted output ranges (new_min > new_max) by
            # swapping the bounds per-element.
            mask = new_min < new_max
            invert_mask = np.invert(mask)
            res[mask] = np.clip(res[mask], new_min[mask], new_max[mask])
            res[invert_mask] = np.clip(res[invert_mask], new_max[invert_mask], new_min[invert_mask])
        result.append(res if out_numpy else res.tolist())
    return result
class SvMapRangeNode(SverchCustomTreeNode, bpy.types.Node):
    """
    Triggers: Map a range from one to another
    Tooltip: Map input list setting input range limits and output range limits\n\tList Limits (On/Off)\n\tClamps (On/Off)\n\tValue: [.0]\n\tOld Min: [.0]\n\tOld Max: [1.]\n\tNew Min: [.0]\n\tNew Max: [10.]
    """
    bl_idname = 'SvMapRangeNode'
    bl_label = 'Map Range'
    bl_icon = 'MOD_OFFSET'
    def update_sockets(self, context):
        # Hide the old-range input sockets when limits are derived from
        # the data itself (auto_limits), unless they are already linked.
        if not self.inputs["Old Min"].is_linked:
            self.inputs["Old Min"].hide_safe = self.auto_limits
        if not self.inputs["Old Max"].is_linked:
            self.inputs["Old Max"].hide_safe = self.auto_limits
        updateNode(self, context)
    # Socket default properties (used when the socket is not linked).
    old_min: FloatProperty(
        name='Old Min', description='Old Min',
        default=0, update=updateNode)
    old_max: FloatProperty(
        name='Old Max', description='Old Max',
        default=1, update=updateNode)
    new_min: FloatProperty(
        name='New Min', description='New Min',
        default=0, update=updateNode)
    new_max: FloatProperty(
        name='New Max', description='New Max',
        default=10, update=updateNode)
    value: FloatProperty(
        name='Value', description='New Max',
        default=.5, update=updateNode)
    clamp: BoolProperty(
        name='Clamp', description='clamp the values if they are outside the range',
        default=True, update=updateNode)
    auto_limits: BoolProperty(
        name='List limits', description='Use old min and old max from list',
        default=False, update=update_sockets)
    list_match: EnumProperty(
        name="List Match",
        description="Behavior on different list lengths",
        items=numpy_list_match_modes, default="REPEAT",
        update=updateNode)
    output_numpy: BoolProperty(
        name='Output NumPy',
        description='Output NumPy arrays',
        default=False, update=updateNode)
    def sv_init(self, context):
        self.inputs.new('SvStringsSocket', "Value").prop_name = 'value'
        self.inputs.new('SvStringsSocket', "Old Min").prop_name = 'old_min'
        self.inputs.new('SvStringsSocket', "Old Max").prop_name = 'old_max'
        self.inputs.new('SvStringsSocket', "New Min").prop_name = 'new_min'
        self.inputs.new('SvStringsSocket', "New Max").prop_name = 'new_max'
        self.outputs.new('SvStringsSocket', "Value")
    def draw_buttons(self, context, layout):
        # Clamp option only makes sense with user-supplied limits.
        layout.prop(self, "auto_limits")
        if not self.auto_limits:
            layout.prop(self, "clamp")
    def draw_buttons_ext(self, ctx, layout):
        # Sidebar (N-panel) UI: same as draw_buttons plus advanced options.
        layout.prop(self, "auto_limits")
        if not self.auto_limits:
            layout.prop(self, "clamp")
        layout.prop(self, "list_match", expand=False)
        layout.prop(self, "output_numpy", expand=False)
    def METHOD_NAME(self, context, layout):
        # Right-click context-menu entries for the node.
        layout.prop(self, "auto_limits")
        if not self.auto_limits:
            layout.prop(self, "clamp")
        layout.prop_menu_enum(self, "list_match", text="List Match")
        layout.prop(self, "output_numpy", expand=False)
    def process(self):
        inputs = self.inputs
        outputs = self.outputs
        # no outputs, end early.
        if not outputs['Value'].is_linked:
            return
        params = [si.sv_get(default=[[]], deepcopy=False) for si in inputs]
        matching_f = list_match_func[self.list_match]
        # Each parameter is expected at nesting level 2 (list of lists).
        desired_levels = [2 for p in params]
        ops = [self.clamp, self.auto_limits, self.list_match, self.output_numpy]
        result = recurse_f_level_control(params, ops, map_range, matching_f, desired_levels)
        self.outputs[0].sv_set(result)
def register():
bpy.utils.register_class(SvMapRangeNode)
def unregister():
bpy.utils.unregister_class(SvMapRangeNode) |
4,663 | comma list | #! /usr/bin/env python
# Copyright 2020-2023 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from scipy.stats import sem
import matplotlib.pyplot as plt
from os import path, listdir
from itertools import product
from map_simulator.utils import mkdir_p
def METHOD_NAME(list_string):
    """Parse a comma-separated argument string into a list of items.

    Used as the argparse ``type=`` callable for the filter options.

    Bug fix: ``strip`` was referenced without calling it (missing
    parentheses), so ``.split`` was looked up on the bound-method object
    and raised AttributeError for every comma-list argument.
    """
    return list_string.strip().split(',')
if __name__ == "__main__":
import argparse
def_data_path = path.join("~", "Desktop", "Experiments", "MethodComparison", "err")
def_out_path = path.join(def_data_path, "hist")
file_filters = {
'n_moves': {'arg_abr': 'n', 'desc': 'number of moves', 'index': 0, 'values': []},
'm_model': {'arg_abr': 'm', 'desc': 'map models', 'index': 1, 'values': []},
'p_weight': {'arg_abr': 'w', 'desc': 'particle weighting methods', 'index': 2, 'values': []},
'imp_pose': {'arg_abr': 'i', 'desc': 'pose improve methods', 'index': 3, 'values': []},
'err_type': {'arg_abr': 'e', 'desc': 'error types', 'index': 4, 'values': []},
'test_env': {'arg_abr': 't', 'desc': 'test environments', 'index': 5, 'values': []}
}
chc_file_filters = sorted(list(file_filters.keys()), key=lambda x: file_filters[x]['index'])
parser = argparse.ArgumentParser(description='Read the data collected into csv files in a given directory \
and plot histograms.')
parser.add_argument('-d', '--dir', action='store', type=str, default=def_data_path,
help='Path of the directory where the CSV error files are stored.')
parser.add_argument('-x', '--extension', action='store', type=str, default='csv',
help='Data file extension. [Default: csv].')
parser.add_argument('-o', '--out_dir', action='store', type=str, default=def_out_path,
help='Output Directory where histograms will be saved.')
# Combine histograms in same plot by this field
parser.add_argument('-c', '--combine_by', action='store', type=str, choices=chc_file_filters,
default='m_model', help='Combine histograms into same plot by field. [Default: "map_model".]')
# Filter files by
for file_filter in chc_file_filters:
file_filter_dict = file_filters[file_filter]
short_optn = '-{}'.format(file_filter_dict['arg_abr'])
long_optn = '--{}'.format(file_filter)
desc = 'Comma separated list of {desc}. If None, then all different {desc}' \
'in the directory will be used. [Default None].'.format(desc=file_filter_dict['desc'])
parser.add_argument(short_optn, long_optn, action='store', type=METHOD_NAME, default=None, help=desc)
args = parser.parse_args()
arg_dict = vars(args)
data_path = path.expandvars(path.expanduser(args.dir))
out_path = path.expandvars(path.expanduser(args.out_dir))
# Available data options in data path
avl_options = {}
# If any of the filters was set to None, then check the available types from the files in the path.
get_types_from_file = False
for file_filter in chc_file_filters:
if arg_dict[file_filter] is None:
get_types_from_file = True
break
if get_types_from_file:
tmp_ext = ".{}".format(args.extension)
path_files = listdir(data_path)
path_files = [f[:f.find(tmp_ext)] for f in path_files if tmp_ext in f]
path_files = [f.split("_") for f in path_files]
path_files = (zip(*path_files))
path_files = [set(c) for c in path_files]
for file_filter in chc_file_filters:
avl_options[file_filter] = path_files[file_filters[file_filter]['index']]
for file_filter in chc_file_filters:
if arg_dict[file_filter] is None:
file_filters[file_filter]['values'] = sorted(list(avl_options[file_filter]))
else:
file_filters[file_filter]['values'] = sorted(list(arg_dict[file_filter]))
if not path.exists(out_path):
mkdir_p(out_path)
FS = ","
LS = "\n"
extension = "csv"
data_header = ["Experiment_Num"]
data = []
max_experiments = -1
plt.ioff()
combination_filters = sorted(file_filters.keys(), key=lambda x: file_filters[x]['index'])
combination_filters.remove(args.combine_by)
file_combinations = product(*[file_filters[k]['values'] for k in combination_filters])
file_combination_orders = [file_filters[k]['index'] for k in combination_filters]
file_combination_orders.append(file_filters[args.combine_by]['index'])
combine_options = file_filters[args.combine_by]['values']
print("Saving figures to: " + out_path)
for file_comb in file_combinations:
hist_plots = []
for combine_by in combine_options:
file_name_list = [f for f in file_comb]
file_name_list.append(combine_by)
file_name_list = sorted(zip(file_combination_orders, file_name_list))
file_name_list = [f[1] for f in file_name_list]
file_name = "_".join(file_name_list)
err_file_path = path.join(data_path, file_name + '.' + extension)
data_header.append(file_name)
err = []
no_experiments = 0
with open(err_file_path, 'r') as f:
for line in f:
err.append(float(line.split(FS)[-1]))
no_experiments += 1
hist_plots.append({'lbl': combine_by, 'dat': err, 'cnt': no_experiments})
max_experiments = max(max_experiments, no_experiments)
data.append(err)
fig_name = "_".join(file_comb)
print("Plotting figure: " + fig_name)
plt.figure()
err_data = [e['dat'] for e in hist_plots]
bins = np.histogram(np.hstack(err_data), bins=20)[1]
for hist in hist_plots:
label = '{} ({} exp.)'.format(hist['lbl'], hist['cnt'])
plt.hist(hist['dat'], bins, label=label, density=True, alpha=1.0/len(combine_options))
plt.legend()
plt.title(fig_name)
plt.savefig(path.join(out_path, fig_name + "_hist.svg"))
plt.close()
data_file_path = path.join(out_path, "hist_data.dat")
print("Saving data file to: " + data_file_path)
lines = [FS.join(data_header)]
mean_line = ["Means"]
var_line = ["Variances"]
sem_line = ["Standard_Error_of_the_Mean"]
for e_data in data:
mean_line.append(str(np.mean(e_data)))
var_line.append(str(np.var(e_data)))
sem_line.append(str(sem(e_data)))
lines.append(FS.join(mean_line))
lines.append(FS.join(var_line))
lines.append(FS.join(sem_line))
for i in range(max_experiments):
line = [str(i + 1)]
for exp_data in data:
if i < len(exp_data):
value = str(exp_data[i])
else:
value = ""
line.append(value)
lines.append(FS.join(line))
with open(data_file_path, "w") as f:
for line in lines:
f.write(line + LS) |
4,664 | test transpiling | # This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
noise_model_inserter module tests
"""
import unittest
from qiskit_aer.noise import NoiseModel
from qiskit_aer.noise.errors.standard_errors import pauli_error
from qiskit_aer.utils import insert_noise
from test.terra.common import QiskitAerTestCase
from qiskit import QuantumRegister, QuantumCircuit, transpile
from qiskit.quantum_info import SuperOp
class TestNoiseInserter(QiskitAerTestCase):
    """Tests for qiskit_aer.utils.insert_noise: noise instructions must be
    appended after each matching gate, per the given NoiseModel."""
    def test_no_noise(self):
        # An empty noise model must leave the circuit unchanged.
        qr = QuantumRegister(3, "qr")
        circuit = QuantumCircuit(qr)
        circuit.x(qr[0])
        circuit.y(qr[1])
        circuit.z(qr[2])
        target_circuit = QuantumCircuit(qr)
        target_circuit.x(qr[0])
        target_circuit.y(qr[1])
        target_circuit.z(qr[2])
        noise_model = NoiseModel()  # empty
        result_circuit = insert_noise(circuit, noise_model)
        self.assertEqual(SuperOp(target_circuit), SuperOp(result_circuit))
    def test_all_qubit_quantum_errors(self):
        # All-qubit errors apply after every matching gate on any qubit.
        qr = QuantumRegister(3, "qr")
        circuit = QuantumCircuit(qr)
        circuit.x(qr[0])
        circuit.y(qr[1])
        circuit.z(qr[2])
        error_x = pauli_error([("Y", 0.25), ("I", 0.75)])
        error_y = pauli_error([("X", 0.35), ("Z", 0.65)])
        noise_model = NoiseModel()
        noise_model.add_all_qubit_quantum_error(error_x, "x")
        noise_model.add_all_qubit_quantum_error(error_y, "y")
        target_circuit = QuantumCircuit(qr)
        target_circuit.x(qr[0])
        target_circuit.append(error_x.to_instruction(), [qr[0]])
        target_circuit.y(qr[1])
        target_circuit.append(error_y.to_instruction(), [qr[1]])
        target_circuit.z(qr[2])
        result_circuit = insert_noise(circuit, noise_model)
        self.assertEqual(target_circuit, result_circuit)
    def test_local_quantum_errors(self):
        # Local errors apply only on the registered qubits: the x on qr[1]
        # must stay noiseless.
        qr = QuantumRegister(3, "qr")
        circuit = QuantumCircuit(qr)
        circuit.x(qr[0])
        circuit.x(qr[1])
        circuit.y(qr[2])
        error_x = pauli_error([("Y", 0.25), ("I", 0.75)])
        error_y = pauli_error([("X", 0.35), ("Z", 0.65)])
        noise_model = NoiseModel()
        noise_model.add_quantum_error(error_x, "x", [0])
        noise_model.add_quantum_error(error_y, "y", [2])
        target_circuit = QuantumCircuit(qr)
        target_circuit.x(qr[0])
        target_circuit.append(error_x.to_instruction(), [qr[0]])
        target_circuit.x(qr[1])
        target_circuit.y(qr[2])
        target_circuit.append(error_y.to_instruction(), [qr[2]])
        result_circuit = insert_noise(circuit, noise_model)
        self.assertEqual(SuperOp(target_circuit), SuperOp(result_circuit))
    def METHOD_NAME(self):
        # insert_noise(transpile=True) must match inserting errors first
        # and then transpiling to the noise model's basis gates.
        qr = QuantumRegister(3, "qr")
        circuit = QuantumCircuit(qr)
        circuit.x(qr[0])
        circuit.y(qr[1])
        circuit.z(qr[2])
        error_x = pauli_error([("Y", 0.25), ("I", 0.75)])
        error_y = pauli_error([("X", 0.35), ("Z", 0.65)])
        noise_model = NoiseModel()
        noise_model.add_all_qubit_quantum_error(error_x, "x")
        noise_model.add_all_qubit_quantum_error(error_y, "y")
        target_circuit = QuantumCircuit(qr)
        target_circuit.x(qr[0])
        target_circuit.append(error_x, [qr[0]])
        target_circuit.y(qr[1])
        target_circuit.append(error_y, [qr[1]])
        target_circuit.z(qr[2])
        target_basis = ["quantum_channel"] + noise_model.basis_gates
        target_circuit = transpile(target_circuit, basis_gates=target_basis)
        result_circuit = insert_noise(circuit, noise_model, transpile=True)
        self.assertEqual(SuperOp(target_circuit), SuperOp(result_circuit))
    def test_multiple_inputs(self):
        # insert_noise accepts a list or tuple of circuits and returns one
        # noisy circuit per input.
        qr = QuantumRegister(1, "qr")
        circuit1 = QuantumCircuit(qr)
        circuit1.x(qr[0])
        circuit2 = QuantumCircuit(qr)
        circuit2.y(qr[0])
        circuits_list = [circuit1, circuit2]
        circuits_tuple = (circuit1, circuit2)
        noise_model = NoiseModel()
        error_x = pauli_error([("Y", 0.25), ("I", 0.75)])
        error_y = pauli_error([("X", 0.35), ("Z", 0.65)])
        noise_model.add_all_qubit_quantum_error(error_x, "x")
        noise_model.add_all_qubit_quantum_error(error_y, "y")
        target_circuit1 = QuantumCircuit(qr)
        target_circuit1.x(qr[0])
        target_circuit1.append(error_x.to_instruction(), [qr[0]])
        target_circuit2 = QuantumCircuit(qr)
        target_circuit2.y(qr[0])
        target_circuit2.append(error_y.to_instruction(), [qr[0]])
        target_circuits = [target_circuit1, target_circuit2]
        result_circuits = insert_noise(circuits_list, noise_model)
        self.assertEqual(target_circuits, result_circuits)
        targets = [SuperOp(i) for i in [target_circuit1, target_circuit2]]
        results = [SuperOp(i) for i in insert_noise(circuits_tuple, noise_model)]
        self.assertEqual(targets, results)
# Allow running this test module directly.
# (Stray " |" extraction residue removed from the end of the file.)
if __name__ == "__main__":
    unittest.main()
from Screens.Screen import Screen
from Components.ActionMap import ActionMap
from Components.Harddisk import harddiskmanager
from Components.MenuList import MenuList
from Components.Label import Label
from Components.Pixmap import Pixmap
from Components.Task import job_manager
from Components.config import config
from Components.Sources.StaticText import StaticText
from Screens.MessageBox import MessageBox
from Tools.BoundFunction import boundFunction
import Screens.InfoBar
class HarddiskSetup(Screen):
    """Confirmation screen for a long-running storage-device job.

    Shows model/capacity/bus info for *hdd* and, once the user confirms
    *question*, schedules the job returned by *action* (initialize, check,
    convert) via the global job_manager — stopping timeshift first when
    necessary.
    """

    def __init__(self, session, hdd, action, text, question):
        Screen.__init__(self, session)
        self.setTitle(text)
        self.action = action          # no-arg callable returning a Task job
        self.question = question      # confirmation prompt shown to the user
        self.curentservice = None     # service to restore after timeshift stop
        self["model"] = Label(_("Model: ") + hdd.model())
        self["capacity"] = Label(_("Capacity: ") + hdd.capacity())
        self["bus"] = Label(_("Bus: ") + hdd.bus())
        self["key_red"] = Label(_("Cancel"))
        self["key_green"] = Label(text)  # text can be either "Initialize" or "Check"
        self["actions"] = ActionMap(["OkCancelActions"],
        {
            "ok": self.hddQuestion,
            "cancel": self.close
        })
        self["shortcuts"] = ActionMap(["ShortcutActions"],
        {
            "red": self.close,
            "green": self.hddQuestion
        })

    def hddQuestion(self, answer=False):
        print('[HarddiskSetup] answer:', answer)
        # Guard: the InfoBar singleton may not exist yet (e.g. during boot).
        infobar = Screens.InfoBar.InfoBar.instance
        if infobar and infobar.timeshiftEnabled():
            # NOTE: "timeshft" typo fixed; the matching translation catalogs
            # must be updated to the corrected msgid.
            message = self.question + "\n\n" + _("You seem to be in timeshift, the service will briefly stop as timeshift stops.")
            message += '\n' + _("Do you want to continue?")
            self.session.openWithCallback(self.stopTimeshift, MessageBox, message)
        else:
            message = self.question + "\n" + _("You can continue watching TV while this is running.")
            self.session.openWithCallback(self.hddConfirmed, MessageBox, message)

    def stopTimeshift(self, confirmed):
        # MessageBox callback: only proceed when the user agreed to stop
        # timeshift; declining aborts the whole operation.
        if confirmed:
            self.curentservice = self.session.nav.getCurrentlyPlayingServiceReference()
            self.session.nav.stopService()
            Screens.InfoBar.InfoBar.instance.stopTimeshiftcheckTimeshiftRunningCallback(True)
            self.hddConfirmed(True)

    def hddConfirmed(self, confirmed):
        if not confirmed:
            return
        try:
            job_manager.AddJob(self.action())
            # Bring the freshly queued job's progress view to the front.
            for job in job_manager.getPendingJobs():
                if job.name in (_("Initializing storage device..."), _("Checking filesystem..."), _("Converting ext3 to ext4...")):
                    self.showJobView(job)
                    break
        except Exception as ex:
            self.session.open(MessageBox, str(ex), type=MessageBox.TYPE_ERROR, timeout=10)
        if self.curentservice:
            # Timeshift was stopped earlier: resume the interrupted service.
            self.session.nav.playService(self.curentservice)
        self.close()

    def showJobView(self, job):
        # Deferred import: TaskView pulls in heavy UI dependencies.
        from Screens.TaskView import JobView
        job_manager.in_background = False
        self.session.openWithCallback(self.METHOD_NAME, JobView, job, cancelable=False, afterEventChangeable=False, afterEvent="close")

    def METHOD_NAME(self, in_background):
        # JobView close callback: restore background-execution preference.
        job_manager.in_background = in_background
class HarddiskSelection(Screen):
    # Device picker: lists all detected storage devices and opens
    # HarddiskSetup (initialize flavour) for the chosen one.
    def __init__(self, session):
        Screen.__init__(self, session)
        self.setTitle(_("Initialize Devices"))
        self.skinName = "HarddiskSelection"  # For derived classes
        if harddiskmanager.HDDCount() == 0:
            # placeholder entry; slot 1 holds 0 instead of a Harddisk object
            tlist = [(_("no storage devices found"), 0)]
            self["hddlist"] = MenuList(tlist)
        else:
            self["hddlist"] = MenuList(harddiskmanager.HDDList())
        self["actions"] = ActionMap(["OkCancelActions"],
        {
            "ok": self.okbuttonClick,
            "cancel": self.close
        })

    def doIt(self, selection):
        # selection is a Harddisk object; confirm, then schedule the
        # (destructive) initialize job. Overridden by subclasses.
        self.session.openWithCallback(self.close, HarddiskSetup, selection,
            action=selection.createInitializeJob,
            text=_("Initialize"),
            question=_("Do you really want to initialize this device?\nAll the data on the device will be lost!"))

    def okbuttonClick(self):
        selection = self["hddlist"].getCurrent()
        # entries are (label, hdd) tuples; hdd slot is 0 only for the
        # "no storage devices found" placeholder
        if selection[1] != 0:
            self.doIt(selection[1])
            # NOTE(review): close(True) placement relative to the guard is
            # ambiguous in the original formatting — confirm against upstream
            self.close(True)
# This is actually just HarddiskSelection but with correct type
class HarddiskFsckSelection(HarddiskSelection):
    """Same device picker as HarddiskSelection, but schedules a filesystem
    check instead of an initialize. (Stray " |" residue removed from the
    final line.)"""

    def __init__(self, session):
        HarddiskSelection.__init__(self, session)
        self.setTitle(_("Filesystem Check"))
        self.skinName = "HarddiskSelection"

    def doIt(self, selection):
        # selection is a Harddisk object; confirm, then queue the check job.
        self.session.openWithCallback(self.close, HarddiskSetup, selection,
            action=selection.createCheckJob,
            text=_("Check"),
            question=_("Do you really want to check the filesystem?\nThis could take a long time!"))
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
import os
import sys
import unittest
from os import path
# noinspection PyProtectedMember
from numpy.testing import assert_allclose
from numpy.testing import assert_array_less
from numpy.testing import assert_equal
from numpy.testing import assert_raises
from scipy.io import loadmat
from scipy.stats import rankdata
from sklearn.base import clone
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.utils.validation import check_X_y
# temporary solution for relative imports in case pyod is not installed
# if pyod is installed, no need to use the following line
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from pyod.models.xgbod import XGBOD
from pyod.utils.data import generate_data
class TestXGBOD(unittest.TestCase):
    """End-to-end tests for the supervised XGBOD detector on pima.mat,
    falling back to synthetic data when the file is unavailable."""

    def setUp(self):
        # Define data file and read X and y; generate data if missing.
        this_directory = path.abspath(path.dirname(__file__))
        mat_file = 'pima.mat'
        try:
            mat = loadmat(path.join(*[this_directory, 'data', mat_file]))
        except (TypeError, IOError):
            # loadmat may raise either, depending on how the path fails;
            # both branches used identical fallback code, merged here
            print('{data_file} does not exist. Use generated data'.format(
                data_file=mat_file))
            X, y = generate_data(train_only=True)  # load data
        else:
            X = mat['X']
            y = mat['y'].ravel()
            X, y = check_X_y(X, y)
        self.X_train, self.X_test, self.y_train, self.y_test = \
            train_test_split(X, y, test_size=0.4, random_state=42)
        self.clf = XGBOD(random_state=42)
        self.clf.fit(self.X_train, self.y_train)
        self.roc_floor = 0.75  # minimum acceptable test ROC-AUC

    def test_parameters(self):
        # fitted attributes must exist and be populated after fit()
        assert (hasattr(self.clf, 'clf_') and
                self.clf.decision_scores_ is not None)
        assert (hasattr(self.clf, '_scalar') and
                self.clf.labels_ is not None)
        assert (hasattr(self.clf, 'n_detector_') and
                self.clf.labels_ is not None)
        assert (hasattr(self.clf, 'X_train_add_') and
                self.clf.labels_ is not None)
        assert (hasattr(self.clf, 'decision_scores_') and
                self.clf.decision_scores_ is not None)
        assert (hasattr(self.clf, 'labels_') and
                self.clf.labels_ is not None)

    def test_train_scores(self):
        assert_equal(len(self.clf.decision_scores_), self.X_train.shape[0])

    def METHOD_NAME(self):
        pred_scores = self.clf.decision_function(self.X_test)
        # check score shapes
        assert_equal(pred_scores.shape[0], self.X_test.shape[0])
        # check performance
        assert (roc_auc_score(self.y_test, pred_scores) >= self.roc_floor)

    def test_prediction_labels(self):
        pred_labels = self.clf.predict(self.X_test)
        assert_equal(pred_labels.shape, self.y_test.shape)

    def test_prediction_proba(self):
        pred_proba = self.clf.predict_proba(self.X_test)
        assert (pred_proba.min() >= 0)
        assert (pred_proba.max() <= 1)

    def test_fit_predict(self):
        pred_labels = self.clf.fit_predict(self.X_train, self.y_train)
        assert_equal(pred_labels.shape, self.y_train.shape)

    def test_fit_predict_score(self):
        self.clf.fit_predict_score(self.X_test, self.y_test)
        self.clf.fit_predict_score(self.X_test, self.y_test,
                                   scoring='roc_auc_score')
        self.clf.fit_predict_score(self.X_test, self.y_test,
                                   scoring='prc_n_score')
        with assert_raises(NotImplementedError):
            self.clf.fit_predict_score(self.X_test, self.y_test,
                                       scoring='something')

    def test_predict_rank(self):
        # typo fixed: was "pred_socres"
        pred_scores = self.clf.decision_function(self.X_test)
        pred_ranks = self.clf._predict_rank(self.X_test)
        print(pred_ranks)
        # assert the score ordering is preserved by the ranks
        # NOTE(review): rtol=4 is unusually loose — confirm intent upstream
        assert_allclose(rankdata(pred_ranks), rankdata(pred_scores), rtol=4)
        assert_array_less(pred_ranks, self.X_train.shape[0] + 1)
        assert_array_less(-0.1, pred_ranks)

    def test_predict_rank_normalized(self):
        pred_scores = self.clf.decision_function(self.X_test)
        pred_ranks = self.clf._predict_rank(self.X_test, normalized=True)
        # normalized ranks must lie in [0, 1] and preserve score ordering
        assert_allclose(rankdata(pred_ranks), rankdata(pred_scores), rtol=4)
        assert_array_less(pred_ranks, 1.01)
        assert_array_less(-0.1, pred_ranks)

    def test_model_clone(self):
        # sklearn compatibility: the estimator must survive clone()
        clone(self.clf)

    def tearDown(self):
        pass
# Allow running this test module directly.
# (Stray " |" extraction residue removed from the end of the file.)
if __name__ == '__main__':
    unittest.main()
import warnings
import backend as F
import pytest
import torch
from dgl.sparse import bspmm, diag, from_coo, val_like
from dgl.sparse.matmul import matmul
from .utils import (
clone_detach_and_grad,
dense_mask,
rand_coo,
rand_csc,
rand_csr,
rand_stride,
sparse_matrix_to_dense,
sparse_matrix_to_torch_sparse,
)
def METHOD_NAME(torch_A1, torch_A2):
    """torch.sparse.mm wrapper that silences the beta-feature UserWarning
    PyTorch emits for sparse operations; warning filters are restored on
    exit via catch_warnings."""
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=UserWarning)
        product = torch.sparse.mm(torch_A1, torch_A2)
    return product
@pytest.mark.parametrize("create_func", [rand_coo, rand_csr, rand_csc])
@pytest.mark.parametrize("shape", [(2, 7), (5, 2)])
@pytest.mark.parametrize("nnz", [1, 10])
@pytest.mark.parametrize("out_dim", [None, 10])
def test_spmm(create_func, shape, nnz, out_dim):
    """SpMM forward/backward vs a dense matmul reference.

    out_dim=None exercises the matrix-vector path (result flattened to 1-D).
    """
    dev = F.ctx()
    A = create_func(shape, nnz, dev)
    if out_dim is not None:
        X = torch.randn(shape[1], out_dim, requires_grad=True, device=dev)
    else:
        X = torch.randn(shape[1], requires_grad=True, device=dev)
    X = rand_stride(X)  # non-contiguous strides to catch layout bugs
    sparse_result = matmul(A, X)
    grad = torch.randn_like(sparse_result)
    sparse_result.backward(grad)
    # dense reference with an independent grad-tracking copy of X
    adj = sparse_matrix_to_dense(A)
    XX = clone_detach_and_grad(X)
    dense_result = torch.matmul(adj, XX)
    if out_dim is None:
        dense_result = dense_result.view(-1)
    dense_result.backward(grad)
    assert torch.allclose(sparse_result, dense_result, atol=1e-05)
    assert torch.allclose(X.grad, XX.grad, atol=1e-05)
    # A's gradient is only defined on its sparsity pattern: mask the dense grad
    assert torch.allclose(
        dense_mask(adj.grad, A),
        sparse_matrix_to_dense(val_like(A, A.val.grad)),
        atol=1e-05,
    )
@pytest.mark.parametrize("create_func", [rand_coo, rand_csr, rand_csc])
@pytest.mark.parametrize("shape", [(2, 7), (5, 2)])
@pytest.mark.parametrize("nnz", [1, 10])
def test_bspmm(create_func, shape, nnz):
    """Batched SpMM (values carry a trailing batch dim of 2) vs dense
    batched matmul reference, including gradients."""
    dev = F.ctx()
    A = create_func(shape, nnz, dev, 2)
    X = torch.randn(shape[1], 10, 2, requires_grad=True, device=dev)
    X = rand_stride(X)  # non-contiguous strides to catch layout bugs
    sparse_result = matmul(A, X)
    grad = torch.randn_like(sparse_result)
    sparse_result.backward(grad)
    # dense reference: batch axis moved first for torch's batched matmul
    XX = clone_detach_and_grad(X)
    torch_A = A.to_dense().clone().detach().requires_grad_()
    torch_result = torch_A.permute(2, 0, 1) @ XX.permute(2, 0, 1)
    torch_result.backward(grad.permute(2, 0, 1))
    assert torch.allclose(
        sparse_result.permute(2, 0, 1), torch_result, atol=1e-05
    )
    assert torch.allclose(X.grad, XX.grad, atol=1e-05)
    # A's gradient is only defined on its sparsity pattern
    assert torch.allclose(
        dense_mask(torch_A.grad, A),
        sparse_matrix_to_dense(val_like(A, A.val.grad)),
        atol=1e-05,
    )
@pytest.mark.parametrize("create_func1", [rand_coo, rand_csr, rand_csc])
@pytest.mark.parametrize("create_func2", [rand_coo, rand_csr, rand_csc])
@pytest.mark.parametrize("shape_n_m", [(5, 5), (5, 6)])
@pytest.mark.parametrize("shape_k", [3, 4])
@pytest.mark.parametrize("nnz1", [1, 10])
@pytest.mark.parametrize("nnz2", [1, 10])
def test_spspmm(create_func1, create_func2, shape_n_m, shape_k, nnz1, nnz2):
    """Sparse @ sparse across all format pairs vs the torch.sparse reference,
    checking values and gradients w.r.t. both operands."""
    dev = F.ctx()
    shape1 = shape_n_m
    shape2 = (shape_n_m[1], shape_k)  # inner dimensions must agree
    A1 = create_func1(shape1, nnz1, dev)
    A2 = create_func2(shape2, nnz2, dev)
    A3 = matmul(A1, A2)
    grad = torch.randn_like(A3.val)
    A3.val.backward(grad)
    # reference computation through torch sparse tensors
    torch_A1 = sparse_matrix_to_torch_sparse(A1)
    torch_A2 = sparse_matrix_to_torch_sparse(A2)
    torch_A3 = METHOD_NAME(torch_A1, torch_A2)
    torch_A3_grad = sparse_matrix_to_torch_sparse(A3, grad)
    torch_A3.backward(torch_A3_grad)
    with torch.no_grad():
        assert torch.allclose(A3.to_dense(), torch_A3.to_dense(), atol=1e-05)
        assert torch.allclose(
            val_like(A1, A1.val.grad).to_dense(),
            torch_A1.grad.to_dense(),
            atol=1e-05,
        )
        assert torch.allclose(
            val_like(A2, A2.val.grad).to_dense(),
            torch_A2.grad.to_dense(),
            atol=1e-05,
        )
def test_spspmm_duplicate():
    """matmul must reject a sparse operand containing duplicate (row, col)
    coordinates, in either operand position."""
    dev = F.ctx()
    # A1 contains the coordinate (0, 1) twice
    row = torch.tensor([1, 0, 0, 0, 1]).to(dev)
    col = torch.tensor([1, 1, 1, 2, 2]).to(dev)
    val = torch.randn(len(row)).to(dev)
    shape = (4, 4)
    A1 = from_coo(row, col, val, shape)
    # A2 has unique coordinates
    row = torch.tensor([1, 0, 0, 1]).to(dev)
    col = torch.tensor([1, 1, 2, 2]).to(dev)
    val = torch.randn(len(row)).to(dev)
    A2 = from_coo(row, col, val, shape)
    # pytest.raises replaces the try/bare-except/else idiom: the test fails
    # iff matmul does NOT raise, without silently swallowing typos in the
    # test body itself.
    with pytest.raises(Exception):
        matmul(A1, A2)
    with pytest.raises(Exception):
        matmul(A2, A1)
@pytest.mark.parametrize("create_func", [rand_coo, rand_csr, rand_csc])
@pytest.mark.parametrize("sparse_shape", [(5, 5), (5, 6)])
@pytest.mark.parametrize("nnz", [1, 10])
def test_sparse_diag_mm(create_func, sparse_shape, nnz):
    """A @ D (sparse times diagonal): values and gradients must match the
    torch.sparse reference."""
    dev = F.ctx()
    # D must be square with side equal to A's column count
    diag_shape = sparse_shape[1], sparse_shape[1]
    A = create_func(sparse_shape, nnz, dev)
    diag_val = torch.randn(sparse_shape[1], device=dev, requires_grad=True)
    D = diag(diag_val, diag_shape)
    B = matmul(A, D)
    grad = torch.randn_like(B.val)
    B.val.backward(grad)
    # reference computation through torch sparse tensors
    torch_A = sparse_matrix_to_torch_sparse(A)
    torch_D = sparse_matrix_to_torch_sparse(D)
    torch_B = METHOD_NAME(torch_A, torch_D)
    torch_B_grad = sparse_matrix_to_torch_sparse(B, grad)
    torch_B.backward(torch_B_grad)
    with torch.no_grad():
        assert torch.allclose(B.to_dense(), torch_B.to_dense(), atol=1e-05)
        assert torch.allclose(
            val_like(A, A.val.grad).to_dense(),
            torch_A.grad.to_dense(),
            atol=1e-05,
        )
        assert torch.allclose(
            diag(D.val.grad, D.shape).to_dense(),
            torch_D.grad.to_dense(),
            atol=1e-05,
        )
@pytest.mark.parametrize("create_func", [rand_coo, rand_csr, rand_csc])
@pytest.mark.parametrize("sparse_shape", [(5, 5), (5, 6)])
@pytest.mark.parametrize("nnz", [1, 10])
def test_diag_sparse_mm(create_func, sparse_shape, nnz):
    """D @ A (diagonal times sparse): values and gradients must match the
    torch.sparse reference."""
    dev = F.ctx()
    # D must be square with side equal to A's row count
    n_rows = sparse_shape[0]
    sp_mat = create_func(sparse_shape, nnz, dev)
    d_vals = torch.randn(n_rows, device=dev, requires_grad=True)
    dg = diag(d_vals, (n_rows, n_rows))
    prod = matmul(dg, sp_mat)
    out_grad = torch.randn_like(prod.val)
    prod.val.backward(out_grad)
    # reference computation through torch sparse tensors
    ref_mat = sparse_matrix_to_torch_sparse(sp_mat)
    ref_dg = sparse_matrix_to_torch_sparse(dg)
    ref_prod = METHOD_NAME(ref_dg, ref_mat)
    ref_prod.backward(sparse_matrix_to_torch_sparse(prod, out_grad))
    with torch.no_grad():
        assert torch.allclose(prod.to_dense(), ref_prod.to_dense(), atol=1e-05)
        assert torch.allclose(
            val_like(sp_mat, sp_mat.val.grad).to_dense(),
            ref_mat.grad.to_dense(),
            atol=1e-05,
        )
        assert torch.allclose(
            diag(dg.val.grad, dg.shape).to_dense(),
            ref_dg.grad.to_dense(),
            atol=1e-05,
        )
import logging
import time
import os
import random
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
from flask import Flask, request, jsonify
from healthcheck import HealthCheck
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
# --- service startup: error reporting, logging, model load, defaults ---
sentry_sdk.init(dsn=os.getenv("SENTRY_DSN"), integrations=[FlaskIntegration()])
logging.basicConfig(format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO)
logger = logging.getLogger(__name__)

PRETRAINED_MODEL_NAME_OR_PATH = os.environ.get(
    "PRETRAINED_MODEL_NAME_OR_PATH", "DeepPavlov/rudialogpt3_medium_based_on_gpt2_v2"
)
logger.info(f"PRETRAINED_MODEL_NAME_OR_PATH = {PRETRAINED_MODEL_NAME_OR_PATH}")

# pin inference to GPU 0 when CUDA is available, otherwise run on CPU
cuda = torch.cuda.is_available()
if cuda:
    torch.cuda.set_device(0)
    device = "cuda"
else:
    device = "cpu"

try:
    tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL_NAME_OR_PATH)
    model = AutoModelForCausalLM.from_pretrained(PRETRAINED_MODEL_NAME_OR_PATH).to(device)
    model.eval()  # inference only: disable dropout etc.
    logger.info("dialogpt model is ready")
except Exception as e:
    # surface startup failures to Sentry, then abort the service
    sentry_sdk.capture_exception(e)
    logger.exception(e)
    raise e

logger.info(f"dialogpt is set to run on {device}")

# probability of requesting the short ("1") reply-length bucket (see
# format_dialogue_for_inference)
SHORT_UTTERANCE_PROBA = 0.7
MAX_HISTORY_DEPTH = os.environ.get("MAX_HISTORY_DEPTH")
MAX_HISTORY_DEPTH = int(MAX_HISTORY_DEPTH) if MAX_HISTORY_DEPTH else MAX_HISTORY_DEPTH

# default HF generate() kwargs; num_return_sequences is overwritten per request
params_default = {
    "max_length": 128,
    "no_repeat_ngram_size": 3,
    "do_sample": True,
    "top_k": 20,
    "top_p": 0.9,
    "temperature": 0.7,
    "num_return_sequences": 3,
    "device": device,
    "is_always_use_length": True,
}
def inputs_by_length(input_: dict, length_rep=None):
    """Render one utterance as ``|speaker|length_bucket|text``.

    length_rep is the utterance's token count; when None it is measured with
    the module-level tokenizer. The bucket is "1" (<=15 tokens), "2" (<=50),
    "3" (<=256) or "-" (longer, or length conditioning disabled via
    params_default["is_always_use_length"]).
    """
    if length_rep is None:
        length_rep = len(tokenizer.encode(input_["text"]))
    length_param = "-"
    if params_default["is_always_use_length"]:
        # first threshold that fits decides the bucket
        for bound, bucket in ((15, "1"), (50, "2"), (256, "3")):
            if length_rep <= bound:
                length_param = bucket
                break
    return f"|{input_['speaker']}|{length_param}|{input_['text']}"
def METHOD_NAME(context, context_lengths, context_depth=3, encode=False, tokenizer=None):
    """Serialize a dialogue whose LAST utterance is the target BOT utterance.

    context: list of dicts ({"text": ..., "speaker": "human"/"bot"}) or plain
    strings (speakers then inferred by position parity, last one = bot, i.e.
    speaker id 1). Only the last `context_depth` utterances are kept.
    context_lengths: precomputed token counts zipped with the kept utterances.
    Returns the "|speaker|len|text"-framed string, or token ids when
    encode=True (requires `tokenizer`).
    """
    if len(context) > 0 and isinstance(context[0], str):
        context_len = len(context)
        # the last uttr is from BOT: (context_len - uttr_id) % 2 is 1 there
        inputs = [{"text": uttr, "speaker": (context_len - uttr_id) % 2} for uttr_id, uttr in enumerate(context)]
        inputs = inputs[-context_depth:]
    else:
        inputs = [{"text": uttr["text"], "speaker": 1 if uttr["speaker"] == "bot" else 0} for uttr in context]
        inputs = inputs[-context_depth:]
    inputs_text = "".join([inputs_by_length(input_, inp_len) for input_, inp_len in zip(inputs, context_lengths)])
    if encode:
        # if encode, return encoded context
        inputs_token_ids = tokenizer.encode(inputs_text, return_tensors="pt")
        return inputs_token_ids
    return inputs_text
def format_dialogue_for_inference(context, context_depth=4, encode=False, tokenizer=None):
    """Serialize a dialogue whose LAST utterance is the HUMAN's, appending an
    opening "|1|<len>|" frame so the model generates the bot's reply next.

    context: list of dicts ({"text": ..., "speaker": "human"/"bot"}) or plain
    strings (speakers then inferred by position parity, last one = human, i.e.
    speaker id 0). Only the last `context_depth` utterances are kept. The
    requested reply-length bucket is "1" (short) with probability
    SHORT_UTTERANCE_PROBA, otherwise "2".
    Returns the formatted string, or token ids when encode=True (requires
    `tokenizer`).
    """
    if len(context) > 0 and isinstance(context[0], str):
        context_len = len(context)
        # the last uttr is from HUMAN: (context_len - uttr_id - 1) % 2 is 0 there
        inputs = [{"text": uttr, "speaker": (context_len - uttr_id - 1) % 2} for uttr_id, uttr in enumerate(context)]
        inputs = inputs[-context_depth:]
    else:
        inputs = [{"text": uttr["text"], "speaker": 1 if uttr["speaker"] == "bot" else 0} for uttr in context]
        inputs = inputs[-context_depth:]
    inputs_text = "".join([inputs_by_length(input_) for input_ in inputs])
    length = "2" if random.uniform(0, 1) > SHORT_UTTERANCE_PROBA else "1"
    inputs_text += f"|1|{length}|"
    if encode:
        # if encode, return encoded context
        inputs_token_ids = tokenizer.encode(inputs_text, return_tensors="pt")
        return inputs_token_ids
    return inputs_text
app = Flask(__name__)
# /healthcheck endpoint for container orchestration probes
health = HealthCheck(app, "/healthcheck")
# silence per-request access logs from the WSGI server
logging.getLogger("werkzeug").setLevel("WARNING")
@app.route("/ping", methods=["POST"])
def ping():
    # simple liveness endpoint
    return "pong"
def generate(context, num_return_sequences, context_depth):
    # Build the model prompt (last utterance is the human's) and sample
    # `num_return_sequences` bot continuations.
    bot_input_ids = format_dialogue_for_inference(
        context, context_depth=context_depth, encode=True, tokenizer=tokenizer
    )
    bot_input_ids = bot_input_ids.to(device)
    # NOTE(review): mutates the shared params_default dict — not safe under
    # concurrent requests; confirm the server runs single-threaded.
    params_default["num_return_sequences"] = num_return_sequences
    chat_history_ids = model.generate(bot_input_ids, pad_token_id=tokenizer.eos_token_id, **params_default)
    # strip the prompt: keep only newly generated tokens
    resp_tokens = chat_history_ids[:, bot_input_ids.shape[-1] :]
    outputs = [tokenizer.decode(x, skip_special_tokens=True) for x in resp_tokens]
    # the model emits "|speaker|len|text" frames; keep text before the first "|"
    outputs = [x.split("|")[0] for x in outputs]
    return outputs
@app.route("/respond", methods=["POST"])
def respond():
    """Flask endpoint: generate reply hypotheses for each dialogue context
    in the request batch; on failure, return empty lists for every context."""
    st_time = time.time()
    dialog_contexts = request.json.get("dialog_contexts", [])
    num_return_sequences = request.json.get("num_return_sequences", 3)
    try:
        batch_generated_responses = []
        for context in dialog_contexts:
            # context is a list of dicts, each dict contains text and speaker label
            # context = [{"text": "utterance text", "speaker": "human"}, ...]
            logger.info(f"dialogpt inputs: {context[-MAX_HISTORY_DEPTH:]}")
            hypotheses = generate(
                context[-MAX_HISTORY_DEPTH:], num_return_sequences=num_return_sequences, context_depth=MAX_HISTORY_DEPTH
            )
            logger.info(f"dialogpt hypotheses: {hypotheses}")
            batch_generated_responses.append(hypotheses)
    except Exception as exc:
        logger.exception(exc)
        sentry_sdk.capture_exception(exc)
        # one independent empty list per context; `[[]] * n` would alias a
        # single list object across all entries
        batch_generated_responses = [[] for _ in dialog_contexts]
    total_time = time.time() - st_time
    logger.info(f"dialogpt exec time: {total_time:.3f}s")
    return jsonify(batch_generated_responses)
4,669 | inputs | """
pres_to_field
=============
Autogenerated DPF operator classes.
"""
from warnings import warn
from ansys.dpf.core.dpf_operator import Operator
from ansys.dpf.core.METHOD_NAME import Input, _Inputs
from ansys.dpf.core.outputs import Output, _Outputs
from ansys.dpf.core.operators.specification import PinSpecification, Specification
# Autogenerated DPF operator wrapper — structure mirrors the other operator
# classes produced by the same generator.
class pres_to_field(Operator):
    """Read the presol generated file from mapdl.

    Parameters
    ----------
    filepath : str
        Filepath

    Examples
    --------
    >>> from ansys.dpf import core as dpf

    >>> # Instantiate operator
    >>> op = dpf.operators.result.pres_to_field()

    >>> # Make input connections
    >>> my_filepath = str()
    >>> op.inputs.filepath.connect(my_filepath)

    >>> # Instantiate operator and connect inputs in one line
    >>> op = dpf.operators.result.pres_to_field(
    ...     filepath=my_filepath,
    ... )

    >>> # Get output data
    >>> result_field = op.outputs.field()
    """

    def __init__(self, filepath=None, config=None, server=None):
        # "PRES_Reader" is the server-side registration name of this operator
        super().__init__(name="PRES_Reader", config=config, server=server)
        self._inputs = InputsPresToField(self)
        self._outputs = OutputsPresToField(self)
        if filepath is not None:
            self.METHOD_NAME.filepath.connect(filepath)

    @staticmethod
    def _spec():
        # Pin layout consumed by the DPF framework: input pin 0 takes the
        # file path, output pin 0 yields the parsed field.
        description = """Read the presol generated file from mapdl."""
        spec = Specification(
            description=description,
            map_input_pin_spec={
                0: PinSpecification(
                    name="filepath",
                    type_names=["string"],
                    optional=False,
                    document="""Filepath""",
                ),
            },
            map_output_pin_spec={
                0: PinSpecification(
                    name="field",
                    type_names=["field"],
                    optional=False,
                    document="""""",
                ),
            },
        )
        return spec

    @staticmethod
    def default_config(server=None):
        """Returns the default config of the operator.

        This config can then be changed to the user needs and be used to
        instantiate the operator. The Configuration allows to customize
        how the operation will be processed by the operator.

        Parameters
        ----------
        server : server.DPFServer, optional
            Server with channel connected to the remote or local instance. When
            ``None``, attempts to use the global server.
        """
        return Operator.default_config(name="PRES_Reader", server=server)

    @property
    def METHOD_NAME(self):
        """Enables to connect inputs to the operator

        Returns
        --------
        inputs : InputsPresToField
        """
        return super().METHOD_NAME

    @property
    def outputs(self):
        """Enables to get outputs of the operator by evaluating it

        Returns
        --------
        outputs : OutputsPresToField
        """
        return super().outputs
class InputsPresToField(_Inputs):
    """Intermediate class used to connect user inputs to
    pres_to_field operator.

    Examples
    --------
    >>> from ansys.dpf import core as dpf
    >>> op = dpf.operators.result.pres_to_field()
    >>> my_filepath = str()
    >>> op.inputs.filepath.connect(my_filepath)
    """

    def __init__(self, op: Operator):
        super().__init__(pres_to_field._spec().METHOD_NAME, op)
        # expose pin 0 as the `filepath` input
        self._filepath = Input(pres_to_field._spec().input_pin(0), 0, op, -1)
        self._inputs.append(self._filepath)

    @property
    def filepath(self):
        """Allows to connect filepath input to the operator.

        Filepath

        Parameters
        ----------
        my_filepath : str

        Examples
        --------
        >>> from ansys.dpf import core as dpf
        >>> op = dpf.operators.result.pres_to_field()
        >>> op.inputs.filepath.connect(my_filepath)
        >>> # or
        >>> op.inputs.filepath(my_filepath)
        """
        return self._filepath
class OutputsPresToField(_Outputs):
    """Intermediate class used to get outputs from
    pres_to_field operator.

    (Stray " |" extraction residue removed from the final line.)

    Examples
    --------
    >>> from ansys.dpf import core as dpf
    >>> op = dpf.operators.result.pres_to_field()
    >>> # Connect inputs : op.inputs. ...
    >>> result_field = op.outputs.field()
    """

    def __init__(self, op: Operator):
        super().__init__(pres_to_field._spec().outputs, op)
        # expose pin 0 as the `field` output
        self._field = Output(pres_to_field._spec().output_pin(0), 0, op)
        self._outputs.append(self._field)

    @property
    def field(self):
        """Allows to get field output of the operator

        Returns
        ----------
        my_field : Field

        Examples
        --------
        >>> from ansys.dpf import core as dpf
        >>> op = dpf.operators.result.pres_to_field()
        >>> # Connect inputs : op.inputs. ...
        >>> result_field = op.outputs.field()
        """  # noqa: E501
        return self._field
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
# public API of this autogenerated module
__all__ = [
    'GetComputeResult',
    'AwaitableGetComputeResult',
    'get_compute',
    'get_compute_output',
]
@pulumi.output_type
class GetComputeResult:
    """
    Machine Learning compute object wrapped into ARM resource envelope.
    """
    # Autogenerated output type: __init__ validates the raw provider values
    # and stores them via pulumi.set; typed access goes through the
    # @pulumi.getter properties below.
    def __init__(__self__, id=None, identity=None, location=None, name=None, properties=None, sku=None, system_data=None, tags=None, type=None):
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if identity and not isinstance(identity, dict):
            raise TypeError("Expected argument 'identity' to be a dict")
        pulumi.set(__self__, "identity", identity)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if properties and not isinstance(properties, dict):
            raise TypeError("Expected argument 'properties' to be a dict")
        pulumi.set(__self__, "properties", properties)
        if sku and not isinstance(sku, dict):
            raise TypeError("Expected argument 'sku' to be a dict")
        pulumi.set(__self__, "sku", sku)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def identity(self) -> Optional['outputs.ManagedServiceIdentityResponse']:
        """
        The identity of the resource.
        """
        return pulumi.get(self, "identity")

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Specifies the location of the resource.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def properties(self) -> Any:
        """
        Compute properties
        """
        return pulumi.get(self, "properties")

    @property
    @pulumi.getter
    def sku(self) -> Optional['outputs.SkuResponse']:
        """
        The sku of the workspace.
        """
        return pulumi.get(self, "sku")

    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> 'outputs.SystemDataResponse':
        """
        Azure Resource Manager metadata containing createdBy and modifiedBy information.
        """
        return pulumi.get(self, "system_data")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Contains resource tags defined as key/value pairs.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
class AwaitableGetComputeResult(GetComputeResult):
    # Awaitable shim so the result can be `await`-ed in Pulumi programs.
    # pylint: disable=using-constant-test
    def __await__(self):
        # the unreachable `yield` makes __await__ a generator function
        # without ever actually yielding; awaiting returns a plain copy
        if False:
            yield self
        return GetComputeResult(
            id=self.id,
            identity=self.identity,
            location=self.location,
            name=self.name,
            properties=self.properties,
            sku=self.sku,
            system_data=self.system_data,
            tags=self.tags,
            type=self.type)
def METHOD_NAME(compute_name: Optional[str] = None,
                resource_group_name: Optional[str] = None,
                workspace_name: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetComputeResult:
    """
    Gets compute definition by its name. Any secrets (storage keys, service credentials, etc) are not returned - use 'keys' nested resource to get them.


    :param str compute_name: Name of the Azure Machine Learning compute.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str workspace_name: Name of Azure Machine Learning workspace.
    """
    # marshal arguments into the provider's camelCase invoke payload
    __args__ = dict()
    __args__['computeName'] = compute_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['workspaceName'] = workspace_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # synchronous invoke against the pinned API version of the provider
    __ret__ = pulumi.runtime.invoke('azure-native:machinelearningservices/v20230401:getCompute', __args__, opts=opts, typ=GetComputeResult).value

    return AwaitableGetComputeResult(
        id=pulumi.get(__ret__, 'id'),
        identity=pulumi.get(__ret__, 'identity'),
        location=pulumi.get(__ret__, 'location'),
        name=pulumi.get(__ret__, 'name'),
        properties=pulumi.get(__ret__, 'properties'),
        sku=pulumi.get(__ret__, 'sku'),
        system_data=pulumi.get(__ret__, 'system_data'),
        tags=pulumi.get(__ret__, 'tags'),
        type=pulumi.get(__ret__, 'type'))
# Output-lifted variant: accepts pulumi.Input values and returns an Output;
# the decorator synthesizes the body, so the `...` placeholder is intentional.
# (Stray " |" extraction residue removed from the final line.)
@_utilities.lift_output_func(METHOD_NAME)
def get_compute_output(compute_name: Optional[pulumi.Input[str]] = None,
                       resource_group_name: Optional[pulumi.Input[str]] = None,
                       workspace_name: Optional[pulumi.Input[str]] = None,
                       opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetComputeResult]:
    """
    Gets compute definition by its name. Any secrets (storage keys, service credentials, etc) are not returned - use 'keys' nested resource to get them.


    :param str compute_name: Name of the Azure Machine Learning compute.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str workspace_name: Name of Azure Machine Learning workspace.
    """
    ...
import logging
import os
import shutil
import sqlite3
import warnings
from aequilibrae import global_logger
from aequilibrae.log import Log
from aequilibrae.parameters import Parameters
from aequilibrae.project.about import About
from aequilibrae.project.data import Matrices
from aequilibrae.project.database_connection import database_connection
from aequilibrae.context import activate_project, get_active_project
from aequilibrae.project.network import Network
from aequilibrae.project.zoning import Zoning
from aequilibrae.reference_files import spatialite_database
from aequilibrae.log import get_log_handler
from aequilibrae.project.project_cleaning import clean
from aequilibrae.project.project_creation import initialize_tables
from aequilibrae.transit.transit import Transit
class Project:
    """AequilibraE project class

    .. code-block:: python
        :caption: Create Project

        >>> newfile = Project()
        >>> newfile.new('/tmp/new_project')

    .. code-block:: python
        :caption: Open Project

        >>> from aequilibrae.project import Project
        >>> existing = Project()
        >>> existing.open('/tmp/test_project')
        >>> #Let's check some of the project's properties
        >>> existing.network.list_modes()
        ['M', 'T', 'b', 'c', 't', 'w']
        >>> existing.network.count_links()
        76
        >>> existing.network.count_nodes()
        24
    """

    def __init__(self):
        # All fields are populated by open()/new(); a fresh instance holds no project.
        self.path_to_file: str = None
        self.project_base_path = ""
        self.source: str = None
        self.conn: sqlite3.Connection = None
        self.network: Network = None
        self.about: About = None
        self.logger: logging.Logger = None
        self.transit: Transit = None

    @classmethod
    def from_path(cls, project_folder):
        """Alternate constructor: open and return the project at *project_folder*."""
        # Instantiate through ``cls`` (was a hard-coded ``Project()``) so
        # subclasses get instances of their own type.
        project = cls()
        project.METHOD_NAME(project_folder)
        return project

    def METHOD_NAME(self, project_path: str) -> None:
        """
        Loads project from disk

        :Arguments:
            **project_path** (:obj:`str`): Full path to the project data folder. If the project inside does
            not exist, it will fail.
        """
        file_name = os.path.join(project_path, "project_database.sqlite")
        if not os.path.isfile(file_name):
            raise FileNotFoundError("Model does not exist. Check your path and try again")

        self.project_base_path = project_path
        self.path_to_file = file_name
        self.source = self.path_to_file
        self.__setup_logger()
        self.activate()

        self.conn = self.connect()

        self.__load_objects()
        global_logger.info(f"Opened project on {self.project_base_path}")
        clean(self)

    def new(self, project_path: str) -> None:
        """Creates a new project

        :Arguments:
            **project_path** (:obj:`str`): Full path to the project data folder. If folder exists, it will fail
        """
        self.project_base_path = project_path
        self.path_to_file = os.path.join(self.project_base_path, "project_database.sqlite")
        self.source = self.path_to_file

        if os.path.isdir(project_path):
            raise FileExistsError("Location already exists. Choose a different name or remove the existing directory")

        # We create the project folder and create the base file
        os.mkdir(self.project_base_path)
        self.__setup_logger()
        self.activate()

        self.__create_empty_network()
        self.__load_objects()
        self.about.create()
        global_logger.info(f"Created project on {self.project_base_path}")

    def close(self) -> None:
        """Safely closes the project"""
        if not self.project_base_path:
            global_logger.warning("This Aequilibrae project is not opened")
            return

        try:
            self.conn.commit()
            clean(self)
            self.conn.close()
            # Drop the DB-backed caches on the network object so they cannot be
            # used after the connection is closed.  (The former
            # ``for obj in [self.parameters, self.network]: del obj`` loop was a
            # no-op: ``del obj`` only unbinds the loop variable.)
            del self.network.link_types
            del self.network.modes

            global_logger.info(f"Closed project on {self.project_base_path}")
        except (sqlite3.ProgrammingError, AttributeError):
            # Connection already closed or sub-objects never created.
            global_logger.warning(f"This project at {self.project_base_path} is already closed")
        finally:
            self.deactivate()

    def load(self, project_path: str) -> None:
        """
        Loads project from disk

        .. deprecated:: 0.7.0
            Use :func:`open` instead.

        :Arguments:
            **project_path** (:obj:`str`): Full path to the project data folder. If the project inside does
            not exist, it will fail.
        """
        warnings.warn(f"Function has been deprecated. Use my_project.open({project_path}) instead", DeprecationWarning)
        self.METHOD_NAME(project_path)

    def connect(self):
        """Returns a fresh connection to this project's network database."""
        return database_connection("network", self.project_base_path)

    def activate(self):
        """Makes this project the globally active one."""
        activate_project(self)

    def deactivate(self):
        # Only clear the global reference when *this* project is the active one.
        if get_active_project(must_exist=False) is self:
            activate_project(None)

    def log(self) -> Log:
        """Returns a log object

        allows the user to read the log or clear it"""
        return Log(self.project_base_path)

    def __load_objects(self):
        # Guarantee the matrices folder exists before handing out accessors.
        matrix_folder = os.path.join(self.project_base_path, "matrices")
        if not os.path.isdir(matrix_folder):
            os.mkdir(matrix_folder)

        self.network = Network(self)
        self.about = About(self)
        self.matrices = Matrices(self)

    @property
    def project_parameters(self) -> Parameters:
        """Wrapper around this project's parameter file."""
        return Parameters(self)

    @property
    def parameters(self) -> dict:
        """Contents of the project's parameter file as a plain dictionary."""
        return self.project_parameters.parameters

    def check_file_indices(self) -> None:
        """Makes results_database.sqlite and the matrices folder compatible with project database"""
        raise NotImplementedError

    @property
    def zoning(self):
        """Zoning accessor bound to this project's network."""
        return Zoning(self.network)

    def __create_empty_network(self):
        # Start from the packaged empty spatialite DB, then configure it.
        shutil.copyfile(spatialite_database, self.path_to_file)
        self.conn = self.connect()

        # Write parameters to the project folder
        p = self.project_parameters
        p.parameters["system"]["logging_directory"] = self.project_base_path
        p.write_back()

        # Create actual tables
        cursor = self.conn.cursor()
        cursor.execute("PRAGMA foreign_keys = ON;")
        self.conn.commit()
        initialize_tables(self, "network")

    def __setup_logger(self):
        # One named logger per project path so concurrently open projects do
        # not share handlers; never propagate to the root logger.
        self.logger = logging.getLogger(f"aequilibrae.{self.project_base_path}")
        self.logger.propagate = False
        self.logger.setLevel(logging.DEBUG)

        # The parameter file may not exist yet (project creation), so fall
        # back to the packaged defaults.
        par = self.parameters or self.project_parameters._default
        do_log = par["system"]["logging"]
        if do_log:
            log_file = os.path.join(self.project_base_path, "aequilibrae.log")
            self.logger.addHandler(get_log_handler(log_file))
import os
import re
import numpy as np
import pytest
from astropy.table.scripts import showtable
# Test-data directories of astropy's I/O sub-packages, resolved relative to
# this file's location in the source tree.
ROOT = os.path.abspath(os.path.dirname(__file__))
ASCII_ROOT = os.path.join(ROOT, "..", "..", "io", "ascii", "tests")
FITS_ROOT = os.path.join(ROOT, "..", "..", "io", "fits", "tests")
VOTABLE_ROOT = os.path.join(ROOT, "..", "..", "io", "votable", "tests")
def test_missing_file(capsys):
    """A nonexistent path prints an ENOENT error on stderr instead of raising."""
    showtable.main(["foobar.fits"])
    out, err = capsys.readouterr()
    assert err.startswith("ERROR: [Errno 2] No such file or directory: 'foobar.fits'")
def test_info(capsys):
    """``--info`` prints the column names and dtypes, not the data."""
    showtable.main([os.path.join(FITS_ROOT, "data/table.fits"), "--info"])
    out, err = capsys.readouterr()
    assert out.splitlines() == [
        "<Table length=3>",
        " name dtype ",
        "------ -------",
        "target bytes20",
        " V_mag float32",
    ]
def test_stats(capsys):
    """``--stats`` prints per-column mean/std/min/max summaries."""
    showtable.main([os.path.join(FITS_ROOT, "data/table.fits"), "--stats"])
    out, err = capsys.readouterr()
    expected = [
        "<Table length=3>",
        " name mean std min max ",
        "------ ------- ------- ---- ----",
        "target -- -- -- --",
        " V_mag 12.866[0-9]? 1.72111 11.1 15.2",
    ]
    out = out.splitlines()
    assert out[:4] == expected[:4]
    # Here we use re.match as in some cases one of the values above is
    # platform-dependent.
    assert re.match(expected[4], out[4]) is not None
def METHOD_NAME(capsys):
    """Default invocation on a FITS table prints its rows."""
    showtable.main([os.path.join(FITS_ROOT, "data/table.fits")])
    out, err = capsys.readouterr()
    assert out.splitlines() == [
        " target V_mag",
        "------- -----",
        "NGC1001 11.1",
        "NGC1002 12.3",
        "NGC1003 15.2",
    ]
def test_fits_hdu(capsys):
    """``--hdu`` selects a named HDU; zero-width units trigger a UnitsWarning."""
    from astropy.units import UnitsWarning

    with pytest.warns(UnitsWarning):
        showtable.main(
            [
                os.path.join(FITS_ROOT, "data/zerowidth.fits"),
                "--hdu",
                "AIPS OF",
            ]
        )

    out, err = capsys.readouterr()
    assert out.startswith(
        " TIME SOURCE ID ANTENNA NO. SUBARRAY FREQ ID ANT FLAG STATUS 1\n"
        " DAYS \n"
        "---------- --------- ----------- -------- ------- -------- --------\n"
        "0.14438657 1 10 1 1 4 4\n"
    )
def test_csv(capsys):
    """CSV input is auto-detected and rendered as a table."""
    showtable.main([os.path.join(ASCII_ROOT, "data/simple_csv.csv")])
    out, err = capsys.readouterr()
    assert out.splitlines() == [
        " a b c ",
        "--- --- ---",
        " 1 2 3",
        " 4 5 6",
    ]
def test_ascii_format(capsys):
    """``--format`` forces a specific ascii reader (commented_header)."""
    showtable.main(
        [
            os.path.join(ASCII_ROOT, "data/commented_header.dat"),
            "--format",
            "ascii.commented_header",
        ]
    )
    out, err = capsys.readouterr()
    assert out.splitlines() == [
        " a b c ",
        "--- --- ---",
        " 1 2 3",
        " 4 5 6",
    ]
def test_ascii_delimiter(capsys):
    """``--delimiter`` is forwarded to the ascii reader (pipe-separated data)."""
    showtable.main(
        [
            os.path.join(ASCII_ROOT, "data/simple2.txt"),
            "--format",
            "ascii",
            "--delimiter",
            "|",
        ]
    )
    out, err = capsys.readouterr()
    assert out.splitlines() == [
        "obsid redshift X Y object rad ",
        "----- -------- ---- ---- ----------- ----",
        " 3102 0.32 4167 4085 Q1250+568-A 9.0",
        " 3102 0.32 4706 3916 Q1250+568-B 14.0",
        " 877 0.22 4378 3892 'Source 82' 12.5",
    ]
def test_votable(capsys):
    """``--table-id`` selects a VOTable table; ``--max-width`` truncates columns."""
    with np.errstate(over="ignore"):
        # https://github.com/astropy/astropy/issues/13341
        showtable.main(
            [
                os.path.join(VOTABLE_ROOT, "data/regression.xml"),
                "--table-id",
                "main_table",
                "--max-width",
                "50",
            ]
        )
    out, err = capsys.readouterr()
    assert out.splitlines() == [
        " string_test string_test_2 ... bitarray2 ",
        "----------------- ------------- ... -------------",
        " String & test Fixed stri ... True .. False",
        "String & test 0123456789 ... -- .. --",
        " XXXX XXXX ... -- .. --",
        " ... -- .. --",
        " ... -- .. --",
    ]
def test_max_lines(capsys):
    """``--max-lines`` elides middle rows and prints the total row count."""
    showtable.main(
        [
            os.path.join(ASCII_ROOT, "data/cds2.dat"),
            "--format",
            "ascii.cds",
            "--max-lines",
            "7",
            "--max-width",
            "30",
        ]
    )
    out, err = capsys.readouterr()
    assert out.splitlines() == [
        " SST ... Note",
        " ... ",
        "--------------- ... ----",
        "041314.1+281910 ... --",
        " ... ... ...",
        "044427.1+251216 ... --",
        "044642.6+245903 ... --",
        "Length = 215 rows",
    ]
def test_show_dtype(capsys):
    """``--show-dtype`` adds a dtype row under the column headers."""
    showtable.main([os.path.join(FITS_ROOT, "data/table.fits"), "--show-dtype"])
    out, err = capsys.readouterr()
    assert out.splitlines() == [
        " target V_mag ",
        "bytes20 float32",
        "------- -------",
        "NGC1001 11.1",
        "NGC1002 12.3",
        "NGC1003 15.2",
    ]
def test_hide_unit(capsys):
    """``--hide-unit`` suppresses the unit row that CDS tables show by default."""
    # First run without the flag: units are displayed.
    showtable.main([os.path.join(ASCII_ROOT, "data/cds.dat"), "--format", "ascii.cds"])
    out, err = capsys.readouterr()
    assert out.splitlines() == [
        "Index RAh RAm RAs DE- DEd DEm DEs Match Class AK Fit ",
        " h min s deg arcmin arcsec mag GMsun",
        "----- --- --- ----- --- --- ------ ------ ----- ----- --- -----",
        " 1 3 28 39.09 + 31 6 1.9 -- I* -- 1.35",
    ]

    # Same file with --hide-unit: the unit row disappears.
    showtable.main(
        [
            os.path.join(ASCII_ROOT, "data/cds.dat"),
            "--format",
            "ascii.cds",
            "--hide-unit",
        ]
    )
    out, err = capsys.readouterr()
    assert out.splitlines() == [
        "Index RAh RAm RAs DE- DEd DEm DEs Match Class AK Fit ",
        "----- --- --- ----- --- --- --- --- ----- ----- --- ----",
        " 1 3 28 39.09 + 31 6 1.9 -- I* -- 1.35",
    ]
import logging
from typing import Dict, List, Optional, Tuple
from robusta.core.model.env_vars import ROBUSTA_LOGO_URL
from robusta.integrations.common.requests import HttpMethod, check_response_succeed, process_request
# REST API version prefix, joined between the server url and every endpoint.
_API_PREFIX = "api/v4"
class MattermostClient:
    """Small client for the Mattermost REST API (v4).

    On construction it authenticates the bot, resolves the target team and
    channel ids and (for admin bots) normalizes the bot's name and logo, so
    that ``post_message``/``upload_files`` can be used afterwards.
    """

    channel_id: str
    bot_id: str
    team_id: Optional[str]

    def __init__(self, url: str, token: str, token_id: str, channel_name: str, team: Optional[str], team_id: Optional[str]):
        """
        Set the Mattermost webhook url.
        """
        self.client_url = url
        self.token = token
        self.token_id = token_id
        self.team_id = team_id
        self.is_admin = self.is_admin_bot()
        self._init_setup(channel_name, team)

    def METHOD_NAME(self, url: str, method: HttpMethod, **kwargs):
        """Send a request with the bot's bearer token attached."""
        headers = kwargs.pop("headers", {})
        headers["Authorization"] = f"Bearer {self.token}"
        return process_request(url, method, headers=headers, **kwargs)

    def _get_full_mattermost_url(self, endpoint: str) -> str:
        """Join base url, API prefix and *endpoint* into a full request url."""
        return "/".join([self.client_url, _API_PREFIX, endpoint])

    def get_token_owner_id(self) -> Optional[str]:
        """Return the id of the user owning the access token, or ``None``."""
        endpoint = f"users/tokens/{self.token_id}"
        url = self._get_full_mattermost_url(endpoint)
        response = self.METHOD_NAME(url, HttpMethod.GET)
        if not check_response_succeed(response):
            logging.warning("Cannot get owner token, probably bot has not enough permissions")
            return None
        response_data = response.json()
        return response_data.get("user_id")

    def is_admin_bot(self) -> bool:
        """Return whether the bot account holds the ``system_admin`` role.

        Fix: previously returned ``None`` implicitly when the API call
        failed; now returns ``False`` explicitly (same truthiness, honest
        typing for a method used as a bool).
        """
        endpoint = "/users/me"
        url = self._get_full_mattermost_url(endpoint)
        response = self.METHOD_NAME(url, HttpMethod.GET)
        if not check_response_succeed(response):
            logging.error("Could not connect to Mattermost with bot account")
            return False

        response_data = response.json()
        if "system_admin" in response_data.get("roles"):
            logging.info("Using Mattermost admin bot")
            return True

        logging.warning("Bot is not an admin. You will not be able to post to private channels.")
        if self.team_id is None:
            # Non-admin bots cannot search channels across teams, so the team
            # must be configured explicitly.
            logging.error(
                "You need to provide 'team_id' in your configuration if your bot account is not an admin."
            )
        return False

    def update_bot_settings(self, bot_id: str):
        """Normalize the bot's username/display name, then refresh its logo."""
        endpoint = f"bots/{bot_id}"
        url = self._get_full_mattermost_url(endpoint)
        response = self.METHOD_NAME(
            url, HttpMethod.PUT, json={"username": "robusta", "display_name": "Robusta"}
        )
        if not check_response_succeed(response):
            logging.warning("Cannot update bot settings, probably bot has not enough permissions")
        self.update_bot_logo(bot_id)

    def update_bot_logo(self, bot_id: str):
        """Upload the Robusta logo as the bot's profile image (best effort)."""
        endpoint = f"users/{bot_id}/image"
        img_data = process_request(ROBUSTA_LOGO_URL, HttpMethod.GET).content
        url = self._get_full_mattermost_url(endpoint)
        response = self.METHOD_NAME(url, HttpMethod.POST, files=[("image", ("image", img_data))])
        if not check_response_succeed(response):
            logging.warning("Cannot update bot logo, probably bot has not enough permissions")

    def _init_setup(self, channel_name: str, team_name: Optional[str] = None):
        """Resolve bot/team/channel ids; admin-only steps are skipped otherwise."""
        if self.is_admin:
            self.bot_id = self.get_token_owner_id()
            # NOTE: for admin bots an explicitly configured team_id is
            # overridden here when a team name is supplied.
            self.team_id = self.get_team_id(team_name) if team_name else None
            if self.bot_id:
                self.update_bot_settings(self.bot_id)
        self.channel_id = self.get_channel_id(channel_name)
        if not self.channel_id:
            logging.warning("No channel found, messages won't be sent")

    def get_channel_id(self, channel_name: str) -> Optional[str]:
        """Return the id of the first channel matching *channel_name*, if any."""
        if self.is_admin:
            # Admin bots may search across all teams, optionally narrowed.
            endpoint = "channels/search"
            url = self._get_full_mattermost_url(endpoint)
            payload = {"term": channel_name}
            if self.team_id:
                payload["team_ids"] = [self.team_id]
        else:
            # Non-admin bots can only search within their configured team.
            endpoint = f"teams/{self.team_id}/channels/search"
            url = self._get_full_mattermost_url(endpoint)
            payload = {"term": channel_name}
        response = self.METHOD_NAME(url, HttpMethod.POST, json=payload)
        if check_response_succeed(response):
            response = response.json()
            if not len(response):
                return None
            return response[0].get("id")

    def get_team_id(self, team_name: str) -> Optional[str]:
        """Return the id of the team named *team_name* (admin bots only)."""
        if not self.is_admin:
            logging.error("You are using a non-admin bot account, which means you need to configure 'team_id'.")
            return None
        endpoint = "teams/search"
        url = self._get_full_mattermost_url(endpoint)
        response = self.METHOD_NAME(url, HttpMethod.POST, json={"term": team_name})
        if check_response_succeed(response):
            response = response.json()
            if not len(response):
                logging.warning("No team found, all channels will be searched")
                return None
            return response[0].get("id")
        else:
            logging.warning("There was an error finding a team, all channels will be searched")

    def post_message(self, title, msg_attachments: List[Dict], file_attachments: Optional[List[Tuple]] = None):
        """Post *title* plus attachments to the resolved channel."""
        if not self.channel_id:
            logging.warning("No channel found, messages won't be sent")
            return
        # Files must be uploaded first so their ids can be referenced in the post.
        file_attachments = file_attachments or []
        file_attachments = self.upload_files(file_attachments)
        endpoint = "posts"
        url = self._get_full_mattermost_url(endpoint)
        response = self.METHOD_NAME(
            url,
            HttpMethod.POST,
            json={
                "channel_id": self.channel_id,
                "message": title,
                "file_ids": file_attachments,
                "props": {"attachments": msg_attachments},
            },
        )
        if not check_response_succeed(response):
            logging.error("Couldn't deliver mattermost bot message")

    def upload_files(self, files: List[Tuple]):
        """Upload each ``(name, data)`` tuple; return the Mattermost file ids."""
        endpoint = "files"
        file_ids = []
        url = self._get_full_mattermost_url(endpoint)
        for file in files:
            response = self.METHOD_NAME(
                url,
                HttpMethod.POST,
                files={"files": file, "channel_id": (None, self.channel_id), "filename": (None, file[0])},
            )
            if not check_response_succeed(response):
                # Skip the failed file but keep uploading the rest.
                logging.error(f"There was an error uploading the file: {file[0]}")
                continue
            response = response.json()
            file_ids.append(response["file_infos"][0]["id"])
        return file_ids
import datetime
import pytest
import responses
from django.http import HttpRequest
from freezegun import freeze_time
from sentry.auth.authenticators.sms import SmsInterface, SMSRateLimitExceeded
from sentry.testutils.cases import TestCase
from sentry.testutils.silo import control_silo_test
from sentry.utils.sms import InvalidPhoneNumber, phone_number_as_e164
@control_silo_test(stable=True)
class SmsInterfaceTest(TestCase):
    """Tests for the SMS second-factor interface against a mocked Twilio API."""

    # Twilio endpoint that SmsInterface.activate() posts to; intercepted with
    # the `responses` library in the HTTP-level tests below.
    _TWILIO_MESSAGES_URL = (
        "https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Messages.json"
    )

    @staticmethod
    def _twilio_message_json():
        """Canned successful Twilio ``Messages.json`` response body.

        Was previously duplicated verbatim in both HTTP-mocking tests.
        """
        return {
            "account_sid": "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
            "api_version": "2010-04-01",
            "body": "Hi there!",
            "date_created": "Thu, 30 Jul 2015 20:12:31 +0000",
            "date_sent": "Thu, 30 Jul 2015 20:12:33 +0000",
            "date_updated": "Thu, 30 Jul 2015 20:12:33 +0000",
            "direction": "outbound-api",
            "error_code": None,
            "error_message": None,
            "from": "+15551231234",
            "messaging_service_sid": None,
            "num_media": "0",
            "num_segments": "1",
            "price": None,
            "price_unit": None,
            "sid": "SMXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
            "status": "sent",
            "subresource_uris": {
                "media": "/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Messages/SMXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Media.json"
            },
            "to": "+12345678901",
            "uri": "/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Messages/SMXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX.json",
        }

    def _make_request(self) -> HttpRequest:
        """Build a minimal authenticated request for SmsInterface.activate()."""
        request = HttpRequest()
        request.user = self.user
        request.META["REMOTE_ADDR"] = "127.0.0.1"
        return request

    def METHOD_NAME(self):
        self.user = self.create_user(email="test@example.com", is_superuser=False)

    @responses.activate
    def test_activate(self):
        request = self._make_request()
        responses.add(responses.POST, self._TWILIO_MESSAGES_URL, json=self._twilio_message_json())

        interface = SmsInterface()
        interface.phone_number = "2345678901"

        with self.options({"sms.twilio-account": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"}):
            rv = interface.activate(request)

        assert (
            rv.message
            == "A confirmation code was sent to <strong>(***) ***-**01</strong>. It is valid for 45 seconds."
        )

    @responses.activate
    def test_ratelimit_exception(self):
        request = self._make_request()
        responses.add(responses.POST, self._TWILIO_MESSAGES_URL, json=self._twilio_message_json())

        interface = SmsInterface()
        interface.phone_number = "2345678901"

        # Exhaust the per-number rate limit, then confirm that a different
        # phone number is still allowed through.
        with freeze_time(datetime.datetime.now()):
            with self.options({"sms.twilio-account": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"}):
                with pytest.raises(SMSRateLimitExceeded):
                    for _ in range(4):
                        rv = interface.activate(request)
                interface.phone_number = "2345678900"
                rv = interface.activate(request)
                assert (
                    rv.message
                    == "A confirmation code was sent to <strong>(***) ***-**00</strong>. It is valid for 45 seconds."
                )

    def test_invalid_phone_number(self):
        with pytest.raises(InvalidPhoneNumber):
            phone_number_as_e164("+15555555555")

    def test_valid_phone_number(self):
        formatted_number = phone_number_as_e164("2345678900")
        assert "+12345678900" == formatted_number
# coding: utf-8
import argparse
import json
import os
import tempfile
import numpy as np
import pytest
import torch
import espnet.nets.pytorch_backend.lm.default as lm_pytorch
from espnet.asr.asr_utils import torch_save
from espnet.asr.pytorch_backend.asr_init import freeze_modules, load_trained_modules
from espnet.nets.beam_search_transducer import BeamSearchTransducer
from espnet.nets.pytorch_backend.nets_utils import pad_list
def get_rnn_args(**kwargs):
    """Return an ``argparse.Namespace`` of default RNN E2E-ASR training options.

    Any keyword argument overrides the corresponding default.
    """
    defaults = {
        "elayers": 1,
        "subsample": "1_2_2_1_1",
        "etype": "vggblstm",
        "eunits": 2,
        "eprojs": 2,
        "dtype": "lstm",
        "dlayers": 1,
        "dunits": 2,
        "atype": "location",
        "aheads": 1,
        "awin": 2,
        "aconv_chans": 1,
        "aconv_filts": 2,
        "mtlalpha": 1.0,
        "lsm_type": "",
        "lsm_weight": 0.0,
        "sampling_probability": 0.0,
        "adim": 2,
        "dropout_rate": 0.0,
        "dropout_rate_decoder": 0.0,
        "nbest": 1,
        "beam_size": 1,
        "penalty": 0.5,
        "maxlenratio": 1.0,
        "minlenratio": 0.0,
        "ctc_weight": 0.2,
        "lm_weight": 0.0,
        "rnnlm": None,
        "verbose": 2,
        "char_list": ["a", "e", "i", "o", "u"],
        "outdir": None,
        "ctc_type": "builtin",
        "report_cer": False,
        "report_wer": False,
        "sym_space": "<space>",
        "sym_blank": "<blank>",
        "replace_sos": False,
        "tgt_lang": False,
        "enc_init": None,
        "enc_init_mods": "enc.",
        "dec_init": None,
        "dec_init_mods": "dec.",
        "freeze_mods": None,
        "model_module": "espnet.nets.pytorch_backend.e2e_asr:E2E",
    }
    # kwargs take precedence over the defaults above.
    return argparse.Namespace(**{**defaults, **kwargs})
def get_rnnt_args(**kwargs):
    """Return an ``argparse.Namespace`` of default RNN-Transducer training options.

    Any keyword argument overrides the corresponding default.
    """
    defaults = {
        "etype": "vggblstm",
        "elayers": 1,
        "subsample": "1_2_2_1_1",
        "eunits": 2,
        "eprojs": 2,
        "dtype": "lstm",
        "dlayers": 1,
        "dunits": 4,
        "dec_embed_dim": 4,
        "dropout_rate": 0.0,
        "dropout_rate_decoder": 0.0,
        "dropout_rate_embed_decoder": 0.0,
        "joint_dim": 2,
        "joint_activation_type": "tanh",
        "aux_task_type": None,
        "rnnt_mode": "rnnt",
        "trans_type": "warp-transducer",
        "char_list": ["a", "b", "c", "d"],
        "sym_space": "<space>",
        "sym_blank": "<blank>",
        "report_cer": False,
        "report_wer": False,
        "beam_size": 1,
        "nbest": 1,
        "verbose": 0,
        "outdir": None,
        "rnnlm": None,
        "enc_init": None,
        "enc_init_mods": "enc.",
        "dec_init": None,
        "dec_init_mods": "dec.",
        "freeze_mods": None,
        "model_module": "espnet.nets.pytorch_backend.e2e_asr_transducer:E2E",
    }
    # kwargs take precedence over the defaults above.
    return argparse.Namespace(**{**defaults, **kwargs})
def get_default_scope_inputs():
    """Return the (idim, odim, ilens, olens) fixture shared by these tests."""
    return 10, 5, [10, 6], [4, 3]
def get_lm(n_layers, n_units, char_list):
    """Build a small LSTM language model over *char_list* plus <blank>/<eos>."""
    # Vocabulary is the character list framed by the blank and eos symbols.
    vocab = ["<blank>"] + char_list + ["<eos>"]
    return lm_pytorch.ClassifierWithState(
        lm_pytorch.RNNLM(len(vocab), n_layers, n_units, typ="lstm")
    )
def pytorch_prepare_inputs(idim, odim, ilens, olens, is_cuda=False):
    """Create deterministic padded feature/label batches for the E2E tests."""
    np.random.seed(1)

    # NOTE: generation order (all features first, then all labels) matters —
    # both draw from the same seeded global RNG.
    feats = [np.random.randn(n, idim).astype(np.float32) for n in ilens]
    labels = [np.random.randint(1, odim, n).astype(np.int32) for n in olens]

    feat_lens = torch.from_numpy(
        np.array([f.shape[0] for f in feats], dtype=np.int32)
    ).long()
    xs_pad = pad_list([torch.from_numpy(f).float() for f in feats], 0)
    ys_pad = pad_list([torch.from_numpy(lab).long() for lab in labels], -1)

    if is_cuda:
        xs_pad = xs_pad.cuda()
        ys_pad = ys_pad.cuda()
        feat_lens = feat_lens.cuda()

    return xs_pad, feat_lens, ys_pad
@pytest.mark.parametrize(
    "main_model_type, pt_model_type, finetune_dic",
    [
        (
            "rnn",
            "rnn",
            {
                "enc_init": None,
                "dec_init": True,
                "dec_init_mods": ["dec.", "att."],
                "mtlalpha": 0.5,
                "use_lm": None,
            },
        ),
        (
            "rnnt",
            "rnn",
            {
                "enc_init": True,
                "enc_init_mods": ["enc."],
                "dec_init": None,
                "mtlalpha": 1.0,
                "use_lm": None,
            },
        ),
        (
            "rnnt",
            "lm",
            {
                "enc_init": None,
                "dec_init": True,
                "dec_init_mods": ["dec.decoder."],
                "use_lm": True,
            },
        ),
    ],
)
def METHOD_NAME(
    main_model_type, pt_model_type, finetune_dic
):
    """Train a source model, save it, then initialize a target model from it.

    Checks that a model remains trainable (loss/backward) and usable for
    recognition after transferring encoder/decoder/LM weights via
    ``load_trained_modules``.
    """
    idim, odim, ilens, olens = get_default_scope_inputs()
    batch = pytorch_prepare_inputs(idim, odim, ilens, olens)

    # Build and briefly "train" the pre-trained source model.
    if pt_model_type == "lm":
        pt_args = get_rnnt_args() if main_model_type == "rnnt" else get_rnn_args()
        pt_model = get_lm(pt_args.dlayers, pt_args.dunits, pt_args.char_list)
        prefix_tmppath = "_rnnlm"
    else:
        if pt_model_type == "rnn":
            from espnet.nets.pytorch_backend.e2e_asr import E2E

            pt_args = get_rnn_args()
        else:
            from espnet.nets.pytorch_backend.e2e_asr_transducer import E2E

            pt_args = get_rnnt_args()
        pt_model = E2E(idim, odim, pt_args)
        prefix_tmppath = ""

    loss = pt_model(*batch)
    loss.backward()

    if not os.path.exists(".pytest_cache"):
        os.makedirs(".pytest_cache")
    tmppath = tempfile.mktemp() + prefix_tmppath
    torch_save(tmppath, pt_model)

    # create dummy model.json for saved model to go through
    # get_model_conf(...) called in load_trained_modules method.
    model_conf = os.path.dirname(tmppath) + "/model.json"
    with open(model_conf, "wb") as f:
        f.write(
            json.dumps(
                (idim, odim, vars(pt_args)),
                indent=4,
                ensure_ascii=False,
                sort_keys=True,
            ).encode("utf_8")
        )

    # Point the transfer config at the checkpoint we just wrote.
    if finetune_dic["enc_init"] is not None:
        finetune_dic["enc_init"] = tmppath
    if finetune_dic["dec_init"] is not None:
        finetune_dic["dec_init"] = tmppath

    if main_model_type == "rnn":
        main_args = get_rnn_args(**finetune_dic)
    else:
        main_args = get_rnnt_args(**finetune_dic)

    # Target model initialized from the saved weights must still train.
    main_model = load_trained_modules(idim, odim, main_args)

    loss = main_model(*batch)
    loss.backward()

    # And it must still run inference.
    if main_model_type == "rnnt":
        beam_search = BeamSearchTransducer(
            decoder=main_model.dec,
            joint_network=main_model.transducer_tasks.joint_network,
            beam_size=1,
            lm=None,
            lm_weight=0.0,
            search_type="default",
            max_sym_exp=2,
            u_max=10,
            nstep=1,
            prefix_alpha=1,
            score_norm=False,
        )

        with torch.no_grad():
            in_data = np.random.randn(10, idim)

            main_model.recognize(in_data, beam_search)
    else:
        with torch.no_grad():
            in_data = np.random.randn(10, idim)

            main_model.recognize(in_data, main_args, main_args.char_list)
# todo (b-flo): add test for frozen layers
def test_pytorch_freezable():
    """Smoke test: modules listed in ``freeze_mods`` can be frozen on an RNN E2E model."""
    from espnet.nets.pytorch_backend.e2e_asr import E2E

    idim, odim, ilens, olens = get_default_scope_inputs()
    args = get_rnn_args(freeze_mods="enc.enc.0.")

    model = E2E(idim, odim, args)
    model, model_params = freeze_modules(model, args.freeze_mods)
    model.train()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Properties: This section contains additional properties to be guessed by guessit."""
from __future__ import unicode_literals
import re
from guessit.reutils import build_or_pattern
from guessit.rules.common import dash
from guessit.rules.common.validators import seps_surround
from rebulk.processors import POST_PROCESS
from rebulk.rebulk import Rebulk
from rebulk.rules import RemoveMatch, Rule
import six
def METHOD_NAME():
    """Blacklisted patterns.

    All blacklisted patterns.

    :return:
    :rtype: Rebulk
    """
    rebulk = Rebulk().regex_defaults(flags=re.IGNORECASE, abbreviations=[dash])
    # A blacklist match always loses a conflict against a non-blacklist match.
    rebulk.defaults(name='blacklist', validator=seps_surround,
                    conflict_solver=lambda match, other: other if other.name != 'blacklist' else '__default__')

    # Usenet-style noise: quoted subjects, volume ranges, nzb/par2 counters.
    rebulk.regex(r'(?:(?:\[\d+/\d+\])-+)?\.".+".*', tags=['blacklist-01'])
    rebulk.regex(r'vol\d{2,3}\+\d{2,3}.*', tags=['blacklist-02'])
    rebulk.regex(r'(?:nzb|par2)-+\d+\.of\.\d+.*', tags=['blacklist-03'])
    # Bare "N.of.M" is only blacklisted when preceded by a container
    # (enforced by ValidateBlacklist below).
    rebulk.regex(r'(?:(?:nzb|par2)-+)?\d+\.of\.\d+.*', tags=['blacklist-03', 'should-have-container-before'])

    rebulk.rules(ValidateBlacklist, RemoveBlacklisted)

    return rebulk
def source():
    """Source property.

    :return:
    :rtype: Rebulk
    """
    rebulk = Rebulk().regex_defaults(flags=re.IGNORECASE, abbreviations=[dash])
    rebulk.defaults(name='source', tags='video-codec-prefix')

    # More accurate sources
    # Blu-ray / DVD rip variants; an already-existing source match wins.
    rebulk.regex('BD-?Rip', 'BD(?=-?Mux)', value='BDRip',
                 conflict_solver=lambda match, other: other if other.name == 'source' else '__default__')
    # Bare "BD" only when not followed by a digit (avoids e.g. "BD25").
    rebulk.regex(r'BD(?!\d)', value='BDRip', validator=seps_surround,
                 conflict_solver=lambda match, other: other if other.name == 'source' else '__default__')
    rebulk.regex('BR-?Rip', 'BR(?=-?Mux)', value='BRRip',
                 conflict_solver=lambda match, other: other if other.name == 'source' else '__default__')
    rebulk.regex('DVD-?Rip', value='DVDRip',
                 conflict_solver=lambda match, other: other if other.name == 'source' else '__default__')
    rebulk.regex(r'DVD\d', value='DVD')

    return rebulk
def screen_size():
    """Screen size property.

    :return:
    :rtype: Rebulk
    """
    rebulk = Rebulk().regex_defaults(flags=re.IGNORECASE)
    rebulk.defaults(name='screen_size', validator=seps_surround)

    # Discarded:
    # 8K (4320p) resolutions are matched privately: consumed, not reported.
    rebulk.regex(r'(?:\d{3,}(?:x|\*))?4320(?:p?x?)', value='4320p', private=True)
    return rebulk
def other():
    """Other property.

    :return:
    :rtype: Rebulk
    """
    rebulk = Rebulk().regex_defaults(flags=re.IGNORECASE, abbreviations=[dash])
    rebulk.defaults(name='other', validator=seps_surround)

    # "F1" means Formula One unless it already matched as a film counter.
    rebulk.regex('F1', value='Formula One',
                 conflict_solver=lambda match, other: other if other.name == 'film' else '__default__')
    # Discarded:
    rebulk.regex('DownRev', 'small-size', private=True)
    return rebulk
def container():
    """Builder for rebulk object.

    :return: Created Rebulk object
    :rtype: Rebulk
    """
    rebulk = Rebulk().regex_defaults(flags=re.IGNORECASE).string_defaults(ignore_case=True)
    # Extension form (".nzb" at end of string): loses against source/video_codec
    # and against non-extension container matches.
    rebulk.defaults(name='container',
                    tags=['extension'],
                    conflict_solver=lambda match, other: other
                    if other.name in ['source', 'video_codec'] or
                    other.name == 'container' and 'extension' not in other.tags
                    else '__default__')

    # bytes vs text patterns depending on the interpreter (file predates py3-only).
    if six.PY3:
        nzb = ['nzb']
    else:
        nzb = [b'nzb']

    rebulk.regex(r'\.' + build_or_pattern(nzb) + '$', exts=nzb, tags=['extension', 'torrent'])

    # Bare-word form ("nzb" without a dot): wins against source/video_codec and
    # against extension-form container matches; value is upper-cased.
    rebulk.defaults(name='container',
                    validator=seps_surround,
                    formatter=lambda s: s.upper(),
                    conflict_solver=lambda match, other: match
                    if other.name in ['source', 'video_codec'] or
                    other.name == 'container' and 'extension' in other.tags
                    else '__default__')

    rebulk.string(*nzb, tags=['nzb'])

    return rebulk
class ValidateBlacklist(Rule):
    """Drop pattern-03 blacklist matches that are not preceded by a container."""

    priority = 10000
    consequence = RemoveMatch

    def when(self, matches, context):
        """Collect tagged blacklist matches lacking a preceding container match.

        :param matches:
        :type matches: rebulk.match.Matches
        :param context:
        :type context: dict
        :return: the matches to remove
        """
        def has_container_before(candidate):
            return matches.previous(candidate, predicate=lambda m: m.name == 'container', index=0)

        return [
            candidate
            for candidate in matches.tagged('should-have-container-before')
            if not has_container_before(candidate)
        ]
class RemoveBlacklisted(Rule):
    """Remove blacklisted properties from final result."""

    # Run after all other post-processing so every blacklist match is final.
    priority = POST_PROCESS - 9000
    consequence = RemoveMatch

    def when(self, matches, context):
        """Remove blacklisted properties.

        :param matches:
        :type matches: rebulk.match.Matches
        :param context:
        :type context: dict
        :return:
        """
        return matches.named('blacklist')
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Functions to delete entities from the database, preserving provenance integrity."""
import logging
from typing import Callable, Iterable, Set, Tuple, Union
from aiida.common.log import AIIDA_LOGGER
from aiida.manage import get_manager
from aiida.orm import Group, Node, QueryBuilder
from aiida.tools.graph.graph_traversers import get_nodes_delete
__all__ = ('DELETE_LOGGER', 'delete_nodes', 'delete_group_nodes')

# Dedicated child logger so deletion progress can be filtered/redirected
# independently of the main AiiDA logger.
DELETE_LOGGER = AIIDA_LOGGER.getChild('delete')
def METHOD_NAME(
    pks: Iterable[int],
    dry_run: Union[bool, Callable[[Set[int]], bool]] = True,
    backend=None,
    **traversal_rules: bool
) -> Tuple[Set[int], bool]:
    """Delete nodes given a list of "starting" PKs.

    This command will delete not only the specified nodes, but also the ones that are
    linked to these and should be also deleted in order to keep a consistent provenance
    according to the rules explained in the Topics - Provenance section of the documentation.
    In summary:

    1. If a DATA node is deleted, any process nodes linked to it will also be deleted.

    2. If a CALC node is deleted, any incoming WORK node (callers) will be deleted as
       well whereas any incoming DATA node (inputs) will be kept. Outgoing DATA nodes
       (outputs) will be deleted by default but this can be disabled.

    3. If a WORK node is deleted, any incoming WORK node (callers) will be deleted as
       well, but all DATA nodes will be kept. Outgoing WORK or CALC nodes will be kept by
       default, but deletion of either of both kind of connected nodes can be enabled.

    These rules are 'recursive', so if a CALC node is deleted, then its output DATA
    nodes will be deleted as well, and then any CALC node that may have those as
    inputs, and so on.

    :param pks: a list of starting PKs of the nodes to delete
        (the full set will be based on the traversal rules)

    :param dry_run:
        If True, return the pks to delete without deleting anything.
        If False, delete the pks without confirmation
        If callable, a function that return True/False, based on the pks, e.g. ``dry_run=lambda pks: True``

    :param backend: storage backend to operate on; defaults to the backend of
        the currently loaded profile.

    :param traversal_rules: graph traversal rules.
        See :const:`aiida.common.links.GraphTraversalRules` for what rule names
        are toggleable and what the defaults are.

    :returns: (pks to delete, whether they were deleted)
    """
    # Fall back to the storage backend of the currently loaded profile.
    backend = backend or get_manager().get_profile_storage()
    # pylint: disable=too-many-arguments,too-many-branches,too-many-locals,too-many-statements

    def _missing_callback(_pks: Iterable[int]):
        # Non-existent starting PKs are reported but do not abort the deletion.
        for _pk in _pks:
            DELETE_LOGGER.warning(f'warning: node with pk<{_pk}> does not exist, skipping')

    # Expand the starting PKs into the full, provenance-consistent set of
    # nodes to delete, following the (overridable) graph traversal rules.
    pks_set_to_delete = get_nodes_delete(
        pks, get_links=False, missing_callback=_missing_callback, backend=backend, **traversal_rules
    )['nodes']

    DELETE_LOGGER.report('%s Node(s) marked for deletion', len(pks_set_to_delete))

    # At DEBUG verbosity, list every node that is about to be deleted.
    if pks_set_to_delete and DELETE_LOGGER.level == logging.DEBUG:
        builder = QueryBuilder(
            backend=backend
        ).append(Node, filters={'id': {
            'in': pks_set_to_delete
        }}, project=('uuid', 'id', 'node_type', 'label'))
        DELETE_LOGGER.debug('Node(s) to delete:')
        for uuid, pk, type_string, label in builder.iterall():
            try:
                # Shorten a dotted node_type (e.g. 'data.core.int.Int.') to its
                # class name for readability.
                short_type_string = type_string.split('.')[-2]
            except IndexError:
                short_type_string = type_string
            DELETE_LOGGER.debug(f'   {uuid} {pk} {short_type_string} {label}')

    if dry_run is True:
        DELETE_LOGGER.report('This was a dry run, exiting without deleting anything')
        return (pks_set_to_delete, False)

    # confirm deletion
    if callable(dry_run) and dry_run(pks_set_to_delete):
        DELETE_LOGGER.report('This was a dry run, exiting without deleting anything')
        return (pks_set_to_delete, False)

    if not pks_set_to_delete:
        return (pks_set_to_delete, True)

    DELETE_LOGGER.report('Starting node deletion...')
    # Delete inside a single transaction so the operation is all-or-nothing.
    with backend.transaction():
        backend.delete_nodes_and_connections(pks_set_to_delete)
    DELETE_LOGGER.report('Deletion of nodes completed.')

    return (pks_set_to_delete, True)
def delete_group_nodes(
    pks: Iterable[int],
    dry_run: Union[bool, Callable[[Set[int]], bool]] = True,
    backend=None,
    **traversal_rules: bool
) -> Tuple[Set[int], bool]:
    """Delete nodes contained in a list of groups (not the groups themselves!).

    This command will delete not only the nodes, but also the ones that are
    linked to these and should be also deleted in order to keep a consistent provenance
    according to the rules explained in the concepts section of the documentation.
    In summary:

    1. If a DATA node is deleted, any process nodes linked to it will also be deleted.

    2. If a CALC node is deleted, any incoming WORK node (callers) will be deleted as
       well whereas any incoming DATA node (inputs) will be kept. Outgoing DATA nodes
       (outputs) will be deleted by default but this can be disabled.

    3. If a WORK node is deleted, any incoming WORK node (callers) will be deleted as
       well, but all DATA nodes will be kept. Outgoing WORK or CALC nodes will be kept by
       default, but deletion of either of both kind of connected nodes can be enabled.

    These rules are 'recursive', so if a CALC node is deleted, then its output DATA
    nodes will be deleted as well, and then any CALC node that may have those as
    inputs, and so on.

    :param pks: a list of the groups

    :param dry_run:
        If True, return the pks to delete without deleting anything.
        If False, delete the pks without confirmation
        If callable, a function that return True/False, based on the pks, e.g. ``dry_run=lambda pks: True``

    :param backend: storage backend to operate on; passed through to the
        node-level deletion.

    :param traversal_rules: graph traversal rules. See :const:`aiida.common.links.GraphTraversalRules` what rule names
        are toggleable and what the defaults are.

    :returns: (node pks to delete, whether they were deleted)
    """
    # Collect the PKs of all nodes contained in any of the given groups.
    group_node_query = QueryBuilder(backend=backend).append(
        Group,
        filters={
            'id': {
                'in': list(pks)
            }
        },
        tag='groups',
    ).append(Node, project='id', with_group='groups')
    # A node may belong to several of the groups: deduplicate.
    group_node_query.distinct()
    node_pks = group_node_query.all(flat=True)
    # Delegate the (dry-run aware) deletion to the node-level function.
    # Fix: the original return line carried a stray trailing artifact (" |").
    return METHOD_NAME(node_pks, dry_run=dry_run, backend=backend, **traversal_rules)
4,678 | child main | # Copyright 2017, David Wilson
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import logging
import os
import random
import sys
import threading
import traceback
import mitogen.core
import mitogen.parent
LOG = logging.getLogger('mitogen')
def fixup_prngs():
    """
    Add 256 bits of /dev/urandom to OpenSSL's PRNG in the child, and re-seed
    the random package with the same data.
    """
    seed = os.urandom(256 // 8)
    random.seed(seed)
    # Only touch OpenSSL if the ssl module has actually been imported.
    ssl_mod = sys.modules.get('ssl')
    if ssl_mod is not None:
        ssl_mod.RAND_add(seed, 75.0)
def reset_logging_framework():
    """
    After fork, ensure any logging.Handler locks are recreated, as a variety of
    threads in the parent may have been using the logging package at the moment
    of fork.

    It is not possible to solve this problem in general; see
    https://github.com/dw/mitogen/issues/150 for a full discussion.
    """
    # NOTE: reaches into a private attribute of the logging module; this
    # depends on CPython logging internals.
    logging._lock = threading.RLock()
    # The root logger does not appear in the loggerDict.
    for name in [None] + list(logging.Logger.manager.loggerDict):
        for handler in logging.getLogger(name).handlers:
            # Recreate each handler's lock, which a parent thread may have
            # been holding at the instant of fork.
            handler.createLock()

    # Drop LogHandler instances inherited from the parent -- presumably they
    # would forward records over the parent's connection; confirm against
    # mitogen.core.LogHandler.
    root = logging.getLogger()
    root.handlers = [
        handler
        for handler in root.handlers
        if not isinstance(handler, mitogen.core.LogHandler)
    ]
def handle_child_crash():
    """
    Respond to _child_main() crashing by ensuring the relevant exception is
    logged to /dev/tty.
    """
    # /dev/tty is used because stderr may already have been redirected during
    # child setup; this is a best-effort, last-resort report.
    msg = '\n\nFORKED CHILD PID %d CRASHED\n%s\n\n' % (
        os.getpid(),
        traceback.format_exc(),
    )
    tty = open('/dev/tty', 'wb')
    # Fix: the file is opened in binary mode, so the message must be encoded.
    # Writing a str to a binary file raises TypeError on Python 3, which would
    # mask the original crash.
    tty.write(msg.encode('utf-8'))
    tty.close()
    # Skip atexit handlers inherited from the parent process.
    os._exit(1)
class Stream(mitogen.parent.Stream):
child_is_immediate_subprocess = True
#: Reference to the importer, if any, recovered from the parent.
importer = None
#: User-supplied function for cleaning up child process state.
on_fork = None
    def construct(self, old_router, max_message_size, on_fork=None,
                  debug=False, profiling=False, unidirectional=False,
                  on_start=None):
        # fork method only supports a tiny subset of options.
        # NOTE(review): `unidirectional` is accepted but a literal False is
        # forwarded to the superclass -- confirm this is intentional.
        super(Stream, self).construct(max_message_size=max_message_size,
                                      debug=debug, profiling=profiling,
                                      unidirectional=False)
        # Callback invoked in the child immediately after fork().
        self.on_fork = on_fork
        # Optional callback propagated to ExternalContext via its config.
        self.on_start = on_start

        # Recover the module importer from the old router, if one exists, so
        # the child can reuse it (presumably to keep importing modules from
        # the master -- see ModuleForwarder).
        responder = getattr(old_router, 'responder', None)
        if isinstance(responder, mitogen.parent.ModuleForwarder):
            self.importer = responder.importer

    name_prefix = u'fork'
    def start_child(self):
        # Create a connected socket pair; one half stays in the parent, the
        # other becomes the child's communication channel.
        parentfp, childfp = mitogen.parent.create_socketpair()
        self.pid = os.fork()
        if self.pid:
            # Parent: only the parent half of the pair is needed.
            childfp.close()
            # Decouple the socket from the lifetime of the Python socket object.
            fd = os.dup(parentfp.fileno())
            parentfp.close()
            return self.pid, fd, None
        else:
            # Child: close the parent half and continue child setup.
            # _wrap_child_main() never returns -- it always ends in os._exit().
            parentfp.close()
            self._wrap_child_main(childfp)
    def _wrap_child_main(self, childfp):
        # Catch absolutely everything (BaseException): an escaping exception
        # would otherwise unwind into the forked copy of the parent's stack.
        # handle_child_crash() reports on /dev/tty and calls os._exit().
        try:
            self.METHOD_NAME(childfp)
        except BaseException:
            handle_child_crash()
    def METHOD_NAME(self, childfp):
        """
        Continue execution in the forked child: reset inherited state, wire
        the socketpair up as the bootstrap file descriptors, then run
        ExternalContext.main(). This method never returns; it always ends in
        os._exit().
        """
        reset_logging_framework()  # Must be first!
        fixup_prngs()
        # Per-class fork hooks for package-internal state.
        mitogen.core.Latch._on_fork()
        mitogen.core.Side._on_fork()
        if self.on_fork:
            self.on_fork()
        mitogen.core.set_block(childfp.fileno())
        # Expected by the ExternalContext.main().
        os.dup2(childfp.fileno(), 1)
        os.dup2(childfp.fileno(), 100)
        # Overwritten by ExternalContext.main(); we must replace the
        # parent-inherited descriptors that were closed by Side._on_fork() to
        # avoid ExternalContext.main() accidentally allocating new files over
        # the standard handles.
        os.dup2(childfp.fileno(), 0)
        # Avoid corrupting the stream on fork crash by dupping /dev/null over
        # stderr. Instead, handle_child_crash() uses /dev/tty to log errors.
        devnull = os.open('/dev/null', os.O_WRONLY)
        if devnull != 2:
            os.dup2(devnull, 2)
            os.close(devnull)
        # If we're unlucky, childfp.fileno() may coincidentally be one of our
        # desired FDs. In that case closing it breaks ExternalContext.main().
        if childfp.fileno() not in (0, 1, 100):
            childfp.close()
        config = self.get_econtext_config()
        # The core source already exists in this process, so no fd is needed
        # to read it from.
        config['core_src_fd'] = None
        config['importer'] = self.importer
        config['setup_package'] = False
        if self.on_start:
            config['on_start'] = self.on_start
        try:
            mitogen.core.ExternalContext(config).main()
        except Exception:
            # TODO: report exception somehow.
            os._exit(72)
        finally:
            # Don't trigger atexit handlers, they were copied from the parent.
            os._exit(0)
def _connect_bootstrap(self, extra_fd):
# None required.
pass |
4,679 | test create enkelvoudiginformatieobject | # SPDX-License-Identifier: EUPL-1.2
# Copyright (C) 2020 Dimpact
"""
Test the flow described in https://github.com/VNG-Realisatie/gemma-zaken/issues/39
"""
import base64
from datetime import date
from urllib.parse import urlparse
from django.test import override_settings
from rest_framework import status
from vng_api_common.constants import VertrouwelijkheidsAanduiding
from vng_api_common.tests import reverse
from zgw_consumers.constants import APITypes
from zgw_consumers.models import Service
from openzaak.components.catalogi.tests.factories import InformatieObjectTypeFactory
from openzaak.tests.utils import APICMISTestCase, JWTAuthMixin, require_cmis
from ..models import EnkelvoudigInformatieObject
from .factories import (
EnkelvoudigInformatieObjectCanonicalFactory,
EnkelvoudigInformatieObjectFactory,
)
from .utils import get_operation_url
@require_cmis
@override_settings(CMIS_ENABLED=True)
class US39TestCase(JWTAuthMixin, APICMISTestCase):
heeft_alle_autorisaties = True
    def METHOD_NAME(self):
        """
        Registreer een ENKELVOUDIGINFORMATIEOBJECT
        """
        # Register the catalogi API root as a known service -- presumably
        # required so the informatieobjecttype URL resolves; confirm against
        # the zgw_consumers configuration.
        Service.objects.create(
            api_root="http://testserver/catalogi/api/v1/", api_type=APITypes.ztc
        )
        informatieobjecttype = InformatieObjectTypeFactory.create(concept=False)
        informatieobjecttype_url = reverse(informatieobjecttype)
        url = get_operation_url("enkelvoudiginformatieobject_create")
        data = {
            "identificatie": "AMS20180701001",
            "bronorganisatie": "159351741",
            "creatiedatum": "2018-07-01",
            "titel": "text_extra.txt",
            "auteur": "ANONIEM",
            "formaat": "text/plain",
            "taal": "dut",
            "inhoud": base64.b64encode(b"Extra tekst in bijlage").decode("utf-8"),
            "informatieobjecttype": f"http://testserver{informatieobjecttype_url}",
            "vertrouwelijkheidaanduiding": VertrouwelijkheidsAanduiding.openbaar,
        }

        response = self.client.post(url, data)

        self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)

        eio = EnkelvoudigInformatieObject.objects.get()
        self.assertEqual(eio.identificatie, "AMS20180701001")
        self.assertEqual(eio.creatiedatum, date(2018, 7, 1))

        # The returned 'inhoud' URL must point at the download endpoint.
        download_url = urlparse(response.data["inhoud"])
        self.assertEqual(
            download_url.path,
            get_operation_url("enkelvoudiginformatieobject_download", uuid=eio.uuid),
        )
    def test_read_detail_file(self):
        """Downloading an existing document returns its raw content."""
        eio = EnkelvoudigInformatieObjectFactory.create()
        file_url = get_operation_url(
            "enkelvoudiginformatieobject_download", uuid=eio.uuid
        )

        response = self.client.get(file_url)

        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # "some data" is presumably the default content produced by the
        # factory -- confirm against EnkelvoudigInformatieObjectFactory.
        self.assertEqual(response.getvalue().decode("utf-8"), "some data")
    def test_list_file(self):
        """The list endpoint exposes 'inhoud' as a version-pinned download URL."""
        EnkelvoudigInformatieObjectCanonicalFactory.create()
        eio = EnkelvoudigInformatieObject.objects.get()
        list_url = get_operation_url("enkelvoudiginformatieobject_list")

        response = self.client.get(list_url)

        self.assertEqual(response.status_code, status.HTTP_200_OK)
        data = response.data["results"]
        download_url = reverse(
            "enkelvoudiginformatieobject-download", kwargs={"uuid": eio.uuid}
        )
        # The URL carries a 'versie' query parameter pinning the version.
        self.assertEqual(
            data[0]["inhoud"], f"http://testserver{download_url}?versie={eio.versie}",
        )
def test_create_enkelvoudiginformatieobject_without_identificatie(self):
Service.objects.create(
api_root="http://testserver/catalogi/api/v1/", api_type=APITypes.ztc
)
informatieobjecttype = InformatieObjectTypeFactory.create(concept=False)
informatieobjecttype_url = reverse(informatieobjecttype)
url = get_operation_url("enkelvoudiginformatieobject_create")
data = {
"bronorganisatie": "159351741",
"creatiedatum": "2018-07-01",
"titel": "text_extra.txt",
"auteur": "ANONIEM",
"formaat": "text/plain",
"taal": "dut",
"inhoud": base64.b64encode(b"Extra tekst in bijlage").decode("utf-8"),
"informatieobjecttype": f"http://testserver{informatieobjecttype_url}",
"vertrouwelijkheidaanduiding": VertrouwelijkheidsAanduiding.openbaar,
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
eio = EnkelvoudigInformatieObject.objects.get()
self.assertEqual(eio.identificatie, str(eio.uuid))
self.assertEqual(b"Extra tekst in bijlage", eio.inhoud.read()) |
4,680 | test botservice create channel specific location | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.testsdk import ScenarioTest, ResourceGroupPreparer
import uuid
class ChannelTests(ScenarioTest):
    def create_bot(self, resource_group, location='global'):
        """Create a randomly-named MultiTenant bot and verify the response.

        Populates ``self.kwargs`` (botname, endpoint, app_id, ...) so that
        subsequent ``self.cmd`` templates in the same test can reference them.
        """
        self.kwargs.update({
            'botname': self.create_random_name(prefix='cli', length=10),
            'endpoint': 'https://www.google.com/api/messages',
            'app_id': str(uuid.uuid4()),
            'setting_name': self.create_random_name(prefix='auth', length=10),
            'clientid': 'clientid',
            'password': str(uuid.uuid4()),
            'location': location
        })
        self.cmd('az bot create -g {rg} -n {botname} -e {endpoint} --app-type MultiTenant --appid {app_id} -l {location}', checks=[
            self.check('name', '{botname}'),
            self.check('resourceGroup', '{rg}'),
            self.check('location', '{location}')
        ])
@ResourceGroupPreparer(random_name_length=20)
def test_webchat_channel(self, resource_group):
self.create_bot(resource_group)
# We verify that webchat exists for the bot.
# We cannot make guarantees on the number of webchat sites, but yes on it being enabled.
self.cmd('az bot webchat show -g {rg} -n {botname}', checks=[
self.check('properties.channelName', 'WebChatChannel'),
])
self.cmd('az bot webchat show -g {rg} -n {botname} --with-secrets', checks=[
self.check('properties.channelName', 'WebChatChannel'),
])
self.cmd('az bot delete -g {rg} -n {botname}')
@ResourceGroupPreparer(random_name_length=20)
def test_skype_channel(self, resource_group):
self.create_bot(resource_group)
self.cmd('az bot skype create -g {rg} -n {botname} --enable-calling true --enable-media-cards true --enable-messaging true --enable-video true --calling-web-hook https://www.google.com', checks=[
self.check('properties.properties.enableMessaging', True),
self.check('properties.properties.enableMediaCards', True),
self.check('properties.properties.enableVideo', True)
])
self.cmd('az bot skype show -g {rg} -n {botname}', checks=[
self.check('properties.properties.enableMessaging', True),
self.check('properties.properties.enableMediaCards', True),
self.check('properties.properties.enableVideo', False)
])
self.cmd('az bot skype show -g {rg} -n {botname} --with-secrets', checks=[
self.check('properties.properties.enableMessaging', True),
self.check('properties.properties.enableMediaCards', True),
self.check('properties.properties.enableVideo', False)
])
self.cmd('az bot skype delete -g {rg} -n {botname}')
self.cmd('az bot delete -g {rg} -n {botname}')
@ResourceGroupPreparer(random_name_length=20)
def test_msteams_channel(self, resource_group):
self.create_bot(resource_group)
self.cmd('az bot msteams create -g {rg} -n {botname} --enable-calling true --calling-web-hook https://www.google.com', checks=[
self.check('properties.properties.enableCalling', True),
self.check('properties.properties.isEnabled', True)
])
self.cmd('az bot msteams show -g {rg} -n {botname}', checks=[
self.check('properties.properties.enableCalling', True),
self.check('properties.properties.isEnabled', True)
])
self.cmd('az bot msteams show -g {rg} -n {botname} --with-secrets', checks=[
self.check('properties.properties.enableCalling', True),
self.check('properties.properties.isEnabled', True)
])
self.cmd('az bot msteams delete -g {rg} -n {botname}')
self.cmd('az bot delete -g {rg} -n {botname}')
@ResourceGroupPreparer(random_name_length=20)
def test_directline_channel(self, resource_group):
self.create_bot(resource_group)
self.cmd('az bot directline create -g {rg} -n {botname}', checks=[
self.check('properties.properties.sites[0].siteName', 'Default Site'),
self.check('properties.properties.sites[0].isEnabled', True)
])
self.cmd('az bot directline show -g {rg} -n {botname}', checks=[
self.check('properties.properties.sites[0].siteName', 'Default Site'),
self.check('properties.properties.sites[0].isEnabled', True),
self.check('properties.properties.sites[0].key', None)
])
self.cmd('az bot directline show -g {rg} -n {botname} --with-secrets', checks=[
self.check('properties.properties.sites[0].siteName', 'Default Site'),
self.check('properties.properties.sites[0].isEnabled', True)
])
self.cmd('az bot directline delete -g {rg} -n {botname}')
self.cmd('az bot delete -g {rg} -n {botname}')
@ResourceGroupPreparer(random_name_length=20)
def test_botservice_update_directline(self, resource_group):
self.create_bot(resource_group)
self.cmd('az bot directline create -g {rg} -n {botname}', checks=[
self.check('properties.properties.sites[0].siteName', 'Default Site'),
self.check('properties.properties.sites[0].isEnabled', True),
self.check('properties.properties.sites[0].isSecureSiteEnabled', False)
])
origin_url = 'https://mybotsite1.azurewebsites.net'
self.kwargs.update({'origin_url': origin_url})
self.cmd('az bot directline update -g {rg} -n {botname} --enable-enhanced-auth --trusted-origins {origin_url} --debug', checks=[
self.check('properties.properties.sites[0].trustedOrigins[0]', 'https://mybotsite1.azurewebsites.net'),
self.check('properties.properties.sites[0].isSecureSiteEnabled', True)
])
self.cmd('az bot delete -g {rg} -n {botname}')
@ResourceGroupPreparer(random_name_length=20)
def METHOD_NAME(self, resource_group):
locations = ['global', 'westus', 'westeurope']
for location in locations:
self.create_bot(resource_group, location)
self.cmd('az bot msteams create -g {rg} -n {botname} -l {location}', checks=[
self.check('location', '{location}')
])
self.cmd('az bot directline create -g {rg} -n {botname} -l {location}', checks=[
self.check('location', '{location}')
])
self.cmd('az bot delete -g {rg} -n {botname}') |
4,681 | make drive | # SPDX-FileCopyrightText: Red Hat, Inc.
# SPDX-License-Identifier: GPL-2.0-or-later
from __future__ import absolute_import
from __future__ import division
import logging
import pytest
import vdsm.virt.vmdevices.storage as storage
from vdsm.clientIF import clientIF
from vdsm.common import response
from vdsm.common import exception
from vdsm.virt import vm
from vdsm.virt.vmdevices import hwclass
log = logging.getLogger()
src_drive_conf = {
'device': 'disk',
'iface': 'virtio',
'path': '/path/to/volume',
'type': 'disk',
'index': 0,
'domainID': 'src-domain-id',
'imageID': 'imgID',
'volumeID': 'volumeID',
'diskReplicate': '',
'name': 'vda',
}
dst_drive_conf = {
'device': 'disk',
'iface': 'virtio',
'path': '/path/to/volume',
'type': 'disk',
'index': 0,
'domainID': 'dst-domain-id',
'imageID': 'imgID',
'volumeID': 'volumeID',
'name': 'vda',
}
lease_drive_conf = {
'device': 'disk',
'iface': 'virtio',
'path': '/path/to/volume',
'type': 'disk',
'index': 0,
'domainID': 'src-domain-id',
'imageID': 'imgID',
'volumeID': 'volumeID',
'diskReplicate': '',
'name': 'vda',
'volumeChain': [{
'leasePath': 'path',
'leaseOffset': 'offset',
}],
}
def test_lookup_error():
_vm = FakeVm()
result = _vm.diskReplicateFinish(src_drive_conf, dst_drive_conf)
assert result == response.error('imageErr')
def test_has_volume_leases():
_vm = FakeVm([METHOD_NAME(lease_drive_conf)])
result = _vm.diskReplicateFinish(lease_drive_conf, dst_drive_conf)
assert result == response.error('noimpl')
def test_diskreplicatefinish_transient_disk():
src_drive = METHOD_NAME(src_drive_conf,
storage.DRIVE_SHARED_TYPE.TRANSIENT)
_vm = FakeVm([src_drive])
result = _vm.diskReplicateFinish(src_drive_conf, dst_drive_conf)
assert result == response.error("transientErr")
def test_diskreplicatefinish_replication_not_in_progress():
# Passing dst_drive conf as src as it does not have the diskReplicate
# attribute
_vm = FakeVm([METHOD_NAME(dst_drive_conf)])
src = dst_drive_conf
with pytest.raises(exception.ReplicationNotInProgress):
_vm.diskReplicateFinish(src, dst_drive_conf)
def test_diskreplicatefinish_job_not_found():
src_drive = METHOD_NAME(src_drive_conf)
_vm = FakeVm([src_drive])
# Passing an empty dict so 'cur' and 'end will not be found
_vm._dom = FakeDomain({})
result = _vm.diskReplicateFinish(src_drive_conf, dst_drive_conf)
assert result == response.error("replicaErr")
def test_diskreplicatefinish_job_not_finished():
_vm = FakeVm([METHOD_NAME(src_drive_conf)])
_vm._dom = FakeDomain({'cur': 0, 'end': 1})
result = _vm.diskReplicateFinish(src_drive_conf, dst_drive_conf)
assert result == response.error("unavail")
# if pivot was not called the monitor should not have been disabled
assert not _vm.volume_monitor.was_disabled
def test_blockjobabort_failed(monkeypatch):
def raising_blockjobabort():
raise Exception('blockJobAbort failed')
src_drive = METHOD_NAME(src_drive_conf)
dst_drive = METHOD_NAME(dst_drive_conf)
_vm = FakeVm([src_drive, dst_drive])
_vm._dom = FakeDomain({'cur': 1, 'end': 1})
monkeypatch.setattr(FakeDomain, 'blockJobAbort', raising_blockjobabort)
result = _vm.diskReplicateFinish(src_drive_conf, dst_drive_conf)
assert result == response.error("changeDisk")
def test_replicatefinish_successful():
src_drive = METHOD_NAME(src_drive_conf)
dst_drive = METHOD_NAME(dst_drive_conf)
_vm = FakeVm([src_drive, dst_drive])
_vm._dom = FakeDomain({'cur': 1, 'end': 1})
_vm.diskReplicateFinish(src_drive_conf, dst_drive_conf)
# we should have only one device at the end of the replication
# and its domainID should be the destination's ID
assert len(_vm._devices) == 1
assert (_vm._devices[hwclass.DISK][0]['domainID'] ==
dst_drive_conf['domainID'])
# we need to check whether the monitor was disabled during the
# run of diskReplicateFinish
assert _vm.volume_monitor.was_disabled
def METHOD_NAME(drive_conf, shared_type=storage.DRIVE_SHARED_TYPE.EXCLUSIVE):
    """Build a storage.Drive from *drive_conf*, forcing the given shared type.

    NOTE: mutates the passed-in dict by setting its 'shared' key, so the
    module-level ``*_drive_conf`` templates gain the key as a side effect.
    """
    drive_conf['shared'] = shared_type
    return storage.Drive(log, **drive_conf)
class FakeVm(vm.Vm):
    """Minimal vm.Vm stand-in for exercising diskReplicateFinish().

    Skips vm.Vm.__init__ entirely and wires up only the attributes the
    replication code paths touch.
    """

    def __init__(self, devices=[]):
        # NOTE(review): mutable default argument; harmless here because the
        # default list is never mutated, but callers should pass their own.
        self._devices = {hwclass.DISK: devices}
        self.id = "testvm"
        self.volume_monitor = FakeVolumeMonitor()
        self._dom = FakeDomain()
        self.cif = FakeClientIF(log)
        # We don't always pass the destination drive
        if len(devices) > 1:
            self.dst_drive = devices[1]
        else:
            # If dst_drive wasn't passed use the original
            self.dst_drive = {'domainID': 'src-domain-id'}

    def _delDiskReplica(self, drive):
        # as the actual _delDiskReplica does extra stuff like syncing metadata
        # and locking we override it here to make it do only what we care about
        del drive.diskReplicate

    def updateDriveParameters(self, driveParams):
        # We only care about the domainID for the tests
        for vmDrive in self._devices[hwclass.DISK][:]:
            setattr(vmDrive, 'domainID', self.dst_drive['domainID'])
class FakeDomain(object):
    """Minimal libvirt-domain stand-in exposing just the block-job API."""

    def __init__(self, block_job_info=None):
        # Canned result for blockJobInfo(); an empty dict simulates "no
        # active block job". Fix: avoid a mutable default argument so that
        # instances can never share (and accidentally mutate) the same dict.
        self.block_job_info = {} if block_job_info is None else block_job_info

    def blockJobInfo(self, drive_name, flags=0):
        # The canned info is returned regardless of which drive is queried.
        return self.block_job_info

    def blockJobAbort(self, drive_name, flags=0):
        # Aborting always "succeeds" silently.
        pass
class FakeClientIF(clientIF):
    """clientIF stand-in that skips the real constructor entirely.

    Only the ``log`` attribute is set, which is all the code under test needs.
    """

    def __init__(self, log):
        # Deliberately does NOT call super().__init__() -- that would wire up
        # real infrastructure.
        self.log = log
class FakeVolumeMonitor(object):
    """Test double recording whether the volume monitor was ever disabled."""

    def __init__(self):
        self.enabled = False
        # Sticky flag: stays True once disable() has been called, even if the
        # monitor is re-enabled afterwards.
        self.was_disabled = False

    def disable(self):
        self.was_disabled = True
        self.enabled = False

    def enable(self):
        # Fix: the original final line carried a stray trailing artifact
        # ("... |") that made the statement syntactically invalid.
        self.enabled = True
4,682 | downsample basic block | import torch.nn as nn
def conv3x3(in_planes, out_planes, stride=1):
    """Build a 3x3 convolution with padding 1 and no bias.

    :param in_planes: int, number of channels in the input sequence.
    :param out_planes: int, number of channels produced by the convolution.
    :param stride: int, stride of the convolution.
    """
    layer = nn.Conv2d(
        in_channels=in_planes,
        out_channels=out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
    return layer
def METHOD_NAME(inplanes, outplanes, stride):
    """Build the 1x1 strided projection (conv + batch norm) used to
    downsample an identity branch so it matches a residual branch.

    :param inplanes: int, number of channels in the input sequence.
    :param outplanes: int, number of channels produced by the convolution.
    :param stride: int, stride of the 1x1 convolution.
    """
    projection = nn.Conv2d(
        in_channels=inplanes,
        out_channels=outplanes,
        kernel_size=1,
        stride=stride,
        bias=False,
    )
    return nn.Sequential(projection, nn.BatchNorm2d(outplanes))
class BasicBlock(nn.Module):
    """Two-convolution residual block (the ResNet 'basic' variant)."""

    expansion = 1  # output channels = planes * expansion

    def __init__(
        self,
        inplanes,
        planes,
        stride=1,
        downsample=None,
        relu_type="swish",
    ):
        """__init__.

        :param inplanes: int, number of channels in the input sequence.
        :param planes: int, number of channels produced by the convolution.
        :param stride: int, stride of the first convolution.
        :param downsample: module applied to the identity branch when its
            shape must change to match the residual branch; None leaves it.
        :param relu_type: str, type of activation function
            ("relu", "prelu" or "swish").
        """
        super(BasicBlock, self).__init__()

        assert relu_type in ["relu", "prelu", "swish"]

        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)

        # Two separate activation instances are kept: PReLU carries learnable
        # parameters, so it cannot be shared between the two call sites.
        if relu_type == "relu":
            self.relu1 = nn.ReLU(inplace=True)
            self.relu2 = nn.ReLU(inplace=True)
        elif relu_type == "prelu":
            self.relu1 = nn.PReLU(num_parameters=planes)
            self.relu2 = nn.PReLU(num_parameters=planes)
        elif relu_type == "swish":
            self.relu1 = nn.SiLU(inplace=True)
            self.relu2 = nn.SiLU(inplace=True)
        else:
            raise NotImplementedError
        # --------

        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)

        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """forward.

        :param x: torch.Tensor, input tensor with input size (B, C, H, W).
        """
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu1(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            # Project the identity so its shape matches `out` before adding.
            residual = self.downsample(x)
        out += residual
        out = self.relu2(out)
        return out
class ResNet(nn.Module):
    """Headless 2-D ResNet trunk: four stages plus global average pooling.

    There is no conv1/maxpool stem here; in this file Conv3dResNet.frontend3D
    produces the 64-channel input that layer1 expects.
    """

    def __init__(
        self,
        block,
        layers,
        relu_type="swish",
    ):
        """__init__.

        :param block: nn.Module class used for every stage (e.g. BasicBlock).
        :param layers: sequence of four ints, number of blocks per stage.
        :param relu_type: str, activation type forwarded to each block.
        """
        super(ResNet, self).__init__()
        self.inplanes = 64
        self.relu_type = relu_type
        self.downsample_block = METHOD_NAME

        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d(1)

    def _make_layer(self, block, planes, blocks, stride=1):
        """_make_layer.

        :param block: torch.nn.Module, class of blocks.
        :param planes: int, number of channels produced by the convolution.
        :param blocks: int, number of layers in a block.
        :param stride: int, stride of the first block of the stage.
        """
        # A projection on the identity branch is needed whenever the first
        # block changes the spatial size (stride != 1) or the channel count.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = self.downsample_block(
                inplanes=self.inplanes,
                outplanes=planes * block.expansion,
                stride=stride,
            )

        layers = []
        layers.append(
            block(
                self.inplanes,
                planes,
                stride,
                downsample,
                relu_type=self.relu_type,
            )
        )
        self.inplanes = planes * block.expansion
        # Remaining blocks of the stage keep stride 1 and need no projection.
        for _ in range(1, blocks):
            layers.append(
                block(
                    self.inplanes,
                    planes,
                    relu_type=self.relu_type,
                )
            )
        return nn.Sequential(*layers)

    def forward(self, x):
        """forward.

        :param x: torch.Tensor, input tensor with input size (B, C, H, W).
        :return: torch.Tensor of shape (B, 512 * block.expansion).
        """
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        # Flatten (B, C, 1, 1) -> (B, C).
        x = x.view(x.size(0), -1)
        return x
# -- auxiliary functions
def threeD_to_2D_tensor(x):
    """Fold the time axis of a (B, C, T, H, W) tensor into the batch axis.

    :param x: torch.Tensor of shape (B, C, T, H, W).
    :return: torch.Tensor of shape (B*T, C, H, W), frames ordered batch-major.
    """
    batch, channels, time, height, width = x.shape
    return x.transpose(1, 2).reshape(batch * time, channels, height, width)
class Conv3dResNet(nn.Module):
    """Conv3dResNet module: 3-D convolutional stem followed by a 2-D ResNet
    trunk applied per frame."""

    def __init__(self, backbone_type="resnet", relu_type="swish"):
        """__init__.

        :param backbone_type: str, the type of a visual front-end.
        :param relu_type: str, activation function used in the front-end
            ("relu", "prelu" or "swish").
        """
        super(Conv3dResNet, self).__init__()
        self.backbone_type = backbone_type

        # Channels produced by the 3-D stem / consumed by the 2-D trunk.
        self.frontend_nout = 64
        self.trunk = ResNet(
            BasicBlock,
            [2, 2, 2, 2],
            relu_type=relu_type,
        )

        # -- frontend3D
        if relu_type == "relu":
            frontend_relu = nn.ReLU(True)
        elif relu_type == "prelu":
            frontend_relu = nn.PReLU(self.frontend_nout)
        elif relu_type == "swish":
            frontend_relu = nn.SiLU(inplace=True)
        self.frontend3D = nn.Sequential(
            nn.Conv3d(
                in_channels=1,
                out_channels=self.frontend_nout,
                kernel_size=(5, 7, 7),
                stride=(1, 2, 2),
                padding=(2, 3, 3),
                bias=False,
            ),
            nn.BatchNorm3d(self.frontend_nout),
            frontend_relu,
            nn.MaxPool3d(
                kernel_size=(1, 3, 3),
                stride=(1, 2, 2),
                padding=(0, 1, 1),
            ),
        )

    def forward(self, xs_pad):
        """forward.

        :param xs_pad: torch.Tensor, batch of padded input sequences of shape
            (B, T, C, H, W) with C == 1 (single-channel frames).
        :return: torch.Tensor of shape (B, Tnew, feature_dim).
        """
        # -- include Channel dimension: (B, T, C, H, W) -> (B, C, T, H, W)
        xs_pad = xs_pad.transpose(2, 1)

        B, C, T, H, W = xs_pad.size()
        xs_pad = self.frontend3D(xs_pad)
        Tnew = xs_pad.shape[2]  # output should be B x C2 x Tnew x H x W
        # Fold time into batch so the 2-D trunk processes every frame.
        xs_pad = threeD_to_2D_tensor(xs_pad)
        xs_pad = self.trunk(xs_pad)
        # Restore the (B, T, feature) layout.
        xs_pad = xs_pad.view(B, Tnew, xs_pad.size(1))
        return xs_pad
def video_resnet():
    """Factory for the default Conv3dResNet visual front-end
    (BasicBlock [2, 2, 2, 2] trunk, swish activations).

    Fix: the original return line carried a stray trailing artifact ("... |")
    that made the statement syntactically invalid.
    """
    return Conv3dResNet()
4,683 | generate swagger readme | import argparse
import logging
from pathlib import Path
import os
from jinja2 import Environment, FileSystemLoader
from subprocess import check_call
from typing import Any
import json
_LOGGER = logging.getLogger(__name__)
_TEMPLATE = Path(__file__).resolve().parent / "template"
_TEMPLATE_TESTS = Path(__file__).resolve().parent / "template_tests"
_TEMPLATE_SAMPLES = Path(__file__).resolve().parent / "template_samples"
_TEMPLATE_CI = Path(__file__).resolve().parent / "template_ci"
_CONFIG_FILE = Path(__file__).resolve().parent / "../../swagger_to_sdk_config_dpg.json"
def check_parameters(
    output_folder: str,
) -> None:
    """Ensure *output_folder* exists, creating it (and parents) if missing.

    :param output_folder: absolute path the generated SDK is written to.
    """
    output = Path(output_folder)
    # Use pathlib consistently instead of mixing a Path with os.path calls.
    if not output.exists():
        _LOGGER.info(f'{output} does not exist and try to create it')
        output.mkdir(parents=True)
        _LOGGER.info(f'{output} is created')
def generate_ci(template_path: Path, folder_path: Path, package_name: str) -> None:
    """Create or update the service folder's ci.yml.

    Renders a new ci.yml from the template when none exists; otherwise
    appends the package entry unless the package is already listed.

    :param template_path: directory holding the ci.yml Jinja template.
    :param folder_path: service folder (its name becomes the service name).
    :param package_name: e.g. "azure-messaging-webpubsub".
    """
    ci = Path(folder_path, "ci.yml")
    service_name = folder_path.name
    safe_name = package_name.replace("-", "")
    if not ci.exists():
        # Fresh ci.yml rendered from the template.
        env = Environment(loader=FileSystemLoader(template_path), keep_trailing_newline=True)
        template = env.get_template('ci.yml')
        content = template.render(package_name=package_name, service_name=service_name, safe_name=safe_name)
    else:
        with open(ci, "r") as file_in:
            content = file_in.readlines()
        for line in content:
            # Package already registered; leave the file untouched.
            if package_name in line:
                return
        content.append(f' - name: {package_name}\n')
        content.append(f' safeName: {safe_name}\n')
    # writelines accepts both the rendered string and the line list.
    with open(ci, "w") as file_out:
        file_out.writelines(content)
def generate_test_sample(template_path: Path, target_path: Path, **kwargs: Any) -> None:
    """Render every template under *template_path* into *target_path*.

    Shared by the test- and sample-scaffolding steps; *kwargs* are passed
    to Jinja as render variables.
    """
    if not os.path.exists(target_path):
        os.makedirs(target_path)
    env = Environment(loader=FileSystemLoader(template_path), keep_trailing_newline=True)
    for template_name in env.list_templates():
        _LOGGER.info(f"generate file: {template_name}")
        template = env.get_template(template_name)
        result = template.render(**kwargs)
        with open(target_path / template_name, "w") as fd:
            fd.write(result)
def METHOD_NAME(work_path: str, env: Environment, **kwargs: Any) -> Path:
    """Render swagger/README.md (the autorest input) and return its path.

    :param work_path: SDK package output folder.
    :param env: Jinja environment holding the README.md template.
    :return: path to the rendered swagger/README.md.
    """
    _LOGGER.info("Building swagger readme")
    # check path exists
    swagger_path = Path(work_path) / Path('swagger')
    if not os.path.exists(swagger_path):
        os.makedirs(swagger_path)
    # render file
    template = env.get_template('README.md')
    # "input_file" may hold several comma-separated swagger specs.
    input_file = kwargs.pop("input_file", "").split(",")
    result = template.render(input_file=input_file, **kwargs)
    swagger_readme = swagger_path / Path('README.md')
    with open(swagger_readme, 'w') as fd:
        fd.write(result)
    return swagger_readme
def generate_toml_file(target_path: Path) -> None:
    """Write a minimal sdk_packaging.toml that disables auto-update in CI."""
    packaging_stub = "[packaging]\nauto_update = false\n"
    (target_path / "sdk_packaging.toml").write_text(packaging_stub)
def get_autorest_version() -> str:
    """Build the autorest CLI flags (--version / --use=...) from the shared
    swagger_to_sdk config file."""
    with open(_CONFIG_FILE, 'r') as file_in:
        config = json.load(file_in)
    # One --use flag per configured autorest extension.
    autorest_use = " ".join(["--use=" + item for item in config["meta"]["autorest_options"]["use"]])
    return "--version={} {}".format(config["meta"]["autorest_options"]["version"], autorest_use)
def build_package(**kwargs) -> None:
    """Run the full SDK build: CI config, swagger readme, autorest codegen,
    test/sample scaffolding and packaging stub.

    Expects at least ``output_folder``, ``package_name`` and ``input_file``
    in *kwargs* (see the CLI definition at the bottom of this module).
    """
    # prepare template render parameters
    output_folder = kwargs.get("output_folder")
    package_name = kwargs.get("package_name")
    namespace = package_name.replace('-', '.')
    kwargs['namespace'] = namespace
    kwargs['test_prefix'] = package_name.split('-')[-1]
    _LOGGER.info("Build start: %s", package_name)
    check_parameters(output_folder)
    # generate ci
    generate_ci(_TEMPLATE_CI, Path(output_folder).parent, package_name)
    # generate swagger readme
    env = Environment(loader=FileSystemLoader(_TEMPLATE), keep_trailing_newline=True)
    swagger_readme = METHOD_NAME(output_folder, env, **kwargs)
    # generate code with autorest and swagger readme
    autorest_cmd = f'autorest {swagger_readme} {get_autorest_version()} '
    _LOGGER.info(f"generate SDK code with autorest: {autorest_cmd}")
    check_call(autorest_cmd, shell=True)
    # generate test framework
    work_path = Path(output_folder)
    generate_test_sample(_TEMPLATE_TESTS, work_path / Path('tests'), **kwargs)
    # generate sample framework
    generate_test_sample(_TEMPLATE_SAMPLES, work_path / Path('samples'), **kwargs)
    # generate .toml file to avoid udpate_pr in CI
    generate_toml_file(work_path)
    _LOGGER.info("Build complete: %s", package_name)
def validate_params(**kwargs):
    """Ensure at least one security option (scope or header name) is set."""
    has_scope = bool(kwargs.get("security_scope"))
    has_header = bool(kwargs.get("security_header_name"))
    if not (has_scope or has_header):
        raise Exception('At least one of "security-scope" and "security-header-name" is needed')
def main(**kwargs):
    """Entry point: build the SDK package from parsed CLI arguments."""
    build_package(**kwargs)
if __name__ == "__main__":
    # CLI entry point: parse arguments, configure logging, validate, build.
    parser = argparse.ArgumentParser(
        description="build package for Azure SDK of data-plane for Python",
        formatter_class=argparse.RawTextHelpFormatter,
    )
    parser.add_argument(
        "--output-folder", "-o",
        dest="output_folder",
        required=True,
        help="absolute path where generated SDK package will be put"
    )
    parser.add_argument("--debug", dest="debug", action="store_true", help="Verbosity in DEBUG mode")
    parser.add_argument(
        "--input-file", "-f",
        dest="input_file",
        required=True,
        help="absolute path of swagger input file. For example: `https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/webpubsub/data-plane/WebPubSub/stable/2021-10-01/webpubsub.json`"
        " or `D:\\azure-rest-api-specs\\specification\\webpubsub\\data-plane\\WebPubSub\\stable\\2021-10-01\\webpubsub.json`",
    )
    parser.add_argument(
        "--security-scope", "-c",
        dest="security_scope",
        required=False,
        help="If authentication is AADToken, this param is necessary",
    )
    parser.add_argument(
        "--security-header-name",
        dest="security_header_name",
        required=False,
        help="If authentication is api key, this param is necessary",
    )
    parser.add_argument(
        "--package-name", "-p",
        dest="package_name",
        required=True,
        help="package name. For example: azure-messaging-webpubsub",
    )
    parser.add_argument(
        "--package-pprint-name", "-n",
        dest="package_pprint_name",
        required=True,
        help="Print name of the package. For example: Azure Web PubSub Service",
    )
    parser.add_argument(
        "--client-name", "-t",
        dest="client_name",
        required=True,
        help="client name. For example: WebPubSubServiceClient",
    )
    args = parser.parse_args()
    # Root logger at INFO so build progress is visible on the console.
    main_logger = logging.getLogger()
    logging.basicConfig()
    main_logger.setLevel(logging.INFO)
    parameters = vars(args)
    validate_params(**parameters)
    main(**parameters)
4,684 | test shim translates set command | # -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for the webcfg command."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import json
import os
from unittest import mock
from gslib.commands import web
import gslib.tests.testcase as testcase
from gslib.tests.testcase.integration_testcase import SkipForS3
from gslib.tests.util import ObjectToURI as suri
from gslib.tests.util import SetBotoConfigForTest
from gslib.tests.util import SetEnvironmentForTest
from gslib.utils import shim_util
WEBCFG_FULL = json.loads('{"notFoundPage": "404", "mainPageSuffix": "main"}\n')
WEBCFG_MAIN = json.loads('{"mainPageSuffix": "main"}\n')
WEBCFG_ERROR = json.loads('{"notFoundPage": "404"}\n')
WEBCFG_EMPTY = 'has no website configuration'
@SkipForS3('Web set not supported for S3, web get returns XML.')
class TestWeb(testcase.GsUtilIntegrationTestCase):
  """Integration tests for the web command."""

  _set_web_cmd = ['web', 'set']
  _get_web_cmd = ['web', 'get']

  def test_full(self):
    """Tests setting both main page suffix and error page."""
    bucket_uri = self.CreateBucket()
    self.RunGsUtil(
        self._set_web_cmd +
        ['-m', 'main', '-e', '404', suri(bucket_uri)])
    stdout = self.RunGsUtil(self._get_web_cmd + [suri(bucket_uri)],
                            return_stdout=True)
    if self._use_gcloud_storage:
      self.assertIn('"mainPageSuffix": "main"', stdout)
      self.assertIn('"notFoundPage": "404"', stdout)
    else:
      # assertEqual: assertEquals is a deprecated alias, removed in Py3.12.
      self.assertEqual(json.loads(stdout), WEBCFG_FULL)

  def test_main(self):
    """Tests setting only the main page suffix."""
    bucket_uri = self.CreateBucket()
    self.RunGsUtil(self._set_web_cmd + ['-m', 'main', suri(bucket_uri)])
    stdout = self.RunGsUtil(self._get_web_cmd + [suri(bucket_uri)],
                            return_stdout=True)
    if self._use_gcloud_storage:
      self.assertEqual('{\n "mainPageSuffix": "main"\n}\n', stdout)
    else:
      self.assertEqual(json.loads(stdout), WEBCFG_MAIN)

  def test_error(self):
    """Tests setting only the error (404) page."""
    bucket_uri = self.CreateBucket()
    self.RunGsUtil(self._set_web_cmd + ['-e', '404', suri(bucket_uri)])
    stdout = self.RunGsUtil(self._get_web_cmd + [suri(bucket_uri)],
                            return_stdout=True)
    if self._use_gcloud_storage:
      self.assertEqual('{\n "notFoundPage": "404"\n}\n', stdout)
    else:
      self.assertEqual(json.loads(stdout), WEBCFG_ERROR)

  def test_empty(self):
    """Tests that setting nothing clears the website configuration."""
    bucket_uri = self.CreateBucket()
    self.RunGsUtil(self._set_web_cmd + [suri(bucket_uri)])
    stdout = self.RunGsUtil(self._get_web_cmd + [suri(bucket_uri)],
                            return_stdout=True)
    if self._use_gcloud_storage:
      self.assertEqual('[]\n', stdout)
    else:
      self.assertIn(WEBCFG_EMPTY, stdout)

  def testTooFewArgumentsFails(self):
    """Ensures web commands fail with too few arguments."""
    # No arguments for get, but valid subcommand.
    stderr = self.RunGsUtil(self._get_web_cmd,
                            return_stderr=True,
                            expected_status=1)
    self.assertIn('command requires at least', stderr)

    # No arguments for set, but valid subcommand.
    stderr = self.RunGsUtil(self._set_web_cmd,
                            return_stderr=True,
                            expected_status=1)
    self.assertIn('command requires at least', stderr)

    # Neither arguments nor subcommand.
    stderr = self.RunGsUtil(['web'], return_stderr=True, expected_status=1)
    self.assertIn('command requires at least', stderr)
class TestWebShim(testcase.GsUtilUnitTestCase):
  """Unit tests verifying the gsutil->gcloud storage shim translation of
  the web command (dry-run mode only prints the translated command)."""

  @mock.patch.object(web.WebCommand, '_GetWeb', new=mock.Mock())
  def test_shim_translates_get_command(self):
    # 'web get' should become 'storage buckets describe' with a gsutiljson
    # format string.
    with SetBotoConfigForTest([('GSUtil', 'use_gcloud_storage', 'True'),
                               ('GSUtil', 'hidden_shim_mode', 'dry_run')]):
      with SetEnvironmentForTest({
          'CLOUDSDK_CORE_PASS_CREDENTIALS_TO_GSUTIL': 'True',
          'CLOUDSDK_ROOT_DIR': 'fake_dir',
      }):
        mock_log_handler = self.RunCommand('web', [
            'get',
            'gs://bucket',
        ],
                                           return_log_handler=True)
        info_lines = '\n'.join(mock_log_handler.messages['info'])
        self.assertIn(
            ('Gcloud Storage Command: {} storage buckets describe'
             ' --format="gsutiljson[key=website_config,empty=\' has no website'
             ' configuration.\',empty_prefix_key=storage_url]"'
             ' --raw gs://bucket').format(
                 shim_util._get_gcloud_binary_path('fake_dir')), info_lines)

  @mock.patch.object(web.WebCommand, '_SetWeb', new=mock.Mock())
  def METHOD_NAME(self):
    # 'web set -e/-m' should become 'storage buckets update' with the
    # corresponding --web-* flags.
    with SetBotoConfigForTest([('GSUtil', 'use_gcloud_storage', 'True'),
                               ('GSUtil', 'hidden_shim_mode', 'dry_run')]):
      with SetEnvironmentForTest({
          'CLOUDSDK_CORE_PASS_CREDENTIALS_TO_GSUTIL': 'True',
          'CLOUDSDK_ROOT_DIR': 'fake_dir',
      }):
        mock_log_handler = self.RunCommand('web', [
            'set',
            '-e',
            '404',
            '-m',
            'main',
            'gs://bucket',
        ],
                                           return_log_handler=True)
        info_lines = '\n'.join(mock_log_handler.messages['info'])
        self.assertIn(
            ('Gcloud Storage Command: {} storage buckets update'
             ' --web-error-page 404 --web-main-page-suffix main gs://bucket'
             ).format(shim_util._get_gcloud_binary_path('fake_dir')), info_lines)

  @mock.patch.object(web.WebCommand, '_SetWeb', new=mock.Mock())
  def test_shim_translates_clear_command(self):
    # 'web set' with no flags should clear both website settings.
    with SetBotoConfigForTest([('GSUtil', 'use_gcloud_storage', 'True'),
                               ('GSUtil', 'hidden_shim_mode', 'dry_run')]):
      with SetEnvironmentForTest({
          'CLOUDSDK_CORE_PASS_CREDENTIALS_TO_GSUTIL': 'True',
          'CLOUDSDK_ROOT_DIR': 'fake_dir',
      }):
        mock_log_handler = self.RunCommand('web', ['set', 'gs://bucket'],
                                           return_log_handler=True)
        info_lines = '\n'.join(mock_log_handler.messages['info'])
        self.assertIn(('Gcloud Storage Command: {} storage buckets update'
                       ' --clear-web-error-page --clear-web-main-page-suffix'
                       ' gs://bucket').format(
                           shim_util._get_gcloud_binary_path('fake_dir')),
                      info_lines)
class TestWebOldAlias(TestWeb):
  """Re-runs all TestWeb cases through the deprecated command aliases."""
  _set_web_cmd = ['setwebcfg']
  _get_web_cmd = ['getwebcfg']
4,685 | jacobian adjoint action | """ Solves a optimal control problem constrained by the Poisson equation:
min_(u, m) \int_\Omega 1/2 || u - d ||^2 + 1/2 || f ||^2
subjecct to
grad \cdot \grad u = f in \Omega
u = 0 on \partial \Omega
"""
from dolfin import *
from dolfin_adjoint import *
try:
import Optizelle
except ImportError:
info_red("Optizelle unavailable, skipping test")
import sys; sys.exit(0)
# Silence FEniCS logging and disable adjoint factorization caching.
set_log_level(ERROR)
parameters["adjoint"]["cache_factorizations"] = False

# Create mesh
n = 50
mesh = UnitIntervalMesh(n)

# Define discrete function spaces and functions
V = FunctionSpace(mesh, "CG", 1)
W = FunctionSpace(mesh, "CG", 1)
f = interpolate(Constant(0.11), W, name='Control')
u = Function(V, name='State')
v = TestFunction(V)

# Define and solve the Poisson equation to generate the dolfin-adjoint annotation
F = (inner(grad(u), grad(v)) - f*v)*dx
bc = DirichletBC(V, 0.0, "on_boundary")
solve(F == 0, u, bc)

# Define functional of interest and the reduced functional
x = SpatialCoordinate(mesh)
d = 1/(2*pi**2)*sin(pi*x[0])  # target state (analytic solution shape)
alpha = Constant(1e-10)  # Tikhonov regularisation weight
J = Functional((0.5*inner(u-d, u-d))*dx + alpha/2*f**2*dx)
control = Control(f)
rf = ReducedFunctional(J, control)
# Volume constraints
class VolumeConstraint(InequalityConstraint):
    """A class that enforces the volume constraint g(a) = V - a*dx >= 0."""

    def __init__(self, Vol):
        # Target volume V for the control.
        self.Vol = float(Vol)

        # The derivative of the constraint g(x) is constant (it is the
        # diagonal of the lumped mass matrix for the control function space),
        # so let's assemble it here once.  This is also useful in rapidly
        # calculating the integral each time without re-assembling.
        self.smass = assemble(TestFunction(W) * Constant(1) * dx)
        self.tmpvec = Function(W, name="Control")

    def function(self, m):
        """Evaluate g(m) = V - \\int m dx; positive when feasible."""
        self.tmpvec.assign(m)

        # Compute the integral of the control over the domain
        integral = self.smass.inner(self.tmpvec.vector())
        vecmax = m.vector().max()
        vecmin = m.vector().min()
        if MPI.rank(MPI.comm_world) == 0:
            # Only the root rank reports progress.
            print("Current control integral: ", integral)
            print("Maximum of control: ", vecmax)
            print("Minimum of control: ", vecmin)
        return [self.Vol - integral]

    def jacobian_action(self, m, dm, result):
        """Apply the (constant) constraint Jacobian to a perturbation dm."""
        result[:] = self.smass.inner(-dm.vector())
        #print "Returning Volume Jacobian action in direction %s is %s" % (dm.vector().array(), result)

    def METHOD_NAME(self, m, dp, result):
        """Apply the adjoint (transpose) of the Jacobian to the dual dp."""
        result.vector()[:] = interpolate(Constant(-dp[0]), W).vector()

    def hessian_action(self, m, dm, dp, result):
        """The constraint is linear, so its Hessian action is zero."""
        result.vector().zero()

    def output_workspace(self):
        """Workspace matching the constraint output: a single scalar."""
        return [0.0]

    def length(self):
        """Number of scalar constraint components."""
        return 1
# Constrained optimisation: volume constraint plus pointwise bounds
# 0.1 <= f <= 0.4 on the control.
Vol = 0.2
Vconst = VolumeConstraint(Vol)
ub = Function(W)
ub.vector()[:] = 0.4
lb = 0.1
problem = MinimizationProblem(rf, bounds=[(lb, ub)], constraints=Vconst)
parameters = {
    "maximum_iterations": 10,
    "optizelle_parameters":
    {
        "msg_level" : 10,
        "algorithm_class" : Optizelle.AlgorithmClass.TrustRegion,
        "H_type" : Optizelle.Operators.UserDefined,
        "dir" : Optizelle.LineSearchDirection.BFGS,
        #"ipm": Optizelle.InteriorPointMethod.PrimalDualLinked,
        "linesearch_iter_max" : 50,
        "krylov_iter_max" : 10,
        "eps_krylov" : 1e-4,
        "eps_dx" : 1e-10,
        "sigma": 0.001,
        "gamma": 0.995,
    }
}
solver = OptizelleSolver(problem, parameters=parameters)
f_opt = solver.solve()
cmax = f_opt.vector().max()
cmin = f_opt.vector().min()
print("f min: {} (should be more than {})".format(cmin, lb))
print("f max: {} (should be less than {})".format(cmax, ub.vector().max()))

# Check that the bounds are satisfied
assert cmin >= lb
assert cmax <= ub.vector().max()
# The volume constraint should be met up to solver tolerance.
assert abs(assemble(f_opt*dx) - Vol) < 1e-3

# Check that the functional value is below the threshold
assert rf(f_opt) < 2e-4
4,686 | log distribution | # Copyright 2020 Adap GmbH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Partitioned versions of CIFAR-10/100 datasets."""
# pylint: disable=invalid-name
from typing import List, Tuple, cast
import numpy as np
XY = Tuple[np.ndarray, np.ndarray]
XYList = List[XY]
PartitionedDataset = Tuple[XYList, XYList]
np.random.seed(2020)
def float_to_int(i: float) -> int:
    """Return *i* as an int, raising if the cast would drop a fraction.

    :raises ValueError: if ``i`` has a non-zero fractional part.
        (ValueError subclasses Exception, so existing ``except Exception``
        callers keep working; the message now includes the value.)
    """
    if not i.is_integer():
        raise ValueError(f"Cast would drop decimals: {i}")
    return int(i)
def sort_by_label(x: np.ndarray, y: np.ndarray) -> XY:
    """Reorder (x, y) jointly so labels form ascending blocks.

    With two labels and four examples the resulting label order is
    1, 1, 2, 2.
    """
    order = np.argsort(y, axis=0).reshape(y.shape[0])
    return (x[order], y[order])
def sort_by_label_repeating(x: np.ndarray, y: np.ndarray) -> XY:
    """Reorder (x, y) so labels cycle round-robin (e.g. 1, 2, 1, 2).

    Assumes every class occurs equally often.  After a plain label sort,
    the indices are laid out as a (num_class, per_class) grid; reading
    that grid column-wise yields the repeating order, e.g. for
    y = [0,0,...,9,9] the result is [0,1,...,9,0,1,...,9].
    """
    x, y = sort_by_label(x, y)
    total = x.shape[0]
    classes = np.unique(y).shape[0]
    order = (
        np.arange(total, dtype=np.int64)
        .reshape(classes, total // classes)
        .T
        .reshape(total)
    )
    return (x[order], y[order])
def split_at_fraction(x: np.ndarray, y: np.ndarray, fraction: float) -> Tuple[XY, XY]:
    """Split (x, y) into two pairs at *fraction* of the example count.

    Raises if the fraction does not land on a whole example index.
    """
    cut = float_to_int(x.shape[0] * fraction)
    before = (x[:cut], y[:cut])   # everything BEFORE the cut
    after = (x[cut:], y[cut:])    # everything AFTER the cut
    return before, after
def shuffle(x: np.ndarray, y: np.ndarray) -> XY:
    """Apply one random permutation to x and y, keeping rows aligned."""
    order = np.random.permutation(len(x))
    return x[order], y[order]
def partition(x: np.ndarray, y: np.ndarray, num_partitions: int) -> List[XY]:
    """Split x and y into *num_partitions* equal pieces, paired up."""
    x_parts = np.split(x, num_partitions)
    y_parts = np.split(y, num_partitions)
    return [(xp, yp) for xp, yp in zip(x_parts, y_parts)]
def combine_partitions(xy_list_0: XYList, xy_list_1: XYList) -> XYList:
    """Concatenate partition i of the first list with partition i of the
    second, returning one combined partition list."""
    combined = []
    for (x_a, y_a), (x_b, y_b) in zip(xy_list_0, xy_list_1):
        merged_x = np.concatenate([x_a, x_b], axis=0)
        merged_y = np.concatenate([y_a, y_b], axis=0)
        combined.append((merged_x, merged_y))
    return combined
def shift(x: np.ndarray, y: np.ndarray) -> XY:
    """Shift x, y so that the first half contains only the lower half of the
    label range (e.g. 0-4) and the second half the upper half (e.g. 5-9),
    with each half internally shuffled."""
    x, y = sort_by_label(x, y)

    # Split the label-sorted data in the middle, then shuffle within halves.
    (x_0, y_0), (x_1, y_1) = split_at_fraction(x, y, fraction=0.5)
    (x_0, y_0), (x_1, y_1) = shuffle(x_0, y_0), shuffle(x_1, y_1)
    x, y = np.concatenate([x_0, x_1], axis=0), np.concatenate([y_0, y_1], axis=0)
    return x, y
def create_partitions(
    unpartitioned_dataset: XY,
    iid_fraction: float,
    num_partitions: int,
) -> XYList:
    """Create partitioned version of a training or test set.

    A fraction *iid_fraction* of the data is distributed IID across the
    partitions; the remainder is label-shifted first, producing partially
    non-IID partitions.  Currently tested and supported are MNIST,
    FashionMNIST and CIFAR-10/100.
    """
    x, y = unpartitioned_dataset

    x, y = shuffle(x, y)
    x, y = sort_by_label_repeating(x, y)

    (x_0, y_0), (x_1, y_1) = split_at_fraction(x, y, fraction=iid_fraction)

    # Shift in second split of dataset the classes into two groups
    x_1, y_1 = shift(x_1, y_1)

    xy_0_partitions = partition(x_0, y_0, num_partitions)
    xy_1_partitions = partition(x_1, y_1, num_partitions)

    xy_partitions = combine_partitions(xy_0_partitions, xy_1_partitions)

    # Adjust x and y shape
    return [adjust_xy_shape(xy) for xy in xy_partitions]
def create_partitioned_dataset(
    keras_dataset: Tuple[XY, XY],
    iid_fraction: float,
    num_partitions: int,
) -> Tuple[PartitionedDataset, XY]:
    """Create partitioned version of keras dataset.

    Returns ((train_partitions, test_partitions), full_test_set).
    Currently tested and supported are MNIST, FashionMNIST and
    CIFAR-10/100.
    """
    xy_train, xy_test = keras_dataset

    xy_train_partitions = create_partitions(
        unpartitioned_dataset=xy_train,
        iid_fraction=iid_fraction,
        num_partitions=num_partitions,
    )

    xy_test_partitions = create_partitions(
        unpartitioned_dataset=xy_test,
        iid_fraction=iid_fraction,
        num_partitions=num_partitions,
    )

    # Also return the full (shape-adjusted) test set for central evaluation.
    return (xy_train_partitions, xy_test_partitions), adjust_xy_shape(xy_test)
def METHOD_NAME(xy_partitions: XYList) -> None:
    """Print the label distribution (labels and counts) of each partition."""
    summaries = [np.unique(labels, return_counts=True) for _, labels in xy_partitions]
    for summary in summaries:
        print(summary)
def adjust_xy_shape(xy: XY) -> XY:
    """Adjust shape of both x and y.

    Adds a trailing channel axis to 3-D x and flattens 2-D column y.
    """
    x, y = xy
    if x.ndim == 3:
        x = adjust_x_shape(x)
    if y.ndim == 2:
        y = adjust_y_shape(y)
    return (x, y)
def adjust_x_shape(nda: np.ndarray) -> np.ndarray:
    """Append a trailing channel axis: (x, y, z) -> (x, y, z, 1)."""
    expanded = nda.reshape((nda.shape[0], nda.shape[1], nda.shape[2], 1))
    return cast(np.ndarray, expanded)
def adjust_y_shape(nda: np.ndarray) -> np.ndarray:
    """Flatten a label column (x, 1) into a 1-D vector (x,)."""
    flattened = np.reshape(nda, (nda.shape[0]))
    return cast(np.ndarray, flattened)
4,687 | rcodesign notary wait | #!/usr/bin/env python
"""Functions that interface with rcodesign"""
import asyncio
import logging
import re
from signingscript.exceptions import SigningScriptError
log = logging.getLogger(__name__)
class RCodesignError(SigningScriptError):
    """Raised when an rcodesign invocation or Apple notarization fails."""
    pass
async def _execute_command(command):
    """Executes a command, logging output, and return exitcode and output lines

    Args:
        command (list<str>): The command (argv list) to be run

    Returns:
        (Tuple) exit code, log lines
    """
    log.info("Running command: {}".format(" ".join(command)))
    proc = await asyncio.create_subprocess_exec(
        *command,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    log.info("COMMAND OUTPUT: ")
    output_lines = []
    # NOTE(review): stdout/stderr are read in lockstep, which can stall if
    # one stream is much chattier than the other — confirm acceptable for
    # rcodesign's output volumes.
    while True:
        # If at EOF, stop
        if proc.stdout.at_eof() and proc.stderr.at_eof():
            break
        # Handle stdout
        stdout = (await proc.stdout.readline()).decode("utf-8").rstrip()
        if stdout:
            log.info(stdout)
            output_lines.append(stdout)
        # Handle stderr
        stderr = (await proc.stderr.readline()).decode("utf-8").rstrip()
        if stderr:
            # Unfortunately a lot of outputs from rcodesign come out to stderr
            # logger.warn is a deprecated alias (removed in Python 3.13);
            # use warning().
            log.warning(stderr)
            output_lines.append(stderr)
    exitcode = await proc.wait()
    log.info("exitcode {}".format(exitcode))
    return exitcode, output_lines
def find_submission_id(logs):
    """Given notarization logs, find and return the submission id

    Args:
        logs (list<str>): Notarization logs

    Returns:
        (str) The submission id

    Raises:
        RCodesignError: if no id, or more than one distinct id, is found.
    """
    ids = set()
    for line in logs:
        if "created submission ID: " in line:
            ids.add(line.split(": ")[-1])
    if len(ids) > 1:
        log.error(f"Submission ids: {str(ids)}")
        raise RCodesignError("More than one submission id found in the logs")
    if not ids:
        # Previously this fell through to set().pop(), raising a bare
        # KeyError; fail with a descriptive error instead.
        raise RCodesignError("No submission id found in the logs")
    return ids.pop()
async def rcodesign_notarize(app_path, creds_path, staple=False):
    """Call rcodesign notary-submit

    Args:
        app_path (str): Path to notarize
        creds_path (str): Path to credentials file
        staple (boolean): If rcodesign should staple (wait and staple in one go)

    Returns:
        (str) The submission id

    Raises:
        RCodesignError: if rcodesign exits non-zero.
    """
    command = ["rcodesign", "notary-submit"]
    if staple:
        command.append("--staple")
    command.extend(["--api-key-path", creds_path, app_path])
    exitcode, logs = await _execute_command(command)
    if exitcode > 0:
        raise RCodesignError(f"Error notarizing app. Exit code {exitcode}")
    # The submission id is parsed out of the command output.
    return find_submission_id(logs)
async def METHOD_NAME(submission_id, creds_path):
    """Polls Apple services for notarization status

    Blocks (via rcodesign notary-wait) until the submission reaches a
    terminal state, then validates that state.

    Args:
        submission_id (str): Notary submission id
        creds_path (str): Path to credentials

    Raises:
        RCodesignError: if polling fails or notarization was rejected.
    """
    command = [
        "rcodesign",
        "notary-wait",
        "--api-key-path",
        creds_path,
        submission_id,
    ]
    log.info(f"Polling Apple Notary service for notarization status. Submission ID {submission_id}")
    exitcode, logs = await _execute_command(command)
    if exitcode > 0:
        raise RCodesignError(f"Error polling notary service. Exit code {exitcode}")
    # rcodesign can exit 0 even on failure; inspect the poll states too.
    await rcodesign_check_result(logs)
    return
async def rcodesign_check_result(logs):
    """Scan notary-wait output and raise if notarization was rejected.

    rcodesign can exit 0 even when notarization failed, so the poll state
    lines are inspected explicitly.  The first terminal state wins;
    InProgress lines (and anything unrecognised) are skipped.

    Args:
        logs (list<str>): output from polling result
    """
    terminal_states = (
        (re.compile("^poll state after.*Accepted$"), True),
        (re.compile("^poll state after.*Invalid$"), False),
    )
    for line in logs:
        for pattern, succeeded in terminal_states:
            if pattern.search(line):
                if succeeded:
                    return
                raise RCodesignError("Notarization failed!")
    return
async def rcodesign_staple(path):
    """Staples a given app

    Attaches the notarization ticket to the binary so Gatekeeper can
    verify it offline.

    Args:
        path (str): Path to be stapled

    Raises:
        RCodesignError: if rcodesign exits non-zero.
    """
    command = [
        "rcodesign",
        "staple",
        path,
    ]
    log.info(f"Stapling binary at path {path}")
    exitcode, _ = await _execute_command(command)
    if exitcode > 0:
        raise RCodesignError(f"Error stapling notarization. Exit code {exitcode}")
    return
4,688 | get wireguard file | # Copyright 2017 the Isard-vdi project authors:
# Josep Maria Viñolas Auquer
# Alberto Larraz Dalmases
# License: AGPLv3
#!/usr/bin/env python
# coding=utf-8
import os
import traceback
from pydoc import describe
from rethinkdb import RethinkDB
from api import app
from .._common.api_exceptions import Error
from ..libv2.log import *
r = RethinkDB()
from ..libv2.flask_rethink import RDB
db = RDB(app)
db.init_app(app)
class isardVpn:
    """Builds WireGuard configuration files and install scripts for the
    three peer kinds: users, hypervisors and remotevpn devices."""

    def __init__(self):
        pass

    def vpn_data(self, vpn, kind, op_sys, itemid=False):
        """Return a downloadable VPN artifact for the given peer.

        :param vpn: peer kind: "users", "hypers" or "remotevpn".
        :param kind: "config" for a plain wireguard conf file, "install"
            for a setup script.
        :param op_sys: client OS name; selects the install-script extension.
        :param itemid: id of the user/hypervisor/remotevpn record.
        :return: dict describing a downloadable file (name, ext, mime, content).
        :raises Error: on missing itemid, unknown kind, or missing vpn data.
        """
        if vpn == "users":
            if itemid == False:
                raise Error(
                    "bad_request",
                    "Vpn missing itemid",
                    traceback.format_exc(),
                    description_code="vpn_missing_itemid",
                )
            with app.app_context():
                wgdata = r.table("users").get(itemid).pluck("id", "vpn").run(db.conn)
            port = os.environ.get("WG_USERS_PORT", "443")
            mtu = "1420"
            # Wireguard Windows client doesn't support PostUp empty value
            # colon command does nothing on Windows and GNU/Linux
            postup = ":"
            endpoint = os.environ["DOMAIN"]
        elif vpn == "hypers":
            with app.app_context():
                hyper = (
                    r.table("hypervisors")
                    .get(itemid)
                    .pluck("id", "isard_hyper_vpn_host", "vpn")
                    .run(db.conn)
                )
            wgdata = hyper
            port = "4443"
            mtu = os.environ.get("VPN_MTU", "1600")
            # Clamp MSS so tunneled TCP fits within the tunnel MTU.
            postup = "iptables -A FORWARD -p tcp --tcp-flags SYN,RST SYN -j TCPMSS --clamp-mss-to-pmtu"
            endpoint = hyper.get("isard_hyper_vpn_host", "isard-vpn")
        elif vpn == "remotevpn":
            if not itemid:
                raise Error(
                    "bad_request",
                    "Vpn missing itemid",
                    traceback.format_exc(),
                    description_code="vpn_missing_itemid",
                )
            with app.app_context():
                wgdata = (
                    r.table("remotevpn").get(itemid).pluck("id", "vpn").run(db.conn)
                )
            port = "443"
            mtu = os.environ.get("VPN_MTU", "1600")
            # Windows client doesn't support PostUp empty value
            # colon command does nothing on Windows and GNU/Linux
            postup = ":"
            endpoint = os.environ["DOMAIN"]
        else:
            raise Error(
                "not_found",
                "Vpn kind does not exist",
                traceback.format_exc(),
                description_code="vpn_kind_not_found",
            )
        if wgdata == None or "vpn" not in wgdata.keys():
            raise Error(
                "not_found",
                "Vpn data not found for user",
                traceback.format_exc(),
                description_code="vpn_data_not_found",
            )

        ## First up time the wireguard config keys are missing till isard-vpn populates it.
        if not getattr(app, "wireguard_server_keys", False):
            if vpn == "hypers":
                vpn_kind_keys = "vpn_hypers"
            else:
                vpn_kind_keys = "vpn_users"
            sysconfig = r.db("isard").table("config").get(1).run(db.conn)
            wireguard_server_keys = (
                sysconfig.get(vpn_kind_keys, {}).get("wireguard", {}).get("keys", False)
            )
        if not wireguard_server_keys:
            raise Error(
                "precondition_required",
                "There are no wireguard keys in webapp config yet. Try again in a few seconds...",
                traceback.format_exc(),
                description_code="no_wireguard_keys",
            )

        # Positional args consumed by METHOD_NAME below.
        wireguard_data = [endpoint, wgdata, port, mtu, postup, wireguard_server_keys]
        if kind == "config":
            return {
                "kind": "file",
                "name": "isard-vpn",
                "ext": "conf",
                "mime": "text/plain",
                "content": self.METHOD_NAME(*wireguard_data),
            }
        elif kind == "install":
            ext = "sh" if op_sys == "Linux" else "vb"
            return {
                "kind": "file",
                "name": "isard-vpn-setup",
                "ext": ext,
                "mime": "text/plain",
                "content": self.get_wireguard_install_script(wireguard_data),
            }
        raise Error(
            "internal_server",
            "Unable to process vpn file",
            traceback.format_exc(),
            description_code="unable_to_process_vpnfile",
        )

    def METHOD_NAME(
        self, endpoint, peer, port, mtu, postup, wireguard_server_keys
    ):
        """Render the wireguard .conf text for *peer* against this server."""
        return """[Interface]
Address = %s
PrivateKey = %s
MTU = %s
PostUp = %s
[Peer]
PublicKey = %s
Endpoint = %s:%s
AllowedIPs = %s
PersistentKeepalive = 25
""" % (
            peer["vpn"]["wireguard"]["Address"],
            peer["vpn"]["wireguard"]["keys"]["private"],
            mtu,
            postup,
            wireguard_server_keys["public"],
            endpoint,
            port,
            peer["vpn"]["wireguard"]["AllowedIPs"],
        )

    def get_wireguard_install_script(self, wireguard_data):
        """Render a Debian/Ubuntu shell script that installs wireguard,
        builds the NetworkManager plugin and writes the peer config."""
        wireguard_file_contents = self.METHOD_NAME(*wireguard_data)
        return f"""#!/bin/bash
echo "Installing wireguard. Ubuntu/Debian script."
apt install -y wireguard git dh-autoreconf libglib2.0-dev intltool build-essential libgtk-3-dev libnma-dev libsecret-1-dev network-manager-dev resolvconf
git clone https://github.com/max-moser/network-manager-wireguard
cd network-manager-wireguard
./autogen.sh --without-libnm-glib
./configure --without-libnm-glib --prefix=/usr --sysconfdir=/etc --libdir=/usr/lib/x86_64-linux-gnu --libexecdir=/usr/lib/NetworkManager --localstatedir=/var
make
sudo make install
cd ..
echo "{wireguard_file_contents}" > isard-vpn.conf
echo "You have your user vpn configuration to use it with NetworkManager: isard-vpn.conf"""
4,689 | get first or none | """
A simple XPath-like language for tree traversal.
This works by creating a filter chain of generator functions. Each
function selects a part of the expression, e.g. a child node, a
specific descendant or a node that holds an attribute.
"""
from __future__ import absolute_import
import re
import operator
import sys
if sys.version_info[0] >= 3:
_unicode = str
else:
_unicode = unicode
path_tokenizer = re.compile(
r"("
r"'[^']*'|\"[^\"]*\"|"
r"//?|"
r"\(\)|"
r"==?|"
r"[/.*\[\]()@])|"
r"([^/\[\]()@=\s]+)|"
r"\s+"
).findall
def iterchildren(node, attr_name):
    """Return the child node(s) stored on *node* under *attr_name*.

    Always yields an iterable: the list itself, a one-element list for a
    single child, or an empty tuple when the attribute is None.
    """
    value = getattr(node, attr_name)
    if value is None:
        return ()
    # Exact list check (not isinstance) to match the tree node convention.
    if type(value) is list:
        return value
    return [value]
def METHOD_NAME(it):
    """Return the first item of iterator *it*, or None if it is exhausted.

    The builtin ``next(iterator, default)`` exists since Python 2.6 and
    dispatches to ``.next()`` on Py2 / ``__next__()`` on Py3, so the
    hand-rolled version-bridge try/except is unnecessary.
    """
    return next(it, None)
def type_name(node):
    """Return the unqualified class name of *node*."""
    cls_name = node.__class__.__name__
    return cls_name.split('.')[-1]
def parse_func(next, token):
    """Parse a ``name(...)`` call and return (name, predicate-filter).

    *token* holds the function name; the following token must open the
    argument list.
    """
    name = token[1]
    token = next()
    if token[0] != '(':
        raise ValueError("Expected '(' after function name '%s'" % name)
    predicate = handle_predicate(next, token)
    return name, predicate
def handle_func_not(next, token):
    """
    not(...)
    """
    name, predicate = parse_func(next, token)

    def select(result):
        # A node passes when the inner predicate yields nothing for it.
        for node in result:
            if METHOD_NAME(predicate([node])) is None:
                yield node
    return select
def handle_name(next, token):
    """
    /NodeName/
    or
    func(...)
    """
    name = token[1]
    if name in functions:
        # Registered function name (e.g. "not"): delegate to its handler.
        return functions[name](next, token)

    def select(result):
        # Yield each child whose class name matches the step name.
        for node in result:
            for attr_name in node.child_attrs:
                for child in iterchildren(node, attr_name):
                    if type_name(child) == name:
                        yield child
    return select
def handle_star(next, token):
    """
    /*/
    """
    def select(result):
        # Yield every child of every node, regardless of type.
        for node in result:
            for name in node.child_attrs:
                for child in iterchildren(node, name):
                    yield child
    return select
def handle_dot(next, token):
    """Handle the '.' step: select the current node set unchanged."""
    def identity(result):
        # '.' is the identity filter over the incoming node set.
        return result
    return identity
def handle_descendants(next, token):
    """
    //...
    """
    token = next()
    if token[0] == "*":
        # '//*': recurse through all descendants of any type.
        def iter_recursive(node):
            for name in node.child_attrs:
                for child in iterchildren(node, name):
                    yield child
                    for c in iter_recursive(child):
                        yield c
    elif not token[0]:
        # '//Name': recurse, yielding only matching descendants (but still
        # descending through non-matching ones).
        node_name = token[1]

        def iter_recursive(node):
            for name in node.child_attrs:
                for child in iterchildren(node, name):
                    if type_name(child) == node_name:
                        yield child
                    for c in iter_recursive(child):
                        yield c
    else:
        raise ValueError("Expected node name after '//'")

    def select(result):
        for node in result:
            for child in iter_recursive(node):
                yield child
    return select
def handle_attribute(next, token):
    """Handle '@name' or '@name = value' steps: yield attribute values,
    optionally filtered to those equal to a literal value."""
    token = next()
    if token[0]:
        raise ValueError("Expected attribute name")
    name = token[1]
    value = None
    try:
        token = next()
    except StopIteration:
        # '@name' at end of path: no value comparison.
        pass
    else:
        if token[0] == '=':
            value = parse_path_value(next)
    readattr = operator.attrgetter(name)
    if value is None:
        def select(result):
            # Yield the attribute value of each node that has it (non-None).
            for node in result:
                try:
                    attr_value = readattr(node)
                except AttributeError:
                    continue
                if attr_value is not None:
                    yield attr_value
    else:
        def select(result):
            # Yield only attribute values equal to the literal.
            for node in result:
                try:
                    attr_value = readattr(node)
                except AttributeError:
                    continue
                if attr_value == value:
                    yield attr_value
                elif (isinstance(attr_value, bytes) and isinstance(value, _unicode) and
                        attr_value == value.encode()):
                    # allow a bytes-to-string comparison too
                    yield attr_value
    return select
def parse_path_value(next):
    """Parse a literal predicate value: quoted string, integer or boolean."""
    token = next()
    special, text = token
    if special:
        # Quoted strings and numeric literals arrive in the "special" slot.
        if special[:1] in ("'", '"'):
            return special[1:-1]
        try:
            return int(special)
        except ValueError:
            pass
    elif text.isdigit():
        return int(text)
    else:
        lowered = text.lower()
        if lowered == 'true':
            return True
        if lowered == 'false':
            return False
    raise ValueError("Invalid attribute predicate: '%s'" % special)
def handle_predicate(next, token):
    """
    [...] predicate filter.

    Consumes tokens up to the closing ']' (or an 'and' keyword, which chains
    into logical_and) and returns a selector keeping only the nodes for which
    the inner path matches at least one item.
    """
    token = next()
    selector = []
    while token[0] != ']':
        selector.append( operations[token[0]](next, token) )
        try:
            token = next()
        except StopIteration:
            break
        else:
            if token[0] == "/":
                # '/' only separates steps inside the predicate; skip it.
                token = next()
        if not token[0] and token[1] == 'and':
            return logical_and(selector, handle_predicate(next, token))
    def select(result):
        for node in result:
            subresult = iter((node,))
            # Run the inner selector chain against this single node.
            for select in selector:
                subresult = select(subresult)
            # METHOD_NAME yields the first item or None; any match keeps the node.
            predicate_result = METHOD_NAME(subresult)
            if predicate_result is not None:
                yield node
    return select
def logical_and(lhs_selects, rhs_select):
    """
    Combine predicates: keep nodes matched by the left-hand selector chain
    AND by the right-hand selector.
    """
    def select(result):
        for node in result:
            subresult = iter((node,))
            for select in lhs_selects:
                subresult = select(subresult)
            # First match (or None) from the left-hand side.
            predicate_result = METHOD_NAME(subresult)
            subresult = iter((node,))
            if predicate_result is not None:
                # NOTE(review): yields ``node`` once per right-hand match, so
                # a node may be produced multiple times - confirm intended.
                for result_node in rhs_select(subresult):
                    yield node
    return select
# Dispatch table: maps a token's "special" text to the handler that parses
# that kind of path step.
operations = {
    "@": handle_attribute,
    "": handle_name,
    "*": handle_star,
    ".": handle_dot,
    "//": handle_descendants,
    "[": handle_predicate,
}
# Predicate functions callable inside a path, dispatched from handle_name.
functions = {
    'not' : handle_func_not
}
def _build_path_iterator(path):
    """Tokenize ``path`` and compile it into a list of selector functions."""
    # parse pattern
    stream = iter([ (special,text)
                    for (special,text) in path_tokenizer(path)
                    if special or text ])
    try:
        # Python 2 iterators expose .next
        _next = stream.next
    except AttributeError:
        # Python 3
        def _next():
            return next(stream)
    token = _next()
    selector = []
    while 1:
        try:
            selector.append(operations[token[0]](_next, token))
        except StopIteration:
            # A handler ran out of tokens mid-step: the path is malformed.
            raise ValueError("invalid path")
        try:
            token = _next()
            if token[0] == "/":
                # '/' only separates steps; advance to the next real token.
                token = _next()
        except StopIteration:
            break
    return selector
# main module API
def iterfind(node, path):
    """Iterate over all nodes matching ``path`` relative to ``node``."""
    result = iter((node,))
    # Thread the node set lazily through each compiled selector in turn.
    for select in _build_path_iterator(path):
        result = select(result)
    return result
def find_first(node, path):
    """Return the first node matching ``path`` under ``node``, or None."""
    return METHOD_NAME(iterfind(node, path))
def find_all(node, path):
    """Return a list of all nodes matching ``path`` relative to ``node``."""
    # Fix: the original line carried a stray trailing '|' artifact that made
    # it a syntax error.
    return list(iterfind(node, path))
4,690 | set inf depex content item list | ## @file
# This file is used to define class objects of INF file [Depex] section.
# It will consumed by InfParser.
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
'''
InfDepexObject
'''
from Library import DataType as DT
from Library import GlobalData
import Logger.Log as Logger
from Logger import ToolError
from Logger import StringTable as ST
from Object.Parser.InfCommonObject import InfSectionCommonDef
from Library.ParserValidate import IsValidArch
class InfDepexContentItem():
    """One raw (SectionType, SectionString) pair parsed from an INF [Depex] section."""
    def __init__(self):
        # e.g. the [Depex.<arch>.<moduletype>] section header type
        self.SectionType = ''
        # the raw text content of that section
        self.SectionString = ''
    def SetSectionType(self, SectionType):
        self.SectionType = SectionType
    def GetSectionType(self):
        return self.SectionType
    def SetSectionString(self, SectionString):
        self.SectionString = SectionString
    def GetSectionString(self):
        return self.SectionString
class InfDepexItem():
    """One validated [Depex] entry: expression text plus its arch/module-type scope."""
    def __init__(self):
        self.DepexContent = ''
        self.ModuleType = ''
        self.SupArch = ''
        self.HelpString = ''
        self.FeatureFlagExp = ''
        self.InfDepexContentItemList = []
    def SetFeatureFlagExp(self, FeatureFlagExp):
        self.FeatureFlagExp = FeatureFlagExp
    def GetFeatureFlagExp(self):
        return self.FeatureFlagExp
    def SetSupArch(self, Arch):
        self.SupArch = Arch
    def GetSupArch(self):
        return self.SupArch
    def SetHelpString(self, HelpString):
        self.HelpString = HelpString
    def GetHelpString(self):
        return self.HelpString
    def SetModuleType(self, Type):
        self.ModuleType = Type
    def GetModuleType(self):
        return self.ModuleType
    # NOTE: method name has a typo ("Conent") but is kept for caller compatibility.
    def SetDepexConent(self, Content):
        self.DepexContent = Content
    def GetDepexContent(self):
        return self.DepexContent
    def METHOD_NAME(self, InfDepexContentItemList):
        self.InfDepexContentItemList = InfDepexContentItemList
    def GetInfDepexContentItemList(self):
        return self.InfDepexContentItemList
## InfDepexObject
#
#
#
class InfDepexObject(InfSectionCommonDef):
    """Object model for an INF file's [Depex] sections, consumed by InfParser."""
    def __init__(self):
        # list of validated InfDepexItem instances
        self.Depex = []
        self.AllContent = ''
        self.SectionContent = ''
        InfSectionCommonDef.__init__(self)
    def SetDepex(self, DepexContent, KeyList=None, CommentList=None):
        """
        Validate and store one [Depex] section.

        DepexContent: list of (line, ...) tuples with the section's raw lines.
        KeyList: list of (Arch, ModuleType, LineNo) section scope keys.
        CommentList: preceding comment lines used as help text.
        Returns True; validation failures are reported via Logger.Error.
        """
        for KeyItem in KeyList:
            Arch = KeyItem[0]
            ModuleType = KeyItem[1]
            InfDepexItemIns = InfDepexItem()
            #
            # Validate Arch
            #
            if IsValidArch(Arch.strip().upper()):
                InfDepexItemIns.SetSupArch(Arch)
            else:
                Logger.Error("InfParser",
                             ToolError.FORMAT_INVALID,
                             ST.ERR_INF_PARSER_DEFINE_NAME_INVALID % (Arch),
                             File=GlobalData.gINF_MODULE_NAME,
                             Line=KeyItem[2])
            #
            # Validate Module Type
            #
            if ModuleType and ModuleType != 'COMMON':
                if ModuleType in DT.VALID_DEPEX_MODULE_TYPE_LIST:
                    InfDepexItemIns.SetModuleType(ModuleType)
                else:
                    Logger.Error("InfParser",
                                 ToolError.FORMAT_INVALID,
                                 ST.ERR_INF_PARSER_DEPEX_SECTION_MODULE_TYPE_ERROR % (ModuleType),
                                 File=GlobalData.gINF_MODULE_NAME,
                                 Line=KeyItem[2])
            #
            # Parser content in [Depex] section.
            #
            DepexString = ''
            HelpString = ''
            #
            # Get Depex Expression
            #
            for Line in DepexContent:
                LineContent = Line[0].strip()
                # Strip trailing comments from the expression line.
                if LineContent.find(DT.TAB_COMMENT_SPLIT) > -1:
                    LineContent = LineContent[:LineContent.find(DT.TAB_COMMENT_SPLIT)]
                if LineContent:
                    DepexString = DepexString + LineContent + DT.END_OF_LINE
                # NOTE(review): this 'continue' is the last statement of the
                # loop body, so it is a no-op - confirm nothing was lost here.
                continue
            if DepexString.endswith(DT.END_OF_LINE):
                DepexString = DepexString[:-1]
            if not DepexString.strip():
                # Empty expression for this scope: skip it entirely.
                continue
            #
            # Get Help Text
            #
            for HelpLine in CommentList:
                HelpString = HelpString + HelpLine + DT.END_OF_LINE
            if HelpString.endswith(DT.END_OF_LINE):
                HelpString = HelpString[:-1]
            InfDepexItemIns.SetDepexConent(DepexString)
            InfDepexItemIns.SetHelpString(HelpString)
            self.Depex.append(InfDepexItemIns)
        return True
    def GetDepex(self):
        return self.Depex
def GetAllContent(self):
return self.AllContent |
4,691 | get | from __future__ import annotations
from rest_framework import serializers
from rest_framework.request import Request
from rest_framework.response import Response
from sentry import features
from sentry.api.api_publish_status import ApiPublishStatus
from sentry.api.base import region_silo_endpoint
from sentry.api.bases.project import ProjectEndpoint
from sentry.integrations.base import IntegrationInstallation
from sentry.integrations.mixins import RepositoryMixin
from sentry.models import Project, RepositoryProjectPathConfig
from sentry.services.hybrid_cloud.integration import integration_service
from sentry.shared_integrations.exceptions import ApiError
from sentry.utils.sdk import set_measurement
from .project_stacktrace_link import get_code_mapping_configs
# Upper bound on code mappings exercised per request, to avoid hitting
# source-provider API rate limits (see the safety check in the endpoint below).
MAX_CODE_MAPPINGS_USED = 3
class StacktraceLinksSerializer(serializers.Serializer):
    """Validates query params for the bulk stacktrace-links endpoint."""
    # one or more stack-trace file paths to resolve
    file = serializers.ListField(child=serializers.CharField())
    # falls back to the default branch
    ref = serializers.CharField(required=False)
@region_silo_endpoint
class ProjectStacktraceLinksEndpoint(ProjectEndpoint):
    publish_status = {
        "GET": ApiPublishStatus.UNKNOWN,
    }
    # NOTE(review): because publish_status precedes it, the string below is a
    # bare expression, not the class docstring (__doc__ stays None) - confirm
    # whether it should be moved above publish_status.
    """
    Returns valid links for source code providers so that
    users can go from files in the stack trace to the
    provider of their choice.
    Similar to `ProjectStacktraceLinkEndpoint` but allows
    for bulk resolution.
    `file`: The file paths from the stack trace
    `ref` (optional): The commit_id for the last commit of the
    release associated to the stack trace's event
    """
    def METHOD_NAME(self, request: Request, project: Project) -> Response:
        """Resolve each requested file path to a source-provider URL in bulk."""
        if not features.has(
            "organizations:profiling-stacktrace-links", project.organization, actor=request.user
        ):
            return Response(status=404)
        serializer = StacktraceLinksSerializer(data=request.GET)
        if not serializer.is_valid():
            return Response(serializer.errors, status=400)
        data = serializer.validated_data
        # One result entry per requested file; entries accumulate either a
        # "sourceUrl" on success or an "error" (plus "attemptedUrl") on failure.
        result = {"files": [{"file": file} for file in data["file"]]}
        mappings_used = 0
        mappings_attempted = 0
        configs = get_code_mapping_configs(project)
        default_error = "stack_root_mismatch" if configs else "no_code_mappings"
        for config in configs:
            # find all the files that match the current code mapping's stack_root
            # and have not already been resolved by another code mapping
            #
            # if there's an error from a previous code mapping attempted, but this
            # current code mapping can be used, we should try again
            files = [
                file
                for file in result["files"]
                if file.METHOD_NAME("sourceUrl") is None and file["file"].startswith(config.stack_root)
            ]
            if not files:
                continue
            mappings_attempted += 1
            # safety to limit the maximum number of mappings used
            # to avoid reaching API rate limits
            if mappings_used >= MAX_CODE_MAPPINGS_USED:
                for file in files:
                    if not file.METHOD_NAME("error") and file.METHOD_NAME("sourceUrl") is None:
                        file["error"] = "max_code_mappings_applied"
                continue
            mappings_used += 1
            install = get_installation(config)
            # should always be overwritten
            error: str | None = "file_not_checked"
            # since the same code mapping stack root matches all these files, we only check the
            # first file and we will assume the other matching files will resolve the same way
            ref = data.METHOD_NAME("ref")
            if ref:
                error = check_file(install, config, files[0]["file"], ref)
            if not ref or error:
                # Retry (or first try) against the code mapping's default branch.
                ref = config.default_branch
                error = check_file(install, config, files[0]["file"], ref)
            for file in files:
                formatted_path = file["file"].replace(config.stack_root, config.source_root, 1)
                url = install.format_source_url(config.repository, formatted_path, ref)
                if error:
                    file["error"] = error
                    file["attemptedUrl"] = url
                else:
                    file["sourceUrl"] = url
                    # there may be an error from a previous code mapping, clear it
                    if "error" in file:
                        del file["error"]
                    if "attemptedUrl" in file:
                        del file["attemptedUrl"]
        # number of available code mappings
        set_measurement("mappings.found", len(configs))
        # number of code mappings that matched a stack root
        set_measurement("mappings.attempted", mappings_attempted)
        # number of code mappings actually used to build links
        set_measurement("mappings.used", mappings_used)
        for file in result["files"]:
            if not file.METHOD_NAME("error") and file.METHOD_NAME("sourceUrl") is None:
                file["error"] = default_error
        return Response(result, status=200)
def get_installation(config: RepositoryProjectPathConfig) -> IntegrationInstallation:
    """Resolve the integration installation behind a code mapping's org integration."""
    integration = integration_service.get_integration(
        organization_integration_id=config.organization_integration_id
    )
    organization_id = config.project.organization_id
    return integration.get_installation(organization_id=organization_id)
def check_file(
    install: IntegrationInstallation,
    config: RepositoryProjectPathConfig,
    filepath: str,
    ref: str,
) -> str | None:
    """
    Checks to see if the given filepath exists using the given code mapping + ref.
    Returns a string indicating the error if it doesn't exist, and `None` otherwise.
    """
    # Fix: the original final return line carried a stray trailing '|'
    # artifact that made it a syntax error.
    formatted_path = filepath.replace(config.stack_root, config.source_root, 1)
    link = None
    try:
        if isinstance(install, RepositoryMixin):
            # the logic to fall back to the default branch is handled from the caller
            link = install.get_stacktrace_link(config.repository, formatted_path, ref, "")
    except ApiError as e:
        # 403 means the integration lacks permission; anything else is unexpected.
        if e.code != 403:
            raise
        return "integration_link_forbidden"
    if not link:
        return "file_not_found"
    return None
4,692 | read keys from env | import getpass
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import web3
from eth_account.account import Account # type: ignore
from eth_typing.evm import ChecksumAddress
from hexbytes.main import HexBytes
from web3 import Web3
from web3.contract import Contract, ContractFunction
from web3.providers.ipc import IPCProvider
from web3.providers.rpc import HTTPProvider
from web3.types import ABI, Nonce, TxParams, TxReceipt, Wei
class ContractConstructor:
    """Captures positional constructor arguments for later contract deployment."""

    def __init__(self, *args: Any):
        # Stored as given; consumed later via ``contract.constructor(*self.args)``.
        self.args = args
def build_transaction(
    web3: Web3,
    builder: Union[ContractFunction, Any],
    sender: ChecksumAddress,
) -> Union[TxParams, Any]:
    """
    Builds transaction json with the given arguments. It is not submitting transaction
    Arguments:
    - web3: Web3 client
    - builder: ContractFunction or other class that has method buildTransaction(TxParams)
    - sender: `from` value of transaction, address which is sending this transaction
    """
    # The nonce is fetched fresh from the chain for the sender's address.
    transaction = builder.buildTransaction(
        {
            "from": sender,
            "nonce": get_nonce(web3, sender),
        }
    )
    return transaction
def get_nonce(web3: Web3, address: ChecksumAddress) -> Nonce:
    """Return the transaction count (next nonce) for *address*."""
    return web3.eth.get_transaction_count(address)
def submit_transaction(
    web3: Web3, transaction: Union[TxParams, Any], signer_private_key: str
) -> HexBytes:
    """Sign the prepared transaction with the given key and broadcast it."""
    signed = web3.eth.account.sign_transaction(
        transaction, private_key=signer_private_key
    )
    return submit_signed_raw_transaction(web3, signed.rawTransaction)
def submit_signed_raw_transaction(
    web3: Web3, signed_raw_transaction: HexBytes
) -> HexBytes:
    """Broadcast an already-signed raw transaction and return its hash."""
    return web3.eth.send_raw_transaction(signed_raw_transaction)
def wait_for_transaction_receipt(web3: Web3, transaction_hash: HexBytes):
    """Block until *transaction_hash* is mined and return its receipt."""
    return web3.eth.wait_for_transaction_receipt(transaction_hash)
def deploy_contract(
    web3: Web3,
    contract_bytecode: str,
    contract_abi: List[Dict[str, Any]],
    deployer: ChecksumAddress,
    deployer_private_key: str,
    constructor_arguments: Optional[List[Any]] = None,
) -> Tuple[HexBytes, ChecksumAddress]:
    """
    Deploys smart contract to blockchain
    Arguments:
    - web3: web3 client
    - contract_bytecode: Compiled smart contract bytecode
    - contract_abi: Json abi of contract. Must include `constructor` function
    - deployer: Address which is deploying contract. Deployer will pay transaction fee
    - deployer_private_key: Private key of deployer. Needed for signing and submitting transaction
    - constructor_arguments: arguments that are passed to `constructor` function of the smart contract
    Returns a tuple of (transaction hash, checksummed deployed-contract address).
    """
    contract = web3.eth.contract(abi=contract_abi, bytecode=contract_bytecode)
    if constructor_arguments is None:
        transaction = build_transaction(web3, contract.constructor(), deployer)
    else:
        transaction = build_transaction(
            web3, contract.constructor(*constructor_arguments), deployer
        )
    transaction_hash = submit_transaction(web3, transaction, deployer_private_key)
    # Blocks until mined; the receipt carries the new contract's address.
    transaction_receipt = wait_for_transaction_receipt(web3, transaction_hash)
    contract_address = transaction_receipt.contractAddress
    return transaction_hash, web3.toChecksumAddress(contract_address)
def deploy_contract_from_constructor_function(
    web3: Web3,
    contract_bytecode: str,
    contract_abi: List[Dict[str, Any]],
    deployer: ChecksumAddress,
    deployer_private_key: str,
    constructor: ContractConstructor,
) -> Tuple[HexBytes, ChecksumAddress]:
    """
    Deploys smart contract to blockchain from constructor ContractFunction
    Arguments:
    - web3: web3 client
    - contract_bytecode: Compiled smart contract bytecode
    - contract_abi: Json abi of contract. Must include `constructor` function
    - deployer: Address which is deploying contract. Deployer will pay transaction fee
    - deployer_private_key: Private key of deployer. Needed for signing and submitting transaction
    - constructor:`constructor` function of the smart contract
    Returns a tuple of (transaction hash, checksummed deployed-contract address).
    """
    contract = web3.eth.contract(abi=contract_abi, bytecode=contract_bytecode)
    # The pre-captured constructor args are splatted into the real constructor.
    transaction = build_transaction(
        web3, contract.constructor(*constructor.args), deployer
    )
    transaction_hash = submit_transaction(web3, transaction, deployer_private_key)
    transaction_receipt = wait_for_transaction_receipt(web3, transaction_hash)
    contract_address = transaction_receipt.contractAddress
    return transaction_hash, web3.toChecksumAddress(contract_address)
def decode_transaction_input(web3: Web3, transaction_input: str, abi: Dict[str, Any]):
    """Decode raw transaction input into (function, arguments) using the ABI."""
    return web3.eth.contract(abi=abi).decode_function_input(transaction_input)
def read_keys_from_cli() -> Tuple[ChecksumAddress, str]:
    """Prompt (without echo) for a private key; return (checksum address, key)."""
    private_key = getpass.getpass(prompt="Enter private key of your address:")
    account = Account.from_key(private_key)
    return (Web3.toChecksumAddress(account.address), private_key)
def METHOD_NAME() -> Tuple[ChecksumAddress, str]:
    """
    Read the signing keypair from the MOONWORM_ETHEREUM_ADDRESS_PRIVATE_KEY
    environment variable.

    Returns (checksum address, private key).
    Raises ValueError if the variable is unset or does not hold a valid key.
    """
    private_key = os.environ.get("MOONWORM_ETHEREUM_ADDRESS_PRIVATE_KEY")
    if private_key is None:
        raise ValueError(
            "MOONWORM_ETHEREUM_ADDRESS_PRIVATE_KEY env variable is not set"
        )
    try:
        account = Account.from_key(private_key)
    except Exception as err:
        # Narrowed from a bare ``except`` so KeyboardInterrupt/SystemExit
        # still propagate; chain the cause for easier debugging.
        raise ValueError(
            "Failed to initiate account from MOONWORM_ETHEREUM_ADDRESS_PRIVATE_KEY"
        ) from err
    return (Web3.toChecksumAddress(account.address), private_key)
def connect(web3_uri: str) -> Web3:
    """
    Create a Web3 client for the given URI.

    http(s):// URIs get an HTTP provider; anything else is treated as an
    IPC socket path.
    """
    # Fix: the original eagerly constructed a default ``Web3.IPCProvider()``
    # (resolving a default IPC path) only to discard it; build the provider
    # once, in the branch that actually needs it.
    if web3_uri.startswith(("http://", "https://")):
        web3_provider: Union[IPCProvider, HTTPProvider] = Web3.HTTPProvider(web3_uri)
    else:
        web3_provider = Web3.IPCProvider(web3_uri)
    return Web3(web3_provider)
def read_web3_provider_from_env() -> Web3:
    """Build a Web3 client from the MOONWORM_WEB3_PROVIDER_URI env variable."""
    provider_path = os.environ.get("MOONWORM_WEB3_PROVIDER_URI")
    if provider_path is None:
        raise ValueError("MOONWORM_WEB3_PROVIDER_URI env variable is not set")
    return connect(provider_path)
def read_web3_provider_from_cli() -> Web3:
    """Interactively prompt for a web3 URI and build a client from it."""
    provider_path = input("Enter web3 uri path: ")
    return connect(provider_path)
def cast_to_python_type(evm_type: str) -> Callable:
    """
    Map a Solidity/EVM type name to the Python callable used to coerce values.

    Raises ValueError for unsupported EVM types.
    """
    # Fix: the original final line carried a stray trailing '|' artifact that
    # made it a syntax error.
    if evm_type.startswith(("uint", "int")):
        return int
    elif evm_type.startswith("bytes"):
        return bytes
    elif evm_type == "string":
        return str
    elif evm_type == "address":
        return Web3.toChecksumAddress
    elif evm_type == "bool":
        return bool
    else:
        raise ValueError(f"Cannot convert to python type {evm_type}")
4,693 | test valid daily scheduler invalid days | import datetime
from unittest import mock
from testifycompat import assert_equal
from testifycompat import assert_raises
from testifycompat import run
from testifycompat import TestCase
from tron.config import config_utils
from tron.config import ConfigError
from tron.config import schedule_parse
class TestPadSequence(TestCase):
    """Tests for schedule_parse.pad_sequence."""
    def test_pad_sequence_short(self):
        # shorter-than-size input is padded with the filler (default None)
        expected = [0, 1, 2, 3, None, None]
        assert_equal(schedule_parse.pad_sequence(range(4), 6), expected)
    def test_pad_sequence_long(self):
        # longer-than-size input is truncated
        expected = [0, 1, 2, 3]
        assert_equal(schedule_parse.pad_sequence(range(6), 4), expected)
    def test_pad_sequence_exact(self):
        expected = [0, 1, 2, 3]
        assert_equal(schedule_parse.pad_sequence(range(4), 4), expected)
    def test_pad_sequence_empty(self):
        expected = ["a", "a"]
        assert_equal(schedule_parse.pad_sequence([], 2, "a"), expected)
    def test_pad_negative_size(self):
        # negative size yields an empty result rather than raising
        assert_equal(schedule_parse.pad_sequence([], -2, "a"), [])
class TestScheduleConfigFromString(TestCase):
    """Tests for schedule_parse.schedule_config_from_string."""
    @mock.patch(
        "tron.config.schedule_parse.parse_groc_expression", autospec=True,
    )
    def test_groc_config(self, mock_parse_groc):
        # A groc-style schedule string is routed to the groc parser with a
        # generic config wrapper.
        schedule = "every Mon,Wed at 12:00"
        context = config_utils.NullConfigContext
        config = schedule_parse.schedule_config_from_string(schedule, context)
        assert_equal(config, mock_parse_groc.return_value)
        generic_config = schedule_parse.ConfigGenericSchedule("groc daily", schedule, None,)
        mock_parse_groc.assert_called_with(generic_config, context)
class TestValidScheduler(TestCase):
    """Tests for schedule_parse.valid_schedule dict-based cron configs."""
    @mock.patch("tron.config.schedule_parse.schedulers", autospec=True)
    def assert_validation(self, schedule, expected, mock_schedulers):
        # Helper: validate ``schedule`` and assert the "cron" scheduler was
        # dispatched with the expected generic config.
        context = config_utils.NullConfigContext
        config = schedule_parse.valid_schedule(schedule, context)
        mock_schedulers.__getitem__.assert_called_with("cron")
        func = mock_schedulers.__getitem__.return_value
        assert_equal(config, func.return_value)
        func.assert_called_with(expected, context)
    def test_cron_from_dict(self):
        schedule = {"type": "cron", "value": "* * * * *"}
        config = schedule_parse.ConfigGenericSchedule("cron", schedule["value"], datetime.timedelta(),)
        self.assert_validation(schedule, config)
    def test_cron_from_dict_with_jitter(self):
        schedule = {"type": "cron", "value": "* * * * *", "jitter": "5 min"}
        config = schedule_parse.ConfigGenericSchedule("cron", schedule["value"], datetime.timedelta(minutes=5),)
        self.assert_validation(schedule, config)
class TestValidCronScheduler(TestCase):
    """Tests for schedule_parse.valid_cron_scheduler."""
    _suites = ["integration"]
    def validate(self, line):
        # Helper: wrap a raw cron line and run it through the validator.
        config = schedule_parse.ConfigGenericSchedule("cron", line, None)
        context = config_utils.NullConfigContext
        return schedule_parse.valid_cron_scheduler(config, context)
    def test_valid_config(self):
        # 'L' in the day-of-month field parses to the special "LAST" marker.
        config = self.validate("5 0 L * *")
        assert_equal(config.minutes, [5])
        assert_equal(config.months, None)
        assert_equal(config.monthdays, ["LAST"])
    def test_invalid_config(self):
        # Too few fields is a configuration error.
        assert_raises(ConfigError, self.validate, "* * *")
class TestValidDailyScheduler(TestCase):
    """Tests for schedule_parse.valid_daily_scheduler ("HH:MM[:SS] [days]")."""
    def validate(self, config):
        # Helper: wrap a raw daily schedule string and run the validator.
        context = config_utils.NullConfigContext
        config = schedule_parse.ConfigGenericSchedule("daily", config, None)
        return schedule_parse.valid_daily_scheduler(config, context)
    def assert_parse(self, config, expected):
        config = self.validate(config)
        expected = schedule_parse.ConfigDailyScheduler(*expected, jitter=None)
        assert_equal(config, expected)
    def test_valid_daily_scheduler_start_time(self):
        expected = ("14:32 ", 14, 32, 0, set())
        self.assert_parse("14:32", expected)
    def test_valid_daily_scheduler_just_days(self):
        # Day letters map to weekday ordinals (M=1, W=3, S=6 here).
        expected = ("00:00:00 MWS", 0, 0, 0, {1, 3, 6})
        self.assert_parse("00:00:00 MWS", expected)
    def test_valid_daily_scheduler_time_and_day(self):
        expected = ("17:02:44 SU", 17, 2, 44, {0, 6})
        self.assert_parse("17:02:44 SU", expected)
    def test_valid_daily_scheduler_invalid_start_time(self):
        assert_raises(ConfigError, self.validate, "5 MWF")
        assert_raises(ConfigError, self.validate, "05:30:45:45 MWF")
        assert_raises(ConfigError, self.validate, "25:30:45 MWF")
    def METHOD_NAME(self):
        # Unknown day letters or bare digits are rejected.
        assert_raises(ConfigError, self.validate, "SUG")
        assert_raises(ConfigError, self.validate, "3")
# Fix: the original run() line carried a stray trailing '|' artifact that
# made it a syntax error.
if __name__ == "__main__":
    run()
4,694 | async set up | from __future__ import annotations
import random
from unittest import IsolatedAsyncioTestCase
from functional_tests.conftest import IAMBIC_TEST_DETAILS
from iambic.plugins.v0_1_0.aws.models import Tag
from iambic.plugins.v0_1_0.aws.organizations.scp.models import PolicyTargetProperties
from .utils import generate_scp_policy_template_from_base
class UpdatePolicyTestCase(IsolatedAsyncioTestCase):
    """Functional tests that create, mutate and delete an AWS SCP policy template."""
    async def METHOD_NAME(self):
        # Create a fresh SCP policy from the base template for each test.
        self.template = await generate_scp_policy_template_from_base(
            IAMBIC_TEST_DETAILS.template_dir_path, create_policy=True
        )
        self.policy_name = self.template.properties.policy_name
        self.path = self.template.properties.path
        self.all_account_ids = [
            account.account_id for account in IAMBIC_TEST_DETAILS.config.aws.accounts
        ]
        # The single management (organization) account.
        self.org_account = next(
            filter(
                lambda acc: acc.organization_account,
                IAMBIC_TEST_DETAILS.config.aws.accounts,
            )
        )
        self.org_client = await self.org_account.get_boto3_client("organizations")
        # All member (non-organization) accounts, used as attachment targets.
        self.accounts = [
            acc
            for acc in IAMBIC_TEST_DETAILS.config.aws.accounts
            if acc.organization_account is False
        ]
    async def asyncTearDown(self):
        # Mark the template deleted and apply, so the policy is removed from AWS.
        self.template.deleted = True
        await self.template.apply(IAMBIC_TEST_DETAILS.config.aws)
    async def test_update_policy_without_attachments(self):
        changes = await self.template.apply(IAMBIC_TEST_DETAILS.config.aws)
        self.assertEquals(len(changes.exceptions_seen), 0)
        current_policy = self.org_client.describe_policy(
            PolicyId=self.template.properties.policy_id
        )
        self.assertEquals(
            current_policy.get("Policy").get("PolicySummary").get("Name"),
            self.template.properties.policy_name,
        )
        self.assertEquals(
            current_policy.get("Policy").get("PolicySummary").get("Name"),
            self.template.identifier,
        )
    async def test_update_policy_with_targets(self):
        account = self.attach_account()
        changes = await self.template.apply(IAMBIC_TEST_DETAILS.config.aws)
        self.assertEquals(
            len(changes.exceptions_seen), 0, f"failed due to {changes.exceptions_seen}"
        )
        self.check_attach_accounts(account)
        # detach policy from account
        self.detach_account(account)
        changes = await self.template.apply(IAMBIC_TEST_DETAILS.config.aws)
        self.assertEquals(len(changes.exceptions_seen), 0)
        self.check_detach_account(account)
    async def test_update_policy_with_tags(self):
        tags = self.attach_tags()
        changes = await self.template.apply(IAMBIC_TEST_DETAILS.config.aws)
        self.assertEquals(len(changes.exceptions_seen), 0)
        self.check_attach_tags(tags)
        self.detach_tags(tags)
        changes = await self.template.apply(IAMBIC_TEST_DETAILS.config.aws)
        self.assertEquals(len(changes.exceptions_seen), 0)
        self.check_detach_tags(tags)
    async def test_update_policy_with_attachments(self):
        # Combined tags + account targets round trip.
        tags = self.attach_tags()
        account = self.attach_account()
        changes = await self.template.apply(IAMBIC_TEST_DETAILS.config.aws)
        self.assertEquals(len(changes.exceptions_seen), 0)
        self.check_attach_tags(tags)
        self.check_attach_accounts(account)
        self.detach_tags(tags)
        self.detach_account(account)
        changes = await self.template.apply(IAMBIC_TEST_DETAILS.config.aws)
        self.assertEquals(len(changes.exceptions_seen), 0)
        self.check_detach_tags(tags)
        self.check_detach_account(account)
    def check_detach_account(self, account):
        # Assert via the Organizations API that the account is no longer a target.
        targets = self.org_client.list_targets_for_policy(
            PolicyId=self.template.properties.policy_id
        ).get("Targets")
        self.assertNotIn(
            account.account_id, [target.get("TargetId") for target in targets]
        )
    def detach_account(self, account):
        self.template.properties.targets.accounts.remove(account.account_id)
    def check_attach_accounts(self, account):
        targets = self.org_client.list_targets_for_policy(
            PolicyId=self.template.properties.policy_id
        ).get("Targets")
        self.assertIn(
            account.account_id, [target.get("TargetId") for target in targets]
        )
    def attach_account(self):
        # Pick a random member account and add it to the template's targets.
        account = random.choice(self.accounts)
        if not self.template.properties.targets:
            self.template.properties.targets = PolicyTargetProperties()
        self.template.properties.targets.accounts.append(account.account_id)
        return account
    def check_detach_tags(self, tags):
        listed_tags = self.org_client.list_tags_for_resource(
            ResourceId=self.template.properties.policy_id
        ).get("Tags")
        self.assertNotIn(tags[0].key, [tag.get("Key") for tag in listed_tags])
    def detach_tags(self, tags):
        # NOTE(review): ``tag.key not in tags[0].key`` is a substring test
        # against a single key - likely intended a membership test over the
        # tag keys; confirm.
        self.template.properties.tags = [
            tag for tag in self.template.properties.tags if tag.key not in tags[0].key
        ]
    def check_attach_tags(self, tags):
        listed_tags = self.org_client.list_tags_for_resource(
            ResourceId=self.template.properties.policy_id
        ).get("Tags")
        self.assertIn(tags[0].key, [tag.get("Key") for tag in listed_tags])
def attach_tags(self):
tags = [
Tag(key="functional_test_tag", value="value"), # type: ignore
]
self.template.properties.tags = tags
return tags |
4,695 | test which | from varmat_compatibility_summary import convert_signatures_list_to_functions, select_signatures_matching_functions, remove_signatures_matching_functions, process_results
import unittest
class HelpersTest(unittest.TestCase):
    """Tests for the signature-list helper functions."""
    def setUp(self):
        # Three signatures with distinct function names for filtering tests.
        self.signatures_list = ["chicken(array[] real) => real", "squirrel(array[] real) => real", "frog(array[] real) => real"]
    def test_convert(self):
        self.assertSetEqual(set(["chicken", "squirrel", "frog"]), convert_signatures_list_to_functions(self.signatures_list))
    def test_select(self):
        self.assertSetEqual(set(["chicken(array[] real) => real"]), select_signatures_matching_functions(self.signatures_list, ["chicken"]))
    def test_remove(self):
        self.assertSetEqual(set(["squirrel(array[] real) => real", "frog(array[] real) => real"]), remove_signatures_matching_functions(self.signatures_list, ["chicken"]))
class ProcessResultsTest(unittest.TestCase):
    """Tests for process_results filtering by which/fully/names/functions."""
    def setUp(self):
        # "chicken" is compatible+irrelevant, "squirrel" incompatible+irrelevant,
        # "dog" both compatible and incompatible, "frog" only irrelevant.
        self.results = {
            "compatible_signatures" : ["chicken(matrix) => real", "dog(matrix) => real"],
            "incompatible_signatures" : ["squirrel(vector) => real", "dog(vector) => real"],
            "irrelevant_signatures" : ["chicken(array[] real) => real", "squirrel(array[] real) => real", "frog(array[] real) => real"]
        }
    def METHOD_NAME(self):
        # which= selects the raw category without further filtering.
        self.assertSetEqual(set(["chicken(matrix) => real", "dog(matrix) => real"]), process_results(self.results, functions = [], which = "compatible", fully = False, names = False))
        self.assertSetEqual(set(["squirrel(vector) => real", "dog(vector) => real"]), process_results(self.results, functions = [], which = "incompatible", fully = False, names = False))
        self.assertSetEqual(set(["chicken(array[] real) => real", "squirrel(array[] real) => real", "frog(array[] real) => real"]), process_results(self.results, functions = [], which = "irrelevant", fully = False, names = False))
    def test_fully(self):
        # fully=True keeps only functions that appear in no other category.
        self.assertSetEqual(set(["chicken(matrix) => real"]), process_results(self.results, functions = [], which = "compatible", fully = True, names = False))
        self.assertSetEqual(set(["squirrel(vector) => real"]), process_results(self.results, functions = [], which = "incompatible", fully = True, names = False))
        self.assertSetEqual(set(["frog(array[] real) => real"]), process_results(self.results, functions = [], which = "irrelevant", fully = True, names = False))
    def test_names(self):
        # names=True collapses signatures to bare function names.
        self.assertSetEqual(set(["chicken"]), process_results(self.results, functions = [], which = "compatible", fully = True, names = True))
        self.assertSetEqual(set(["squirrel"]), process_results(self.results, functions = [], which = "incompatible", fully = True, names = True))
        self.assertSetEqual(set(["frog"]), process_results(self.results, functions = [], which = "irrelevant", fully = True, names = True))
    def test_functions(self):
        # functions=[...] restricts output to the listed function names.
        self.assertSetEqual(set(["chicken(matrix) => real"]), process_results(self.results, functions = ["chicken"], which = "compatible", fully = False, names = False))
        self.assertSetEqual(set(["squirrel(vector) => real"]), process_results(self.results, functions = ["squirrel"], which = "incompatible", fully = False, names = False))
        self.assertSetEqual(set(["frog(array[] real) => real"]), process_results(self.results, functions = ["frog"], which = "irrelevant", fully = False, names = False))
    def test_functions_names(self):
        self.assertSetEqual(set(["chicken"]), process_results(self.results, functions = ["chicken"], which = "compatible", fully = False, names = True))
        self.assertSetEqual(set(["squirrel"]), process_results(self.results, functions = ["squirrel"], which = "incompatible", fully = False, names = True))
        self.assertSetEqual(set(["frog"]), process_results(self.results, functions = ["frog"], which = "irrelevant", fully = False, names = True))
# Fix: the original call was truncated to ``unittest.main(`` (missing the
# closing parenthesis), which is a syntax error.
if __name__ == '__main__':
    unittest.main()
4,696 | close | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, TYPE_CHECKING
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from .. import models as _models
from ..._serialization import Deserializer, Serializer
from ._configuration import DataBoxManagementClientConfiguration
from .operations import JobsOperations, Operations, ServiceOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class DataBoxManagementClient:  # pylint: disable=client-accepts-api-version-keyword
    """The DataBox Client.

    :ivar operations: Operations operations
    :vartype operations: azure.mgmt.databox.v2018_01_01.aio.operations.Operations
    :ivar jobs: JobsOperations operations
    :vartype jobs: azure.mgmt.databox.v2018_01_01.aio.operations.JobsOperations
    :ivar service: ServiceOperations operations
    :vartype service: azure.mgmt.databox.v2018_01_01.aio.operations.ServiceOperations
    :param credential: Credential needed for the client to connect to Azure. Required.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: The Subscription Id. Required.
    :type subscription_id: str
    :param base_url: Service URL. Default value is "https://management.azure.com".
    :type base_url: str
    :keyword api_version: Api Version. Default value is "2018-01-01". Note that overriding this
     default value may result in unsupported behavior.
    :paramtype api_version: str
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        base_url: str = "https://management.azure.com",
        **kwargs: Any
    ) -> None:
        self._config = DataBoxManagementClientConfiguration(
            credential=credential, subscription_id=subscription_id, **kwargs
        )
        self._client: AsyncARMPipelineClient = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)

        # Build the (de)serializers over every model class in the generated models module.
        client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)
        # Validation is enforced server-side; skip client-side checks.
        self._serialize.client_side_validation = False
        self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
        self.jobs = JobsOperations(self._client, self._config, self._serialize, self._deserialize)
        self.service = ServiceOperations(self._client, self._config, self._serialize, self._deserialize)

    def _send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]:
        """Runs the network request through the client's chained policies.

        >>> from azure.core.rest import HttpRequest
        >>> request = HttpRequest("GET", "https://www.example.org/")
        <HttpRequest [GET], url: 'https://www.example.org/'>
        >>> response = await client._send_request(request)
        <AsyncHttpResponse: 200 OK>

        For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request

        :param request: The network request you want to make. Required.
        :type request: ~azure.core.rest.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.rest.AsyncHttpResponse
        """
        # Copy so the caller's request object is never mutated by URL formatting.
        request_copy = deepcopy(request)
        request_copy.url = self._client.format_url(request_copy.url)
        return self._client.send_request(request_copy, **kwargs)

    async def close(self) -> None:
        """Close the underlying pipeline client and release its network resources."""
        await self._client.close()

    async def __aenter__(self) -> "DataBoxManagementClient":
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *exc_details: Any) -> None:
        await self._client.__aexit__(*exc_details)
4,697 | test val interaction | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import unittest
import numpy as np
import torch
from monai.apps.deepedit.interaction import Interaction
from monai.apps.deepedit.transforms import (
AddGuidanceSignalDeepEditd,
AddInitialSeedPointMissingLabelsd,
AddRandomGuidanceDeepEditd,
FindAllValidSlicesMissingLabelsd,
FindDiscrepancyRegionsDeepEditd,
SplitPredsLabeld,
)
from monai.data import DataLoader, Dataset
from monai.engines import SupervisedTrainer
from monai.engines.utils import IterationEvents
from monai.losses import DiceCELoss
from monai.transforms import Activationsd, AsDiscreted, Compose, ToTensord
def add_one(engine):
    """Event handler that bumps ``engine.state.best_metric`` by one.

    The engine initializes ``best_metric`` to -1; the first call treats
    that sentinel as zero, subsequent calls increment normally.
    """
    current = engine.state.best_metric
    engine.state.best_metric = 0 if current == -1 else current + 1
class TestInteractions(unittest.TestCase):
    """Exercise the DeepEdit ``Interaction`` iteration loop end to end."""

    def run_interaction(self, train):
        """Run one epoch of a SupervisedTrainer wired through ``Interaction``.

        Builds a tiny random two-label dataset and a 1x1x1 Conv3d network,
        then verifies the inner-iteration events fired and guidance was
        attached to the batch.

        :param train: forwarded to ``Interaction(train=...)`` to select
            training vs. validation behavior of the click loop.
        """
        label_names = {"spleen": 1, "background": 0}
        np.random.seed(0)
        data = [
            {
                "image": np.random.randint(0, 256, size=(1, 15, 15, 15)).astype(np.float32),
                "label": np.random.randint(0, 2, size=(1, 15, 15, 15)),
                "label_names": label_names,
            }
            for _ in range(5)
        ]
        # 3 input channels: image + guidance signal channels added by the transforms.
        network = torch.nn.Conv3d(3, len(label_names), 1)
        lr = 1e-3
        opt = torch.optim.Adam(network.parameters(), lr)
        loss = DiceCELoss(to_onehot_y=True, softmax=True)

        pre_transforms = Compose(
            [
                FindAllValidSlicesMissingLabelsd(keys="label", sids="sids"),
                AddInitialSeedPointMissingLabelsd(keys="label", guidance="guidance", sids="sids"),
                AddGuidanceSignalDeepEditd(keys="image", guidance="guidance", number_intensity_ch=1),
                ToTensord(keys=("image", "label")),
            ]
        )
        dataset = Dataset(data, transform=pre_transforms)
        data_loader = DataLoader(dataset, batch_size=5)

        # Transforms applied inside each simulated-click iteration.
        iteration_transforms = [
            FindDiscrepancyRegionsDeepEditd(keys="label", pred="pred", discrepancy="discrepancy"),
            AddRandomGuidanceDeepEditd(
                keys="NA", guidance="guidance", discrepancy="discrepancy", probability="probability"
            ),
            AddGuidanceSignalDeepEditd(keys="image", guidance="guidance", number_intensity_ch=1),
            ToTensord(keys=("image", "label")),
        ]
        post_transforms = [
            Activationsd(keys="pred", softmax=True),
            AsDiscreted(keys=("pred", "label"), argmax=(True, False), to_onehot=len(label_names)),
            SplitPredsLabeld(keys="pred"),
            ToTensord(keys=("image", "label")),
        ]
        iteration_transforms = Compose(iteration_transforms)
        post_transforms = Compose(post_transforms)

        # deepgrow_probability=1.0 guarantees the click loop always runs.
        i = Interaction(
            deepgrow_probability=1.0,
            transforms=iteration_transforms,
            click_probability_key="probability",
            train=train,
            label_names=label_names,
        )
        self.assertEqual(len(i.transforms.transforms), 4, "Mismatch in expected transforms")

        # set up engine
        engine = SupervisedTrainer(
            device="cpu",
            max_epochs=1,
            train_data_loader=data_loader,
            network=network,
            optimizer=opt,
            loss_function=loss,
            postprocessing=post_transforms,
            iteration_update=i,
        )
        # add_one fires on both inner-iteration events; best_metric goes -1 -> 0 -> 1.
        engine.add_event_handler(IterationEvents.INNER_ITERATION_STARTED, add_one)
        engine.add_event_handler(IterationEvents.INNER_ITERATION_COMPLETED, add_one)
        engine.run()

        self.assertIsNotNone(engine.state.batch[0].get("guidance"), "guidance is missing")
        self.assertEqual(engine.state.best_metric, 1)

    def test_train_interaction(self):
        """Interaction loop in training mode."""
        self.run_interaction(train=True)

    def test_val_interaction(self):
        """Interaction loop in validation mode."""
        self.run_interaction(train=False)
if __name__ == "__main__":
    unittest.main()
4,698 | header | import os
import time
import tempfile
import threading
import urllib.parse
import pycurl
from hashlib import sha256
from io import BytesIO
from tenacity import retry, wait_random_exponential, stop_after_attempt
from openpilot.common.file_helpers import mkdirs_exists_ok, atomic_write_in_dir
# Cache chunk size
K = 1000
CHUNK_SIZE = 1000 * K
CACHE_DIR = os.environ.get("COMMA_CACHE", "/tmp/comma_download_cache/")
def hash_256(link):
    """Return the SHA-256 hex digest of *link* with any query string stripped."""
    base_url = link.split("?")[0]
    return str(sha256(base_url.encode('utf-8')).hexdigest())
class URLFile:
    """Read-only, file-like access to a remote URL over HTTP via pycurl.

    Supports positioned reads with HTTP Range requests, an optional on-disk
    chunk cache under CACHE_DIR, and a ``name`` property that materializes
    the contents into a local temporary file for APIs that need a path.
    """

    # One pycurl handle per thread, shared by all URLFile instances on that thread.
    _tlocal = threading.local()

    def __init__(self, url, debug=False, cache=None):
        self._url = url
        self._pos = 0
        self._length = None
        self._local_file = None
        self._debug = debug
        # True by default, false if FILEREADER_CACHE is defined, but can be overwritten by the cache input
        self._force_download = not int(os.environ.get("FILEREADER_CACHE", "0"))
        if cache is not None:
            self._force_download = not cache

        try:
            self._curl = self._tlocal.curl
        except AttributeError:
            self._curl = self._tlocal.curl = pycurl.Curl()
        mkdirs_exists_ok(CACHE_DIR)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Remove the temp file created by the `name` property, if it exists.
        if self._local_file is not None:
            os.remove(self._local_file.name)
            self._local_file.close()
            self._local_file = None

    @retry(wait=wait_random_exponential(multiplier=1, max=5), stop=stop_after_attempt(3), reraise=True)
    def get_length_online(self):
        """Query the remote Content-Length with a body-less request.

        Returns -1 when the server does not report a length (curl's sentinel).
        """
        c = self._curl
        c.reset()
        c.setopt(pycurl.NOSIGNAL, 1)
        c.setopt(pycurl.TIMEOUT_MS, 500000)
        c.setopt(pycurl.FOLLOWLOCATION, True)
        c.setopt(pycurl.URL, self._url)
        c.setopt(c.NOBODY, 1)
        c.perform()
        length = int(c.getinfo(c.CONTENT_LENGTH_DOWNLOAD))
        c.reset()
        return length

    def get_length(self):
        """Return the remote file length, caching it on disk when allowed."""
        if self._length is not None:
            return self._length

        file_length_path = os.path.join(CACHE_DIR, hash_256(self._url) + "_length")
        if os.path.exists(file_length_path) and not self._force_download:
            with open(file_length_path) as file_length:
                content = file_length.read()
                self._length = int(content)
                return self._length

        self._length = self.get_length_online()
        if not self._force_download:
            with atomic_write_in_dir(file_length_path, mode="w") as file_length:
                file_length.write(str(self._length))
        return self._length

    def read(self, ll=None):
        """Read ``ll`` bytes (or the remainder) from the current position.

        Served from the on-disk chunk cache unless force-download is set;
        missing chunks are fetched via read_aux() and written to the cache.
        """
        if self._force_download:
            return self.read_aux(ll=ll)

        file_begin = self._pos
        file_end = self._pos + ll if ll is not None else self.get_length()
        assert file_end != -1, f"Remote file is empty or doesn't exist: {self._url}"
        # We have to align with chunks we store. Position is the beginning of the latest chunk that starts before or at our file
        position = (file_begin // CHUNK_SIZE) * CHUNK_SIZE
        response = b""
        while True:
            self._pos = position
            # NOTE(review): true division yields float chunk numbers (e.g. "0.0")
            # in cache file names; changing to // would invalidate existing caches.
            chunk_number = self._pos / CHUNK_SIZE
            file_name = hash_256(self._url) + "_" + str(chunk_number)
            full_path = os.path.join(CACHE_DIR, str(file_name))
            data = None
            # If we don't have a file, download it
            if not os.path.exists(full_path):
                data = self.read_aux(ll=CHUNK_SIZE)
                with atomic_write_in_dir(full_path, mode="wb") as new_cached_file:
                    new_cached_file.write(data)
            else:
                with open(full_path, "rb") as cached_file:
                    data = cached_file.read()

            response += data[max(0, file_begin - position): min(CHUNK_SIZE, file_end - position)]

            position += CHUNK_SIZE
            if position >= file_end:
                self._pos = file_end
                return response

    @retry(wait=wait_random_exponential(multiplier=1, max=5), stop=stop_after_attempt(3), reraise=True)
    def read_aux(self, ll=None):
        """Perform the actual HTTP GET (optionally ranged) and return the bytes."""
        download_range = False
        headers = ["Connection: keep-alive"]
        if self._pos != 0 or ll is not None:
            if ll is None:
                end = self.get_length() - 1
            else:
                end = min(self._pos + ll, self.get_length()) - 1
            if self._pos >= end:
                return b""
            headers.append(f"Range: bytes={self._pos}-{end}")
            download_range = True

        dats = BytesIO()
        c = self._curl
        c.setopt(pycurl.URL, self._url)
        c.setopt(pycurl.WRITEDATA, dats)
        c.setopt(pycurl.NOSIGNAL, 1)
        c.setopt(pycurl.TIMEOUT_MS, 500000)
        c.setopt(pycurl.HTTPHEADER, headers)
        c.setopt(pycurl.FOLLOWLOCATION, True)

        if self._debug:
            print("downloading", self._url)

            def header(x):
                # Surface cache MISS headers from intermediaries.
                if b'MISS' in x:
                    print(x.strip())

            c.setopt(pycurl.HEADERFUNCTION, header)

            def test(debug_type, debug_msg):
                print("  debug(%d): %s" % (debug_type, debug_msg.strip()))

            c.setopt(pycurl.VERBOSE, 1)
            c.setopt(pycurl.DEBUGFUNCTION, test)
            t1 = time.time()

        c.perform()

        if self._debug:
            t2 = time.time()
            if t2 - t1 > 0.1:
                # BUGFIX: ":.f" is an invalid format spec (precision missing) and
                # raised ValueError here; ":.3f" prints the elapsed seconds.
                print(f"get {self._url} {headers!r} {t2 - t1:.3f} slow")

        response_code = c.getinfo(pycurl.RESPONSE_CODE)
        if response_code == 416:  # Requested Range Not Satisfiable
            raise Exception(f"Error, range out of bounds {response_code} {headers} ({self._url}): {repr(dats.getvalue())[:500]}")
        if download_range and response_code != 206:  # Partial Content
            raise Exception(f"Error, requested range but got unexpected response {response_code} {headers} ({self._url}): {repr(dats.getvalue())[:500]}")
        if (not download_range) and response_code != 200:  # OK
            raise Exception(f"Error {response_code} {headers} ({self._url}): {repr(dats.getvalue())[:500]}")

        ret = dats.getvalue()
        self._pos += len(ret)
        return ret

    def seek(self, pos):
        """Move the read position to absolute offset ``pos``."""
        self._pos = pos

    @property
    def name(self):
        """Returns a local path to file with the URLFile's contents.

        This can be used to interface with modules that require local files.
        """
        if self._local_file is None:
            _, ext = os.path.splitext(urllib.parse.urlparse(self._url).path)
            local_fd, local_path = tempfile.mkstemp(suffix=ext)
            try:
                os.write(local_fd, self.read())
                local_file = open(local_path, "rb")
            except Exception:
                os.remove(local_path)
                raise
            finally:
                os.close(local_fd)
            self._local_file = local_file
            # Delegate subsequent reads/seeks to the materialized local file.
            self.read = self._local_file.read
            self.seek = self._local_file.seek
        return self._local_file.name
4,699 | do prepare partition | #!/usr/bin/env python3
#
# Copyright (c) 2011 Intel, Inc.
#
# SPDX-License-Identifier: GPL-2.0-only
#
__all__ = ['ImagerPlugin', 'SourcePlugin']
import os
import logging
from collections import defaultdict
from importlib.machinery import SourceFileLoader
from wic import WicError
from wic.misc import get_bitbake_var
PLUGIN_TYPES = ["imager", "source"]
SCRIPTS_PLUGIN_DIR = ["scripts/lib/wic/plugins", "lib/wic/plugins"]
logger = logging.getLogger('wic')
PLUGINS = defaultdict(dict)
class PluginMgr:
    """Discovers wic plugin modules on disk and exposes the registered classes."""

    # directories searched for plugin modules, resolved once per process
    _plugin_dirs = []

    @classmethod
    def get_plugins(cls, ptype):
        """Get dictionary of <plugin_name>:<class> pairs."""
        if ptype not in PLUGIN_TYPES:
            raise WicError('%s is not valid plugin type' % ptype)

        # Resolve plugin directories lazily: built-in dir plus any layer dirs,
        # with layer dirs taking priority (prepended).
        if not cls._plugin_dirs:
            cls._plugin_dirs = [os.path.join(os.path.dirname(__file__), 'plugins')]
            layers = get_bitbake_var("BBLAYERS") or ''
            for layer_path in layers.split():
                for script_plugin_dir in SCRIPTS_PLUGIN_DIR:
                    candidate = os.path.abspath(
                        os.path.expanduser(os.path.join(layer_path, script_plugin_dir)))
                    if candidate not in cls._plugin_dirs and os.path.isdir(candidate):
                        cls._plugin_dirs.insert(0, candidate)

        # Import every module of this plugin type once; importing triggers
        # registration via PluginMeta into the module-level PLUGINS dict.
        if ptype not in PLUGINS:
            for plugin_dir in cls._plugin_dirs:
                type_dir = os.path.join(plugin_dir, ptype)
                if not os.path.isdir(type_dir):
                    continue
                for fname in os.listdir(type_dir):
                    if not fname.endswith('.py'):
                        continue
                    module_name = fname[:-3]
                    module_path = os.path.join(type_dir, fname)
                    logger.debug("loading plugin module %s", module_path)
                    SourceFileLoader(module_name, module_path).load_module()

        return PLUGINS.get(ptype)
class PluginMeta(type):
    """Metaclass that auto-registers named plugin classes.

    Any class carrying a ``name`` attribute is recorded in the module-level
    PLUGINS registry under its ``wic_plugin_type`` bucket at definition time.
    """

    def __new__(mcs, name, bases, attrs):
        new_cls = super().__new__(mcs, name, bases, attrs)
        if 'name' in attrs:
            PLUGINS[new_cls.wic_plugin_type][attrs['name']] = new_cls
        return new_cls
class ImagerPlugin(metaclass=PluginMeta):
    """Base class for --imager (output image) plugins."""

    wic_plugin_type = "imager"

    def do_create(self):
        """Entry point for image creation; concrete imager plugins must override."""
        raise WicError("Method %s.do_create is not implemented" %
                       type(self).__name__)
class SourcePlugin(metaclass=PluginMeta):
    """
    The methods that can be implemented by --source plugins.

    Any methods not implemented in a subclass inherit these.
    """

    wic_plugin_type = "source"

    @classmethod
    def do_install_disk(cls, disk, disk_name, creator, workdir, oe_builddir,
                        bootimg_dir, kernel_dir, native_sysroot):
        """
        Called after all partitions have been prepared and assembled into a
        disk image. This provides a hook to allow finalization of a
        disk image e.g. to write an MBR to it.
        """
        logger.debug("SourcePlugin: do_install_disk: disk: %s", disk_name)

    @classmethod
    def do_stage_partition(cls, part, source_params, creator, cr_workdir,
                           oe_builddir, bootimg_dir, kernel_dir,
                           native_sysroot):
        """
        Special content staging hook called before do_prepare_partition(),
        normally empty.

        Typically, a partition will just use the passed-in parameters, e.g.
        straight bootimg_dir, etc, but in some cases, things need to
        be more tailored e.g. to use a deploy dir + /boot, etc. This
        hook allows those files to be staged in a customized fashion.
        Note that get_bitbake_var() allows you to access non-standard
        variables that you might want to use for this.
        """
        logger.debug("SourcePlugin: do_stage_partition: part: %s", part)

    @classmethod
    def do_configure_partition(cls, part, source_params, creator, cr_workdir,
                               oe_builddir, bootimg_dir, kernel_dir,
                               native_sysroot):
        """
        Called before do_prepare_partition(), typically used to create
        custom configuration files for a partition, for example
        syslinux or grub config files.
        """
        logger.debug("SourcePlugin: do_configure_partition: part: %s", part)

    @classmethod
    def do_prepare_partition(cls, part, source_params, creator, cr_workdir,
                             oe_builddir, bootimg_dir, kernel_dir, rootfs_dir,
                             native_sysroot):
        """
        Called to do the actual content population for a partition i.e. it
        'prepares' the partition to be incorporated into the image.
        """
        logger.debug("SourcePlugin: do_prepare_partition: part: %s", part)

    @classmethod
    def do_post_partition(cls, part, source_params, creator, cr_workdir,
                          oe_builddir, bootimg_dir, kernel_dir, rootfs_dir,
                          native_sysroot):
        """
        Called after the partition is created. It is useful to add post
        operations e.g. security signing the partition.
        """
        logger.debug("SourcePlugin: do_post_partition: part: %s", part)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.