id int64 0 300k | label stringlengths 1 74 ⌀ | text stringlengths 4k 8k |
|---|---|---|
6,200 | get featureset version feature | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
# Public API of this generated module.
__all__ = [
    'GetFeaturesetVersionFeatureResult',
    'AwaitableGetFeaturesetVersionFeatureResult',
    'get_featureset_version_feature',
    'get_featureset_version_feature_output',
]


@pulumi.output_type
class GetFeaturesetVersionFeatureResult:
    """
    Dto object representing feature
    """
    def __init__(__self__, data_type=None, description=None, feature_name=None, tags=None):
        # Truthiness guards: falsy values (None, "") skip the isinstance
        # check; this is the standard pulumi-codegen validation pattern.
        if data_type and not isinstance(data_type, str):
            raise TypeError("Expected argument 'data_type' to be a str")
        pulumi.set(__self__, "data_type", data_type)
        if description and not isinstance(description, str):
            raise TypeError("Expected argument 'description' to be a str")
        pulumi.set(__self__, "description", description)
        if feature_name and not isinstance(feature_name, str):
            raise TypeError("Expected argument 'feature_name' to be a str")
        pulumi.set(__self__, "feature_name", feature_name)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)

    @property
    @pulumi.getter(name="dataType")
    def data_type(self) -> Optional[str]:
        """
        Specifies type
        """
        return pulumi.get(self, "data_type")

    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        """
        Specifies description
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter(name="featureName")
    def feature_name(self) -> Optional[str]:
        """
        Specifies name
        """
        return pulumi.get(self, "feature_name")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Specifies tags
        """
        return pulumi.get(self, "tags")
class AwaitableGetFeaturesetVersionFeatureResult(GetFeaturesetVersionFeatureResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` turns this method into a generator, which is
        # what makes instances awaitable; awaiting immediately produces a
        # plain (non-awaitable) copy of the result.
        if False:
            yield self
        return GetFeaturesetVersionFeatureResult(
            data_type=self.data_type,
            description=self.description,
            feature_name=self.feature_name,
            tags=self.tags)
def METHOD_NAME(feature_name: Optional[str] = None,
                name: Optional[str] = None,
                resource_group_name: Optional[str] = None,
                version: Optional[str] = None,
                workspace_name: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetFeaturesetVersionFeatureResult:
    """
    Dto object representing feature

    :param str feature_name: Specifies name of the feature.
    :param str name: Feature set name. This is case-sensitive.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str version: Feature set version identifier. This is case-sensitive.
    :param str workspace_name: Name of Azure Machine Learning workspace.
    """
    # Map the snake_case arguments onto the camelCase names the provider expects.
    __args__ = {
        'featureName': feature_name,
        'name': name,
        'resourceGroupName': resource_group_name,
        'version': version,
        'workspaceName': workspace_name,
    }
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:machinelearningservices/v20230201preview:getFeaturesetVersionFeature', __args__, opts=opts, typ=GetFeaturesetVersionFeatureResult).value
    return AwaitableGetFeaturesetVersionFeatureResult(
        data_type=pulumi.get(__ret__, 'data_type'),
        description=pulumi.get(__ret__, 'description'),
        feature_name=pulumi.get(__ret__, 'feature_name'),
        tags=pulumi.get(__ret__, 'tags'))
@_utilities.lift_output_func(METHOD_NAME)
def get_featureset_version_feature_output(feature_name: Optional[pulumi.Input[Optional[str]]] = None,
                                          name: Optional[pulumi.Input[str]] = None,
                                          resource_group_name: Optional[pulumi.Input[str]] = None,
                                          version: Optional[pulumi.Input[str]] = None,
                                          workspace_name: Optional[pulumi.Input[str]] = None,
                                          opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetFeaturesetVersionFeatureResult]:
    """
    Dto object representing feature

    :param str feature_name: Specifies name of the feature.
    :param str name: Feature set name. This is case-sensitive.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str version: Feature set version identifier. This is case-sensitive.
    :param str workspace_name: Name of Azure Machine Learning workspace.
    """
    # The implementation is supplied by the lift_output_func decorator, which
    # wraps the plain invoke above so Input[...] arguments are resolved first.
    ...
6,201 | id | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
# Public API of this generated module.
__all__ = [
    'ListAppServicePlanHybridConnectionKeysResult',
    'AwaitableListAppServicePlanHybridConnectionKeysResult',
    'list_app_service_plan_hybrid_connection_keys',
    'list_app_service_plan_hybrid_connection_keys_output',
]


@pulumi.output_type
class ListAppServicePlanHybridConnectionKeysResult:
    """
    Hybrid Connection key contract. This has the send key name and value for a Hybrid Connection.
    """
    # NOTE(review): METHOD_NAME appears to be a masked identifier (originally
    # `id`, matching the "id" key used with pulumi.set/get below) — confirm.
    def __init__(__self__, METHOD_NAME=None, kind=None, name=None, send_key_name=None, send_key_value=None, type=None):
        # Truthiness guards: falsy values (None, "") skip the isinstance
        # check; this is the standard pulumi-codegen validation pattern.
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", METHOD_NAME)
        if kind and not isinstance(kind, str):
            raise TypeError("Expected argument 'kind' to be a str")
        pulumi.set(__self__, "kind", kind)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if send_key_name and not isinstance(send_key_name, str):
            raise TypeError("Expected argument 'send_key_name' to be a str")
        pulumi.set(__self__, "send_key_name", send_key_name)
        if send_key_value and not isinstance(send_key_value, str):
            raise TypeError("Expected argument 'send_key_value' to be a str")
        pulumi.set(__self__, "send_key_value", send_key_value)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def METHOD_NAME(self) -> str:
        """
        Resource Id.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def kind(self) -> Optional[str]:
        """
        Kind of resource.
        """
        return pulumi.get(self, "kind")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource Name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="sendKeyName")
    def send_key_name(self) -> str:
        """
        The name of the send key.
        """
        return pulumi.get(self, "send_key_name")

    @property
    @pulumi.getter(name="sendKeyValue")
    def send_key_value(self) -> str:
        """
        The value of the send key.
        """
        return pulumi.get(self, "send_key_value")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
class AwaitableListAppServicePlanHybridConnectionKeysResult(ListAppServicePlanHybridConnectionKeysResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` makes this method a generator, which is what
        # allows instances to be awaited; awaiting simply returns a plain
        # (non-awaitable) copy of the result.
        if False:
            yield self
        return ListAppServicePlanHybridConnectionKeysResult(
            METHOD_NAME=self.METHOD_NAME,
            kind=self.kind,
            name=self.name,
            send_key_name=self.send_key_name,
            send_key_value=self.send_key_value,
            type=self.type)
def list_app_service_plan_hybrid_connection_keys(name: Optional[str] = None,
                                                 namespace_name: Optional[str] = None,
                                                 relay_name: Optional[str] = None,
                                                 resource_group_name: Optional[str] = None,
                                                 opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListAppServicePlanHybridConnectionKeysResult:
    """
    Get the send key name and value of a Hybrid Connection.

    :param str name: Name of the App Service plan.
    :param str namespace_name: The name of the Service Bus namespace.
    :param str relay_name: The name of the Service Bus relay.
    :param str resource_group_name: Name of the resource group to which the resource belongs.
    """
    # Map the snake_case arguments onto the camelCase names the provider expects.
    __args__ = {
        'name': name,
        'namespaceName': namespace_name,
        'relayName': relay_name,
        'resourceGroupName': resource_group_name,
    }
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:web/v20160901:listAppServicePlanHybridConnectionKeys', __args__, opts=opts, typ=ListAppServicePlanHybridConnectionKeysResult).value
    return AwaitableListAppServicePlanHybridConnectionKeysResult(
        METHOD_NAME=pulumi.get(__ret__, 'id'),
        kind=pulumi.get(__ret__, 'kind'),
        name=pulumi.get(__ret__, 'name'),
        send_key_name=pulumi.get(__ret__, 'send_key_name'),
        send_key_value=pulumi.get(__ret__, 'send_key_value'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(list_app_service_plan_hybrid_connection_keys)
def list_app_service_plan_hybrid_connection_keys_output(name: Optional[pulumi.Input[str]] = None,
                                                        namespace_name: Optional[pulumi.Input[str]] = None,
                                                        relay_name: Optional[pulumi.Input[str]] = None,
                                                        resource_group_name: Optional[pulumi.Input[str]] = None,
                                                        opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[ListAppServicePlanHybridConnectionKeysResult]:
    """
    Get the send key name and value of a Hybrid Connection.

    :param str name: Name of the App Service plan.
    :param str namespace_name: The name of the Service Bus namespace.
    :param str relay_name: The name of the Service Bus relay.
    :param str resource_group_name: Name of the resource group to which the resource belongs.
    """
    # The implementation is supplied by the lift_output_func decorator, which
    # wraps the plain invoke above so Input[...] arguments are resolved first.
    ...
6,202 | prepare | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GFT engine interface."""
import os
import re
from clusterfuzz._internal.system import new_process
from clusterfuzz.fuzz import engine
_CRASH_REGEX = re.compile(r'.*Reproducer file written to:\s*(.*)$')
class GoogleFuzzTestError(Exception):
"""Base exception class."""
def _get_reproducer_path(line):
"""Get the reproducer path, if any."""
crash_match = _CRASH_REGEX.match(line)
if not crash_match:
return None
return crash_match.group(1)
class Engine(engine.Engine):
    """GFT engine implementation."""

    @property
    def name(self):
        # Engine identifier used to select this implementation.
        return 'googlefuzztest'

    def METHOD_NAME(self, corpus_dir, target_path, build_dir):  # pylint: disable=unused-argument
        """Prepare for a fuzzing session, by generating options. Returns a
        FuzzOptions object.

        Args:
          corpus_dir: The main corpus directory.
          target_path: Path to the target.
          build_dir: Path to the build directory.

        Returns:
          A FuzzOptions object.
        """
        # Make sure the target binary is executable (0o775 = rwxrwxr-x).
        os.chmod(target_path, 0o775)
        # No extra arguments or strategy options are used for GFT.
        return engine.FuzzOptions(corpus_dir, [], {})

    def fuzz(self, target_path, options, reproducers_dir, max_time):
        """Run a fuzz session.

        Args:
          target_path: Path to the target.
          options: The FuzzOptions object returned by prepare().
          reproducers_dir: The directory to put reproducers in when crashes
              are found.
          max_time: Maximum allowed time for the fuzzing to run.

        Returns:
          A FuzzResult object.
        """
        del options  # Unused.
        runner = new_process.UnicodeProcessRunner(target_path)
        # GoogleFuzzTest writes reproducer files into the directory named by
        # this environment variable when it finds a crash.
        fuzz_result = runner.run_and_wait(
            timeout=max_time,
            extra_env={
                'FUZZTEST_REPRODUCERS_OUT_DIR': reproducers_dir,
            })
        log_lines = fuzz_result.output.splitlines()
        crashes = []
        for line in log_lines:
            reproducer_path = _get_reproducer_path(line)
            if reproducer_path:
                # Each "Reproducer file written to" line maps to one crash.
                # crash_time is approximated by the total session duration.
                crashes.append(
                    engine.Crash(
                        reproducer_path,
                        fuzz_result.output,
                        reproduce_args=[],
                        crash_time=int(fuzz_result.time_executed)))
                continue
        # TODO(ochang): Implement stats parsing.
        stats = {}
        return engine.FuzzResult(fuzz_result.output, fuzz_result.command, crashes,
                                 stats, fuzz_result.time_executed)

    def reproduce(self, target_path, input_path, arguments, max_time):  # pylint: disable=unused-argument
        """Reproduce a crash given an input.

        Args:
          target_path: Path to the target.
          input_path: Path to the reproducer input.
          arguments: Additional arguments needed for reproduction.
          max_time: Maximum allowed time for the reproduction.

        Returns:
          A ReproduceResult.
        """
        # Same executable-bit fix-up as in prepare().
        os.chmod(target_path, 0o775)
        runner = new_process.UnicodeProcessRunner(target_path)
        # FUZZTEST_REPLAY makes the target replay the given input file.
        result = runner.run_and_wait(
            timeout=max_time, extra_env={'FUZZTEST_REPLAY': input_path})
        return engine.ReproduceResult(result.command, result.return_code,
                                      result.time_executed, result.output)

    def minimize_corpus(self, target_path, arguments, input_dirs, output_dir,
                        reproducers_dir, max_time):
        """Optional (but recommended): run corpus minimization.

        Args:
          target_path: Path to the target.
          arguments: Additional arguments needed for corpus minimization.
          input_dirs: Input corpora.
          output_dir: Output directory to place minimized corpus.
          reproducers_dir: The directory to put reproducers in when crashes are
              found.
          max_time: Maximum allowed time for the minimization.

        Returns:
          A FuzzResult object.

        Raises:
          TimeoutError: If the corpus minimization exceeds max_time.
          Error: If the merge failed in some other way.
        """
        # Not supported for GFT yet.
        raise NotImplementedError

    def minimize_testcase(self, target_path, arguments, input_path, output_path,
                          max_time):
        """Optional (but recommended): Minimize a testcase.

        Args:
          target_path: Path to the target.
          arguments: Additional arguments needed for testcase minimization.
          input_path: Path to the reproducer input.
          output_path: Path to the minimized output.
          max_time: Maximum allowed time for the minimization.

        Returns:
          A ReproduceResult.

        Raises:
          TimeoutError: If the testcase minimization exceeds max_time.
        """
        # Not supported for GFT yet.
        raise NotImplementedError

    def cleanse(self, target_path, arguments, input_path, output_path, max_time):
        """Optional (but recommended): Cleanse a testcase.

        Args:
          target_path: Path to the target.
          arguments: Additional arguments needed for testcase cleanse.
          input_path: Path to the reproducer input.
          output_path: Path to the cleansed output.
          max_time: Maximum allowed time for the cleanse.

        Returns:
          A ReproduceResult.

        Raises:
          TimeoutError: If the cleanse exceeds max_time.
        """
        # Not supported for GFT yet.
        raise NotImplementedError
6,203 | create file temp | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import os
import stat
from twisted.internet import defer
from twisted.python.filepath import FilePath
from twisted.trial import unittest
from buildbot.secrets.providers.file import SecretInAFile
from buildbot.test.util.config import ConfigErrorsMixin
from buildbot.util.misc import writeLocalFile
class TestSecretInFile(ConfigErrorsMixin, unittest.TestCase):
    """Tests for the SecretInAFile secrets provider."""

    def createTempDir(self, dirname):
        # NOTE(review): `dirname` is unused — mktemp() picks the name.
        tempdir = FilePath(self.mktemp())
        tempdir.createDirectory()
        return tempdir.path

    def METHOD_NAME(self, tempdir, filename, text="", chmodRights=0o700):
        # Create a secret file with the given contents and permissions and
        # return its full path.
        file_path = os.path.join(tempdir, filename)
        writeLocalFile(file_path, text, chmodRights)
        return file_path

    @defer.inlineCallbacks
    def setUp(self):
        # One secret file "tempfile.txt" (trailing newline is intentional:
        # the default service strips it — see testGetSecretInFileNoStrip).
        self.tmp_dir = self.createTempDir("temp")
        self.filepath = self.METHOD_NAME(self.tmp_dir, "tempfile.txt",
                                         text="key value\n")
        self.srvfile = SecretInAFile(self.tmp_dir)
        yield self.srvfile.startService()

    @defer.inlineCallbacks
    def tearDown(self):
        yield self.srvfile.stopService()

    def testCheckConfigSecretInAFileService(self):
        self.assertEqual(self.srvfile.name, "SecretInAFile")
        self.assertEqual(self.srvfile._dirname, self.tmp_dir)

    def testCheckConfigErrorSecretInAFileService(self):
        # Group-readable secrets must be rejected on posix.
        if os.name != "posix":
            self.skipTest("Permission checks only works on posix systems")
        filepath = self.METHOD_NAME(self.tmp_dir, "tempfile2.txt",
                                    chmodRights=stat.S_IRGRP)
        expctd_msg_error = " on file tempfile2.txt are too " \
            "open. It is required that your secret files are" \
            " NOT accessible by others!"
        with self.assertRaisesConfigError(expctd_msg_error):
            self.srvfile.checkConfig(self.tmp_dir)
        os.remove(filepath)

    @defer.inlineCallbacks
    def testCheckConfigfileExtension(self):
        # Only files matching the configured suffixes are exposed as secrets.
        filepath = self.METHOD_NAME(self.tmp_dir, "tempfile2.ini",
                                    text="test suffix",
                                    chmodRights=stat.S_IRWXU)
        filepath2 = self.METHOD_NAME(self.tmp_dir, "tempfile2.txt",
                                     text="some text",
                                     chmodRights=stat.S_IRWXU)
        yield self.srvfile.reconfigService(self.tmp_dir, suffixes=[".ini"])
        self.assertEqual(self.srvfile.get("tempfile2"), "test suffix")
        self.assertEqual(self.srvfile.get("tempfile3"), None)
        os.remove(filepath)
        os.remove(filepath2)

    @defer.inlineCallbacks
    def testReconfigSecretInAFileService(self):
        otherdir = self.createTempDir("temp2")
        yield self.srvfile.reconfigService(otherdir)
        self.assertEqual(self.srvfile.name, "SecretInAFile")
        self.assertEqual(self.srvfile._dirname, otherdir)

    def testGetSecretInFile(self):
        # Default config: trailing whitespace is stripped from the value.
        value = self.srvfile.get("tempfile.txt")
        self.assertEqual(value, "key value")

    @defer.inlineCallbacks
    def testGetSecretInFileSuffixes(self):
        # With a suffix configured, lookup is by the bare (suffix-less) name.
        yield self.srvfile.reconfigService(self.tmp_dir, suffixes=[".txt"])
        value = self.srvfile.get("tempfile")
        self.assertEqual(value, "key value")

    def testGetSecretInFileNotFound(self):
        value = self.srvfile.get("tempfile2.txt")
        self.assertEqual(value, None)

    @defer.inlineCallbacks
    def testGetSecretInFileNoStrip(self):
        # strip=False keeps the trailing newline written in setUp().
        yield self.srvfile.reconfigService(self.tmp_dir, strip=False)
        value = self.srvfile.get("tempfile.txt")
        self.assertEqual(value, "key value\n")
6,204 | test to string scalar | """
Tests the Angle string formatting capabilities. SkyCoord formatting is in
test_sky_coord
"""
import pytest
from astropy import units as u
from astropy.coordinates.angles import Angle
def test_to_string_precision():
    # Regression coverage for issue #1319: the seconds field was formatted
    # incorrectly when precision=0.
    deg_angle = Angle(-1.23456789, unit=u.degree)
    for prec, expected in [(3, "-1d14m04.444s"), (1, "-1d14m04.4s"), (0, "-1d14m04s")]:
        assert deg_angle.to_string(precision=prec) == expected
    hour_angle = Angle(-1.23456789, unit=u.hourangle)
    for prec, expected in [(3, "-1h14m04.444s"), (1, "-1h14m04.4s"), (0, "-1h14m04s")]:
        assert hour_angle.to_string(precision=prec, unit=u.hour) == expected
    # Regression test for #7141
    half_deg = Angle(-0.5, unit=u.degree)
    for n_fields, expected in [(3, "-0d30m00s"), (2, "-0d30m"), (1, "-1d")]:
        assert half_deg.to_string(precision=0, fields=n_fields) == expected
def test_to_string_decimal():
    # Regression coverage for issue #1323, which broke decimal formatting.
    cases = [
        (Angle(2.0, unit=u.degree), "2"),
        (Angle(3.0, unit=u.hourangle), "3"),
        (Angle(4.0, unit=u.radian), "4"),
    ]
    for angle, base in cases:
        assert angle.to_string(decimal=True, precision=3) == base + ".000"
        assert angle.to_string(decimal=True, precision=1) == base + ".0"
        assert angle.to_string(decimal=True, precision=0) == base
    # A custom separator makes no sense for decimal output.
    with pytest.raises(ValueError, match="sexagesimal notation"):
        cases[-1][0].to_string(decimal=True, sep="abc")
def test_to_string_formats():
    # The latex / latex_inline / unicode output formats must agree for
    # degrees, hour angles, and radians.
    a = Angle(1.113355, unit=u.deg)
    latex_str = r"$1^\circ06{}^\prime48.078{}^{\prime\prime}$"
    assert a.to_string(format="latex") == latex_str
    assert a.to_string(format="latex_inline") == latex_str
    assert a.to_string(format="unicode") == "1°06′48.078″"

    a = Angle(1.113355, unit=u.hour)
    latex_str = r"$1^{\mathrm{h}}06^{\mathrm{m}}48.078^{\mathrm{s}}$"
    assert a.to_string(format="latex") == latex_str
    assert a.to_string(format="latex_inline") == latex_str
    assert a.to_string(format="unicode") == "1ʰ06ᵐ48.078ˢ"

    # Radians are not sexagesimal; they render as a plain decimal plus unit.
    a = Angle(1.113355, unit=u.radian)
    assert a.to_string(format="latex") == r"$1.11336\;\mathrm{rad}$"
    assert a.to_string(format="latex_inline") == r"$1.11336\;\mathrm{rad}$"
    assert a.to_string(format="unicode") == "1.11336 rad"
def test_to_string_decimal_formats():
    # Decimal output must carry the unit symbol in each output format.
    angle1 = Angle(2.0, unit=u.degree)
    assert angle1.to_string(decimal=True, format="generic") == "2 deg"
    assert angle1.to_string(decimal=True, format="latex") == "$2\\mathrm{{}^{\\circ}}$"
    assert angle1.to_string(decimal=True, format="unicode") == "2°"

    angle2 = Angle(3.0, unit=u.hourangle)
    assert angle2.to_string(decimal=True, format="generic") == "3 hourangle"
    assert angle2.to_string(decimal=True, format="latex") == "$3\\mathrm{{}^{h}}$"
    assert angle2.to_string(decimal=True, format="unicode") == "3ʰ"

    angle3 = Angle(4.0, unit=u.radian)
    assert angle3.to_string(decimal=True, format="generic") == "4 rad"
    assert angle3.to_string(decimal=True, format="latex") == "$4\\;\\mathrm{rad}$"
    assert angle3.to_string(decimal=True, format="unicode") == "4 rad"

    # Unknown format names must be rejected.
    with pytest.raises(ValueError, match="Unknown format"):
        angle3.to_string(decimal=True, format="myformat")
def test_to_string_fields():
    # Truncating to fewer sexagesimal fields rounds the last shown field.
    angle = Angle(1.113355, unit=u.deg)
    expected_by_fields = {1: "1d", 2: "1d07m", 3: "1d06m48.078s"}
    for n_fields, expected in expected_by_fields.items():
        assert angle.to_string(fields=n_fields) == expected
def test_to_string_padding():
    # Zero-padding must work for positive and negative angles alike.
    for value, expected in [(0.5653, "00:33:55.08"), (-0.5653, "-00:33:55.08")]:
        angle = Angle(value, unit=u.deg)
        assert angle.to_string(unit="deg", sep=":", pad=True) == expected
def test_sexagesimal_rounding_up():
    # An angle infinitesimally below 360d rounds up to 360d at every precision
    # up to 8; only at precision=9 is the true value distinguishable.
    a = Angle(359.999999999999, unit=u.deg)

    assert a.to_string(precision=None) == "360d00m00s"
    assert a.to_string(precision=4) == "360d00m00.0000s"
    assert a.to_string(precision=5) == "360d00m00.00000s"
    assert a.to_string(precision=6) == "360d00m00.000000s"
    assert a.to_string(precision=7) == "360d00m00.0000000s"
    assert a.to_string(precision=8) == "360d00m00.00000000s"
    assert a.to_string(precision=9) == "359d59m59.999999996s"

    # Rounding must also propagate into truncated field counts.
    a = Angle(3.999999, unit=u.deg)
    assert a.to_string(fields=2, precision=None) == "4d00m"
    assert a.to_string(fields=2, precision=1) == "4d00m"
    assert a.to_string(fields=2, precision=5) == "4d00m"
    assert a.to_string(fields=1, precision=1) == "4d"
    assert a.to_string(fields=1, precision=5) == "4d"
def METHOD_NAME():
    # to_string() on a scalar Angle must produce a plain Python str.
    scalar_angle = Angle(1.113355, unit=u.deg)
    result = scalar_angle.to_string()
    assert isinstance(result, str)
def test_to_string_radian_with_precision():
    """
    Regression test for a bug that caused ``to_string`` to crash for angles in
    radians when specifying the precision.
    """
    # Check that specifying the precision works
    # (sep="fromunit" presumably exercises the non-sexagesimal path — confirm).
    a = Angle(3.0, unit=u.rad)
    assert a.to_string(precision=3, sep="fromunit") == "3.000 rad"
def test_sexagesimal_round_down():
    # Exact values must not be nudged upward by the sexagesimal formatter.
    hour_angle = Angle(1, u.deg).to(u.hourangle)
    deg_angle = Angle(2, u.deg)
    assert hour_angle.to_string() == "0h04m00s"
    assert deg_angle.to_string() == "2d00m00s"
def test_to_string_fields_colon():
    # Field truncation must interact correctly with a custom separator.
    angle = Angle(1.113355, unit=u.deg)
    for n_fields, expected in [(2, "1:07"), (3, "1:06:48.078"), (1, "1")]:
        assert angle.to_string(fields=n_fields, sep=":") == expected
6,205 | do ignore | import signal
import sys
from bdb import Bdb
from cmd import Cmd
from collections.abc import Callable, Iterable, Mapping, Sequence
from inspect import _SourceObjectType
from types import CodeType, FrameType, TracebackType
from typing import IO, Any, ClassVar, TypeVar
from typing_extensions import ParamSpec, Self
__all__ = ["run", "pm", "Pdb", "runeval", "runctx", "runcall", "set_trace", "post_mortem", "help"]

# Type variables used to preserve the wrapped callable's signature in runcall().
_T = TypeVar("_T")
_P = ParamSpec("_P")

line_prefix: str  # undocumented

# NOTE(review): presumably signals a debugger "restart" request — confirm
# against the pdb implementation.
class Restart(Exception): ...

# Module-level convenience entry points for starting the debugger.
def run(statement: str, globals: dict[str, Any] | None = None, locals: Mapping[str, Any] | None = None) -> None: ...
def runeval(expression: str, globals: dict[str, Any] | None = None, locals: Mapping[str, Any] | None = None) -> Any: ...
def runctx(statement: str, globals: dict[str, Any], locals: Mapping[str, Any]) -> None: ...
def runcall(func: Callable[_P, _T], *args: _P.args, **kwds: _P.kwargs) -> _T | None: ...
def set_trace(*, header: str | None = None) -> None: ...
def post_mortem(t: TracebackType | None = None) -> None: ...
def pm() -> None: ...
class Pdb(Bdb, Cmd):
    # Everything here is undocumented, except for __init__

    commands_resuming: ClassVar[list[str]]

    # Mutable debugger state.
    aliases: dict[str, str]
    mainpyfile: str
    _wait_for_mainpyfile: bool
    rcLines: list[str]
    # Per-breakpoint command lists, keyed by breakpoint number.
    commands: dict[int, list[str]]
    commands_doprompt: dict[int, bool]
    commands_silent: dict[int, bool]
    commands_defining: bool
    commands_bnum: int | None
    lineno: int | None
    # Current traversal position in the frame stack.
    stack: list[tuple[FrameType, int]]
    curindex: int
    curframe: FrameType | None
    curframe_locals: Mapping[str, Any]

    def __init__(
        self,
        completekey: str = "tab",
        stdin: IO[str] | None = None,
        stdout: IO[str] | None = None,
        skip: Iterable[str] | None = None,
        nosigint: bool = False,
        readrc: bool = True,
    ) -> None: ...
    def forget(self) -> None: ...
    def setup(self, f: FrameType | None, tb: TracebackType | None) -> None: ...
    def execRcLines(self) -> None: ...
    def bp_commands(self, frame: FrameType) -> bool: ...
    def interaction(self, frame: FrameType | None, traceback: TracebackType | None) -> None: ...
    def displayhook(self, obj: object) -> None: ...
    def handle_command_def(self, line: str) -> bool: ...
    def defaultFile(self) -> str: ...
    def lineinfo(self, identifier: str) -> tuple[None, None, None] | tuple[str, str, int]: ...
    def checkline(self, filename: str, lineno: int) -> int: ...
    def _getval(self, arg: str) -> object: ...
    def print_stack_trace(self) -> None: ...
    def print_stack_entry(self, frame_lineno: tuple[FrameType, int], prompt_prefix: str = "\n-> ") -> None: ...
    def lookupmodule(self, filename: str) -> str | None: ...
    if sys.version_info < (3, 11):
        def _runscript(self, filename: str) -> None: ...

    # One do_<command> handler per debugger command (cmd.Cmd dispatch).
    def do_commands(self, arg: str) -> bool | None: ...
    def do_break(self, arg: str, temporary: bool = ...) -> bool | None: ...
    def do_tbreak(self, arg: str) -> bool | None: ...
    def do_enable(self, arg: str) -> bool | None: ...
    def do_disable(self, arg: str) -> bool | None: ...
    def do_condition(self, arg: str) -> bool | None: ...
    # NOTE(review): METHOD_NAME looks like a masked `do_ignore` (matching
    # complete_ignore below) — confirm.
    def METHOD_NAME(self, arg: str) -> bool | None: ...
    def do_clear(self, arg: str) -> bool | None: ...
    def do_where(self, arg: str) -> bool | None: ...
    def do_up(self, arg: str) -> bool | None: ...
    def do_down(self, arg: str) -> bool | None: ...
    def do_until(self, arg: str) -> bool | None: ...
    def do_step(self, arg: str) -> bool | None: ...
    def do_next(self, arg: str) -> bool | None: ...
    def do_run(self, arg: str) -> bool | None: ...
    def do_return(self, arg: str) -> bool | None: ...
    def do_continue(self, arg: str) -> bool | None: ...
    def do_jump(self, arg: str) -> bool | None: ...
    def do_debug(self, arg: str) -> bool | None: ...
    def do_quit(self, arg: str) -> bool | None: ...
    def do_EOF(self, arg: str) -> bool | None: ...
    def do_args(self, arg: str) -> bool | None: ...
    def do_retval(self, arg: str) -> bool | None: ...
    def do_p(self, arg: str) -> bool | None: ...
    def do_pp(self, arg: str) -> bool | None: ...
    def do_list(self, arg: str) -> bool | None: ...
    def do_whatis(self, arg: str) -> bool | None: ...
    def do_alias(self, arg: str) -> bool | None: ...
    def do_unalias(self, arg: str) -> bool | None: ...
    def do_help(self, arg: str) -> bool | None: ...

    # Short aliases for the command handlers above.
    do_b = do_break
    do_cl = do_clear
    do_w = do_where
    do_bt = do_where
    do_u = do_up
    do_d = do_down
    do_unt = do_until
    do_s = do_step
    do_n = do_next
    do_restart = do_run
    do_r = do_return
    do_c = do_continue
    do_cont = do_continue
    do_j = do_jump
    do_q = do_quit
    do_exit = do_quit
    do_a = do_args
    do_rv = do_retval
    do_l = do_list
    do_h = do_help

    def help_exec(self) -> None: ...
    def help_pdb(self) -> None: ...
    def sigint_handler(self, signum: signal.Signals, frame: FrameType) -> None: ...
    def message(self, msg: str) -> None: ...
    def error(self, msg: str) -> None: ...
    if sys.version_info >= (3, 12):
        def set_convenience_variable(self, frame: FrameType, name: str, value: Any) -> None: ...

    def _select_frame(self, number: int) -> None: ...
    def _getval_except(self, arg: str, frame: FrameType | None = None) -> object: ...
    def _print_lines(
        self, lines: Sequence[str], start: int, breaks: Sequence[int] = (), frame: FrameType | None = None
    ) -> None: ...
    def _cmdloop(self) -> None: ...
    def do_display(self, arg: str) -> bool | None: ...
    def do_interact(self, arg: str) -> bool | None: ...
    def do_longlist(self, arg: str) -> bool | None: ...
    def do_source(self, arg: str) -> bool | None: ...
    def do_undisplay(self, arg: str) -> bool | None: ...
    do_ll = do_longlist

    # Tab-completion hooks; the three generic _complete_* helpers below are
    # shared by the complete_<command> assignments.
    def _complete_location(self, text: str, line: str, begidx: int, endidx: int) -> list[str]: ...
    def _complete_bpnumber(self, text: str, line: str, begidx: int, endidx: int) -> list[str]: ...
    def _complete_expression(self, text: str, line: str, begidx: int, endidx: int) -> list[str]: ...
    def complete_undisplay(self, text: str, line: str, begidx: int, endidx: int) -> list[str]: ...
    def complete_unalias(self, text: str, line: str, begidx: int, endidx: int) -> list[str]: ...
    complete_commands = _complete_bpnumber
    complete_break = _complete_location
    complete_b = _complete_location
    complete_tbreak = _complete_location
    complete_enable = _complete_bpnumber
    complete_disable = _complete_bpnumber
    complete_condition = _complete_bpnumber
    complete_ignore = _complete_bpnumber
    complete_clear = _complete_location
    complete_cl = _complete_location
    complete_debug = _complete_expression
    complete_print = _complete_expression
    complete_p = _complete_expression
    complete_pp = _complete_expression
    complete_source = _complete_expression
    complete_whatis = _complete_expression
    complete_display = _complete_expression
    if sys.version_info < (3, 11):
        def _runmodule(self, module_name: str) -> None: ...
# undocumented
def find_function(funcname: str, filename: str) -> tuple[str, str, int] | None: ...
def main() -> None: ...
def help() -> None: ...

if sys.version_info < (3, 10):
    # Only present on Python < 3.10.
    def getsourcelines(obj: _SourceObjectType) -> tuple[list[str], int]: ...
    def lasti2lineno(code: CodeType, lasti: int) -> int: ...
# str subclass whose __repr__ returns the string itself (note the Self
# return type) — i.e. repr() adds no quoting or escaping.
class _rstr(str):
    def __repr__(self) -> Self: ...
6,206 | train epoch | import os
import random
from pathlib import Path
import nni
import torch
import torch.nn.functional as F
# remember to import nni.retiarii.nn.pytorch as nn, instead of torch.nn as nn
import nni.retiarii.nn.pytorch as nn
import nni.retiarii.strategy as strategy
from nni.retiarii import model_wrapper
from nni.retiarii.evaluator import FunctionalEvaluator
from nni.retiarii.experiment.pytorch import RetiariiExeConfig, RetiariiExperiment, debug_mutated_model
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import MNIST
class DepthwiseSeparableConv(nn.Module):
    """A 3x3 depthwise convolution followed by a 1x1 pointwise convolution."""

    def __init__(self, in_ch, out_ch):
        super().__init__()
        # groups=in_ch convolves each input channel independently (depthwise).
        self.depthwise = nn.Conv2d(in_ch, in_ch, kernel_size=3, groups=in_ch)
        self.pointwise = nn.Conv2d(in_ch, out_ch, kernel_size=1)

    def forward(self, x):
        depthwise_out = self.depthwise(x)
        return self.pointwise(depthwise_out)
@model_wrapper
class Net(nn.Module):
    """Mutable CNN classifier (10 classes): the NAS strategy chooses the
    second conv layer, the first dropout rate, and the hidden width."""

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 32, 3, 1)
        # LayerChoice is used to select a layer between Conv2d and DwConv.
        self.conv2 = nn.LayerChoice([
            nn.Conv2d(32, 64, 3, 1),
            DepthwiseSeparableConv(32, 64)
        ])
        # ValueChoice is used to select a dropout rate.
        # ValueChoice can be used as parameter of modules wrapped in `nni.retiarii.nn.pytorch`
        # or customized modules wrapped with `@basic_unit`.
        self.dropout1 = nn.Dropout(nn.ValueChoice([0.25, 0.5, 0.75]))
        self.dropout2 = nn.Dropout(0.5)
        feature = nn.ValueChoice([64, 128, 256])
        # Same value choice can be used multiple times
        # (fc1's output width and fc2's input width stay consistent).
        # assumes 28x28 single-channel input (9216 = 64 * 12 * 12 after the
        # convs and 2x2 max-pool) — TODO confirm
        self.fc1 = nn.Linear(9216, feature)
        self.fc2 = nn.Linear(feature, 10)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(self.conv2(x), 2)
        x = torch.flatten(self.dropout1(x), 1)
        # Returns raw logits (no softmax); pair with a cross-entropy loss.
        x = self.fc2(self.dropout2(F.relu(self.fc1(x))))
        return x
def METHOD_NAME(model, device, train_loader, optimizer, epoch):
    """Train ``model`` for one pass over ``train_loader``.

    Logs the running cross-entropy loss every 10 mini-batches.
    """
    criterion = torch.nn.CrossEntropyLoss()
    model.train()
    for step, (inputs, labels) in enumerate(train_loader):
        inputs = inputs.to(device)
        labels = labels.to(device)
        optimizer.zero_grad()
        loss = criterion(model(inputs), labels)
        loss.backward()
        optimizer.step()
        if step % 10 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, step * len(inputs), len(train_loader.dataset),
                100. * step / len(train_loader), loss.item()))
def test_epoch(model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
accuracy = 100. * correct / len(test_loader.dataset)
print('\nTest set: Accuracy: {}/{} ({:.0f}%)\n'.format(
correct, len(test_loader.dataset), accuracy))
return accuracy
def evaluate_model(model_cls):
    """Retiarii trial entry point: train a sampled model on MNIST for three
    epochs and report per-epoch and final accuracy back to NNI.
    """
    # "model_cls" is a class, need to instantiate
    model = model_cls()
    # export model for visualization
    if 'NNI_OUTPUT_DIR' in os.environ:
        torch.onnx.export(model, (torch.randn(1, 1, 28, 28), ),
                          Path(os.environ['NNI_OUTPUT_DIR']) / 'model.onnx')
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    transf = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
    # download=True fetches MNIST into data/mnist on first use.
    train_loader = DataLoader(MNIST('data/mnist', download=True, transform=transf), batch_size=64, shuffle=True)
    test_loader = DataLoader(MNIST('data/mnist', download=True, train=False, transform=transf), batch_size=64)
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    model.to(device)
    for epoch in range(3):
        # train the model for one epoch
        METHOD_NAME(model, device, train_loader, optimizer, epoch)
        # test the model for one epoch
        accuracy = test_epoch(model, device, test_loader)
        # call report intermediate result. Result can be float or dict
        nni.report_intermediate_result(accuracy)
    # report final test result
    nni.report_final_result(accuracy)
if __name__ == '__main__':
    # Assemble the NAS experiment: search space + random strategy + trial
    # function, then run it on the local training service.
    base_model = Net()
    search_strategy = strategy.Random()
    model_evaluator = FunctionalEvaluator(evaluate_model)
    exp = RetiariiExperiment(base_model, model_evaluator, [], search_strategy)
    exp_config = RetiariiExeConfig('local')
    exp_config.experiment_name = 'mnist_search'
    # Two trials at a time, at most 20 sampled architectures.
    exp_config.trial_concurrency = 2
    exp_config.max_trial_number = 20
    exp_config.training_service.use_active_gpu = False
    export_formatter = 'dict'
    # uncomment this for graph-based execution engine
    # exp_config.execution_engine = 'base'
    # export_formatter = 'code'
    # Blocks until the experiment finishes; web UI served on port 8080.
    exp.run(exp_config, 8080)
    print('Final model:')
    for model_code in exp.export_top_models(formatter=export_formatter):
        print(model_code)
#
# Copyright (C) 2014-2017, 2018, 2020, 2021, 2022, 2023
# Smithsonian Astrophysical Observatory
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from setuptools import Command
import re
from .extensions import build_ext
# What versions of XSPEC do we support? I am not sure what the
# naming of the XSPEC components are, but let's stick with
# major, minor, and micro. We drop the patch level - e.g.
# "c" in "12.12.0c" as that is not helpful to track here.
#
SUPPORTED_VERSIONS = [(12, 12, 0), (12, 12, 1),
                      (12, 13, 0), (12, 13, 1)]
# We could use packaging.versions.Version here, but for our needs we
# can get away with a tuple of integers. That is, we do not need the
# full support for PEP-440.
#
# Tuple comparison orders versions correctly, so min/max pick the
# earliest and latest supported releases.
MIN_VERSION = min(SUPPORTED_VERSIONS)
MAX_VERSION = max(SUPPORTED_VERSIONS)
def clean(xs):
    """Return a copy of *xs* with every '' entry removed (other falsy
    values such as 0 or None are kept)."""
    return list(filter(lambda item: item != '', xs))
def get_version(version):
    """Strip out any XSPEC patch level.

    So '12.12.0c' gets converted to (12, 12, 0). Returning a tuple of
    integers makes version comparison trivial via standard tuple ordering.

    Parameters
    ----------
    version : str
        The XSPEC version string, of the form "12.12.0c", so it can
        include the XSPEC patch level.

    Returns
    -------
    (major, minor, micro) : tuple of int
        The XSPEC patchlevel is ignored.

    Raises
    ------
    ValueError
        If the string does not start with "<int>.<int>.<int>".
    """
    # XSPEC versions do not match PEP 440, so only the leading numeric
    # triple is parsed; the trailing patch-level text is discarded.
    found = re.search(r'^(\d+)\.(\d+)\.(\d+)', version)
    if found is None:
        raise ValueError(f"Invalid XSPEC version string: {version}")
    major, minor, micro = (int(part) for part in found.groups())
    return (major, minor, micro)
class xspec_config(Command):
    """Optional setuptools command that configures the XSPEC extension.

    When ``with_xspec`` is enabled it validates the requested XSPEC version
    against the supported range, emits one ``XSPEC_<maj>_<min>_<mic>`` macro
    per supported version at or below it, and appends the built extension to
    the distribution.

    NOTE(review): setuptools convention is that options taking a value end
    with '=' in user_options (e.g. 'xspec-version='); these entries have no
    '=' — confirm they still receive values as intended.
    NOTE(review): the ``*_include_dirs`` attributes set below (and
    ``with_xspec`` itself as a value) are not declared in user_options —
    confirm how they are expected to be populated (e.g. setup.cfg).
    """
    description = "Configure XSPEC Models external module (optional) "
    user_options = [
        ('with-xspec', None, "Whether sherpa must build the XSPEC module (default False)"),
        ('xspec-version', None, "the XSPEC version (default 12.12.0)"),
        ('xspec-lib-dirs', None, "Where the xspec libraries are located, if with-xspec is True"),
        ('xspec-libraries', None, "Name of the libraries that should be linked for xspec"),
        ('cfitsio-lib-dirs', None, "Where the cfitsio libraries are located, if with-xspec is True"),
        ('cfitsio-libraries', None, "Name of the libraries that should be linked for cfitsio"),
        ('ccfits-lib-dirs', None, "Where the CCfits libraries are located, if with-xspec is True"),
        ('ccfits-libraries', None, "Name of the libraries that should be linked for CCfits"),
        ('wcslib-lib-dirs', None, "Where the WCSLIB libraries are located, if with-xspec is True"),
        ('wcslib-libraries', None, "Name of the libraries that should be linked for WCSLIB"),
        ('gfortran-lib-dirs', None, "Where the gfortran libraries are located, if with-xspec is True"),
        ('gfortran-libraries', None, "Name of the libraries that should be linked for gfortran"),
    ]
    def METHOD_NAME(self):
        # Defaults: XSPEC support is off; library names mirror how CIAO
        # builds XSPEC.
        self.with_xspec = False
        self.xspec_version = '12.12.0'
        self.xspec_include_dirs = ''
        self.xspec_lib_dirs = ''
        # This is set up for how CIAO builds XSPEC; other users may require more libraries
        self.xspec_libraries = 'XSFunctions XSUtil XS'
        self.cfitsio_include_dirs = ''
        self.cfitsio_lib_dirs = ''
        self.cfitsio_libraries = ''
        self.ccfits_include_dirs = ''
        self.ccfits_lib_dirs = ''
        self.ccfits_libraries = ''
        self.wcslib_include_dirs = ''
        self.wcslib_lib_dirs = ''
        self.wcslib_libraries = ''
        self.gfortran_include_dirs = ''
        self.gfortran_lib_dirs = ''
        self.gfortran_libraries = ''
    def finalize_options(self):
        pass
    def run(self):
        # Nothing to do unless the user asked for XSPEC support.
        if not self.with_xspec:
            return
        macros = []
        if self.xspec_version:
            self.announce(f"Found XSPEC version: {self.xspec_version}", level=2)
            xspec_version = get_version(self.xspec_version)
            # Too-old versions are a hard error; too-new only warns below.
            if xspec_version < MIN_VERSION:
                raise ValueError(f"XSPEC Version {xspec_version} is less than {MIN_VERSION}, which is the earliest supported version for Sherpa")
            # Define a macro for every supported version <= the one found,
            # so the C++ code can conditionally enable per-version features.
            for version in SUPPORTED_VERSIONS:
                if xspec_version >= version:
                    major, minor, micro = version
                    macros += [(f'XSPEC_{major}_{minor}_{micro}', None)]
            if xspec_version > MAX_VERSION:
                self.warn(f"XSPEC Version is greater than {MAX_VERSION}, which is the latest supported version for Sherpa")
        extension = build_ext(self, 'xspec', 'xspec', 'cfitsio', 'ccfits',
                              'wcslib', 'gfortran', define_macros=macros)
        self.distribution.ext_modules.append(extension)
# Copyright (c) 2020, MeetKai Inc. All rights reserved.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Dict, List, Optional, Tuple
import numpy as np
from nemo.collections.common.tokenizers import AutoTokenizer
from nemo.core.classes import Dataset
from nemo.core.neural_types import ChannelType, MaskType, NeuralType
__all__ = ["Text2SparqlDataset"]
class Text2SparqlDataset(Dataset):
    """A dataset class that converts raw data to a dataset that can be used by NeuralMachineTranslationModel.

    Args:
        filepath: .tsv file to sequence + label.
            the first line is header (sentence [tab] label)
            each line should be [sentence][tab][label]
        encoder_tokenizer: encoder tokenizer object such as AutoTokenizer
        decoder_tokenizer: decoder tokenizer object. If using BART or end to end model, set this to encoder_tokenizer
        max_seq_length: max sequence length including bos and eos tokens
        num_samples: number of samples you want to use for the dataset. If -1, use all dataset. Useful for testing.
        convert_labels: if true, converts labels for masked lm and updates pad_id to -100
            for hf masked loss
    """

    @property
    def METHOD_NAME(self) -> Optional[Dict[str, NeuralType]]:
        """Returns definitions of module output ports.
        """
        return {
            "input_ids": NeuralType(("B", "T"), ChannelType()),
            "attention_mask": NeuralType(("B", "T"), MaskType()),
            "decoder_input_ids": NeuralType(("B", "T"), ChannelType()),
            "lm_labels": NeuralType(("B", "T"), ChannelType()),
        }

    def __init__(
        self,
        filepath: str,
        encoder_tokenizer: AutoTokenizer,
        decoder_tokenizer: AutoTokenizer,
        encoder_add_special_tokens: bool,
        decoder_add_special_tokens: bool,
        max_seq_length: int,
        num_samples: int = -1,
        convert_labels: bool = False,
    ):
        """Read the whole TSV into memory and tokenize every row eagerly."""
        self.filepath = filepath
        self.encoder_tokenizer = encoder_tokenizer
        self.decoder_tokenizer = decoder_tokenizer
        self.encoder_add_special_tokens = encoder_add_special_tokens
        self.decoder_add_special_tokens = decoder_add_special_tokens
        self.max_seq_length = max_seq_length
        self.num_samples = num_samples
        self.convert_labels = convert_labels
        if num_samples == 0:
            raise ValueError("num_samples has to be positive.", num_samples)
        # A max length of <= 2 leaves no room for content once bos/eos are
        # added, so treat it as "no limit".
        if self.max_seq_length and self.max_seq_length <= 2:
            self.max_seq_length = None
        if not os.path.exists(filepath):
            raise FileNotFoundError(
                f"{filepath} not found. The filepath must be set in train_ds.filepath and validation_ds.filepath."
            )
        with open(filepath) as f:
            # First line is the header; skip it.
            lines = f.readlines()[1:]
        if num_samples > 0:
            lines = lines[:num_samples]
        input_ids, input_masks, label_ids = [], [], []
        for line in lines:
            try:
                sentence, label = line.split("\t")
            except ValueError:
                raise ValueError("Each line of input file should contain the format [sentence][tab][label].")
            ids, mask = self.text_to_ids(
                sentence, tokenizer=encoder_tokenizer, add_special_tokens=encoder_add_special_tokens
            )
            input_ids.append(ids)
            input_masks.append(mask)
            label_ids.append(
                self.text_to_ids(label, tokenizer=decoder_tokenizer, add_special_tokens=decoder_add_special_tokens)[0]
            )
        # Stored as numpy arrays, so __getitem__ yields ndarray rows.
        self.input_ids = np.asarray(input_ids)
        self.input_masks = np.asarray(input_masks)
        self.label_ids = np.asarray(label_ids)

    def text_to_ids(
        self, text: str, tokenizer: AutoTokenizer, add_special_tokens=False
    ) -> Tuple[List[int], List[int]]:
        """Converts text to ids. Truncates and adds padding.

        Returns (token_ids, attention_mask); both padded to max_seq_length
        when a limit is set.
        """
        text_tokens = tokenizer.text_to_ids(text.strip())
        num_special_tokens = 2 if add_special_tokens else 0
        if self.max_seq_length and self.max_seq_length > num_special_tokens:
            text_tokens = text_tokens[: self.max_seq_length - num_special_tokens]
        if add_special_tokens:
            text_tokens = [tokenizer.bos_id] + text_tokens + [tokenizer.eos_id]
        mask = [1] * len(text_tokens)
        if self.max_seq_length and self.max_seq_length > num_special_tokens:
            pad_length = self.max_seq_length - len(text_tokens)
            text_tokens += [tokenizer.pad_id] * pad_length
            mask += [0] * pad_length
        return text_tokens, mask

    def __len__(self):
        return len(self.input_ids)

    def convert_label_ids(self, label_ids: List[int]) -> Tuple[List[int], List[int]]:
        """Shift labels into (decoder inputs, LM targets) for teacher forcing.

        NOTE(review): the boolean-mask assignment below only works because
        ``label_ids`` is actually a numpy row (see __init__), not a plain
        List[int] as annotated — confirm and fix the annotation upstream.
        """
        decoder_input_ids = label_ids[:-1]
        lm_labels = label_ids[1:].copy()
        lm_labels[label_ids[1:] == self.decoder_tokenizer.pad_id] = -100  # for huggingface masked lm loss
        return decoder_input_ids, lm_labels

    def __getitem__(self, idx):
        # Without convert_labels, decoder inputs and targets are the raw
        # (unshifted) label ids.
        if self.convert_labels:
            decoder_input_ids, lm_labels = self.convert_label_ids(self.label_ids[idx])
        else:
            decoder_input_ids = self.label_ids[idx]
            lm_labels = self.label_ids[idx]
        return self.input_ids[idx], self.input_masks[idx], decoder_input_ids, lm_labels
import json
import subprocess
import logging
import pytest
from unittest import mock
from elastalert.alerters.command import CommandAlerter
from elastalert.alerts import BasicMatchString
from elastalert.util import EAException
from tests.alerts_test import mock_rule
def test_command_getinfo():
    """Command given as a list with a %(field)s placeholder: the alerter
    spawns the formatted command and reports it via get_info()."""
    # Test command as list with a formatted arg
    rule = {'command': ['/bin/test/', '--arg', '%(somefield)s']}
    alert = CommandAlerter(rule)
    match = {'@timestamp': '2014-01-01T00:00:00',
             'somefield': 'foobarbaz',
             'nested': {'field': 1}}
    with mock.patch("elastalert.alerters.command.subprocess.Popen") as mock_popen:
        alert.alert([match])
    # Fix: `assert mock_popen.called_with(...)` was a no-op — called_with()
    # just creates an always-truthy child mock. Verify the spawn happened.
    assert mock_popen.called
    expected_data = {
        'type': 'command',
        'command': '/bin/test/ --arg foobarbaz'
    }
    actual_data = alert.get_info()
    assert expected_data == actual_data
def test_command_old_style_string_format1(caplog):
    """String command with an old-style %(field)s placeholder: formatted via
    shell (which logs a shell-injection warning)."""
    caplog.set_level(logging.INFO)
    # Test command as string with formatted arg (old-style string format)
    rule = {'command': '/bin/test/ --arg %(somefield)s'}
    match = {'@timestamp': '2014-01-01T00:00:00',
             'somefield': 'foobarbaz',
             'nested': {'field': 1}}
    alert = CommandAlerter(rule)
    with mock.patch("elastalert.alerters.command.subprocess.Popen") as mock_popen:
        alert.alert([match])
    # Fix: `called_with(...)` was a no-op assertion; check the call happened.
    assert mock_popen.called
    assert ('elastalert', logging.WARNING, 'Warning! You could be vulnerable to shell injection!') == caplog.record_tuples[0]
    assert ('elastalert', logging.INFO, 'Alert sent to Command') == caplog.record_tuples[1]
def test_command_old_style_string_format2():
    """String command without placeholders runs as-is through the shell."""
    # Test command as string without formatted arg (old-style string format)
    rule = {'command': '/bin/test/foo.sh'}
    match = {'@timestamp': '2014-01-01T00:00:00',
             'somefield': 'foobarbaz',
             'nested': {'field': 1}}
    alert = CommandAlerter(rule)
    with mock.patch("elastalert.alerters.command.subprocess.Popen") as mock_popen:
        alert.alert([match])
    # Fix: `called_with(...)` was a no-op assertion; check the call happened.
    assert mock_popen.called
def test_command_pipe_match_json():
    """With pipe_match_json the match document is fed to the subprocess's
    stdin via communicate()."""
    rule = {'command': ['/bin/test/', '--arg', '%(somefield)s'],
            'pipe_match_json': True}
    alert = CommandAlerter(rule)
    match = {'@timestamp': '2014-01-01T00:00:00',
             'somefield': 'foobarbaz'}
    with mock.patch("elastalert.alerters.command.subprocess.Popen") as mock_popen:
        mock_subprocess = mock.Mock()
        mock_popen.return_value = mock_subprocess
        mock_subprocess.communicate.return_value = (None, None)
        alert.alert([match])
    # Fix: both `called_with(...)` checks were no-op assertions (always
    # truthy child mocks). Verify the calls actually happened.
    assert mock_popen.called
    assert mock_subprocess.communicate.called
def test_command_pipe_alert_text():
    """With pipe_alert_text the rendered alert body is fed to the
    subprocess's stdin via communicate()."""
    rule = {'command': ['/bin/test/', '--arg', '%(somefield)s'],
            'pipe_alert_text': True, 'type': mock_rule(), 'name': 'Test'}
    alert = CommandAlerter(rule)
    match = {'@timestamp': '2014-01-01T00:00:00',
             'somefield': 'foobarbaz'}
    # Documents the payload the alerter is expected to pipe to stdin.
    alert_text = str(BasicMatchString(rule, match))
    with mock.patch("elastalert.alerters.command.subprocess.Popen") as mock_popen:
        mock_subprocess = mock.Mock()
        mock_popen.return_value = mock_subprocess
        mock_subprocess.communicate.return_value = (None, None)
        alert.alert([match])
    # Fix: both `called_with(...)` checks were no-op assertions (always
    # truthy child mocks). Verify the calls actually happened.
    assert mock_popen.called
    assert mock_subprocess.communicate.called
def test_command_fail_on_non_zero_exit():
    """With fail_on_non_zero_exit a non-zero subprocess exit code raises."""
    rule = {'command': ['/bin/test/', '--arg', '%(somefield)s'],
            'fail_on_non_zero_exit': True}
    alert = CommandAlerter(rule)
    match = {'@timestamp': '2014-01-01T00:00:00',
             'somefield': 'foobarbaz'}
    with pytest.raises(Exception) as exception:
        with mock.patch("elastalert.alerters.command.subprocess.Popen") as mock_popen:
            mock_subprocess = mock.Mock()
            mock_popen.return_value = mock_subprocess
            mock_subprocess.wait.return_value = 1
            alert.alert([match])
    # Fix: these checks previously sat inside the pytest.raises block
    # *after* the raising call, so they never executed.
    assert mock_popen.called
    assert "Non-zero exit code while running command" in str(exception)
def METHOD_NAME():
    """An OSError raised by Popen is wrapped into an EAException."""
    rule = {'command': ['/bin/test/', '--arg', '%(somefield)s'],
            'pipe_alert_text': True, 'type': mock_rule(), 'name': 'Test'}
    alert = CommandAlerter(rule)
    match = {'@timestamp': '2014-01-01T00:00:00',
             'somefield': 'foobarbaz'}
    mock_run = mock.MagicMock(side_effect=OSError)
    with pytest.raises(EAException) as ea:
        with mock.patch("elastalert.alerters.command.subprocess.Popen", mock_run):
            alert.alert([match])
    # Fix: the nested pytest.raises(OSError) bound to `mock_popen` shadowed
    # the mock and the final assert was unreachable inside the outer raises
    # block; both issues are corrected here.
    assert 'Error while running command /bin/test/ --arg foobarbaz: ' in str(ea)
def test_command_key_error():
    """A rule with no 'command' key raises EAException at format time."""
    with pytest.raises(EAException) as ea:
        rule = {}
        alert = CommandAlerter(rule)
        match = {'@timestamp': '2014-01-01T00:00:00',
                 'somefield': 'foobarbaz',
                 'nested': {'field': 1}}
        with mock.patch("elastalert.alerters.command.subprocess.Popen"):
            alert.alert([match])
    # Fix: this assert previously sat inside the pytest.raises block, so it
    # never executed once the exception fired.
    assert 'Error formatting command:' in str(ea)
import re
from collections import defaultdict
from datetime import datetime, time
from city_scrapers_core.constants import BOARD
from city_scrapers_core.items import Meeting
from city_scrapers_core.spiders import CityScrapersSpider
class IlProcurementPolicySpider(CityScrapersSpider):
    """Scrapes upcoming and past Illinois Procurement Policy Board meetings
    from the agency's future-minutes and past-minutes pages.
    """
    name = "il_procurement_policy"
    agency = "Illinois Procurement Policy Board"
    timezone = "America/Chicago"
    start_urls = [
        "https://www2.illinois.gov/sites/ppb/Pages/future_board_minutes.aspx",
        "https://www2.illinois.gov/sites/ppb/Pages/board_minutes.aspx",
    ]
    def parse(self, response):
        """
        `parse` should always `yield` Meeting items.
        Change the `_parse_title`, `_parse_start`, etc methods to fit your scraping
        needs.
        """
        # The two start URLs need different parsing; dispatch on the URL.
        if "future" in response.url:
            yield from self._upcoming_meetings(response)
        else:
            yield from self._prev_meetings(response)
    def _parse_classification(self, item):
        """Parse or generate classification from allowed options."""
        return BOARD
    def METHOD_NAME(self, item):
        """Parse start datetime as a naive datetime object.

        All meetings are assumed to start at 10:00 AM; returns None when the
        link text is not a parseable date.
        """
        time_object = time(10, 0)
        date_str = " ".join(item.css("*::text").extract()).strip()
        date_str = re.sub("Agenda.pdf", "", date_str).strip()
        try:
            date_object = datetime.strptime(date_str, "%B %d, %Y").date()
            return datetime.combine(date_object, time_object)
        except ValueError:
            return
    def _parse_links(self, item, response):
        """Parse or generate links."""
        links = []
        title_str = " ".join(item.css("*::text").extract()).strip()
        if "pdf" in title_str:
            title_str = re.sub("Agenda.pdf", "", title_str).strip()
            title_str += " Agenda"
        links.append(
            {"title": title_str, "href": response.urljoin(item.attrib["href"])}
        )
        return links
    def _link_date_map(self, response):
        """Group past-meeting document links by meeting date."""
        link_map = defaultdict(list)
        # NOTE(review): unlike the second loop below, this first loop does
        # not guard against _past_start() returning None — a malformed link
        # would crash on date.strftime. Confirm the page guarantees a date.
        for item in response.css(".ms-rtestate-field p a"):
            date = self._past_start(item)
            title_str = date.strftime("%B %d, %Y")
            link_map[date].append(
                {"title": title_str, "href": response.urljoin(item.attrib["href"])}
            )
        for item in response.css(".ms-rtestate-field .list-unstyled li a"):
            date = self._past_start(item)
            if date is None:
                continue
            title_str = date.strftime("%B %d, %Y")
            link_map[date].append(
                {"title": title_str, "href": response.urljoin(item.attrib["href"])}
            )
        return link_map
    def _past_start(self, item):
        """parse or generate links from past meetings"""
        # Suffixes/markers after which the date text ends.
        str_list = [".docx", ".pdf", "-", "["]
        time_object = time(10, 0)
        date_str = " ".join(item.css("*::text").extract()).strip()
        if len(date_str) == 0:
            return None
        date_str = date_str.replace("\u200b", "")
        index = None
        # NOTE(review): the loop variable `item` shadows the `item`
        # parameter; harmless here (the parameter is not used afterwards)
        # but worth renaming. Also, strptime below is not wrapped in
        # try/except (unlike METHOD_NAME), so unexpected text raises.
        for item in str_list:
            if item in date_str:
                index = date_str.index(item)
                break
        date_str = date_str[:index]
        date_object = datetime.strptime(date_str.strip(), "%B %d, %Y").date()
        return datetime.combine(date_object, time_object)
    def _parse_source(self, response):
        """Parse or generate source."""
        return response.url
    def _upcoming_meetings(self, response):
        """Yield Meeting items from the future-minutes page."""
        for item in response.css(".ms-rtestate-field p strong a"):
            start = self.METHOD_NAME(item)
            if not start:
                continue
            meeting = Meeting(
                title="Procurement Policy Board",
                description="",
                classification=self._parse_classification(item),
                start=start,
                all_day=False,
                time_notes="",
                location={
                    "name": "Stratton Office Building",
                    "address": "401 S Spring St, Springfield, IL 62704",
                },
                links=self._parse_links(item, response),
                source=response.url,
            )
            meeting["status"] = self._get_status(meeting)
            meeting["id"] = self._get_id(meeting)
            yield meeting
    def _prev_meetings(self, response):
        """Yield Meeting items from the past-minutes page, skipping meetings
        older than one year unless archiving is enabled."""
        meets = self._link_date_map(response)
        last_year = datetime.today().replace(year=datetime.today().year - 1)
        for item in meets:
            if item < last_year and not self.settings.getbool("CITY_SCRAPERS_ARCHIVE"):
                continue
            meeting = Meeting(
                title="Procurement Policy Board",
                description="",
                classification=BOARD,
                start=item,
                all_day=False,
                time_notes="",
                location={
                    "name": "Stratton Office Building",
                    "address": "401 S Spring St, Springfield, IL 62704",
                },
                links=meets[item],
                source=response.url,
            )
            meeting["status"] = self._get_status(meeting)
            meeting["id"] = self._get_id(meeting)
            yield meeting
import logging
import re
from contextlib import ExitStack
from unittest import mock
from unittest.mock import patch
import pytest
from cumulusci.core.config import OrgConfig
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management import call_command
from django.core.management.base import CommandError
from requests.exceptions import HTTPError
from metadeploy.api.management.commands.schedule_release_test import (
execute_release_test,
get_plans_to_test,
)
from metadeploy.api.models import Job, Plan, PreflightResult, ScratchOrg
@pytest.mark.django_db()
def test_run_plan(plan_factory):
    """Happy path of the ``run_plan`` command: preflight and flow run
    against a (mocked) scratch org, results are flagged as release tests,
    and the scratch org is deleted afterwards."""
    plan = plan_factory(preflight_checks=[{"when": "False", "action": "error"}])
    org_config = OrgConfig(
        {
            "instance_url": "https://sample.salesforce.org/",
            "access_token": "abc123",
            "refresh_token": "abc123",
            "org_id": "00Dxxxxxxxxxxxxxxx",
        },
        "Release",
    )
    with ExitStack() as stack:
        # Stub out everything that would reach GitHub or Salesforce.
        stack.enter_context(patch("metadeploy.api.jobs.local_github_checkout"))
        stack.enter_context(
            patch("metadeploy.api.salesforce.OrgConfig.refresh_oauth_token")
        )
        stack.enter_context(
            patch("cumulusci.core.flowrunner.PreflightFlowCoordinator.run")
        )
        stack.enter_context(patch("cumulusci.core.flowrunner.FlowCoordinator.run"))
        stack.enter_context(
            patch(
                "metadeploy.api.jobs.create_scratch_org_on_sf",
                return_value=(org_config, None, None),
            )
        )
        stack.enter_context(patch("metadeploy.api.jobs.delete_scratch_org_on_sf"))
        call_command("run_plan", str(plan.id))
    # We can't query the scratch org since it's been deleted
    job = Job.objects.get()
    preflight_result = PreflightResult.objects.get()
    assert job.status == "complete"
    assert job.is_release_test
    assert preflight_result.is_release_test
    with pytest.raises(ScratchOrg.DoesNotExist):
        ScratchOrg.objects.get(plan=plan)
@pytest.mark.django_db
def test_run_plan__no_plan_exists():
    """An unknown plan id makes the command fail with CommandError."""
    with pytest.raises(CommandError):
        call_command("run_plan", "abc123")
@pytest.mark.django_db()
@mock.patch("metadeploy.api.management.commands.run_plan.setup_scratch_org")
def test_run_plan__scratch_org_creation_fails(setup_scratch_org, plan_factory, caplog):
    """A scratch-org setup failure propagates and is logged."""
    caplog.set_level(logging.INFO)
    setup_scratch_org.side_effect = Exception("Scratch org creation failed")
    plan = plan_factory(preflight_checks=[{"when": "False", "action": "error"}])
    with pytest.raises(Exception, match="Scratch org creation failed"):
        call_command("run_plan", str(plan.id))
    assert "Scratch org creation failed" in caplog.text
@pytest.mark.django_db
def METHOD_NAME(caplog):
    """With no eligible plans, the release test logs and does nothing."""
    caplog.set_level(logging.INFO)
    caplog.clear()
    execute_release_test()
    assert caplog.records[0].getMessage() == "No plans found for regression testing."
@pytest.mark.django_db
def test_get_plans_to_test(plan_template_factory, plan_factory):
    """Only one plan per (non-opted-out) template is selected; plans with
    tier 'additional' are never tested."""
    template_1 = plan_template_factory()
    # two plans of tier 'primary'
    plan_factory(tier=Plan.Tier.primary, plan_template=template_1)
    plan_factory(tier=Plan.Tier.primary, plan_template=template_1)
    # one plan with tier of 'additional' (should not be tested).
    plan_factory(tier=Plan.Tier.additional, plan_template=template_1)
    template_2 = plan_template_factory()
    # two plans of tier 'secondary'
    plan_factory(tier=Plan.Tier.secondary, plan_template=template_2)
    plan_factory(tier=Plan.Tier.secondary, plan_template=template_2)
    # one plan with tier of 'additional' (should not be tested).
    plan_factory(tier=Plan.Tier.additional, plan_template=template_2)
    # one template that has opted out of testing completely
    template_opted_out = plan_template_factory(regression_test_opt_out=True)
    plan_factory(tier=Plan.Tier.primary, plan_template=template_opted_out)
    plans_to_test = get_plans_to_test()
    # NOTE(review): only template_1's primary plan qualifies here —
    # template_2 has no primary-tier plan and template_opted_out opted out.
    assert len(plans_to_test) == 1
@pytest.mark.django_db
@mock.patch("metadeploy.api.management.commands.schedule_release_test.requests.post")
def test_schedule_release_test__happy_path(post, plan_template_factory, plan_factory):
    """A 200 response from the Heroku API completes without raising."""
    template = plan_template_factory()
    plan_factory(plan_template=template)
    post.return_value = mock.Mock(status_code=200, text="Fatal Error")
    execute_release_test()
@pytest.mark.django_db
@mock.patch("metadeploy.api.management.commands.schedule_release_test.requests.post")
def test_schedule_release_test__bad_response(post, plan_factory, plan_template_factory):
    """A 5xx response from the Heroku API surfaces as an HTTPError."""
    template = plan_template_factory()
    plan_factory(plan_template=template)
    post.return_value = mock.Mock(status_code=500, text="Fatal Error")
    with pytest.raises(HTTPError, match="An internal server error occurred."):
        execute_release_test()
@pytest.mark.django_db
def test_schedule_release_test__no_heroku_worker_app_name(
    plan_template_factory, plan_factory
):
    """Missing HEROKU_APP_NAME setting raises ImproperlyConfigured."""
    template = plan_template_factory()
    plan_factory(plan_template=template)
    with mock.patch.object(settings, "HEROKU_APP_NAME", None):
        with pytest.raises(
            ImproperlyConfigured,
            match="The HEROKU_APP_NAME environment variable is required for regression testing.",
        ):
            execute_release_test()
@pytest.mark.django_db
def test_schedule_release_test__no_heroku_token(plan_template_factory, plan_factory):
    """Missing HEROKU_TOKEN setting raises ImproperlyConfigured."""
    template = plan_template_factory()
    plan_factory(plan_template=template)
    with mock.patch.object(settings, "HEROKU_TOKEN", None):
        with pytest.raises(
            ImproperlyConfigured,
            match=re.escape(
                "The HEROKU_TOKEN environment variable is required for regression testing."
            ),
        ):
            execute_release_test()
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2021-present Kaleidos Ventures SL
from taiga.base.utils import db
from taiga.events import events
from taiga.projects.history.services import take_snapshot
from taiga.projects.services import apply_order_updates
from taiga.projects.issues.models import Issue
from taiga.projects.tasks.models import Task
from taiga.projects.userstories.models import UserStory
def calculate_milestone_is_closed(milestone):
    """A milestone counts as closed when it contains at least one item
    (user story, milestone-only task, or issue) and every user story,
    milestone-only task and issue in it is closed."""
    stories_closed = all(us.is_closed for us in milestone.user_stories.all())
    orphan_tasks_closed = all(
        task.status is not None and task.status.is_closed
        for task in milestone.tasks.filter(user_story__isnull=True))
    issues_closed = all(issue.is_closed for issue in milestone.issues.all())
    everything_closed = stories_closed and orphan_tasks_closed and issues_closed

    has_stories = milestone.user_stories.all().count() > 0
    has_orphan_tasks = milestone.tasks.filter(user_story__isnull=True).count() > 0
    has_issues = milestone.issues.all().count() > 0

    return everything_closed and (has_stories or has_issues or has_orphan_tasks)
def METHOD_NAME(milestone):
    """Mark *milestone* as closed, persisting only when the flag flips."""
    if milestone.closed:
        return
    milestone.closed = True
    milestone.save(update_fields=["closed", ])
def open_milestone(milestone):
    """Re-open *milestone*, persisting only when the flag flips."""
    if not milestone.closed:
        return
    milestone.closed = False
    milestone.save(update_fields=["closed", ])
def update_userstories_milestone_in_bulk(bulk_data: list, milestone: object):
    """
    Update the milestone and the milestone order of some user stories adding
    the extra orders needed to keep consistency.
    `bulk_data` should be a list of dicts with the following format:
    [{'us_id': <value>, 'order': <value>}, ...]

    Returns the full mapping of user-story id -> sprint_order after the
    reorder (NOTE(review): the task variant below returns the id->milestone
    mapping instead — confirm the asymmetry is intended).
    """
    user_stories = milestone.user_stories.all()
    # Current sprint order of every story already in the milestone.
    us_orders = {us.id: getattr(us, "sprint_order") for us in user_stories}
    new_us_orders = {}
    for e in bulk_data:
        new_us_orders[e["us_id"]] = e["order"]
        # The base orders where we apply the new orders must contain all
        # the values
        us_orders[e["us_id"]] = e["order"]
    # Shift the other stories' orders so the new placements stay consistent.
    apply_order_updates(us_orders, new_us_orders)
    us_milestones = {e["us_id"]: milestone.id for e in bulk_data}
    user_story_ids = us_milestones.keys()
    # Notify the realtime/events layer about the affected user stories.
    events.emit_event_for_ids(ids=user_story_ids,
                              content_type="userstories.userstory",
                              projectid=milestone.project.pk)
    us_instance_list = []
    us_values = []
    for us_id in user_story_ids:
        us = UserStory.objects.get(pk=us_id)
        us_instance_list.append(us)
        us_values.append({'milestone_id': milestone.id})
    db.update_in_bulk(us_instance_list, us_values)
    db.update_attr_in_bulk_for_ids(us_orders, "sprint_order", UserStory)
    # Updating the milestone for the tasks
    Task.objects.filter(
        user_story_id__in=[e["us_id"] for e in bulk_data]).update(
        milestone=milestone)
    return us_orders
def snapshot_userstories_in_bulk(bulk_data, user):
    """Take a history snapshot for every user story referenced in
    *bulk_data*; entries whose story no longer exists are skipped."""
    for entry in bulk_data:
        try:
            story = UserStory.objects.get(pk=entry['us_id'])
            take_snapshot(story, user=user)
        except UserStory.DoesNotExist:
            continue
def update_tasks_milestone_in_bulk(bulk_data: list, milestone: object):
    """
    Update the milestone and the milestone order of some tasks adding
    the extra orders needed to keep consistency.
    `bulk_data` should be a list of dicts with the following format:
    [{'task_id': <value>, 'order': <value>}, ...]

    Returns the mapping of task id -> milestone id for the moved tasks.
    """
    tasks = milestone.tasks.all()
    # Current taskboard order of every task already in the milestone.
    task_orders = {task.id: getattr(task, "taskboard_order") for task in tasks}
    new_task_orders = {}
    for e in bulk_data:
        new_task_orders[e["task_id"]] = e["order"]
        # The base orders where we apply the new orders must contain all
        # the values
        task_orders[e["task_id"]] = e["order"]
    # Shift the other tasks' orders so the new placements stay consistent.
    apply_order_updates(task_orders, new_task_orders)
    task_milestones = {e["task_id"]: milestone.id for e in bulk_data}
    task_ids = task_milestones.keys()
    # Notify the realtime/events layer about the affected tasks.
    events.emit_event_for_ids(ids=task_ids,
                              content_type="tasks.task",
                              projectid=milestone.project.pk)
    task_instance_list = []
    task_values = []
    for task_id in task_ids:
        task = Task.objects.get(pk=task_id)
        task_instance_list.append(task)
        task_values.append({'milestone_id': milestone.id})
    db.update_in_bulk(task_instance_list, task_values)
    db.update_attr_in_bulk_for_ids(task_orders, "taskboard_order", Task)
    return task_milestones
def snapshot_tasks_in_bulk(bulk_data, user):
    """Take a history snapshot for every task referenced in *bulk_data*;
    entries whose task no longer exists are skipped."""
    for entry in bulk_data:
        try:
            task = Task.objects.get(pk=entry['task_id'])
            take_snapshot(task, user=user)
        except Task.DoesNotExist:
            continue
def update_issues_milestone_in_bulk(bulk_data: list, milestone: object):
    """
    Update the milestone of some issues.

    `bulk_data` should be a list of dicts with the following format:
    [{'issue_id': <value>}, ...]

    Returns a dict mapping each affected issue id to the new milestone id.
    """
    issue_milestones = {e["issue_id"]: milestone.id for e in bulk_data}
    issue_ids = issue_milestones.keys()
    # NOTE(review): content_type here is the plural "issues.issues" while the
    # analogous task helper uses the singular "tasks.task" — confirm this
    # spelling matches what the events subsystem expects.
    events.emit_event_for_ids(ids=issue_ids,
                              content_type="issues.issues",
                              projectid=milestone.project.pk)

    # Re-assign the milestone on each issue.
    # Raises Issue.DoesNotExist if an unknown id is supplied.
    issues_instance_list = []
    issues_values = []
    for issue_id in issue_ids:
        issue = Issue.objects.get(pk=issue_id)
        issues_instance_list.append(issue)
        issues_values.append({'milestone_id': milestone.id})
    db.update_in_bulk(issues_instance_list, issues_values)

    return issue_milestones
def snapshot_issues_in_bulk(bulk_data, user):
    """Record a history snapshot for each issue referenced in `bulk_data`.

    Entries whose 'issue_id' does not match an existing Issue are skipped.
    """
    for entry in bulk_data:
        try:
            take_snapshot(Issue.objects.get(pk=entry['issue_id']), user=user)
        except Issue.DoesNotExist:
            pass
6,213 | export actions | # Copyright (c) 2012 Charles University, Faculty of Arts,
# Institute of the Czech National Corpus
# Copyright (c) 2012 Tomas Machalek <tomas.machalek@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# dated June, 1991.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
A 'taghelper' plug-in implementation. It provides a data-driven, interactive way
to create a tag query. Please note that this works only with tag formats that use
fixed positions for defined categories (e.g.: part of speech = character 0,
gender = character 1, case = character 2, ...)
Please note that this module requires a proper Corptree plug-in configuration and data.
Required XML:
element taghelper {
element module { "default_taghelper" }
element clear_interval {
text # TTL - number of seconds
}
element tags_cache_dir {
text # a path to a dir where files are cached
}
}
"""
from typing import Any, Dict
import plugins
from action.control import http_action
from action.errors import UserReadableException
from action.krequest import KRequest
from action.model.corpus import CorpusActionModel
from action.response import KResponse
from plugin_types.corparch import AbstractCorporaArchive
from plugin_types.taghelper import (AbstractTaghelper,
AbstractTagsetInfoLoader,
AbstractValueSelectionFetcher)
from plugins.default_taghelper.fetchers import NullSelectionFetcher
from plugins.default_taghelper.fetchers.keyval import KeyvalSelectionFetcher
from plugins.default_taghelper.fetchers.positional import \
PositionalSelectionFetcher
from plugins.default_taghelper.loaders import NullTagVariantLoader
from plugins.default_taghelper.loaders.keyval import KeyvalTagVariantLoader
from plugins.default_taghelper.loaders.positional import \
PositionalTagVariantLoader
from sanic.blueprints import Blueprint
bp = Blueprint('default_taghelper', 'corpora')
@bp.route('/ajax_get_tag_variants')
@http_action(return_type='json', action_model=CorpusActionModel)
async def ajax_get_tag_variants(amodel: CorpusActionModel, req: KRequest, resp: KResponse):
    """
    Return tag variant data for the interactive tag-builder widget.

    Query arguments:
        corpname -- corpus identifier
        tagset -- tagset identifier

    If the current selection is empty, the initial value matrix is returned;
    otherwise variants compatible with the selection are returned.

    Raises UserReadableException if no loader is available for the corpus.
    """
    corpname = req.args.get('corpname')
    tagset_name = req.args.get('tagset')
    fetcher = await plugins.runtime.TAGHELPER.instance.fetcher(amodel.plugin_ctx, corpname, tagset_name)
    values_selection = await fetcher.fetch(req)
    try:
        tag_loader = await plugins.runtime.TAGHELPER.instance.loader(amodel.plugin_ctx, corpname, tagset_name)
    except IOError:
        # Bug fix: the original passed the message with the {corpname}
        # placeholder never substituted; fill in the actual corpus name.
        raise UserReadableException(
            req.translate('Corpus {corpname} is not supported by this widget.').format(
                corpname=corpname))
    if await fetcher.is_empty(values_selection):
        ans = await tag_loader.get_initial_values(req.ui_lang, req.translate)
    else:
        ans = await tag_loader.get_variant(values_selection, req.ui_lang, req.translate)
    return ans
class Taghelper(AbstractTaghelper):
    """Default taghelper plug-in implementation.

    Lazily builds and caches tag-variant loaders and value-selection
    fetchers per (corpus, tagset) pair, choosing the concrete class by
    tagset type ('positional', 'keyval', anything else -> null objects).
    """

    def __init__(self, conf: Dict[str, Any], corparch: AbstractCorporaArchive):
        self._conf = conf
        self._corparch = corparch
        # caches keyed by (corpus_name, tagset_name)
        self._loaders = {}
        self._fetchers = {}

    async def loader(self, plugin_ctx, corpus_name, tagset_name) -> AbstractTagsetInfoLoader:
        """Return (building and caching on first use) a tag-variant loader
        for the given corpus/tagset pair.

        NOTE(review): if tagset_name is not among the corpus' configured
        tagsets, the final cache lookup raises KeyError — presumably callers
        only pass configured tagset ids; confirm.
        """
        if (corpus_name, tagset_name) not in self._loaders:
            # Populate caches for every tagset configured for the corpus.
            for tagset in (await self._corparch.get_corpus_info(plugin_ctx, corpus_name)).tagsets:
                if tagset.type == 'positional':
                    self._loaders[(corpus_name, tagset.ident)] = PositionalTagVariantLoader(
                        corpus_name=corpus_name, tagset_name=tagset.ident,
                        cache_dir=self._conf['tags_cache_dir'],
                        tags_src_dir=self._conf['tags_src_dir'],
                        cache_clear_interval=self._conf['clear_interval'],
                        taglist_path=self._conf['taglist_path'])
                    self._fetchers[(corpus_name, tagset.ident)] = PositionalSelectionFetcher()
                elif tagset.type == 'keyval':
                    self._loaders[(corpus_name, tagset.ident)] = KeyvalTagVariantLoader(
                        corpus_name=corpus_name, tagset_name=tagset.ident,
                        tags_src_dir=self._conf['tags_src_dir'],
                    )
                    self._fetchers[(corpus_name, tagset.ident)] = KeyvalSelectionFetcher()
                else:
                    # unknown tagset type -> inert implementations
                    self._loaders[(corpus_name, tagset.ident)] = NullTagVariantLoader()
                    self._fetchers[(corpus_name, tagset.ident)] = NullSelectionFetcher()
        return self._loaders[(corpus_name, tagset_name)]

    async def fetcher(self, plugin_ctx, corpus_name, tagset_name) -> AbstractValueSelectionFetcher:
        """Return (building and caching on first use) a value-selection
        fetcher for the given corpus/tagset pair.

        NOTE(review): same KeyError caveat as loader() for unknown tagsets.
        """
        if (corpus_name, tagset_name) not in self._fetchers:
            for tagset in (await self._corparch.get_corpus_info(plugin_ctx, corpus_name)).tagsets:
                if tagset.type == 'positional':
                    self._fetchers[(corpus_name, tagset.ident)] = PositionalSelectionFetcher()
                elif tagset.type == 'keyval':
                    self._fetchers[(corpus_name, tagset.ident)] = KeyvalSelectionFetcher()
                else:
                    self._fetchers[(corpus_name, tagset.ident)] = NullSelectionFetcher()
        return self._fetchers[(corpus_name, tagset_name)]

    async def tags_available_for(self, plugin_ctx, corpus_name, tagset_id) -> bool:
        """Return True if tagset_id is configured for the corpus and its
        loader reports data as available; False otherwise."""
        for tagset in (await self._corparch.get_corpus_info(plugin_ctx, corpus_name)).tagsets:
            if tagset.ident == tagset_id:
                loader = await self.loader(plugin_ctx, corpus_name, tagset.ident)
                return await loader.is_available(plugin_ctx.translate)
        return False

    @staticmethod
    def METHOD_NAME():
        # Expose this plug-in's HTTP routes (module-level blueprint).
        return bp

    async def export(self, plugin_ctx):
        """Collect tagset metadata for the current corpus and all aligned
        corpora, keyed (de-duplicated) by tagset ident."""
        tagsets = {}
        try:
            for corp in ([plugin_ctx.current_corpus.corpname] + plugin_ctx.available_aligned_corpora):
                info = await self._corparch.get_corpus_info(plugin_ctx, corp)
                for tagset in info.tagsets:
                    tagsets[tagset.ident] = tagset
        except AttributeError:
            pass  # we ignore if plugin_ctx does not support 'current_corpus' - please see AbstractTaghelper
        return dict(corp_tagsets=[x.to_dict() for x in tagsets.values()])
@plugins.inject(plugins.runtime.CORPARCH)
def create_instance(conf, corparch: AbstractCorporaArchive):
    """
    Plug-in factory.

    arguments:
    conf -- KonText's settings module or a compatible object
    """
    plugin_conf = conf.get('plugins', 'taghelper')
    return Taghelper(plugin_conf, corparch)
6,214 | test extra needed exit | import sys
import pytest
from conftest import ImportRaiser, remove_import_raiser, reload_module
from fontbakery.status import FAIL
from fontbakery.codetesting import (
assert_PASS,
assert_results_contain,
CheckTester,
TEST_FILE,
)
from fontbakery.profiles import iso15008 as iso15008_profile
def METHOD_NAME(monkeypatch):
    """The iso15008 profile must exit (SystemExit) when its optional
    'uharfbuzz' dependency cannot be imported."""
    missing_module = "uharfbuzz"
    # Make any future import of the module raise, and drop a cached copy.
    sys.meta_path.insert(0, ImportRaiser(missing_module))
    monkeypatch.delitem(sys.modules, missing_module, raising=False)

    with pytest.raises(SystemExit):
        reload_module("fontbakery.profiles.iso15008")

    remove_import_raiser(missing_module)
def test_check_iso15008_proportions():
    """Check if 0.65 => (H width / H height) => 0.80"""
    check = CheckTester(iso15008_profile, "com.google.fonts/check/iso15008_proportions")

    # Cabin's 'H' has a width/height ratio of 0.7, inside the allowed band.
    good_font = TEST_FILE("cabin/Cabin-Regular.ttf")
    assert_PASS(check(good_font), "with a good font...")

    # Wonky Paths has no 'H' glyph to measure at all.
    no_h_font = TEST_FILE("wonky_paths/WonkySourceSansPro-Regular.otf")
    assert_results_contain(
        check(no_h_font),
        FAIL,
        "glyph-not-present",
        "with a font that does not have an 'H' glyph...",
    )

    # Covered By Your Grace is far too narrow (ratio 0.39).
    narrow_font = TEST_FILE("coveredbyyourgrace/CoveredByYourGrace.ttf")
    assert_results_contain(
        check(narrow_font),
        FAIL,
        "invalid-proportion",
        "with a very tall font (proportion of 'H' width to 'H' height)...",
    )
def test_check_iso15008_stem_width():
    """Check if 0.10 <= (stem width / ascender) <= 0.82"""
    check = CheckTester(iso15008_profile, "com.google.fonts/check/iso15008_stem_width")

    passing_font = TEST_FILE("cabin/Cabin-SemiBold.ttf")
    assert_PASS(check(passing_font), "with a good font...")

    # Wonky Paths lacks the 'l' glyph used to measure the stem.
    missing_l_font = TEST_FILE("wonky_paths/WonkySourceSansPro-Regular.otf")
    assert_results_contain(
        check(missing_l_font), FAIL, "no-stem-width", "with a font lacking an 'l' glyph..."
    )

    # Cabin Regular's stems are slightly too thin for display use.
    thin_font = TEST_FILE("cabin/Cabin-Regular.ttf")
    assert_results_contain(
        check(thin_font),
        FAIL,
        "invalid-proportion",
        "with a too thin font (proportion of stem width to ascender)...",
    )
def test_check_iso15008_intercharacter_spacing():
    """Check if spacing between characters is adequate for display use"""
    check = CheckTester(
        iso15008_profile, "com.google.fonts/check/iso15008_intercharacter_spacing"
    )

    good_font = TEST_FILE("cabin/Cabin-Regular.ttf")
    assert_PASS(check(good_font), "with a good font...")

    semibold_font = TEST_FILE("cabin/Cabin-SemiBold.ttf")
    # l stem width is 111, LSB at x-height is 59, RSB at x-Height is 83;
    # 142 / 111 = 128%, so this font is too tight.
    assert_results_contain(
        check(semibold_font),
        FAIL,
        "bad-vertical-vertical-spacing",
        "with a too tight font (space between vertical strokes)...",
    )
    # v LSB is 5, lv kern is -6 (!) so lv distance is 83+5-6 = 82;
    # 82 / 111 = 0.73%, so that fails too.
    assert_results_contain(
        check(semibold_font),
        FAIL,
        "bad-vertical-diagonal-spacing",
        "with bad spacing between vertical and diagonal strokes...",
    )

    # In Montserrat Black the diagonal strokes of 'vv' touch.
    touching_font = TEST_FILE("montserrat/Montserrat-Black.ttf")
    assert_results_contain(
        check(touching_font),
        FAIL,
        "bad-diagonal-diagonal-spacing",
        "with diagonal strokes (vv) that are touching...",
    )
def test_check_iso15008_interword_spacing():
    """Check if spacing between words is adequate for display use"""
    check = CheckTester(
        iso15008_profile, "com.google.fonts/check/iso15008_interword_spacing"
    )

    # lm space is 112; m+space+l space is 286; 286/112 = 255% -> OK.
    wide_font = TEST_FILE("cabin/CabinCondensed-Bold.ttf")
    assert_PASS(check(wide_font), "with a good font...")

    # lm space is 147; m+space+l space is 341; 341/147 = 232% -> too tight.
    tight_font = TEST_FILE("cabin/Cabin-Regular.ttf")
    assert_results_contain(
        check(tight_font), FAIL, "bad-interword-spacing", "with bad interword space..."
    )
def test_check_iso15008_interline_spacing():
    """Check if spacing between lines is adequate for display use"""
    check = CheckTester(
        iso15008_profile, "com.google.fonts/check/iso15008_interline_spacing"
    )

    good_font = TEST_FILE("cabin/Cabin-Regular.ttf")
    assert_PASS(check(good_font), "with a good font...")

    # 39 units at bottom of g + 49 units at top of h + no typolinegap = 88,
    # against a stem width of 147 -> insufficient interline space.
    bad_font = TEST_FILE("source-sans-pro/TTF/SourceSansPro-Bold.ttf")
    assert_results_contain(
        check(bad_font), FAIL, "bad-interline-spacing", "with bad interline space..."
    )
6,215 | characters | ## Original version of code heavily based on recipe written by Wai Yip
## Tung, released under PSF license.
## http://code.activestate.com/recipes/534109/
import re
import os
import xml.sax.handler
class DataNode (object):
    """A dynamic node of the converted XML tree.

    XML attributes and child elements are exposed as Python attributes
    (``node.child``; missing names yield None), child text is available
    via ``str(node)``, and a single node behaves as a one-element list so
    callers can iterate uniformly over one-or-many children.
    """

    def __init__ (self, **kwargs):
        self._attrs = {}     # XML attributes and child elements
        self._data = None    # child text data
        # optional mapping applied to XML names before they are stored
        self._ncDict = kwargs.get ('nameChangeDict', {})

    def __len__ (self):
        # treat single element as a list of 1
        return 1

    def __getitem__ (self, key):
        if isinstance (key, str):
            # string key: attribute lookup (None if absent)
            return self._attrs.get(key,None)
        else:
            # integer key: behave like a one-element list
            return [self][key]

    def __contains__ (self, name):
        return name in self._attrs

    def __bool__ (self):
        # A node is truthy when it carries any attributes or text.
        # Bug fix: the original defined only Python 2's __nonzero__, which
        # Python 3 ignores; bool() then fell back to __len__ (always 1),
        # making every node truthy. __bool__ restores the intended check.
        return bool (self._attrs or self._data)

    # keep the Python 2 name as an alias for backward compatibility
    __nonzero__ = __bool__

    def __getattr__ (self, name):
        if name.startswith('__'):
            # need to do this for Python special methods???
            raise AttributeError (name)
        return self._attrs.get (name, None)

    def _add_xml_attr (self, name, value):
        # apply the caller-supplied renaming table, if any
        change = self._ncDict.get (name)
        if change:
            name = change
        if name in self._attrs:
            # multiple attributes of the same name are represented by a list
            children = self._attrs[name]
            if not isinstance(children, list):
                children = [children]
                self._attrs[name] = children
            children.append(value)
        else:
            self._attrs[name] = value

    def __str__ (self):
        return self._data or ''

    def __repr__ (self):
        items = sorted (self._attrs.items())
        if self._data:
            items.append(('data', self._data))
        return u'{%s}' % ', '.join([u'%s:%s' % (k,repr(v)) for k,v in items])

    def attributes (self):
        # expose the raw attribute dictionary
        return self._attrs
class TreeBuilder (xml.sax.handler.ContentHandler):
    """SAX content handler that assembles a tree of DataNode objects."""

    # characters that may not appear in a Python identifier
    non_id_char = re.compile('[^_0-9a-zA-Z]')

    def __init__ (self, **kwargs):
        self._stack = []
        self._text_parts = []
        self._ncDict = kwargs.get ('nameChangeDict', {})
        self._root = DataNode (nameChangeDict = self._ncDict)
        self.current = self._root

    def startElement (self, name, attrs):
        # push the parent node and its partially-collected text, then
        # start a fresh node for this element
        self._stack.append( (self.current, self._text_parts))
        self.current = DataNode (nameChangeDict = self._ncDict)
        self._text_parts = []
        # xml attributes --> python attributes
        for k, v in attrs.items():
            self.current._add_xml_attr (TreeBuilder._name_mangle(k), v)

    def endElement (self, name):
        text = ''.join (self._text_parts).strip()
        if text:
            self.current._data = text
        if self.current.attributes():
            obj = self.current
        else:
            # a text only node is simply represented by the string
            obj = text or ''
        self.current, self._text_parts = self._stack.pop()
        self.current._add_xml_attr (TreeBuilder._name_mangle(name), obj)

    def METHOD_NAME (self, content):
        """Collect character data for the current element."""
        self._text_parts.append(content)

    # Bug fix: the SAX parser looks this callback up under the name
    # 'characters'; without the alias all text content would be silently
    # dropped. METHOD_NAME is kept as the primary definition so existing
    # direct callers continue to work.
    characters = METHOD_NAME

    def root (self):
        return self._root

    def topLevel (self):
        '''Returns top level object'''
        return list(self._root.attributes().values())[0]

    @staticmethod
    def _name_mangle (name):
        # mangle an XML name into a valid Python identifier
        return TreeBuilder.non_id_char.sub('_', name)
# Substitutions applied to the text inside attribute quotes: escape the five
# XML-reserved characters so the markup can be parsed. '&' must come first so
# entities produced by the later rules are not double-escaped.
# Bug fix: the replacement strings had been corrupted (e.g. '&' -> '&',
# a garbled '"e;' and an unterminated ''' literal); restored to the
# standard XML entities.
regexList = [ (re.compile (r'&'), '&amp;' ),
              (re.compile (r'<'), '&lt;' ),
              (re.compile (r'>'), '&gt;' ),
              (re.compile (r'"'), '&quot;' ),
              (re.compile (r"'"), '&apos;' )
              ]

# Matches a quoted attribute value: group(1) is 'name="', group(2) the value.
quoteRE = re.compile (r'(\w\s*=\s*")([^"]+)"')

def fixQuoteValue (match):
    '''Escape XML-reserved characters inside a quoted attribute value.'''
    quote = match.group(2)
    for regex, replacement in regexList:
        quote = regex.sub(replacement, quote)
    return match.group(1) + quote + '"'
def xml2obj (**kwargs):
    ''' Converts XML data into a native Python object.  Takes either a
    file handle, a string or a filename as input.

    input source: Exactly one of the three following is needed
        filehandle - input from file handle
        contents   - input from string
        filename   - input from filename

    options:
        filtering      - boolean telling the code whether or not to filter
                         the input to escape illegal XML characters found
                         inside attribute values
        nameChangeDict - dictionary of names to change in the python object

    Raises RuntimeError if zero or multiple input sources are given or a
    named file cannot be opened.'''
    # make sure we have exactly 1 input source
    filehandle = kwargs.get ('filehandle')
    contents = kwargs.get ('contents')
    filename = kwargs.get ('filename')
    if not filehandle and not contents and not filename:
        raise RuntimeError("You must provide 'filehandle', 'contents', or 'filename'")
    if (filehandle and contents) or \
       (filehandle and filename) or \
       (contents and filename):
        raise RuntimeError("You must provide only ONE of 'filehandle', 'contents', or 'filename'")

    # are we filtering?
    filtering = kwargs.get ('filtering')
    if filtering:
        # if we are filtering, we need to read in the contents to modify them
        if not contents:
            if not filehandle:
                try:
                    filehandle = open (filename, 'r')
                except OSError as e:
                    # was a bare 'except:', which also swallowed
                    # KeyboardInterrupt/SystemExit
                    raise RuntimeError("Failed to open '%s'" % filename) from e
            contents = filehandle.read()
            filehandle.close()
            filehandle = filename = ''
        contents = quoteRE.sub (fixQuoteValue, contents)

    ncDict = kwargs.get ('nameChangeDict', {})
    builder = TreeBuilder (nameChangeDict = ncDict)
    if contents:
        # NOTE(review): xml.sax.parseString may require bytes on some
        # Python 3 versions — confirm what type 'contents' has at call sites.
        xml.sax.parseString(contents, builder)
    elif filehandle:
        # caller-supplied handle: parse it but leave closing to the caller
        xml.sax.parse(filehandle, builder)
    else:
        try:
            filehandle = open (filename, 'r')
        except OSError as e:
            raise RuntimeError("Failed to open '%s'" % filename) from e
        try:
            xml.sax.parse(filehandle, builder)
        finally:
            # close the handle we opened ourselves (the original leaked it)
            filehandle.close()
    return builder.topLevel()
6,216 | test no header | # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tensorboard.backend.client_feature_flags`."""
import json
from werkzeug import test as werkzeug_test
from werkzeug import wrappers
from tensorboard import context
from tensorboard import errors
from tensorboard import test as tb_test
from tensorboard.backend import client_feature_flags
class ClientFeatureFlagsMiddlewareTest(tb_test.TestCase):
    """Tests for `ClientFeatureFlagsMiddleware`."""

    def _echo_app(self, environ, start_response):
        """WSGI app that echoes the parsed client feature flags as JSON."""
        # https://www.python.org/dev/peps/pep-0333/#environ-variables
        data = {
            "client_feature_flags": context.from_environ(
                environ
            ).client_feature_flags,
        }
        body = json.dumps(data)
        start_response("200 OK", [("Content-Type", "application/json")])
        return [body]

    def _assert_ok(self, response, client_feature_flags):
        """Assert a 200 response whose JSON body holds exactly these flags."""
        self.assertEqual(response.status_code, 200)
        actual = json.loads(response.get_data())
        expected = dict(client_feature_flags=client_feature_flags)
        self.assertEqual(actual, expected)

    # Requests carrying no flag information at all yield empty flags.
    def METHOD_NAME(self):
        app = client_feature_flags.ClientFeatureFlagsMiddleware(self._echo_app)
        server = werkzeug_test.Client(app, wrappers.Response)
        response = server.get("")
        self._assert_ok(response, {})

    def test_no_query_string(self):
        app = client_feature_flags.ClientFeatureFlagsMiddleware(self._echo_app)
        server = werkzeug_test.Client(app, wrappers.Response)
        response = server.get("")
        self._assert_ok(response, {})

    # Empty header/query values behave the same as absent ones.
    def test_header_with_no_value(self):
        app = client_feature_flags.ClientFeatureFlagsMiddleware(self._echo_app)
        server = werkzeug_test.Client(app, wrappers.Response)
        response = server.get("", headers=[("X-TensorBoard-Feature-Flags", "")])
        self._assert_ok(response, {})

    def test_query_string_with_no_value(self):
        app = client_feature_flags.ClientFeatureFlagsMiddleware(self._echo_app)
        server = werkzeug_test.Client(app, wrappers.Response)
        response = server.get("", query_string={"tensorBoardFeatureFlags": ""})
        self._assert_ok(response, {})

    def test_header_with_no_flags(self):
        app = client_feature_flags.ClientFeatureFlagsMiddleware(self._echo_app)
        server = werkzeug_test.Client(app, wrappers.Response)
        response = server.get(
            "", headers=[("X-TensorBoard-Feature-Flags", "{}")]
        )
        self._assert_ok(response, {})

    def test_query_string_with_no_flags(self):
        app = client_feature_flags.ClientFeatureFlagsMiddleware(self._echo_app)
        server = werkzeug_test.Client(app, wrappers.Response)
        response = server.get(
            "", query_string={"tensorBoardFeatureFlags": "{}"}
        )
        self._assert_ok(response, {})

    # Well-formed flag dicts are passed through unchanged.
    def test_header_with_client_feature_flags(self):
        app = client_feature_flags.ClientFeatureFlagsMiddleware(self._echo_app)
        server = werkzeug_test.Client(app, wrappers.Response)
        response = server.get(
            "",
            headers=[
                (
                    "X-TensorBoard-Feature-Flags",
                    '{"str": "hi", "bool": true, "strArr": ["one", "two"]}',
                )
            ],
        )
        self._assert_ok(
            response,
            {
                "str": "hi",
                "bool": True,
                "strArr": ["one", "two"],
            },
        )

    def test_query_string_with_client_feature_flags(self):
        app = client_feature_flags.ClientFeatureFlagsMiddleware(self._echo_app)
        server = werkzeug_test.Client(app, wrappers.Response)
        response = server.get(
            "",
            query_string={
                "tensorBoardFeatureFlags": '{"str": "hi", "bool": true, "strArr": ["one", "two"]}'
            },
        )
        self._assert_ok(
            response,
            {
                "str": "hi",
                "bool": True,
                "strArr": ["one", "two"],
            },
        )

    # Malformed JSON must surface as InvalidArgumentError.
    def test_header_with_json_not_decodable(self):
        app = client_feature_flags.ClientFeatureFlagsMiddleware(self._echo_app)
        server = werkzeug_test.Client(app, wrappers.Response)
        with self.assertRaisesRegex(
            errors.InvalidArgumentError, "cannot be JSON decoded."
        ):
            response = server.get(
                "",
                headers=[
                    (
                        "X-TensorBoard-Feature-Flags",
                        "some_invalid_json {} {}",
                    )
                ],
            )

    def test_query_string_with_json_not_decodable(self):
        app = client_feature_flags.ClientFeatureFlagsMiddleware(self._echo_app)
        server = werkzeug_test.Client(app, wrappers.Response)
        with self.assertRaisesRegex(
            errors.InvalidArgumentError, "cannot be JSON decoded."
        ):
            response = server.get(
                "",
                query_string={
                    "tensorBoardFeatureFlags": "some_invalid_json {} {}",
                },
            )

    # Valid JSON that is not an object is also rejected.
    def test_header_with_json_not_dict(self):
        app = client_feature_flags.ClientFeatureFlagsMiddleware(self._echo_app)
        server = werkzeug_test.Client(app, wrappers.Response)
        with self.assertRaisesRegex(
            errors.InvalidArgumentError, "cannot be decoded to a dict"
        ):
            response = server.get(
                "",
                headers=[
                    (
                        "X-TensorBoard-Feature-Flags",
                        '["not", "a", "dict"]',
                    )
                ],
            )

    def test_query_string_with_json_not_dict(self):
        app = client_feature_flags.ClientFeatureFlagsMiddleware(self._echo_app)
        server = werkzeug_test.Client(app, wrappers.Response)
        with self.assertRaisesRegex(
            errors.InvalidArgumentError, "cannot be decoded to a dict"
        ):
            response = server.get(
                "",
                query_string={
                    "tensorBoardFeatureFlags": '["not", "a", "dict"]',
                },
            )

    # When both sources are present, header values win per key ("a" -> "1")
    # while query-string-only keys ("c") are still merged in.
    def test_header_feature_flags_take_precedence(self):
        app = client_feature_flags.ClientFeatureFlagsMiddleware(self._echo_app)
        server = werkzeug_test.Client(app, wrappers.Response)
        response = server.get(
            "",
            headers=[
                (
                    "X-TensorBoard-Feature-Flags",
                    '{"a": "1", "b": "2"}',
                )
            ],
            query_string={"tensorBoardFeatureFlags": '{"a": "2", "c": "3"}'},
        )
        self._assert_ok(
            response,
            {
                "a": "1",
                "b": "2",
                "c": "3",
            },
        )
# Allow running this test module directly.
if __name__ == "__main__":
    tb_test.main()
6,217 | set up | # coding: utf-8
#
# Copyright 2022 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for methods defined in learner group fetchers."""
from __future__ import annotations
from core.domain import learner_group_fetchers
from core.domain import learner_group_services
from core.tests import test_utils
class LearnerGroupFetchersUnitTests(test_utils.GenericTestBase):
    """Tests for learner group fetchers."""

    # Fixed user ids shared by the fixture learner group created in setup.
    FACILITATOR_ID = 'facilitator_user_1'
    LEARNER_ID_1 = 'learner_user_1'
    LEARNER_ID_2 = 'learner_user_2'

    def METHOD_NAME(self) -> None:
        """Create one learner group with two invited learners."""
        super().METHOD_NAME()

        self.LEARNER_GROUP_ID = (
            learner_group_fetchers.get_new_learner_group_id()
        )

        self.learner_group = learner_group_services.create_learner_group(
            self.LEARNER_GROUP_ID, 'Learner Group Name', 'Description',
            [self.FACILITATOR_ID], [self.LEARNER_ID_1, self.LEARNER_ID_2],
            ['subtopic_id_1'], ['story_id_1'])

    def test_get_new_learner_group_id(self) -> None:
        self.assertIsNotNone(learner_group_fetchers.get_new_learner_group_id())

    def test_get_learner_group_by_id(self) -> None:
        # Unknown ids return None in non-strict mode...
        fake_learner_group_id = 'fake_learner_group_id'
        fake_learner_group = learner_group_fetchers.get_learner_group_by_id(
            fake_learner_group_id)
        self.assertIsNone(fake_learner_group)

        learner_group = learner_group_fetchers.get_learner_group_by_id(
            self.LEARNER_GROUP_ID
        )

        # Ruling out the possibility of None for mypy type checking.
        assert learner_group is not None
        self.assertIsNotNone(learner_group)
        self.assertEqual(learner_group.group_id, self.LEARNER_GROUP_ID)

        # ...and raise in strict mode.
        with self.assertRaisesRegex(
            Exception,
            'No LearnerGroupModel found for the given group_id: '
            'fake_learner_group_id'
        ):
            learner_group_fetchers.get_learner_group_by_id(
                fake_learner_group_id, strict=True
            )

    def test_raises_error_if_learner_group_model_is_fetched_with_strict_and_invalid_id( # pylint: disable=line-too-long
        self
    ) -> None:
        with self.assertRaisesRegex(
            Exception,
            'No LearnerGroupsUserModel exists for the user_id: invalid_id'
        ):
            learner_group_fetchers.get_learner_group_models_by_ids(
                ['invalid_id'], strict=True
            )

    def test_get_learner_groups_of_facilitator(self) -> None:
        fake_facilitator_id = 'fake_facilitator_id'
        fake_learner_groups = (
            learner_group_fetchers.get_learner_groups_of_facilitator(
                fake_facilitator_id
            )
        )
        self.assertEqual(len(fake_learner_groups), 0)

        learner_groups = (
            learner_group_fetchers.get_learner_groups_of_facilitator(
                self.FACILITATOR_ID
            )
        )
        self.assertEqual(len(learner_groups), 1)
        self.assertEqual(learner_groups[0].group_id, self.LEARNER_GROUP_ID)

    def test_can_multi_learners_share_progress(self) -> None:
        # Learner 1 joins sharing progress; learner 2 joins without.
        learner_group_services.add_learner_to_learner_group(
            self.LEARNER_GROUP_ID, self.LEARNER_ID_1, True)

        learner_group_services.add_learner_to_learner_group(
            self.LEARNER_GROUP_ID, self.LEARNER_ID_2, False)

        self.assertEqual(
            learner_group_fetchers.can_multi_learners_share_progress(
                [self.LEARNER_ID_1, self.LEARNER_ID_2], self.LEARNER_GROUP_ID
            ), [True, False])

    def test_get_invited_learner_groups_of_learner(self) -> None:
        fake_learner_id = 'fake_learner_id'
        learner_groups = (
            learner_group_fetchers.get_invited_learner_groups_of_learner(
                fake_learner_id
            )
        )
        self.assertEqual(len(learner_groups), 0)

        learner_groups = (
            learner_group_fetchers.get_invited_learner_groups_of_learner(
                self.LEARNER_ID_1
            )
        )
        self.assertEqual(len(learner_groups), 1)
        self.assertEqual(learner_groups[0].group_id, self.LEARNER_GROUP_ID)

    def test_get_learner_groups_joined_by_learner(self) -> None:
        # Invited learners have not joined yet.
        learner_groups = (
            learner_group_fetchers.get_learner_groups_joined_by_learner(
                self.LEARNER_ID_1
            )
        )
        self.assertEqual(len(learner_groups), 0)

        learner_group_services.add_learner_to_learner_group(
            self.LEARNER_GROUP_ID, self.LEARNER_ID_1, True)

        learner_groups = (
            learner_group_fetchers.get_learner_groups_joined_by_learner(
                self.LEARNER_ID_1
            )
        )
        self.assertEqual(len(learner_groups), 1)
        self.assertEqual(learner_groups[0].group_id, self.LEARNER_GROUP_ID)
6,218 | test setrusage refcount | import unittest
from test import test_support
import time
resource = test_support.import_module('resource')
# This test is checking a few specific problem spots with the resource module.
class ResourceTest(unittest.TestCase):
    """Checks a few specific problem spots in the 'resource' module.

    NOTE: this is Python 2 code (see the 10L long literal below).
    """

    def test_args(self):
        # get/setrlimit take exactly (resource,) / (resource, limits).
        self.assertRaises(TypeError, resource.getrlimit)
        self.assertRaises(TypeError, resource.getrlimit, 42, 42)
        self.assertRaises(TypeError, resource.setrlimit)
        self.assertRaises(TypeError, resource.setrlimit, 42, 42, 42)

    def test_fsize_ismax(self):
        try:
            (cur, max) = resource.getrlimit(resource.RLIMIT_FSIZE)
        except AttributeError:
            # RLIMIT_FSIZE not available on this platform.
            pass
        else:
            # RLIMIT_FSIZE should be RLIM_INFINITY, which will be a really big
            # number on a platform with large file support. On these platforms,
            # we need to test that the get/setrlimit functions properly convert
            # the number to a C long long and that the conversion doesn't raise
            # an error.
            self.assertEqual(resource.RLIM_INFINITY, max)
            resource.setrlimit(resource.RLIMIT_FSIZE, (cur, max))

    def test_fsize_enforced(self):
        try:
            (cur, max) = resource.getrlimit(resource.RLIMIT_FSIZE)
        except AttributeError:
            pass
        else:
            # Check to see what happens when the RLIMIT_FSIZE is small. Some
            # versions of Python were terminated by an uncaught SIGXFSZ, but
            # pythonrun.c has been fixed to ignore that exception. If so, the
            # write() should return EFBIG when the limit is exceeded.

            # At least one platform has an unlimited RLIMIT_FSIZE and attempts
            # to change it raise ValueError instead.
            try:
                try:
                    resource.setrlimit(resource.RLIMIT_FSIZE, (1024, max))
                    limit_set = True
                except ValueError:
                    limit_set = False
                f = open(test_support.TESTFN, "wb")
                try:
                    # Exactly at the limit: must succeed.
                    f.write("X" * 1024)
                    try:
                        # One byte past the limit: expect IOError (EFBIG)
                        # once the data actually reaches the file.
                        f.write("Y")
                        f.flush()
                        # On some systems (e.g., Ubuntu on hppa) the flush()
                        # doesn't always cause the exception, but the close()
                        # does eventually. Try flushing several times in
                        # an attempt to ensure the file is really synced and
                        # the exception raised.
                        for i in range(5):
                            time.sleep(.1)
                            f.flush()
                    except IOError:
                        if not limit_set:
                            raise
                    if limit_set:
                        # Close will attempt to flush the byte we wrote
                        # Restore limit first to avoid getting a spurious error
                        resource.setrlimit(resource.RLIMIT_FSIZE, (cur, max))
                finally:
                    f.close()
            finally:
                if limit_set:
                    resource.setrlimit(resource.RLIMIT_FSIZE, (cur, max))
                test_support.unlink(test_support.TESTFN)

    def test_fsize_toobig(self):
        # Be sure that setrlimit is checking for really large values
        too_big = 10L**50
        try:
            (cur, max) = resource.getrlimit(resource.RLIMIT_FSIZE)
        except AttributeError:
            pass
        else:
            try:
                resource.setrlimit(resource.RLIMIT_FSIZE, (too_big, max))
            except (OverflowError, ValueError):
                pass
            try:
                resource.setrlimit(resource.RLIMIT_FSIZE, (max, too_big))
            except (OverflowError, ValueError):
                pass

    def test_getrusage(self):
        self.assertRaises(TypeError, resource.getrusage)
        self.assertRaises(TypeError, resource.getrusage, 42, 42)
        usageself = resource.getrusage(resource.RUSAGE_SELF)
        usagechildren = resource.getrusage(resource.RUSAGE_CHILDREN)
        # May not be available on all systems.
        try:
            usageboth = resource.getrusage(resource.RUSAGE_BOTH)
        except (ValueError, AttributeError):
            pass

    # Issue 6083: Reference counting bug
    def METHOD_NAME(self):
        try:
            limits = resource.getrlimit(resource.RLIMIT_CPU)
        except AttributeError:
            pass
        else:
            # A sequence whose __getitem__ churns the allocator, to expose
            # use of objects whose refcounts setrlimit failed to hold.
            class BadSequence:
                def __len__(self):
                    return 2
                def __getitem__(self, key):
                    if key in (0, 1):
                        return len(tuple(range(1000000)))
                    raise IndexError

            resource.setrlimit(resource.RLIMIT_CPU, BadSequence())
def test_main(verbose=None):
    # regrtest-style entry point used by CPython's test driver.
    test_support.run_unittest(ResourceTest)

if __name__ == "__main__":
    test_main()
6,219 | htmlparser trace | """Diagnostic functions, mainly for use when doing tech support."""
# Use of this source code is governed by the MIT license.
__license__ = "MIT"
import cProfile
from io import BytesIO
from html.parser import HTMLParser
import bs4
from bs4 import BeautifulSoup, __version__
from bs4.builder import builder_registry
import os
import pstats
import random
import tempfile
import time
import traceback
import sys
import cProfile
def diagnose(data):
    """Diagnostic suite for isolating common problems.

    :param data: A string containing markup that needs to be explained,
        a filename, or a file-like object.
    :return: None; diagnostics are printed to standard output.
    """
    print(("Diagnostic running on Beautiful Soup %s" % __version__))
    print(("Python version %s" % sys.version))

    basic_parsers = ["html.parser", "html5lib", "lxml"]
    # Bug fix: iterate over a copy. The original looped over basic_parsers
    # while calling basic_parsers.remove() on it, which skips the element
    # following any removed parser (e.g. html5lib was never checked when
    # html.parser was missing).
    for name in list(basic_parsers):
        for builder in builder_registry.builders:
            if name in builder.features:
                break
        else:
            basic_parsers.remove(name)
            print((
                "I noticed that %s is not installed. Installing it may help." %
                name))

    if 'lxml' in basic_parsers:
        basic_parsers.append("lxml-xml")
        try:
            from lxml import etree
            print(("Found lxml version %s" % ".".join(map(str,etree.LXML_VERSION))))
        except ImportError as e:
            print(
                "lxml is not installed or couldn't be imported.")

    if 'html5lib' in basic_parsers:
        try:
            import html5lib
            print(("Found html5lib version %s" % html5lib.__version__))
        except ImportError as e:
            print(
                "html5lib is not installed or couldn't be imported.")

    if hasattr(data, 'read'):
        # file-like object: use its contents
        data = data.read()
    elif data.startswith("http:") or data.startswith("https:"):
        print(('"%s" looks like a URL. Beautiful Soup is not an HTTP client.' % data))
        print("You need to use some other library to get the document behind the URL, and feed that document to Beautiful Soup.")
        return
    else:
        try:
            if os.path.exists(data):
                print(('"%s" looks like a filename. Reading data from the file.' % data))
                with open(data) as fp:
                    data = fp.read()
        except ValueError:
            # This can happen on some platforms when the 'filename' is
            # too long. Assume it's data and not a filename.
            pass
    print("")

    # Try each available parser on the markup and show its output.
    for parser in basic_parsers:
        print(("Trying to parse your markup with %s" % parser))
        success = False
        try:
            soup = BeautifulSoup(data, features=parser)
            success = True
        except Exception as e:
            print(("%s could not parse the markup." % parser))
            traceback.print_exc()
        if success:
            print(("Here's what %s did with the markup:" % parser))
            print((soup.prettify()))
            print(("-" * 80))
def lxml_trace(data, html=True, **kwargs):
    """Print out the lxml events that occur during parsing.

    This lets you see how lxml parses a document when no Beautiful
    Soup code is running, which helps determine whether an
    lxml-specific problem lives in Beautiful Soup's lxml tree
    builders or in lxml itself.

    :param data: Some markup.
    :param html: If True, markup will be parsed with lxml's HTML parser.
        If False, lxml's XML parser will be used.
    """
    from lxml import etree
    recover = kwargs.pop('recover', True)
    # lxml wants bytes; encode text input before wrapping it in a stream.
    markup = data.encode("utf8") if isinstance(data, str) else data
    stream = BytesIO(markup)
    events = etree.iterparse(stream, html=html, recover=recover, **kwargs)
    for event, element in events:
        print(("%s, %4s, %s" % (event, element.tag, element.text)))
class AnnouncingParser(HTMLParser):
    """Subclass of HTMLParser that announces parse events, without doing
    anything else.
    You can use this to get a picture of how html.parser sees a given
    document. The easiest way to do this is to call `htmlparser_trace`.
    """
    def _p(self, s):
        """Print one announcement line (single funnel for all events)."""
        print(s)
    # Each handler below simply announces the event and its payload.
    def handle_starttag(self, name, attrs):
        self._p("%s START" % name)
    def handle_endtag(self, name):
        self._p("%s END" % name)
    def handle_data(self, data):
        self._p("%s DATA" % data)
    def handle_charref(self, name):
        self._p("%s CHARREF" % name)
    def handle_entityref(self, name):
        self._p("%s ENTITYREF" % name)
    def handle_comment(self, data):
        self._p("%s COMMENT" % data)
    def handle_decl(self, data):
        self._p("%s DECL" % data)
    def unknown_decl(self, data):
        self._p("%s UNKNOWN-DECL" % data)
    def handle_pi(self, data):
        self._p("%s PI" % data)
def METHOD_NAME(data):
    """Print out the HTMLParser events that occur during parsing.

    This lets you see how HTMLParser parses a document when no
    Beautiful Soup code is running.

    :param data: Some markup.
    """
    # AnnouncingParser prints every event it receives; feeding it the
    # markup is all that is needed.
    AnnouncingParser().feed(data)
_vowels = "aeiou"
_consonants = "bcdfghjklmnpqrstvwxyz"
def rword(length=5):
"Generate a random word-like string."
s = ''
for i in range(length):
if i % 2 == 0:
t = _consonants
else:
t = _vowels
s += random.choice(t)
return s
def rsentence(length=4):
    """Generate a random sentence-like string."""
    # Each word is 4-9 letters long; words are joined by single spaces.
    words = [rword(random.randint(4, 9)) for _ in range(length)]
    return " ".join(words)
def rdoc(num_elements=1000):
    """Randomly generate an invalid HTML document."""
    tag_names = ['p', 'div', 'span', 'i', 'b', 'script', 'table']
    elements = []
    for _ in range(num_elements):
        choice = random.randint(0, 3)
        if choice == 0:
            # Open a random tag.
            elements.append("<%s>" % random.choice(tag_names))
        elif choice == 1:
            # Insert some random text.
            elements.append(rsentence(random.randint(1, 4)))
        elif choice == 2:
            # Close a random tag -- not necessarily one that is open,
            # which is what makes the document invalid.
            elements.append("</%s>" % random.choice(tag_names))
        # choice == 3 deliberately contributes nothing.
    return "<html>" + "\n".join(elements) + "</html>"
def benchmark_parsers(num_elements=100000):
    """Very basic head-to-head performance benchmark.

    Generates one large invalid document with `rdoc`, then times
    Beautiful Soup with each tree builder, followed by raw lxml and raw
    html5lib as baselines. Results are printed to standard output.
    """
    print(("Comparative parser benchmark on Beautiful Soup %s" % __version__))
    data = rdoc(num_elements)
    print(("Generated a large invalid HTML document (%d bytes)." % len(data)))
    # BS4 on top of each builder; ["lxml", "html"] selects lxml's HTML mode.
    for parser in ["lxml", ["lxml", "html"], "html5lib", "html.parser"]:
        success = False
        try:
            a = time.time()
            soup = BeautifulSoup(data, parser)
            b = time.time()
            success = True
        except Exception as e:
            print(("%s could not parse the markup." % parser))
            traceback.print_exc()
        if success:
            print(("BS4+%s parsed the markup in %.2fs." % (parser, b-a)))
    # Baseline: raw lxml with no Beautiful Soup layer on top.
    from lxml import etree
    a = time.time()
    etree.HTML(data)
    b = time.time()
    print(("Raw lxml parsed the markup in %.2fs." % (b-a)))
    # Baseline: raw html5lib with no Beautiful Soup layer on top.
    import html5lib
    parser = html5lib.HTMLParser()
    a = time.time()
    parser.parse(data)
    b = time.time()
    print(("Raw html5lib parsed the markup in %.2fs." % (b-a)))
def profile(num_elements=100000, parser="lxml"):
    """Use Python's profiler on a randomly generated document.

    :param num_elements: Approximate number of elements in the document.
    :param parser: Name of the tree builder to profile.
    """
    # The profile data is written to a temporary file, then re-read by pstats.
    filehandle = tempfile.NamedTemporaryFile()
    filename = filehandle.name
    data = rdoc(num_elements)
    # Renamed from 'vars', which shadowed the builtin of the same name;
    # used as both globals and locals of the profiled statement.
    namespace = dict(bs4=bs4, data=data, parser=parser)
    cProfile.runctx('bs4.BeautifulSoup(data, parser)', namespace, namespace, filename)
    stats = pstats.Stats(filename)
    stats.sort_stats("cumulative")
    # Only show frames from bs4 and html5lib; cap the output at 50 entries.
    stats.print_stats('_html5lib|bs4', 50)
# If this file is run as a script, standard input is diagnosed.
# (A stray ' |' artifact at the end of the diagnose() call was removed;
# it made this line a syntax error.)
if __name__ == '__main__':
    diagnose(sys.stdin.read())
# Copyright (c) 2001-2006 Twisted Matrix Laboratories.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Tests for epoll wrapper.
"""
import socket
import errno
import time
import select
import unittest
from test import test_support
# Skip the whole module unless the platform exposes select.epoll at all...
if not hasattr(select, "epoll"):
    raise unittest.SkipTest("test works only on Linux 2.6")
# ...and unless the running kernel actually implements the syscall.
try:
    select.epoll()
except IOError, e:
    if e.errno == errno.ENOSYS:
        raise unittest.SkipTest("kernel doesn't support epoll()")
    raise
class TestEPoll(unittest.TestCase):
    """Exercises the select.epoll wrapper against real local sockets.

    NOTE: this file uses Python 2 syntax (``except IOError, e:``).
    """
    def setUp(self):
        # A listening server socket on an ephemeral port; every socket we
        # open is tracked in self.connections so tearDown can close it.
        self.serverSocket = socket.socket()
        self.serverSocket.bind(('127.0.0.1', 0))
        self.serverSocket.listen(1)
        self.connections = [self.serverSocket]
    def tearDown(self):
        for skt in self.connections:
            skt.close()
    def _connected_pair(self):
        """Return a freshly connected (client, server) socket pair."""
        client = socket.socket()
        client.setblocking(False)
        try:
            client.connect(('127.0.0.1', self.serverSocket.getsockname()[1]))
        except socket.error, e:
            # A non-blocking connect is expected to be "in progress".
            self.assertEqual(e.args[0], errno.EINPROGRESS)
        else:
            raise AssertionError("Connect should have raised EINPROGRESS")
        server, addr = self.serverSocket.accept()
        self.connections.extend((client, server))
        return client, server
    def test_create(self):
        """An epoll object has a valid fd until closed; then fileno() raises."""
        try:
            ep = select.epoll(16)
        except OSError, e:
            raise AssertionError(str(e))
        self.assertTrue(ep.fileno() > 0, ep.fileno())
        self.assertTrue(not ep.closed)
        ep.close()
        self.assertTrue(ep.closed)
        self.assertRaises(ValueError, ep.fileno)
    def test_badcreate(self):
        """Non-integer sizehint arguments are rejected with TypeError."""
        self.assertRaises(TypeError, select.epoll, 1, 2, 3)
        self.assertRaises(TypeError, select.epoll, 'foo')
        self.assertRaises(TypeError, select.epoll, None)
        self.assertRaises(TypeError, select.epoll, ())
        self.assertRaises(TypeError, select.epoll, ['foo'])
        self.assertRaises(TypeError, select.epoll, {})
    def test_add(self):
        """register() accepts fds and fileno()-objects; bad fds raise."""
        server, client = self._connected_pair()
        ep = select.epoll(2)
        try:
            ep.register(server.fileno(), select.EPOLLIN | select.EPOLLOUT)
            ep.register(client.fileno(), select.EPOLLIN | select.EPOLLOUT)
        finally:
            ep.close()
        # adding by object w/ fileno works, too.
        ep = select.epoll(2)
        try:
            ep.register(server, select.EPOLLIN | select.EPOLLOUT)
            ep.register(client, select.EPOLLIN | select.EPOLLOUT)
        finally:
            ep.close()
        ep = select.epoll(2)
        try:
            # TypeError: argument must be an int, or have a fileno() method.
            self.assertRaises(TypeError, ep.register, object(),
                              select.EPOLLIN | select.EPOLLOUT)
            self.assertRaises(TypeError, ep.register, None,
                              select.EPOLLIN | select.EPOLLOUT)
            # ValueError: file descriptor cannot be a negative integer (-1)
            self.assertRaises(ValueError, ep.register, -1,
                              select.EPOLLIN | select.EPOLLOUT)
            # IOError: [Errno 9] Bad file descriptor
            self.assertRaises(IOError, ep.register, 10000,
                              select.EPOLLIN | select.EPOLLOUT)
            # registering twice also raises an exception
            ep.register(server, select.EPOLLIN | select.EPOLLOUT)
            self.assertRaises(IOError, ep.register, server,
                              select.EPOLLIN | select.EPOLLOUT)
        finally:
            ep.close()
    def test_fromfd(self):
        """fromfd() shares the kernel epoll set; closing one fd breaks the other."""
        server, client = self._connected_pair()
        ep = select.epoll(2)
        ep2 = select.epoll.fromfd(ep.fileno())
        ep2.register(server.fileno(), select.EPOLLIN | select.EPOLLOUT)
        ep2.register(client.fileno(), select.EPOLLIN | select.EPOLLOUT)
        events = ep.poll(1, 4)
        events2 = ep2.poll(0.9, 4)
        self.assertEqual(len(events), 2)
        self.assertEqual(len(events2), 2)
        ep.close()
        try:
            ep2.poll(1, 4)
        except IOError, e:
            self.assertEqual(e.args[0], errno.EBADF, e)
        else:
            self.fail("epoll on closed fd didn't raise EBADF")
    def test_control_and_wait(self):
        """End-to-end: register, poll, send data, unregister, modify."""
        client, server = self._connected_pair()
        ep = select.epoll(16)
        ep.register(server.fileno(),
                    select.EPOLLIN | select.EPOLLOUT | select.EPOLLET)
        ep.register(client.fileno(),
                    select.EPOLLIN | select.EPOLLOUT | select.EPOLLET)
        # Both ends should be immediately writable.
        now = time.time()
        events = ep.poll(1, 4)
        then = time.time()
        self.assertFalse(then - now > 0.1, then - now)
        events.sort()
        expected = [(client.fileno(), select.EPOLLOUT),
                    (server.fileno(), select.EPOLLOUT)]
        expected.sort()
        self.assertEqual(events, expected)
        self.assertFalse(then - now > 0.01, then - now)
        # Edge-triggered: with no new activity, polling again reports nothing.
        now = time.time()
        events = ep.poll(timeout=2.1, maxevents=4)
        then = time.time()
        self.assertFalse(events)
        # New data makes both ends readable as well as writable.
        client.send("Hello!")
        server.send("world!!!")
        now = time.time()
        events = ep.poll(1, 4)
        then = time.time()
        self.assertFalse(then - now > 0.01)
        events.sort()
        expected = [(client.fileno(), select.EPOLLIN | select.EPOLLOUT),
                    (server.fileno(), select.EPOLLIN | select.EPOLLOUT)]
        expected.sort()
        self.assertEqual(events, expected)
        # Drop the client and narrow the server's event mask to EPOLLOUT.
        ep.unregister(client.fileno())
        ep.modify(server.fileno(), select.EPOLLOUT)
        now = time.time()
        events = ep.poll(1, 4)
        then = time.time()
        self.assertFalse(then - now > 0.01)
        expected = [(server.fileno(), select.EPOLLOUT)]
        self.assertEqual(events, expected)
    def test_errors(self):
        """Negative sizehints and negative fds raise ValueError."""
        self.assertRaises(ValueError, select.epoll, -2)
        self.assertRaises(ValueError, select.epoll().register, -1,
                          select.EPOLLIN)
    def test_unregister_closed(self):
        """Unregistering a socket's fd after the socket closed must work."""
        server, client = self._connected_pair()
        fd = server.fileno()
        ep = select.epoll(16)
        ep.register(server)
        now = time.time()
        events = ep.poll(1, 4)
        then = time.time()
        self.assertFalse(then - now > 0.01)
        server.close()
        ep.unregister(fd)
def METHOD_NAME():
    """Run the epoll test suite through Twisted's test_support helper."""
    test_support.run_unittest(TestEPoll)

if __name__ == "__main__":
    # (A stray ' |' artifact after the call was removed; it was a syntax error.)
    METHOD_NAME()
import numpy as np
import pytest
from sklearn.cluster import BisectingKMeans
from sklearn.metrics import v_measure_score
from sklearn.utils._testing import assert_allclose, assert_array_equal
from sklearn.utils.fixes import CSR_CONTAINERS
@pytest.mark.parametrize("bisecting_strategy", ["biggest_inertia", "largest_cluster"])
@pytest.mark.parametrize("init", ["k-means++", "random"])
def test_three_clusters(bisecting_strategy, init):
    """Tries to perform bisect k-means for three clusters to check
    if splitting data is performed correctly.
    """
    X = np.array(
        [[1, 1], [10, 1], [3, 1], [10, 0], [2, 1], [10, 2], [10, 8], [10, 9], [10, 10]]
    )
    model = BisectingKMeans(
        n_clusters=3,
        random_state=0,
        bisecting_strategy=bisecting_strategy,
        init=init,
    ).fit(X)
    expected_centers = [[2, 1], [10, 1], [10, 9]]
    expected_labels = [0, 1, 0, 1, 0, 1, 2, 2, 2]
    # Cluster order is arbitrary, so compare the sorted center lists.
    assert_allclose(
        sorted(expected_centers), sorted(model.cluster_centers_.tolist())
    )
    # Labels are compared up to a permutation via the V-measure.
    assert_allclose(v_measure_score(expected_labels, model.labels_), 1.0)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_sparse(csr_container):
    """Test Bisecting K-Means with sparse data.

    Checks if labels and centers are the same between dense and sparse.
    """
    rng = np.random.RandomState(0)
    X = rng.rand(20, 2)
    # Zero out most entries so the sparse representation is meaningful.
    X[X < 0.8] = 0
    estimator = BisectingKMeans(n_clusters=3, random_state=0)
    sparse_centers = estimator.fit(csr_container(X)).cluster_centers_
    dense_centers = estimator.fit(X).cluster_centers_
    # Dense and sparse inputs must yield the same centers.
    assert_allclose(dense_centers, sparse_centers, atol=1e-8)
@pytest.mark.parametrize("n_clusters", [4, 5])
def test_n_clusters(n_clusters):
    """Test if resulting labels are in range [0, n_clusters - 1]."""
    rng = np.random.RandomState(0)
    data = rng.rand(10, 2)
    labels = BisectingKMeans(n_clusters=n_clusters, random_state=0).fit(data).labels_
    # Every id in [0, n_clusters) must appear, and nothing else.
    assert_array_equal(np.unique(labels), np.arange(n_clusters))
def METHOD_NAME():
    """Test single cluster."""
    X = np.array([[1, 2], [10, 2], [10, 8]])
    model = BisectingKMeans(n_clusters=1, random_state=0).fit(X)
    # With one cluster, every label and every prediction must be 0...
    assert all(model.labels_ == 0)
    assert all(model.predict(X) == 0)
    # ...and the lone center is simply the mean of the data.
    assert_allclose(model.cluster_centers_, X.mean(axis=0).reshape(1, -1))
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS + [None])
def test_fit_predict(csr_container):
    """Check if labels from fit(X) method are same as from fit(X).predict(X)."""
    rng = np.random.RandomState(0)
    X = rng.rand(10, 2)
    if csr_container is not None:
        # Sparsify and convert when a sparse container is requested.
        X[X < 0.8] = 0
        X = csr_container(X)
    model = BisectingKMeans(n_clusters=3, random_state=0).fit(X)
    assert_array_equal(model.labels_, model.predict(X))
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS + [None])
def test_dtype_preserved(csr_container, global_dtype):
    """Check that centers dtype is the same as input data dtype."""
    rng = np.random.RandomState(0)
    X = rng.rand(10, 2).astype(global_dtype, copy=False)
    if csr_container is not None:
        # Sparsify and convert when a sparse container is requested.
        X[X < 0.8] = 0
        X = csr_container(X)
    model = BisectingKMeans(n_clusters=3, random_state=0).fit(X)
    assert model.cluster_centers_.dtype == global_dtype
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS + [None])
def test_float32_float64_equivalence(csr_container):
    """Check that the results are the same between float32 and float64."""
    rng = np.random.RandomState(0)
    X = rng.rand(10, 2)
    if csr_container is not None:
        # Sparsify and convert when a sparse container is requested.
        X[X < 0.8] = 0
        X = csr_container(X)
    model64 = BisectingKMeans(n_clusters=3, random_state=0).fit(X)
    model32 = BisectingKMeans(n_clusters=3, random_state=0).fit(X.astype(np.float32))
    assert_allclose(model32.cluster_centers_, model64.cluster_centers_)
    assert_array_equal(model32.labels_, model64.labels_)
@pytest.mark.parametrize("algorithm", ("lloyd", "elkan"))
def test_no_crash_on_empty_bisections(algorithm):
    """Predicting far outside the training range must not divide by zero."""
    # Non-regression test for:
    # https://github.com/scikit-learn/scikit-learn/issues/27081
    rng = np.random.RandomState(0)
    X_train = rng.rand(3000, 10)
    bkm = BisectingKMeans(n_clusters=10, algorithm=algorithm).fit(X_train)
    # predict on scaled data to trigger pathologic case
    # where the inner mask leads to empty bisections.
    X_test = 50 * rng.rand(100, 10)
    labels = bkm.predict(X_test)  # should not crash with idiv by 0
    assert np.isin(np.unique(labels), np.arange(10)).all()
def test_one_feature():
    # Check that no error is raised when there is only one feature
    # Non-regression test for:
    # https://github.com/scikit-learn/scikit-learn/issues/27236
    # Use a seeded RandomState (consistent with the other tests in this
    # module) instead of the unseeded global RNG, so the test is
    # reproducible. A stray ' |' artifact on the last line was removed.
    rng = np.random.RandomState(0)
    X = rng.normal(size=(128, 1))
    BisectingKMeans(bisecting_strategy="biggest_inertia", random_state=0).fit(X)
## Program: VMTK
## Language: Python
## Date: January 12, 2018
## Version: 1.4
## Copyright (c) Richard Izzo, Luca Antiga, All rights reserved.
## See LICENSE file for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.
## Note: this code was contributed by
## Richard Izzo (Github @rlizzo)
## University at Buffalo
import pytest
import vmtk.vmtksurfacebooleanoperation as surfacebooleanoperation
import vtk
@pytest.mark.skipif(vtk.vtkVersion.GetVTKVersion() == '9.1.0', reason="requires vtk version != 9.1.0")
@pytest.mark.parametrize("operation,paramid", [
    ('intersection', '1'),
    ('difference', '2'),
])
def test_operations_default_tolerance_regression(aorta_surface2, aorta_surface_reference,
                                                 operation, paramid, compare_surfaces):
    """Regression check of boolean ops at default tolerance (non-9.1.0 VTK)."""
    baseline = __name__ + '_test_operations_default_tolerance_' + paramid + '.vtp'
    op = surfacebooleanoperation.vmtkSurfaceBooleanOperation()
    op.Surface = aorta_surface2
    op.Surface2 = aorta_surface_reference
    op.Operation = operation
    op.Execute()
    assert compare_surfaces(op.Surface, baseline, tolerance=1E-6) == True
@pytest.mark.skipif(vtk.vtkVersion.GetVTKVersion() != '9.1.0', reason="requires vtk == 9.1.0")
@pytest.mark.parametrize("operation,paramid", [
    ('intersection', '1'),
    ('difference', '2'),
])
def test_operations_default_tolerance_vtk_v9_1(aorta_surface2, aorta_surface_reference,
                                               operation, paramid, compare_surfaces):
    """Same check as the regression test, against VTK-9.1-specific baselines."""
    baseline = __name__ + '_test_operations_default_tolerance_' + paramid + '_vtk-9_1.vtp'
    op = surfacebooleanoperation.vmtkSurfaceBooleanOperation()
    op.Surface = aorta_surface2
    op.Surface2 = aorta_surface_reference
    op.Operation = operation
    op.Execute()
    assert compare_surfaces(op.Surface, baseline, tolerance=1E-6) == True
@pytest.mark.parametrize("operation,comp_tol,paramid", [
    ('union', 1e-6, '0'),
    ('intersection', 1e-1, '1'),
    ('difference', 1e-1, '2'),
])
def test_operations_default_tolerance(aorta_surface2, aorta_surface_reference,
                                      operation, comp_tol, paramid, compare_surfaces):
    """Boolean ops at default tolerance, with per-operation comparison tolerance."""
    baseline = __name__ + '_test_operations_default_tolerance_' + paramid + '.vtp'
    op = surfacebooleanoperation.vmtkSurfaceBooleanOperation()
    op.Surface = aorta_surface2
    op.Surface2 = aorta_surface_reference
    op.Operation = operation
    op.Execute()
    assert compare_surfaces(op.Surface, baseline, tolerance=comp_tol) == True
@pytest.mark.parametrize("operation,tolerance,paramid", [
    ('union', 0.5, '0'),
    ('intersection', 0.5, '1'),
    ('difference', 0.5, '2'),
])
def METHOD_NAME(aorta_surface2, aorta_surface_reference,
                operation, tolerance, paramid, compare_surfaces):
    """Boolean ops with an explicit (loose) operation tolerance."""
    baseline = __name__ + '_test_operations_varied_tolerance_' + paramid + '.vtp'
    op = surfacebooleanoperation.vmtkSurfaceBooleanOperation()
    op.Surface = aorta_surface2
    op.Surface2 = aorta_surface_reference
    op.Operation = operation
    op.Tolerance = tolerance
    op.Execute()
    assert compare_surfaces(op.Surface, baseline, tolerance=1E-3) == True
@pytest.mark.parametrize("operation,paramid", [
    ('union', '0'),
    ('intersection', '1'),
    ('difference', '2'),
])
def test_operations_loop_method(aorta_surface2, aorta_surface_reference,
                                operation, paramid, compare_surfaces):
    """Smoke-test the 'loop' boolean method: Execute() must not raise.

    NOTE(review): no baseline comparison is performed here. The ``name``
    variable previously pointed at the *varied_tolerance* baselines
    (copy-paste error) and was unused; it is fixed to this test's own
    name in case a compare_surfaces() assertion is added later.
    A stray ' |' artifact at the end of the function was removed.
    """
    name = __name__ + '_test_operations_loop_method_' + paramid + '.vtp'
    booler = surfacebooleanoperation.vmtkSurfaceBooleanOperation()
    booler.Surface = aorta_surface2
    booler.Surface2 = aorta_surface_reference
    booler.Operation = operation
    booler.Method = 'loop'
    booler.Execute()
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright (c) 2023 ScyllaDB
from itertools import cycle
import logging
import botocore
import boto3
LOGGER = logging.getLogger(__name__)
class AwsKms:
    """Manage a per-region pool of AWS KMS keys used for alias rotation.

    Keeps ``NUM_OF_KMS_KEYS`` enabled keys per region -- reusing existing
    keys carrying ``KMS_KEYS_TAGS``, creating new ones as needed -- and
    rotates aliases between the pooled keys round-robin.
    """

    NUM_OF_KMS_KEYS = 3
    KMS_KEYS_TAGS = {
        'Purpose': 'Rotation',
        'UsedBy': 'QA',
    }

    def __init__(self, region_names):
        """:param region_names: a region name or non-empty list of region names."""
        if not region_names:
            raise ValueError("'region_names' parameter cannot be empty")
        self.region_names = region_names if isinstance(region_names, list) else [region_names]
        # Per-region state: boto3 client, pool of key ids, and a dict that maps
        # alias name -> endless iterator (itertools.cycle) over the key pool.
        self.mapping = {
            region_name: {
                'client': boto3.client('kms', region_name=region_name),
                'kms_key_ids': [],
                'kms_keys_aliases': {},
            } for region_name in self.region_names
        }
        self.num_of_tags_to_match = len(self.KMS_KEYS_TAGS)

    def create_kms_key(self, region_name):
        """Create a new tagged KMS key in *region_name* and add it to the pool."""
        LOGGER.info("Creating KMS key in the '%s' region", region_name)
        kms_key = self.mapping[region_name]['client'].create_key(
            Description='qa-kms-key-for-rotation',
            Tags=[{'TagKey': k, 'TagValue': v} for k, v in self.KMS_KEYS_TAGS.items()],
        )
        self.mapping[region_name]['kms_key_ids'].append(kms_key['KeyMetadata']['KeyId'])

    def get_kms_key_tags(self, kms_key_id, region_name):
        """Return a key's tags as a dict, or {} if the key cannot be read."""
        try:
            tags = self.mapping[region_name]['client'].list_resource_tags(KeyId=kms_key_id, Limit=999)['Tags']
            return {tag['TagKey']: tag['TagValue'] for tag in tags}
        except botocore.exceptions.ClientError as exc:
            LOGGER.debug(exc.response)
            # Keys we are not allowed to inspect (or that vanished) are skipped.
            if any(msg in exc.response['Error']['Code'] for msg in ('AccessDeniedException', 'NotFound')):
                return {}
            raise

    def get_kms_keys(self, region_name, next_marker=None):
        """Yield the ids of all *enabled* KMS keys in the region (paginated)."""
        client, kwargs = self.mapping[region_name]['client'], {'Limit': 30}
        if next_marker:
            kwargs['Marker'] = next_marker
        kms_keys = client.list_keys(**kwargs)
        for kms_key in kms_keys['Keys']:
            current_kms_key_id = kms_key['KeyId']
            if not client.describe_key(KeyId=current_kms_key_id)['KeyMetadata']['Enabled']:
                continue
            yield current_kms_key_id
        if kms_keys.get("NextMarker"):
            # Recurse into the next page of results.
            yield from self.get_kms_keys(region_name=region_name, next_marker=kms_keys["NextMarker"])

    def find_or_create_suitable_kms_keys(self):
        """Fill every region's pool up to NUM_OF_KMS_KEYS, preferring tagged keys."""
        for region_name in self.region_names:
            if self.NUM_OF_KMS_KEYS <= len(self.mapping[region_name]['kms_key_ids']):
                continue
            # First pass: adopt existing keys whose tags all match KMS_KEYS_TAGS.
            for current_kms_key_id in self.get_kms_keys(region_name):
                current_kms_key_tags = self.get_kms_key_tags(current_kms_key_id, region_name)
                if not current_kms_key_tags:
                    continue
                kms_key_tags_match_counter = 0
                for expected_k, expected_v in self.KMS_KEYS_TAGS.items():
                    if current_kms_key_tags.get(expected_k) != expected_v:
                        break
                    kms_key_tags_match_counter += 1
                if kms_key_tags_match_counter >= self.num_of_tags_to_match:
                    self.mapping[region_name]['kms_key_ids'].append(current_kms_key_id)
                if self.NUM_OF_KMS_KEYS == len(self.mapping[region_name]['kms_key_ids']):
                    break
            # Second pass: create whatever is still missing.
            while self.NUM_OF_KMS_KEYS > len(self.mapping[region_name]['kms_key_ids']):
                self.create_kms_key(region_name)

    def get_next_kms_key(self, kms_key_alias_name, region_name):
        """Return the next pool key for the alias, skipping its current target."""
        # Create endless KMS keys iterator
        if kms_key_alias_name not in self.mapping[region_name]['kms_keys_aliases']:
            self.mapping[region_name]['kms_keys_aliases'][kms_key_alias_name] = cycle(
                self.mapping[region_name]['kms_key_ids'])
        kms_key_id_candidate = next(self.mapping[region_name]['kms_keys_aliases'][kms_key_alias_name])
        # Walk through the aliases of the KMS key candidate and check that our alias is not there
        for alias in self.mapping[region_name]['client'].list_aliases(
                KeyId=kms_key_id_candidate, Limit=999)['Aliases']:
            if kms_key_alias_name == alias['AliasName']:
                # Current KMS Key candidate is already assigned to the alias, so, return another one
                return next(self.mapping[region_name]['kms_keys_aliases'][kms_key_alias_name])
        # Current KMS Key candidate is not assigned to the alias, use it
        return kms_key_id_candidate

    def create_alias(self, kms_key_alias_name, tolerate_already_exists=True):
        """Create the alias in every region, pointing at a pool key."""
        self.find_or_create_suitable_kms_keys()
        for region_name in self.region_names:
            kms_key_id = self.get_next_kms_key(kms_key_alias_name, region_name)
            LOGGER.info(
                "Creating '%s' alias for the '%s' KMS key in the '%s' region",
                kms_key_alias_name, kms_key_id, region_name)
            try:
                self.mapping[region_name]['client'].create_alias(
                    AliasName=kms_key_alias_name, TargetKeyId=kms_key_id)
            except botocore.exceptions.ClientError as exc:
                LOGGER.debug(exc.response)
                if not ('AlreadyExistsException' in exc.response['Error']['Code'] and tolerate_already_exists):
                    raise

    def rotate_kms_key(self, kms_key_alias_name):
        """Point the alias at the next key of the pool in every region."""
        self.find_or_create_suitable_kms_keys()
        for region_name in self.region_names:
            new_kms_key_id = self.get_next_kms_key(kms_key_alias_name, region_name)
            LOGGER.info(
                "Assigning the '%s' alias to the '%s' KMS key in the '%s' region",
                kms_key_alias_name, new_kms_key_id, region_name)
            self.mapping[region_name]['client'].update_alias(
                AliasName=kms_key_alias_name, TargetKeyId=new_kms_key_id)

    def METHOD_NAME(self, kms_key_alias_name, tolerate_errors=True):
        """Delete the alias in every region; errors are tolerated by default."""
        LOGGER.info("Deleting the '%s' alias in the KMS", kms_key_alias_name)
        for region_name in self.region_names:
            try:
                self.mapping[region_name]['client'].METHOD_NAME(AliasName=kms_key_alias_name)
                # BUGFIX: 'kms_keys_aliases' is a dict, but the old code called
                # list.remove() on it, which raised AttributeError at runtime.
                # pop() drops the per-alias iterator whether or not it exists.
                self.mapping[region_name]['kms_keys_aliases'].pop(kms_key_alias_name, None)
            except botocore.exceptions.ClientError as exc:
                LOGGER.debug(exc.response)
                if not tolerate_errors:
                    raise
"""Pipette Sensors OT3."""
import argparse
import asyncio
from typing import Optional, Dict, List
from opentrons.hardware_control.ot3api import OT3API
from opentrons.hardware_control.instruments.ot3.pipette import Pipette
from opentrons_hardware.firmware_bindings.constants import SensorId, SensorType
from hardware_testing.opentrons_api.types import OT3Mount
from hardware_testing.opentrons_api import helpers_ot3
READ_INTERVAL_SECONDS = 0.5

async def _read_sensor(
    api: OT3API, mount: OT3Mount, sensor_type: SensorType, sensor_id: SensorId
) -> List[float]:
    """Read one sensor channel and return its value(s) rounded to 1 decimal."""
    if sensor_type == SensorType.capacitive:
        readings = [await helpers_ot3.get_capacitance_ot3(api, mount, sensor_id)]
    elif sensor_type == SensorType.pressure:
        readings = [await helpers_ot3.get_pressure_ot3(api, mount, sensor_id)]
    elif sensor_type == SensorType.environment:
        # The environment sensor reports a (temperature, humidity) tuple.
        readings = list(
            await helpers_ot3.get_temperature_humidity_ot3(api, mount, sensor_id)
        )
    else:
        raise ValueError(f"unexpected sensor type: {sensor_type.name}")
    return [round(value, 1) for value in readings]
async def _read_sensors_in_while_loop(
    api: OT3API,
    mount: OT3Mount,
    sensors: Dict[SensorType, List[SensorId]],
    tip_sensor: bool = False,
) -> None:
    """Poll the given sensors forever, printing a reading block every
    READ_INTERVAL_SECONDS. Never returns; interrupt the process to stop."""
    while True:
        print("======================================================")
        for s_type, s_chnl in sensors.items():
            for s_id in s_chnl:
                vals = await _read_sensor(api, mount, s_type, s_id)
                print(f"{s_type.name}: {s_id.name} = {vals}")
        if tip_sensor:
            # TODO: implement tip-sensor readings after added to helpers_ot3
            # TODO: how do we handle 2x tip-sensors in firmware? (for 96ch)
            pass
        await asyncio.sleep(READ_INTERVAL_SECONDS)
async def METHOD_NAME(api: OT3API) -> None:
    """Continuously print the gripper's two capacitive sensor channels."""
    assert api.has_gripper(), "no gripper found"
    # The gripper exposes only capacitive sensors, on channels S0 and S1.
    gripper_sensors: Dict[SensorType, List[SensorId]] = {
        SensorType.capacitive: [SensorId.S0, SensorId.S1],
    }
    await _read_sensors_in_while_loop(api, OT3Mount.GRIPPER, gripper_sensors)
async def _handle_pipette(api: OT3API, mount: OT3Mount) -> None:
    """Continuously print all sensors of the pipette attached to *mount*."""
    pip: Optional[Pipette] = api.hardware_pipettes[mount.to_mount()]
    assert pip, f"no pipette found on {mount.name}"
    num_channels = pip.channels
    single = [SensorId.S0]
    double = [SensorId.S0, SensorId.S1]
    # channel-count -> (capacitive, pressure, environment) sensor ids.
    # NOTE: on 8ch, capacitive is 1x IC using 2x channels.
    layout = {
        1: (single, single, single),
        8: (double, double, single),
        96: (double, double, double),
    }
    if num_channels not in layout:
        raise ValueError(f"unexpected number of channels: {num_channels}")
    capacitive_channels, pressure_channels, environment_channels = layout[num_channels]
    sensors: Dict[SensorType, List[SensorId]] = {
        SensorType.capacitive: capacitive_channels,
        SensorType.pressure: pressure_channels,
        SensorType.environment: environment_channels,
    }
    await _read_sensors_in_while_loop(api, mount, sensors, tip_sensor=True)
async def _main(is_simulating: bool, mount: OT3Mount) -> None:
    """Build the OT3 hardware API and dispatch to the right sensor handler."""
    api = await helpers_ot3.build_async_ot3_hardware_api(is_simulating=is_simulating)
    if mount == OT3Mount.GRIPPER:
        await METHOD_NAME(api)
    else:
        await _handle_pipette(api, mount)
if __name__ == "__main__":
    # CLI entrypoint: choose a mount (or the gripper) and stream its sensors.
    # (A stray ' |' artifact at the end of the asyncio.run call was removed;
    # it made the line a syntax error.)
    mount_options = {
        "left": OT3Mount.LEFT,
        "right": OT3Mount.RIGHT,
        "gripper": OT3Mount.GRIPPER,
    }
    parser = argparse.ArgumentParser()
    parser.add_argument("--simulate", action="store_true")
    parser.add_argument(
        "--mount", type=str, choices=list(mount_options.keys()), default="left"
    )
    args = parser.parse_args()
    _mount = mount_options[args.mount]
    asyncio.run(_main(args.simulate, _mount))
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class Operations(object):
    """Operations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2020_07_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.OperationListResult"]
        """Lists all of the available Network Rest API operations.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either OperationListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_07_01.models.OperationListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.OperationListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-07-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the GET request; a next_link already embeds the query string.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def METHOD_NAME(pipeline_response):
            # Deserialize one page into (next_link, iterator of items).
            deserialized = self._deserialize('OperationListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch one page and fail fast on a non-200 status.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, METHOD_NAME
        )
    # NOTE: a stray ' |' extraction artifact was removed from the end of this line.
    list.metadata = {'url': '/providers/Microsoft.Network/operations'}  # type: ignore
import pytest
from traitlets import TraitError
from traitlets.config.loader import Config
from ctapipe.containers import ArrayEventContainer
from ctapipe.core import Component
from ctapipe.io import DataLevel, EventSource, SimTelEventSource
from ctapipe.utils import get_dataset_path
prod5_path = "gamma_20deg_0deg_run2___cta-prod5-paranal_desert-2147m-Paranal-dark_cone10-100evts.simtel.zst"
def test_construct():
    """EventSource cannot be constructed without input_url, parent or config."""
    # at least one of input_url / parent / config is required
    with pytest.raises(ValueError):
        EventSource()
class DummyEventSource(EventSource):
    """
    Minimal concrete EventSource used by the tests in this module.

    Recognises files whose first five bytes are ``b"dummy"`` and yields
    five bare ``ArrayEventContainer`` events.
    """

    def _generator(self):
        # A fixed, small number of events is enough to test iteration.
        for i in range(5):
            yield ArrayEventContainer(count=i)

    @staticmethod
    def is_compatible(file_path):
        # A file belongs to this source if it starts with the magic marker.
        with open(file_path, "rb") as f:
            marker = f.read(5)
        return marker == b"dummy"

    @property
    def subarray(self):
        # No real subarray description is needed for these tests.
        return None

    @property
    def is_simulation(self):
        return False

    @property
    def scheduling_blocks(self):
        return dict()

    @property
    def observation_blocks(self):
        return dict()

    @property
    def datalevels(self):
        # Pretend to provide raw (R0) data only.
        return (DataLevel.R0,)
def test_can_be_implemented():
    """A concrete EventSource subclass can be instantiated on a file.

    Renamed from the masked placeholder ``METHOD_NAME``: without the
    ``test_`` prefix pytest silently skips this test.
    """
    dataset = get_dataset_path(prod5_path)
    test_reader = DummyEventSource(input_url=dataset)
    assert test_reader is not None
def test_is_iterable():
    """Iterating an EventSource consumes all events without error."""
    dataset = get_dataset_path(prod5_path)
    test_reader = DummyEventSource(input_url=dataset)
    for _ in test_reader:
        pass
def test_function():
    """EventSource() acts as a factory and returns the matching subclass."""
    dataset = get_dataset_path(prod5_path)
    reader = EventSource(input_url=dataset)
    assert isinstance(reader, SimTelEventSource)
    assert reader.input_url == dataset
def test_function_incompatible_file():
    """A file no EventSource implementation understands raises ValueError."""
    with pytest.raises(ValueError):
        dataset = get_dataset_path("optics.ecsv.txt")
        EventSource(input_url=dataset)
def test_function_nonexistant_file():
    """A non-existing input path is rejected by trait validation."""
    with pytest.raises(TraitError):
        dataset = "/fake_path/fake_file.fake_extension"
        EventSource(input_url=dataset)
def test_from_config(tmp_path):
    """The factory resolves the concrete subclass from a Config object."""
    dataset = get_dataset_path(prod5_path)
    config = Config({"EventSource": {"input_url": dataset}})
    reader = EventSource(config=config)
    assert isinstance(reader, SimTelEventSource)
    assert reader.input_url == dataset

    # create dummy file
    dataset = tmp_path / "test.dummy"
    with dataset.open("wb") as f:
        f.write(b"dummy")

    # The magic marker must route construction to DummyEventSource.
    config = Config({"EventSource": {"input_url": dataset}})
    reader = EventSource(config=config)
    assert isinstance(reader, DummyEventSource)
    assert reader.input_url == dataset
def test_parent():
    """EventSource(parent=...) inherits its configuration from the parent."""
    dataset = get_dataset_path(prod5_path)

    class Parent(Component):
        def __init__(self, config=None, parent=None):
            super().__init__(config=config, parent=parent)
            self.source = EventSource(parent=self)

    # test with EventSource in root of config
    config = Config({"EventSource": {"input_url": dataset}})
    parent = Parent(config=config)
    assert isinstance(parent.source, SimTelEventSource)
    # presumably parent is held as a weak reference, hence comparing
    # __weakref__ rather than the objects directly — TODO confirm
    assert parent.source.parent.__weakref__ is parent.__weakref__

    # test with EventSource as subconfig of parent
    config = Config({"Parent": {"EventSource": {"input_url": dataset}}})
    parent = Parent(config=config)
    assert isinstance(parent.source, SimTelEventSource)
    assert parent.source.parent.__weakref__ is parent.__weakref__
def test_from_config_default():
    """EventSource falls back to the trait's default_value with empty config.

    The class-level default is patched and restored in a ``finally`` block,
    so a failing assertion cannot leak the patched default into other tests
    (the original restored it only on the success path).
    """
    old_default = EventSource.input_url.default_value
    dataset = get_dataset_path(prod5_path)
    EventSource.input_url.default_value = dataset
    try:
        config = Config()
        reader = EventSource(config=config)
        assert isinstance(reader, SimTelEventSource)
        assert reader.input_url == dataset
    finally:
        EventSource.input_url.default_value = old_default
def test_from_config_invalid_type():
    """An input_url of the wrong type in the config raises TraitError.

    Fix: the original patched ``EventSource.input_url.default_value`` and
    never restored it, leaking the patched default into subsequent tests.
    """
    old_default = EventSource.input_url.default_value
    dataset = get_dataset_path(prod5_path)
    # Patch the default so any failure can only come from the bad config value.
    EventSource.input_url.default_value = dataset
    try:
        config = Config({"EventSource": {"input_url": 124}})
        with pytest.raises(TraitError):
            EventSource(config=config)
    finally:
        EventSource.input_url.default_value = old_default
def test_event_source_input_url_config_override():
    """An explicit input_url argument wins over the value in the config."""
    dataset1 = get_dataset_path(
        "gamma_LaPalma_baseline_20Zd_180Az_prod3b_test.simtel.gz"
    )
    dataset2 = get_dataset_path(prod5_path)

    config = Config({"EventSource": {"input_url": dataset1}})
    reader = EventSource(input_url=dataset2, config=config)
    assert isinstance(reader, SimTelEventSource)
    assert reader.input_url == dataset2
def test_max_events():
    """max_events passed as a keyword is stored on the source."""
    max_events = 10
    dataset = get_dataset_path(prod5_path)
    reader = EventSource(input_url=dataset, max_events=max_events)
    assert reader.max_events == max_events
def test_max_events_from_config():
    """max_events can also be supplied through the configuration."""
    dataset = get_dataset_path(prod5_path)
    max_events = 10
    config = Config({"EventSource": {"input_url": dataset, "max_events": max_events}})
    reader = EventSource(config=config)
    assert reader.max_events == max_events
def test_allowed_tels():
    """allowed_tels defaults to None and accepts a set of telescope ids."""
    dataset = get_dataset_path(prod5_path)
    reader = EventSource(input_url=dataset)
    assert reader.allowed_tels is None
    reader = EventSource(input_url=dataset, allowed_tels={1, 3})
    assert reader.allowed_tels == {1, 3}
def test_allowed_tels_from_config():
    """allowed_tels can be supplied through the configuration."""
    dataset = get_dataset_path(prod5_path)
    config = Config({"EventSource": {"input_url": dataset, "allowed_tels": {1, 3}}})
    reader = EventSource(config=config, parent=None)
    assert reader.allowed_tels == {1, 3}
6,227 | get actual data | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
import frappe
from frappe import _
from erpnext.accounts.doctype.monthly_distribution.monthly_distribution import (
get_periodwise_distribution_data,
)
from erpnext.accounts.report.financial_statements import get_period_list
from erpnext.accounts.utils import get_fiscal_year
def get_data_column(filters, partner_doctype):
    """Build (columns, data) for the target-variance report.

    ``partner_doctype`` names the sales dimension (e.g. Sales Person or
    Territory); each data row is keyed by (partner, item_group).
    """
    data = []
    period_list = get_period_list(
        filters.fiscal_year,
        filters.fiscal_year,
        "",
        "",
        "Fiscal Year",
        filters.period,
        company=filters.company,
    )

    rows = get_data(filters, period_list, partner_doctype)
    columns = get_columns(filters, period_list, partner_doctype)

    if not rows:
        return columns, data

    for key, value in rows.items():
        # key is (partner, item_group); flatten it into the row dict.
        value.update({frappe.scrub(partner_doctype): key[0], "item_group": key[1]})
        data.append(value)

    return columns, data
def get_data(filters, period_list, partner_doctype):
    """Collect configured targets and achieved figures per partner.

    Returns None when no Target Detail rows match the filters.
    """
    sales_field = frappe.scrub(partner_doctype)
    sales_users_data = get_parents_data(filters, partner_doctype)

    if not sales_users_data:
        return
    sales_users = []
    sales_user_wise_item_groups = {}
    for d in sales_users_data:
        if d.parent not in sales_users:
            sales_users.append(d.parent)

        # Track which item groups each partner has explicit targets for.
        sales_user_wise_item_groups.setdefault(d.parent, [])
        if d.item_group:
            sales_user_wise_item_groups[d.parent].append(d.item_group)

    # Sales Orders are dated by transaction_date, other doctypes by posting_date.
    date_field = "transaction_date" if filters.get("doctype") == "Sales Order" else "posting_date"

    actual_data = METHOD_NAME(filters, sales_users, date_field, sales_field)

    return prepare_data(
        filters,
        sales_users_data,
        sales_user_wise_item_groups,
        actual_data,
        date_field,
        period_list,
        sales_field,
    )
def get_columns(filters, period_list, partner_doctype):
    """Report column definitions: partner and item group links, then a
    target / achieved / variance triple per period, then overall totals."""
    # Quantity targets render as floats, amount targets as currency.
    fieldtype, options = "Currency", "currency"

    if filters.get("target_on") == "Quantity":
        fieldtype, options = "Float", ""

    columns = [
        {
            "fieldname": frappe.scrub(partner_doctype),
            "label": _(partner_doctype),
            "fieldtype": "Link",
            "options": partner_doctype,
            "width": 150,
        },
        {
            "fieldname": "item_group",
            "label": _("Item Group"),
            "fieldtype": "Link",
            "options": "Item Group",
            "width": 150,
        },
    ]

    for period in period_list:
        target_key = "target_{}".format(period.key)
        variance_key = "variance_{}".format(period.key)

        columns.extend(
            [
                {
                    "fieldname": target_key,
                    "label": _("Target ({})").format(period.label),
                    "fieldtype": fieldtype,
                    "options": options,
                    "width": 150,
                },
                {
                    "fieldname": period.key,
                    "label": _("Achieved ({})").format(period.label),
                    "fieldtype": fieldtype,
                    "options": options,
                    "width": 150,
                },
                {
                    "fieldname": variance_key,
                    "label": _("Variance ({})").format(period.label),
                    "fieldtype": fieldtype,
                    "options": options,
                    "width": 150,
                },
            ]
        )

    columns.extend(
        [
            {
                "fieldname": "total_target",
                "label": _("Total Target"),
                "fieldtype": fieldtype,
                "options": options,
                "width": 150,
            },
            {
                "fieldname": "total_achieved",
                "label": _("Total Achieved"),
                "fieldtype": fieldtype,
                "options": options,
                "width": 150,
            },
            {
                "fieldname": "total_variance",
                "label": _("Total Variance"),
                "fieldtype": fieldtype,
                "options": options,
                "width": 150,
            },
        ]
    )

    return columns
def prepare_data(
    filters,
    sales_users_data,
    sales_user_wise_item_groups,
    actual_data,
    date_field,
    period_list,
    sales_field,
):
    """Merge configured targets with achieved figures per (partner, item_group).

    Each partner's yearly target is spread over the periods using the
    monthly distribution percentages, then matching transactions from
    *actual_data* are summed into the same per-period buckets.
    """
    rows = {}

    target_qty_amt_field = "target_qty" if filters.get("target_on") == "Quantity" else "target_amount"
    qty_or_amount_field = "stock_qty" if filters.get("target_on") == "Quantity" else "base_net_amount"

    for d in sales_users_data:
        key = (d.parent, d.item_group)
        dist_data = get_periodwise_distribution_data(
            d.distribution_id, period_list, filters.get("period")
        )

        if key not in rows:
            rows.setdefault(key, {"total_target": 0, "total_achieved": 0, "total_variance": 0})

        details = rows[key]
        for period in period_list:
            p_key = period.key
            if p_key not in details:
                details[p_key] = 0

            target_key = "target_{}".format(p_key)
            variance_key = "variance_{}".format(p_key)
            # dist_data values are percentages of the yearly target.
            details[target_key] = (d.get(target_qty_amt_field) * dist_data.get(p_key)) / 100
            details[variance_key] = 0
            details["total_target"] += details[target_key]

            for r in actual_data:
                # Attribute a transaction to this bucket when the partner
                # matches, the date falls inside the period, and the item
                # group matches (or no item-group targets exist for the
                # partner).
                if (
                    r.get(sales_field) == d.parent
                    and period.from_date <= r.get(date_field)
                    and r.get(date_field) <= period.to_date
                    and (not sales_user_wise_item_groups.get(d.parent) or r.item_group == d.item_group)
                ):
                    details[p_key] += r.get(qty_or_amount_field, 0)

            details[variance_key] = details.get(p_key) - details.get(target_key)
            details["total_achieved"] += details.get(p_key)
            details["total_variance"] = details.get("total_achieved") - details.get("total_target")

    return rows
def METHOD_NAME(filters, sales_users_or_territory_data, date_field, sales_field):
    """Fetch achieved quantities/amounts from submitted documents.

    Returns item-level rows (item_group, stock_qty, base_net_amount, the
    partner field and the document date) within the selected fiscal year.
    """
    fiscal_year = get_fiscal_year(fiscal_year=filters.get("fiscal_year"), as_dict=1)
    dates = [fiscal_year.year_start_date, fiscal_year.year_end_date]

    select_field = "`tab{0}`.{1}".format(filters.get("doctype"), sales_field)
    child_table = "`tab{0}`".format(filters.get("doctype") + " Item")

    if sales_field == "sales_person":
        # Sales Person is linked via the child Sales Team table, not
        # directly on the parent document.
        select_field = "`tabSales Team`.sales_person"
        child_table = "`tab{0}`, `tabSales Team`".format(filters.get("doctype") + " Item")
        cond = """`tabSales Team`.parent = `tab{0}`.name and
            `tabSales Team`.sales_person in ({1}) """.format(
            filters.get("doctype"), ",".join(["%s"] * len(sales_users_or_territory_data))
        )
    else:
        cond = "`tab{0}`.{1} in ({2})".format(
            filters.get("doctype"), sales_field, ",".join(["%s"] * len(sales_users_or_territory_data))
        )

    # NOTE(review): data values are parameterized (%s), but table/field names
    # are interpolated from `filters` — safe only if "doctype" is validated
    # upstream; confirm callers restrict it to known doctypes.
    return frappe.db.sql(
        """ SELECT `tab{child_doc}`.item_group,
            `tab{child_doc}`.stock_qty, `tab{child_doc}`.base_net_amount,
            {select_field}, `tab{parent_doc}`.{date_field}
        FROM `tab{parent_doc}`, {child_table}
        WHERE
            `tab{child_doc}`.parent = `tab{parent_doc}`.name
            and `tab{parent_doc}`.docstatus = 1 and {cond}
            and `tab{parent_doc}`.{date_field} between %s and %s""".format(
            cond=cond,
            date_field=date_field,
            select_field=select_field,
            child_table=child_table,
            parent_doc=filters.get("doctype"),
            child_doc=filters.get("doctype") + " Item",
        ),
        tuple(sales_users_or_territory_data + dates),
        as_dict=1,
    )
def get_parents_data(filters, partner_doctype):
    """Fetch Target Detail rows for all partners of *partner_doctype*."""
    filters_dict = {"parenttype": partner_doctype}

    target_qty_amt_field = "target_qty" if filters.get("target_on") == "Quantity" else "target_amount"

    if filters.get("fiscal_year"):
        filters_dict["fiscal_year"] = filters.get("fiscal_year")

    return frappe.get_all(
        "Target Detail",
        filters=filters_dict,
        fields=["parent", "item_group", target_qty_amt_field, "fiscal_year", "distribution_id"],
    )
6,228 | test exec command stderr | import os
import pytest
import sys
from tempfile import TemporaryFile
from numpy.distutils import exec_command
from numpy.distutils.exec_command import get_pythonexe
from numpy.testing import tempdir, assert_, assert_warns, IS_WASM
# In python 3 stdout, stderr are text (unicode compliant) devices, so to
# emulate them import StringIO from the io module.
from io import StringIO
class redirect_stdout:
    """Swap ``sys.stdout`` for a replacement stream during a ``with`` block.

    On exit the replacement stream is flushed and closed and the previous
    ``sys.stdout`` is put back.
    """

    def __init__(self, stdout=None):
        # Fall back to the current sys.stdout when no stream is supplied.
        self._stdout = stdout if stdout else sys.stdout

    def __enter__(self):
        self._saved_stdout = sys.stdout
        sys.stdout = self._stdout

    def __exit__(self, exc_type, exc_value, traceback):
        self._stdout.flush()
        sys.stdout = self._saved_stdout
        # Closing the replacement cannot affect the already-restored stdout.
        self._stdout.close()
class redirect_stderr:
    """Swap ``sys.stderr`` for a replacement stream during a ``with`` block.

    On exit the replacement stream is flushed and closed and the previous
    ``sys.stderr`` is put back.
    """

    def __init__(self, stderr=None):
        # Fall back to the current sys.stderr when no stream is supplied.
        self._stderr = stderr if stderr else sys.stderr

    def __enter__(self):
        self._saved_stderr = sys.stderr
        sys.stderr = self._stderr

    def __exit__(self, exc_type, exc_value, traceback):
        self._stderr.flush()
        sys.stderr = self._saved_stderr
        # Closing the replacement cannot affect the already-restored stderr.
        self._stderr.close()
class emulate_nonposix:
    """Temporarily override ``os.name`` so non-posix code paths run.

    The previous value is restored on exit, even when the body raises.
    """

    def __init__(self, osname='non-posix'):
        self._fake_name = osname

    def __enter__(self):
        self._real_name = os.name
        os.name = self._fake_name

    def __exit__(self, exc_type, exc_value, traceback):
        os.name = self._real_name
def test_exec_command_stdout():
    """exec_command must not assume sys.stdout has a fileno() method."""
    # Regression test for gh-2999 and gh-2915.
    # There are several packages (nose, scipy.weave.inline, Sage inline
    # Fortran) that replace stdout, in which case it doesn't have a fileno
    # method. This is tested here, with a do-nothing command that fails if the
    # presence of fileno() is assumed in exec_command.

    # The code has a special case for posix systems, so if we are on posix test
    # both that the special case works and that the generic code works.

    # Test posix version:
    with redirect_stdout(StringIO()):
        with redirect_stderr(TemporaryFile()):
            with assert_warns(DeprecationWarning):
                exec_command.exec_command("cd '.'")

    if os.name == 'posix':
        # Test general (non-posix) version:
        with emulate_nonposix():
            with redirect_stdout(StringIO()):
                with redirect_stderr(TemporaryFile()):
                    with assert_warns(DeprecationWarning):
                        exec_command.exec_command("cd '.'")
def test_exec_command_stderr():
    """exec_command must not assume sys.stderr has a fileno() method.

    Mirror of test_exec_command_stdout with the streams swapped.  Renamed
    from the masked placeholder ``METHOD_NAME`` so pytest actually
    collects and runs it.
    """
    # Test posix version:
    with redirect_stdout(TemporaryFile(mode='w+')):
        with redirect_stderr(StringIO()):
            with assert_warns(DeprecationWarning):
                exec_command.exec_command("cd '.'")

    if os.name == 'posix':
        # Test general (non-posix) version:
        with emulate_nonposix():
            with redirect_stdout(TemporaryFile()):
                with redirect_stderr(StringIO()):
                    with assert_warns(DeprecationWarning):
                        exec_command.exec_command("cd '.'")
@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess")
class TestExecCommand:
    """End-to-end tests for the deprecated exec_command helper."""

    def setup_method(self):
        self.pyexe = get_pythonexe()

    def check_nt(self, **kws):
        """Windows-specific checks (cmd.exe syntax)."""
        s, o = exec_command.exec_command('cmd /C echo path=%path%')
        assert_(s == 0)
        assert_(o != '')

        s, o = exec_command.exec_command(
            '"%s" -c "import sys;sys.stderr.write(sys.platform)"' % self.pyexe)
        assert_(s == 0)
        assert_(o == 'win32')

    def check_posix(self, **kws):
        """POSIX shell checks: exit status, output capture, env handling."""
        s, o = exec_command.exec_command("echo Hello", **kws)
        assert_(s == 0)
        assert_(o == 'Hello')

        s, o = exec_command.exec_command('echo $AAA', **kws)
        assert_(s == 0)
        assert_(o == '')

        # Extra keyword arguments become environment variables for the child.
        s, o = exec_command.exec_command('echo "$AAA"', AAA='Tere', **kws)
        assert_(s == 0)
        assert_(o == 'Tere')

        s, o = exec_command.exec_command('echo "$AAA"', **kws)
        assert_(s == 0)
        assert_(o == '')

        if 'BBB' not in os.environ:
            os.environ['BBB'] = 'Hi'

            s, o = exec_command.exec_command('echo "$BBB"', **kws)
            assert_(s == 0)
            assert_(o == 'Hi')

            s, o = exec_command.exec_command('echo "$BBB"', BBB='Hey', **kws)
            assert_(s == 0)
            assert_(o == 'Hey')

            s, o = exec_command.exec_command('echo "$BBB"', **kws)
            assert_(s == 0)
            assert_(o == 'Hi')

            del os.environ['BBB']

            s, o = exec_command.exec_command('echo "$BBB"', **kws)
            assert_(s == 0)
            assert_(o == '')

        s, o = exec_command.exec_command('this_is_not_a_command', **kws)
        assert_(s != 0)
        assert_(o != '')

        s, o = exec_command.exec_command('echo path=$PATH', **kws)
        assert_(s == 0)
        assert_(o != '')

        s, o = exec_command.exec_command(
            '"%s" -c "import sys,os;sys.stderr.write(os.name)"' %
            self.pyexe, **kws)
        assert_(s == 0)
        assert_(o == 'posix')

    def check_basic(self, **kws):
        """Exit status and combined-output behaviour for python subcommands.

        BUG FIX: the parameter list used to be ``*kws`` (a tuple), which made
        every ``**kws`` expansion below raise TypeError and also rejected
        keyword callers such as ``check_basic(use_tee=0)``.
        """
        s, o = exec_command.exec_command(
            '"%s" -c "raise \'Ignore me.\'"' % self.pyexe, **kws)
        assert_(s != 0)
        assert_(o != '')

        s, o = exec_command.exec_command(
            '"%s" -c "import sys;sys.stderr.write(\'0\');'
            'sys.stderr.write(\'1\');sys.stderr.write(\'2\')"' %
            self.pyexe, **kws)
        assert_(s == 0)
        assert_(o == '012')

        s, o = exec_command.exec_command(
            '"%s" -c "import sys;sys.exit(15)"' % self.pyexe, **kws)
        assert_(s == 15)
        assert_(o == '')

        s, o = exec_command.exec_command(
            '"%s" -c "print(\'Heipa\'")' % self.pyexe, **kws)
        assert_(s == 0)
        assert_(o == 'Heipa')

    def check_execute_in(self, **kws):
        """exec_command honours the execute_in working-directory argument."""
        with tempdir() as tmpdir:
            fn = "file"
            tmpfile = os.path.join(tmpdir, fn)
            with open(tmpfile, 'w') as f:
                f.write('Hello')

            # Without execute_in the relative path does not resolve.
            s, o = exec_command.exec_command(
                '"%s" -c "f = open(\'%s\', \'r\'); f.close()"' %
                (self.pyexe, fn), **kws)
            assert_(s != 0)
            assert_(o != '')
            s, o = exec_command.exec_command(
                '"%s" -c "f = open(\'%s\', \'r\'); print(f.read()); '
                'f.close()"' % (self.pyexe, fn), execute_in=tmpdir, **kws)
            assert_(s == 0)
            assert_(o == 'Hello')

    def test_basic(self):
        """Run the platform-appropriate checks with and without tee."""
        with redirect_stdout(StringIO()):
            with redirect_stderr(StringIO()):
                with assert_warns(DeprecationWarning):
                    if os.name == "posix":
                        self.check_posix(use_tee=0)
                        self.check_posix(use_tee=1)
                    elif os.name == "nt":
                        self.check_nt(use_tee=0)
                        self.check_nt(use_tee=1)
                    self.check_execute_in(use_tee=0)
                    self.check_execute_in(use_tee=1)
6,229 | run test | #!/usr/bin/env python3
# Copyright (c) 2016-2022 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test version bits warning system.
Generate chains with block versions that appear to be signalling unknown
soft-forks, and test that warning alerts are generated.
"""
import os
import re
from test_framework.blocktools import create_block, create_coinbase
from test_framework.messages import msg_block
from test_framework.p2p import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
VB_PERIOD = 144 # versionbits period length for regtest
VB_THRESHOLD = 108 # versionbits activation threshold for regtest
VB_TOP_BITS = 0x20000000
VB_UNKNOWN_BIT = 27 # Choose a bit unassigned to any deployment
VB_UNKNOWN_VERSION = VB_TOP_BITS | (1 << VB_UNKNOWN_BIT)
WARN_UNKNOWN_RULES_ACTIVE = f"Unknown new rules activated (versionbit {VB_UNKNOWN_BIT})"
VB_PATTERN = re.compile("Unknown new rules activated.*versionbit")
class VersionBitsWarningTest(BitcoinTestFramework):
    """Checks that unknown versionbits signalling triggers node warnings."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1

    def setup_network(self):
        self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt")
        # Open and close to create zero-length file
        with open(self.alert_filename, 'w', encoding='utf8'):
            pass
        self.extra_args = [[f"-alertnotify=echo %s >> \"{self.alert_filename}\""]]
        self.setup_nodes()

    def send_blocks_with_version(self, peer, numblocks, version):
        """Send numblocks blocks to peer with version set"""
        tip = self.nodes[0].getbestblockhash()
        height = self.nodes[0].getblockcount()
        block_time = self.nodes[0].getblockheader(tip)["time"] + 1
        tip = int(tip, 16)

        for _ in range(numblocks):
            block = create_block(tip, create_coinbase(height + 1), block_time, version=version)
            block.solve()
            peer.send_message(msg_block(block))
            block_time += 1
            height += 1
            tip = block.sha256
        peer.sync_with_ping()

    def versionbits_in_alert_file(self):
        """Test that the versionbits warning has been written to the alert file."""
        with open(self.alert_filename, 'r', encoding='utf8') as f:
            alert_text = f.read()
        return VB_PATTERN.search(alert_text) is not None

    def run_test(self):
        # Renamed from the masked placeholder METHOD_NAME: the framework's
        # main() invokes self.run_test(), so with any other name the test
        # body would never execute.
        node = self.nodes[0]
        peer = node.add_p2p_connection(P2PInterface())

        node_deterministic_address = node.get_deterministic_priv_key().address
        # Mine one period worth of blocks
        self.generatetoaddress(node, VB_PERIOD, node_deterministic_address)

        self.log.info("Check that there is no warning if previous VB_BLOCKS have <VB_THRESHOLD blocks with unknown versionbits version.")
        # Build one period of blocks with < VB_THRESHOLD blocks signaling some unknown bit
        self.send_blocks_with_version(peer, VB_THRESHOLD - 1, VB_UNKNOWN_VERSION)
        self.generatetoaddress(node, VB_PERIOD - VB_THRESHOLD + 1, node_deterministic_address)

        # Check that we're not getting any versionbit-related errors in get*info()
        assert not VB_PATTERN.match(node.getmininginfo()["warnings"])
        assert not VB_PATTERN.match(node.getnetworkinfo()["warnings"])

        # Build one period of blocks with VB_THRESHOLD blocks signaling some unknown bit
        self.send_blocks_with_version(peer, VB_THRESHOLD, VB_UNKNOWN_VERSION)
        self.generatetoaddress(node, VB_PERIOD - VB_THRESHOLD, node_deterministic_address)

        self.log.info("Check that there is a warning if previous VB_BLOCKS have >=VB_THRESHOLD blocks with unknown versionbits version.")
        # Mine a period worth of expected blocks so the generic block-version warning
        # is cleared. This will move the versionbit state to ACTIVE.
        self.generatetoaddress(node, VB_PERIOD, node_deterministic_address)

        # Stop-start the node. This is required because bitcoind will only warn once about unknown versions or unknown rules activating.
        self.restart_node(0)

        # Generating one block guarantees that we'll get out of IBD
        self.generatetoaddress(node, 1, node_deterministic_address)
        self.wait_until(lambda: not node.getblockchaininfo()['initialblockdownload'])
        # Generating one more block will be enough to generate an error.
        self.generatetoaddress(node, 1, node_deterministic_address)
        # Check that get*info() shows the versionbits unknown rules warning
        assert WARN_UNKNOWN_RULES_ACTIVE in node.getmininginfo()["warnings"]
        assert WARN_UNKNOWN_RULES_ACTIVE in node.getnetworkinfo()["warnings"]
        # Check that the alert file shows the versionbits unknown rules warning
        self.wait_until(lambda: self.versionbits_in_alert_file())
if __name__ == '__main__':
VersionBitsWarningTest().main() |
6,230 | to dict | import json
from abc import abstractmethod
from typing import Any, Dict, Optional
from mlflow.data.dataset_source import DatasetSource
from mlflow.entities import Dataset as DatasetEntity
from mlflow.utils.annotations import experimental
@experimental
class Dataset:
    """
    Represents a dataset for use with MLflow Tracking, including the name, digest (hash),
    schema, and profile of the dataset as well as source information (e.g. the S3 bucket or
    managed Delta table from which the dataset was derived). Most datasets expose features
    and targets for training and evaluation as well.
    """

    def __init__(
        self, source: DatasetSource, name: Optional[str] = None, digest: Optional[str] = None
    ):
        """
        Base constructor for a dataset. All subclasses must call this
        constructor.
        """
        self._name = name
        self._source = source
        # Note: Subclasses should call super() once they've initialized all of
        # the class attributes necessary for digest computation
        self._digest = digest or self._compute_digest()

    @abstractmethod
    def _compute_digest(self) -> str:
        """
        Computes a digest for the dataset. Called if the user doesn't supply
        a digest when constructing the dataset.

        :return: A string digest for the dataset. We recommend a maximum digest length
                 of 10 characters with an ideal length of 8 characters.
        """

    @abstractmethod
    def to_dict(self, base_dict: Dict[str, str]) -> Dict[str, str]:
        """
        Extends *base_dict* with subclass-specific fields.  Renamed from the
        masked placeholder ``METHOD_NAME`` to the canonical ``to_dict`` so the
        ``to_json`` call site and subclass overrides line up.

        :param base_dict: A string dictionary of base information about the
                          dataset, including: name, digest, source, and source
                          type.
        :return: A string dictionary containing the following fields: name,
                 digest, source, source type, schema (optional), profile
                 (optional).
        """

    def to_json(self) -> str:
        """
        Obtains a JSON string representation of the
        :py:class:`Dataset <mlflow.data.dataset.Dataset>`.

        :return: A JSON string representation of the
                 :py:class:`Dataset <mlflow.data.dataset.Dataset>`.
        """
        base_dict = {
            "name": self.name,
            "digest": self.digest,
            "source": self._source.to_json(),
            "source_type": self._source._get_source_type(),
        }
        return json.dumps(self.to_dict(base_dict))

    @property
    def name(self) -> str:
        """
        The name of the dataset, e.g. ``"iris_data"``, ``"myschema.mycatalog.mytable@v1"``, etc.
        """
        if self._name is not None:
            return self._name
        else:
            return "dataset"

    @property
    def digest(self) -> str:
        """
        A unique hash or fingerprint of the dataset, e.g. ``"498c7496"``.
        """
        return self._digest

    @property
    def source(self) -> DatasetSource:
        """
        Information about the dataset's source, represented as an instance of
        :py:class:`DatasetSource <mlflow.data.dataset_source.DatasetSource>`. For example, this
        may be the S3 location or the name of the managed Delta Table from which the dataset
        was derived.
        """
        return self._source

    @property
    @abstractmethod
    def profile(self) -> Optional[Any]:
        """
        Optional summary statistics for the dataset, such as the number of rows in a table, the
        mean / median / std of each table column, etc.
        """

    @property
    @abstractmethod
    def schema(self) -> Optional[Any]:
        """
        Optional dataset schema, such as an instance of :py:class:`mlflow.types.Schema` representing
        the features and targets of the dataset.
        """

    def _to_mlflow_entity(self) -> DatasetEntity:
        """
        :return: A DatasetEntity instance representing the dataset.
        """
        dataset_json = json.loads(self.to_json())
        return DatasetEntity(
            name=dataset_json["name"],
            digest=dataset_json["digest"],
            source_type=dataset_json["source_type"],
            source=dataset_json["source"],
            schema=dataset_json.get("schema"),
            profile=dataset_json.get("profile"),
        )
6,231 | load | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
from pathlib import Path
from typing import Any
import pytest
import timm
import torch
import torchvision
from hydra.utils import instantiate
from lightning.pytorch import Trainer
from omegaconf import OmegaConf
from pytest import MonkeyPatch
from torch.nn import Module
from torchvision.models._api import WeightsEnum
from torchgeo.datasets import SSL4EOS12, SeasonalContrastS2
from torchgeo.models import ResNet18_Weights
from torchgeo.trainers import SimCLRTask
from .test_classification import ClassificationTestModel
def create_model(*args: Any, **kwargs: Any) -> Module:
return ClassificationTestModel(**kwargs)
def METHOD_NAME(url: str, *args: Any, **kwargs: Any) -> dict[str, Any]:
state_dict: dict[str, Any] = torch.METHOD_NAME(url)
return state_dict
class TestSimCLRTask:
@pytest.mark.parametrize(
"name",
[
"chesapeake_cvpr_prior_simclr",
"seco_simclr_1",
"seco_simclr_2",
"ssl4eo_l_simclr_1",
"ssl4eo_l_simclr_2",
"ssl4eo_s12_simclr_1",
"ssl4eo_s12_simclr_2",
],
)
def test_trainer(
self, monkeypatch: MonkeyPatch, name: str, fast_dev_run: bool
) -> None:
conf = OmegaConf.METHOD_NAME(os.path.join("tests", "conf", name + ".yaml"))
if name.startswith("seco"):
monkeypatch.setattr(SeasonalContrastS2, "__len__", lambda self: 2)
if name.startswith("ssl4eo_s12"):
monkeypatch.setattr(SSL4EOS12, "__len__", lambda self: 2)
# Instantiate datamodule
datamodule = instantiate(conf.datamodule)
# Instantiate model
monkeypatch.setattr(timm, "create_model", create_model)
model = instantiate(conf.module)
# Instantiate trainer
trainer = Trainer(
accelerator="cpu",
fast_dev_run=fast_dev_run,
log_every_n_steps=1,
max_epochs=1,
)
trainer.fit(model=model, datamodule=datamodule)
def test_version_warnings(self) -> None:
with pytest.warns(UserWarning, match="SimCLR v1 only uses 2 layers"):
SimCLRTask(version=1, layers=3)
with pytest.warns(UserWarning, match="SimCLR v1 does not use a memory bank"):
SimCLRTask(version=1, memory_bank_size=10)
with pytest.warns(UserWarning, match=r"SimCLR v2 uses 3\+ layers"):
SimCLRTask(version=2, layers=2)
with pytest.warns(UserWarning, match="SimCLR v2 uses a memory bank"):
SimCLRTask(version=2, memory_bank_size=0)
@pytest.fixture
def weights(self) -> WeightsEnum:
return ResNet18_Weights.SENTINEL2_ALL_MOCO
@pytest.fixture
def mocked_weights(
self, tmp_path: Path, monkeypatch: MonkeyPatch, weights: WeightsEnum
) -> WeightsEnum:
path = tmp_path / f"{weights}.pth"
model = timm.create_model(
weights.meta["model"], in_chans=weights.meta["in_chans"]
)
torch.save(model.state_dict(), path)
try:
monkeypatch.setattr(weights.value, "url", str(path))
except AttributeError:
monkeypatch.setattr(weights, "url", str(path))
monkeypatch.setattr(torchvision.models._api, "load_state_dict_from_url", METHOD_NAME)
return weights
def test_weight_file(self, checkpoint: str) -> None:
match = "num classes .* != num classes in pretrained model"
with pytest.warns(UserWarning, match=match):
SimCLRTask(model="resnet18", weights=checkpoint)
def test_weight_enum(self, mocked_weights: WeightsEnum) -> None:
match = "num classes .* != num classes in pretrained model"
with pytest.warns(UserWarning, match=match):
SimCLRTask(
model=mocked_weights.meta["model"],
weights=mocked_weights,
in_channels=mocked_weights.meta["in_chans"],
)
def test_weight_str(self, mocked_weights: WeightsEnum) -> None:
match = "num classes .* != num classes in pretrained model"
with pytest.warns(UserWarning, match=match):
SimCLRTask(
model=mocked_weights.meta["model"],
weights=str(mocked_weights),
in_channels=mocked_weights.meta["in_chans"],
)
@pytest.mark.slow
def test_weight_enum_download(self, weights: WeightsEnum) -> None:
match = "num classes .* != num classes in pretrained model"
with pytest.warns(UserWarning, match=match):
SimCLRTask(
model=weights.meta["model"],
weights=weights,
in_channels=weights.meta["in_chans"],
)
@pytest.mark.slow
def test_weight_str_download(self, weights: WeightsEnum) -> None:
match = "num classes .* != num classes in pretrained model"
with pytest.warns(UserWarning, match=match):
SimCLRTask(
model=weights.meta["model"],
weights=str(weights),
in_channels=weights.meta["in_chans"],
) |
6,232 | set config | #!/usr/bin/env python3
import json
import os
import subprocess
import time
import unittest
import socket
import requests
from osv import client
class HttpError(Exception):
    """Raised by Basetest.curl when an HTTP request returns non-200."""

    def __init__(self, code):
        # HTTP status code for callers to inspect.
        self.code = code
class Basetest(unittest.TestCase):
@classmethod
def METHOD_NAME(cls, parser):
    """Parse CLI options and set up the shared API client for the class.

    Firecracker overrides the launch script and uses a fixed guest IP.
    """
    cls.config = parser.parse_args()
    if cls.config.hypervisor == 'firecracker':
        module_base = os.path.join(os.path.realpath(os.path.dirname(__file__)), '..')
        cls.config.run_script = os.path.join(module_base, "..", "..", "scripts", "firecracker.py")
        cls.config.host = '172.16.0.2'
    cls._client = client.Client(cls.config)
@classmethod
def get_url(cls, uri):
    # Prepend the client's base URL to the given URI.
    return cls._client.get_url() + uri
@classmethod
def get_json_api(cls, name):
    """Load a JSON API definition from the configured json directory."""
    return cls.get_json_api_from_directory(cls.config.jsondir,name)
@classmethod
def get_json_api_from_directory(cls, directory, name):
    """Load and parse the JSON API definition file *name* from *directory*.

    Fix: use a context manager so the file handle is released even when
    ``json.load`` raises (the original leaked the handle on a parse error).
    """
    with open(os.path.join(directory, name)) as json_data:
        return json.load(json_data)
def assert_between(self, msg, low, high, val):
    """Assert low <= val <= high, reporting *msg* on failure."""
    self.assertGreaterEqual(val, low, msg=msg)
    self.assertLessEqual(val, high, msg=msg)
def assert_key_in(self, key, dic):
    """Assert *key* is present in *dic*, dumping the dict on failure."""
    self.assertTrue(key in dic, key + " not found in dictionary " + json.dumps(dic))
@classmethod
def get_api(cls, api_definition, nickname):
    """Return the first API entry whose operation nickname matches, else None."""
    matches = (
        entry
        for entry in api_definition["apis"]
        if entry["operations"][0]["nickname"] == nickname
    )
    return next(matches, None)
@classmethod
def path_by_nick(cls, api_definition, nickname):
    """Return the URL path of the API entry with the given nickname."""
    api = cls.get_api(api_definition, nickname)
    return api["path"]
@classmethod
def is_jvm_up(cls):
    # The JVM is considered up once the java-version endpoint responds.
    try:
        return bool(cls.curl(cls.path_by_nick(cls.jvm_api, "getJavaVersion")))
    except HttpError:
        return False
@classmethod
def is_reachable(cls):
    """Check the guest accepts TCP connections (and the JVM, when asked)."""
    s = socket.socket()
    try:
        s.connect((cls._client.get_host(), cls._client.get_port()))
        s.close()
        if cls.config.check_jvm:
            return cls.is_jvm_up()
        else:
            return True
    except socket.error as e:
        print(e)
        return False
def validate_path(self, api_definition, nickname, value):
    """Assert that GET on the nicknamed path returns exactly *value*."""
    path = self.path_by_nick(api_definition, nickname)
    self.assertEqual(value, self.curl(path))
def validate_path_regex(self, api_definition, nickname, expr):
    """Assert that GET on the nicknamed path matches the regex *expr*."""
    path = self.path_by_nick(api_definition, nickname)
    self.assertRegex(self.curl(path), expr)
def assertHttpError(self, url, code=404, method='GET', data=None):
    """Assert that the request fails with the given HTTP status code."""
    try:
        self.curl(url, method, data)
    except HttpError as e:
        if e.code != code:
            raise Exception('Expected error code %d but got %d' % (code, e.code))
    else:
        # The request succeeding at all is a failure here.
        raise Exception('Expected failure but request succeeded')
@classmethod
def curl(cls, api, method='GET', data=None, timeout=None):
    """Issue an HTTP request against the guest; return parsed JSON or None.

    Raises HttpError for any non-200 status; returns None on empty bodies.
    """
    url = cls.get_url(api)

    # Dispatch on the HTTP verb; unknown verbs raise KeyError.
    r = {
        'GET': requests.get,
        'POST': requests.post,
        'DELETE': requests.delete,
        'PUT': requests.put,
    }[method](url, data=data, timeout=timeout, **cls._client.get_request_kwargs())

    if r.status_code != 200:
        raise HttpError(r.status_code)
    if r.text:
        return r.json()
    @classmethod
    def get_client_cert_path(cls):
        # Path to the TLS client certificate used by the test client.
        return cls._client.get_client_cert_path()
    @classmethod
    def get_client_key_path(cls):
        # Path to the TLS client private key used by the test client.
        return cls._client.get_client_key_path()
    @classmethod
    def get_ca_cert_path(cls):
        # Path to the CA certificate used to verify the guest's TLS identity.
        return cls._client.get_cacert_path()
    @classmethod
    def exec_os(cls):
        """Launch the guest OS via the configured run script and return the
        subprocess.Popen handle. Argument layout depends on the hypervisor."""
        args = []
        if cls.config.hypervisor == 'firecracker':
            # Firecracker path: fixed memory/CPU sizing plus networking.
            args += [cls.config.run_script, "-m 2048M", "-n", "-c 4"]
            if cls.config.kernel_path:
                print('Using kernel at %s' % cls.config.kernel_path)
                args += ['-k', cls.config.kernel_path]
        elif cls.config.use_sudo:
            args += ["/usr/bin/sudo", cls.config.run_script, "-n"]
        else:
            # Default: user-mode networking with the API port forwarded.
            args += [cls.config.run_script, "--forward", "tcp::" + str(cls._client.get_port()) + "-:" + str(cls._client.get_port())]
        if cls.config.kernel_path and cls.config.hypervisor != 'firecracker':
            print('Using kernel at %s' % cls.config.kernel_path)
            args += ['-k', '--kernel-path', cls.config.kernel_path]
        if cls.config.cmd:
            # Optional command line to pass to the guest.
            args += ["-e", cls.config.cmd]
        if cls.config.test_image:
            args += ["-i", cls.config.test_image]
        return subprocess.Popen(args)
@classmethod
def shutdown(cls):
if cls.config.connect:
return
path = cls.path_by_nick(cls.os_api, "os_poweroff")
try:
cls.curl(path, method='POST', timeout=0.5)
except:
pass
retry = 10
while cls.os_process.poll() == None:
retry -= 1
if retry == 0:
raise Exception("Fail to shutdown server")
time.sleep(1)
@classmethod
def hard_shutdown(cls):
child_pid = subprocess.call(['pgrep', "-P", str(cls.os_process.pid)])
subprocess.call(['kill', '-9', str(child_pid)])
cls.os_process.kill()
cls.os_process.wait()
@classmethod
def start_image(cls):
if cls.config.check_jvm:
jvm_plugin_api_listings_path = \
os.path.join(os.path.realpath(os.path.dirname(__file__)),'../../httpserver-jvm-plugin/api-doc/listings')
cls.jvm_api = cls.get_json_api_from_directory(jvm_plugin_api_listings_path,"jvm.json")
cls.os_api = cls.get_json_api("os.json")
if not cls.config.connect:
cls.os_process = cls.exec_os()
retry = 10
while not cls.is_reachable():
time.sleep(1)
retry -= 1
if retry == 0:
cls.shutdown()
raise Exception("Server is down") |
#!/usr/bin/env python
#
# Copyright 2015,2016 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
import time
import struct
import pmt
from gnuradio import gr, gr_unittest, digital, blocks
from gnuradio.digital import packet_utils
class test_packet_format_fb(gr_unittest.TestCase):
    """QA for the async packet formatter / protocol parser blocks."""

    def setUp(self):
        # Must be named setUp so unittest runs it before each test; the
        # placeholder name meant self.tb was never created and every test
        # failed with AttributeError.
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def test_packet_format_async_default(self):
        """Format a payload with the default header and verify its fields."""
        ac = packet_utils.default_access_code
        hdr_format = digital.header_format_default(ac, 0)
        formatter = digital.protocol_formatter_async(hdr_format)
        snk_hdr = blocks.message_debug()
        snk_pld = blocks.message_debug()
        self.tb.msg_connect(formatter, 'header', snk_hdr, 'store')
        self.tb.msg_connect(formatter, 'payload', snk_pld, 'store')
        send_str = b"Hello World"
        send_pmt = pmt.make_u8vector(len(send_str), 0)
        for i in range(len(send_str)):
            pmt.u8vector_set(send_pmt, i, send_str[i])
        msg = pmt.cons(pmt.PMT_NIL, send_pmt)
        port = pmt.intern("in")
        formatter.to_basic_block()._post(port, msg)
        self.tb.start()
        # Poll until both the header and payload messages have arrived.
        while (snk_hdr.num_messages() < 1) or (snk_pld.num_messages() < 1):
            time.sleep(0.1)
        self.tb.stop()
        self.tb.wait()
        result_hdr_pmt = pmt.cdr(snk_hdr.get_message(0))
        result_pld_pmt = pmt.cdr(snk_pld.get_message(0))
        result_hdr = pmt.u8vector_elements(result_hdr_pmt)
        result_pld = pmt.u8vector_elements(result_pld_pmt)
        header = bytes(result_hdr)
        payload = bytes(result_pld)
        access_code = packet_utils.default_access_code_binary
        rx_access_code = header[0:len(access_code)]
        length = len(send_str)
        # Length field is a big-endian uint16 right after the access code.
        rx_length = struct.unpack_from(b"!H", header, len(access_code))[0]
        self.assertEqual(access_code, rx_access_code)
        self.assertEqual(length, rx_length)
        self.assertEqual(length, len(payload))
        self.assertEqual(send_str, payload[0:length])

    def test_packet_parse_default(self):
        """Parse a synthetic header at 1 and 4 bits/symbol and compare the
        reported payload symbol counts."""
        ac = packet_utils.default_access_code
        length = '0000000000000001'
        hdr_format_1bps = digital.header_format_default(ac, 0)
        hdr_format_4bps = digital.header_format_default(ac, 0, 4)
        ac_bits = [int(x) & 1 for x in ac]
        length_bits = [int(x) & 1 for x in length]
        # The default header repeats the length field twice.
        header_bits = ac_bits + length_bits + length_bits
        src_hdr = blocks.vector_source_b(header_bits)
        parser_1bps = digital.protocol_parser_b(hdr_format_1bps)
        parser_4bps = digital.protocol_parser_b(hdr_format_4bps)
        snk_hdr_1bps = blocks.message_debug()
        snk_hdr_4bps = blocks.message_debug()
        self.tb.connect(src_hdr, parser_1bps)
        self.tb.connect(src_hdr, parser_4bps)
        self.tb.msg_connect(parser_1bps, 'info', snk_hdr_1bps, 'store')
        self.tb.msg_connect(parser_4bps, 'info', snk_hdr_4bps, 'store')
        self.tb.start()
        while (
                snk_hdr_1bps.num_messages() < 1) or (
                snk_hdr_4bps.num_messages() < 1):
            time.sleep(0.1)
        self.tb.stop()
        self.tb.wait()
        result_1bps = snk_hdr_1bps.get_message(0)
        result_4bps = snk_hdr_4bps.get_message(0)
        self.assertTrue(pmt.dict_has_key(
            result_1bps, pmt.intern('payload symbols')))
        self.assertEqual(pmt.to_long(pmt.dict_ref(
            result_1bps, pmt.intern('payload symbols'), pmt.PMT_F)), 8)
        self.assertTrue(pmt.dict_has_key(
            result_4bps, pmt.intern('payload symbols')))
        self.assertEqual(pmt.to_long(pmt.dict_ref(
            result_4bps, pmt.intern('payload symbols'), pmt.PMT_F)), 2)

    def test_packet_format_async_counter(self):
        """Format with the counter header and verify bps and counter fields."""
        bps = 2
        ac = packet_utils.default_access_code
        hdr_format = digital.header_format_counter(ac, 0, 2)
        formatter = digital.protocol_formatter_async(hdr_format)
        snk_hdr = blocks.message_debug()
        snk_pld = blocks.message_debug()
        self.tb.msg_connect(formatter, 'header', snk_hdr, 'store')
        self.tb.msg_connect(formatter, 'payload', snk_pld, 'store')
        send_str = b"Hello World" + 1000 * b"xxx"
        send_pmt = pmt.make_u8vector(len(send_str), 0)
        for i in range(len(send_str)):
            pmt.u8vector_set(send_pmt, i, send_str[i])
        msg = pmt.cons(pmt.PMT_NIL, send_pmt)
        port = pmt.intern("in")
        formatter.to_basic_block()._post(port, msg)
        self.tb.start()
        while (snk_hdr.num_messages() < 1) or (snk_pld.num_messages() < 1):
            time.sleep(0.1)
        self.tb.stop()
        self.tb.wait()
        result_hdr_pmt = pmt.cdr(snk_hdr.get_message(0))
        result_pld_pmt = pmt.cdr(snk_pld.get_message(0))
        result_hdr = pmt.u8vector_elements(result_hdr_pmt)
        result_pld = pmt.u8vector_elements(result_pld_pmt)
        header = bytes(result_hdr)
        payload = bytes(result_pld)
        access_code = packet_utils.default_access_code_binary
        rx_access_code = header[0:len(access_code)]
        length = len(send_str)
        # Counter header layout: length (x2), then bps, then packet counter.
        rx_length = struct.unpack_from(b"!H", header, len(access_code))[0]
        rx_bps = struct.unpack_from(b"!H", header, len(access_code) + 4)[0]
        rx_counter = struct.unpack_from(b"!H", header, len(access_code) + 6)[0]
        self.assertEqual(access_code, rx_access_code)
        self.assertEqual(length, rx_length)
        self.assertEqual(bps, rx_bps)
        self.assertEqual(0, rx_counter)
        self.assertEqual(length, len(payload))
        self.assertEqual(send_str, payload[0:length])
if __name__ == '__main__':
    # Removed a stray " |" extraction artifact that broke the syntax here.
    gr_unittest.run(test_packet_format_fb)
# Standard Library
import sys
import traceback
from dataclasses import dataclass, field
from typing import List, Optional, Type, Union
# Sematic
from sematic.abstract_function import FunctionError
@dataclass
class ExceptionMetadata:
    """Serializable metadata describing an exception raised at runtime."""

    # Human-readable representation (message or formatted traceback text).
    repr: str
    # Exception class name, e.g. "ValueError".
    name: str
    # Module defining the exception class, e.g. "builtins".
    module: str
    # Classpaths of all Exception-derived base classes.
    # defaults to empty list for backwards compatibility for 0.17.0
    ancestors: List[str] = field(default_factory=list)

    @classmethod
    def from_exception(cls, exception: Exception) -> "ExceptionMetadata":
        """Build an ExceptionMetadata from a live exception instance."""
        return ExceptionMetadata(
            repr=str(exception),
            name=exception.__class__.__name__,
            module=exception.__class__.__module__,
            ancestors=cls.ancestors_from_exception(exception),
        )

    @classmethod
    def ancestors_from_exception(
        cls, exception: Union[BaseException, Type[BaseException]]
    ) -> List[str]:
        """For an Exception, return a list of all its base classes that inherit from
        Exception.
        Parameters
        ----------
        exception:
            The exception or exception class whose ancestors should be retrieved
        Returns
        -------
        A list of all base classes (and their base classes, etc.) that inherit
        from Exception. They will be in no particular order.
        """
        if isinstance(exception, BaseException):
            exception_type = type(exception)
        else:
            exception_type = exception
        ancestors = []
        to_traverse = [exception_type]
        self_classpath = f"{exception_type.__module__}.{exception_type.__name__}"
        # Breadth-ish traversal of the inheritance DAG; the visited check
        # keeps diamond inheritance from producing duplicates or loops.
        while len(to_traverse) > 0:
            class_ = to_traverse.pop()
            for base in class_.__bases__:
                if not issubclass(base, Exception):
                    # only interested in exception classes
                    continue
                classpath = f"{base.__module__}.{base.__name__}"
                if classpath not in ancestors and self_classpath != classpath:
                    ancestors.append(classpath)
                    to_traverse.append(base)
        return ancestors

    # Backwards-compatible alias: other code in this module still refers to
    # this classmethod by the old placeholder name.
    METHOD_NAME = ancestors_from_exception

    def is_instance_of(self, exception_type: Type[Exception]) -> bool:
        """Determine whether this exception corresponds to an instance of exception_type
        Parameters
        ----------
        exception_type:
            The type of the exception we are checking this against
        Returns
        -------
        True if this exception is an instance of the given type, False otherwise
        """
        matches_self = (
            self.name == exception_type.__name__
            and self.module == exception_type.__module__
        )
        if matches_self:
            return True
        classpath = f"{exception_type.__module__}.{exception_type.__name__}"
        return classpath in self.ancestors
def format_exception_for_run(
    exception: Optional[BaseException] = None,
) -> Optional[ExceptionMetadata]:
    """Format an exception trace into a string for usage in a run.

    Parameters
    ----------
    exception:
        The exception to describe. When None, the exception currently being
        handled (sys.exc_info) is used instead.

    Returns
    -------
    Optional[ExceptionMetadata]
    If an exceptions is found on the traceback, an `ExceptionMetadata` object is
    instantiated to describe it. If not, None is returned.
    """
    if exception is None:
        _, exception, __ = sys.exc_info()
    if exception is None:
        # the failure was caused by another issue,
        # not by an exception on the traceback
        return None
    if isinstance(exception, FunctionError) and exception.__cause__ is not None:
        # Don't display to the user the parts of the stack from Sematic
        # resolver if the root cause was a failure in Function code.
        tb_exception = traceback.TracebackException.from_exception(exception.__cause__)
        repr_ = "\n".join(tb_exception.format())
        # Report metadata for the root cause rather than the wrapper.
        exception = exception.__cause__
    else:
        repr_ = traceback.format_exc()
    assert isinstance(exception, BaseException)
    return ExceptionMetadata(
        repr=repr_,
        name=exception.__class__.__name__,
        module=exception.__class__.__module__,
        ancestors=ExceptionMetadata.METHOD_NAME(exception),
    )
class InfrastructureError(Exception):
    """An error originated in compute infrastructure such as Kubernetes."""
class KubernetesError(InfrastructureError):
    """An error originated in external Kubernetes compute infrastructure."""
class ExternalResourceError(InfrastructureError):
    """An error originated in compute infrastructure for an external resource."""
class IllegalStateTransitionError(Exception):
    """A state-machine object made a state transition that was not allowed."""
class IllegalUseOfFutureError(Exception):
    """A future was given where one was not expected."""
class NotInSematicFuncError(RuntimeError):
    """An API intended for usage in a Sematic func was used outside a Sematic func."""
# NOTE: this intentionally-or-not shadows the builtin TimeoutError within
# this module; callers importing it by name get this RuntimeError subclass.
class TimeoutError(RuntimeError):
    """A timeout has expired."""
class PipelineRunError(Exception):
    """The pipeline run has failed.

    Should only be generated to halt execution. Should not be handled.

    Parameters
    ----------
    exception_metadata:
        Metadata describing an exception which occurred during code execution
        (Pipeline, Runner, Driver)
    external_exception_metadata:
        Metadata describing an exception which occurred in external compute
        infrastructure
    """

    def __init__(
        self,
        exception_metadata: Optional[ExceptionMetadata] = None,
        external_exception_metadata: Optional[ExceptionMetadata] = None,
    ):
        make_msg = PipelineRunError._make_metadata_msg
        self._msg = (
            "The pipeline run failed due to previous errors!"
            + make_msg("\n\nPipeline failure:\n", exception_metadata)
            + make_msg("\n\nExternal failure:\n", external_exception_metadata)
        )
        super().__init__(self._msg)

    @staticmethod
    def _make_metadata_msg(
        msg_prefix: str, metadata: Optional[ExceptionMetadata]
    ) -> str:
        # Empty string when there is no metadata (or no repr) to report.
        if metadata is None or metadata.repr is None:
            return ""
        return f"{msg_prefix}{metadata.repr}"
# In case anybody is using this still.
# should deprecate it after resolver -> runner rename
# has been done for a while.
# TODO: https://github.com/sematic-ai/sematic/issues/975
# Legacy alias: PipelineRunError was formerly named ResolutionError.
ResolutionError = PipelineRunError
class MissingPluginError(Exception):
    """
    Exception to indicate a missing plug-in.
    """

    def __init__(self, plugin_path: str):
        super().__init__(
            f"Unable to find plug-in {plugin_path}. Module or class is missing."
        )
class UnsupportedUsageError(Exception):
    """A library is being used in an unsupported context

    Some examples of when this might be used:
    - something tries to use a feature that is only available for cloud runs,
      but with a non-cloud run
    - something tries to use GPUs when the server is not able to launch jobs
      using GPUs
    - something tries to interface with a component with a non-supported version
    """
class UnsupportedVersionError(UnsupportedUsageError):
    """Code is being asked to interface with a component with a non-supported version."""
class DataIntegrityError(Exception):
    """
    A data integrity error.
    """
class CancellationError(Exception):
    """The pipeline run was cancelled."""
    # Removed a stray " |" extraction artifact that trailed the class body.
from pathlib import Path
import os
import re
import discord
from models.deepl_model import TranslationModel
from services.moderations_service import ModerationOptions
from services.usage_service import UsageService
from models.openai_model import ImageSize, Model, ModelLimits, Models, Mode
from services.environment_service import EnvService
# Shared singletons used by the autocompleters below; DATA_DIR overrides the
# working directory for usage-tracking storage.
usage_service = UsageService(Path(os.environ.get("DATA_DIR", os.getcwd())))
model = Model(usage_service)
class Settings_autocompleter:
    """autocompleter for the settings command"""
    async def get_settings(ctx: discord.AutocompleteContext):
        """get settings for the settings option"""
        # Public (non-underscore-prefixed, non-hidden) attributes of the
        # shared model are the settable parameters.
        SETTINGS = [
            re.sub("^_", "", key)
            for key in model.__dict__.keys()
            if key not in model._hidden_attributes
        ]
        # Discord caps autocomplete choices at 25 entries.
        return [
            parameter
            for parameter in SETTINGS
            if parameter.startswith(ctx.value.lower())
        ][:25]
    async def get_value(
        ctx: discord.AutocompleteContext,
    ):  # Behaves a bit weird if you go back and edit the parameter without typing in a new command
        """gets valid values for the value option"""
        # Per-parameter candidate values; numeric ranges come from the model
        # limits so the UI stays consistent with validation.
        values = {
            "max_conversation_length": [
                str(num)
                for num in range(
                    ModelLimits.MIN_CONVERSATION_LENGTH,
                    ModelLimits.MAX_CONVERSATION_LENGTH + 1,
                    2,
                )
            ],
            "num_images": [
                str(num)
                for num in range(
                    ModelLimits.MIN_NUM_IMAGES, ModelLimits.MAX_NUM_IMAGES + 1
                )
            ],
            "mode": Mode.ALL_MODES,
            "model": Models.TEXT_MODELS,
            "low_usage_mode": ["True", "False"],
            "image_size": ImageSize.ALL_SIZES,
            "summarize_conversation": ["True", "False"],
            "welcome_message_enabled": ["True", "False"],
            "num_static_conversation_items": [
                str(num)
                for num in range(
                    ModelLimits.MIN_NUM_STATIC_CONVERSATION_ITEMS,
                    ModelLimits.MAX_NUM_STATIC_CONVERSATION_ITEMS + 1,
                )
            ],
            "num_conversation_lookback": [
                str(num)
                for num in range(
                    ModelLimits.MIN_NUM_CONVERSATION_LOOKBACK,
                    ModelLimits.MAX_NUM_CONVERSATION_LOOKBACK + 1,
                )
            ],
            "summarize_threshold": [
                str(num)
                for num in range(
                    ModelLimits.MIN_SUMMARIZE_THRESHOLD,
                    ModelLimits.MAX_SUMMARIZE_THRESHOLD + 1,
                    50,
                )
            ],
            "type": ["warn", "delete"],
            "use_org": ["True", "False"],
        }
        options = values.get(ctx.options["parameter"], [])
        if options:
            return [value for value in options if value.startswith(ctx.value.lower())]
        await ctx.interaction.response.defer()  # defer so the autocomplete in int values doesn't error but rather just says not found
        return []
    async def get_models(
        ctx: discord.AutocompleteContext,
    ):
        """Gets all models"""
        models = [
            value for value in Models.TEXT_MODELS if value.startswith(ctx.value.lower())
        ]
        return models
    async def get_index_and_search_models(
        ctx: discord.AutocompleteContext,
    ):
        # Only chat-capable model families are valid for index/search.
        models = Models.CHATGPT_MODELS + Models.GPT4_MODELS
        return [value for value in models if value.startswith(ctx.value.lower())]
    async def get_converse_models(
        ctx: discord.AutocompleteContext,
    ):
        """Gets all models"""
        models = [
            value for value in Models.TEXT_MODELS if value.startswith(ctx.value.lower())
        ]
        return models
    async def get_value_moderations(
        ctx: discord.AutocompleteContext,
    ):  # Behaves a bit weird if you go back and edit the parameter without typing in a new command
        """gets valid values for the type option"""
        return [
            value
            for value in ModerationOptions.OPTIONS
            if value.startswith(ctx.value.lower())
        ]
    # NOTE(review): this method takes *self* while its siblings do not —
    # confirm how callers reference it before normalizing the signatures.
    async def get_value_alert_id_channel(self, ctx: discord.AutocompleteContext):
        """gets valid values for the channel option"""
        return [
            channel.name
            for channel in ctx.interaction.guild.channels
            if channel.name.startswith(ctx.value.lower())
        ]
class Translations_autocompleter:
    """autocompleter for the translations command"""

    async def get_languages(ctx: discord.AutocompleteContext):
        """gets valid values for the language option"""
        return [
            language
            for language in TranslationModel.get_all_country_names()
            if language.lower().startswith(ctx.value.lower())
        ]

    async def get_formality_values(self, ctx: discord.AutocompleteContext):
        """gets valid values for the formality option"""
        # NOTE(review): unlike get_languages this takes *self*; confirm the
        # caller passes an instance-bound reference, otherwise the parameter
        # should be dropped to match the sibling's signature.
        return [
            value
            for value in ["prefer_more", "prefer_less"]
            if value.lower().startswith(ctx.value.lower())
        ]
class File_autocompleter:
    """Autocompleter for the opener command"""

    @staticmethod
    def _matching_files(subpath, prefix, empty_result):
        """List up to 25 files in the shared folder *subpath* whose names
        start with *prefix*; return *empty_result* when the folder is
        missing or unreadable. Consolidates four copy-pasted listers."""
        try:
            return [
                file
                for file in os.listdir(EnvService.find_shared_file(subpath))
                if file.startswith(prefix)
            ][:25]  # Discord caps autocomplete choices at 25 entries.
        except Exception:
            return empty_result

    async def get_openers(ctx: discord.AutocompleteContext):
        """get all files in the openers folder"""
        return File_autocompleter._matching_files(
            "openers", ctx.value.lower(), ["No 'openers' folder"]
        )

    async def get_user_indexes(ctx: discord.AutocompleteContext):
        """get all files in the indexes folder"""
        return File_autocompleter._matching_files(
            f"indexes/{str(ctx.interaction.user.id)}/",
            ctx.value.lower(),
            ["No user indexes found, add an index"],
        )

    async def get_server_indexes(ctx: discord.AutocompleteContext):
        """get all files in the indexes folder"""
        return File_autocompleter._matching_files(
            f"indexes/{str(ctx.interaction.guild.id)}/",
            ctx.value.lower(),
            ["No server indexes found, add an index"],
        )

    async def get_user_search_indexes(ctx: discord.AutocompleteContext):
        """get all files in the indexes folder"""
        return File_autocompleter._matching_files(
            f"indexes/{str(ctx.interaction.user.id)}_search/",
            ctx.value.lower(),
            ["No user indexes found, add an index"],
        )
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Tests for coverage.sqlitedb"""
from __future__ import annotations
from typing import NoReturn
from unittest import mock
import pytest
import coverage.sqlitedb
from coverage.debug import DebugControlString
from coverage.exceptions import DataError
from coverage.sqlitedb import SqliteDb
from tests.coveragetest import CoverageTest
from tests.helpers import FailingProxy
# Minimal fixture shared by every test: one table containing a single row.
DB_INIT = """\
create table name (first text, last text);
insert into name (first, last) values ("pablo", "picasso");
"""
class SqliteDbTest(CoverageTest):
    """Tests of tricky parts of SqliteDb."""

    def test_error_reporting(self) -> None:
        msg = "Couldn't use data file 'test.db': no such table: bar"
        with SqliteDb("test.db", DebugControlString(options=["sql"])) as db:
            with pytest.raises(DataError, match=msg):
                with db.execute("select foo from bar"):
                    # Entering the context manager raises the error, this line doesn't run:
                    pass  # pragma: not covered

    def test_retry_execute(self) -> None:
        # One transient failure: execute should retry and succeed.
        with SqliteDb("test.db", DebugControlString(options=["sql"])) as db:
            db.executescript(DB_INIT)
            proxy = FailingProxy(db.con, "execute", [Exception("WUT")])
            with mock.patch.object(db, "con", proxy):
                with db.execute("select first from name order by 1") as cur:
                    assert list(cur) == [("pablo",)]

    def test_retry_execute_failure(self) -> None:
        # Two consecutive failures: the single retry is exhausted and the
        # second error propagates.
        with SqliteDb("test.db", DebugControlString(options=["sql"])) as db:
            db.executescript(DB_INIT)
            proxy = FailingProxy(db.con, "execute", [Exception("WUT"), RuntimeError("Fake")])
            with mock.patch.object(db, "con", proxy):
                with pytest.raises(RuntimeError, match="Fake"):
                    with db.execute("select first from name order by 1"):
                        # Entering the context manager raises the error, this line doesn't run:
                        pass  # pragma: not covered

    def test_retry_executemany_void(self) -> None:
        with SqliteDb("test.db", DebugControlString(options=["sql"])) as db:
            db.executescript(DB_INIT)
            proxy = FailingProxy(db.con, "executemany", [Exception("WUT")])
            with mock.patch.object(db, "con", proxy):
                db.executemany_void(
                    "insert into name (first, last) values (?, ?)",
                    [("vincent", "van gogh")],
                )
            with db.execute("select first from name order by 1") as cur:
                assert list(cur) == [("pablo",), ("vincent",)]

    def test_retry_executemany_void_failure(self) -> None:
        with SqliteDb("test.db", DebugControlString(options=["sql"])) as db:
            db.executescript(DB_INIT)
            proxy = FailingProxy(db.con, "executemany", [Exception("WUT"), RuntimeError("Fake")])
            with mock.patch.object(db, "con", proxy):
                with pytest.raises(RuntimeError, match="Fake"):
                    db.executemany_void(
                        "insert into name (first, last) values (?, ?)",
                        [("vincent", "van gogh")],
                    )

    def test_open_fails_on_bad_db(self) -> None:
        self.make_file("bad.db", "boogers")

        # Local stand-in for builtins open(); restored from the masked
        # placeholder name so the test reads sensibly.
        def fake_failing_open(filename: str, mode: str) -> NoReturn:
            assert (filename, mode) == ("bad.db", "rb")
            raise RuntimeError("No you can't!")

        with mock.patch.object(coverage.sqlitedb, "open", fake_failing_open):
            msg = "Couldn't use data file 'bad.db': file is not a database"
            with pytest.raises(DataError, match=msg):
                with SqliteDb("bad.db", DebugControlString(options=["sql"])):
                    pass  # pragma: not covered

    def test_execute_void_can_allow_failure(self) -> None:
        with SqliteDb("fail.db", DebugControlString(options=["sql"])) as db:
            db.executescript(DB_INIT)
            proxy = FailingProxy(db.con, "execute", [Exception("WUT")])
            with mock.patch.object(db, "con", proxy):
                db.execute_void("select x from nosuchtable", fail_ok=True)

    def test_execute_void_can_refuse_failure(self) -> None:
        with SqliteDb("fail.db", DebugControlString(options=["sql"])) as db:
            db.executescript(DB_INIT)
            proxy = FailingProxy(db.con, "execute", [Exception("WUT")])
            with mock.patch.object(db, "con", proxy):
                msg = "Couldn't use data file 'fail.db': no such table: nosuchtable"
                with pytest.raises(DataError, match=msg):
                    db.execute_void("select x from nosuchtable", fail_ok=False)
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
# Public API of this generated module.
__all__ = [
    'ListWorkspaceNotebookAccessTokenResult',
    'AwaitableListWorkspaceNotebookAccessTokenResult',
    'list_workspace_notebook_access_token',
    'list_workspace_notebook_access_token_output',
]
@pulumi.output_type
class ListWorkspaceNotebookAccessTokenResult:
    """Result of the listWorkspaceNotebookAccessToken invoke: notebook access
    and refresh tokens plus the endpoints they are valid for."""
    def __init__(__self__, access_token=None, expires_in=None, host_name=None, notebook_resource_id=None, public_dns=None, refresh_token=None, scope=None, token_type=None):
        if access_token and not isinstance(access_token, str):
            raise TypeError("Expected argument 'access_token' to be a str")
        pulumi.set(__self__, "access_token", access_token)
        if expires_in and not isinstance(expires_in, int):
            raise TypeError("Expected argument 'expires_in' to be a int")
        pulumi.set(__self__, "expires_in", expires_in)
        if host_name and not isinstance(host_name, str):
            raise TypeError("Expected argument 'host_name' to be a str")
        pulumi.set(__self__, "host_name", host_name)
        if notebook_resource_id and not isinstance(notebook_resource_id, str):
            raise TypeError("Expected argument 'notebook_resource_id' to be a str")
        pulumi.set(__self__, "notebook_resource_id", notebook_resource_id)
        if public_dns and not isinstance(public_dns, str):
            raise TypeError("Expected argument 'public_dns' to be a str")
        pulumi.set(__self__, "public_dns", public_dns)
        if refresh_token and not isinstance(refresh_token, str):
            raise TypeError("Expected argument 'refresh_token' to be a str")
        pulumi.set(__self__, "refresh_token", refresh_token)
        if scope and not isinstance(scope, str):
            raise TypeError("Expected argument 'scope' to be a str")
        pulumi.set(__self__, "scope", scope)
        if token_type and not isinstance(token_type, str):
            raise TypeError("Expected argument 'token_type' to be a str")
        pulumi.set(__self__, "token_type", token_type)
    @property
    @pulumi.getter(name="accessToken")
    def access_token(self) -> str:
        """The notebook access token."""
        return pulumi.get(self, "access_token")
    @property
    @pulumi.getter(name="expiresIn")
    def expires_in(self) -> int:
        """Token lifetime in seconds."""
        return pulumi.get(self, "expires_in")
    @property
    @pulumi.getter(name="hostName")
    def host_name(self) -> str:
        """Host name the token is scoped to."""
        return pulumi.get(self, "host_name")
    @property
    @pulumi.getter(name="notebookResourceId")
    def notebook_resource_id(self) -> str:
        """Resource ID of the notebook instance."""
        return pulumi.get(self, "notebook_resource_id")
    @property
    @pulumi.getter(name="publicDns")
    def public_dns(self) -> str:
        """Public DNS name of the notebook endpoint."""
        return pulumi.get(self, "public_dns")
    @property
    @pulumi.getter(name="refreshToken")
    def refresh_token(self) -> str:
        """Token used to refresh the access token."""
        return pulumi.get(self, "refresh_token")
    @property
    @pulumi.getter
    def scope(self) -> str:
        """OAuth scope of the token."""
        return pulumi.get(self, "scope")
    @property
    @pulumi.getter(name="tokenType")
    def token_type(self) -> str:
        """Token type (per the OAuth response)."""
        return pulumi.get(self, "token_type")
class AwaitableListWorkspaceNotebookAccessTokenResult(ListWorkspaceNotebookAccessTokenResult):
    """Awaitable wrapper so the synchronously-resolved result can be awaited
    (standard pulumi-generated compatibility shim)."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The `if False: yield` makes this a generator without ever yielding;
        # awaiting it immediately returns a plain result copy.
        if False:
            yield self
        return ListWorkspaceNotebookAccessTokenResult(
            access_token=self.access_token,
            expires_in=self.expires_in,
            host_name=self.host_name,
            notebook_resource_id=self.notebook_resource_id,
            public_dns=self.public_dns,
            refresh_token=self.refresh_token,
            scope=self.scope,
            token_type=self.token_type)
def list_workspace_notebook_access_token(resource_group_name: Optional[str] = None,
                                         workspace_name: Optional[str] = None,
                                         opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListWorkspaceNotebookAccessTokenResult:
    """
    return notebook access token and refresh token

    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str workspace_name: Name of Azure Machine Learning workspace.
    """
    # Restored the function name declared in __all__; the masked placeholder
    # broke the module's exported API and the decorator reference below.
    __args__ = dict()
    __args__['resourceGroupName'] = resource_group_name
    __args__['workspaceName'] = workspace_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:machinelearningservices/v20230401preview:listWorkspaceNotebookAccessToken', __args__, opts=opts, typ=ListWorkspaceNotebookAccessTokenResult).value
    return AwaitableListWorkspaceNotebookAccessTokenResult(
        access_token=pulumi.get(__ret__, 'access_token'),
        expires_in=pulumi.get(__ret__, 'expires_in'),
        host_name=pulumi.get(__ret__, 'host_name'),
        notebook_resource_id=pulumi.get(__ret__, 'notebook_resource_id'),
        public_dns=pulumi.get(__ret__, 'public_dns'),
        refresh_token=pulumi.get(__ret__, 'refresh_token'),
        scope=pulumi.get(__ret__, 'scope'),
        token_type=pulumi.get(__ret__, 'token_type'))


@_utilities.lift_output_func(list_workspace_notebook_access_token)
def list_workspace_notebook_access_token_output(resource_group_name: Optional[pulumi.Input[str]] = None,
                                                workspace_name: Optional[pulumi.Input[str]] = None,
                                                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[ListWorkspaceNotebookAccessTokenResult]:
    """
    return notebook access token and refresh token

    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str workspace_name: Name of Azure Machine Learning workspace.
    """
    ...
import warnings
from sqlalchemy import and_, asc, cast, select, text, exists, Identity
from db.columns.exceptions import DynamicDefaultWarning
from db.connection import execute_msar_func_with_engine
from db.tables.operations.select import reflect_table_from_oid
from db.utils import execute_statement, get_pg_catalog_table
def get_column_attnum_from_names_as_map(table_oid, column_names, engine, metadata, connection_to_use=None):
    """Return a {column_name: attnum} map for the given names in table_oid.

    Names not present in the table are simply absent from the result.
    NOTE(review): relies on mapping-style row access (row['attname']) —
    confirm this matches the SQLAlchemy version in use.
    """
    statement = _get_columns_attnum_from_names(table_oid, column_names, engine, metadata=metadata)
    attnums_tuple = execute_statement(engine, statement, connection_to_use).fetchall()
    name_attnum_map = {attnum_tuple['attname']: attnum_tuple['attnum'] for attnum_tuple in attnums_tuple}
    return name_attnum_map
def get_columns_attnum_from_names(table_oid, column_names, engine, metadata, connection_to_use=None):
    """
    Returns the respective list of attnum of the column names passed.
    The order is based on the column order in the table and not by the order of the column names argument.
    """
    statement = _get_columns_attnum_from_names(
        table_oid, column_names, engine=engine, metadata=metadata
    )
    rows = execute_statement(engine, statement, connection_to_use).fetchall()
    return [row[0] for row in rows]
def get_column_attnum_from_name(table_oid, column_name, engine, metadata, connection_to_use=None):
    """Return the attnum of a single named column in the given table."""
    statement = _get_columns_attnum_from_names(
        table_oid, [column_name], engine=engine, metadata=metadata
    )
    result = execute_statement(engine, statement, connection_to_use)
    return result.scalar()
def _get_columns_attnum_from_names(table_oid, column_names, engine, metadata):
    """Build a select of (attnum, attname) from pg_attribute for the given
    table and column names, ordered by attnum (table column order)."""
    pg_attribute = get_pg_catalog_table("pg_attribute", engine=engine, metadata=metadata)
    sel = select(pg_attribute.c.attnum, pg_attribute.c.attname).where(
        and_(
            pg_attribute.c.attrelid == table_oid,
            pg_attribute.c.attname.in_(column_names)
        )
    ).order_by(asc(pg_attribute.c.attnum))
    return sel
def get_column_attnums_from_tables(table_oids, engine, metadata, connection_to_use=None):
    """Return (attnum, table_oid) rows for every user column of the given
    tables. Restored from the masked placeholder name."""
    pg_attribute = get_pg_catalog_table("pg_attribute", engine, metadata=metadata)
    sel = select(pg_attribute.c.attnum, pg_attribute.c.attrelid.label('table_oid')).where(
        and_(
            pg_attribute.c.attrelid.in_(table_oids),
            # Ignore system columns
            pg_attribute.c.attnum > 0,
            # Ignore removed columns
            pg_attribute.c.attisdropped.is_(False)
        )
    )
    results = execute_statement(engine, sel, connection_to_use).fetchall()
    return results
def get_map_of_attnum_and_table_oid_to_column_name(table_oids, engine, metadata, connection_to_use=None):
    """
    Order determined by the column order in the table.
    """
    col_triples = _get_triples_of_column_name_and_attnum_and_table_oid(
        table_oids, None, engine, metadata, connection_to_use
    )
    mapping = {}
    for column_name, attnum, table_oid in col_triples:
        mapping[(attnum, table_oid)] = column_name
    return mapping
def get_column_names_from_attnums(table_oid, attnums, engine, metadata, connection_to_use=None):
    """Return the column names for the given attnums, in table column order."""
    attnum_to_name = get_map_of_attnum_to_column_name(
        table_oid, attnums, engine, metadata, connection_to_use
    )
    return list(attnum_to_name.values())
def get_map_of_attnum_to_column_name(table_oid, attnums, engine, metadata, connection_to_use=None):
    """
    Order determined by the column order in the table.
    """
    mapping = {}
    for column_name, attnum, _ in _get_triples_of_column_name_and_attnum_and_table_oid(
        [table_oid], attnums, engine, metadata, connection_to_use
    ):
        mapping[attnum] = column_name
    return mapping
def _get_triples_of_column_name_and_attnum_and_table_oid(
    table_oids, attnums, engine, metadata, connection_to_use
):
    """Run the (name, attnum, table oid) query and return every row."""
    statement = _statement_for_triples_of_column_name_and_attnum_and_table_oid(
        table_oids, attnums, engine, metadata
    )
    result = execute_statement(engine, statement, connection_to_use)
    return result.fetchall()
def get_column_default(table_oid, attnum, engine, metadata, connection_to_use=None):
    """Return the default value of a column, or None when it has no server default."""
    default_dict = get_column_default_dict(
        table_oid,
        attnum,
        engine,
        metadata=metadata,
        connection_to_use=connection_to_use,
    )
    if default_dict is None:
        return None
    return default_dict['value']
def get_column_default_dict(table_oid, attnum, engine, metadata, connection_to_use=None):
    """
    Return {"value": ..., "is_dynamic": ...} describing a column's server default.

    Returns None when the column has no server default. Whether the default is
    dynamic is determined by the DB function `is_default_possibly_dynamic`; for
    dynamic defaults the raw SQL text is returned (not evaluated) and a
    DynamicDefaultWarning is emitted.
    """
    column = get_column_from_oid_and_attnum(
        table_oid=table_oid,
        attnum=attnum,
        engine=engine,
        metadata=metadata,
        connection_to_use=connection_to_use,
    )
    default = column.server_default
    # No server default at all: nothing to report.
    if default is None:
        return
    is_dynamic = execute_msar_func_with_engine(
        engine, 'is_default_possibly_dynamic', table_oid, attnum
    ).fetchone()[0]
    # Identity columns carry no SQL text of their own; use the marker 'identity'.
    sql_text = str(default.arg) if not isinstance(default, Identity) else 'identity'
    if is_dynamic:
        warnings.warn(
            "Dynamic column defaults are read only", DynamicDefaultWarning
        )
        default_value = sql_text
    else:
        # Defaults are often stored as text with SQL casts appended
        # Ex: "'test default string'::character varying" or "'2020-01-01'::date"
        # Here, we execute the cast to get the proper python value
        default_value = execute_statement(
            engine,
            select(cast(text(sql_text), column.type)),
            connection_to_use
        ).scalar()
    return {"value": default_value, "is_dynamic": is_dynamic}
def determine_whether_column_contains_data(
    table_oid, column_name, engine, metadata, connection_to_use=None
):
    """
    Given a column, return True if it contains data, False otherwise.

    "Contains data" means at least one row has a non-NULL value in the column.
    """
    sa_table = reflect_table_from_oid(
        table_oid, engine, metadata=metadata, connection_to_use=connection_to_use,
    )
    # Use the explicit IS NOT NULL operator instead of `!= None`, which relied
    # on SQLAlchemy operator overloading and needed a lint suppression.
    sel = select(exists(1).where(sa_table.columns[column_name].is_not(None)))
    contains_data = execute_statement(engine, sel, connection_to_use).scalar()
    return contains_data
def get_column_from_oid_and_attnum(table_oid, attnum, engine, metadata, connection_to_use=None):
    """Reflect the table identified by `table_oid` and return the column with `attnum`."""
    sa_table = reflect_table_from_oid(
        table_oid, engine, metadata=metadata, connection_to_use=connection_to_use,
    )
    column_name = get_column_name_from_attnum(
        table_oid, attnum, engine, metadata=metadata, connection_to_use=connection_to_use,
    )
    return sa_table.columns[column_name]
def get_column_name_from_attnum(table_oid, attnum, engine, metadata, connection_to_use=None):
    """Return the name of the column with the given attnum in the given table."""
    statement = _statement_for_triples_of_column_name_and_attnum_and_table_oid(
        [table_oid], [attnum], engine, metadata=metadata,
    )
    return execute_statement(engine, statement, connection_to_use).scalar()
def _statement_for_triples_of_column_name_and_attnum_and_table_oid(
    table_oids, attnums, engine, metadata
):
    """
    Returns (column name, column attnum, column table's oid) tuples for each column that's in the
    tables specified via `table_oids`, and, when `attnums` is not None, that has an attnum
    specified in `attnums`.

    The order is based on the column order in the table and not on the order of the arguments.
    """
    pg_attribute = get_pg_catalog_table("pg_attribute", engine, metadata=metadata)
    sel = select(pg_attribute.c.attname, pg_attribute.c.attnum, pg_attribute.c.attrelid)
    wasnt_dropped = pg_attribute.c.attisdropped.is_(False)
    table_oid_matches = pg_attribute.c.attrelid.in_(table_oids)
    conditions = [wasnt_dropped, table_oid_matches]
    if attnums is not None:
        # Restrict the result to the requested columns.
        attnum_matches = pg_attribute.c.attnum.in_(attnums)
        conditions.append(attnum_matches)
    else:
        # No explicit attnums: take every user column; system columns have
        # non-positive attnums and are excluded.
        attnum_positive = pg_attribute.c.attnum > 0
        conditions.append(attnum_positive)
    # NOTE: a stray " |" delimiter on the final line (merge/export artifact)
    # previously broke the syntax of this function; removed.
    sel = sel.where(and_(*conditions))
    return sel
6,239 | test directory radius server status failed | from unittest import mock
from prowler.providers.aws.services.directoryservice.directoryservice_service import (
AuthenticationProtocol,
Directory,
DirectoryType,
RadiusSettings,
RadiusStatus,
)
AWS_REGION = "eu-west-1"
AWS_ACCOUNT_NUMBER = "123456789012"
class Test_directoryservice_supported_mfa_radius_enabled:
    """Tests for the directoryservice_supported_mfa_radius_enabled check.

    NOTE(review): `mock.MagicMock` is referenced without parentheses below, so
    attributes are set on the class itself rather than on an instance. This
    mirrors the pattern of sibling service tests — confirm it is intentional.
    (Also removed a stray " |" delimiter that broke the final line's syntax.)
    """

    def test_no_directories(self):
        """With no directories configured, the check produces no findings."""
        directoryservice_client = mock.MagicMock
        directoryservice_client.directories = {}
        with mock.patch(
            "prowler.providers.aws.services.directoryservice.directoryservice_service.DirectoryService",
            new=directoryservice_client,
        ):
            # Test Check
            from prowler.providers.aws.services.directoryservice.directoryservice_supported_mfa_radius_enabled.directoryservice_supported_mfa_radius_enabled import (
                directoryservice_supported_mfa_radius_enabled,
            )

            check = directoryservice_supported_mfa_radius_enabled()
            result = check.execute()
            assert len(result) == 0

    def test_directory_no_radius_server(self):
        """A directory without Radius settings yields no findings."""
        directoryservice_client = mock.MagicMock
        directory_name = "test-directory"
        directory_id = "d-12345a1b2"
        directory_arn = (
            f"arn:aws:ds:{AWS_REGION}:{AWS_ACCOUNT_NUMBER}:directory/d-12345a1b2"
        )
        directoryservice_client.directories = {
            directory_name: Directory(
                name=directory_name,
                id=directory_id,
                arn=directory_arn,
                type=DirectoryType.MicrosoftAD,
                region=AWS_REGION,
                radius_settings=None,
            )
        }
        with mock.patch(
            "prowler.providers.aws.services.directoryservice.directoryservice_service.DirectoryService",
            new=directoryservice_client,
        ):
            # Test Check
            from prowler.providers.aws.services.directoryservice.directoryservice_supported_mfa_radius_enabled.directoryservice_supported_mfa_radius_enabled import (
                directoryservice_supported_mfa_radius_enabled,
            )

            check = directoryservice_supported_mfa_radius_enabled()
            result = check.execute()
            assert len(result) == 0

    def METHOD_NAME(self):
        """A Radius server whose status is Failed must FAIL the check."""
        directoryservice_client = mock.MagicMock
        directory_name = "test-directory"
        directory_id = "d-12345a1b2"
        directory_arn = (
            f"arn:aws:ds:{AWS_REGION}:{AWS_ACCOUNT_NUMBER}:directory/d-12345a1b2"
        )
        directoryservice_client.directories = {
            directory_name: Directory(
                name=directory_name,
                id=directory_id,
                arn=directory_arn,
                type=DirectoryType.MicrosoftAD,
                region=AWS_REGION,
                radius_settings=RadiusSettings(
                    authentication_protocol=AuthenticationProtocol.MS_CHAPv1,
                    status=RadiusStatus.Failed,
                ),
            )
        }
        with mock.patch(
            "prowler.providers.aws.services.directoryservice.directoryservice_service.DirectoryService",
            new=directoryservice_client,
        ):
            # Test Check
            from prowler.providers.aws.services.directoryservice.directoryservice_supported_mfa_radius_enabled.directoryservice_supported_mfa_radius_enabled import (
                directoryservice_supported_mfa_radius_enabled,
            )

            check = directoryservice_supported_mfa_radius_enabled()
            result = check.execute()
            assert len(result) == 1
            assert result[0].resource_id == directory_id
            assert result[0].resource_arn == directory_arn
            assert result[0].resource_tags == []
            assert result[0].region == AWS_REGION
            assert result[0].status == "FAIL"
            assert (
                result[0].status_extended
                == f"Directory {directory_id} does not have Radius MFA enabled."
            )

    def test_directory_radius_server_status_creating(self):
        """A Radius server still in Creating state must FAIL the check."""
        directoryservice_client = mock.MagicMock
        directory_name = "test-directory"
        directory_id = "d-12345a1b2"
        directory_arn = (
            f"arn:aws:ds:{AWS_REGION}:{AWS_ACCOUNT_NUMBER}:directory/d-12345a1b2"
        )
        directoryservice_client.directories = {
            directory_name: Directory(
                name=directory_name,
                id=directory_id,
                arn=directory_arn,
                type=DirectoryType.MicrosoftAD,
                region=AWS_REGION,
                radius_settings=RadiusSettings(
                    authentication_protocol=AuthenticationProtocol.MS_CHAPv2,
                    status=RadiusStatus.Creating,
                ),
            )
        }
        with mock.patch(
            "prowler.providers.aws.services.directoryservice.directoryservice_service.DirectoryService",
            new=directoryservice_client,
        ):
            # Test Check
            from prowler.providers.aws.services.directoryservice.directoryservice_supported_mfa_radius_enabled.directoryservice_supported_mfa_radius_enabled import (
                directoryservice_supported_mfa_radius_enabled,
            )

            check = directoryservice_supported_mfa_radius_enabled()
            result = check.execute()
            assert len(result) == 1
            assert result[0].resource_id == directory_id
            assert result[0].resource_arn == directory_arn
            assert result[0].resource_tags == []
            assert result[0].region == AWS_REGION
            assert result[0].status == "FAIL"
            assert (
                result[0].status_extended
                == f"Directory {directory_id} does not have Radius MFA enabled."
            )

    def test_directory_radius_server_status_completed(self):
        """A Radius server with Completed status must PASS the check."""
        directoryservice_client = mock.MagicMock
        directory_name = "test-directory"
        directory_id = "d-12345a1b2"
        directory_arn = (
            f"arn:aws:ds:{AWS_REGION}:{AWS_ACCOUNT_NUMBER}:directory/d-12345a1b2"
        )
        directoryservice_client.directories = {
            directory_name: Directory(
                name=directory_name,
                id=directory_id,
                arn=directory_arn,
                type=DirectoryType.MicrosoftAD,
                region=AWS_REGION,
                radius_settings=RadiusSettings(
                    authentication_protocol=AuthenticationProtocol.MS_CHAPv2,
                    status=RadiusStatus.Completed,
                ),
            )
        }
        with mock.patch(
            "prowler.providers.aws.services.directoryservice.directoryservice_service.DirectoryService",
            new=directoryservice_client,
        ):
            # Test Check
            from prowler.providers.aws.services.directoryservice.directoryservice_supported_mfa_radius_enabled.directoryservice_supported_mfa_radius_enabled import (
                directoryservice_supported_mfa_radius_enabled,
            )

            check = directoryservice_supported_mfa_radius_enabled()
            result = check.execute()
            assert len(result) == 1
            assert result[0].resource_id == directory_id
            assert result[0].resource_arn == directory_arn
            assert result[0].resource_tags == []
            assert result[0].region == AWS_REGION
            assert result[0].status == "PASS"
            assert (
                result[0].status_extended
                == f"Directory {directory_id} have Radius MFA enabled."
            )
6,240 | get view | # Copyright 2020-2023 Capypara and the SkyTemple Contributors
#
# This file is part of SkyTemple.
#
# SkyTemple is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SkyTemple is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SkyTemple. If not, see <https://www.gnu.org/licenses/>.
import os
from typing import TYPE_CHECKING, Optional
from gi.repository import Gtk
from skytemple.core.message_dialog import SkyTempleMessageDialog
from skytemple.core.module_controller import AbstractController
from skytemple.core.ui_utils import data_dir
from skytemple_files.common.util import create_file_in_rom
from skytemple_files.common.i18n_util import _
from skytemple.controller.main import MainController as SkyTempleMainController
from skytemple_files.common.types.file_types import FileType
if TYPE_CHECKING:
from skytemple.module.sprite.module import SpriteModule
OBJECT_SPRTIES = _('Object Sprites')
class ObjectMainController(AbstractController):
    """Controller for the object-sprite list view.

    Lets the user add a new (initially empty) object sprite to the ROM.
    (Fixed a stray " |" delimiter that broke the final line's syntax, and
    extracted the repeated message-dialog boilerplate into `_show_message`.)
    """

    def __init__(self, module: 'SpriteModule', item_id: int):
        self.module = module
        # Populated when the view is built; None until then.
        self.builder: Gtk.Builder = None  # type: ignore

    def METHOD_NAME(self) -> Gtk.Widget:
        """Build the view from the Glade file and return its root widget."""
        self.builder = self._get_builder(__file__, 'object_main.glade')
        self.builder.connect_signals(self)
        return self.builder.get_object('box_list')

    def on_btn_add_clicked(self, *args):
        """Prompt for a sprite name and create a new empty object sprite in the ROM."""
        from skytemple.module.sprite.module import GROUND_DIR, WAN_FILE_EXT
        response, name = self._show_generic_input(_('Sprite Name'), _('Create Object Sprite'))
        if response != Gtk.ResponseType.OK:
            return
        name = name.lower()
        if len(name) < 1 or len(name) > 10:
            self._show_message(
                Gtk.MessageType.ERROR,
                _("The length of the sprite name must be between 1-10 characters.")
            )
            return
        obj_name = f'{GROUND_DIR}/{name}.{WAN_FILE_EXT}'
        if self.module.project.file_exists(obj_name):
            self._show_message(
                Gtk.MessageType.ERROR,
                _("A sprite with this name already exists.")
            )
            return
        # Seed the new sprite with the bundled empty WAN file.
        with open(os.path.join(data_dir(), 'empty.wan'), 'rb') as f:
            empty_wan = f.read()
        # Write to ROM
        self.module.project.create_file_manually(obj_name, empty_wan)
        self.module.add_wan(f'{name}.{WAN_FILE_EXT}')
        self._show_message(
            Gtk.MessageType.INFO,
            _("Object sprite added successfully"), is_success=True
        )

    def _show_message(self, message_type, text, **kwargs):
        """Show a modal OK-only message dialog attached to the main window."""
        md = SkyTempleMessageDialog(
            SkyTempleMainController.window(),
            Gtk.DialogFlags.MODAL, message_type,
            Gtk.ButtonsType.OK,
            text, **kwargs
        )
        md.run()
        md.destroy()

    def _show_generic_input(self, label_text, ok_text):
        """Show the generic single-entry input dialog; return (response, entered text)."""
        dialog: Gtk.Dialog = self.builder.get_object('generic_input_dialog')
        entry: Gtk.Entry = self.builder.get_object('generic_input_dialog_entry')
        label: Gtk.Label = self.builder.get_object('generic_input_dialog_label')
        label.set_text(label_text)
        btn_cancel = dialog.add_button(_("Cancel"), Gtk.ResponseType.CANCEL)
        btn = dialog.add_button(ok_text, Gtk.ResponseType.OK)
        btn.set_can_default(True)
        btn.grab_default()
        entry.set_activates_default(True)
        dialog.set_attached_to(SkyTempleMainController.window())
        dialog.set_transient_for(SkyTempleMainController.window())
        response = dialog.run()
        dialog.hide()
        # Remove the buttons again so the shared dialog can be reused cleanly.
        btn.get_parent().remove(btn)
        btn_cancel.get_parent().remove(btn_cancel)
        return response, entry.get_text()
6,241 | reverse archive | import datetime
import json
from django.test import RequestFactory
from django.utils import timezone
from rest_framework import status
from rest_framework.reverse import reverse
from schedule.models import ScheduleEntry
from schedule.tests.utils import TEST_SCHEDULE_ENTRY, post_schedule
from scheduler.tests.utils import simulate_scheduler_run
from sensor import V1
from sensor.tests.utils import HTTPS_KWARG, validate_response
from tasks.models import TaskResult
TEST_MAX_DISK_USAGE = 10
ONE_MICROSECOND = datetime.timedelta(0, 0, 1)
EMPTY_RESULTS_RESPONSE = []
EMPTY_ACQUISITIONS_RESPONSE = []
SINGLE_FREQUENCY_FFT_ACQUISITION = {
"name": "test_acq",
"start": None,
"stop": None,
"interval": None,
"action": "test_single_frequency_m4s_action",
}
MULTIPLE_FREQUENCY_FFT_ACQUISITIONS = {
"name": "test_multiple_acq",
"start": None,
"relative_stop": 5,
"interval": 1,
"action": "test_single_frequency_m4s_action",
}
SINGLE_TIMEDOMAIN_IQ_MULTI_RECORDING_ACQUISITION = {
"name": "test_multirec_acq",
"start": None,
"stop": None,
"interval": None,
"action": "test_multi_frequency_iq_action",
}
SINGLE_TIMEDOMAIN_IQ_ACQUISITION = {
"name": "test_time_domain_iq_acquire",
"start": None,
"stop": None,
"interval": None,
"action": "test_single_frequency_iq_action",
}
def simulate_acquisitions(client, schedule_entry, n=1, name=None):
    """Post `schedule_entry`, run the scheduler `n` times, and return the entry's name."""
    assert 0 < n <= 10
    if n > 1:
        schedule_entry["relative_stop"] = n
    if name is not None:
        schedule_entry["name"] = name
    posted_entry = post_schedule(client, schedule_entry)
    simulate_scheduler_run(n)
    return posted_entry["name"]
def simulate_frequency_fft_acquisitions(client, n=1, name=None):
    """Simulate `n` FFT acquisitions using the single- or multi-acquisition template."""
    template = (
        SINGLE_FREQUENCY_FFT_ACQUISITION
        if n == 1
        else MULTIPLE_FREQUENCY_FFT_ACQUISITIONS
    )
    return simulate_acquisitions(client, template.copy(), n, name)
def simulate_multirec_acquisition(client, name=None):
    """Simulate one multi-recording time-domain IQ acquisition."""
    entry = SINGLE_TIMEDOMAIN_IQ_MULTI_RECORDING_ACQUISITION.copy()
    return simulate_acquisitions(client, entry, n=1, name=name)
def simulate_timedomain_iq_acquisition(client, name=None):
    """Simulate one single-frequency time-domain IQ acquisition."""
    entry = SINGLE_TIMEDOMAIN_IQ_ACQUISITION.copy()
    return simulate_acquisitions(client, entry, n=1, name=name)
def create_task_results(n, admin_client, entry_name=None):
    """
    Create `n` successful TaskResult rows for a schedule entry.

    If no entry named `entry_name` exists yet, one is created by POSTing a test
    schedule entry first.

    Returns:
        The name of the schedule entry the results were attached to.
    """
    # We need an entry in the schedule to create TRs for
    try:
        entry = ScheduleEntry.objects.get(name=entry_name)
    except Exception:
        # Copy the template: assigning it directly and then setting "name"
        # mutated the shared module-level TEST_SCHEDULE_ENTRY constant.
        test_entry = TEST_SCHEDULE_ENTRY.copy()
        if entry_name is not None:
            test_entry["name"] = entry_name
        rjson = post_schedule(admin_client, test_entry)
        entry_name = rjson["name"]
        entry = ScheduleEntry.objects.get(name=entry_name)
    for i in range(n):
        started = timezone.now()
        tr = TaskResult(
            schedule_entry=entry,
            task_id=i + 1,  # task ids are 1-based
            started=started,
            finished=started + ONE_MICROSECOND,
            duration=ONE_MICROSECOND,
            status="success",
            detail="",
        )
        tr.max_disk_usage = TEST_MAX_DISK_USAGE
        tr.save()
    return entry_name
def reverse_results_overview():
    """Build an HTTPS URL for the task-results overview endpoint."""
    request = RequestFactory().get("/tasks/completed/", **HTTPS_KWARG)
    return reverse("task-results-overview", kwargs=V1, request=request)
def reverse_result_list(schedule_entry_name):
    """Build an HTTPS URL for the task-result list of one schedule entry."""
    request = RequestFactory().get(
        "/tasks/completed/" + schedule_entry_name, **HTTPS_KWARG
    )
    kwargs = dict(V1, schedule_entry_name=schedule_entry_name)
    return reverse("task-result-list", kwargs=kwargs, request=request)
def reverse_result_detail(schedule_entry_name, task_id):
    """Build an HTTPS URL for a single task result."""
    url = f"/tasks/completed/{schedule_entry_name}/{task_id!s}"
    request = RequestFactory().get(url, **HTTPS_KWARG)
    kwargs = dict(V1, schedule_entry_name=schedule_entry_name, task_id=task_id)
    return reverse("task-result-detail", kwargs=kwargs, request=request)
def METHOD_NAME(schedule_entry_name, task_id):
    """Build an HTTPS URL for a single task result's archive endpoint."""
    url = f"/tasks/completed/{schedule_entry_name}/{task_id!s}/archive"
    request = RequestFactory().get(url, **HTTPS_KWARG)
    kwargs = dict(V1, schedule_entry_name=schedule_entry_name, task_id=task_id)
    return reverse("task-result-archive", kwargs=kwargs, request=request)
def reverse_archive_all(schedule_entry_name):
    """Build an HTTPS URL for archiving all results of one schedule entry."""
    url = f"/tasks/completed/{schedule_entry_name}/archive"
    request = RequestFactory().get(url, **HTTPS_KWARG)
    kwargs = dict(V1, schedule_entry_name=schedule_entry_name)
    return reverse("task-result-list-archive", kwargs=kwargs, request=request)
def get_results_overview(client):
    """GET the results overview, validate a 200, and return the `results` payload."""
    response = client.get(reverse_results_overview(), **HTTPS_KWARG)
    rjson = validate_response(response, status.HTTP_200_OK)
    return rjson["results"]
def get_result_list(client, schedule_entry_name):
    """GET one entry's task-result list, validate a 200, and return `results`."""
    response = client.get(reverse_result_list(schedule_entry_name), **HTTPS_KWARG)
    rjson = validate_response(response, status.HTTP_200_OK)
    return rjson["results"]
def get_result_detail(client, schedule_entry_name, task_id):
    """GET a single task result and return the validated 200 response body."""
    detail_url = reverse_result_detail(schedule_entry_name, task_id)
    response = client.get(detail_url, **HTTPS_KWARG)
    return validate_response(response, status.HTTP_200_OK)
def update_result_detail(client, schedule_entry_name, task_id, new_acquisition):
    """
    PUT `new_acquisition` as JSON to the result-detail endpoint over HTTPS.

    Returns the raw client response (not validated here).
    """
    # NOTE: removed a stray " |" delimiter that broke the final line's syntax.
    url = reverse_result_detail(schedule_entry_name, task_id)
    kwargs = {
        "data": json.dumps(new_acquisition),
        "content_type": "application/json",
        "wsgi.url_scheme": "https",
    }
    return client.put(url, **kwargs)
6,242 | register collection blocks | import json
from pathlib import Path
import sqlalchemy as sa
import prefect
from prefect.logging import get_logger
from prefect.server import models, schemas
logger = get_logger("server")
COLLECTIONS_BLOCKS_DATA_PATH = (
Path(__file__).parent.parent / "collection_blocks_data.json"
)
async def _install_protected_system_blocks(session):
    """Install block types that the system expects to be present"""
    for block in [
        prefect.blocks.webhook.Webhook,
        prefect.blocks.system.JSON,
        prefect.blocks.system.DateTime,
        prefect.blocks.system.Secret,
        prefect.filesystems.LocalFileSystem,
        prefect.infrastructure.Process,
    ]:
        # One transaction per block, so a failure only affects that block.
        async with session.begin():
            block_type = block._to_block_type()
            # Flag the type as protected before persisting it.
            block_type.is_protected = True
            # override=True replaces any existing stored definition.
            block_type = await models.block_types.create_block_type(
                session=session, block_type=block_type, override=True
            )
            await models.block_schemas.create_block_schema(
                session=session,
                block_schema=block._to_block_schema(block_type_id=block_type.id),
                override=True,
            )
async def register_block_schema(
    session: sa.orm.Session,
    block_schema: schemas.core.BlockSchema,
):
    """
    Stores the provided block schema in the Prefect REST API database.

    If a block schema with a matching checksum and version is already saved,
    then the ID of the existing block schema will be returned.

    Args:
        session: A database session.
        block_schema: A block schema object.

    Returns:
        The ID of the registered block schema.
    """
    from prefect.server.models.block_schemas import (
        create_block_schema,
        read_block_schema_by_checksum,
    )

    existing = await read_block_schema_by_checksum(
        session=session, checksum=block_schema.checksum, version=block_schema.version
    )
    if existing is not None:
        return existing.id
    created = await create_block_schema(
        session=session,
        block_schema=block_schema,
    )
    return created.id
async def register_block_type(
    session: sa.orm.Session,
    block_type: schemas.core.BlockType,
):
    """
    Stores the provided block type in the Prefect REST API database.

    If a block type with a matching slug is already saved, then the block type
    will be updated to match the passed in block type.

    Args:
        session: A database session.
        block_type: A block type object.

    Returns:
        The ID of the registered block type.
    """
    from prefect.server.models.block_types import (
        create_block_type,
        read_block_type_by_slug,
        update_block_type,
    )

    existing = await read_block_type_by_slug(
        session=session,
        block_type_slug=block_type.slug,
    )
    if existing is not None:
        # Refresh the stored definition in place and keep the existing ID.
        await update_block_type(
            session=session,
            block_type_id=existing.id,
            block_type=block_type,
        )
        return existing.id
    created = await create_block_type(
        session=session,
        block_type=block_type,
    )
    return created.id
async def _load_collection_blocks_data():
    """Loads blocks data for whitelisted collections."""
    import anyio

    file = await anyio.open_file(COLLECTIONS_BLOCKS_DATA_PATH, "r")
    async with file:
        contents = await file.read()
    return json.loads(contents)
async def _register_registry_blocks(session: sa.orm.Session):
    """Registers block from the client block registry."""
    from prefect.blocks.core import Block
    from prefect.utilities.dispatch import get_registry_for_type

    registry = get_registry_for_type(Block) or {}
    for block_class in registry.values():
        # each block schema gets its own transaction
        async with session.begin():
            type_id = await register_block_type(
                session=session,
                block_type=block_class._to_block_type(),
            )
            schema = block_class._to_block_schema(block_type_id=type_id)
            await register_block_schema(session=session, block_schema=schema)
async def METHOD_NAME(session: sa.orm.Session):
    """Registers blocks from whitelisted collections."""
    collections_blocks_data = await _load_collection_blocks_data()
    block_types = []
    for collection in collections_blocks_data["collections"].values():
        block_types.extend(collection["block_types"].values())
    for block_type in block_types:
        # each block schema gets its own transaction
        async with session.begin():
            # Pull the schemas out so the remaining dict parses as a BlockType.
            block_schemas = block_type.pop("block_schemas", [])
            block_type_id = await register_block_type(
                session=session,
                block_type=schemas.core.BlockType.parse_obj(block_type),
            )
            for block_schema in block_schemas:
                payload = {**block_schema, "block_type_id": block_type_id}
                await register_block_schema(
                    session=session,
                    block_schema=schemas.core.BlockSchema.parse_obj(payload),
                )
async def run_block_auto_registration(session: sa.orm.Session):
    """
    Registers all blocks in the client block registry and any blocks from Prefect
    Collections that are configured for auto-registration.

    Args:
        session: A database session.
    """
    # NOTE: removed a stray " |" delimiter that broke the final line's syntax.
    await _install_protected_system_blocks(session)
    await _register_registry_blocks(session)
    await METHOD_NAME(session=session)
6,243 | if enum group | from __future__ import annotations
from enum import Enum
from typing import Iterable, List, Literal, Tuple, TypedDict, Union
import navi
from base_types import InputId
from nodes.base_input import BaseInput
from nodes.group import NestedGroup, group
InputValue = Union[int, str]
EnumValues = Union[
InputValue,
Enum,
Iterable[str],
Iterable[int],
Iterable[Enum],
]
RawEnumValues = Union[
InputValue, List[str], List[int], Tuple[str, ...], Tuple[int, ...]
]
_ConditionJson = Union[
"_AndConditionJson",
"_OrConditionJson",
"_NotConditionJson",
"_EnumConditionJson",
"_TypeConditionJson",
]
class _AndConditionJson(TypedDict):
    """JSON form of a conjunction of conditions (empty `items` is used as constant true)."""
    kind: Literal["and"]
    items: List[_ConditionJson]
class _OrConditionJson(TypedDict):
    """JSON form of a disjunction of conditions (empty `items` is used as constant false)."""
    kind: Literal["or"]
    items: List[_ConditionJson]
class _NotConditionJson(TypedDict):
    """JSON form of the negation of a single condition."""
    kind: Literal["not"]
    condition: _ConditionJson
class _EnumConditionJson(TypedDict):
    """JSON form of a check that dropdown/enum input `enum` has one of `values`."""
    kind: Literal["enum"]
    enum: InputId
    values: List[str | int]
class _TypeConditionJson(TypedDict):
    """JSON form of a check that input `input`'s type is compatible with `condition`.

    NOTE(review): `ifNotConnected` presumably gives the condition's result when
    the input has no connection — confirm against the frontend implementation.
    """
    kind: Literal["type"]
    input: InputId
    condition: navi.ExpressionJson
    ifNotConnected: bool
class Condition:
    """A serializable boolean expression over node inputs.

    Instances wrap a `_ConditionJson` payload and compose with `&`, `|`, `~`.
    """
    def __init__(self, value: _ConditionJson) -> None:
        self._value: _ConditionJson = value
    def to_json(self):
        """Return the JSON-serializable representation of this condition."""
        return self._value
    def __and__(self, other: Condition) -> Condition:
        # Conjunction: self & other
        return Condition({"kind": "and", "items": [self._value, other._value]})
    def __or__(self, other: Condition) -> Condition:
        # Disjunction: self | other
        return Condition({"kind": "or", "items": [self._value, other._value]})
    def __invert__(self) -> Condition:
        # Negation: ~self
        return Condition({"kind": "not", "condition": self._value})
    @staticmethod
    def enum(enum: int, values: EnumValues) -> Condition:
        """
        A condition to check whether a certain dropdown/enum input has a certain value.
        """
        v: List[str | int] = []
        # Normalize a single value / Enum member / iterable of either into a
        # flat list of str|int values.
        def convert(value: int | str | Enum):
            if isinstance(value, (int, str)):
                v.append(value)
            else:
                enum_value = value.value
                assert isinstance(enum_value, (int, str))
                v.append(enum_value)
        if isinstance(values, (int, str, Enum)):
            convert(values)
        else:
            for value in values:
                convert(value)
        return Condition(
            {
                "kind": "enum",
                "enum": InputId(enum),
                "values": v,
            }
        )
    @staticmethod
    def bool(input_id: int, value: bool) -> Condition:
        """
        A condition to check whether a certain bool input has a certain value.
        """
        # Bool inputs are encoded as enums with values 0/1.
        return Condition(
            {
                "kind": "enum",
                "enum": InputId(input_id),
                "values": [int(value)],
            }
        )
    @staticmethod
    def type(
        input_id: int,
        condition: navi.ExpressionJson,
        if_not_connected: bool = False,
    ) -> Condition:
        """
        A condition to check whether a certain input is compatible a certain type.
        Here "compatible" is defined as overlapping.
        """
        return Condition(
            {
                "kind": "type",
                "input": InputId(input_id),
                "condition": condition,
                "ifNotConnected": if_not_connected,
            }
        )
    @staticmethod
    def const(value: bool) -> Condition:
        """Return a constant condition: empty AND is true, empty OR is false."""
        if value:
            return Condition({"kind": "and", "items": []})
        return Condition({"kind": "or", "items": []})
def if_group(condition: Condition):
    """Create a group whose inputs are only shown while `condition` holds."""
    info = {"condition": condition.to_json()}
    return group("conditional", info)
def METHOD_NAME(enum: int, condition: EnumValues):
    """Shorthand for `if_group(Condition.enum(enum, condition))`."""
    enum_condition = Condition.enum(enum, condition)
    return if_group(enum_condition)
def required(condition: Condition | None = None):
    """
    Mark optional generic inputs as required whenever `condition` holds
    (`True` when no condition is given).

    When nested inside conditional groups (`if_group` and derivatives), the
    conditions of every ancestor conditional group must be met as well.

    This is best effort only: a satisfied condition does not guarantee the
    optional input actually has a value, so callers must always check `None`.

    Example:
    ```py
    if_group(someCondition)(
        required()(
            GenericInput("Foo").make_optional(),
        )
    )
    ```
    Here "Foo" is required exactly when it is visible (due to the parent
    conditional group).
    """
    effective = Condition.const(True) if condition is None else condition
    return group("required", {"condition": effective.to_json()})
def seed_group(seed_input: BaseInput):
    """
    Wrap a `SeedInput` in the "seed" group.

    The group changes the input's visual appearance and adds a small button for
    generating a new seed. Every `SeedInput` must be wrapped this way:

    ```py
    seed_group(SeedInput())
    ```
    """
    wrap = group("seed")
    return wrap(seed_input)
def optional_list_group(*inputs: BaseInput | NestedGroup):
    """
    Display the wrapped optional inputs as a growable list.

    Users initially see no inputs and can add up to as many as the group
    contains, giving a vararg-like effect (see the Text Append node).
    """
    wrap = group("optional-list")
    return wrap(*inputs)
def linked_inputs_group(*inputs: BaseInput):
    """
    This group wraps around inputs of the same type. It ensures that all inputs have the same
    value.

    "The same type" here not only refers to the Navi type of those inputs. All possible values
    from all inputs must also be valid values for all other inputs. This typically necessitates
    that the inputs are of the same class and use the same parameters.
    """
    # NOTE: removed a stray " |" delimiter that broke the final line's syntax.
    return group("linked-inputs")(*inputs)
6,244 | step update | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
from typing import List
import torch.optim.lr_scheduler
from omegaconf import II
from fairseq.dataclass import FairseqDataclass
from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler
@dataclass
class ReduceLROnPlateauLRScheduleConfig(FairseqDataclass):
    """Configuration for the `reduce_lr_on_plateau` LR scheduler.

    Each field's `metadata["help"]` documents its CLI meaning; `lr` and
    `maximize_best_checkpoint_metric` are interpolated from other config
    sections via OmegaConf `II`.
    """
    lr_shrink: float = field(
        default=0.1, metadata={"help": "shrink factor for annealing"}
    )
    lr_threshold: float = field(
        default=1e-4,
        metadata={
            "help": (
                "threshold for measuring the new optimum, to only focus on "
                "significant changes"
            )
        },
    )
    lr_patience: int = field(
        default=0,
        metadata={
            "help": (
                "number of epochs with no improvement after which learning rate will "
                "be reduced"
            )
        },
    )
    warmup_updates: int = field(
        default=0,
        metadata={"help": "warmup the learning rate linearly for the first N updates"},
    )
    warmup_init_lr: float = field(
        default=-1,
        metadata={
            "help": "initial learning rate during warmup phase; default is cfg.lr"
        },
    )
    lr: List[float] = II("optimization.lr")
    maximize_best_checkpoint_metric: bool = II(
        "checkpoint.maximize_best_checkpoint_metric"
    )
@register_lr_scheduler(
    "reduce_lr_on_plateau", dataclass=ReduceLROnPlateauLRScheduleConfig
)
class ReduceLROnPlateauLRSchedule(FairseqLRScheduler):
    """
    Decay the LR by a factor every time the validation loss plateaus.

    Also comes with optional warmup phase, where we linearly increase
    the learning rate from some initial learning rate
    (``--warmup-init-lr``) until the configured learning rate
    (``--lr``). Thereafter the lr is adjusted according to original
    reduce_on_plateau scheme.

    During warmup::

        lrs = torch.linspace(
            cfg.warmup_init_lr, cfg.lr, cfg.warmup_updates
        )
        lr = lrs[update_num]

    (Fixed a stray " |" delimiter that broke the final line's syntax and a
    malformed docstring in the per-update step method.)
    """

    def __init__(self, cfg: ReduceLROnPlateauLRScheduleConfig, optimizer):
        super().__init__(cfg, optimizer)
        if len(cfg.lr) > 1:
            raise ValueError(
                "Cannot use a fixed learning rate schedule with reduce_lr_on_plateau."
                " Consider --lr-scheduler=fixed instead."
            )
        self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            self.optimizer.optimizer,
            patience=cfg.lr_patience,
            factor=cfg.lr_shrink,
            mode="max" if cfg.maximize_best_checkpoint_metric else "min",
            threshold=cfg.lr_threshold,
        )
        warmup_end_lr = cfg.lr[0]
        # if no warm up, sets initial lr to be cfg.lr[0]
        if cfg.warmup_init_lr < 0:
            cfg.warmup_init_lr = 0 if cfg.warmup_updates > 0 else warmup_end_lr
        # linearly warmup for the first cfg.warmup_updates
        if cfg.warmup_updates > 0:
            self.lr_step = (warmup_end_lr - cfg.warmup_init_lr) / cfg.warmup_updates
        # this flag is either set from arg when no warm up, or set by
        # step_update() when warmup finishes
        self.warmup_end = cfg.warmup_updates <= 0
        # initial learning rate
        # this self.lr is used only during init and/or warm up period
        self.lr = warmup_end_lr if self.warmup_end else cfg.warmup_init_lr
        self.optimizer.set_lr(self.lr)

    def state_dict(self):
        """Return the LR scheduler state dict."""
        return {
            "best": self.lr_scheduler.best,
            "last_epoch": self.lr_scheduler.last_epoch,
        }

    def load_state_dict(self, state_dict):
        """Load an LR scheduler state dict."""
        self.lr_scheduler.best = state_dict["best"]
        if "last_epoch" in state_dict:
            self.lr_scheduler.last_epoch = state_dict["last_epoch"]

    def step(self, epoch, val_loss=None):
        """
        Update the learning rate at the end of the given epoch if warmup
        finishes otherwise no update of lr on epoch boundaries
        """
        if val_loss is not None and self.warmup_end is True:
            self.lr_scheduler.step(val_loss)
        else:
            self.lr_scheduler.last_epoch = epoch
        return self.optimizer.get_lr()

    def METHOD_NAME(self, num_updates):
        """Update the learning rate after each update (handles linear warmup)."""
        # if there is warmup
        if self.cfg.warmup_updates > 0:
            if num_updates <= self.cfg.warmup_updates:
                self.lr = self.cfg.warmup_init_lr + num_updates * self.lr_step
                self.optimizer.set_lr(self.lr)
            else:
                if self.warmup_end is False:
                    self.warmup_end = True
        # else do nothing
        return self.optimizer.get_lr()
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetRegionBackendServiceIamPolicyResult',
'AwaitableGetRegionBackendServiceIamPolicyResult',
'get_region_backend_service_iam_policy',
'get_region_backend_service_iam_policy_output',
]
@pulumi.output_type
class GetRegionBackendServiceIamPolicyResult:
    """
    A collection of values returned by getRegionBackendServiceIamPolicy.
    """
    def __init__(__self__, etag=None, id=None, name=None, policy_data=None, project=None, METHOD_NAME=None):
        # Defensive checks: this result object is instantiated by the Pulumi
        # runtime from the raw invoke result, where every field is a plain str.
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if policy_data and not isinstance(policy_data, str):
            raise TypeError("Expected argument 'policy_data' to be a str")
        pulumi.set(__self__, "policy_data", policy_data)
        if project and not isinstance(project, str):
            raise TypeError("Expected argument 'project' to be a str")
        pulumi.set(__self__, "project", project)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'region' to be a str")
        pulumi.set(__self__, "region", METHOD_NAME)

    @property
    @pulumi.getter
    def etag(self) -> str:
        """
        (Computed) The etag of the IAM policy.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Name used to find the parent backend service the policy is bound to.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="policyData")
    def policy_data(self) -> str:
        """
        (Required only by `compute.RegionBackendServiceIamPolicy`) The policy data generated by
        a `organizations_get_iam_policy` data source.
        """
        return pulumi.get(self, "policy_data")

    @property
    @pulumi.getter
    def project(self) -> str:
        """
        The ID of the project in which the resource belongs.
        """
        return pulumi.get(self, "project")

    @property
    @pulumi.getter
    def METHOD_NAME(self) -> str:
        """
        The region in which the backend service resides.
        """
        return pulumi.get(self, "region")
class AwaitableGetRegionBackendServiceIamPolicyResult(GetRegionBackendServiceIamPolicyResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Standard Pulumi shim: the result is already resolved, so awaiting it
        # simply returns a plain result object. The unreachable `yield` makes
        # this function a generator, as required by the await protocol.
        if False:
            yield self
        return GetRegionBackendServiceIamPolicyResult(
            etag=self.etag,
            id=self.id,
            name=self.name,
            policy_data=self.policy_data,
            project=self.project,
            METHOD_NAME=self.METHOD_NAME)
def get_region_backend_service_iam_policy(name: Optional[str] = None,
                                          project: Optional[str] = None,
                                          METHOD_NAME: Optional[str] = None,
                                          opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRegionBackendServiceIamPolicyResult:
    """
    Retrieve the IAM policy attached to an existing regional backend service.

    :param str name: Used to find the parent resource to bind the IAM policy to
    :param str project: The ID of the project in which the resource belongs.
        If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used.
    :param str region: The Region in which the created backend service should reside.
        If it is not provided, the provider region is used.
        Used to find the parent resource to bind the IAM policy to. If not specified,
        the value will be parsed from the identifier of the parent resource. If no region is provided in the parent identifier and no
        region is specified, it is taken from the provider configuration.
    """
    __args__ = dict()
    __args__['name'] = name
    __args__['project'] = project
    __args__['region'] = METHOD_NAME
    # Merge provider-wide invoke defaults (version, plugin URL, ...) with caller opts.
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('gcp:compute/getRegionBackendServiceIamPolicy:getRegionBackendServiceIamPolicy', __args__, opts=opts, typ=GetRegionBackendServiceIamPolicyResult).value
    return AwaitableGetRegionBackendServiceIamPolicyResult(
        etag=pulumi.get(__ret__, 'etag'),
        id=pulumi.get(__ret__, 'id'),
        name=pulumi.get(__ret__, 'name'),
        policy_data=pulumi.get(__ret__, 'policy_data'),
        project=pulumi.get(__ret__, 'project'),
        METHOD_NAME=pulumi.get(__ret__, 'region'))
@_utilities.lift_output_func(get_region_backend_service_iam_policy)
def get_region_backend_service_iam_policy_output(name: Optional[pulumi.Input[str]] = None,
                                                 project: Optional[pulumi.Input[Optional[str]]] = None,
                                                 METHOD_NAME: Optional[pulumi.Input[Optional[str]]] = None,
                                                 opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetRegionBackendServiceIamPolicyResult]:
    """
    Output-typed variant of ``get_region_backend_service_iam_policy``.

    :param str name: Used to find the parent resource to bind the IAM policy to
    :param str project: The ID of the project in which the resource belongs.
        If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used.
    :param str region: The Region in which the created backend service should reside.
        If it is not provided, the provider region is used.
        Used to find the parent resource to bind the IAM policy to. If not specified,
        the value will be parsed from the identifier of the parent resource. If no region is provided in the parent identifier and no
        region is specified, it is taken from the provider configuration.
    """
    # Body intentionally empty: lift_output_func wraps the plain invoke above.
    ...
#!/usr/bin/env python3
from __future__ import print_function
#Hack to get ROOT to ignore command line arguments that we want
#to pass to Python
def import_ROOT():
import sys
tmpargv = sys.argv
sys.argv = ['-b', '-n']
import ROOT
sys.argv[:] = tmpargv[:]
return ROOT
import yaml
import subprocess
import logging
import json
import argparse
import glob
import multiprocessing
import optparse
import shlex
import os
LOG_MODULE_NAME = logging.getLogger(__name__)
class Dataset:
    """Represents a DAS dataset together with a local cache of its file list.

    Attributes:
        name (str): The DAS name of the dataset
        process (str): Nickname for the physics process this dataset belongs to
        global_file_prefix (str): ROOT TFile prefix that allows opening an LFN (/store/...)
        cache_location (str): Location of the local file cache
        use_cache (bool): If True, access files from cache_location instead of global_file_prefix in jobs
        tmpdir (str): Directory under which the DAS filename cache is written
    """

    def __init__(self, name, process, global_file_prefix, cache_location, use_cache, tmpdir):
        self.name = name
        self.process = process
        self.global_file_prefix = global_file_prefix
        self.cache_location = cache_location
        self.use_cache = use_cache
        self.tmpdir = tmpdir
        self.files = None
        self.max_files = None

    def __repr__(self):
        """Return a short, debuggable representation of the dataset."""
        return "Dataset(name={0})".format(self.name)

    def escape_name(self):
        """Return the DAS name with slashes removed so it can be a filename."""
        name = self.name.replace("/", "__")
        if name.startswith("__"):
            name = name[2:]
        return name

    def get_das_cache_filename(self):
        """Return the path of the cached DAS file list for this process."""
        return os.path.join(self.tmpdir, "das_cache", self.process + ".txt")

    def get_filenames(self):
        """Return openable file URLs: global_file_prefix + each cached LFN.

        Returns:
            list of str: one URL per file cached by ``METHOD_NAME``
        """
        with open(self.get_das_cache_filename(), "r") as fi:
            return [self.global_file_prefix + li.strip() for li in fi.readlines()]

    def METHOD_NAME(self):
        """Query DAS for this dataset's files and cache the bare LFNs.

        Bug fix: the cache now stores bare LFNs (one per line). Previously the
        global_file_prefix was written into the cache file AND prepended again
        by ``get_filenames``, producing doubly-prefixed URLs.
        """
        LOG_MODULE_NAME.info("caching dataset {0}".format(self.name))
        # dasgoclient must be available in the environment (CMSSW / grid setup).
        ret = subprocess.check_output(
            'dasgoclient --query="file dataset={0}" --limit=0'.format(self.name), shell=True
        )
        target_dir = os.path.dirname(self.get_das_cache_filename())
        if not os.path.exists(target_dir):
            os.makedirs(target_dir)
        nfiles = 0
        with open(self.get_das_cache_filename(), "w") as fi:
            for line in ret.decode().split("\n"):
                # Keep only real file entries; DAS output can contain blanks.
                if line.endswith(".root"):
                    fi.write(line + "\n")
                    nfiles += 1
        LOG_MODULE_NAME.info("retrieved {0} files from DAS".format(nfiles))
        return
if __name__ == "__main__":
    # XRootD redirector used to open LFNs; alternatives kept for reference.
    #prefix = ""
    prefix = "root://cmsxrootd.fnal.gov//"
    #prefix = "root://xrootd-cms.infn.it//"
    tmpdir = "tmp"
    # RelVal samples (CMSSW_12_1_0_pre2) whose file lists should be cached.
    datasets = [
        Dataset("/RelValQCD_FlatPt_15_3000HS_14/CMSSW_12_1_0_pre2-121X_mcRun3_2021_realistic_v1-v1/GEN-SIM-DIGI-RAW", "QCD_noPU", prefix, None, False, tmpdir),
        Dataset("/RelValQCD_FlatPt_15_3000HS_14/CMSSW_12_1_0_pre2-PU_121X_mcRun3_2021_realistic_v1-v1/GEN-SIM-DIGI-RAW", "QCD_PU", prefix, None, False, tmpdir),
        Dataset("/RelValZEE_14/CMSSW_12_1_0_pre2-PU_121X_mcRun3_2021_realistic_v1-v1/GEN-SIM-DIGI-RAW", "ZEE_PU", prefix, None, False, tmpdir),
        Dataset("/RelValZMM_14/CMSSW_12_1_0_pre2-PU_121X_mcRun3_2021_realistic_v1-v1/GEN-SIM-DIGI-RAW", "ZMM_PU", prefix, None, False, tmpdir),
        Dataset("/RelValTenTau_15_500/CMSSW_12_1_0_pre2-PU_121X_mcRun3_2021_realistic_v1-v1/GEN-SIM-DIGI-RAW", "TenTau_PU", prefix, None, False, tmpdir),
        Dataset("/RelValNuGun/CMSSW_12_1_0_pre2-PU_121X_mcRun3_2021_realistic_v1-v1/GEN-SIM-DIGI-RAW", "NuGun_PU", prefix, None, False, tmpdir)]
    # Query DAS once per dataset and write the cache files.
    for ds in datasets:
        ds.METHOD_NAME()
|
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Tests map creation.
"""
# import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests # isort:skip
import cartopy.crs as ccrs
import numpy as np
import numpy.testing as np_testing
import iris
import iris.coord_systems
import iris.cube
import iris.tests.stock
# Run tests in no graphics mode if matplotlib is not available.
if tests.MPL_AVAILABLE:
import matplotlib.pyplot as plt
import iris.plot as iplt
# A specific cartopy Globe matching the iris RotatedGeogCS default:
# a sphere of radius 6371229 m with no named ellipse.
_DEFAULT_GLOBE = ccrs.Globe(
    semimajor_axis=6371229.0, semiminor_axis=6371229.0, ellipse=None
)
@tests.skip_plot
@tests.skip_data
class TestBasic(tests.GraphicsTest):
    """Smoke tests for the basic iris.plot entry points on a realistic cube."""

    def setUp(self):
        super().setUp()
        self.cube = iris.tests.stock.realistic_4d()

    def test_contourf(self):
        cube = self.cube[0, 0]
        iplt.contourf(cube)
        self.check_graphic()

    def test_pcolor(self):
        cube = self.cube[0, 0]
        iplt.pcolor(cube)
        self.check_graphic()

    def test_unmappable(self):
        cube = self.cube[0, 0]
        # Clearing the standard_name makes the coordinate unrecognisable as a
        # map axis; plotting should still succeed without a projection.
        cube.coord("grid_longitude").standard_name = None
        iplt.contourf(cube)
        self.check_graphic()

    def test_default_projection_and_extent(self):
        self.assertEqual(
            iplt.default_projection(self.cube),
            ccrs.RotatedPole(357.5 - 180, 37.5, globe=_DEFAULT_GLOBE),
        )
        np_testing.assert_array_almost_equal(
            iplt.default_projection_extent(self.cube),
            (3.59579163e02, 3.59669159e02, -1.28250003e-01, -3.82499993e-02),
            decimal=3,
        )
@tests.skip_data
@tests.skip_plot
class TestUnmappable(tests.GraphicsTest):
    """Plotting a cube whose coordinates cannot be located on the globe."""

    def setUp(self):
        super().setUp()
        src_cube = iris.tests.stock.global_pp()
        # Make a cube that can't be located on the globe.
        cube = iris.cube.Cube(src_cube.data)
        cube.add_dim_coord(
            iris.coords.DimCoord(
                np.arange(96, dtype=np.float32) * 100, long_name="x", units="m"
            ),
            1,
        )
        cube.add_dim_coord(
            iris.coords.DimCoord(
                np.arange(73, dtype=np.float32) * 100, long_name="y", units="m"
            ),
            0,
        )
        cube.standard_name = "air_temperature"
        cube.units = "K"
        self.cube = cube

    def test_simple(self):
        # Explicit coords are needed since the cube has no map semantics.
        iplt.contourf(self.cube, coords=["y", "x"])
        self.check_graphic()
@tests.skip_data
@tests.skip_plot
class TestMappingSubRegion(tests.GraphicsTest):
    """Plotting a limited-area rotated-pole cube under several projections."""

    def setUp(self):
        super().setUp()
        cube_path = tests.get_data_path(
            ("PP", "aPProt1", "rotatedMHtimecube.pp")
        )
        cube = iris.load_cube(cube_path)[0]
        # make the data smaller to speed things up.
        self.cube = cube[::10, ::10]

    def test_simple(self):
        # First sub-plot
        plt.subplot(221)
        plt.title("Default")
        iplt.contourf(self.cube)
        plt.gca().coastlines("110m")
        # Second sub-plot
        plt.subplot(222, projection=ccrs.Mollweide(central_longitude=120))
        plt.title("Molleweide")
        iplt.contourf(self.cube)
        plt.gca().coastlines("110m")
        # Third sub-plot (the projection part is redundant, but a useful
        # test none-the-less)
        ax = plt.subplot(223, projection=iplt.default_projection(self.cube))
        plt.title("Native")
        iplt.contour(self.cube)
        ax.coastlines("110m")
        # Fourth sub-plot
        ax = plt.subplot(2, 2, 4, projection=ccrs.PlateCarree())
        plt.title("PlateCarree")
        iplt.contourf(self.cube)
        ax.coastlines("110m")
        self.check_graphic()

    def test_default_projection_and_extent(self):
        self.assertEqual(
            iplt.default_projection(self.cube),
            ccrs.RotatedPole(357.5 - 180, 37.5, globe=_DEFAULT_GLOBE),
        )
        np_testing.assert_array_almost_equal(
            iplt.default_projection_extent(self.cube),
            (313.01998901, 391.11999512, -22.48999977, 24.80999947),
        )
@tests.skip_data
@tests.skip_plot
class TestLowLevel(tests.GraphicsTest):
    """Contour level selection via positional and keyword arguments."""

    def setUp(self):
        super().setUp()
        self.cube = iris.tests.stock.global_pp()
        self.few = 4
        self.few_levels = list(range(280, 300, 5))
        self.many_levels = np.linspace(
            self.cube.data.min(), self.cube.data.max(), 40
        )

    def test_simple(self):
        iplt.contour(self.cube)
        self.check_graphic()

    def test_params(self):
        # Levels passed positionally: a level count, then explicit level lists.
        iplt.contourf(self.cube, self.few)
        self.check_graphic()
        iplt.contourf(self.cube, self.few_levels)
        self.check_graphic()
        iplt.contourf(self.cube, self.many_levels)
        self.check_graphic()

    def test_keywords(self):
        iplt.contourf(self.cube, levels=self.few_levels)
        self.check_graphic()
        iplt.contourf(self.cube, levels=self.many_levels, alpha=0.5)
        self.check_graphic()
@tests.skip_data
@tests.skip_plot
class TestBoundedCube(tests.GraphicsTest):
    """Plotting behaviour for a global cube with (guessed) cell bounds."""

    def setUp(self):
        super().setUp()
        self.cube = iris.tests.stock.global_pp()
        # Add some bounds to this data (this will actually make the bounds
        # invalid as they will straddle the north pole and overlap on the
        # dateline, but that doesn't matter for this test.)
        self.cube.coord("latitude").guess_bounds()
        self.cube.coord("longitude").guess_bounds()

    def test_pcolormesh(self):
        # pcolormesh can only be drawn in native coordinates (or more
        # specifically, in coordinates that don't wrap).
        plt.axes(projection=ccrs.PlateCarree(central_longitude=180))
        iplt.pcolormesh(self.cube)
        self.check_graphic()

    def test_grid(self):
        iplt.outline(self.cube)
        self.check_graphic()

    def test_default_projection_and_extent(self):
        self.assertEqual(
            iplt.default_projection(self.cube),
            ccrs.PlateCarree(
                globe=self.cube.coord_system("CoordSystem").as_cartopy_globe()
            ),
        )
        np_testing.assert_array_almost_equal(
            iplt.default_projection_extent(self.cube),
            [0.0, 360.0, -89.99995422, 89.99998474],
        )
        # Bound mode widens the extent to the outer cell edges.
        np_testing.assert_array_almost_equal(
            iplt.default_projection_extent(
                self.cube, mode=iris.coords.BOUND_MODE
            ),
            [-1.875046, 358.124954, -90, 90],
        )
@tests.skip_data
@tests.skip_plot
class TestLimitedAreaCube(tests.GraphicsTest):
    """Plotting of a small limited-area cube with guessed bounds."""

    def setUp(self):
        super().setUp()
        cube_path = tests.get_data_path(("PP", "aPProt1", "rotated.pp"))
        # Subsample aggressively to keep the test fast.
        self.cube = iris.load_cube(cube_path)[::20, ::20]
        self.cube.coord("grid_latitude").guess_bounds()
        self.cube.coord("grid_longitude").guess_bounds()

    def test_pcolormesh(self):
        iplt.pcolormesh(self.cube)
        self.check_graphic()

    def METHOD_NAME(self):
        iplt.outline(self.cube)
        self.check_graphic()

    def test_scatter(self):
        iplt.points(self.cube)
        plt.gca().coastlines("110m")
        self.check_graphic()
# Allow running this test module directly.
if __name__ == "__main__":
    tests.main()
"""Test IPAM forms."""
from unittest import skip
from django.test import TestCase
from nautobot.extras.models import Status
from nautobot.ipam import forms, models
from nautobot.ipam.choices import IPAddressTypeChoices
from nautobot.ipam.models import IPAddress, Namespace, Prefix
class BaseNetworkFormTest:
    """Shared validation tests for IPAM forms with a CIDR field.

    Subclasses set ``form_class``/``field_name``/``object_name`` plus any
    ``extra_data`` the form needs, and must also inherit ``TestCase`` so the
    runner collects the methods.
    """

    form_class = None  # the form class under test
    field_name = None  # name of the CIDR/address field on the form
    object_name = None  # human-readable name of the object being created
    extra_data = {}  # extra form data merged into every request

    def setUp(self):
        super().setUp()
        self.namespace = Namespace.objects.create(name="IPAM Form Test")
        self.status = Status.objects.get(name="Active")
        self.prefix_status = Status.objects.get_for_model(Prefix).first()
        self.ip_status = Status.objects.get_for_model(IPAddress).first()
        # Parent prefixes so created addresses/prefixes have a valid container.
        self.parent = Prefix.objects.create(
            prefix="192.168.1.0/24", namespace=self.namespace, status=self.prefix_status
        )
        self.parent2 = Prefix.objects.create(
            prefix="192.168.0.0/24", namespace=self.namespace, status=self.prefix_status
        )
        self.parent6 = Prefix.objects.create(
            prefix="2001:0db8::/40", namespace=self.namespace, status=self.prefix_status
        )

    def test_valid_ip_address(self):
        data = {self.field_name: "192.168.1.0/24", "namespace": self.namespace, "status": self.status}
        data.update(self.extra_data)
        form = self.form_class(data)
        self.assertTrue(form.is_valid())
        self.assertTrue(form.save())

    def test_address_invalid_ipv4(self):
        # /64 is out of range for an IPv4 address, so validation must fail.
        data = {self.field_name: "192.168.0.1/64", "namespace": self.namespace, "status": self.status}
        data.update(self.extra_data)
        form = self.form_class(data)
        self.assertFalse(form.is_valid())
        self.assertEqual("Please specify a valid IPv4 or IPv6 address.", form.errors[self.field_name][0])

    def test_address_zero_mask(self):
        data = {self.field_name: "192.168.0.1/0", "namespace": self.namespace, "status": self.status}
        data.update(self.extra_data)
        form = self.form_class(data)
        # With the advent of `Prefix.parent`, it's now possible to create a /0 .
        self.assertTrue(form.is_valid())

    def METHOD_NAME(self):
        # An address without an explicit CIDR mask must be rejected.
        data = {self.field_name: "192.168.0.1", "namespace": self.namespace, "status": self.status}
        data.update(self.extra_data)
        form = self.form_class(data)
        self.assertFalse(form.is_valid())
        self.assertEqual("CIDR mask (e.g. /24) is required.", form.errors[self.field_name][0])
@skip("Needs to be updated for Namespaces")
class PrefixFormTest(BaseNetworkFormTest, TestCase):
    """Validation tests for PrefixForm (skipped pending Namespace rework)."""

    form_class = forms.PrefixForm
    field_name = "prefix"
    object_name = "prefix"

    def setUp(self):
        super().setUp()
        self.extra_data = {
            "namespace": self.namespace,
            "status": self.prefix_status,
            "type": "network",
            "rir": models.RIR.objects.first(),
        }
class IPAddressFormTest(BaseNetworkFormTest, TestCase):
    """Validation tests for IPAddressForm, including SLAAC type rules."""

    form_class = forms.IPAddressForm
    field_name = "address"
    object_name = "IP address"

    def setUp(self):
        super().setUp()
        self.extra_data = {
            "namespace": self.namespace,
            "status": self.ip_status,
            "type": IPAddressTypeChoices.TYPE_HOST,
        }

    def test_slaac_valid_ipv6(self):
        data = self.extra_data
        data.update(
            {
                self.field_name: "2001:0db8:0000:0000:0000:ff00:0042:8329/128",
                "type": IPAddressTypeChoices.TYPE_SLAAC,
            }
        )
        form = self.form_class(data=data)
        self.assertTrue(form.is_valid())
        self.assertTrue(form.save())

    def test_slaac_status_invalid_ipv4(self):
        # SLAAC is an IPv6-only mechanism; an IPv4 address must be rejected.
        data = self.extra_data
        data.update(
            {
                self.field_name: "192.168.0.1/32",
                "type": IPAddressTypeChoices.TYPE_SLAAC,
            }
        )
        form = self.form_class(data=data)
        self.assertFalse(form.is_valid())
        self.assertEqual("Only IPv6 addresses can be assigned SLAAC type", form.errors["type"][0])
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetIntegrationRuntimeObjectMetadatumResult',
'AwaitableGetIntegrationRuntimeObjectMetadatumResult',
'get_integration_runtime_object_metadatum',
'get_integration_runtime_object_metadatum_output',
]
@pulumi.output_type
class GetIntegrationRuntimeObjectMetadatumResult:
    """
    A list of SSIS object metadata.
    """
    def __init__(__self__, next_link=None, METHOD_NAME=None):
        # Defensive checks: built by the Pulumi runtime from the raw invoke result.
        if next_link and not isinstance(next_link, str):
            raise TypeError("Expected argument 'next_link' to be a str")
        pulumi.set(__self__, "next_link", next_link)
        if METHOD_NAME and not isinstance(METHOD_NAME, list):
            raise TypeError("Expected argument 'value' to be a list")
        pulumi.set(__self__, "value", METHOD_NAME)

    @property
    @pulumi.getter(name="nextLink")
    def next_link(self) -> Optional[str]:
        """
        The link to the next page of results, if any remaining results exist.
        """
        return pulumi.get(self, "next_link")

    @property
    @pulumi.getter
    def METHOD_NAME(self) -> Optional[Sequence[Any]]:
        """
        List of SSIS object metadata.
        """
        return pulumi.get(self, "value")
class AwaitableGetIntegrationRuntimeObjectMetadatumResult(GetIntegrationRuntimeObjectMetadatumResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Standard Pulumi shim: the result is already resolved; the
        # unreachable `yield` just makes this a generator so it is awaitable.
        if False:
            yield self
        return GetIntegrationRuntimeObjectMetadatumResult(
            next_link=self.next_link,
            METHOD_NAME=self.METHOD_NAME)
def get_integration_runtime_object_metadatum(factory_name: Optional[str] = None,
                                             integration_runtime_name: Optional[str] = None,
                                             metadata_path: Optional[str] = None,
                                             resource_group_name: Optional[str] = None,
                                             opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetIntegrationRuntimeObjectMetadatumResult:
    """
    Get a SSIS integration runtime object metadata by specified path. The return is pageable metadata list.

    :param str factory_name: The factory name.
    :param str integration_runtime_name: The integration runtime name.
    :param str metadata_path: Metadata path.
    :param str resource_group_name: The resource group name.
    """
    __args__ = dict()
    __args__['factoryName'] = factory_name
    __args__['integrationRuntimeName'] = integration_runtime_name
    __args__['metadataPath'] = metadata_path
    __args__['resourceGroupName'] = resource_group_name
    # Merge provider-wide invoke defaults (version, plugin URL, ...) with caller opts.
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:datafactory/v20180601:getIntegrationRuntimeObjectMetadatum', __args__, opts=opts, typ=GetIntegrationRuntimeObjectMetadatumResult).METHOD_NAME
    return AwaitableGetIntegrationRuntimeObjectMetadatumResult(
        next_link=pulumi.get(__ret__, 'next_link'),
        METHOD_NAME=pulumi.get(__ret__, 'value'))
@_utilities.lift_output_func(get_integration_runtime_object_metadatum)
def get_integration_runtime_object_metadatum_output(factory_name: Optional[pulumi.Input[str]] = None,
                                                    integration_runtime_name: Optional[pulumi.Input[str]] = None,
                                                    metadata_path: Optional[pulumi.Input[Optional[str]]] = None,
                                                    resource_group_name: Optional[pulumi.Input[str]] = None,
                                                    opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetIntegrationRuntimeObjectMetadatumResult]:
    """
    Get a SSIS integration runtime object metadata by specified path. The return is pageable metadata list.

    :param str factory_name: The factory name.
    :param str integration_runtime_name: The integration runtime name.
    :param str metadata_path: Metadata path.
    :param str resource_group_name: The resource group name.
    """
    # Body intentionally empty: lift_output_func wraps the plain invoke above.
    ...
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Iterable
from ...serialization.serializables import FieldTypes, KeyField, TupleField
from ... import opcodes as OperandDef
from ..operands import TensorHasInput, TensorOperandMixin
from ..array_utils import as_same_device, device
def _get_squeeze_shape(shape, axis):
if axis is not None:
if isinstance(axis, Iterable):
axis = tuple(axis)
else:
axis = (axis,)
for ax in axis:
if shape[ax] != 1:
raise ValueError(
"cannot select an axis to squeeze out "
"which has size not equal to one"
)
shape = tuple(s for i, s in enumerate(shape) if i not in axis)
else:
axis = tuple(i for i, s in enumerate(shape) if s == 1)
shape = tuple(s for s in shape if s != 1)
return shape, axis
class TensorSqueeze(TensorHasInput, TensorOperandMixin):
    """Operand that removes length-1 dimensions; the output is a view."""

    _op_type_ = OperandDef.SQUEEZE

    _input = KeyField("input")
    # Axes squeezed out, as computed by _get_squeeze_shape.
    _axis = TupleField("axis", FieldTypes.int32)

    def __init__(self, axis=None, **kw):
        # create_view=True: squeezing never copies data, it only drops axes.
        super().__init__(_axis=axis, create_view=True, **kw)

    def on_output_modify(self, new_output):
        # Re-insert the squeezed axes (as None/newaxis) so a modified view can
        # be propagated back with the input tensor's original rank.
        slcs = [slice(None)] * new_output.ndim
        for axis in self._axis:
            slcs.insert(axis, None)
        return new_output[slcs]

    def on_input_modify(self, new_input):
        op = self.copy().reset_key()
        return op(new_input, self.outputs[0].shape)

    @property
    def axis(self):
        return self._axis

    def __call__(self, a, shape):
        return self.new_tensor([a], shape, order=a.order)

    @classmethod
    def tile(cls, op):
        # Squeeze each chunk independently: a size-1 axis has a single chunk
        # along it, so chunk indices and nsplits simply drop the squeezed axes.
        in_tensor = op.input
        out_tensor = op.outputs[0]
        axis_set = set(op.axis)
        out_chunks = []
        for c in in_tensor.chunks:
            chunk_op = op.copy().reset_key()
            chunk_shape = _get_squeeze_shape(c.shape, op.axis)[0]
            chunk_idx = tuple(idx for i, idx in enumerate(c.index) if i not in axis_set)
            out_chunk = chunk_op.new_chunk(
                [c], shape=chunk_shape, index=chunk_idx, order=out_tensor.order
            )
            out_chunks.append(out_chunk)
        nsplits = [
            nsplit for i, nsplit in enumerate(in_tensor.nsplits) if i not in axis_set
        ]
        new_op = op.copy()
        return new_op.new_tensors(
            op.inputs,
            op.outputs[0].shape,
            order=out_tensor.order,
            chunks=out_chunks,
            nsplits=nsplits,
        )

    @classmethod
    def execute(cls, ctx, op):
        (a,), device_id, xp = as_same_device(
            [ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True
        )
        # Delegate to the device-appropriate array module (e.g. numpy or cupy).
        with device(device_id):
            ctx[op.outputs[0].key] = xp.METHOD_NAME(a, axis=op.axis)
def METHOD_NAME(a, axis=None):
    """
    Remove single-dimensional entries from the shape of a tensor.

    Parameters
    ----------
    a : array_like
        Input data.
    axis : None or int or tuple of ints, optional
        Selects a subset of the single-dimensional entries in the
        shape. If an axis is selected with shape entry greater than
        one, an error is raised.

    Returns
    -------
    squeezed : Tensor
        The input tensor, but with all or a subset of the
        dimensions of length 1 removed. This is always `a` itself
        or a view into `a`.

    Raises
    ------
    ValueError
        If `axis` is not `None`, and an axis being squeezed is not of length 1

    See Also
    --------
    expand_dims : The inverse operation, adding singleton dimensions
    reshape : Insert, remove, and combine dimensions, and resize existing ones

    Examples
    --------
    >>> import mars.tensor as mt

    >>> x = mt.array([[[0], [1], [2]]])
    >>> x.shape
    (1, 3, 1)
    >>> mt.squeeze(x).shape
    (3,)
    >>> mt.squeeze(x, axis=0).shape
    (3, 1)
    >>> mt.squeeze(x, axis=1).shape
    Traceback (most recent call last):
    ...
    ValueError: cannot select an axis to squeeze out which has size not equal to one
    >>> mt.squeeze(x, axis=2).shape
    (1, 3)
    """
    # Validate `axis` first (may raise ValueError), THEN short-circuit:
    # the early return must not bypass validation of an invalid axis.
    shape, axis = _get_squeeze_shape(a.shape, axis)
    if 1 not in a.shape:
        # Nothing to squeeze; return the input unchanged (no new op node).
        return a
    op = TensorSqueeze(axis=axis, dtype=a.dtype, sparse=a.issparse())
    return op(a, shape)
"""
@package datacatalog.infomanager
@brief Class for managing info messages
in Data Catalog
Classes:
- infomanager::DataCatalogInfoManager
(C) 2020 by the GRASS Development Team
This program is free software under the GNU General Public License
(>=v2). Read the file COPYING that comes with GRASS for details.
@author Linda Kladivova
@author Anna Petrasova <kratochanna gmail.com>
@author Vaclav Petras <wenzeslaus gmail.com>
"""
import wx
from grass.script import gisenv
from grass.grassdb.checks import get_mapset_owner
class DataCatalogInfoManager:
"""Manager for all things related to info bar in Data Catalog"""
def __init__(self, infobar, giface):
self.infoBar = infobar
self._giface = giface
def ShowDataStructureInfo(self, onCreateLocationHandler):
"""Show info about the data hierarchy focused on the first-time user"""
buttons = [
(_("Create new Location"), onCreateLocationHandler),
(_("Learn more"), self._onLearnMore),
]
message = _(
"GRASS GIS helps you organize your data using Locations (projects) "
"which contain Mapsets (subprojects). All data in one Location is "
"in the same coordinate reference system (CRS).\n\n"
"You are currently in Mapset PERMANENT in default Location {loc} "
"which uses WGS 84 (EPSG:4326). "
"Consider creating a new Location with a CRS "
"specific to your area. You can do it now or anytime later from "
"the toolbar above."
).format(loc=gisenv()["LOCATION_NAME"])
self.infoBar.ShowMessage(message, wx.ICON_INFORMATION, buttons)
def ShowImportDataInfo(self, OnImportOgrLayersHandler, OnImportGdalLayersHandler):
"""Show info about the data import focused on the first-time user"""
buttons = [
(_("Import vector data"), OnImportOgrLayersHandler),
(_("Import raster data"), OnImportGdalLayersHandler),
]
message = _(
"You have successfully created a new Location {loc}. "
"Currently you are in its PERMANENT Mapset which is used for "
"storing your base maps to make them readily available in other "
"Mapsets. You can create new Mapsets for different tasks by right "
"clicking on the Location name.\n\n"
"To import data, go to the toolbar above or use the buttons below."
).format(loc=gisenv()["LOCATION_NAME"])
self.infoBar.ShowMessage(message, wx.ICON_INFORMATION, buttons)
def ShowLazyLoadingOn(self, setLazyLoadingOnHandler, doNotAskHandler):
"""Show info about lazy loading"""
message = _(
"Loading of Data catalog content took rather long. "
"To prevent delay, you can enable loading of current mapset only. "
"You can change that later in GUI Settings, General tab."
)
buttons = [
(_("Enable loading current mapset only"), setLazyLoadingOnHandler),
(_("No change, don't ask me again"), doNotAskHandler),
]
self.infoBar.ShowMessage(message, wx.ICON_INFORMATION, buttons)
def ShowFallbackSessionInfo(self, reason_id):
"""Show info when last used mapset is not usable"""
string = self._text_from_reason_id(reason_id)
message = _(
"{string} GRASS GIS has started in a temporary Location. "
"To continue, use Data Catalog below to switch to a different Location."
).format(
string=string,
)
self.infoBar.ShowMessage(message, wx.ICON_INFORMATION)
def METHOD_NAME(self, OnSwitchMapsetHandler):
"""Show info when last used mapset is locked"""
last_used_mapset_path = gisenv()["LAST_MAPSET_PATH"]
buttons = [(_("Switch to last used mapset"), OnSwitchMapsetHandler)]
message = _(
"Last used mapset in path '{mapsetpath}' is currently in use. "
"GRASS GIS has started in a temporary Location. "
"To continue, use Data Catalog below to switch to a different Location "
"or remove lock file and switch to the last used mapset."
).format(mapsetpath=last_used_mapset_path)
self.infoBar.ShowMessage(message, wx.ICON_INFORMATION, buttons)
def _text_from_reason_id(self, reason_id):
"""Get string for infobar message based on the reason."""
last_used_mapset_path = gisenv()["LAST_MAPSET_PATH"]
reason = None
if reason_id == "non-existent":
reason = _(
"Last used mapset in path '{mapsetpath}' does not exist."
).format(mapsetpath=last_used_mapset_path)
elif reason_id == "invalid":
reason = _("Last used mapset in path '{mapsetpath}' is invalid.").format(
mapsetpath=last_used_mapset_path
)
elif reason_id == "different-owner":
owner = get_mapset_owner(last_used_mapset_path)
reason = _(
"Last used mapset in path '{mapsetpath}' has different owner {owner}."
).format(owner=owner, mapsetpath=last_used_mapset_path)
return reason
    def _onLearnMore(self, event):
        """Open the manual entry about the GRASS database concept."""
        self._giface.Help(entry="grass_database")
6,252 | delete dataset | # coding=utf-8
# Copyright 2021-present, the Recognai S.L. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from fastapi import APIRouter, Body, Depends, Security
from pydantic import parse_obj_as
from argilla.server.apis.v0.helpers import deprecate_endpoint
from argilla.server.apis.v0.models.commons.params import (
CommonTaskHandlerDependencies,
OptionalWorkspaceRequestDependency,
)
from argilla.server.errors import EntityNotFoundError
from argilla.server.models import User
from argilla.server.schemas.v0.datasets import CopyDatasetRequest, CreateDatasetRequest, Dataset, UpdateDatasetRequest
from argilla.server.security import auth
from argilla.server.services.datasets import DatasetsService
router = APIRouter(tags=["datasets"], prefix="/datasets")
@deprecate_endpoint(
    "/",
    new_path="",
    router_method=router.get,
    response_model=List[Dataset],
    response_model_exclude_none=True,
    operation_id="list_datasets",
)
async def list_datasets(
    workspace_request: OptionalWorkspaceRequestDependency = Depends(),
    service: DatasetsService = Depends(DatasetsService.get_instance),
    current_user: User = Security(auth.get_current_user),
) -> List[Dataset]:
    """List the datasets visible to the current user, optionally
    restricted to a single workspace."""
    workspace = workspace_request.workspace
    workspaces = None if workspace is None else [workspace]
    found = await service.list(user=current_user, workspaces=workspaces)
    return parse_obj_as(List[Dataset], found)
@router.post(
    "",
    response_model=Dataset,
    response_model_exclude_none=True,
    operation_id="create_dataset",
    name="create_dataset",
    description="Create a new dataset",
)
async def create_dataset(
    request: CreateDatasetRequest = Body(..., description="The request dataset info"),
    ws_params: CommonTaskHandlerDependencies = Depends(),
    datasets: DatasetsService = Depends(DatasetsService.get_instance),
    current_user: User = Security(auth.get_current_user),
) -> Dataset:
    """Create a new dataset, defaulting its workspace to the one carried
    by the common request parameters when none was given explicitly."""
    request.workspace = request.workspace or ws_params.workspace
    created = await datasets.create_dataset(user=current_user, dataset=request)
    return Dataset.from_orm(created)
@router.get("/{name}", response_model=Dataset, response_model_exclude_none=True, operation_id="get_dataset")
async def get_dataset(
    name: str,
    ds_params: CommonTaskHandlerDependencies = Depends(),
    service: DatasetsService = Depends(DatasetsService.get_instance),
    current_user: User = Security(auth.get_current_user),
) -> Dataset:
    """Fetch a single dataset by name within the request workspace."""
    found = await service.find_by_name(user=current_user, name=name, workspace=ds_params.workspace)
    return Dataset.from_orm(found)
@router.patch("/{name}", operation_id="update_dataset", response_model=Dataset, response_model_exclude_none=True)
async def update_dataset(
    name: str,
    request: UpdateDatasetRequest,
    ds_params: CommonTaskHandlerDependencies = Depends(),
    service: DatasetsService = Depends(DatasetsService.get_instance),
    current_user: User = Security(auth.get_current_user),
) -> Dataset:
    """Patch the tags and metadata of an existing dataset."""
    target = await service.find_by_name(user=current_user, name=name, workspace=ds_params.workspace)
    updated = await service.update(user=current_user, dataset=target, tags=request.tags, metadata=request.metadata)
    return Dataset.from_orm(updated)
@router.delete("/{name}", operation_id="delete_dataset")
async def METHOD_NAME(
    name: str,
    ds_params: CommonTaskHandlerDependencies = Depends(),
    service: DatasetsService = Depends(DatasetsService.get_instance),
    current_user: User = Security(auth.get_current_user),
):
    """Delete a dataset by name.

    A missing dataset is silently ignored so the endpoint stays idempotent.
    """
    try:
        target = await service.find_by_name(user=current_user, name=name, workspace=ds_params.workspace)
        await service.delete(user=current_user, dataset=target)
    except EntityNotFoundError:
        # Nothing to delete: treat as success.
        pass
@router.put("/{name}:close", operation_id="close_dataset")
async def close_dataset(
    name: str,
    ds_params: CommonTaskHandlerDependencies = Depends(),
    service: DatasetsService = Depends(DatasetsService.get_instance),
    current_user: User = Security(auth.get_current_user),
):
    """Close the named dataset."""
    target = await service.find_by_name(user=current_user, name=name, workspace=ds_params.workspace)
    await service.close(user=current_user, dataset=target)
@router.put("/{name}:open", operation_id="open_dataset")
async def open_dataset(
    name: str,
    ds_params: CommonTaskHandlerDependencies = Depends(),
    service: DatasetsService = Depends(DatasetsService.get_instance),
    current_user: User = Security(auth.get_current_user),
):
    """Open the named dataset."""
    target = await service.find_by_name(user=current_user, name=name, workspace=ds_params.workspace)
    await service.open(user=current_user, dataset=target)
@router.put("/{name}:copy", operation_id="copy_dataset", response_model=Dataset, response_model_exclude_none=True)
async def copy_dataset(
    name: str,
    copy_request: CopyDatasetRequest,
    ds_params: CommonTaskHandlerDependencies = Depends(),
    service: DatasetsService = Depends(DatasetsService.get_instance),
    current_user: User = Security(auth.get_current_user),
) -> Dataset:
    """Duplicate a dataset, optionally into another workspace and with
    overridden tags/metadata taken from the copy request."""
    source_ds = await service.find_by_name(user=current_user, name=name, workspace=ds_params.workspace)
    cloned = await service.copy_dataset(
        user=current_user,
        dataset=source_ds,
        copy_name=copy_request.name,
        copy_workspace=copy_request.target_workspace,
        copy_tags=copy_request.tags,
        copy_metadata=copy_request.metadata,
    )
    return Dataset.from_orm(cloned)
6,253 | get eg | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Sandeep Kasargod (sandeep@vexata.com)
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: vexata_eg
short_description: Manage export groups on Vexata VX100 storage arrays
description:
- Create or delete export groups on a Vexata VX100 array.
- An export group is a tuple of a volume group, initiator group and port
group that allows a set of volumes to be exposed to one or more hosts
through specific array ports.
author:
- Sandeep Kasargod (@vexata)
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
name:
description:
- Export group name.
required: true
type: str
state:
description:
- Creates export group when present or delete when absent.
default: present
choices: [ present, absent ]
type: str
vg:
description:
- Volume group name.
type: str
ig:
description:
- Initiator group name.
type: str
pg:
description:
- Port group name.
type: str
extends_documentation_fragment:
- community.general.vexata.vx100
- community.general.attributes
'''
EXAMPLES = r'''
- name: Create export group named db_export.
community.general.vexata_eg:
name: db_export
vg: dbvols
ig: dbhosts
pg: pg1
state: present
array: vx100_ultra.test.com
user: admin
password: secret
- name: Delete export group named db_export
community.general.vexata_eg:
name: db_export
state: absent
array: vx100_ultra.test.com
user: admin
password: secret
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.vexata import (
argument_spec, get_array, required_together)
def METHOD_NAME(module, array):
    """Retrieve a named eg if it exists, None if absent.

    :param module: AnsibleModule instance (provides params and fail_json).
    :param array: Vexata array client providing list_egs().
    """
    name = module.params['name']
    try:
        egs = array.list_egs()
        # filter() returns a lazy iterator on Python 3 that does not
        # support len(); materialize the matches into a list first.
        matches = [eg for eg in egs if eg['name'] == name]
        if len(matches) == 1:
            return matches[0]
        return None
    except Exception:
        module.fail_json(msg='Error while attempting to retrieve export groups.')
def get_vg_id(module, array):
    """Retrieve a named vg's id if it exists, error if absent."""
    name = module.params['vg']
    try:
        vgs = array.list_vgs()
        # filter() is a lazy iterator on Python 3 and does not support
        # len(); collect the matches into a list first.
        matches = [vg for vg in vgs if vg['name'] == name]
        if len(matches) == 1:
            return matches[0]['id']
        module.fail_json(msg='Volume group {0} was not found.'.format(name))
    except Exception:
        module.fail_json(msg='Error while attempting to retrieve volume groups.')
def get_ig_id(module, array):
    """Retrieve a named ig's id if it exists, error if absent."""
    name = module.params['ig']
    try:
        igs = array.list_igs()
        # filter() is a lazy iterator on Python 3 and does not support
        # len(); collect the matches into a list first.
        matches = [ig for ig in igs if ig['name'] == name]
        if len(matches) == 1:
            return matches[0]['id']
        module.fail_json(msg='Initiator group {0} was not found.'.format(name))
    except Exception:
        module.fail_json(msg='Error while attempting to retrieve initiator groups.')
def get_pg_id(module, array):
    """Retrieve a named pg's id if it exists, error if absent."""
    name = module.params['pg']
    try:
        pgs = array.list_pgs()
        # filter() is a lazy iterator on Python 3 and does not support
        # len(); collect the matches into a list first.
        matches = [pg for pg in pgs if pg['name'] == name]
        if len(matches) == 1:
            return matches[0]['id']
        module.fail_json(msg='Port group {0} was not found.'.format(name))
    except Exception:
        module.fail_json(msg='Error while attempting to retrieve port groups.')
def create_eg(module, array):
    """Create a new export group from the configured vg, ig and pg names."""
    eg_name = module.params['name']
    # Resolve the component group ids before honoring check mode, matching
    # the original flow (lookups may fail even in check mode).
    vg_id = get_vg_id(module, array)
    ig_id = get_ig_id(module, array)
    pg_id = get_pg_id(module, array)
    changed = False
    if module.check_mode:
        module.exit_json(changed=changed)
    try:
        eg = array.create_eg(
            eg_name,
            'Ansible export group',
            (vg_id, ig_id, pg_id))
        if not eg:
            raise Exception
        module.log(msg='Created export group {0}'.format(eg_name))
        changed = True
    except Exception:
        module.fail_json(msg='Export group {0} create failed.'.format(eg_name))
    module.exit_json(changed=changed)
def delete_eg(module, array, eg):
    """Delete an existing export group."""
    eg_name = eg['name']
    changed = False
    if module.check_mode:
        module.exit_json(changed=changed)
    try:
        ok = array.delete_eg(eg['id'])
        if not ok:
            raise Exception
        module.log(msg='Export group {0} deleted.'.format(eg_name))
        changed = True
    except Exception:
        module.fail_json(msg='Export group {0} delete failed.'.format(eg_name))
    module.exit_json(changed=changed)
def main():
    """Module entry point: ensure the export group is present or absent."""
    arg_spec = argument_spec()
    # Module-specific arguments on top of the shared Vexata connection spec.
    arg_spec.update(
        dict(
            name=dict(type='str', required=True),
            state=dict(type='str', default='present', choices=['present', 'absent']),
            vg=dict(type='str'),
            ig=dict(type='str'),
            pg=dict(type='str')
        )
    )
    module = AnsibleModule(arg_spec,
                           supports_check_mode=True,
                           required_together=required_together())
    state = module.params['state']
    array = get_array(module)
    eg = METHOD_NAME(module, array)
    if state == 'present' and not eg:
        create_eg(module, array)
    elif state == 'absent' and eg:
        delete_eg(module, array, eg)
    else:
        # Already in the desired state: report no change.
        module.exit_json(changed=False)
if __name__ == '__main__':
    main()
6,254 | get backend | # Copyright (c) 2023 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
from copy import deepcopy
from enum import Enum
from typing import List, TypeVar
TModel = TypeVar("TModel")
class BackendType(Enum):
    """Identifiers for the deep-learning frameworks supported as backends."""
    TORCH = "Torch"
    TENSORFLOW = "Tensorflow"
    ONNX = "ONNX"
    OPENVINO = "OpenVINO"
def get_available_backends() -> List[BackendType]:
    """
    Returns a list of available backends.

    A backend counts as available when its framework package can be
    imported in the current environment.

    :return: A list of available backends.
    """
    probe_modules = {
        BackendType.TORCH: "torch",
        BackendType.TENSORFLOW: "tensorflow",
        BackendType.ONNX: "onnx",
        BackendType.OPENVINO: "openvino.runtime",
    }
    available = []
    for backend, module_name in probe_modules.items():
        try:
            importlib.import_module(module_name)
        except ImportError:
            continue
        available.append(backend)
    return available
def is_torch_model(model: TModel) -> bool:
    """
    Checks whether the given object is a PyTorch model.

    :param model: A target model.
    :return: True if the model is an instance of torch.nn.Module, otherwise False.
    """
    import torch

    model_is_torch = isinstance(model, torch.nn.Module)
    return model_is_torch
def is_tensorflow_model(model: TModel) -> bool:
    """
    Checks whether the given object is a TensorFlow model.

    :param model: A target model.
    :return: True if the model is an instance of tensorflow.Module, otherwise False.
    """
    import tensorflow

    model_is_tf = isinstance(model, tensorflow.Module)
    return model_is_tf
def is_onnx_model(model: TModel) -> bool:
    """
    Checks whether the given object is an ONNX model.

    :param model: A target model.
    :return: True if the model is an instance of onnx.ModelProto, otherwise False.
    """
    import onnx

    model_is_onnx = isinstance(model, onnx.ModelProto)
    return model_is_onnx
def is_openvino_model(model: TModel) -> bool:
    """
    Checks whether the given object is an OpenVINO model.

    :param model: A target model.
    :return: True if the model is an instance of openvino.runtime.Model, otherwise False.
    """
    import openvino.runtime as ov

    model_is_ov = isinstance(model, ov.Model)
    return model_is_ov
def is_openvino_compiled_model(model: TModel) -> bool:
    """
    Checks whether the given object is a compiled OpenVINO model.

    :param model: A target model.
    :return: True if the model is an instance of openvino.runtime.CompiledModel, otherwise False.
    """
    import openvino.runtime as ov

    model_is_compiled = isinstance(model, ov.CompiledModel)
    return model_is_compiled
def METHOD_NAME(model: TModel) -> BackendType:
    """
    Returns the NNCF backend name string inferred from the type of the model object passed into this function.

    :param model: The framework-specific model.
    :return: A BackendType representing the correct NNCF backend to be used when working with the framework.
    """
    available_backends = get_available_backends()
    # Probe the frameworks in a fixed priority order; only installed
    # frameworks are consulted so the type checks never import a missing one.
    checkers = [
        (BackendType.TORCH, is_torch_model),
        (BackendType.TENSORFLOW, is_tensorflow_model),
        (BackendType.ONNX, is_onnx_model),
        (BackendType.OPENVINO, is_openvino_model),
    ]
    for backend, type_checker in checkers:
        if backend in available_backends and type_checker(model):
            return backend
    raise RuntimeError(
        "Could not infer the backend framework from the model type because "
        "the framework is not available or the model type is unsupported. "
        "The available frameworks found: {}.".format(", ".join([b.value for b in available_backends]))
    )
def copy_model(model: TModel) -> TModel:
    """
    Function to create copy of the backend-specific model.

    :param model: the backend-specific model instance
    :return: Copy of the backend-specific model instance.
    """
    backend = METHOD_NAME(model)
    if backend == BackendType.OPENVINO:
        # TODO(l-bat): Remove after fixing ticket: 100919
        return model.clone()
    if backend == BackendType.TENSORFLOW:
        # deepcopy and tensorflow.keras.models.clone_model does not work correctly on 2.8.4 version
        from nncf.tensorflow.graph.model_transformer import TFModelTransformer
        from nncf.tensorflow.graph.transformations.layout import TFTransformationLayout

        return TFModelTransformer(model).transform(TFTransformationLayout())
    return deepcopy(model)
6,255 | build | import pathlib
import os
from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
from conan.tools.microsoft import check_min_vs, is_msvc
from conan.tools.apple import is_apple_os
from conan.tools.files import apply_conandata_patches, get, copy, rm
from conan.tools.METHOD_NAME import check_min_cppstd
from conan.tools.scm import Version
from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout
from conan.tools.env import VirtualBuildEnv
required_conan_version = ">=1.53.0"
class PackageConan(ConanFile):
    """Conan recipe for OpenAssetIO, optionally with its Python bridge."""
    name = "openassetio"
    description = "An open-source interoperability standard for tools and content management systems used in media production."
    license = "Apache-2.0"
    url = "https://github.com/conan-io/conan-center-index"
    homepage = "https://github.com/OpenAssetIO/OpenAssetIO"
    topics = ("asset-pipeline", "vfx", "cg", "assetmanager", "vfx-pipeline")
    package_type = "library"
    settings = "os", "arch", "compiler", "build_type"
    options = {
        "shared": [True, False],
        "with_python": [True, False],
    }
    default_options = {
        "shared": False,
        "with_python": True,
    }
    # Shorten build paths; presumably to avoid Windows MAX_PATH issues — TODO confirm.
    short_paths = True
    @property
    def _min_cppstd(self):
        # Minimum C++ standard required to build OpenAssetIO.
        return 17
    @property
    def _compilers_minimum_version(self):
        # Oldest compiler versions accepted by validate() for the C++17 build.
        return {
            "gcc": "9",
            "clang": "12",
            "apple-clang": "12",
        }
    def configure(self):
        if self.options.with_python:
            if is_msvc(self):
                # Required to create import .lib for building extension module.
                self.options["cpython"].shared = True
    def layout(self):
        cmake_layout(self, src_folder="src")
    def requirements(self):
        self.requires("tomlplusplus/3.2.0")
        if self.options.with_python:
            # TODO: cpython requires ncurses/6.2 but no pre-built package exists.
            self.requires("ncurses/6.3")
            self.requires("cpython/3.9.7")
            self.requires("pybind11/2.10.1")
    def validate(self):
        # Fail fast on platforms/toolchains this recipe cannot build.
        if is_apple_os(self):
            raise ConanInvalidConfiguration(
                f"{self.ref} does not support MacOS at this time"
            )
        if self.settings.compiler.cppstd:
            check_min_cppstd(self, self._min_cppstd)
        if is_msvc(self) and not self.dependencies["cpython"].options.shared:
            raise ConanInvalidConfiguration(f"{self.ref} requires cpython:shared=True when using MSVC compiler")
        check_min_vs(self, 191)
        if not is_msvc(self):
            minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False)
            if minimum_version and Version(self.settings.compiler.version) < minimum_version:
                raise ConanInvalidConfiguration(
                    f"{self.ref} requires C++{self._min_cppstd}, which your compiler does not support."
                )
    def build_requirements(self):
        self.tool_requires("cmake/3.25.3")
    def source(self):
        get(self, **self.conan_data["sources"][self.version], strip_root=True)
    def generate(self):
        # Produce the CMake toolchain/dependency files and build environment.
        tc = CMakeToolchain(self)
        tc.variables["OPENASSETIO_ENABLE_TESTS"] = not self.conf.get("tools.build:skip_test", default=True, check_type=bool)
        tc.variables["OPENASSETIO_GLIBCXX_USE_CXX11_ABI"] = self.settings.get_safe("compiler.libcxx") == "libstdc++11"
        tc.variables["OPENASSETIO_ENABLE_PYTHON"] = self.options.with_python
        if self.options.with_python:
            tc.variables["Python_EXECUTABLE"] = self._python_exe
            if is_msvc(self):
                tc.variables["Python_LIBRARY"] = self._python_windows_lib
        tc.generate()
        tc = CMakeDeps(self)
        tc.generate()
        tc = VirtualBuildEnv(self)
        tc.generate()
    @property
    def _python_exe(self):
        # Path to the cpython interpreter from the dependency package.
        # TODO: update to V2 once cpython is updated
        return pathlib.Path(self.deps_user_info["cpython"].python).as_posix()
    @property
    def _python_windows_lib(self):
        # Import library (.lib) of the embedded cpython, needed by MSVC.
        pth = pathlib.Path(
            self.dependencies["cpython"].package_folder,
            self.dependencies["cpython"].cpp_info.components["embed"].libdirs[0],
            self.dependencies["cpython"].cpp_info.components["embed"].libs[0])
        pth = pth.with_suffix(".lib")
        return pth.as_posix()
    def METHOD_NAME(self):
        apply_conandata_patches(self)
        cmake = CMake(self)
        cmake.configure()
        cmake.METHOD_NAME()
    def package_id(self):
        if self.options.with_python:
            # Only track cpython's major.minor so patch bumps do not rebuild.
            self.info.requires["cpython"].minor_mode()
    def package(self):
        copy(self, pattern="LICENSE", dst=os.path.join(self.package_folder, "licenses"), src=self.source_folder)
        cmake = CMake(self)
        cmake.install()
        # Drop the generated CMake config files; Conan generates its own.
        rm(self, "OpenAssetIOConfig*.cmake", os.path.join(self.package_folder, "lib", "cmake", "OpenAssetIO"))
        rm(self, "OpenAssetIOTargets*.cmake", os.path.join(self.package_folder, "lib", "cmake", "OpenAssetIO"))
        rm(self, "*.pdb", os.path.join(self.package_folder, "lib"))
        rm(self, "*.pdb", os.path.join(self.package_folder, "bin"))
    def package_info(self):
        self.cpp_info.libs = []
        self.cpp_info.set_property("cmake_file_name", "OpenAssetIO")
        self.cpp_info.set_property("cmake_target_name", "OpenAssetIO::OpenAssetIO")
        self.cpp_info.set_property("cmake_build_modules", [os.path.join("lib", "cmake", "OpenAssetIO", "OpenAssetIOVariables.cmake")])
        self.cpp_info.builddirs = [os.path.join("lib", "cmake")]
        self.cpp_info.components["openassetio-core"].set_property("cmake_target_name", "OpenAssetIO::openassetio-core")
        self.cpp_info.components["openassetio-core"].libs = ["openassetio"]
        if self.options.with_python:
            self.cpp_info.components["openassetio-python-bridge"].set_property("cmake_target_name", "OpenAssetIO::openassetio-python-bridge")
            self.cpp_info.components["openassetio-python-bridge"].requires = ["openassetio-core"]
            self.cpp_info.components["openassetio-python-bridge"].libs = ["openassetio-python"]
        # TODO: to remove in conan v2 once cmake_find_package_* generators removed
        self.cpp_info.names["cmake_find_package"] = "OpenAssetIO"
        self.cpp_info.names["cmake_find_package_multi"] = "OpenAssetIO"
6,256 | main | from __future__ import nested_scopes, generators, division, absolute_import
from __future__ import with_statement, print_function
import sys, os
# Copyright 2021 Richardson Lab at Duke University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from suitenamedefs import globals
from iotbx.data_manager import DataManager # Load in the DataManager
from libtbx import phil
from libtbx.utils import Sorry
# from mmtbx.validation import utils
# from cctbx import geometry_restraints
# from collections import defaultdict
from diangle import getResidueDihedrals
# IMPORT TO EXPORT:
from mmtbx.suitename.suitename import compute, write, \
finalStats, clearStats
# The following are the options available, in Phil format,
# for human and computer comprehension.
philOptions = """
suitename {
# input
infile=""
.type=str
.help="the file to process"
anglefields = 9
.type=int
.help="number of angle fields provided, for textual input only"
pointidfields = 7
.type=int
.help="number of point id fields before the angle fields"
ptid=0
.type=int
.help="number of point id fields before the angle fields"
residuein=false
.type=bool
.help="expect dangle format giving residues"
suitein=false
.type=bool
.help="expect kinemage format giving suites directly"
# output
string=False
.type=bool
.help="output in string format, 3 characters per suite"
kinemage=False
.type=bool
.help="output in kinemage format, useful for visualization"
report=true
.type=bool
.help="output as a report, giving statistical details"
chart=False
.type=bool
.help="modifier to standard report, output without statistical summary"
nosequence = False
.type=bool
.help="modifier to string format, do not include base letters"
causes=False
.type=bool
.help="output extra details concerning the causes of each assignment made"
test=False
.type=bool
.help="display a lot of additional information about program internals"
# compute
satellites=False
.type=bool
.help="use the special satelliteWidths values for satellites"
nowannabe=False
.type=bool
.help="do not consider 'wannabe' clusters"
noinc=False
.type=bool
.help="do not display incomplete suites"
etatheta=False
.type=bool
altid="A"
.type=str
.help="which alternate conformer to use (A, B, etc)"
altidfield = 6
.type=int
.help="which field (1-based) gives the alternate conformer code"
version=false
.type=bool
.help="give the version number of suite name"
# deprecated and automatically true:
oneline=false
.type=bool
}
"""
def METHOD_NAME(options, outFile=None, errorFile=None):
  """The main track for handling PDB and CIF input formats, which will involve
  parsing the model hierarchy to get the dihedral angles for ourselves"""
  setOptions(options)
  import suiteninput  # must be imported AFTER setOptions
  outFile = outFile or sys.stdout
  errorFile = errorFile or sys.stderr
  inFile = options.infile
  model = loadModel(inFile)
  residues = getResidueDihedrals(model, options.altid,
                                 name=os.path.splitext(inFile)[0],
                                 errorFile=errorFile)
  if residues is None or len(residues) == 0:
    # Nothing usable was extracted from the model; emit nothing.
    return
  # The last suite is always incomplete, so drop it before classifying.
  suiteList = suiteninput.buildSuites(residues)[:-1]
  suiteList = compute(suiteList)
  finalStats()
  write(outFile, suiteList)
  clearStats()
def parseOptions(optionString, errorFile=None):
  """ Use optionString to modify the defaults given in philOptions above.
  Returns a Python object that has an attribute for every option listed
  in philOptions. Example: "chart=true noinc=true causes=true"
  The values in optionString are case insensitive.
  """
  opt2 = """ # use this for more complex option types e.g. multiples
  suitename {
    report=true
    chart=true
  } """
  # user_phil = phil.parse(opt2)
  master_phil = phil.parse(philOptions)
  interp = master_phil.command_line_argument_interpreter()
  optionList = optionString.split()
  # Default to no user overrides; previously ``user_phil`` was left unbound
  # when interp.process() raised Sorry, making the fetch() below crash with
  # an UnboundLocalError instead of falling back to the defaults.
  user_phil = []
  try:
    user_phil = interp.process(args=optionList)
  except Sorry as e:
    if errorFile is None: errorFile = sys.stderr
    print(e, file=errorFile)
  working_phil = master_phil.fetch(sources=user_phil)
  full_options = working_phil.extract()
  return full_options.suitename
def setOptions(optionsIn):
  """optionsIn may be the result of parseOptions above
  or the result of an argparse parse_args operation"""
  # NOTE(review): imported locally, presumably to break an import cycle
  # with mmtbx.suitename.suitename — confirm before moving to module level.
  from mmtbx.suitename.suitename import loadOptions
  globals.options = optionsIn
  loadOptions(optionsIn)
def loadModel(filename):
  """Read a model file via the CCTBX DataManager and return the model."""
  dm = DataManager()
  # Allow the DataManager to overwrite files with the same name.
  dm.set_overwrite(True)
  # Restraints are deliberately not computed here: the Restraints Manager
  # will not operate on unfamiliar residues (KPB 6/10/2021).
  return dm.get_model(filename)
def testResidues(model):
  """Debug helper: print point IDs and dihedral angles of every residue."""
  for residue in getResidueDihedrals(model):
    print(residue.pointIDs, " : ", residue.angle)
|
6,257 | to list | from tensorflow.keras.utils import Sequence
import numpy as np
from .processor import SequentialProcessor
class SequenceExtra(Sequence):
    """Base keras ``Sequence`` that turns a processing ``pipeline`` into
    batched ``(inputs, labels)`` pairs.

    The pipeline's last processor must expose the names and shapes of the
    model inputs and labels; subclasses implement ``process_batch``.
    """
    def __init__(self, pipeline, batch_size, as_list=False):
        if not isinstance(pipeline, SequentialProcessor):
            raise ValueError('``processor`` must be a ``SequentialProcessor``')
        # The final processor acts as the output wrapper describing
        # tensor names/shapes for inputs and labels.
        self.output_wrapper = pipeline.processors[-1]
        self.pipeline = pipeline
        self.inputs_name_to_shape = self.output_wrapper.inputs_name_to_shape
        self.labels_name_to_shape = self.output_wrapper.labels_name_to_shape
        self.ordered_input_names = self.output_wrapper.ordered_input_names
        self.ordered_label_names = self.output_wrapper.ordered_label_names
        self.batch_size = batch_size
        self.as_list = as_list
    def make_empty_batches(self, name_to_shape):
        """Allocate zero-filled per-name arrays of shape (batch_size, *shape)."""
        batch = {}
        for name, shape in name_to_shape.items():
            batch[name] = np.zeros((self.batch_size, *shape))
        return batch
    def METHOD_NAME(self, batch, names):
        """Return the batch values as a list ordered by ``names``."""
        return [batch[name] for name in names]
    def _place_sample(self, sample, sample_arg, batch):
        # Copy one processed sample into row ``sample_arg`` of each array.
        for name, data in sample.items():
            batch[name][sample_arg] = data
    def _get_unprocessed_batch(self, data, batch_index):
        # Slice the raw samples belonging to batch ``batch_index``.
        batch_arg_A = self.batch_size * (batch_index)
        batch_arg_B = self.batch_size * (batch_index + 1)
        unprocessed_batch = data[batch_arg_A:batch_arg_B]
        return unprocessed_batch
    def __getitem__(self, batch_index):
        """Build and return one processed batch as dicts or ordered lists."""
        inputs = self.make_empty_batches(self.inputs_name_to_shape)
        labels = self.make_empty_batches(self.labels_name_to_shape)
        inputs, labels = self.process_batch(inputs, labels, batch_index)
        if self.as_list:
            inputs = self.METHOD_NAME(inputs, self.ordered_input_names)
            labels = self.METHOD_NAME(labels, self.ordered_label_names)
        return inputs, labels
    def process_batch(self, inputs, labels, batch_index=None):
        # Subclasses fill the pre-allocated batch arrays here.
        raise NotImplementedError
class ProcessingSequence(SequenceExtra):
    """Sequence generator used for processing samples given in ``data``.

    # Arguments
        processor: Function, used for processing elements of ``data``.
        batch_size: Int.
        data: List. Each element of the list is processed by ``processor``.
        as_list: Bool, if True ``inputs`` and ``labels`` are dispatched as
            lists. If false ``inputs`` and ``labels`` are dispatched as
            dictionaries.
    """
    def __init__(self, processor, batch_size, data, as_list=False):
        self.data = data
        super(ProcessingSequence, self).__init__(processor, batch_size, as_list)
    def __len__(self):
        # Number of batches per epoch; the final batch may be partial.
        return int(np.ceil(len(self.data) / float(self.batch_size)))
    def process_batch(self, inputs, labels, batch_index):
        raw_samples = self._get_unprocessed_batch(self.data, batch_index)
        for slot, raw_sample in enumerate(raw_samples):
            processed = self.pipeline(raw_sample.copy())
            self._place_sample(processed['inputs'], slot, inputs)
            self._place_sample(processed['labels'], slot, labels)
        return inputs, labels
class GeneratingSequence(SequenceExtra):
    """Sequence generator used for generating samples.

    # Arguments
        processor: Function used for generating and processing ``samples``.
        batch_size: Int.
        num_steps: Int. Number of steps for each epoch.
        as_list: Bool, if True ``inputs`` and ``labels`` are dispatched as
            lists. If false ``inputs`` and ``labels`` are dispatched as
            dictionaries.
    """
    def __init__(self, processor, batch_size, num_steps, as_list=False):
        self.num_steps = num_steps
        super(GeneratingSequence, self).__init__(processor, batch_size, as_list)
    def __len__(self):
        # One epoch is a fixed number of generated batches.
        return self.num_steps
    def process_batch(self, inputs, labels, batch_index):
        for slot in range(self.batch_size):
            generated = self.pipeline()
            self._place_sample(generated['inputs'], slot, inputs)
            self._place_sample(generated['labels'], slot, labels)
        return inputs, labels
6,258 | exercise 1 | from __future__ import absolute_import, division, print_function
import mmtbx.model
import iotbx.pdb
from libtbx.utils import format_cpu_times, null_out
pdb_str_1="""
CRYST1 10.000 10.000 10.000 90.00 90.00 90.00 P 1
HELIX 1 1 THR A 1 THR A 2 1 6
HELIX 1 1 THR B 1 THR B 2 1 6
SHEET 1 A 2 THR A 1 THR A 3 0
SHEET 2 A 2 THR B 4 THR B 5 -1 O THR B 4 N THR A 2
MTRIX1 1 1.000000 0.000000 0.000000 0.00000 1
MTRIX2 1 0.000000 1.000000 0.000000 0.00000 1
MTRIX3 1 0.000000 0.000000 1.000000 0.00000 1
MTRIX1 2 0.496590 -0.643597 0.582393 0.00000
MTRIX2 2 0.867925 0.376088 -0.324443 0.00000
MTRIX3 2 -0.010221 0.666588 0.745356 0.00000
MTRIX1 3 -0.317946 -0.173437 0.932111 0.00000
MTRIX2 3 0.760735 -0.633422 0.141629 0.00000
MTRIX3 3 0.565855 0.754120 0.333333 0.00000
ATOM 1 N THR A 1 5.111 8.080 7.645 1.00 20.00 N
ATOM 2 CA THR A 1 5.000 6.722 7.125 1.00 20.00 C
ATOM 3 C THR A 1 5.075 5.694 8.249 1.00 20.00 C
ATOM 4 O THR A 4 5.890 5.818 9.163 1.00 20.00 O
ATOM 5 CB THR A 5 6.101 6.421 6.092 1.00 20.00 C
ATOM 6 OG1 THR A 6 6.001 7.343 5.000 1.00 20.00 O
ATOM 7 CG2 THR A 7 5.964 5.000 5.565 1.00 20.00 C
TER
END
"""
pdb_str_2="""
HELIX 1 1 THR A 1 THR A 2 1 6
SHEET 1 A 2 THR A 1 THR A 3 0
SHEET 2 A 2 THR B 4 THR B 5 -1 O THR B 4 N THR A 2
MTRIX1 1 1.000000 0.000000 0.000000 0.00000 1
MTRIX2 1 0.000000 1.000000 0.000000 0.00000 1
MTRIX3 1 0.000000 0.000000 1.000000 0.00000 1
MTRIX1 2 0.496590 -0.643597 0.582393 0.00000
MTRIX2 2 0.867925 0.376088 -0.324443 0.00000
MTRIX3 2 -0.010221 0.666588 0.745356 0.00000
MTRIX1 3 -0.317946 -0.173437 0.932111 0.00000
MTRIX2 3 0.760735 -0.633422 0.141629 0.00000
MTRIX3 3 0.565855 0.754120 0.333333 0.00000
ATOM 1 N THR A 1 5.111 8.080 7.645 1.00 20.00 N
ATOM 2 CA THR A 1 5.000 6.722 7.125 1.00 20.00 C
ATOM 3 C THR A 1 5.075 5.694 8.249 1.00 20.00 C
ATOM 4 O THR B 4 5.890 5.818 9.163 1.00 20.00 O
ATOM 5 CB THR B 5 6.101 6.421 6.092 1.00 20.00 C
ATOM 6 OG1 THR C 6 6.001 7.343 5.000 1.00 20.00 O
ATOM 7 CG2 THR C 7 5.964 5.000 5.565 1.00 20.00 C
TER
END
"""
def METHOD_NAME():
    """Exercise basic mmtbx model manager behavior on ``pdb_str_1``:
    atom counts, secondary-structure annotation filtering, restraint
    counts, and NCS constraint setup via automatic NCS search.
    """
    inp = iotbx.pdb.input(source_info=None, lines=pdb_str_1)
    model = mmtbx.model.manager(
        model_input = inp,
        log = null_out())
    model.process(make_restraints=True)
    # 7 ATOM records expanded by 3 MTRIX copies -> 21 atoms everywhere.
    assert model.get_number_of_atoms() == 21, model.get_number_of_atoms()
    assert model.get_hierarchy().atoms_size() == 21
    assert model.get_xray_structure().scatterers().size() == 21
    ss = model.get_ss_annotation()
    # print ss.as_pdb_str()
    # STOP()
    assert ss.get_n_helices() == 3
    # because the second strand contains chain B which is not in ATOM records
    # whole sheet got discarded.
    assert ss.get_n_sheets() == 0
    rm = model.get_restraints_manager()
    assert rm.geometry.pair_proxies().bond_proxies.simple.size() == 6
    # since No NCS was set, these functions return the whole thing and no
    # master selection
    assert model.get_master_hierarchy().atoms_size() == 21
    assert model.get_master_selection().size() == 0
    # print model.model_as_pdb()
    # print "="*40
    # Here we set NCS constraints
    inp = iotbx.pdb.input(source_info=None, lines=pdb_str_1)
    pdb_int_params = mmtbx.model.manager.get_default_pdb_interpretation_params()
    pdb_int_params.pdb_interpretation.ncs_search.enabled=True
    model = mmtbx.model.manager(
        model_input = inp,
        log = null_out())
    model.process(pdb_interpretation_params = pdb_int_params)
    # model.get_xray_structure()
    # NCS object exists after processing, but constraints only after setup.
    assert not model.ncs_constraints_present()
    assert model.get_ncs_obj() is not None
    model.setup_ncs_constraints_groups()
    # print model.get_ncs_obj()
    assert model.ncs_constraints_present()
    # Master copy is one of the three NCS copies: 7 atoms.
    assert model.get_master_hierarchy().atoms_size() == 7
    # print model.get_master_hierarchy().as_pdb_string()
    # print list(model.get_master_selection())
    assert list(model.get_master_selection()).count(True) == 7
def exercise_2():
    """
    Same as 1 but automatic NCS search procedure does not match short chains,
    in this case chains B,C, so they left out of NCS.
    Not clear if we should utilize MTRIX instead of searching for NCS
    because currently we don't output them and in consecutive runs NCS
    search would be utilized anyway, potentially yielding different groups.
    """
    inp = iotbx.pdb.input(source_info=None, lines=pdb_str_2)
    pdb_int_params = mmtbx.model.manager.get_default_pdb_interpretation_params()
    pdb_int_params.pdb_interpretation.ncs_search.enabled=True
    model = mmtbx.model.manager(
        model_input = inp,
        log = null_out())
    model.process(pdb_interpretation_params = pdb_int_params)
    # model.get_xray_structure()
    ss = model.get_ss_annotation()
    # Unlike exercise 1, all sheets survive here (chains A/B/C are present).
    assert ss.get_n_helices() == 3
    assert ss.get_n_sheets() == 3
    assert not model.ncs_constraints_present()
    assert model.get_ncs_obj() is not None
    model.setup_ncs_constraints_groups()
    # print model.get_ncs_obj()
    assert model.ncs_constraints_present()
    # Chains B and C (2 atoms each) are excluded from NCS, so the master
    # keeps them plus one 11-atom copy worth of chain A atoms: 15 total.
    assert model.get_master_hierarchy().atoms_size() == 15
    # print model.get_master_hierarchy().as_pdb_string()
    assert list(model.get_master_selection()).count(True) == 15
if (__name__ == "__main__"):
    # Run both regression exercises; format_cpu_times emits the standard
    # cctbx test-runner timing line before the final "OK".
    METHOD_NAME()
    exercise_2()
    print(format_cpu_times())
    print("OK")
6,259 | get branches | import datetime
import git
from django.conf import settings
import evennia
from evennia import CmdSet, InterruptCommand
from evennia.commands.default.muxcommand import MuxCommand
from evennia.utils.utils import list_to_string
class GitCommand(MuxCommand):
    """
    The shared functionality between git/git evennia.

    Subclasses supply ``directory``, ``repo_type`` and ``remote_link`` class
    attributes that select which repository the command operates on.
    """

    def parse(self):
        """
        Parse the arguments, set default arg to 'status' and check for
        existence of the currently targeted repo.

        Sets ``self.action``, ``self.args``, ``self.repo``, ``self.commit``
        and ``self.branch``; raises InterruptCommand (aborting the command)
        if the directory is not a git repository or HEAD is detached.
        """
        super().parse()
        if self.args:
            # First word is the sub-action ("status", "pull", ...); the rest
            # (if any) is passed through as the action's argument.
            split_args = self.args.strip().split(" ", 1)
            self.action = split_args[0]
            if len(split_args) > 1:
                self.args = "".join(split_args[1:])
            else:
                self.args = ""
        else:
            # Bare "git" defaults to "git status".
            self.action = "status"
            self.args = ""
        self.err_msgs = [
            "|rInvalid Git Repository|n:",
            "The {repo_type} repository is not recognized as a git directory.",
            "In order to initialize it as a git directory, you will need to access your terminal and run the following commands from within your directory:",
            "  git init",
            "  git remote add origin {remote_link}",
        ]
        try:
            self.repo = git.Repo(self.directory, search_parent_directories=True)
        except git.exc.InvalidGitRepositoryError:
            err_msg = "\n".join(self.err_msgs).format(
                repo_type=self.repo_type, remote_link=self.remote_link
            )
            self.caller.msg(err_msg)
            raise InterruptCommand
        self.commit = self.repo.head.commit
        try:
            self.branch = self.repo.active_branch.name
        except TypeError as type_err:
            # GitPython raises TypeError for a detached HEAD.
            self.caller.msg(type_err)
            raise InterruptCommand

    def short_sha(self, repo, hexsha):
        """
        Utility: Get the short SHA of a commit.
        """
        short_sha = repo.git.rev_parse(hexsha, short=True)
        return short_sha

    def get_status(self):
        """
        Retrieves the status of the active git repository, displaying
        unstaged changes/untracked files.
        """
        time_of_commit = datetime.datetime.fromtimestamp(self.commit.committed_date)
        status_msg = "\n".join(
            [
                f"Branch: |w{self.branch}|n ({self.repo.git.rev_parse(self.commit.hexsha, short=True)}) ({time_of_commit})",
                f"By {self.commit.author.email}: {self.commit.message}",
            ]
        )
        # Unstaged = files differing from the index (diff against None).
        changedFiles = {item.a_path for item in self.repo.index.diff(None)}
        if changedFiles:
            status_msg += f"Unstaged/uncommitted changes:|/  |g{'|/  '.join(changedFiles)}|n|/"
        if len(self.repo.untracked_files) > 0:
            status_msg += f"Untracked files:|/  |x{'|/  '.join(self.repo.untracked_files)}|n"
        return status_msg

    def METHOD_NAME(self):
        """
        Display current and available branches.
        """
        remote_refs = self.repo.remote().refs
        branch_msg = (
            f"Current branch: |w{self.branch}|n. Branches available: {list_to_string(remote_refs)}"
        )
        return branch_msg

    def checkout(self):
        """
        Check out a specific branch.

        Returns True on success (caller then restarts the server),
        False otherwise.
        """
        remote_refs = self.repo.remote().refs
        to_branch = self.args.strip().removeprefix(
            "origin/"
        )  # Slightly hacky, but git tacks on the origin/
        if to_branch not in remote_refs:
            self.caller.msg(f"Branch '{to_branch}' not available.")
            return False
        elif to_branch == self.branch:
            self.caller.msg(f"Already on |w{to_branch}|n. Maybe you want <git pull>?")
            return False
        else:
            try:
                self.repo.git.checkout(to_branch)
            except git.exc.GitCommandError as err:
                self.caller.msg("Couldn't checkout {} ({})".format(to_branch, err.stderr.strip()))
                return False
        self.msg(f"Checked out |w{to_branch}|n successfully. Server restart initiated.")
        return True

    def pull(self):
        """
        Attempt to pull new code.

        Returns True if new commits were pulled (caller then restarts the
        server), False if up to date or the pull failed.
        """
        old_commit = self.commit
        try:
            self.repo.remotes.origin.pull()
        except git.exc.GitCommandError as err:
            self.caller.msg("Couldn't pull {} ({})".format(self.branch, err.stderr.strip()))
            return False
        if old_commit == self.repo.head.commit:
            self.caller.msg("No new code to pull, no need to reset.\n")
            return False
        else:
            self.caller.msg(
                f"You have pulled new code. Server restart initiated.|/Head now at {self.repo.git.rev_parse(self.repo.head.commit.hexsha, short=True)}.|/Author: {self.repo.head.commit.author.name} ({self.repo.head.commit.author.email})|/{self.repo.head.commit.message.strip()}"
            )
        return True

    def func(self):
        """
        Provide basic Git functionality within the game.

        Dispatches on ``self.action`` (set in parse()); checkout/pull
        trigger a portal-side server restart on success.
        """
        caller = self.caller
        if self.action == "status":
            caller.msg(self.get_status())
        elif self.action == "branch" or (self.action == "checkout" and not self.args):
            # "git checkout" with no branch argument just lists branches.
            caller.msg(self.METHOD_NAME())
        elif self.action == "checkout":
            if self.checkout():
                evennia.SESSION_HANDLER.portal_restart_server()
        elif self.action == "pull":
            if self.pull():
                evennia.SESSION_HANDLER.portal_restart_server()
        else:
            caller.msg("You can only git status, git branch, git checkout, or git pull.")
            return
class CmdGitEvennia(GitCommand):
    """
    Pull the latest code from the evennia core or checkout a different branch.

    Usage:
      git evennia status        - View an overview of the evennia repository status.
      git evennia branch        - View available branches in evennia.
      git evennia checkout <branch> - Checkout a different branch in evennia.
      git evennia pull          - Pull the latest evennia code.

    For updating your local mygame repository, the same commands are available with 'git'.
    If there are any conflicts encountered, the command will abort. The command will reload your game after pulling new code automatically, but for some changes involving persistent scripts etc, you may need to manually restart.
    """

    key = "git evennia"
    locks = "cmd:pperm(Developer)"
    help_category = "System"
    # GitCommand configuration: operate on the installed evennia checkout.
    directory = settings.EVENNIA_DIR
    repo_type = "Evennia"
    remote_link = "https://github.com/evennia/evennia.git"
class CmdGit(GitCommand):
    """
    Pull the latest code from your repository or checkout a different branch.

    Usage:
      git status        - View an overview of your git repository.
      git branch        - View available branches.
      git checkout main - Checkout the main branch of your code.
      git pull          - Pull the latest code from your current branch.

    For updating evennia code, the same commands are available with 'git evennia'.
    If there are any conflicts encountered, the command will abort. The command will reload your game after pulling new code automatically, but for changes involving persistent scripts etc, you may need to manually restart.
    """

    key = "git"
    locks = "cmd:pperm(Developer)"
    help_category = "System"
    # GitCommand configuration: operate on the game directory; the remote
    # link is a placeholder shown in the "not a git repo" help text.
    directory = settings.GAME_DIR
    repo_type = "game"
    remote_link = "[your remote link]"
# CmdSet for easily installing all git commands at once
class GitCmdSet(CmdSet):
    """
    The git command set: bundles both the game-dir and evennia-dir
    git commands for one-step installation.
    """

    def at_cmdset_creation(self):
        self.add(CmdGit)
        self.add(CmdGitEvennia)
6,260 | message for source | # -*- coding: utf-8 -*-
"""Functions to call when running the function.
This module should contain a function called `run_module`, that is executed
when the module is run with `python -m delphi_sir_complainsalot`.
"""
import time
from itertools import groupby
import covidcast
from delphi_utils import SlackNotifier
from delphi_utils import get_structured_logger
from delphi_utils import read_params
from .check_source import check_source
def get_logger():
    """Create structured logger configured from the indicator params file.

    Reads ``log_filename`` (optional file target) and ``log_exceptions``
    (default True) from params.
    """
    params = read_params()
    return get_structured_logger(
        __name__, filename=params.get("log_filename"),
        log_exceptions=params.get("log_exceptions", True))
LOGGER = get_logger()
def run_module():
    """Run SirCAL.

    Fetches covidcast metadata, checks every configured source for SLA
    violations, and reports any complaints (to the log, and to Slack when
    ``channel``/``slack_token`` are configured).
    """
    start_time = time.time()
    params = read_params()
    covidcast.use_api_key(params["api_credentials"])
    meta = covidcast.metadata()
    slack_notifier = None
    if "channel" in params and "slack_token" in params:
        slack_notifier = SlackNotifier(params["channel"], params["slack_token"])
    complaints = []
    for data_source in params["sources"].keys():
        complaints.extend(check_source(data_source, meta,
                                       params["sources"], params.get("grace", 0), LOGGER))
    if len(complaints) > 0:
        report_complaints(complaints, slack_notifier)
    elapsed_time_in_seconds = round(time.time() - start_time, 2)
    LOGGER.info("Completed indicator run",
                elapsed_time_in_seconds = elapsed_time_in_seconds)
def split_complaints(complaints, n=49):  # pylint: disable=invalid-name
    """Yield successive chunks of at most ``n`` items from the complaints list."""
    start = 0
    while start < len(complaints):
        yield complaints[start:start + n]
        start += n
def report_complaints(all_complaints, slack_notifier):
    """Log complaints and optionally post to Slack.

    Complaints are sent in chunks of at most 49 to respect the Slack
    block-count limit per message.
    """
    for complaints in split_complaints(all_complaints):
        blocks = format_and_log_complaints_aggregated_by_source(complaints)
        if slack_notifier:
            slack_notifier.post_message(blocks)
def get_maintainers_block(complaints):
    """Build a Slack section block @-mentioning every maintainer found
    across the given complaints."""
    maintainers = set()
    for complaint in complaints:
        maintainers.update(complaint.maintainers)
    mentions = ", ".join("<@{0}>".format(m) for m in maintainers)
    return {
        "type": "section",
        "text": {
            "type": "mrkdwn",
            "text": "Hi, this is Sir Complains-a-Lot. I need to speak to " + mentions + ".",
        },
    }
def format_and_log_complaints_aggregated_by_source(complaints):
    """Build formatted Slack message for posting to the API.

    Complaints are aggregated by source to reduce the number of blocks.
    Each (source, message) group is also logged at CRITICAL level.

    NOTE(review): itertools.groupby only merges *adjacent* equal keys, so
    this assumes complaints arrive ordered by data_source (and message
    within a source) — appears true given how run_module builds the list
    per source; confirm before reordering upstream.
    """
    blocks = [get_maintainers_block(complaints)]

    def METHOD_NAME(complaint):
        # Group key: complaint text plus its last-update date.
        return "{main_text} - (last update: {last_updated})".format(
            main_text=complaint.message,
            last_updated=complaint.last_updated.strftime("%Y-%m-%d"))

    for source, complaints_by_source in groupby(
            complaints, key=lambda x: x.data_source):
        for message, complaint_list in groupby(
                complaints_by_source, key=METHOD_NAME):
            # Collect the signal/geo lines for this (source, message) group.
            signal_and_geo_types = ""
            for complaint in complaint_list:
                signal_and_geo_types += "`{signal}: [{geo_types}]`\n".format(
                    signal=complaint.signal,
                    geo_types=", ".join(complaint.geo_types))
            LOGGER.critical(event="Signal out of SLA",
                            message=message,
                            data_source=source,
                            signal_and_geo_types=signal_and_geo_types)
            blocks.extend([
                {
                    "type": "divider"
                },
                {
                    "type": "section",
                    "text": {
                        "type": "mrkdwn",
                        "text": "*{source_name}* {message}:\n{signals}"
                        .format(
                            source_name=source.upper(),
                            message=message,
                            signals=signal_and_geo_types)
                    }
                }
            ])
    return blocks
def format_complaints(complaints):
    """Build a formatted Slack message for posting to the API.

    To find good formatting for blocks, try the block kit builder:
    https://api.slack.com/tools/block-kit-builder
    """
    blocks = [get_maintainers_block(complaints)]
    blocks.extend(
        {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": complaint.to_md(),
            },
        }
        for complaint in complaints
    )
    return blocks
6,261 | test add non selections | """Core selection list unit tests, aimed at testing basic list creation.
Note that the vast majority of the API *isn't* tested in here as
`SelectionList` inherits from `OptionList` and so that would be duplicated
effort. Instead these tests aim to just test the things that have been
changed or wrapped in some way.
"""
from __future__ import annotations
import pytest
from rich.text import Text
from textual.app import App, ComposeResult
from textual.widgets import SelectionList
from textual.widgets.option_list import Option
from textual.widgets.selection_list import Selection, SelectionError
class SelectionListApp(App[None]):
    """Test selection list application.

    Mixes tuple-form entries (2- and 3-tuples, with/without an initial
    selected flag) and explicit Selection objects (with ids) to cover all
    accepted constructor argument shapes.
    """

    def compose(self) -> ComposeResult:
        yield SelectionList[int](
            ("0", 0),
            ("1", 1, False),
            ("2", 2, True),
            Selection("3", 3, id="3"),
            Selection("4", 4, True, id="4"),
        )
async def test_all_parameters_become_selctions() -> None:
    """All input parameters to a list should become selections."""
    async with SelectionListApp().run_test() as pilot:
        selections = pilot.app.query_one(SelectionList)
        assert selections.option_count == 5
        # Tuples and Selection objects alike must be stored as Selection.
        for n in range(5):
            assert isinstance(selections.get_option_at_index(n), Selection)
async def test_get_selection_by_index() -> None:
    """It should be possible to get a selection by index."""
    async with SelectionListApp().run_test() as pilot:
        option_list = pilot.app.query_one(SelectionList)
        for n in range(5):
            assert option_list.get_option_at_index(n).prompt == Text(str(n))
        # Negative indices count from the end, like normal sequences.
        assert option_list.get_option_at_index(-1).prompt == Text("4")
async def test_get_selection_by_id() -> None:
    """It should be possible to get a selection by ID."""
    async with SelectionListApp().run_test() as pilot:
        option_list = pilot.app.query_one(SelectionList)
        # Only options "3" and "4" were given explicit ids in compose().
        assert option_list.get_option("3").prompt == Text("3")
        assert option_list.get_option("4").prompt == Text("4")
async def test_add_later() -> None:
    """It should be possible to add more items to a selection list."""
    async with SelectionListApp().run_test() as pilot:
        selections = pilot.app.query_one(SelectionList)
        assert selections.option_count == 5
        # Single additions: tuple form and Selection form.
        selections.add_option(("5", 5))
        assert selections.option_count == 6
        selections.add_option(Selection("6", 6))
        assert selections.option_count == 7
        # Bulk addition mixing every accepted shape.
        selections.add_options(
            [Selection("7", 7), Selection("8", 8, True), ("9", 9), ("10", 10, True)]
        )
        assert selections.option_count == 11
        # Adding an empty iterable is a no-op.
        selections.add_options([])
        assert selections.option_count == 11
async def test_add_later_selcted_state() -> None:
    """When adding selections later the selected collection should get updated."""
    async with SelectionListApp().run_test() as pilot:
        selections = pilot.app.query_one(SelectionList)
        # Initially selected: values 2 and 4 (flagged True in compose()).
        assert selections.selected == [2, 4]
        selections.add_option(("5", 5, True))
        assert selections.selected == [2, 4, 5]
        selections.add_option(Selection("6", 6, True))
        assert selections.selected == [2, 4, 5, 6]
async def METHOD_NAME() -> None:
    """Adding options that aren't selections should result in errors."""
    async with SelectionListApp().run_test() as pilot:
        selections = pilot.app.query_one(SelectionList)
        # None, a plain Option, a bare string, a 1-tuple and a 4-tuple are
        # all invalid shapes and must raise SelectionError.
        with pytest.raises(SelectionError):
            selections.add_option(None)
        with pytest.raises(SelectionError):
            selections.add_option(Option("Nope"))
        with pytest.raises(SelectionError):
            selections.add_option("Nope")
        with pytest.raises(SelectionError):
            selections.add_option(("Nope",))
        with pytest.raises(SelectionError):
            selections.add_option(("Nope", 0, False, 23))
async def test_clear_options() -> None:
    """Clearing the options should also clear the selections."""
    async with SelectionListApp().run_test() as pilot:
        selections = pilot.app.query_one(SelectionList)
        selections.clear_options()
        assert selections.selected == []
6,262 | most common | import weakref
from _typeshed import Incomplete, Self, SupportsItems, SupportsKeysAndGetItem
from collections.abc import Callable, Generator, Hashable, Iterable, Iterator, Mapping
from typing import Any, Generic, TypeVar, overload
_KT = TypeVar("_KT")
_VT = TypeVar("_VT")
_T = TypeVar("_T")
PREV: int
NEXT: int
KEY: int
VALUE: int
DEFAULT_MAX_SIZE: int
class LRI(dict[_KT, _VT]):
    # Stub for boltons.cacheutils.LRI: a bounded mapping that evicts by
    # insertion recency once max_size is exceeded; counters record lookup
    # outcomes, and on_miss (if set) computes values for missing keys.
    hit_count: int
    miss_count: int
    soft_miss_count: int
    max_size: int
    on_miss: Callable[[_KT], _VT]
    def __init__(self, max_size: int = 128, values: Incomplete | None = None, on_miss: Incomplete | None = None) -> None: ...
    def __setitem__(self, key: _KT, value: _VT) -> None: ...
    def __getitem__(self, key: _KT) -> _VT: ...
    @overload
    def get(self, key: _KT, default: None = None) -> _VT | None: ...
    @overload
    def get(self, key: _KT, default: _T) -> _T | _VT: ...
    def __delitem__(self, key: _KT) -> None: ...
    @overload
    def pop(self, key: _KT) -> _VT: ...
    @overload
    def pop(self, key: _KT, default: _T) -> _T | _VT: ...
    def popitem(self) -> tuple[_KT, _VT]: ...
    def clear(self) -> None: ...
    def copy(self: Self) -> Self: ...
    @overload
    def setdefault(self, key: _KT, default: None = None) -> _VT: ...
    @overload
    def setdefault(self, key: _KT, default: _VT) -> _VT: ...
    def update(self, E: SupportsKeysAndGetItem[_KT, _VT] | Iterable[tuple[_KT, _VT]], **F: _VT) -> None: ...  # type: ignore[override]
class LRU(LRI[_KT, _VT]):
    # LRU variant: only __getitem__ differs from LRI (reads refresh recency).
    def __getitem__(self, key: _KT) -> _VT: ...
# Build a hashable cache key from positional and keyword arguments;
# ``typed`` distinguishes arguments that compare equal but differ in type.
def make_cache_key(
    args: Iterable[Hashable],
    kwargs: SupportsItems[Hashable, Hashable],
    typed: bool = False,
    kwarg_mark: object = ...,
    fasttypes: frozenset[type] = ...,
): ...
class CachedFunction:
    # Callable wrapper produced by @cached: memoizes func results in cache.
    func: Incomplete
    get_cache: Incomplete
    scoped: Incomplete
    typed: Incomplete
    key_func: Incomplete
    def __init__(self, func, cache, scoped: bool = True, typed: bool = False, key: Incomplete | None = None): ...
    def __call__(self, *args, **kwargs): ...
class CachedMethod:
    # Descriptor produced by @cachedmethod: like CachedFunction but binds
    # to an instance via __get__.
    func: Incomplete
    get_cache: Incomplete
    scoped: Incomplete
    typed: Incomplete
    key_func: Incomplete
    bound_to: Incomplete
    def __init__(self, func, cache, scoped: bool = True, typed: bool = False, key: Incomplete | None = None): ...
    def __get__(self, obj, objtype: Incomplete | None = None): ...
    def __call__(self, *args, **kwargs): ...
# Decorator factories wrapping functions/methods in the cache types above.
def cached(cache: Mapping[Any, Any], scoped: bool = True, typed: bool = False, key: Incomplete | None = None): ...
def cachedmethod(cache, scoped: bool = True, typed: bool = False, key: Incomplete | None = None): ...
class cachedproperty(Generic[_T]):
    # Property-like descriptor that computes once per instance and caches
    # the result on the instance.
    func: Callable[[Incomplete], _T]
    def __init__(self, func: Callable[[Incomplete], _T]) -> None: ...
    def __get__(self, obj: _T, objtype: type | None = None): ...
class ThresholdCounter(Generic[_T]):
    # Approximate counter keeping only keys whose frequency exceeds the
    # configured threshold fraction of the total; exposes a Counter-like API.
    total: int
    def __init__(self, threshold: float = 0.001) -> None: ...
    @property
    def threshold(self) -> float: ...
    def add(self, key: _T) -> None: ...
    def elements(self) -> Iterator[_T]: ...
    def METHOD_NAME(self, n: int | None = None) -> list[tuple[_T, int]]: ...
    def get_common_count(self) -> int: ...
    def get_uncommon_count(self) -> int: ...
    def get_commonality(self) -> float: ...
    def __getitem__(self, key: _T) -> int: ...
    def __len__(self) -> int: ...
    def __contains__(self, key: _T) -> bool: ...
    def iterkeys(self) -> Iterator[_T]: ...
    def keys(self) -> list[_T]: ...
    def itervalues(self) -> Generator[int, None, None]: ...
    def values(self) -> list[int]: ...
    def iteritems(self) -> Generator[tuple[_T, int], None, None]: ...
    def items(self) -> list[tuple[_T, int]]: ...
    def get(self, key: _T, default: int = 0) -> int: ...
    def update(self, iterable: Iterable[_T] | Mapping[_T, int], **kwargs: Iterable[_T] | Mapping[_T, int]) -> None: ...
class MinIDMap(Generic[_T]):
    # Assigns each object the smallest available integer id; uses weak
    # references so entries vanish when their objects are collected and
    # freed ids are reused.
    mapping: weakref.WeakKeyDictionary[_T, int]
    ref_map: dict[_T, int]
    free: list[int]
    def __init__(self) -> None: ...
    def get(self, a: _T) -> int: ...
    def drop(self, a: _T) -> None: ...
    def __contains__(self, a: _T) -> bool: ...
    def __iter__(self) -> Iterator[_T]: ...
    def __len__(self) -> int: ...
    def iteritems(self) -> Iterator[tuple[_T, int]]: ...
6,263 | load | # -*- coding: utf-8 -*-
# vStream https://github.com/Kodi-vStream/venom-xbmc-addons
# jordigarnacho
import re
from resources.lib.gui.hoster import cHosterGui
from resources.lib.gui.gui import cGui
from resources.lib.handler.inputParameterHandler import cInputParameterHandler
from resources.lib.handler.outputParameterHandler import cOutputParameterHandler
from resources.lib.handler.requestHandler import cRequestHandler
from resources.lib.parser import cParser
from resources.lib.comaddon import progress, siteManager
SITE_IDENTIFIER = 'mamcin'
SITE_NAME = 'Mamcin'
SITE_DESC = 'Plus belle la vie'
URL_MAIN = siteManager().getUrlMain(SITE_IDENTIFIER)
REPLAYTV_REPLAYTV = (True, 'load')
REPLAYTV_NEWS = (URL_MAIN, 'showMovies')
SERIE_NEWS = (URL_MAIN, 'showMovies')
SERIE_SERIES = (URL_MAIN, 'showMovies')
# loader function
def METHOD_NAME():
    """Site entry point: show the single top-level directory entry."""
    oGui = cGui()
    oOutputParameterHandler = cOutputParameterHandler()
    oOutputParameterHandler.addParameter('siteUrl', SERIE_NEWS[0])
    oGui.addDir(SITE_IDENTIFIER, SERIE_NEWS[1], 'Plus Belle La Vie', 'news.png', oOutputParameterHandler)
    oGui.setEndOfDirectory()
# genre definition
def showGenres():
    """List the available genre entries (currently only 'News')."""
    oGui = cGui()
    liste = []
    liste.append(['News', URL_MAIN + 'non-classe/'])
    oOutputParameterHandler = cOutputParameterHandler()
    for sTitle, sUrl in liste:
        oOutputParameterHandler.addParameter('siteUrl', sUrl)
        oGui.addDir(SITE_IDENTIFIER, 'showMovies', sTitle, 'genres.png', oOutputParameterHandler)
    oGui.setEndOfDirectory()
# function to extract episodes
def showMovies(sSearch=''):
    """Scrape the listing page for episodes and add one GUI entry each.

    When ``sSearch`` is given it is used directly as the page URL
    (search mode, no end-of-directory marker); otherwise the URL comes
    from the navigation parameters.
    """
    oGui = cGui()
    if sSearch:
        sUrl = sSearch
    else:
        oInputParameterHandler = cInputParameterHandler()
        sUrl = oInputParameterHandler.getValue('siteUrl')
    oRequestHandler = cRequestHandler(sUrl)
    sHtmlContent = oRequestHandler.request()
    # Capture groups: (episode URL, title, thumbnail URL).
    sPattern = 'class="featured-image"><a href="([^"]+)" title="([^"]+)"><img width=".+?" height=".+?" src="([^"]+)'
    oParser = cParser()
    aResult = oParser.parse(sHtmlContent, sPattern)
    if not aResult[0]:
        oGui.addText(SITE_IDENTIFIER)
    if aResult[0]:
        total = len(aResult[1])
        progress_ = progress().VScreate(SITE_NAME)
        oOutputParameterHandler = cOutputParameterHandler()
        for aEntry in aResult[1]:
            progress_.VSupdate(progress_, total)
            if progress_.iscanceled():
                break
            # first post filter: skip the recurring teaser post identified
            # by its fixed thumbnail URL.
            if (str(aEntry[2]) != "https://www.mamcin.com/wp-content/uploads/2017/10/plus-belle-la-vie-episode-suivant-en-avance.jpg"):
                sUrl = aEntry[0]
                sTitle = aEntry[1]
                sThumb = aEntry[2]
                oOutputParameterHandler.addParameter('siteUrl', sUrl)
                oOutputParameterHandler.addParameter('sMovieTitle', sTitle)
                oOutputParameterHandler.addParameter('sThumb', sThumb)
                oGui.addMovie(SITE_IDENTIFIER, 'showHosters', sTitle, '', sThumb, '', oOutputParameterHandler)
        progress_.VSclose(progress_)
    sNextPage = __checkForNextPage(sHtmlContent)
    if sNextPage:
        oOutputParameterHandler = cOutputParameterHandler()
        oOutputParameterHandler.addParameter('siteUrl', sNextPage)
        sPaging = re.search('page/([0-9]+)', sNextPage).group(1)
        oGui.addNext(SITE_IDENTIFIER, 'showMovies', 'Page ' + sPaging, oOutputParameterHandler)
    if not sSearch:
        oGui.setEndOfDirectory()
# search the next page
def __checkForNextPage(sHtmlContent):
    """Return the next-page URL found in the pagination markup, or False."""
    oParser = cParser()
    sPattern = '<li class="previous"><a href="([^"]+)"'
    aResult = oParser.parse(sHtmlContent, sPattern)
    if aResult[0]:
        return aResult[1][0]
    return False
# search hosts
def showHosters():
    """Scrape an episode page for video hoster links (dailymotion and
    sendvid embeds) and add each recognized hoster to the GUI."""
    oGui = cGui()
    oInputParameterHandler = cInputParameterHandler()
    sUrl = oInputParameterHandler.getValue('siteUrl')
    sMovieTitle = oInputParameterHandler.getValue('sMovieTitle')
    sThumb = oInputParameterHandler.getValue('sThumb')
    oRequestHandler = cRequestHandler(sUrl)
    sHtmlContent = oRequestHandler.request()
    oParser = cParser()
    # add dailymotion sources
    sPattern = '<iframe.+?src="(.+?)?logo=0&info=0"'
    aResult = oParser.parse(sHtmlContent, sPattern)
    if aResult[0]:
        for aEntry in aResult[1]:
            # Embeds may use protocol-relative URLs; force https.
            if not aEntry.startswith('http'):
                sHosterUrl = 'https:' + aEntry
            else:
                sHosterUrl = aEntry
            oHoster = cHosterGui().checkHoster(sHosterUrl)
            if oHoster:
                oHoster.setDisplayName(sMovieTitle)
                oHoster.setFileName(sMovieTitle)
                cHosterGui().showHoster(oGui, oHoster, sHosterUrl, sThumb)
    # add sendvid sources
    sPattern = '<(?:source|iframe).+?src="(.+?)" width'
    aResult = oParser.parse(sHtmlContent, sPattern)
    if aResult[0]:
        for aEntry in aResult[1]:
            sHosterUrl = aEntry
            if not sHosterUrl.startswith('http'):
                sHosterUrl = 'https:' + aEntry
            oHoster = cHosterGui().checkHoster(sHosterUrl)
            if oHoster:
                oHoster.setDisplayName(sMovieTitle)
                oHoster.setFileName(sMovieTitle)
                cHosterGui().showHoster(oGui, oHoster, sHosterUrl, sThumb)
    oGui.setEndOfDirectory()
6,264 | f3 | import pytest
from dash import Dash, Input, Output, State, html
from dash.exceptions import InvalidCallbackReturnValue, IncorrectTypeException
def test_cbva001_callback_dep_types():
    """Callback dependencies must be declared Outputs first, then Inputs,
    then States; bare args, tuples and lists are all accepted forms."""
    app = Dash(__name__)
    app.layout = html.Div(
        [
            html.Div("child", id="in1"),
            html.Div("state", id="state1"),
            html.Div(id="out1"),
            html.Div("child", id="in2"),
            html.Div("state", id="state2"),
            html.Div(id="out2"),
            html.Div("child", id="in3"),
            html.Div("state", id="state3"),
            html.Div(id="out3"),
        ]
    )
    # Input before Output must be rejected at registration time.
    with pytest.raises(IncorrectTypeException) as err:

        @app.callback(Input("in1", "children"), Output("out1", "children"))
        def f2(i):
            return i

        pytest.fail("out-of-order args")

    assert "Outputs first,\nthen all Inputs, then all States." in err.value.args[0]
    assert "<Input `in1.children`>" in err.value.args[0]
    assert "<Output `out1.children`>" in err.value.args[0]

    # all OK with tuples
    @app.callback(
        (Output("out1", "children"),),
        (Input("in1", "children"),),
        (State("state1", "children"),),
    )
    def f1(i):
        return i

    # all OK with all args in single list
    @app.callback(
        Output("out2", "children"),
        Input("in2", "children"),
        State("state2", "children"),
    )
    def METHOD_NAME(i):
        return i

    # all OK with lists
    @app.callback(
        [Output("out3", "children")],
        [Input("in3", "children")],
        [State("state3", "children")],
    )
    def f4(i):
        return i
def test_cbva002_callback_return_validation():
    """Callback return values must be JSON-serializable and match the
    declared outputs in shape; invalid returns raise
    InvalidCallbackReturnValue when the wrapped callback is dispatched."""
    app = Dash(__name__)
    app.layout = html.Div(
        [
            html.Div(id="a"),
            html.Div(id="b"),
            html.Div(id="c"),
            html.Div(id="d"),
            html.Div(id="e"),
            html.Div(id="f"),
        ]
    )

    @app.callback(Output("b", "children"), [Input("a", "children")])
    def single(a):
        # Deliberately non-serializable return (a set).
        return set([1])

    single_wrapped = app.callback_map["b.children"]["callback"]
    with pytest.raises(InvalidCallbackReturnValue):
        # outputs_list (normally callback_context.outputs_list) is provided
        # by the dispatcher from the request.
        single_wrapped("aaa", outputs_list={"id": "b", "property": "children"})
        pytest.fail("not serializable")

    @app.callback(
        [Output("c", "children"), Output("d", "children")], [Input("a", "children")]
    )
    def multi(a):
        # Non-serializable value nested inside an otherwise valid list.
        return [1, set([2])]

    multi_wrapped = app.callback_map["..c.children...d.children.."]["callback"]
    with pytest.raises(InvalidCallbackReturnValue):
        outputs_list = [
            {"id": "c", "property": "children"},
            {"id": "d", "property": "children"},
        ]
        multi_wrapped("aaa", outputs_list=outputs_list)
        pytest.fail("nested non-serializable")

    @app.callback(
        [Output("e", "children"), Output("f", "children")], [Input("a", "children")]
    )
    def multi2(a):
        # One value returned where two outputs are declared.
        return ["abc"]

    multi2_wrapped = app.callback_map["..e.children...f.children.."]["callback"]
    with pytest.raises(InvalidCallbackReturnValue):
        outputs_list = [
            {"id": "e", "property": "children"},
            {"id": "f", "property": "children"},
        ]
        multi2_wrapped("aaa", outputs_list=outputs_list)
        pytest.fail("wrong-length list")
def test_cbva003_list_single_output(dash_duo):
    """A single output may be declared bare (return a value) or as a
    one-element list (return a one-element tuple)."""
    app = Dash(__name__)
    app.layout = html.Div(
        [html.Div("Hi", id="in"), html.Div(id="out1"), html.Div(id="out2")]
    )

    @app.callback(Output("out1", "children"), Input("in", "children"))
    def o1(i):
        return "1: " + i

    @app.callback([Output("out2", "children")], [Input("in", "children")])
    def o2(i):
        return ("2: " + i,)

    dash_duo.start_server(app)
    dash_duo.wait_for_text_to_equal("#out1", "1: Hi")
    dash_duo.wait_for_text_to_equal("#out2", "2: Hi")
@pytest.mark.parametrize("named_out", [True, False])
@pytest.mark.parametrize("named_in,named_state", [(True, True), (False, False)])
def test_cbva004_named_args(named_out, named_in, named_state, dash_duo):
    """Callback dependencies may be passed positionally or via the
    ``output``/``inputs``/``state`` keyword arguments, in any mix chosen
    by the parametrized flags."""
    app = Dash(__name__)
    app.layout = html.Div(
        [
            html.Div("Hi", id="in"),
            html.Div("gh", id="state"),
            html.Div(id="out1"),
            html.Div(id="out2"),
        ]
    )

    def make_args(*a):
        # Split dependencies into positional args and keyword args
        # according to the test's flag combination.
        args = []
        kwargs = {}
        names = ["output", "inputs", "state"]
        flags = [named_out, named_in, named_state]
        for ai, name, flag in zip(a, names, flags):
            if flag:
                kwargs[name] = ai
            else:
                args.append(ai)
        return args, kwargs

    args, kwargs = make_args(
        Output("out1", "children"), Input("in", "children"), State("state", "children")
    )

    @app.callback(*args, **kwargs)
    def o1(i, s):
        return "1: " + i + s

    args, kwargs = make_args(
        [Output("out2", "children")],
        [Input("in", "children")],
        [State("state", "children")],
    )

    @app.callback(*args, **kwargs)
    def o2(i, s):
        return ("2: " + i + s,)

    dash_duo.start_server(app)
    dash_duo.wait_for_text_to_equal("#out1", "1: High")
    dash_duo.wait_for_text_to_equal("#out2", "2: High")
def test_cbva005_tuple_args(dash_duo):
    """Dependency groups may be given as tuples as well as lists."""
    app = Dash(__name__)
    app.layout = html.Div(
        [
            html.Div("Yo", id="in1"),
            html.Div("lo", id="in2"),
            html.Div(id="out1"),
            html.Div(id="out2"),
        ]
    )

    @app.callback(
        Output("out1", "children"), (Input("in1", "children"), Input("in2", "children"))
    )
    def f(i1, i2):
        return "1: " + i1 + i2

    @app.callback(
        (Output("out2", "children"),),
        Input("in1", "children"),
        (State("in2", "children"),),
    )
    def g(i1, i2):
        # Tuple-wrapped single output requires a tuple return.
        return ("2: " + i1 + i2,)

    dash_duo.start_server(app)
    dash_duo.wait_for_text_to_equal("#out1", "1: Yolo")
    dash_duo.wait_for_text_to_equal("#out2", "2: Yolo")
6,265 | test set by scale factor | # Copyright (c) 2019 Ultimaker B.V.
# Uranium is released under the terms of the LGPLv3 or higher.
import unittest
import numpy
import pytest
from UM.Math.Matrix import Matrix
from UM.Math.Vector import Vector
import copy
class TestMatrix(unittest.TestCase):
def setUp(self):
# Called before the first testfunction is executed
pass
def tearDown(self):
# Called after the last testfunction was executed
pass
def test_setByQuaternion(self):
pass
def test_multiply(self):
temp_matrix = Matrix()
temp_matrix.setByTranslation(Vector(10,10,10))
temp_matrix2 = Matrix()
temp_matrix2.setByScaleFactor(0.5)
temp_matrix.multiply(temp_matrix2)
numpy.testing.assert_array_almost_equal(temp_matrix.getData(), numpy.array([[0.5,0,0,10],[0,0.5,0,10],[0,0,0.5,10],[0,0,0,1]]))
def test_multiplyCopy(self):
temp_matrix = Matrix()
temp_matrix.setByTranslation(Vector(10, 10, 10))
temp_matrix2 = Matrix()
temp_matrix2.setByScaleFactor(0.5)
result = temp_matrix.multiply(temp_matrix2, copy=True)
assert temp_matrix != result
numpy.testing.assert_array_almost_equal(result.getData(), numpy.array([[0.5, 0, 0, 10], [0, 0.5, 0, 10], [0, 0, 0.5, 10], [0, 0, 0, 1]]))
def test_preMultiply(self):
temp_matrix = Matrix()
temp_matrix.setByTranslation(Vector(10,10,10))
temp_matrix2 = Matrix()
temp_matrix2.setByScaleFactor(0.5)
temp_matrix.preMultiply(temp_matrix2)
numpy.testing.assert_array_almost_equal(temp_matrix.getData(), numpy.array([[0.5,0,0,5],[0,0.5,0,5],[0,0,0.5,5],[0,0,0,1]]))
def test_preMultiplyCopy(self):
temp_matrix = Matrix()
temp_matrix.setByTranslation(Vector(10,10,10))
temp_matrix2 = Matrix()
temp_matrix2.setByScaleFactor(0.5)
result = temp_matrix.preMultiply(temp_matrix2, copy = True)
assert result != temp_matrix
numpy.testing.assert_array_almost_equal(result.getData(), numpy.array([[0.5,0,0,5],[0,0.5,0,5],[0,0,0.5,5],[0,0,0,1]]))
def METHOD_NAME(self):
    # setByScaleFactor must produce a uniform-scale matrix with a unit
    # homogeneous row, and getScale must report the factor on all axes.
    matrix = Matrix()
    matrix.setByScaleFactor(0.5)
    numpy.testing.assert_array_almost_equal(matrix.getData(), numpy.array([[0.5,0,0,0],[0,0.5,0,0],[0,0,0.5,0],[0,0,0,1]]))
    assert matrix.getScale() == Vector(0.5, 0.5, 0.5)

def test_scaleByFactor(self):
    # scaleByFactor on an identity matrix scales all three axes.
    matrix = Matrix()
    matrix.scaleByFactor(2)
    assert matrix.getScale() == Vector(2, 2, 2)
def test_setByRotation(self):
    # TODO(review): stub — Matrix.setByRotation is currently untested.
    pass

def test_setByTranslation(self):
    # The translation vector lands in the last column of the 4x4 matrix.
    matrix = Matrix()
    matrix.setByTranslation(Vector(0,1,0))
    numpy.testing.assert_array_almost_equal(matrix.getData(), numpy.array([[1,0,0,0],[0,1,0,1],[0,0,1,0],[0,0,0,1]]))
def test_deepcopy(self):
    """A deep copy of a matrix with non-default data must compare equal to it."""
    original = Matrix()
    # Give the matrix some non-identity content before copying.
    original.setRow(1, [1, 2, 3])
    original.setColumn(2, [3, 4, 5])
    clone = copy.deepcopy(original)
    assert clone == original
def test_compare(self):
    # Equality: identical to itself, never equal to a non-Matrix, and two
    # matrices whose _data is None compare equal.
    matrix = Matrix()
    matrix2 = Matrix()
    assert matrix == matrix
    assert not matrix == "zomg"
    matrix._data = None
    matrix2._data = None
    assert matrix == matrix2
def test_translate(self):
    """Successive translate() calls must accumulate."""
    matrix = Matrix()
    first = Vector(1, 1, 1)
    second = Vector(2, 3, 4)
    matrix.translate(first)
    assert matrix.getTranslation() == first
    matrix.translate(second)
    # Translations add component-wise.
    assert matrix.getTranslation() == Vector(3, 4, 5)
def test_setToIdentity(self):
    # TODO(review): stub — Matrix.setToIdentity is currently untested.
    pass

def test_getData(self):
    # TODO(review): stub — Matrix.getData is only exercised indirectly.
    pass

def test_transposed(self):
    # Transposing a translation matrix moves the translation into the
    # bottom row.
    temp_matrix = Matrix()
    temp_matrix.setByTranslation(Vector(10,10,10))
    temp_matrix = temp_matrix.getTransposed()
    numpy.testing.assert_array_almost_equal(temp_matrix.getData(), numpy.array([[1,0,0,0],[0,1,0,0],[0,0,1,0],[10,10,10,1]]))

def test_dot(self):
    # TODO(review): stub — Matrix.dot is currently untested.
    pass
def test_invalidAt(self):
    # Indices outside the 4x4 range must be rejected with IndexError.
    matrix = Matrix()
    with pytest.raises(IndexError):
        matrix.at(12, 13)
def test_invalidSetRow(self):
    """Out-of-range row indices (too large or negative) must raise IndexError.

    Bug fix: both calls used to share one ``pytest.raises`` block, so the
    second call was unreachable — the context manager exits as soon as the
    first statement raises. Each call now gets its own block so the
    negative-index case is actually exercised.
    """
    matrix = Matrix()
    with pytest.raises(IndexError):
        matrix.setRow(12, [1., 2., 3.])
    with pytest.raises(IndexError):
        matrix.setRow(-1, [2., 3., 4.])
def test_invalidSetColumn(self):
matrix = Matrix()
with pytest.raises(IndexError):
matrix.setColumn(12, [1., 2., 3.])
matrix.setColumn(-1, [2., 3., 4.] |
6,266 | test strategy stochasic depth | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from nemo.core import NeuralModule
from nemo.core.classes.mixins import AdapterModuleMixin, access_mixins, adapter_mixin_strategies, adapter_mixins
from nemo.utils import config_utils
class DefaultModule(NeuralModule):
    """Minimal Linear(50, 50) + BatchNorm1d(50) module used as the adapter test fixture."""

    def __init__(self):
        super().__init__()
        self.fc = torch.nn.Linear(50, 50)
        self.bn = torch.nn.BatchNorm1d(50)

    def forward(self, x):
        """Apply the linear layer, then batch normalization."""
        return self.bn(self.fc(x))

    def num_params(self):
        """Return the total number of trainable parameters."""
        return sum(p.numel() for p in self.parameters() if p.requires_grad)
class DefaultModuleAdapter(DefaultModule, AdapterModuleMixin):
    """DefaultModule variant with adapter support mixed in."""

    def forward(self, x):
        x = super(DefaultModuleAdapter, self).forward(x)

        if self.is_adapter_available():
            # For testing purposes, cache the adapter names
            self._adapter_names = self.get_enabled_adapters()

            # call forward over model adapters, summing them up
            x = self.forward_enabled_adapters(x)

        return x
def get_adapter_cfg(in_features=50, dim=100, norm_pos='pre'):
    """Build a LinearAdapter config dict for use with ``add_adapter``."""
    return {
        '_target_': 'nemo.collections.common.parts.adapter_modules.LinearAdapter',
        'in_features': in_features,
        'dim': dim,
        'norm_position': norm_pos,
    }
def get_classpath(cls):
    """Return the fully-qualified import path of a class (module.ClassName)."""
    return ".".join((cls.__module__, cls.__name__))
# Register the adapter-capable subclass for DefaultModule so the adapter
# framework can resolve it; the guard keeps the call idempotent across
# repeated imports of this test module.
if adapter_mixins.get_registered_adapter(DefaultModule) is None:
    adapter_mixins.register_adapter(DefaultModule, DefaultModuleAdapter)
class TestAdapterStrategy:
@pytest.mark.unit
def test_ResidualAddAdapterStrategyConfig(self):
    # The dataclass config must mirror the strategy class's __init__
    # signature (ignoring the hydra '_target_' field).
    IGNORED_ARGS = ['_target_']
    result = config_utils.assert_dataclass_signature_match(
        adapter_mixin_strategies.ResidualAddAdapterStrategy,
        adapter_mixin_strategies.ResidualAddAdapterStrategyConfig,
        ignore_args=IGNORED_ARGS,
    )
    signatures_match, cls_subset, dataclass_subset = result
    assert signatures_match
    # Subsets are None when neither side has extra or missing arguments.
    assert cls_subset is None
    assert dataclass_subset is None
@pytest.mark.unit
def test_strategy_default(self):
    # A freshly-added adapter is zero-initialized, so a residual-add
    # strategy must return (approximately) the unchanged input.
    torch.random.manual_seed(0)
    x = torch.randn(2, 50)

    module = DefaultModuleAdapter()
    module.add_adapter(name='temp', cfg=get_adapter_cfg())
    adapter = module.adapter_layer[module.get_enabled_adapters()[0]]

    # update the strategy
    adapter_strategy = adapter_mixin_strategies.ResidualAddAdapterStrategy()
    adapter.adapter_strategy = adapter_strategy

    with torch.no_grad():
        assert adapter_strategy.stochastic_depth == 0.0
        out = adapter_strategy.forward(x, adapter, module=module)
        assert (out - x).abs().mean() < 1e-5
@pytest.mark.unit
@pytest.mark.parametrize('stochastic_depth', [0.0, 1.0])
def METHOD_NAME(self, stochastic_depth):
    """With p=0 the adapter contribution is kept; with p=1 in train mode the
    adapter is skipped and the plain module output comes back."""
    torch.random.manual_seed(0)
    x = torch.randn(2, 50)

    module = DefaultModuleAdapter()
    module.add_adapter(name='temp', cfg=get_adapter_cfg())

    # extract adapter
    adapter = module.adapter_layer[module.get_enabled_adapters()[0]]
    # reinitialize the final layer of the adapter module (so that it is not zero init)
    adapter.module[-1].weight.data += 1

    # get just module output
    module.set_enabled_adapters('temp', enabled=False)
    module_out = module(x)

    # get module + adapter output
    module.set_enabled_adapters('temp', enabled=True)
    module_adapter_out = module(x)

    assert (
        module_out - module_adapter_out
    ).abs().sum() > 0  # results should not be the same after adapter forward now

    adapter_strategy = adapter_mixin_strategies.ResidualAddAdapterStrategy(stochastic_depth=stochastic_depth)
    adapter.adapter_strategy = adapter_strategy

    # In eval mode stochastic depth must be a no-op regardless of p.
    module.eval()
    with torch.inference_mode():  # stochastic depth disabled, no grad tracking
        assert adapter.adapter_strategy.stochastic_depth == stochastic_depth
        out = adapter_strategy.forward(module_out, adapter, module=module)
        assert (out - module_adapter_out).abs().mean() < 1e-5

    module.train()
    with torch.inference_mode():  # stochastic depth enabled, but no grad tracking during training mode
        out = adapter_strategy.forward(module_out, adapter, module=module)

        if stochastic_depth == 0.0:
            check = module_adapter_out
        else:
            check = module_out
        assert (out - check).abs().mean() < 1e-5
@pytest.mark.unit
def test_strategy_l2_lambda(self):
    # A strategy with l2_lambda > 0 must register an 'adapter_loss' entry
    # in the AccessMixin registry during forward.
    torch.random.manual_seed(0)
    x = torch.randn(2, 50)

    module = DefaultModuleAdapter()
    module.add_adapter(name='temp', cfg=get_adapter_cfg())
    module.train()

    adapter = module.adapter_layer[module.get_enabled_adapters()[0]]
    # update the strategy
    adapter_strategy = adapter_mixin_strategies.ResidualAddAdapterStrategy(l2_lambda=0.01)
    adapter.adapter_strategy = adapter_strategy

    with torch.no_grad():
        access_mixins.AccessMixin.reset_registry(module)
        assert access_mixins.AccessMixin.is_access_enabled() is False

        assert adapter_strategy.stochastic_depth == 0.0
        assert adapter_strategy.l2_lambda > 0.0

        out = adapter_strategy.forward(x, adapter, module=module)
        assert (out - x).abs().mean() < 1e-5

        # extract losses
        assert access_mixins.AccessMixin.is_access_enabled() is True
        auxiliary_losses = access_mixins.AccessMixin.get_module_registry(module)
        loss = list(auxiliary_losses.values())[0]
        assert 'adapter_loss' in loss
        assert loss['adapter_loss'][0] == torch.tensor(0.0)  # initially adapter is 0 init, no loss required.
6,267 | uniquify | import os
from fnmatch import fnmatch
import yaml
from django.conf import settings
DATA_URI_START = "<!--[if (!IE)|(gte IE 8)]><!-->"
DATA_URI_END = "<!--<![endif]-->"
MHTML_START = "<!--[if lte IE 7]>"
MHTML_END = "<![endif]-->"
class JammitAssets:
ASSET_FILENAME = 'assets.yml'
def __init__(self, assets_dir):
    """
    Initializes the Jammit object by reading the assets.yml file and
    stores all javascripts and stylesheets in memory for easy lookup
    in templates.
    """
    self.assets_dir = assets_dir
    # Mapping: asset_type ('javascripts'/'stylesheets') -> package name ->
    # list of glob patterns; loaded once at construction.
    self.assets = self.read_assets()

def read_assets(self):
    """
    Read the assets from the YAML and store it as a lookup dictionary.
    """
    filepath = os.path.join(self.assets_dir, self.ASSET_FILENAME)
    with open(filepath, 'r') as yaml_file:
        return yaml.safe_load(yaml_file)
def render_tags(self, asset_type, asset_package):
    """
    Returns rendered <script> and <link> tags for the given package name,
    joined with newlines. When compressed assets are enabled (DEBUG_ASSETS,
    falling back to DEBUG, is off) a single tag for the pre-built bundle is
    returned; otherwise one tag per matching source file, de-duplicated
    while preserving order.

    `asset_type` must be 'javascripts' or 'stylesheets'.
    """
    tags = []
    if not getattr(settings, 'DEBUG_ASSETS', settings.DEBUG):
        # Production: one tag for the compressed bundle. The previous code
        # computed the extension in one if/elif chain and picked the tag
        # builder in a second identical chain, and left `tag` unbound for
        # an unknown asset_type (UnboundLocalError) — fold the chains and
        # fail loudly instead.
        if asset_type == 'javascripts':
            tags.append(self.javascript_tag_compressed(asset_package, 'js'))
        elif asset_type == 'stylesheets':
            tags.append(self.stylesheet_tag_compressed(asset_package, 'css'))
        else:
            raise ValueError('Unknown asset type: %r' % (asset_type,))
    else:
        # Development: one tag per file matched by the package's patterns.
        patterns = self.assets[asset_type][asset_package]
        for pattern in patterns:
            for path in FileFinder.filefinder(pattern):
                if asset_type == 'javascripts':
                    tags.append(self.javascript_tag(path))
                elif asset_type == 'stylesheets':
                    tags.append(self.stylesheet_tag(path))
        tags = self.METHOD_NAME(tags)
    return '\n'.join(tags)
def render_code(self, asset_type, asset_package):
    """
    Return the concatenated raw contents of every file in the package.

    Stylesheet contents are escaped (quotes) and flattened to one line so
    they can be embedded inside a JavaScript string.
    """
    text = []
    patterns = self.assets[asset_type][asset_package]
    for pattern in patterns:
        for path in FileFinder.filefinder(pattern):
            abs_filename = os.path.join(settings.NEWSBLUR_DIR, path)
            # Bug fix: the handle was opened without `with` and never
            # closed, leaking a file descriptor per rendered file.
            with open(abs_filename, 'r') as f:
                code = f.read()
            if asset_type == 'stylesheets':
                code = code.replace('\"', '\\"').replace('\n', ' ')
            text.append(code)
    return ''.join(text)
def METHOD_NAME(self, tags):
"""
Returns a uniquified list of script/link tags, preserving order.
"""
seen = set()
unique = []
for tag in tags:
if tag not in seen:
unique.append(tag)
seen.add(tag)
return unique
def javascript_tag(self, path):
return '<script src="/%s" type="text/javascript" charset="utf-8"></script>' % path
def javascript_tag_compressed(self, asset_package, asset_type_ext):
    """Render a cache-busted <script> tag for a compressed bundle.

    The bundle's mtime is appended as a query string so browsers refetch
    after each deploy. NOTE(review): the path is resolved relative to the
    process working directory — confirm the server always runs from the
    project root.
    """
    filename = 'static/%s.%s' % (asset_package, asset_type_ext)
    asset_mtime = int(os.path.getmtime(filename))
    path = '%s?%s' % (filename, asset_mtime)
    return self.javascript_tag(path)
def stylesheet_tag(self, path):
return '<link rel="stylesheet" href="/%s" type="text/css" charset="utf-8">' % path
def stylesheet_tag_compressed(self, asset_package, asset_type_ext):
    """Render the stylesheet bundle wrapped in IE conditional comments.

    Modern browsers (and IE >= 8) get the data-URI variant of the bundle;
    IE <= 7 falls back to the original file via the MHTML block. Both
    links share the data-URI file's mtime as a cache-busting query string.
    """
    datauri_filename = 'static/%s-datauri.%s' % (asset_package, asset_type_ext)
    original_filename = 'static/%s.%s' % (asset_package, asset_type_ext)
    asset_mtime = int(os.path.getmtime(datauri_filename))
    datauri_path = '%s?%s' % (datauri_filename, asset_mtime)
    original_path = '%s?%s' % (original_filename, asset_mtime)
    return '\n'.join([
        DATA_URI_START,
        self.stylesheet_tag(datauri_path),
        DATA_URI_END,
        MHTML_START,
        self.stylesheet_tag(original_path),
        MHTML_END,
    ])
class FileFinder:
    """Expand jammit-style glob patterns ('**' means recursive) to file paths."""

    @classmethod
    def filefinder(cls, pattern):
        """Return all paths matching `pattern`, recursing when it contains '**'."""
        if '**' in pattern:
            folder, _sep, pattern = pattern.partition('/**/')
            return list(cls.recursive_find_files(folder, pattern))
        folder, pattern = os.path.split(pattern)
        return list(cls.find_files(folder, pattern))

    @classmethod
    def recursive_find_files(cls, folder, pattern):
        """Yield files anywhere under `folder` whose basename matches `pattern`."""
        for root, _dirs, files in os.walk(folder):
            for f in files:
                if fnmatch(f, pattern):
                    yield os.path.join(root, f)

    @classmethod
    def find_files(cls, folder, pattern):
        """Yield matching non-directory entries directly inside `folder`, sorted.

        Bug fix: the directory check used to test `entry` relative to the
        current working directory instead of `folder`, so whenever the cwd
        differed from `folder` subdirectories could slip through the filter.
        """
        for entry in sorted(os.listdir(folder)):
            if not os.path.isdir(os.path.join(folder, entry)) and fnmatch(entry, pattern):
                yield os.path.join(folder, entry)
6,268 | inner raising func | # Copyright 2023, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Python tests originally created or extracted from other peoples work. The
# parts were too small to be protected.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Reference counting tests for features of Python3.3 or higher.
These contain functions that do specific things, where we have a suspect
that references may be lost or corrupted. Executing them repeatedly and
checking the reference count is how they are used.
These are Python3 specific constructs, that will give a SyntaxError or
not be relevant on Python2.
"""
# While we use that for comparison code, no need to compile that.
# nuitka-project: --nofollow-import-to=nuitka
import os
import sys
# Find nuitka package relative to us.
sys.path.insert(
0,
os.path.normpath(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..")
),
)
# isort:start
import types
from nuitka.tools.testing.Common import (
executeReferenceChecked,
someGenerator,
someGeneratorRaising,
)
def simpleFunction1():
    # Keyword-only argument whose default is a builtin exception class.
    def abc(*, _exc=IOError):
        pass

    for _ in range(100):
        abc()

def simpleFunction2():
    def abc(*, exc=IOError):
        # `from None` suppresses exception-context chaining.
        raise ValueError from None

    try:
        abc()
    except (ValueError, TypeError):
        pass
def simpleFunction3():
    # Explicit exception chaining: `raise ... from e1` sets __cause__.
    try:
        class ClassA(Exception):
            pass

        class ClassB(Exception):
            pass

        try:
            raise ClassA("foo")
        except ClassA as e1:
            raise ClassB(str(e1)) from e1
    except Exception:  # different to Nuitka, pylint: disable=broad-except
        pass
def simpleFunction4():
    # A `for` target can be a nonlocal name; the closure mutates `a`.
    a = 1

    def nonlocal_writer():
        nonlocal a

        for a in range(10):  # false alarm, pylint: disable=unused-variable
            pass

    nonlocal_writer()

    assert a == 9, a

def simpleFunction5():
    # Annotation expressions (here `x * x`) are evaluated at def time.
    x = 2

    def local_func(_a: int, _b: x * x):
        pass

    local_func(x, x)
def simpleFunction6():
    # Make sure exception state is cleaned up as soon as the except
    # block is left.
    class MyException(Exception):
        def __init__(self, obj):
            # This is on purpose not called, pylint: disable=super-init-not-called
            self.obj = obj

    class MyObj:
        pass

    def METHOD_NAME():
        # The extra local reference held while raising is intentional: the
        # reference checker verifies it is released with the exception state.
        local_ref = obj
        raise MyException(obj)

    # "except" block raising another exception
    obj = MyObj()
    try:
        try:
            METHOD_NAME()
        except:
            raise KeyError
    except KeyError as e:  # on purpose, pylint: disable=unused-variable
        pass
# Module-level constants so the range() calls below cannot be folded into
# compile-time constants.
range_low = 0
range_high = 256
range_step = 13

def simpleFunction7():
    # Make sure xranges work nicely
    return range(range_low, range_high, range_step)

def simpleFunction8():
    # Make sure xranges work nicely
    return range(range_low, range_high)

def simpleFunction9():
    # Make sure xranges work nicely
    return range(range_high)
def simpleFunction10():
    # Returning an annotated local function must not leak references.
    def f(_x: int) -> int:
        pass

    return f

def simpleFunction11():
    # ImportError keyword arguments `name` and `path` round-trip.
    try:
        raise ImportError(path="lala", name="lele")
    except ImportError as e:
        assert e.name == "lele"
        assert e.path == "lala"
def simpleFunction12():
    # Plain `yield from` delegation over two sub-generators.
    def g():
        for a in range(20):
            yield a

    def h():
        yield 4
        yield 5
        yield 6

    def f():
        yield from g()
        yield from h()

    _x = list(f())

def simpleFunction13():
    # `yield from` delegation where the second sub-generator raises.
    def g():
        for a in range(20):
            yield a

    def h():
        yield 4
        yield 5
        yield 6

        raise TypeError

    def f():
        yield from g()
        yield from h()

    try:
        _x = list(f())
    except TypeError:
        pass
# Broken iterator class: any attribute lookup other than the defined dunders
# (e.g. `throw`/`close` probed by `yield from`) raises ZeroDivisionError.
class Broken:
    def __iter__(self):
        return self

    def __next__(self):
        return 1

    def __getattr__(self, attr):
        1 / 0  # pylint: disable=pointless-statement

def simpleFunction14():
    # Iterating the broken iterator through `yield from`.
    def g():
        yield from Broken()

    try:
        gi = g()
        next(gi)
    except Exception:  # pylint: disable=broad-except
        pass

def simpleFunction15():
    # Throwing into a `yield from` over the broken iterator probes its
    # `throw` attribute, which explodes via __getattr__.
    def g():
        yield from Broken()

    try:
        gi = g()
        next(gi)
        gi.throw(AttributeError)
    except Exception:  # pylint: disable=broad-except
        pass
def simpleFunction16():
    # `yield from` over a tuple literal.
    def g():
        yield from (2, 3)

    return list(g())

def simpleFunction17():
    # A generator `return` value (carried by StopIteration) must not leak.
    def g():
        yield from (2, 3)

        return 9

    return list(g())

def simpleFunction18():
    # Same, with a tuple return value.
    def g():
        yield from (2, 3)

        return 9, 8

    return list(g())
def simpleFunction19():
    # Throwing into a delegated compiled generator.
    def g():
        x = someGenerator()
        assert type(x) is types.GeneratorType

        yield from x

    gen = g()
    next(gen)

    try:
        gen.throw(ValueError)
    except ValueError:
        pass

def simpleFunction20():
    # Delegated generator that raises on its own during iteration.
    def g():
        x = someGeneratorRaising()
        assert type(x) is types.GeneratorType

        yield from x

    gen = g()
    next(gen)

    try:
        next(gen)
    except TypeError:
        pass
# Iterator whose close() raises: exercises GeneratorExit propagation through
# `yield from` when the delegated iterator's close is broken.
class ClassIteratorBrokenClose:
    def __init__(self):
        self.my_iter = iter(range(2))

    def __iter__(self):
        return self

    def next(self):
        return next(self.my_iter)

    def close(self):
        raise TypeError(3)

    __next__ = next

def simpleFunction21():
    def g():
        x = ClassIteratorBrokenClose()

        yield from x

    gen = g()
    next(gen)

    try:
        # GeneratorExit triggers close() on the delegate, which raises.
        gen.throw(GeneratorExit)
    except TypeError:
        pass
# Iterator whose throw() raises: exercises exception forwarding through
# `yield from` when the delegate's throw is broken.
class ClassIteratorBrokenThrow:
    def __init__(self):
        self.my_iter = iter(range(2))

    def __iter__(self):
        return self

    def next(self):
        return next(self.my_iter)

    def throw(self, *args):
        raise TypeError(3)

    __next__ = next

def simpleFunction22():
    def g():
        x = ClassIteratorBrokenThrow()

        yield from x

    gen = g()
    next(gen)

    try:
        gen.throw(ValueError)
    except GeneratorExit:
        pass
    except TypeError:
        pass
# Iterator whose throw() asserts it receives the exception unnormalized
# (exactly one argument, the class itself, not an instance).
class ClassIteratorRejectingThrow:
    def __init__(self):
        self.my_iter = iter(range(2))

    def __iter__(self):
        return self

    def next(self):
        return next(self.my_iter)

    def throw(self, *args):
        # This should not be subject to exception normalization.
        assert len(args) == 1, args

    __next__ = next

# Lets have an exception that must not be instantiated.
class MyError(Exception):
    def __init__(self):
        # pylint: disable=super-init-not-called
        assert False

def simpleFunction23():
    def g():
        x = ClassIteratorRejectingThrow()

        yield from x

    gen = g()
    next(gen)

    # The delegate's throw() receives the bare class; MyError.__init__
    # would assert if normalization instantiated it.
    gen.throw(MyError)
# Intentionally not iterable: exercises the error path of `yield from`.
oho = 1

def simpleFunction24():
    def someGenerator():
        yield from oho

    try:
        list(someGenerator())
    except TypeError:
        pass
# These need stderr to be wrapped.
tests_stderr = (14, 15)

# Disabled tests
tests_skipped = {}

# Run every global named simpleFunction* repeatedly and compare reference
# counts before/after; the process exits non-zero if any test leaks.
result = executeReferenceChecked(
    prefix="simpleFunction",
    names=globals(),
    tests_skipped=tests_skipped,
    tests_stderr=tests_stderr,
)

sys.exit(0 if result else 1)
6,269 | hash obj | """
Utilities for hashing objects.
"""
from __future__ import absolute_import, division
import base64
from io import IOBase
import pickle
from pickle import PickleError, PicklingError
import hashlib
import struct
from collections.abc import Iterable
from pkg_resources import resource_filename
import numpy as np
from pisa.utils.log import logging, set_verbosity
from pisa.utils.resources import find_resource
__all__ = [
'FAST_HASH_FILESIZE_BYTES',
'FAST_HASH_NDARRAY_ELEMENTS',
'FAST_HASH_STR_CHARS',
'hash_obj',
'hash_file',
'test_hash_obj',
'test_hash_file',
]
__author__ = 'J.L. Lanfranchi'
__license__ = '''Copyright (c) 2014-2017, The IceCube Collaboration
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.'''
FAST_HASH_FILESIZE_BYTES = int(1e4)
"""For a fast hash on a file object, this many bytes of the file are used"""
FAST_HASH_NDARRAY_ELEMENTS = int(1e3)
"""For a fast hash on a numpy array or matrix, this many elements of the array
or matrix are used"""
FAST_HASH_STR_CHARS = int(1e3)
"""For a fast hash on a string (or object's pickle string representation), this
many characters are used"""
# NOTE: adding @line_profile decorator slows down function to order of 10s of
# ms even if set_verbosity(0)!
def METHOD_NAME(obj, hash_to='int', full_hash=True):
    """Return hash for an object. Object can be a numpy ndarray or matrix
    (which is serialized to a string), an open file (which has its contents
    read), or any pickle-able Python object.

    Note that only the first most-significant 8 bytes (64 bits) from the MD5
    sum are used in the hash.

    Parameters
    ----------
    obj : object
        Object to hash. Note that the larger the object, the longer it takes to
        hash.

    hash_to : string
        'i', 'int', or 'integer': First 8 bytes of the MD5 sum are interpreted
            as an integer.
        'b', 'bin', or 'binary': MD5 sum digest; returns an 8-character string
        'h', 'x', 'hex': MD5 sum hexdigest, (string of 16 characters)
        'b64', 'base64': first 8 bytes of MD5 sum are base64 encoded (with '+'
            and '-' as final two characters of encoding). Returns string of 12
            characters (including padding).

    full_hash : bool
        If True, hash on the full object's contents (which can be slow) or if
        False, hash on a partial object. For example, only a file's first kB is
        read, and only 1000 elements (chosen at random) of a numpy ndarray are
        hashed on. This mode of operation should suffice for e.g. a
        minimization run, but should _not_ be used for storing to/loading from
        disk.

    Returns
    -------
    hash_val : int or string

    See also
    --------
    hash_file : hash a file on disk by filename/path

    """
    if hash_to is None:
        hash_to = 'int'
    hash_to = hash_to.lower()
    pass_on_kw = dict(hash_to=hash_to, full_hash=full_hash)

    # TODO: convert an existing hash to the desired type, if it isn't already
    # in this type
    # `obj.hash == obj.hash` filters out NaN pseudo-hashes.
    if hasattr(obj, 'hash') and obj.hash is not None and obj.hash == obj.hash:
        return obj.hash

    # Handle numpy arrays and matrices specially
    if isinstance(obj, (np.ndarray, np.matrix)):
        if full_hash:
            # Bug fix: ndarray.tostring() was deprecated in NumPy 1.19 and
            # removed in NumPy 2.0; tobytes() is byte-for-byte equivalent.
            return METHOD_NAME(obj.tobytes(), **pass_on_kw)
        len_flat = obj.size
        stride = 1 + (len_flat // FAST_HASH_NDARRAY_ELEMENTS)
        sub_elements = obj.flat[0::stride]
        return METHOD_NAME(sub_elements.tobytes(), **pass_on_kw)

    # Handle an open file object as a special case
    if isinstance(obj, IOBase):
        if full_hash:
            return METHOD_NAME(obj.read(), **pass_on_kw)
        return METHOD_NAME(obj.read(FAST_HASH_FILESIZE_BYTES), **pass_on_kw)

    # Convert to string (if not one already) in a fast and generic way: pickle;
    # this creates a binary string, which is fine for sending to hashlib
    if not isinstance(obj, str):
        try:
            pkl = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
        except (PickleError, PicklingError, TypeError):
            # Recurse into an iterable that couldn't be pickled
            if isinstance(obj, Iterable):
                return METHOD_NAME([METHOD_NAME(subobj) for subobj in obj],
                                   **pass_on_kw)
            else:
                logging.error('Failed to pickle `obj` "%s" of type "%s"',
                              obj, type(obj))
                raise
        obj = pkl

    if full_hash:
        try:
            md5hash = hashlib.md5(obj)
        except TypeError:
            # `obj` is a text str; hash its default (UTF-8) encoding.
            md5hash = hashlib.md5(obj.encode())
    else:
        # Grab just a subset of the string by changing the stride taken in the
        # character array (but if the string is less than
        # FAST_HASH_STR_CHARS, use a stride length of 1)
        stride = 1 + (len(obj) // FAST_HASH_STR_CHARS)
        try:
            md5hash = hashlib.md5(obj[0::stride])
        except TypeError:
            md5hash = hashlib.md5(obj[0::stride].encode())

    if hash_to in ['i', 'int', 'integer']:
        hash_val, = struct.unpack('<q', md5hash.digest()[:8])
    elif hash_to in ['b', 'bin', 'binary']:
        hash_val = md5hash.digest()[:8]
    elif hash_to in ['h', 'x', 'hex', 'hexadecimal']:
        hash_val = md5hash.hexdigest()[:16]
    elif hash_to in ['b64', 'base64']:
        # Bug fix: under Python 3, b64encode's `altchars` must be bytes and
        # the result is bytes — the old str altchars raised TypeError.
        # Decode so the documented string return type holds.
        hash_val = base64.b64encode(md5hash.digest()[:8], b'+-').decode('ascii')
    else:
        raise ValueError('Unrecognized `hash_to`: "%s"' % (hash_to,))

    return hash_val
def hash_file(fname, hash_to=None, full_hash=True):
    """Return a hash for a file, passing contents through hash_obj function.

    `fname` is resolved via PISA's resource lookup, so package-relative
    names work as well as plain filesystem paths. The file is opened in
    binary mode and hashed via its open-file handle.
    """
    resource = find_resource(fname)
    with open(resource, 'rb') as f:
        return METHOD_NAME(f, hash_to=hash_to, full_hash=full_hash)
def test_hash_obj():
    """Unit tests for `hash_obj` function"""
    # Known-value checks for strings.
    assert METHOD_NAME('x') == 3783177783470249117
    assert METHOD_NAME('x', full_hash=False) == 3783177783470249117
    assert METHOD_NAME('x', hash_to='hex') == '9dd4e461268c8034'
    assert METHOD_NAME(object()) != METHOD_NAME(object)
    # Arrays built from different seeds must hash differently in both
    # full and partial (fast) modes, across several sizes.
    for nel in [10, 100, 1000]:
        rs = np.random.RandomState(seed=0)
        a = rs.rand(nel, nel, 2)
        a0_h_full = METHOD_NAME(a)
        a0_h_part = METHOD_NAME(a, full_hash=False)

        rs = np.random.RandomState(seed=1)
        a = rs.rand(nel, nel, 2)
        a1_h_full = METHOD_NAME(a)
        a1_h_part = METHOD_NAME(a, full_hash=False)

        rs = np.random.RandomState(seed=2)
        a = rs.rand(nel, nel, 2)
        a2_h_full = METHOD_NAME(a)
        a2_h_part = METHOD_NAME(a, full_hash=False)

        assert a1_h_full != a0_h_full
        assert a2_h_full != a0_h_full
        assert a2_h_full != a1_h_full
        assert a1_h_part != a0_h_part
        assert a2_h_part != a0_h_part
        assert a2_h_part != a1_h_part
    logging.info('<< PASS : test_hash_obj >>')
# TODO: test_hash_file function requires a "standard" file to test on
def test_hash_file():
    """Unit tests for `hash_file` function"""
    # Smoke test only: hash this module's own source in both modes.
    file_hash = hash_file(resource_filename('pisa.utils', 'hash.py'))
    logging.debug(file_hash)
    file_hash = hash_file(resource_filename('pisa.utils', 'hash.py'),
                          full_hash=False)
    logging.debug(file_hash)
    logging.info('<< PASS : test_hash_file >>')

if __name__ == "__main__":
    # Run the unit tests with minimal logging verbosity.
    set_verbosity(1)
    test_hash_obj()
    test_hash_file()
6,270 | pypi name munger | import os
from itertools import permutations
EXTS = [".tar.gz", ".zip", ".tar", ".tar.bz2", ".tar.xz", ".tgz"]
def _ext_munger(url):
    """Yield copies of *url* with its archive extension swapped for every other known one."""
    for current, replacement in permutations(EXTS, 2):
        if url.endswith(current):
            stem = url[: -len(current)]
            yield stem + replacement
def _jinja2_munger_factory(field):
def _jinja_munger(url):
# the '<' are from ruamel.yaml.jinja2
# if the variable is '{{version}}'
# it comes out in the url as '<{version}}' after
# parsing so we allow for that too
for spc in ["", " "]:
fs = field + spc
curr = "<{%s}}" % fs
not_curr = "<<{%s}}" % fs
new = "{{ %s }}" % field
if curr in url and not_curr not in url:
yield url.replace(curr, new)
for spc in ["", " "]:
fs = field + spc
curr = "<<{%s}}" % fs
new = "{{ %s }}" % field
if curr in url:
yield url.replace(curr, new)
for spc in ["", " "]:
fs = field + spc
curr = "{{%s}}" % fs
new = "{{ %s }}" % field
if curr in url:
yield url.replace(curr, new)
return _jinja_munger
def _v_munger(url):
for vhave, vrep in permutations(["v{{ v", "{{ v"]):
if vhave in url and (vrep in vhave or vrep not in url):
yield url.replace(vhave, vrep)
def _pypi_domain_munger(url):
for old_d, new_d in permutations(
["https://pypi.io", "https://files.pythonhosted.org"],
2,
):
yield url.replace(old_d, new_d, 1)
def METHOD_NAME(url):
    """For PyPI urls hard-coding the package name in the basename, also yield
    the canonical '{{ name }}-{{ version }}.tar.gz' form, then the url itself."""
    basename = os.path.basename(url)
    hosted = url.startswith(("https://pypi.io", "https://files.pythonhosted.org"))
    if hosted and "{{ version }}" in basename and "{{ name" not in basename:
        yield os.path.join(os.path.dirname(url), "{{ name }}-{{ version }}.tar.gz")
    yield url
def _pypi_munger(url):
    """Yield PyPI url variants with underscore/dash name-replace filters
    normalized: a literal '{{ name }}' is canonicalized to the first accepted
    replace spelling, and any accepted spelling is collapsed back to
    '{{ name }}'. Only the basename (after the last '/') is rewritten."""
    # Each entry pairs '{{ name }}' with the tuple of every accepted spelling
    # of the replace filter, in both replacement directions.
    names = [
        [
            "{{ name }}",
            (
                "{{ name.replace('_', '-') }}",
                '{{ name.replace("_", "-") }}',
                "{{ name.replace('_','-') }}",
                '{{ name.replace("_","-") }}',
                "{{ name|replace('_', '-') }}",
                '{{ name|replace("_", "-") }}',
                "{{ name|replace('_','-') }}",
                '{{ name|replace("_","-") }}',
            ),
        ],
        [
            "{{ name }}",
            (
                "{{ name.replace('-', '_') }}",
                '{{ name.replace("-", "_") }}',
                "{{ name.replace('-','_') }}",
                '{{ name.replace("-","_") }}',
                "{{ name|replace('-', '_') }}",
                '{{ name|replace("-", "_") }}',
                "{{ name|replace('-','_') }}",
                '{{ name|replace("-","_") }}',
            ),
        ],
    ]
    if "/pypi." in url or "/files.pythonhosted.org" in url:
        burl, eurl = url.rsplit("/", 1)
        for _names in names:
            for vhave, vrep in permutations(_names, 2):
                if isinstance(vhave, tuple):
                    # Any accepted spelling collapses to '{{ name }}'.
                    for _v in vhave:
                        if _v in eurl:
                            yield burl + "/" + eurl.replace(_v, vrep)
                elif vhave in eurl:
                    assert isinstance(vrep, tuple)
                    # '{{ name }}' expands to the first accepted spelling.
                    yield burl + "/" + eurl.replace(vhave, vrep[0])
def _github_munger(url):
names = ["/releases/download/v{{ version }}/", "/archive/"]
if "github.com" in url:
burl, eurl = url.rsplit("/", 1)
burl = burl + "/"
for ghave, grep in permutations(names, 2):
if ghave in url:
if ghave == "/archive/":
yield burl.replace(ghave, grep) + "{{ name }}-" + eurl
else:
yield (burl.replace(ghave, grep) + eurl.replace("{{ name }}-", ""))
def _gen_new_urls(url, mungers):
    """Recursively yield every url reachable by applying any subset of
    `mungers` (each munger is either skipped or applied, last-to-first);
    the fully-unmunged url is yielded first. Duplicates are possible."""
    if len(mungers) > 0:
        # ignore last one
        yield from _gen_new_urls(url, mungers[:-1])
        # use it and continue
        for new_url in mungers[-1](url):
            yield from _gen_new_urls(new_url, mungers[:-1])
    else:
        yield url
def gen_transformed_urls(url):
    """Generate transformed urls for common variants.

    Parameters
    ----------
    url : str
        The URL to transform.

    Yields every combination of the registered mungers applied to `url`
    (extension swaps, jinja2 repair, 'v' prefix, PyPI name/domain forms,
    GitHub archive/release forms). The original url is yielded first.
    """
    yield from _gen_new_urls(
        url,
        [
            _ext_munger,
            _v_munger,
            _jinja2_munger_factory("name"),
            _jinja2_munger_factory("version"),
            _jinja2_munger_factory("name[0]"),
            _pypi_munger,
            _pypi_domain_munger,
            METHOD_NAME,
            _github_munger,
        ],
    )
6,271 | write table style info | ###############################################################################
#
# Table - A class for writing the Excel XLSX Worksheet file.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright 2013-2023, John McNamara, jmcnamara@cpan.org
#
from . import xmlwriter
class Table(xmlwriter.XMLwriter):
"""
A class for writing the Excel XLSX Table file.
"""
###########################################################################
#
# Public API.
#
###########################################################################
def __init__(self):
"""
Constructor.
"""
super(Table, self).__init__()
self.properties = {}
###########################################################################
#
# Private API.
#
###########################################################################
def _assemble_xml_file(self):
# Assemble and write the XML file.
# Write the XML declaration.
self._xml_declaration()
# Write the table element.
self._write_table()
# Write the autoFilter element.
self._write_auto_filter()
# Write the tableColumns element.
self._write_table_columns()
# Write the tableStyleInfo element.
self.METHOD_NAME()
# Close the table tag.
self._xml_end_tag("table")
# Close the file.
self._xml_close()
def _set_properties(self, properties):
# Set the document properties.
self.properties = properties
###########################################################################
#
# XML methods.
#
###########################################################################
def _write_table(self):
# Write the <table> element.
schema = "http://schemas.openxmlformats.org/"
xmlns = schema + "spreadsheetml/2006/main"
table_id = self.properties["id"]
name = self.properties["name"]
display_name = self.properties["name"]
ref = self.properties["range"]
totals_row_shown = self.properties["totals_row_shown"]
header_row_count = self.properties["header_row_count"]
attributes = [
("xmlns", xmlns),
("id", table_id),
("name", name),
("displayName", display_name),
("ref", ref),
]
if not header_row_count:
attributes.append(("headerRowCount", 0))
if totals_row_shown:
attributes.append(("totalsRowCount", 1))
else:
attributes.append(("totalsRowShown", 0))
self._xml_start_tag("table", attributes)
def _write_auto_filter(self):
# Write the <autoFilter> element.
autofilter = self.properties.get("autofilter", 0)
if not autofilter:
return
attributes = [
(
"ref",
autofilter,
)
]
self._xml_empty_tag("autoFilter", attributes)
def _write_table_columns(self):
# Write the <tableColumns> element.
columns = self.properties["columns"]
count = len(columns)
attributes = [("count", count)]
self._xml_start_tag("tableColumns", attributes)
for col_data in columns:
# Write the tableColumn element.
self._write_table_column(col_data)
self._xml_end_tag("tableColumns")
def _write_table_column(self, col_data):
# Write the <tableColumn> element.
attributes = [
("id", col_data["id"]),
("name", col_data["name"]),
]
if col_data.get("total_string"):
attributes.append(("totalsRowLabel", col_data["total_string"]))
elif col_data.get("total_function"):
attributes.append(("totalsRowFunction", col_data["total_function"]))
if "format" in col_data and col_data["format"] is not None:
attributes.append(("dataDxfId", col_data["format"]))
if col_data.get("formula"):
self._xml_start_tag("tableColumn", attributes)
# Write the calculatedColumnFormula element.
self._write_calculated_column_formula(col_data["formula"])
self._xml_end_tag("tableColumn")
else:
self._xml_empty_tag("tableColumn", attributes)
def METHOD_NAME(self):
    """Write the <tableStyleInfo> element describing the table's visual style."""
    props = self.properties
    attributes = []

    style_name = props["style"]
    # Skip the name attribute for missing/empty/"None" style names.
    if style_name not in (None, "", "None"):
        attributes.append(("name", style_name))

    # Boolean flags are serialized as 0/1 (the `0 +` coerces bool to int).
    attributes.append(("showFirstColumn", 0 + props["show_first_col"]))
    attributes.append(("showLastColumn", 0 + props["show_last_col"]))
    attributes.append(("showRowStripes", 0 + props["show_row_stripes"]))
    attributes.append(("showColumnStripes", 0 + props["show_col_stripes"]))

    self._xml_empty_tag("tableStyleInfo", attributes)
def _write_calculated_column_formula(self, formula):
    """Write the <calculatedColumnFormula> element holding *formula*.

    Fixed: the original final line carried a stray trailing ``|`` token,
    which is a syntax error.
    """
    self._xml_data_element("calculatedColumnFormula", formula)
6,272 | remote source | """Create models with a set of default valid properties, to avoid
changes forcing an update of all test code.
"""
import os
import uuid
from datetime import datetime
from itertools import cycle
from typing import List
from sdclientapi import Reply as SDKReply
from sdclientapi import Source as SDKSource
from sdclientapi import Submission as SDKSubmission
from sdclientapi import User as SDKUser
from securedrop_client import db
from securedrop_client.api_jobs.base import ApiJob
SOURCE_COUNT = 0
MESSAGE_COUNT = 0
FILE_COUNT = 0
REPLY_COUNT = 0
DRAFT_REPLY_COUNT = 0
REPLY_SEND_STATUS_COUNT = 0
USER_COUNT = 0
def User(**attrs):
    """Return a ``db.User`` with valid defaults, overridden by *attrs*."""
    global USER_COUNT
    USER_COUNT += 1
    fields = {
        "uuid": f"user-uuid-{USER_COUNT}",
        "username": f"test-user-id-{USER_COUNT}",
        "firstname": "slim",
        "lastname": "shady",
    }
    fields.update(attrs)
    return db.User(**fields)
def Source(**attrs):
    """Return a ``db.Source`` using a real public key from the test fixtures."""
    key_path = os.path.join(os.path.dirname(__file__), "files", "test-key.gpg.pub.asc")
    with open(key_path) as f:
        pub_key = f.read()

    global SOURCE_COUNT
    SOURCE_COUNT += 1
    fields = {
        "uuid": f"source-uuid-{SOURCE_COUNT}",
        "journalist_designation": "testy-mctestface",
        "is_flagged": False,
        "public_key": pub_key,
        "fingerprint": "B2FF7FB28EED8CABEBC5FB6C6179D97BCFA52E5F",
        "interaction_count": 0,
        "is_starred": False,
        "last_updated": datetime.now(),
        "document_count": 0,
    }
    fields.update(attrs)
    return db.Source(**fields)
def Message(**attrs):
    """Return a ``db.Message`` with valid defaults, overridden by *attrs*."""
    global MESSAGE_COUNT
    MESSAGE_COUNT += 1
    fields = {
        "uuid": f"msg-uuid-{MESSAGE_COUNT}",
        "filename": f"{MESSAGE_COUNT}-msg.gpg",
        "size": 123,
        "download_url": "http://wat.onion/abc",
        "is_decrypted": True,
        "is_downloaded": True,
        "content": "content",
    }
    fields.update(attrs)
    return db.Message(**fields)
def Reply(**attrs):
    """Return a ``db.Reply`` with valid defaults, overridden by *attrs*."""
    global REPLY_COUNT
    REPLY_COUNT += 1
    fields = {
        "uuid": f"reply-uuid-{REPLY_COUNT}",
        "filename": f"{REPLY_COUNT}-reply.gpg",
        "size": 123,
        "is_decrypted": True,
        "is_downloaded": True,
        "content": "content",
    }
    fields.update(attrs)
    return db.Reply(**fields)
def DraftReply(**attrs):
    """Return a ``db.DraftReply`` with valid defaults, overridden by *attrs*."""
    global DRAFT_REPLY_COUNT
    DRAFT_REPLY_COUNT += 1
    fields = {
        "uuid": f"draft-reply-uuid-{DRAFT_REPLY_COUNT}",
        "timestamp": datetime.utcnow(),
        "source_id": 1,
        "journalist_id": 1,
        "file_counter": 1,
        "content": "content",
        "send_status_id": 1,
    }
    fields.update(attrs)
    return db.DraftReply(**fields)
def ReplySendStatus(**attrs):
    """Return a ``db.ReplySendStatus`` defaulting to the PENDING status."""
    global REPLY_SEND_STATUS_COUNT
    REPLY_SEND_STATUS_COUNT += 1
    fields = {"name": db.ReplySendStatusCodes.PENDING.value}
    fields.update(attrs)
    return db.ReplySendStatus(**fields)
def File(**attrs):
    """Return a ``db.File`` with valid defaults, overridden by *attrs*."""
    global FILE_COUNT
    FILE_COUNT += 1
    fields = {
        "uuid": f"file-uuid-{FILE_COUNT}",
        "filename": f"{FILE_COUNT}-doc.gz.gpg",
        "size": 123,
        "download_url": "http://wat.onion/abc",
        "is_decrypted": True,
        "is_downloaded": True,
    }
    fields.update(attrs)
    return db.File(**fields)
def dummy_job_factory(mocker, return_value, **kwargs):
    """
    Factory that creates dummy `ApiJob`s to DRY up test code.
    """

    class DummyApiJob(ApiJob):
        success_signal = mocker.MagicMock()
        failure_signal = mocker.MagicMock()

        def __init__(self, *nargs, **kwargs):
            super().__init__(*nargs, **kwargs)
            # A list is consumed in order; any other value repeats forever.
            is_sequence = isinstance(return_value, List)
            self.return_value = iter(return_value) if is_sequence else cycle([return_value])

        def call_api(self, api_client, session):
            outcome = next(self.return_value)
            if isinstance(outcome, Exception):
                raise outcome
            return outcome

    return DummyApiJob
def RemoteUser(**attrs):
    """Return an SDK ``User`` with valid defaults, overridden by *attrs*."""
    fields = {
        "uuid": str(uuid.uuid4()),
        "username": "dellsberg",
        "first_name": "Daniel",
        "last_name": "Ellsberg",
    }
    fields.update(attrs)
    return SDKUser(**fields)
def METHOD_NAME(**attrs):
    """Return an SDK ``Source`` with a fixture public key, overridden by *attrs*."""
    key_path = os.path.join(os.path.dirname(__file__), "files", "test-key.gpg.pub.asc")
    with open(key_path) as f:
        pub_key = f.read()

    fields = {
        "add_star_url": "foo",
        "interaction_count": 0,
        "is_flagged": False,
        "is_starred": True,
        "journalist_designation": "testerino testy-mctestface",
        "key": {"public": pub_key, "fingerprint": "B2FF7FB28EED8CABEBC5FB6C6179D97BCFA52E5F"},
        "last_updated": datetime.now().isoformat(),
        "number_of_documents": 0,
        "number_of_messages": 0,
        "remove_star_url": "baz",
        "replies_url": "qux",
        "submissions_url": "wibble",
        "url": "url",
        "uuid": str(uuid.uuid4()),
        "seen_by": None,
    }
    fields.update(attrs)
    return SDKSource(**fields)
def RemoteReply(**attrs):
    """Return an SDK ``Reply`` with valid defaults, overridden by *attrs*."""
    fields = {
        "filename": "1-reply.filename",
        "journalist_uuid": str(uuid.uuid4()),
        "journalist_username": "test",
        "journalist_first_name": "",
        "journalist_last_name": "",
        "file_counter": 1,
        "is_deleted_by_source": False,
        "reply_url": "test",
        "size": 1234,
        "uuid": str(uuid.uuid4()),
        "source_url": "/api/v1/sources/{}".format(str(uuid.uuid4())),
        "seen_by": [],
    }
    fields.update(attrs)
    return SDKReply(**fields)
def RemoteFile(**attrs):
    """Return an SDK ``Submission`` representing a file, overridden by *attrs*."""
    global FILE_COUNT
    FILE_COUNT += 1
    src_uuid = str(uuid.uuid4())
    fields = {
        "uuid": f"file-uuid-{FILE_COUNT}",
        "filename": f"{FILE_COUNT}-doc.gz.gpg",
        "source_uuid": src_uuid,
        "download_url": "test",
        "submission_url": "test",
        "is_read": False,
        "file_counter": FILE_COUNT,
        "is_deleted_by_source": False,
        "reply_url": "test",
        "size": 1234,
        "is_decrypted": True,
        "is_downloaded": True,
        "source_url": f"/api/v1/sources/{src_uuid}",
        "seen_by": [],
    }
    fields.update(attrs)
    return SDKSubmission(**fields)
def RemoteMessage(**attrs):
    """Return an SDK ``Submission`` representing a message, overridden by *attrs*.

    Fixed: the original final line carried a stray trailing ``|`` token,
    which is a syntax error.
    """
    global MESSAGE_COUNT
    MESSAGE_COUNT += 1
    src_uuid = str(uuid.uuid4())
    defaults = dict(
        uuid="msg-uuid-{}".format(MESSAGE_COUNT),
        filename="{}-msg.gpg".format(MESSAGE_COUNT),
        source_uuid=src_uuid,
        download_url="test",
        submission_url="test",
        is_read=False,
        file_counter=MESSAGE_COUNT,
        is_deleted_by_source=False,
        reply_url="test",
        size=1234,
        is_decrypted=True,
        is_downloaded=True,
        source_url=f"/api/v1/sources/{src_uuid}",
        seen_by=[],
    )
    defaults.update(attrs)
    return SDKSubmission(**defaults)
6,273 | get supervisors by cluster web api | # -*- coding: utf-8 -*-
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import logging
import os
from typing import AsyncGenerator, Dict, List, Optional, TypeVar
from ...services.cluster import WebClusterAPI
from ...services.cluster.backends import (
register_cluster_backend,
AbstractClusterBackend,
)
from ...services.cluster.core import NodeRole
from ..utils import wait_all_supervisors_ready, next_in_thread
from .config import MarsReplicationConfig
logger = logging.getLogger(__name__)
RetType = TypeVar("RetType")
@register_cluster_backend
class K8SClusterBackend(AbstractClusterBackend):
    """Cluster backend that discovers Mars supervisors inside Kubernetes.

    Supervisor pods query the Kubernetes Endpoints API directly; worker pods
    go through the supervisor web API instead (see ``get_supervisors`` /
    ``watch_supervisors`` for the role-based dispatch).
    """

    name = "k8s"

    def __init__(
        self, node_role=None, pool_address=None, k8s_config=None, k8s_namespace=None
    ):
        # Imported lazily so the module can be loaded without the kubernetes
        # package installed.
        from kubernetes import client

        self._node_role = node_role
        self._pool_address = pool_address
        self._k8s_config = k8s_config

        # KUBE_VERIFY_SSL=0 disables TLS verification globally (e.g. for
        # clusters with self-signed certificates).
        verify_ssl = bool(int(os.environ.get("KUBE_VERIFY_SSL", "1")))
        if not verify_ssl:
            c = client.Configuration()
            c.verify_ssl = False
            client.Configuration.set_default(c)

        self._k8s_namespace = (
            k8s_namespace or os.environ.get("MARS_K8S_POD_NAMESPACE") or "default"
        )
        self._service_name = os.environ.get("MARS_K8S_SERVICE_NAME")
        self._full_label_selector = None
        self._client = client.CoreV1Api(client.ApiClient(self._k8s_config))

        self._pod_to_ep = dict()

    @classmethod
    async def create(
        cls, node_role: NodeRole, lookup_address: Optional[str], pool_address: str
    ) -> "AbstractClusterBackend":
        """Build a backend from a lookup address.

        ``lookup_address`` may be ``None`` (in-cluster config), a kube-config
        file path, or an API server URL; an optional ``?namespace`` suffix
        selects the namespace.
        """
        from kubernetes import config, client

        if lookup_address is None:
            k8s_namespace = None
            k8s_config = config.load_incluster_config()
        else:
            address_parts = lookup_address.rsplit("?", 1)
            k8s_namespace = None if len(address_parts) == 1 else address_parts[1]

            k8s_config = client.Configuration()
            if "://" in address_parts[0]:
                k8s_config.host = address_parts[0]
            else:
                config.load_kube_config(
                    address_parts[0], client_configuration=k8s_config
                )
        return cls(node_role, pool_address, k8s_config, k8s_namespace)

    def __reduce__(self):
        # Support pickling by reconstructing from the constructor arguments;
        # the live API client itself is not picklable.
        return (
            type(self),
            (
                self._node_role,
                self._pool_address,
                self._k8s_config,
                self._k8s_namespace,
            ),
        )

    @staticmethod
    def _format_endpoint_query_result(result: Dict, filter_ready: bool = True):
        """Convert an Endpoints API dict into a list of ``ip:port`` strings."""
        port = os.environ["MARS_K8S_SERVICE_PORT"]
        endpoints = [
            f"{addr['ip']}:{port}" for addr in result["subsets"][0]["addresses"] or []
        ]
        # NOTE(review): when filter_ready is False, the ready-address list
        # built above is discarded and only not-ready addresses are returned
        # — confirm this is intended rather than returning both sets.
        if not filter_ready:
            endpoints = [
                f"{addr['ip']}:{port}"
                for addr in result["subsets"][0]["not_ready_addresses"] or []
            ]
        return endpoints

    def _get_web_cluster_api(self):
        """Return a ``WebClusterAPI`` pointed at the supervisor web service."""
        supervisor_web_port = os.environ["MARS_K8S_SUPERVISOR_WEB_PORT"]
        web_url = (
            f"http://{self._service_name}.{self._k8s_namespace}:{supervisor_web_port}"
        )
        api = WebClusterAPI(web_url)
        return api

    async def _watch_supervisors_by_service_api(
        self,
    ) -> AsyncGenerator[List[str], None]:
        """Yield supervisor endpoint lists by watching the Endpoints API."""
        from urllib3.exceptions import ReadTimeoutError
        from kubernetes.watch import Watch as K8SWatch

        w = K8SWatch()
        while True:
            # The 60s timeout makes the blocking stream wake up periodically;
            # the outer loop simply re-establishes the watch.
            streamer = w.stream(
                self._client.list_namespaced_endpoints,
                namespace=self._k8s_namespace,
                label_selector=f"mars/service-name={self._service_name}",
                timeout_seconds=60,
            )
            while True:
                try:
                    event = await next_in_thread(streamer)
                    obj_dict = event["object"].to_dict()
                    yield self._format_endpoint_query_result(obj_dict)
                except (ReadTimeoutError, StopAsyncIteration):
                    break
                except:  # noqa: E722  # pragma: no cover  # pylint: disable=bare-except
                    logger.exception("Unexpected error when watching on kubernetes")
                    break

    async def _watch_supervisors_by_cluster_web_api(self):
        # Worker-side variant: watch via the supervisor web API, retrying
        # forever on transient network errors.
        while True:
            try:
                api = self._get_web_cluster_api()
                async for supervisors in api.watch_supervisors():
                    yield supervisors
            except (OSError, asyncio.TimeoutError):
                pass

    async def _get_supervisors_by_service_api(
        self, filter_ready: bool = True
    ) -> List[str]:
        """Read supervisor endpoints once via the Endpoints API (supervisor side)."""
        result = (
            await asyncio.to_thread(
                self._client.read_namespaced_endpoints,
                name=self._service_name,
                namespace=self._k8s_namespace,
            )
        ).to_dict()
        return self._format_endpoint_query_result(result, filter_ready=filter_ready)

    async def METHOD_NAME(self, filter_ready: bool = True):
        """Read supervisor endpoints via the web API, returning [] on failure."""
        api = self._get_web_cluster_api()
        try:
            supervisors = await api.get_supervisors(filter_ready=filter_ready)
            return supervisors
        except (OSError, asyncio.TimeoutError):  # pragma: no cover
            return []

    async def get_supervisors(self, filter_ready: bool = True) -> List[str]:
        # Supervisors can hit the Kubernetes API directly; workers must go
        # through the supervisor web service.
        if self._node_role == NodeRole.SUPERVISOR:
            return await self._get_supervisors_by_service_api(filter_ready)
        else:
            return await self.METHOD_NAME(filter_ready)

    async def watch_supervisors(self) -> AsyncGenerator[List[str], None]:
        """Continuously yield the current supervisor list until cancelled."""
        if self._node_role == NodeRole.SUPERVISOR:
            watch_fun = self._watch_supervisors_by_service_api
        else:
            watch_fun = self._watch_supervisors_by_cluster_web_api

        try:
            async for supervisors in watch_fun():
                yield supervisors
        except asyncio.CancelledError:
            pass

    # Dynamic worker scaling is not supported by this backend.
    async def request_worker(
        self, worker_cpu: int = None, worker_mem: int = None, timeout: int = None
    ) -> str:
        raise NotImplementedError

    async def release_worker(self, address: str):
        raise NotImplementedError

    async def reconstruct_worker(self, address: str):
        raise NotImplementedError
class K8SServiceMixin:
    """Shared helpers for Mars services running inside Kubernetes pods.

    Fixed: the original final line carried a stray trailing ``|`` token,
    which is a syntax error.
    """

    @staticmethod
    def write_pid_file():
        # Written so pod tooling can locate the service process.
        with open("/tmp/mars-service.pid", "w") as pid_file:
            pid_file.write(str(os.getpid()))

    async def wait_all_supervisors_ready(self):
        """
        Wait till all containers are ready
        """
        await wait_all_supervisors_ready(self.args.endpoint)

    async def start_readiness_server(self):
        # The readiness probe only needs the port to accept connections,
        # hence the no-op connection handler.
        readiness_port = os.environ.get(
            "MARS_K8S_READINESS_PORT", MarsReplicationConfig.default_readiness_port
        )
        self._readiness_server = await asyncio.start_server(
            lambda r, w: None, port=readiness_port
        )

    async def stop_readiness_server(self):
        self._readiness_server.close()
        await self._readiness_server.wait_closed()
6,274 | before post | from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship
from app.api.bootstrap import api
from app.api.custom_placeholders import CustomPlaceholder
from app.api.helpers.db import safe_query_kwargs
from app.api.helpers.errors import ForbiddenError
from app.api.helpers.permission_manager import has_access
from app.api.helpers.utilities import require_relationship
from app.api.schema.event_sub_topics import EventSubTopicSchema
from app.models import db
from app.models.event import Event
from app.models.event_sub_topic import EventSubTopic
from app.models.event_topic import EventTopic
class EventSubTopicListPost(ResourceList):
    """
    Create event sub topics
    """

    def METHOD_NAME(self, args, kwargs, data):
        """Validate relationships and permissions before creating a sub-topic.

        :param args: positional args from the resource layer
        :param kwargs: keyword args from the resource layer
        :param data: deserialized request payload
        :return: None; raises on validation failure
        """
        require_relationship(['event_topic'], data)
        if not has_access('is_admin'):
            raise ForbiddenError({'source': ''}, 'Admin access is required.')

    view_kwargs = True
    methods = ['POST']
    schema = EventSubTopicSchema
    data_layer = {'session': db.session, 'model': EventSubTopic}
class EventSubTopicList(ResourceList):
    """
    List event sub topics
    """

    def query(self, view_kwargs):
        """Build the sub-topic query, optionally scoped to one event topic.

        :param view_kwargs: view keyword arguments from the URL
        :return: SQLAlchemy query of EventSubTopic rows
        """
        sub_topics = self.session.query(EventSubTopic)
        if view_kwargs.get('event_topic_id'):
            event_topic = safe_query_kwargs(EventTopic, view_kwargs, 'event_topic_id')
            sub_topics = sub_topics.join(EventTopic).filter(
                EventTopic.id == event_topic.id
            )
        return sub_topics

    view_kwargs = True
    methods = ['GET']
    schema = EventSubTopicSchema
    data_layer = {
        'session': db.session,
        'model': EventSubTopic,
        'methods': {'query': query},
    }
class EventSubTopicDetail(ResourceDetail):
    """
    Event sub topic detail by id
    """

    def before_get_object(self, view_kwargs):
        """
        before get method to get the resource id to fetch details
        :param view_kwargs:
        :return:
        """
        # Resolve an event identifier (slug) to its numeric id first so the
        # event_id branch below handles both lookup styles.
        if view_kwargs.get('event_identifier'):
            event = safe_query_kwargs(
                Event, view_kwargs, 'event_identifier', 'identifier'
            )
            view_kwargs['event_id'] = event.id

        if view_kwargs.get('event_id'):
            event = safe_query_kwargs(Event, view_kwargs, 'event_id')
            # Events without a sub-topic set id=None so the detail lookup
            # yields a 404 instead of an arbitrary record.
            if event.event_sub_topic_id:
                view_kwargs['id'] = event.event_sub_topic_id
            else:
                view_kwargs['id'] = None

        # Same pattern for lookups routed through a custom placeholder.
        if view_kwargs.get('custom_placeholder_id'):
            custom_placeholder = safe_query_kwargs(
                CustomPlaceholder,
                view_kwargs,
                'custom_placeholder_id',
            )
            if custom_placeholder.event_sub_topic_id:
                view_kwargs['id'] = custom_placeholder.event_sub_topic_id
            else:
                view_kwargs['id'] = None

    decorators = (api.has_permission('is_admin', methods="PATCH,DELETE"),)
    schema = EventSubTopicSchema
    data_layer = {
        'session': db.session,
        'model': EventSubTopic,
        'methods': {'before_get_object': before_get_object},
    }
class EventSubTopicRelationshipRequired(ResourceRelationship):
    """
    Event sub topic Relationship
    """

    # Required relationships cannot be deleted, only viewed or updated.
    decorators = (api.has_permission('is_admin', methods="PATCH"),)
    methods = ['GET', 'PATCH']
    schema = EventSubTopicSchema
    data_layer = {'session': db.session, 'model': EventSubTopic}
class EventSubTopicRelationshipOptional(ResourceRelationship):
    """
    Event sub topic Relationship

    Fixed: the original final line carried a stray trailing ``|`` token,
    which is a syntax error.
    """

    decorators = (api.has_permission('is_admin', methods="PATCH,DELETE"),)
    schema = EventSubTopicSchema
    data_layer = {'session': db.session, 'model': EventSubTopic}
6,275 | create | import typing
import uuid
from datetime import datetime
import simplejson as json
from django.db import models
from django.utils import timezone
from task_processor.exceptions import TaskProcessingError
from task_processor.managers import RecurringTaskManager, TaskManager
from task_processor.task_registry import registered_tasks
class AbstractBaseTask(models.Model):
    """Common fields and behaviour shared by one-off and recurring tasks."""

    uuid = models.UUIDField(unique=True, default=uuid.uuid4)
    created_at = models.DateTimeField(auto_now_add=True)
    task_identifier = models.CharField(max_length=200)
    serialized_args = models.TextField(blank=True, null=True)
    serialized_kwargs = models.TextField(blank=True, null=True)
    is_locked = models.BooleanField(default=False)

    class Meta:
        abstract = True

    @property
    def args(self) -> typing.List[typing.Any]:
        """Deserialized positional arguments for the task callable."""
        if self.serialized_args:
            return self.deserialize_data(self.serialized_args)
        return []

    @property
    def kwargs(self) -> typing.Dict[str, typing.Any]:
        """Deserialized keyword arguments for the task callable."""
        if self.serialized_kwargs:
            return self.deserialize_data(self.serialized_kwargs)
        return {}

    @staticmethod
    def serialize_data(data: typing.Any):
        # TODO: add datetime support if needed
        return json.dumps(data)

    @staticmethod
    def deserialize_data(data: typing.Any):
        return json.loads(data)

    def mark_failure(self):
        self.unlock()

    def mark_success(self):
        self.unlock()

    def unlock(self):
        # NOTE: callers are responsible for saving the model afterwards.
        self.is_locked = False

    def run(self):
        """Execute the registered callable with the stored args/kwargs."""
        return self.callable(*self.args, **self.kwargs)

    @property
    def callable(self) -> typing.Callable:
        """Look up the registered task function for this identifier.

        :raises TaskProcessingError: if no task was registered under
            ``task_identifier``.
        """
        try:
            return registered_tasks[self.task_identifier]
        except KeyError as e:
            # Bug fix: the identifier was previously passed as a second
            # exception argument (printf-style) and never interpolated into
            # the message; format it explicitly instead.
            raise TaskProcessingError(
                "No task registered with identifier '%s'. Ensure your task is "
                "decorated with @register_task_handler." % self.task_identifier
            ) from e
class Task(AbstractBaseTask):
    """A one-off task, optionally scheduled for a future time."""

    scheduled_for = models.DateTimeField(blank=True, null=True, default=timezone.now)

    # denormalise failures and completion so that we can use select_for_update
    num_failures = models.IntegerField(default=0)
    completed = models.BooleanField(default=False)

    objects = TaskManager()

    class Meta:
        # We have customised the migration in 0004 to only apply this change to postgres databases
        # TODO: work out how to index the taskprocessor_task table for Oracle and MySQL
        indexes = [
            models.Index(
                name="incomplete_tasks_idx",
                fields=["scheduled_for"],
                condition=models.Q(completed=False, num_failures__lt=3),
            )
        ]

    @classmethod
    def METHOD_NAME(
        cls,
        task_identifier: str,
        *,
        args: typing.Optional[typing.Tuple[typing.Any, ...]] = None,
        kwargs: typing.Optional[typing.Dict[str, typing.Any]] = None,
    ) -> "Task":
        """Build (but do not save) a Task with serialized args/kwargs."""
        return Task(
            task_identifier=task_identifier,
            serialized_args=cls.serialize_data(args or tuple()),
            serialized_kwargs=cls.serialize_data(kwargs or dict()),
        )

    @classmethod
    def schedule_task(
        cls,
        schedule_for: datetime,
        task_identifier: str,
        *,
        args: typing.Optional[typing.Tuple[typing.Any, ...]] = None,
        kwargs: typing.Optional[typing.Dict[str, typing.Any]] = None,
    ) -> "Task":
        """Build (but do not save) a Task scheduled for *schedule_for*."""
        task = cls.METHOD_NAME(
            task_identifier=task_identifier,
            args=args,
            kwargs=kwargs,
        )
        task.scheduled_for = schedule_for
        return task

    def mark_failure(self):
        # Unlocks (via super) and counts the failure; >=3 failures excludes
        # the task from the incomplete_tasks_idx partial index above.
        super().mark_failure()
        self.num_failures += 1

    def mark_success(self):
        super().mark_success()
        self.completed = True
class RecurringTask(AbstractBaseTask):
    """A task executed repeatedly at a fixed interval."""

    run_every = models.DurationField()
    # Earliest time-of-day before which the very first run is suppressed.
    first_run_time = models.TimeField(blank=True, null=True)

    objects = RecurringTaskManager()

    class Meta:
        constraints = [
            models.UniqueConstraint(
                fields=["task_identifier", "run_every"],
                name="unique_run_every_tasks",
            ),
        ]

    @property
    def should_execute(self) -> bool:
        """Return True if this recurring task is due to run now."""
        now = timezone.now()
        last_task_run = self.task_runs.order_by("-started_at").first()

        if not last_task_run:
            # If we have never run this task, then we should execute it only if
            # the time has passed after which we want to ensure this task runs.
            # This allows us to control when intensive tasks should be run.
            return not (self.first_run_time and self.first_run_time > now.time())

        # if the last run was at t- run_every, then we should execute it
        # NOTE(review): this calls timezone.now() again instead of reusing
        # `now` captured above — harmless, but slightly inconsistent.
        if (timezone.now() - last_task_run.started_at) >= self.run_every:
            return True

        # if the last run was not a success and we do not have
        # more than 3 failures in t- run_every, then we should execute it
        # NOTE(review): TaskResult is defined later in this module; the
        # reference resolves at call time, not at class-definition time.
        if (
            last_task_run.result != TaskResult.SUCCESS.name
            and self.task_runs.filter(started_at__gte=(now - self.run_every)).count()
            <= 3
        ):
            return True

        # otherwise, we should not execute it
        return False

    @property
    def is_task_registered(self) -> bool:
        """Whether a handler exists for this task's identifier."""
        return self.task_identifier in registered_tasks
class TaskResult(models.Choices):
    """Possible outcomes of a single task run."""

    SUCCESS = "SUCCESS"
    FAILURE = "FAILURE"
class AbstractTaskRun(models.Model):
    """Common fields recording one execution attempt of a task."""

    started_at = models.DateTimeField()
    # Null while the run is still in progress.
    finished_at = models.DateTimeField(blank=True, null=True)
    result = models.CharField(
        max_length=50, choices=TaskResult.choices, blank=True, null=True, db_index=True
    )
    error_details = models.TextField(blank=True, null=True)

    class Meta:
        abstract = True
class TaskRun(AbstractTaskRun):
    """A single execution attempt of a one-off ``Task``."""

    task = models.ForeignKey(Task, on_delete=models.CASCADE, related_name="task_runs")
class RecurringTaskRun(AbstractTaskRun):
    """A single execution attempt of a ``RecurringTask``."""

    task = models.ForeignKey(
        RecurringTask, on_delete=models.CASCADE, related_name="task_runs"
    )
class HealthCheckModel(models.Model):
    """Row written by the health check to verify database connectivity.

    Fixed: the original final line carried a stray trailing ``|`` token,
    which is a syntax error.
    """

    created_at = models.DateTimeField(auto_now_add=True)
    uuid = models.UUIDField(unique=True, blank=False, null=False)
6,276 | test is valid hex color | # Copyright (c) Ansible project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import pytest
from ansible_collections.community.general.tests.unit.compat.mock import Mock, patch
from ansible_collections.community.general.plugins.modules import slack
from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
class TestSlackModule(ModuleTestCase):
    """Unit tests for the community.general slack module.

    Fixes applied throughout:
      * ``self.assertTrue(fetch_url_mock.call_count, 1)`` never asserted the
        count — assertTrue's second argument is the failure *message*, so the
        assertion passed for any truthy call count. Replaced with
        ``assertEqual``.
      * Deprecated ``assertEquals`` replaced with ``assertEqual``.
    """

    def setUp(self):
        super(TestSlackModule, self).setUp()
        self.module = slack

    def tearDown(self):
        super(TestSlackModule, self).tearDown()

    @pytest.fixture
    def fetch_url_mock(self, mocker):
        return mocker.patch('ansible.module_utils.notification.slack.fetch_url')

    def test_without_required_parameters(self):
        """Failure must occurs when all parameters are missing"""
        with self.assertRaises(AnsibleFailJson):
            set_module_args({})
            self.module.main()

    def test_invalid_old_token(self):
        """Failure if there is an old style token"""
        set_module_args({
            'token': 'test',
        })
        with self.assertRaises(AnsibleFailJson):
            self.module.main()

    def test_successful_message(self):
        """tests sending a message. This is example 1 from the docs"""
        set_module_args({
            'token': 'XXXX/YYYY/ZZZZ',
            'msg': 'test'
        })

        with patch.object(slack, "fetch_url") as fetch_url_mock:
            fetch_url_mock.return_value = (None, {"status": 200})
            with self.assertRaises(AnsibleExitJson):
                self.module.main()

            self.assertEqual(fetch_url_mock.call_count, 1)
            call_data = json.loads(fetch_url_mock.call_args[1]['data'])
            assert call_data['username'] == "Ansible"
            assert call_data['text'] == "test"
            assert fetch_url_mock.call_args[1]['url'] == "https://hooks.slack.com/services/XXXX/YYYY/ZZZZ"

    def test_failed_message(self):
        """tests failing to send a message"""

        set_module_args({
            'token': 'XXXX/YYYY/ZZZZ',
            'msg': 'test'
        })

        with patch.object(slack, "fetch_url") as fetch_url_mock:
            fetch_url_mock.return_value = (None, {"status": 404, 'msg': 'test'})
            with self.assertRaises(AnsibleFailJson):
                self.module.main()

    def test_message_with_thread(self):
        """tests sending a message with a thread"""
        set_module_args({
            'token': 'XXXX/YYYY/ZZZZ',
            'msg': 'test',
            'thread_id': '100.00'
        })

        with patch.object(slack, "fetch_url") as fetch_url_mock:
            fetch_url_mock.return_value = (None, {"status": 200})
            with self.assertRaises(AnsibleExitJson):
                self.module.main()

            self.assertEqual(fetch_url_mock.call_count, 1)
            call_data = json.loads(fetch_url_mock.call_args[1]['data'])
            assert call_data['username'] == "Ansible"
            assert call_data['text'] == "test"
            assert call_data['thread_ts'] == '100.00'
            assert fetch_url_mock.call_args[1]['url'] == "https://hooks.slack.com/services/XXXX/YYYY/ZZZZ"

    # https://github.com/ansible-collections/community.general/issues/1097
    def test_ts_in_message_does_not_cause_edit(self):
        set_module_args({
            'token': 'xoxa-123456789abcdef',
            'msg': 'test with ts'
        })

        with patch.object(slack, "fetch_url") as fetch_url_mock:
            mock_response = Mock()
            mock_response.read.return_value = '{"fake":"data"}'
            fetch_url_mock.return_value = (mock_response, {"status": 200})
            with self.assertRaises(AnsibleExitJson):
                self.module.main()

            self.assertEqual(fetch_url_mock.call_count, 1)
            self.assertEqual(fetch_url_mock.call_args[1]['url'], "https://slack.com/api/chat.postMessage")

    def test_edit_message(self):
        set_module_args({
            'token': 'xoxa-123456789abcdef',
            'msg': 'test2',
            'message_id': '12345'
        })

        with patch.object(slack, "fetch_url") as fetch_url_mock:
            mock_response = Mock()
            mock_response.read.return_value = '{"messages":[{"ts":"12345","msg":"test1"}]}'
            # First call looks up the existing message, second performs the edit.
            fetch_url_mock.side_effect = [
                (mock_response, {"status": 200}),
                (mock_response, {"status": 200}),
            ]
            with self.assertRaises(AnsibleExitJson):
                self.module.main()

            self.assertEqual(fetch_url_mock.call_count, 2)
            self.assertEqual(fetch_url_mock.call_args[1]['url'], "https://slack.com/api/chat.update")
            call_data = json.loads(fetch_url_mock.call_args[1]['data'])
            self.assertEqual(call_data['ts'], "12345")

    def test_message_with_blocks(self):
        """tests sending a message with blocks"""
        set_module_args({
            'token': 'XXXX/YYYY/ZZZZ',
            'msg': 'test',
            'blocks': [{
                'type': 'section',
                'text': {
                    'type': 'mrkdwn',
                    'text': '*test*'
                },
                'accessory': {
                    'type': 'image',
                    'image_url': 'https://docs.ansible.com/favicon.ico',
                    'alt_text': 'test'
                }
            }, {
                'type': 'section',
                'text': {
                    'type': 'plain_text',
                    'text': 'test',
                    'emoji': True
                }
            }]
        })

        with patch.object(slack, "fetch_url") as fetch_url_mock:
            fetch_url_mock.return_value = (None, {"status": 200})
            with self.assertRaises(AnsibleExitJson):
                self.module.main()

            self.assertEqual(fetch_url_mock.call_count, 1)
            call_data = json.loads(fetch_url_mock.call_args[1]['data'])
            assert call_data['username'] == "Ansible"
            assert call_data['blocks'][1]['text']['text'] == "test"
            assert fetch_url_mock.call_args[1]['url'] == "https://hooks.slack.com/services/XXXX/YYYY/ZZZZ"

    def test_message_with_invalid_color(self):
        """tests sending invalid color value to module"""
        set_module_args({
            'token': 'XXXX/YYYY/ZZZZ',
            'msg': 'test',
            'color': 'aa',
        })
        with self.assertRaises(AnsibleFailJson) as exec_info:
            self.module.main()

        msg = "Color value specified should be either one of" \
              " ['normal', 'good', 'warning', 'danger'] or any valid" \
              " hex value with length 3 or 6."
        assert exec_info.exception.args[0]['msg'] == msg
# (color string, expected validity) pairs for slack.is_valid_hex_color().
color_test = [
    ('#111111', True),
    ('#00aabb', True),
    ('#abc', True),
    ('#gghhjj', False),
    ('#ghj', False),
    ('#a', False),
    ('#aaaaaaaa', False),
    ('', False),
    ('aaaa', False),
    ('$00aabb', False),
    ('$00a', False),
]


@pytest.mark.parametrize("color_value, ret_status", color_test)
def METHOD_NAME(color_value, ret_status):
    """Validate slack.is_valid_hex_color() against known good/bad values.

    Fixed: the original final line carried a stray trailing ``|`` token,
    which is a syntax error.
    """
    generated_value = slack.is_valid_hex_color(color_value)
    assert generated_value == ret_status
6,277 | test generate document not supported | import json, uuid
from app.extensions import cache
from app.api.constants import NOW_DOCUMENT_DOWNLOAD_TOKEN
from tests.now_application_factories import NOWApplicationFactory, NOWApplicationIdentityFactory
from app.api.now_applications.models.now_application_document_type import NOWApplicationDocumentType
class TestGetNOWApplicationDocumentTypeResource:
    """GET /now-applications/application-document-types

    Fixes applied:
      * ``test_generate_document_returns_token`` was defined twice; the second
        definition silently shadowed the first, so the CAL-template test never
        ran. The second test is renamed to reflect what it actually checks.
      * The final assertion was truncated (``assert post_data['token'``),
        which is a syntax error; completed to ``assert post_data['token']``.
    """

    def test_get_application_document_types(self, test_client, db_session, auth_headers):
        """Should return the correct number of records with a 200 response code"""
        get_resp = test_client.get(
            f'/now-applications/application-document-types',
            headers=auth_headers['full_auth_header'])
        assert get_resp.status_code == 200, get_resp.response
        get_data = json.loads(get_resp.data.decode())
        assert len(get_data['records']) == len(NOWApplicationDocumentType.get_all())

    def test_get_application_document_type(self, test_client, db_session, auth_headers):
        """Should return the a single document_type"""
        code = NOWApplicationDocumentType.get_all()[0].now_application_document_type_code
        get_resp = test_client.get(
            f'/now-applications/application-document-types/{code}',
            headers=auth_headers['full_auth_header'])
        assert get_resp.status_code == 200
        # Decoding also validates that the body is well-formed JSON.
        get_data = json.loads(get_resp.data.decode())

    def test_get_application_document_with_context(self, test_client, db_session, auth_headers):
        """Should return the rejection letter document type with form_spec with context-values"""
        now_application = NOWApplicationFactory()
        now_application_identity = NOWApplicationIdentityFactory(now_application=now_application)

        get_resp = test_client.get(
            f'/now-applications/application-document-types/RJL?context_guid={now_application_identity.now_application_guid}',
            headers=auth_headers['full_auth_header'])
        assert get_resp.status_code == 200, get_resp.response
        get_data = json.loads(get_resp.data.decode())
        assert get_data['document_template']
        assert get_data['document_template'].get('form_spec'), get_data
        mine_no_item = [
            x for x in get_data['document_template']['form_spec'] if x['id'] == "mine_no"
        ][0]
        assert mine_no_item
        assert mine_no_item['id'] == 'mine_no'
        assert mine_no_item['context-value'] == str(now_application_identity.mine.mine_no)

    def test_application_document_types_have_generation_spec(self, test_client, db_session,
                                                             auth_headers):
        """Should return the correct number of records with document_templates"""
        get_resp = test_client.get(
            f'/now-applications/application-document-types',
            headers=auth_headers['full_auth_header'])
        assert get_resp.status_code == 200
        get_data = json.loads(get_resp.data.decode())
        assert len([x for x in get_data['records'] if x['document_template']]) > 0
        assert len([x for x in get_data['records'] if x['document_template']]) == len(
            [x for x in NOWApplicationDocumentType.get_all() if x.document_template_code])

    def test_generate_document_not_found(self, test_client, db_session, auth_headers):
        """Should error is document type doesn't exist"""
        post_resp = test_client.post(
            f'/now-applications/application-document-types/ZZZ/generate',
            headers=auth_headers['full_auth_header'])
        assert post_resp.status_code == 404

    def METHOD_NAME(self, test_client, db_session, auth_headers):
        """Should error if document type exists but doesn't support generation"""
        post_resp = test_client.post(
            f'/now-applications/application-document-types/OTH/generate',
            headers=auth_headers['full_auth_header'])
        assert post_resp.status_code == 400

    def test_generate_document_returns_token(self, test_client, db_session, auth_headers):
        """Should return the a token for successful generation"""
        now_application = NOWApplicationFactory()
        now_application_identity = NOWApplicationIdentityFactory(now_application=now_application)
        now_application.issuing_inspector.signature = 'data:image/png;base64,'
        data = {
            'now_application_guid': now_application_identity.now_application_guid,
            'template_data': {
                'help': 'test'
            }
        }
        post_resp = test_client.post(
            f'/now-applications/application-document-types/CAL/generate',
            json=data,
            headers=auth_headers['full_auth_header'])
        assert post_resp.status_code == 200
        post_data = json.loads(post_resp.data.decode())
        assert post_data['token']

    def test_generate_document_ignores_overridden_context_data(self, test_client, db_session,
                                                               auth_headers):
        """Should not let caller-supplied template_data overwrite context values"""
        now_application = NOWApplicationFactory()
        now_application_identity = NOWApplicationIdentityFactory(now_application=now_application)
        now_application.issuing_inspector.signature = 'data:image/png;base64,'
        changed_mine_no = str(now_application_identity.mine.mine_no + '1')
        data = {
            'now_application_guid': now_application_identity.now_application_guid,
            'template_data': {
                'mine_no': changed_mine_no
            }
        }
        post_resp = test_client.post(
            f'/now-applications/application-document-types/RJL/generate',
            json=data,
            headers=auth_headers['full_auth_header'])
        assert post_resp.status_code == 200
        post_data = json.loads(post_resp.data.decode())
        token_data = cache.get(NOW_DOCUMENT_DOWNLOAD_TOKEN(post_data['token']))
        assert token_data is not None
        assert token_data['template_data']['mine_no'] != changed_mine_no
        assert post_data['token']
6,278 | serverinfo | #!/usr/bin/env python
"""
Version of dbsClient.dbsApi intended to be used with mock or unittest.mock
"""
from __future__ import (division, print_function)
from builtins import object
from future.utils import viewitems
import copy
import json
import os
from RestClient.ErrorHandling.RestClientExceptions import HTTPError
from WMCore.Services.DBS.DBSErrors import DBSReaderError
from WMCore.WMBase import getTestBase
from Utils.Utilities import encodeUnicodeToBytesConditional
from Utils.PythonVersion import PY2
# Read in the data just once so that we don't have to do it for every test (in __init__)
def _load_mock_json(path):
    """Load one mock-data JSON fixture, returning {} when it is absent or unreadable.

    Extracted because the global and phys03 fixtures were loaded with two
    copy-pasted try/except blocks.
    """
    try:
        with open(path, 'r') as mockFile:
            return json.load(mockFile)
    except IOError:
        return {}


# Maps DBS instance URL -> {signature: canned response}; consumed by MockDbsApi.
mockData = {}
globalFile = os.path.join(getTestBase(), '..', 'data', 'Mock', 'DBSMockData.json')
phys03File = os.path.join(getTestBase(), '..', 'data', 'Mock', 'DBSMockData03.json')

# Keep the module-level names so any external importers still find them.
mockDataGlobal = _load_mock_json(globalFile)
mockData03 = _load_mock_json(phys03File)

mockData['https://cmsweb-prod.cern.ch/dbs/prod/global/DBSReader'] = mockDataGlobal
mockData['https://cmsweb-prod.cern.ch/dbs/prod/phys03/DBSReader'] = mockData03
class MockDbsApi(object):
    """Drop-in mock for dbsClient.dbsApi backed by canned JSON fixtures.

    Responses are resolved by looking up a signature string — the called
    method name plus its sorted keyword arguments — in the module-level
    ``mockData`` table for this instance's DBS URL.
    """

    def __init__(self, url):
        print("Using MockDBSApi")
        # Strip trailing slashes so the URL matches the mockData keys exactly.
        self.url = url.strip('/')

        # print("Initializing MockDBSApi")

    def METHOD_NAME(self):
        """Return static server info, mimicking the DBS serverinfo call."""
        return {'dbs_instance': 'MOCK', 'dbs_version': '3.3.144'}

    def listFileArray(self, **kwargs):
        """
        Handle the case when logical_file_name is called with a list (longer than one) of files
        since we don't want to store all permutations. Rebuild the list of dicts that DBS returns

        Args:
            **kwargs: any kwargs that dbs client accepts

        Returns:
            list of dicts, concatenated over per-LFN fixture lookups
        """
        self.item = 'listFileArray'
        if 'logical_file_name' in kwargs and len(kwargs['logical_file_name']) > 1:
            # Look up each LFN individually and concatenate the results.
            origArgs = copy.deepcopy(kwargs)
            returnDicts = []
            for lfn in kwargs['logical_file_name']:
                origArgs.update({'logical_file_name': [lfn]})
                returnDicts.extend(self.genericLookup(**origArgs))
            return returnDicts
        else:
            return self.genericLookup(**kwargs)

    def listFileLumiArray(self, **kwargs):
        """
        Handle the case when logical_file_name is called with a list (longer than one) of files
        since we don't want to store all permutations. Rebuild the list of dicts that DBS returns

        Args:
            **kwargs: any kwargs that dbs client accepts

        Returns:
            list of dicts, concatenated over per-LFN fixture lookups
        """
        self.item = 'listFileLumiArray'
        if 'logical_file_name' in kwargs and len(kwargs['logical_file_name']) > 1:
            origArgs = copy.deepcopy(kwargs)
            returnDicts = []

            # since we iterate over this, we better make sure it's a list to avoid
            # things like: ['U', 'N', 'K', 'N', 'O', 'W', 'N']
            if isinstance(kwargs['logical_file_name'], str):
                kwargs['logical_file_name'] = [kwargs['logical_file_name']]
            for lfn in kwargs['logical_file_name']:
                origArgs.update({'logical_file_name': [lfn]})
                returnDicts.extend(self.genericLookup(**origArgs))
            return returnDicts
        else:
            return self.genericLookup(**kwargs)

    def __getattr__(self, item):
        """
        __getattr__ gets called in case lookup of the actual method fails. We use this to return data based on
        a lookup table

        :param item: The method name the user is trying to call
        :return: The generic lookup function
        """
        # Remember the requested method name; genericLookup uses it to build
        # the fixture signature.
        self.item = item
        return self.genericLookup

    def genericLookup(self, *args, **kwargs):
        """
        This function returns the mocked DBS data

        :param args: positional arguments it was called with
        :param kwargs: named arguments it was called with
        :return: the dictionary that DBS would have returned
        """
        if self.url not in mockData:
            raise DBSReaderError("Mock DBS emulator knows nothing about instance %s" % self.url)

        if kwargs:
            # Byte-encode values under PY2 so signatures match how the
            # fixtures were recorded.
            for k in kwargs:
                if isinstance(kwargs[k], (list, tuple)):
                    kwargs[k] = [encodeUnicodeToBytesConditional(item, condition=PY2) for item in kwargs[k]]
                else:
                    kwargs[k] = encodeUnicodeToBytesConditional(kwargs[k], condition=PY2)
            signature = '%s:%s' % (self.item, sorted(viewitems(kwargs)))
        else:
            signature = self.item

        try:
            if mockData[self.url][signature] == 'Raises HTTPError':
                # Special fixture marker that simulates a DBS server error.
                raise HTTPError('http:/dbs.mock.fail', 400, 'MockDBS is raising an exception in place of DBS', 'Dummy header', 'Dummy body')
            else:
                return mockData[self.url][signature]
        except KeyError:
            if kwargs.get('dataset', None) == '/HighPileUp/Run2011A-v1/RAW-BLAH':
                # Known dataset that is deliberately absent from the fixtures.
                return []
            raise KeyError("DBS mock API could not return data for method %s, args=%s, and kwargs=%s (URL %s) (Signature: %s)" %
                           (self.item, args, kwargs, self.url, signature))
6,279 | apply dense | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Momentum for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.optimizer_v2 import optimizer_v2
from tensorflow.python.training import training_ops
class MomentumOptimizer(optimizer_v2.OptimizerV2):
  """Optimizer that implements the Momentum algorithm.

  Computes (if `use_nesterov = False`):

  ```
  accumulation = momentum * accumulation + gradient
  variable -= learning_rate * accumulation
  ```

  Note that in the dense version of this algorithm, `accumulation` is updated
  and applied regardless of a gradient's value, whereas the sparse version (when
  the gradient is an `IndexedSlices`, typically because of `tf.gather` or an
  embedding) only updates variable slices and corresponding `accumulation` terms
  when that part of the variable was used in the forward pass.
  """

  def __init__(self, learning_rate, momentum,
               use_locking=False, name="Momentum", use_nesterov=False):
    """Construct a new Momentum optimizer.

    Some of the args below are hyperparameters, where a hyperparameter is
    defined as a scalar Tensor, a regular Python value or a callable (which
    will be evaluated when `apply_gradients` is called) returning a scalar
    Tensor or a Python value.

    Args:
      learning_rate: A float hyperparameter. The learning rate.
      momentum: A float hyperparameter. The momentum.
      use_locking: If `True` use locks for update operations.
      name: Optional name prefix for the operations created when applying
        gradients. Defaults to "Momentum".
      use_nesterov: If `True` use Nesterov Momentum.
        See [Sutskever et al., 2013](
        http://jmlr.org/proceedings/papers/v28/sutskever13.pdf).
        This implementation always computes gradients at the value of the
        variable(s) passed to the optimizer. Using Nesterov Momentum makes the
        variable(s) track the values called `theta_t + mu*v_t` in the paper.

    @compatibility(eager)
    When eager execution is enabled, learning_rate and momentum can each be a
    callable that takes no arguments and returns the actual value to use. This
    can be useful for changing these values across different invocations of
    optimizer functions.
    @end_compatibility
    """
    super(MomentumOptimizer, self).__init__(use_locking, name)
    self._set_hyper("learning_rate", learning_rate)
    self._set_hyper("momentum", momentum)
    self._use_nesterov = use_nesterov

  def _create_vars(self, var_list, state):
    # One "momentum" accumulator slot per trainable variable.
    for v in var_list:
      state.zeros_slot(v, "momentum")

  def METHOD_NAME(self, grad, var, state):
    # Dense update for ref (non-resource) variables; the fused kernel applies
    # the momentum update and the variable update in one op.
    mom = state.get_slot(var, "momentum")
    return training_ops.apply_momentum(
        var,
        mom,
        state.get_hyper("learning_rate", var.dtype.base_dtype),
        grad,
        state.get_hyper("momentum", var.dtype.base_dtype),
        use_locking=self._use_locking,
        use_nesterov=self._use_nesterov).op

  def _resource_apply_dense(self, grad, var, state):
    # Dense update for resource variables (handle-based).
    mom = state.get_slot(var, "momentum")
    return training_ops.resource_apply_momentum(
        var.handle,
        mom.handle,
        state.get_hyper("learning_rate", var.dtype.base_dtype),
        grad,
        state.get_hyper("momentum", var.dtype.base_dtype),
        use_locking=self._use_locking,
        use_nesterov=self._use_nesterov)

  def _apply_sparse(self, grad, var, state):
    # Sparse update: only the slices named by grad.indices are touched.
    mom = state.get_slot(var, "momentum")
    return training_ops.sparse_apply_momentum(
        var,
        mom,
        state.get_hyper("learning_rate", var.dtype.base_dtype),
        grad.values,
        grad.indices,
        state.get_hyper("momentum", var.dtype.base_dtype),
        use_locking=self._use_locking,
        use_nesterov=self._use_nesterov).op

  def _resource_apply_sparse(self, grad, var, indices, state):
    # Sparse update for resource variables.
    mom = state.get_slot(var, "momentum")
    return training_ops.resource_sparse_apply_momentum(
        var.handle,
        mom.handle,
        state.get_hyper("learning_rate", var.dtype.base_dtype),
        grad,
        indices,
        state.get_hyper("momentum", var.dtype.base_dtype),
        use_locking=self._use_locking,
        use_nesterov=self._use_nesterov)
6,280 | get config | #################################################################################
# FOQUS Copyright (c) 2012 - 2023, by the software owners: Oak Ridge Institute
# for Science and Education (ORISE), TRIAD National Security, LLC., Lawrence
# Livermore National Security, LLC., The Regents of the University of
# California, through Lawrence Berkeley National Laboratory, Battelle Memorial
# Institute, Pacific Northwest Division through Pacific Northwest National
# Laboratory, Carnegie Mellon University, West Virginia University, Boston
# University, the Trustees of Princeton University, The University of Texas at
# Austin, URS Energy & Construction, Inc., et al. All rights reserved.
#
# Please see the file LICENSE.md for full copyright and license information,
# respectively. This file is also available online at the URL
# "https://github.com/CCSI-Toolset/FOQUS".
#################################################################################
import os
import numpy as np
import pandas as pd
import random as rn
import tensorflow as tf
# set seed values for reproducibility
# NOTE(review): setting PYTHONHASHSEED from inside a running interpreter does
# not change this process's hash randomization (it only affects child
# processes) — confirm the intent.
os.environ["PYTHONHASHSEED"] = "0"
os.environ[
    "CUDA_VISIBLE_DEVICES"
] = ""  # changing "" to "0" or "-1" may solve import issues
np.random.seed(46)  # NumPy RNG
rn.seed(1342)  # Python stdlib RNG
tf.random.set_seed(62)  # TensorFlow global RNG
# Example follows the sequence below:
# 1) Code at end of file to import data and create model
# 2) Call create_model() to define inputs and outputs
# 3) Call CustomLayer to define network structure, which uses
# call() to define layer connections and get_config to attach
# attributes to CustomLayer class object
# 4) Back to create_model() to compile and train model
# 5) Back to code at end of file to save, load and test model
# custom class to define Keras NN layers
@tf.keras.utils.register_keras_serializable()
class mea_column_model(tf.keras.layers.Layer):
    """Custom Keras layer bundling the hidden/output dense stack plus FOQUS metadata.

    The label/bounds/normalization attributes take no part in the forward pass;
    they are round-tripped through get_config() so FOQUS can reload the saved
    model and apply the right scaling.
    """

    def __init__(
        self,
        n_hidden=1,
        n_neurons=12,
        layer_act="relu",
        out_act="sigmoid",
        input_labels=None,
        output_labels=None,
        input_bounds=None,
        output_bounds=None,
        normalized=False,
        normalization_form="Linear",
        **kwargs
    ):
        super(mea_column_model, self).__init__()  # create callable object

        # add attributes from training settings
        self.n_hidden = n_hidden
        self.n_neurons = n_neurons
        self.layer_act = layer_act
        self.out_act = out_act

        # add attributes from model data
        self.input_labels = input_labels
        self.output_labels = output_labels
        self.input_bounds = input_bounds
        self.output_bounds = output_bounds
        self.normalized = normalized  # FOQUS will read this and adjust accordingly
        self.normalization_form = (
            normalization_form  # tells FOQUS which scaling form to use
        )

        # create lists to contain new layer objects
        self.dense_layers = []  # hidden or output layers
        self.dropout = []  # for large number of neurons, certain neurons
        # can be randomly dropped out to reduce overfitting
        # NOTE(review): self.dropout is never populated in this class, so the
        # dropout loop in call() is a no-op — confirm that is intended.

        for layer in range(self.n_hidden):
            self.dense_layers.append(
                tf.keras.layers.Dense(self.n_neurons, activation=self.layer_act)
            )

        # Output layer is hard-wired to 2 units (the two output columns).
        self.dense_layers_out = tf.keras.layers.Dense(2, activation=self.out_act)

    # define network layer connections
    def call(self, inputs):
        """Forward pass: inputs -> hidden dense stack -> 2-unit output layer."""
        x = inputs  # single input layer, input defined in create_model()
        for layer in self.dense_layers:  # hidden layers
            x = layer(x)  # h1 = f(input), h2 = f(h1), ... using act func
        for layer in self.dropout:  # no dropout layers used in this example
            x = layer(x)
        x = self.dense_layers_out(x)  # single output layer, output = f(h_last)
        return x

    # attach attributes to class CONFIG
    def METHOD_NAME(self):
        """Serialize hyperparameters and FOQUS metadata for model save/load."""
        config = super(mea_column_model, self).METHOD_NAME()
        config.update(
            {
                "n_hidden": self.n_hidden,
                "n_neurons": self.n_neurons,
                "layer_act": self.layer_act,
                "out_act": self.out_act,
                "input_labels": self.input_labels,
                "output_labels": self.output_labels,
                "input_bounds": self.input_bounds,
                "output_bounds": self.output_bounds,
                "normalized": self.normalized,
                "normalization_form": self.normalization_form,
            }
        )
        return config
# method to create model
def create_model(data):
    """Build, compile and train the Keras surrogate model.

    NOTE(review): ``data`` is only used to size the input layer; the labels,
    bounds, and the training arrays come from the module-level
    ``xdata``/``zdata``/``xlabels``/``zlabels``/bounds globals — confirm this
    coupling is intended before reusing this function elsewhere.
    """
    inputs = tf.keras.Input(shape=(np.shape(data)[1],))  # create input layer

    layers = mea_column_model(  # define the rest of network using our custom class
        input_labels=xlabels,
        output_labels=zlabels,
        input_bounds=xdata_bounds,
        output_bounds=zdata_bounds,
        normalized=True,
        normalization_form="Linear",
    )

    outputs = layers(inputs)  # use network as function outputs = f(inputs)

    model = tf.keras.Model(inputs=inputs, outputs=outputs)  # create model
    model.compile(loss="mse", optimizer="RMSprop", metrics=["mae", "mse"])
    model.fit(xdata, zdata, epochs=500, verbose=0)  # train model
    return model
# Main code

# import data
data = pd.read_csv(r"MEA_carbon_capture_dataset_mimo.csv")
xdata = data.iloc[:, :6]  # there are 6 input variables/columns
zdata = data.iloc[:, 6:]  # the rest are output variables/columns
xlabels = xdata.columns.tolist()  # set labels as a list (default) from pandas
zlabels = zdata.columns.tolist()  # is a set of IndexedDataSeries objects
xdata_bounds = {i: (xdata[i].min(), xdata[i].max()) for i in xdata}  # x bounds
zdata_bounds = {j: (zdata[j].min(), zdata[j].max()) for j in zdata}  # z bounds

# normalize data using Linear form
# users can normalize with any allowed form # manually, and then pass the
# appropriate flag to FOQUS from the allowed list:
# ["Linear", "Log", "Power", "Log 2", "Power 2"] - see the documentation for
# details on the scaling formulations
xmax, xmin = xdata.max(axis=0), xdata.min(axis=0)
zmax, zmin = zdata.max(axis=0), zdata.min(axis=0)
xdata, zdata = np.array(xdata), np.array(zdata)
# Element-wise min-max scaling into [0, 1].
# NOTE(review): these nested Python loops are O(rows*cols); equivalent
# vectorized broadcasting would be a one-liner per array.
for i in range(len(xdata)):
    for j in range(len(xlabels)):
        xdata[i, j] = (xdata[i, j] - xmin[j]) / (xmax[j] - xmin[j])
    for j in range(len(zlabels)):
        zdata[i, j] = (zdata[i, j] - zmin[j]) / (zmax[j] - zmin[j])
model_data = np.concatenate(
    (xdata, zdata), axis=1
)  # Keras requires a Numpy array as input

# define x and z data, not used but will add to variable dictionary
# (re-slices the already-normalized arrays: last 2 columns are outputs)
xdata = model_data[:, :-2]
zdata = model_data[:, -2:]

# create model
model = create_model(xdata)
model.summary()

# save model as H5
model.save("mea_column_model.h5")
6,281 | ship | import atexit
import collections
import datetime
import queue
import sys
import threading
import time
import types
from typing import Any, Callable, Dict, Iterator, List, Optional, TextIO, Union
from determined import core
from determined.common import api
class _LogShipper:
    """Base class for trial log shipping; usable as a context manager.

    The default implementation is a no-op. Subclasses override ``start`` and
    ``close`` to intercept stdout/stderr and forward logs to the master.
    """

    def __init__(
        self,
        *,
        session: api.Session,
        trial_id: int,
        task_id: str,
        distributed: Optional[core.DistributedContext] = None
    ) -> None:
        self._session = session
        self._trial_id = trial_id
        self._task_id = task_id
        self._distributed = distributed

    def start(self) -> "_LogShipper":
        # No-op by default; subclasses begin interception here.
        return self

    def close(
        self,
        exc_type: Optional[type],
        exc_val: Optional[BaseException],
        exc_tb: Optional[types.TracebackType],
    ) -> "_LogShipper":
        # No-op by default; subclasses restore streams / flush here.
        return self

    def __enter__(self) -> "_LogShipper":
        return self.start()

    def __exit__(
        self,
        exc_type: Optional[type],
        exc_val: Optional[BaseException],
        exc_tb: Optional[types.TracebackType],
    ) -> "_LogShipper":
        return self.close(exc_type, exc_val, exc_tb)
class _ManagedTrialLogShipper(_LogShipper):
    """
    Managed trials will ship their logs normally via fluentd.
    """

    # Inherits the no-op start/close: nothing to intercept in managed mode.
    pass
class _Interceptor:
def __init__(self, original_io: TextIO, handler: Callable[[str], None]) -> None:
self._original_io = original_io
self._handler = handler
def write(self, data: str) -> int:
self._handler(data)
return self._original_io.write(data)
def flush(self) -> None:
self._original_io.flush()
def __getattr__(self, attr: str) -> Any:
return getattr(self._original_io, attr)
# How often (seconds) the sender thread flushes buffered log lines.
SHIPPER_FLUSH_INTERVAL = 1
# Backoff (seconds) after a failed ship attempt.
# NOTE(review): not referenced anywhere in this module's visible code —
# confirm it is used elsewhere before relying on it.
SHIPPER_FAILURE_BACKOFF_SECONDS = 1
# Maximum number of log messages shipped in one POST.
LOG_BATCH_MAX_SIZE = 1000
# Bound on the producer queue so runaway logging applies backpressure.
SHIP_QUEUE_MAX_SIZE = 3 * LOG_BATCH_MAX_SIZE


class _ShutdownMessage:
    # Sentinel queued to tell the sender thread to flush and exit.
    pass


# Items on the ship queue are either raw log text or the shutdown sentinel.
_QueueElement = Union[str, _ShutdownMessage]
class _LogSender(threading.Thread):
    """Daemon thread that batches intercepted log text and POSTs it as task logs.

    Producers call ``write``; the thread drains its queue, splits the text into
    complete newline-terminated lines, copies ``logs_metadata`` onto each, and
    ships batches roughly every SHIPPER_FLUSH_INTERVAL seconds.
    """

    def __init__(self, session: api.Session, logs_metadata: Dict) -> None:
        # Bounded queue: if logging outpaces shipping, writers block (backpressure).
        self._queue = queue.Queue(maxsize=SHIP_QUEUE_MAX_SIZE)  # type: queue.Queue[_QueueElement]
        self._logs = collections.deque()  # type: collections.deque[str]
        self._session = session
        self._logs_metadata = logs_metadata
        # Holds a trailing partial line until its newline arrives.
        self._buf = ""
        super().__init__(daemon=True)

    def write(self, data: str) -> None:
        self._queue.put(data)

    def close(self) -> None:
        # The sentinel makes run() do a final flush and exit.
        self._queue.put(_ShutdownMessage())

    def _pop_until_deadline(self, deadline: float) -> Iterator[_QueueElement]:
        # Yield queue items until the flush deadline passes or the queue empties.
        while True:
            timeout = deadline - time.time()
            if timeout <= 0:
                break

            try:
                yield self._queue.get(timeout=timeout)
            except queue.Empty:
                break

    def run(self) -> None:
        while True:
            deadline = time.time() + SHIPPER_FLUSH_INTERVAL
            for m in self._pop_until_deadline(deadline):
                if isinstance(m, _ShutdownMessage):
                    # Final flush before the thread exits.
                    self.METHOD_NAME()
                    return

                self._logs.append(m)

                # Ship early once a full batch has accumulated.
                if len(self._logs) >= LOG_BATCH_MAX_SIZE:
                    self.METHOD_NAME()

            self.METHOD_NAME()

    def METHOD_NAME(self) -> None:
        """Convert buffered text into complete-line messages and ship them."""
        if len(self._logs) == 0:
            return

        msgs = []
        while len(self._logs):
            data = self._logs.popleft()
            self._buf += data
            # Emit one message per complete line; keep any partial tail in _buf.
            while "\n" in self._buf:
                idx = self._buf.index("\n") + 1
                line = self._buf[:idx]
                self._buf = self._buf[idx:]

                msg = dict(self._logs_metadata)
                msg["log"] = line
                msgs.append(msg)

                # NOTE(review): `>` allows a batch of LOG_BATCH_MAX_SIZE + 1
                # before shipping — confirm whether `>=` was intended.
                if len(msgs) > LOG_BATCH_MAX_SIZE:
                    self._ship(msgs)
                    msgs = []
        if len(msgs) > 0:
            self._ship(msgs)

    def _ship(self, msgs: List[Dict]) -> None:
        self._session.post("task-logs", json=msgs)
class _UnmanagedTrialLogShipper(_LogShipper):
    """Ships logs for unmanaged trials by intercepting stdout/stderr in-process."""

    def start(self) -> "_LogShipper":
        # Remember the real streams so close() can restore them.
        self._original_stdout, self._original_stderr = sys.stdout, sys.stderr

        logs_metadata = {
            "task_id": self._task_id,
            "timestamp": datetime.datetime.now(datetime.timezone.utc).isoformat(),
        }

        if self._distributed:
            logs_metadata["rank"] = str(self._distributed.rank)

        self._log_sender = _LogSender(session=self._session, logs_metadata=logs_metadata)

        # Tee both streams into the sender while preserving normal output.
        sys.stdout = _Interceptor(sys.stdout, self._log_sender.write)  # type: ignore
        sys.stderr = _Interceptor(sys.stderr, self._log_sender.write)  # type: ignore

        self._log_sender.start()

        # Ensure logs are flushed even if close() is never called explicitly.
        atexit.register(self._exit_handler)

        return self

    def _exit_handler(self) -> None:
        self.close()

    def close(
        self,
        exc_type: Optional[type] = None,
        exc_val: Optional[BaseException] = None,
        exc_tb: Optional[types.TracebackType] = None,
    ) -> "_LogShipper":
        atexit.unregister(self._exit_handler)
        # Restore the real streams before shutting the sender down, so the
        # sender's own shutdown output is not re-intercepted.
        sys.stdout, sys.stderr = self._original_stdout, self._original_stderr
        self._log_sender.close()
        self._log_sender.join()
        return self
6,282 | test dense | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for PowerSign."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.contrib.opt.python.training import powersign
from tensorflow.contrib.opt.python.training import sign_decay
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def py_linear_decay_fn(decay_steps):
  """Build a pure-python linear decay schedule.

  The returned callable maps step 0 -> 1.0 and any step >= decay_steps -> 0.0,
  interpolating linearly in between.
  """
  def linear_decay(step):
    remaining = decay_steps - min(step, decay_steps)
    return remaining / float(decay_steps)
  return linear_decay
def powersign_update_numpy(params,
                           g_t,
                           m,
                           lr,
                           base=math.e,
                           beta=0.9,
                           py_sign_decay_fn=None,
                           t=None):
  """NumPy reference implementation of one PowerSign update step.

  Returns the pair (updated params, updated momentum accumulator).
  """
  # Exponential moving average of the gradient.
  m_t = beta * m + (1 - beta) * g_t
  # Optional sign-decay schedule, evaluated at the previous step index.
  sign_decayed = 1.0 if py_sign_decay_fn is None else py_sign_decay_fn(t - 1)
  # PowerSign scales the step by base^(decay * gradient/momentum sign agreement).
  multiplier = base ** (sign_decayed * np.sign(g_t) * np.sign(m_t))
  return params - lr * multiplier * g_t, m_t
class PowerSignTest(xla_test.XLATestCase):
  """Checks the XLA PowerSign optimizer against the numpy reference above."""

  def METHOD_NAME(self,
                  learning_rate=0.1,
                  sign_decay_fn=None,
                  py_sign_decay_fn=None,
                  base=math.e,
                  beta=0.9):
    # Shared driver: runs the TF optimizer and the numpy model in lockstep,
    # comparing both variables after every step.
    for dtype in self.float_types:
      with self.session(), self.test_scope():
        # Initialize variables for numpy implementation.
        m0, m1 = 0.0, 0.0
        var0_np = np.array([1.0, 2.0], dtype=dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype)

        var0 = resource_variable_ops.ResourceVariable(var0_np)
        var1 = resource_variable_ops.ResourceVariable(var1_np)
        global_step = resource_variable_ops.ResourceVariable(0, trainable=False)
        grads0 = constant_op.constant(grads0_np)
        grads1 = constant_op.constant(grads1_np)

        opt = powersign.PowerSignOptimizer(
            learning_rate=learning_rate,
            base=base,
            beta=beta,
            sign_decay_fn=sign_decay_fn,
        )
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
                                     global_step=global_step)
        neg_update = opt.apply_gradients(zip([-grads0, -grads1], [var0, var1]),
                                         global_step=global_step)

        variables.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], self.evaluate(var0))
        self.assertAllClose([3.0, 4.0], self.evaluate(var1))

        # Run 7 steps of powersign
        # first 4 steps with positive gradient
        # last 3 steps with negative gradient (sign(gm) should be -1)
        for t in range(1, 8):
          if t < 5:
            update.run()
          else:
            neg_update.run()

          var0_np, m0 = powersign_update_numpy(
              var0_np,
              grads0_np if t < 5 else -grads0_np,
              m0,
              learning_rate,
              base=base,
              beta=beta,
              py_sign_decay_fn=py_sign_decay_fn,
              t=t,
          )
          var1_np, m1 = powersign_update_numpy(
              var1_np,
              grads1_np if t < 5 else -grads1_np,
              m1,
              learning_rate,
              base=base,
              beta=beta,
              py_sign_decay_fn=py_sign_decay_fn,
              t=t,
          )

          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
          self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))

  def testDense(self):
    # Cover defaults, a non-default base/beta pair, and a linear sign-decay
    # schedule (TF and python versions of the same schedule).
    decay_steps = 10
    sign_decay_fn = sign_decay.get_linear_decay_fn(decay_steps)
    py_sign_decay_fn = py_linear_decay_fn(decay_steps)
    self.METHOD_NAME()
    self.METHOD_NAME(learning_rate=0.1, base=10.0, beta=0.8)
    self.METHOD_NAME(
        sign_decay_fn=sign_decay_fn, py_sign_decay_fn=py_sign_decay_fn)


if __name__ == '__main__':
  test.main()
6,283 | list | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._location_extension_types_operations import build_list_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class LocationExtensionTypesOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.kubernetesconfiguration.v2021_05_01_preview.aio.SourceControlConfigurationClient`'s
        :attr:`location_extension_types` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs) -> None:
        # Positional args (client, config, serializer, deserializer) take
        # precedence over the equivalent keyword arguments.
        input_args = METHOD_NAME(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace
    def METHOD_NAME(self, location: str, **kwargs: Any) -> AsyncIterable["_models.ExtensionType"]:
        """List all Extension Types.

        :param location: extension location. Required.
        :type location: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ExtensionType or the result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.kubernetesconfiguration.v2021_05_01_preview.models.ExtensionType]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-05-01-preview"))
        cls: ClsType[_models.ExtensionTypeList] = kwargs.pop("cls", None)

        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            if not next_link:
                # First page: build the request from the operation template.
                request = build_list_request(
                    location=location,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.METHOD_NAME.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)

            else:
                # make call to next link with the client's api-version
                # (re-parse the service-provided URL and force api-version
                # onto its query string)
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and hand back (next_link, items).
            deserialized = self._deserialize("ExtensionTypeList", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            _stream = False
            pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)

    METHOD_NAME.metadata = {
        "url": "/subscriptions/{subscriptionId}/providers/Microsoft.KubernetesConfiguration/locations/{location}/extensionTypes"
    }
6,284 | outer | """
Provide basic components for groupby. These defintiions
hold the whitelist of methods that are exposed on the
SeriesGroupBy and the DataFrameGroupBy objects.
"""
import types
from pandas.util._decorators import make_signature
from pandas.core.dtypes.common import is_list_like, is_scalar
class GroupByMixin(object):
    """
    Provide the groupby facilities to the mixed object.
    """

    @staticmethod
    def _dispatch(name, *args, **kwargs):
        """
        Dispatch to apply.
        """

        def METHOD_NAME(self, *args, **kwargs):
            # Re-wrap each group chunk so the named method sees the same
            # groupby-aware object, then forward the call group-by-group.
            def f(x):
                x = self._shallow_copy(x, groupby=self._groupby)
                return getattr(x, name)(*args, **kwargs)
            return self._groupby.apply(f)

        # Expose the dispatched method under its real name.
        METHOD_NAME.__name__ = name
        return METHOD_NAME

    def _gotitem(self, key, ndim, subset=None):
        """
        Sub-classes to define. Return a sliced object.

        Parameters
        ----------
        key : string / list of selections
        ndim : 1,2
            requested ndim of result
        subset : object, default None
            subset to act on
        """
        # create a new object to prevent aliasing
        if subset is None:
            subset = self.obj

        # we need to make a shallow copy of ourselves
        # with the same groupby
        kwargs = {attr: getattr(self, attr) for attr in self._attributes}

        # Try to select from a DataFrame, falling back to a Series
        try:
            groupby = self._groupby[key]
        except IndexError:
            groupby = self._groupby

        # Rebind `self` to a fresh instance carrying the (possibly sliced)
        # groupby and this object's attributes.
        self = self.__class__(subset,
                              groupby=groupby,
                              parent=self,
                              **kwargs)
        self._reset_cache()
        if subset.ndim == 2:
            if is_scalar(key) and key in subset or is_list_like(key):
                self._selection = key
        return self
# special case to prevent duplicate plots when catching exceptions when
# forwarding methods from NDFrames
plotting_methods = frozenset(['plot', 'hist'])

# Methods forwarded on both SeriesGroupBy and DataFrameGroupBy via apply.
common_apply_whitelist = frozenset([
    'quantile', 'fillna', 'mad', 'take',
    'idxmax', 'idxmin', 'tshift',
    'skew', 'corr', 'cov', 'diff'
]) | plotting_methods

# Series-only additions, plus the dtype/unique attribute accessors.
series_apply_whitelist = ((common_apply_whitelist |
                           {'nlargest', 'nsmallest',
                            'is_monotonic_increasing',
                            'is_monotonic_decreasing'})
                          ) | frozenset(['dtype', 'unique'])

# DataFrame-only additions.
dataframe_apply_whitelist = ((common_apply_whitelist |
                              frozenset(['dtypes', 'corrwith'])))

# Transform kernels with cython implementations.
cython_transforms = frozenset(['cumprod', 'cumsum', 'shift',
                               'cummin', 'cummax'])

# Names excluded from the cython result-cast step (per the name; the
# consuming logic lives outside this module).
cython_cast_blacklist = frozenset(['rank', 'count', 'size'])
def whitelist_method_generator(base, klass, whitelist):
    """
    Yields all GroupBy member defs for DataFrame/Series names in whitelist.

    Parameters
    ----------
    base : class
        base class
    klass : class
        class where members are defined.
        Should be Series or DataFrame
    whitelist : list
        list of names of klass methods to be constructed

    Returns
    -------
    The generator yields a sequence of strings, each suitable for exec'ing,
    that define implementations of the named methods for DataFrameGroupBy
    or SeriesGroupBy.

    Since we don't want to override methods explicitly defined in the
    base class, any such name is skipped.
    """

    # Template for callable attributes: delegate through __getattr__ so the
    # groupby machinery wraps the underlying Series/DataFrame method.
    method_wrapper_template = \
        """def %(name)s(%(sig)s) :
    \"""
    %(doc)s
    \"""
    f = %(self)s.__getattr__('%(name)s')
    return f(%(args)s)"""
    # Template for non-callable attributes: expose them as read-only properties.
    property_wrapper_template = \
        """@property
def %(name)s(self) :
    \"""
    %(doc)s
    \"""
    return self.__getattr__('%(name)s')"""

    for name in whitelist:
        # don't override anything that was explicitly defined
        # in the base class
        if hasattr(base, name):
            continue

        # ugly, but we need the name string itself in the method.
        f = getattr(klass, name)
        doc = f.__doc__
        doc = doc if type(doc) == str else ''
        if isinstance(f, types.MethodType):
            wrapper_template = method_wrapper_template
            decl, args = make_signature(f)
            # pass args by name to f because otherwise
            # GroupBy._make_wrapper won't know whether
            # we passed in an axis parameter.
            args_by_name = ['{0}={0}'.format(arg) for arg in args[1:]]
            params = {'name': name,
                      'doc': doc,
                      'sig': ','.join(decl),
                      'self': args[0],
                      'args': ','.join(args_by_name)}
        else:
            wrapper_template = property_wrapper_template
            params = {'name': name, 'doc': doc}

        yield wrapper_template % params
6,285 | render | import json
from flask import current_app
from lemur import database
from lemur.dns_providers.models import DnsProvider
from lemur.logs import service as log_service
def METHOD_NAME(args):
    """
    Helper that helps us render the REST Api responses.

    :param args: sorting/paging arguments from the request
    :return: sorted and paged collection of DnsProvider rows
    """
    providers = database.session_query(DnsProvider)
    return database.sort_and_page(providers, DnsProvider, args)
def get(dns_provider_id):
    """Retrieve a single DNS provider row by its Lemur-assigned ID."""
    return database.get(DnsProvider, dns_provider_id)
def get_all_dns_providers():
    """
    Retrieves all dns providers within Lemur.

    :return: every DnsProvider row, unfiltered
    """
    all_providers = DnsProvider.query
    return all_providers.all()
def get_friendly(dns_provider_id):
    """
    Retrieves a dns provider by its lemur assigned ID and returns a
    plain-dict ("friendly") representation of it.

    :param dns_provider_id: Lemur assigned ID
    :rtype: dict
    :return: friendly representation, or None when the ID is unknown
    """
    provider = get(dns_provider_id)
    if not provider:
        return None
    friendly = {
        "name": provider.name,
        "description": provider.description,
        "providerType": provider.provider_type,
        "options": provider.options,
        "credentials": provider.credentials,
    }
    # route53 credentials carry the AWS account id; surface it separately.
    if provider.provider_type == "route53":
        credentials = json.loads(provider.credentials)
        friendly["account_id"] = credentials.get("account_id")
    return friendly
def delete(dns_provider_id):
    """
    Deletes a DNS provider.

    :param dns_provider_id: Lemur assigned ID
    """
    provider = get(dns_provider_id)
    if not provider:
        # unknown ID: deletion is best-effort, nothing to do
        return
    log_service.audit_log("delete_dns_provider", provider.name, "Deleting the DNS provider")
    database.delete(provider)
def get_types():
    """
    Returns the configured ACME DNS provider types, falling back to a
    built-in default set when ACME_DNS_PROVIDER_TYPES is not configured.

    :return: dict with "items" (provider type descriptors) and "total"
    :raises Exception: when the configuration is explicitly empty/falsy
    """
    default_provider_types = {
        "items": [
            {
                "name": "route53",
                "requirements": [
                    {
                        "name": "account_id",
                        "type": "int",
                        "required": True,
                        "helpMessage": "AWS Account number",
                    }
                ],
            },
            {
                "name": "cloudflare",
                "requirements": [
                    {
                        "name": "email",
                        "type": "str",
                        "required": True,
                        "helpMessage": "Cloudflare Email",
                    },
                    {
                        "name": "key",
                        "type": "str",
                        "required": True,
                        "helpMessage": "Cloudflare Key",
                    },
                ],
            },
            {"name": "dyn"},
            {"name": "nsone"},
            {"name": "ultradns"},
            {"name": "powerdns"},
        ]
    }
    provider_config = current_app.config.get(
        "ACME_DNS_PROVIDER_TYPES", default_provider_types
    )
    if not provider_config:
        raise Exception("No DNS Provider configuration specified.")
    provider_config["total"] = len(provider_config.get("items"))
    return provider_config
def set_domains(dns_provider, domains):
    """
    Sets the domains served by a DNS provider and persists the change.

    (The previous docstring — "Increments pending certificate attempt
    counter" — was copy-pasted from an unrelated function and did not
    describe this behavior.)

    :param dns_provider: DnsProvider instance to update
    :param domains: new domain collection to store on the provider
    :return: the updated provider
    """
    dns_provider.domains = domains
    database.update(dns_provider)
    return dns_provider
def create(data):
    """
    Creates a new DNS provider from posted data and audit-logs the event.

    :param data: dict with "name", "description" and "provider_type"
        (whose "requirements" entries supply the credential values)
    :return: ID of the newly created DnsProvider
    """
    provider_name = data.get("name")
    requirements = data.get("provider_type", {}).get("requirements", [])
    credentials = {item["name"]: item["value"] for item in requirements}
    dns_provider = DnsProvider(
        name=provider_name,
        description=data.get("description"),
        provider_type=data.get("provider_type").get("name"),
        credentials=json.dumps(credentials),
    )
    created = database.create(dns_provider)
    log_service.audit_log("create_dns_provider", provider_name, "Created new DNS provider")
    return created.id
from logging import debug, info, warning, error
from datetime import datetime, timedelta
import services
def main(config, db, **kwargs):
    """Run the periodic metadata-maintenance passes against the database."""
    # In debug mode every pass runs dry (no database writes).
    update_db = not config.debug
    # Find data not provided by the edit module
    _check_missing_stream_info(config, db, update_db=update_db)
    # Check for new show scores
    if config.record_scores:
        METHOD_NAME(config, db, update_db=update_db)
        # Record poll scores to avoid querying them every time
        _record_poll_scores(config, db, update_db=update_db)
    # Show lengths aren't always known at the start of the season
    _check_show_lengths(config, db, update_db=update_db)
    # Check if shows have finished and disable them if they have
    _disable_finished_shows(config, db, update_db=update_db)
def _check_show_lengths(config, db, update_db=True):
    """Backfill missing episode counts by polling every link handler."""
    info("Checking show lengths")
    shows = db.get_shows(missing_length=True)
    for show in shows:
        info("Updating episode count of {} ({})".format(show.name, show.id))
        length = None
        # Check all info handlers for an episode count
        # Some may not implement get_episode_count and return None
        for handler in services.get_link_handlers().values():
            info("  Checking {} ({})".format(handler.name, handler.key))
            # Get show link to site represented by the handler
            site = db.get_link_site(key=handler.key)
            link = db.get_link(show, site)
            if link is None:
                error("Failed to create link")
                continue
            # Validate length
            new_length = handler.get_episode_count(link, useragent=config.useragent)
            if new_length is not None:
                debug("  Lists length: {}".format(new_length))
                # Handlers may disagree; warn, but the last non-None wins.
                if length is not None and new_length != length:
                    warning("  Conflict between lengths {} and {}".format(new_length, length))
                length = new_length
        # Length found, update database
        if length is not None:
            info("New episode count: {}".format(length))
            if update_db:
                db.set_show_episode_count(show, length)
            else:
                warning("Debug enabled, not updating database")
def _disable_finished_shows(config, db, update_db=True):
    """Disable shows whose latest aired episode reached the known length."""
    info("Checking for disabled shows")
    for show in db.get_shows():
        latest_episode = db.get_latest_episode(show)
        if latest_episode is None:
            continue
        # show.length == 0 means "length unknown"; never disable those.
        if not (0 < show.length <= latest_episode.number):
            continue
        info("  Disabling show \"{}\"".format(show.name))
        if latest_episode.number > show.length:
            warning("    Episode number ({}) greater than show length ({})".format(latest_episode.number, show.length))
        if update_db:
            db.set_show_enabled(show, enabled=False, commit=False)
    if update_db:
        db.save()
def _check_missing_stream_info(config, db, update_db=True):
    """Backfill stream name/show_key/show_id for streams missing a name."""
    info("Checking for missing stream info")
    streams = db.get_streams(missing_name=True)
    for stream in streams:
        service_info = db.get_service(id=stream.service)
        info("Updating missing stream info of {} ({}/{})".format(stream.name, service_info.name, stream.show_key))
        service = services.get_service_handler(key=service_info.key)
        # NOTE: rebinds the loop variable with the handler's enriched stream.
        stream = service.get_stream_info(stream, useragent=config.useragent)
        if not stream:
            error("  Stream info not found")
            continue
        debug("  name={}".format(stream.name))
        debug("  key={}".format(stream.show_key))
        debug("  id={}".format(stream.show_id))
        if update_db:
            db.update_stream(stream, name=stream.name, show_id=stream.show_id, show_key=stream.show_key, commit=False)
    if update_db:
        db.commit()
def METHOD_NAME(config, db, update_db):
    """Fetch and record scores for the latest episode of each enabled show.

    Shows whose latest episode already has at least one score are skipped;
    otherwise every link handler is queried and each non-None score is
    stored against the handler's site.
    """
    info("Checking for new episode scores")
    shows = db.get_shows(enabled=True)
    for show in shows:
        latest_episode = db.get_latest_episode(show)
        if latest_episode is not None:
            info("For show {} ({}), episode {}".format(show.name, show.id, latest_episode .number))
            scores = db.get_episode_scores(show, latest_episode)
            # Check if any scores have been found rather than checking for each service
            if len(scores) == 0:
                for handler in services.get_link_handlers().values():
                    info("  Checking {} ({})".format(handler.name, handler.key))
                    # Get show link to site represented by the handler
                    site = db.get_link_site(key=handler.key)
                    link = db.get_link(show, site)
                    if link is None:
                        error("Failed to create link")
                        continue
                    new_score = handler.get_show_score(show, link, useragent=config.useragent)
                    if new_score is not None:
                        info("  Score: {}".format(new_score))
                        db.add_episode_score(show, latest_episode, site, new_score, commit=False)
                if update_db:
                    db.commit()
            else:
                info("  Already has scores, ignoring")
def _record_poll_scores(config, db, update_db):
    """Persist scores for polls old enough to be stable but not stale."""
    polls = db.get_polls(missing_score=True)
    handler = services.get_default_poll_handler()
    info(f"Record scores for service {handler.key}")
    updated = 0
    for poll in polls:
        # Only polls between 8 and 93 days old are considered settled.
        if not (timedelta(days=8) < datetime.now() - poll.date < timedelta(days=93)):
            continue
        score = handler.get_score(poll)
        info(f"Updating poll score for show {poll.show_id} / episode {poll.episode} ({score})")
        if score:
            db.update_poll_score(poll, score, commit=update_db)
            updated += 1
    info(f"{updated} scores recorded, {len(polls) - updated} scores not updated")
#-*- coding: utf-8 -*-
###########################################################################
## ##
## Copyrights Frederic Rodrigo 2013 ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###########################################################################
from . import OsmSax
from .IssuesFile import IssuesFile
class IssuesFileOsmose(IssuesFile):
    """IssuesFile backend that serialises issues to Osmose's XML format.

    Wraps the output stream produced by the base class in an
    OsmSax.OsmSaxWriter and emits elements in document order:
    begin() -> analyser() -> classs()/error() -> analyser end -> end().
    """
    def begin(self):
        """Open the XML document and the top-level <analysers> element."""
        output = super().begin()
        self.outxml = OsmSax.OsmSaxWriter(output, "UTF-8")
        self.outxml.startDocument()
        self.outxml.startElement("analysers", {})
        # Dispatch table mapping a geometry kind to the writer method that
        # renders it (used by error()).
        self.geom_type_renderer = {"node": self.outxml.NodeCreate, "way": self.outxml.WayCreate, "relation": self.outxml.RelationCreate, "position": self.position}
    def end(self):
        """Close the document and release the writer."""
        self.outxml.endElement("analysers")
        self.outxml.endDocument()
        del self.outxml
        super().end()
    def analyser(self, timestamp, analyser_version, change=False):
        """Open an <analyser> (or <analyserChange> when change=True) element."""
        self.mode = "analyserChange" if change else "analyser"
        attrs = {}
        attrs["timestamp"] = timestamp.strftime("%Y-%m-%dT%H:%M:%SZ")
        attrs["analyser_version"] = str(analyser_version)
        if self.version is not None:
            attrs["version"] = self.version
        self.outxml.startElement(self.mode, attrs)
    def METHOD_NAME(self):
        """Close the element opened by analyser()."""
        self.outxml.endElement(self.mode)
    def classs(self, id, item, level, tags, title, detail = None, fix = None, trap = None, example = None, source = None, resource = None):
        """Emit a <class> element describing an issue class.

        title/detail/fix/trap/example are mappings of language code to
        localized text; one child element is emitted per language.
        """
        options = {
            'id': str(id),
            'item': str(item),
        }
        if source:
            options['source'] = str(source)
        if resource:
            options['resource'] = str(resource)
        if level:
            options['level'] = str(level)
        if tags:
            options['tag'] = ','.join(tags)
        self.outxml.startElement('class', options)
        for (key, value) in [
            ('classtext', title),
            ('detail', detail),
            ('fix', fix),
            ('trap', trap),
            ('example', example),
        ]:
            if value:
                for lang in sorted(value.keys()):
                    self.outxml.Element(key, {
                        'lang': lang,
                        'title': value[lang]
                    })
        self.outxml.endElement('class')
    def error(self, classs, subclass, text, ids, types, fix, geom, allow_override=False):
        """Emit one <error> element with its geometry, texts and fixes."""
        # filter/fixdiff/filterfix come from the IssuesFile base class.
        if self.filter and not self.filter.apply(classs, subclass, geom):
            return
        if subclass is not None:
            self.outxml.startElement("error", {"class":str(classs), "subclass":str(subclass)})
        else:
            self.outxml.startElement("error", {"class":str(classs)})
        for type in geom:
            for g in geom[type]:
                self.geom_type_renderer[type](g)
        if text:
            for lang in text:
                self.outxml.Element("text", {"lang":lang, "value":text[lang]})
        if fix:
            fix = self.fixdiff(fix)
            if not allow_override:
                fix = self.filterfix(ids, types, fix, geom)
            self.dumpxmlfix(ids, types, fix)
        self.outxml.endElement("error")
    def position(self, args):
        """Render a bare lat/lon position (registered in geom_type_renderer)."""
        self.outxml.Element("location", {"lat":str(args["lat"]), "lon":str(args["lon"])})
    def delete(self, t, id):
        """Emit a <delete> marker for an object of type t with the given id."""
        self.outxml.Element("delete", {"type": t, "id": str(id)})
    def dumpxmlfix(self, ids, types, fixes):
        """Serialise the candidate fixes as <fixes>/<fix>/<tag> elements.

        Each fix is a sequence parallel to ids/types; every non-None entry
        maps a tag-operation character to the tags it touches.
        """
        self.outxml.startElement("fixes", {})
        for fix in fixes:
            self.outxml.startElement("fix", {})
            i = 0
            for f in fix:
                if f is not None and i < len(types):
                    type = types[i]
                    if type:
                        self.outxml.startElement(type, {'id': str(ids[i])})
                        for opp, tags in f.items():
                            for k in tags:
                                # '~' and '+' operations carry a value; others
                                # only name the key.  NOTE(review): FixTable is
                                # inherited — confirm the operation semantics.
                                if opp in '~+':
                                    self.outxml.Element('tag', {'action': self.FixTable[opp], 'k': k, 'v': tags[k]})
                                else:
                                    self.outxml.Element('tag', {'action': self.FixTable[opp], 'k': k})
                        self.outxml.endElement(type)
                i += 1
            self.outxml.endElement('fix')
        self.outxml.endElement('fixes')
import pytest
from pytest_lazyfixture import lazy_fixture as lf
import numpy as np
import tensorcircuit as tc
from tensorcircuit.noisemodel import (
NoiseConf,
circuit_with_noise,
sample_expectation_ps_noisfy,
expectation_noisfy,
)
from tensorcircuit.channels import composedkraus
@pytest.mark.parametrize("backend", [lf("tfb"), lf("jaxb")])
def METHOD_NAME(backend):
    """End-to-end check of NoiseConf-driven noisy simulation.

    Builds the same two-qubit circuit as a state-vector Circuit and as a
    DMCircuit, attaches single/two-qubit depolarizing, thermal-relaxation
    and readout noise, then checks sampled expectations against reference
    values for positional, condition-based and standardized-gate-name
    noise registration.
    """
    # test data structure
    # noise_conf = NoiseConf()
    # noise_conf.add_noise("h1", "t0")
    # noise_conf.add_noise("h1", ["t1", "t2"], [[0], [1]])
    # noise_conf.add_noise("h1", ["t3"], [[0]])
    # noise_conf.add_noise("h1", "t4")
    # noise_conf.add_noise("h1", ["t5"], [[3]])
    # noise_conf.add_noise("h2", ["v1", "v2"], [[0], [1]])
    # noise_conf.add_noise("h2", ["v3"], [[0]])
    # noise_conf.add_noise("h2", "v4")
    c = tc.Circuit(2)
    c.cnot(0, 1)
    c.rx(0, theta=0.4)
    c.rx(1, theta=0.8)
    c.h(0)
    c.h(1)
    # identical circuit as a density-matrix simulation for exact comparison
    dmc = tc.DMCircuit(2)
    dmc.cnot(0, 1)
    dmc.rx(0, theta=0.4)
    dmc.rx(1, theta=0.8)
    dmc.h(0)
    dmc.h(1)
    error1 = tc.channels.generaldepolarizingchannel(0.1, 1)
    error2 = tc.channels.generaldepolarizingchannel(0.01, 2)
    error3 = tc.channels.thermalrelaxationchannel(300, 400, 100, "ByChoi", 0)
    readout_error = []
    readout_error.append([0.9, 0.75])  # readout error of qubit 0
    readout_error.append([0.4, 0.7])  # readout error of qubit 1
    noise_conf = NoiseConf()
    noise_conf.add_noise("rx", error1)
    noise_conf.add_noise("rx", [error3], [[0]])
    noise_conf.add_noise("h", [error3, error1], [[0], [1]])
    noise_conf.add_noise("x", [error3], [[0]])
    noise_conf.add_noise("cnot", [error2], [[0, 1]])
    noise_conf.add_noise("readout", readout_error)
    cnoise = circuit_with_noise(c, noise_conf, [0.1] * 7)
    value = cnoise.expectation_ps(x=[0, 1])
    # value = expectation_ps_noisfy(c, x=[0, 1], noise_conf=noise_conf, nmc=10000)
    # np.testing.assert_allclose(value, 0.09, atol=1e-1)
    # value = expectation_ps_noisfy(dmc, x=[0, 1], noise_conf=noise_conf)
    # np.testing.assert_allclose(value, 0.09, atol=1e-1)
    # with readout_error
    value = sample_expectation_ps_noisfy(dmc, x=[0, 1], noise_conf=noise_conf)
    np.testing.assert_allclose(value, -0.12, atol=1e-2)
    value = sample_expectation_ps_noisfy(c, x=[0, 1], noise_conf=noise_conf, nmc=100000)
    np.testing.assert_allclose(value, -0.12, atol=1e-2)
    # test composed channel and general condition
    newerror = composedkraus(error1, error3)
    noise_conf1 = NoiseConf()
    noise_conf1.add_noise("rx", [newerror, error1], [[0], [1]])
    noise_conf1.add_noise("h", [error3, error1], [[0], [1]])
    noise_conf1.add_noise("x", [error3], [[0]])
    def condition(d):
        # match only the cnot acting on qubits (0, 1)
        return d["name"] == "cnot" and d["index"] == (0, 1)
    noise_conf1.add_noise_by_condition(condition, error2)
    noise_conf1.add_noise("readout", readout_error)
    value = sample_expectation_ps_noisfy(dmc, x=[0, 1], noise_conf=noise_conf1)
    np.testing.assert_allclose(value, -0.12, atol=1e-2)
    # test standardized gate
    newerror = composedkraus(error1, error3)
    noise_conf2 = NoiseConf()
    noise_conf2.add_noise("Rx", [newerror, error1], [[0], [1]])
    noise_conf2.add_noise("H", [error3, error1], [[0], [1]])
    noise_conf2.add_noise("x", [error3], [[0]])
    noise_conf2.add_noise("cx", [error2], [[0, 1]])
    noise_conf2.add_noise("readout", readout_error)
    value = sample_expectation_ps_noisfy(
        c, x=[0, 1], noise_conf=noise_conf2, nmc=100000
    )
    np.testing.assert_allclose(value, -0.12, atol=1e-2)
@pytest.mark.parametrize("backend", [lf("tfb"), lf("jaxb")])
def test_general_noisemodel(backend):
    """Check that the noise-aware circuit methods agree with the free
    functions, and that Monte-Carlo (Circuit) and exact (DMCircuit)
    evaluations agree within tolerance."""
    c = tc.Circuit(2)
    c.cnot(0, 1)
    c.rx(0, theta=0.4)
    c.rx(1, theta=0.8)
    c.h(0)
    c.h(1)
    dmc = tc.DMCircuit(2)
    dmc.cnot(0, 1)
    dmc.rx(0, theta=0.4)
    dmc.rx(1, theta=0.8)
    dmc.h(0)
    dmc.h(1)
    error1 = tc.channels.generaldepolarizingchannel(0.1, 1)
    error2 = tc.channels.generaldepolarizingchannel(0.06, 2)
    error3 = tc.channels.thermalrelaxationchannel(300, 400, 100, "ByChoi", 0)
    readout_error = []
    readout_error.append([0.9, 0.75])
    readout_error.append([0.4, 0.7])
    noise_conf = NoiseConf()
    noise_conf.add_noise("rx", error1)
    noise_conf.add_noise("rx", [error3], [[0]])
    noise_conf.add_noise("h", [error3, error1], [[0], [1]])
    noise_conf.add_noise("x", [error3], [[0]])
    noise_conf.add_noise("cnot", [error2], [[0, 1]])
    noise_conf.add_noise("readout", readout_error)
    nmc = 100000
    # # test sample_expectation_ps
    value1 = sample_expectation_ps_noisfy(c, x=[0, 1], noise_conf=noise_conf, nmc=nmc)
    value2 = c.sample_expectation_ps(x=[0, 1], noise_conf=noise_conf, nmc=nmc)
    value3 = dmc.sample_expectation_ps(x=[0, 1], noise_conf=noise_conf)
    np.testing.assert_allclose(value1, value2, atol=1e-2)
    np.testing.assert_allclose(value3, value2, atol=1e-2)
    # test expectation
    value1 = expectation_noisfy(c, (tc.gates.z(), [0]), noise_conf=noise_conf, nmc=nmc)
    value2 = c.expectation((tc.gates.z(), [0]), noise_conf=noise_conf, nmc=nmc)
    value3 = dmc.expectation((tc.gates.z(), [0]), noise_conf=noise_conf)
    np.testing.assert_allclose(value1, value2, atol=1e-2)
    np.testing.assert_allclose(value3, value2, atol=1e-2)
    # test expectation_ps
    # value = expectation_ps_noisfy(c, x=[0], noise_conf=noise_conf, nmc=10000)
    value1 = c.expectation_ps(x=[0], noise_conf=noise_conf, nmc=nmc)
    value2 = dmc.expectation_ps(x=[0], noise_conf=noise_conf)
    np.testing.assert_allclose(value1, value2, atol=1e-2)
from unittest import TestCase
import pytest
import xarray as xr
import pandas as pd
from pytz import UTC
import numpy as np
from pandas import DatetimeTZDtype
from xcube.util.timeindex import ensure_time_label_compatible
class TimeIndexTest(TestCase):
    """Tests for xcube.util.timeindex.ensure_time_label_compatible.

    Exercises every combination of timezone-aware/naive time coordinates
    and indexer labels, plus degenerate inputs (no time label, unparseable
    values, objects without tz_convert/tz_localize).
    """
    # name used to check that a non-default time dimension is honoured
    nonstandard_time_dimension_name = 'a_nonstandard_time_dimension_name'
    # A DataArray with a datetime64 time dimension -- actually implicitly
    # timezone-aware per the numpy docs, but treated as timezone-naive by
    # pandas and xarray.
    da_datetime64 = xr.DataArray(
        np.arange(1, 4),
        coords=dict(time=np.arange('2000-01-01', '2000-01-04',
                                   dtype=np.datetime64)),
        dims=['time'])
    # same data, but with the time dimension under a non-default name
    da_datetime64_nonstandard_name = xr.DataArray(
        np.arange(1, 4),
        coords={nonstandard_time_dimension_name:
                np.arange('2000-01-01', '2000-01-04', dtype=np.datetime64)},
        dims=[nonstandard_time_dimension_name])
    # As of pandas 1.4.3, pd.date_range seems to produce datetime64
    # co-ordinates, but may as well test this as a distinct case in case it
    # changes in future.
    da_tznaive = xr.DataArray(
        np.arange(1, 4),
        coords=dict(time=pd.date_range('2000-01-01', '2000-01-03', tz=None)),
        dims=['time'])
    # To get a timezone-aware array, we use a DatetimeArray with an explicit
    # DatetimeTZDtype dtype
    da_tzaware = xr.DataArray(
        np.arange(1, 4),
        coords=dict(time=pd.arrays.DatetimeArray(
            pd.date_range('2000-01-01T00:00:00', '2000-01-03T00:00:00',
                          tz='CET'),
            dtype=DatetimeTZDtype(tz='CET'))),
        dims=['time'])
    # indexer labels: same instant, once naive and once with explicit UTC
    labels_tznaive = dict(time='2000-01-02')
    labels_tzaware = dict(time='2000-01-02T00:00:00Z')
    def test_dt64_array_tznaive_indexer(self):
        """Naive array + naive label: labels pass through unchanged."""
        self.assertEqual(self.labels_tznaive,
                         ensure_time_label_compatible(self.da_datetime64,
                                                      self.labels_tznaive))
    def test_dt64_array_tznaive_indexer_nonstandard_name(self):
        """Same as above, using a non-default time dimension name."""
        self.assertEqual(
            self.labels_tznaive,
            ensure_time_label_compatible(
                self.da_datetime64_nonstandard_name,
                self.labels_tznaive,
                self.nonstandard_time_dimension_name
            ))
    def METHOD_NAME(self):
        """Naive array + aware label: label is converted to a naive instant."""
        self.assertTrue(
            _are_times_equal(
                self.labels_tznaive,
                ensure_time_label_compatible(self.da_datetime64,
                                             self.labels_tzaware)))
    def test_tznaive_array_tzaware_indexer(self):
        """date_range-based naive array + aware label behaves the same."""
        self.assertTrue(
            _are_times_equal(
                self.labels_tznaive,
                ensure_time_label_compatible(self.da_tznaive,
                                             self.labels_tzaware)))
    def test_ensure_time_label_compatible_no_time(self):
        """Labels without a time entry are returned untouched."""
        old_labels = dict(x=1)
        new_labels = ensure_time_label_compatible(
            xr.DataArray([[1, 2], [3, 4]], dims=('x', 'time')),
            old_labels
        )
        self.assertEqual(old_labels, new_labels)
    def test_ensure_time_label_compatible_no_timezone_info(self):
        """Unparseable coordinate values produce a warning, not an error."""
        old_labels = dict(time='foo')
        with pytest.warns(UserWarning):
            new_labels = ensure_time_label_compatible(
                xr.DataArray([[1, 2], [3, 4]], dims=('x', 'time'),
                             coords=dict(time=['foo', 'bar'])),
                old_labels
            )
        self.assertEqual(old_labels, new_labels)
    def test_ensure_time_label_compatible_no_tz_convert(self):
        """Aware label object lacking tz_convert: warn and keep labels."""
        class AwkwardTime:
            tzinfo = UTC
        old_labels = dict(time=AwkwardTime())
        time_coords = [
            pd.Timestamp('2020-01-01T12:00:00'),
            pd.Timestamp('2020-01-02T12:00:00')
        ]
        with pytest.warns(UserWarning):
            new_labels = ensure_time_label_compatible(
                xr.DataArray([[1, 2], [3, 4]], dims=('x', 'time'),
                             coords=dict(time=time_coords)),
                old_labels
            )
        self.assertEqual(old_labels, new_labels)
    def test_ensure_time_label_compatible_no_tz_localize(self):
        """Naive label object lacking tz_localize: warn and keep labels."""
        class AwkwardTime:
            tzinfo = None
        old_labels = dict(time=AwkwardTime())
        time_coords = [
            pd.Timestamp('2020-01-01T12:00:00+00:00'),
            pd.Timestamp('2020-01-02T12:00:00+00:00')
        ]
        with pytest.warns(UserWarning):
            new_labels = ensure_time_label_compatible(
                xr.DataArray([[1, 2], [3, 4]], dims=('x', 'time'),
                             coords=dict(time=time_coords)),
                old_labels
            )
        self.assertEqual(old_labels, new_labels)
    def test_ensure_time_label_compatible_tz_localize(self):
        """Naive label + aware coords: label is localized to the coords' zone."""
        old_labels = dict(time=pd.Timestamp('2020-01-01T12:00:00'))
        time_coords = [
            pd.Timestamp('2020-01-01T12:00:00+00:00'),
            pd.Timestamp('2020-01-02T12:00:00+00:00')
        ]
        new_labels = ensure_time_label_compatible(
            xr.DataArray([[1, 2], [3, 4]], dims=('x', 'time'),
                         coords=dict(time=time_coords)),
            old_labels
        )
        self.assertEqual(dict(time=pd.Timestamp('2020-01-01T12:00:00+00:00')),
                         new_labels)
    def test_with_ndarray_time_label(self):
        """A 0-d ndarray-wrapped timestamp label is handled like a scalar."""
        old_labels = dict(time=np.array(pd.Timestamp('2020-01-01T12:00:00')))
        time_coords = [
            pd.Timestamp('2020-01-01T12:00:00+00:00'),
            pd.Timestamp('2020-01-02T12:00:00+00:00')
        ]
        new_labels = ensure_time_label_compatible(
            xr.DataArray([[1, 2], [3, 4]], dims=('x', 'time'),
                         coords=dict(time=time_coords)),
            old_labels
        )
        self.assertEqual(
            dict(time=np.array(pd.Timestamp('2020-01-01T12:00:00+00:00'))),
            new_labels
        )
def _are_times_equal(labels1, labels2):
return pd.Timestamp(labels1['time']) == pd.Timestamp(labels2['time']) |
import copy
import logging
import math
import time
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import fedml
from fedml.core.alg_frame.client_trainer import ClientTrainer
from model.yolov7.utils.general import (
box_iou,
non_max_suppression,
xywh2xyxy,
clip_coords,
)
from model.yolov7.utils.loss import ComputeLoss
from model.yolov7.utils.metrics import ap_per_class
class YOLOv7Trainer(ClientTrainer):
    """FedML client trainer for YOLOv7 object detection.

    Holds the local model and hyperparameters, exposes get/set of model
    weights for server aggregation, and runs local SGD/Adam epochs.
    """
    def __init__(self, model, args=None):
        super(YOLOv7Trainer, self).__init__(model, args)
        self.hyp = args.hyp          # training hyperparameters (lr0, momentum, ...)
        self.args = args             # full federated-learning config
        self.round_loss = []         # per-round mean losses, accumulated across rounds
        self.round_idx = 0           # current communication round

    def METHOD_NAME(self):
        """Return the model weights on CPU (for transmission to the server)."""
        return self.model.cpu().state_dict()

    def set_model_params(self, model_parameters):
        """Load aggregated weights received from the server."""
        logging.info("set_model_params")
        self.model.load_state_dict(model_parameters)

    def train(self, train_data, device, args):
        """Run `args.epochs` local training epochs over `train_data` on `device`.

        Logs per-batch and per-epoch losses, reports the final epoch's mean
        losses via fedml.mlops, and accumulates them in self.round_loss.
        """
        logging.info("Start training on Trainer {}".format(self.id))
        logging.info(f"Hyperparameters: {self.hyp}, Args: {self.args}")
        model = self.model
        self.round_idx = args.round_idx
        args = self.args
        hyp = self.hyp if self.hyp else self.args.hyp
        epochs = args.epochs  # number of epochs
        pg0, pg1, pg2 = [], [], []  # optimizer parameter groups
        for k, v in model.named_modules():
            if hasattr(v, "bias") and isinstance(v.bias, nn.Parameter):
                pg2.append(v.bias)  # biases
            if isinstance(v, nn.BatchNorm2d):
                pg0.append(v.weight)  # no decay
            elif hasattr(v, "weight") and isinstance(v.weight, nn.Parameter):
                pg1.append(v.weight)  # apply decay
        if args.client_optimizer == "adam":
            optimizer = optim.Adam(
                pg0, lr=hyp["lr0"], betas=(hyp["momentum"], 0.999)
            )  # adjust beta1 to momentum
        else:
            optimizer = optim.SGD(
                pg0, lr=hyp["lr0"], momentum=hyp["momentum"], nesterov=True
            )
        optimizer.add_param_group(
            {"params": pg1, "weight_decay": hyp["weight_decay"]}
        )  # add pg1 with weight_decay
        optimizer.add_param_group({"params": pg2})  # add pg2 (biases)
        logging.info(
            "Optimizer groups: %g .bias, %g conv.weight, %g other"
            % (len(pg2), len(pg1), len(pg0))
        )
        del pg0, pg1, pg2
        # Freeze
        freeze = []  # parameter names to freeze (full or partial)
        for k, v in model.named_parameters():
            v.requires_grad = True  # train all layers
            if any(x in k for x in freeze):
                print("freezing %s" % k)
                v.requires_grad = False
        total_epochs = epochs * args.comm_round
        lf = (
            lambda x: ((1 + math.cos(x * math.pi / total_epochs)) / 2)
            * (1 - hyp["lrf"])
            + hyp["lrf"]
        )  # cosine
        # NOTE(review): this cosine scheduler is immediately replaced by the
        # linear LambdaLR below, so it never takes effect. Kept for now to
        # preserve behavior; confirm which schedule is intended and remove
        # the other.
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
        model.to(device)
        model.train()
        scheduler = lr_scheduler.LambdaLR(
            optimizer, lr_lambda=lambda epoch: 1.0 - epoch / args.epochs
        )
        compute_loss = ComputeLoss(model)
        epoch_loss = []
        mloss = torch.zeros(4, device=device)  # mean losses
        logging.info("Epoch gpu_mem box obj cls total targets img_size time")
        for epoch in range(args.epochs):
            model.train()
            t = time.time()
            batch_loss = []
            logging.info("Trainer_ID: {0}, Epoch: {1}".format(self.id, epoch))
            for (batch_idx, batch) in enumerate(train_data):
                imgs, targets, paths, _ = batch
                # normalize uint8 images to roughly [-0.5, 0.5)
                imgs = imgs.to(device, non_blocking=True).float() / 256.0 - 0.5
                optimizer.zero_grad()
                # with torch.cuda.amp.autocast(amp):
                pred = model(imgs)  # forward
                loss, loss_items = compute_loss(
                    pred, targets.to(device).float()
                )  # loss scaled by batch_size
                # Backward
                loss.backward()
                optimizer.step()
                batch_loss.append(loss.item())
                mloss = (mloss * batch_idx + loss_items) / (
                    batch_idx + 1
                )  # update mean losses
                mem = "%.3gG" % (
                    torch.cuda.memory_reserved() / 1e9
                    if torch.cuda.is_available()
                    else 0
                )  # (GB)
                s = ("%10s" * 2 + "%10.4g" * 6) % (
                    "%g/%g" % (epoch, epochs - 1),
                    mem,
                    *mloss,
                    targets.shape[0],
                    imgs.shape[-1],
                )
                logging.info(s)
            scheduler.step()
            epoch_loss.append(copy.deepcopy(mloss.cpu().numpy()))
            logging.info(
                f"Trainer {self.id} epoch {epoch} box: {mloss[0]} obj: {mloss[1]} cls: {mloss[2]} total: {mloss[3]} time: {(time.time() - t)}"
            )
            logging.info("#" * 20)
            logging.info(
                f"Trainer {self.id} epoch {epoch} time: {(time.time() - t)}s batch_num: {batch_idx} speed: {(time.time() - t)/batch_idx} s/batch"
            )
            logging.info("#" * 20)
        # plot for client
        # plot box, obj, cls, total loss
        epoch_loss = np.array(epoch_loss)
        # logging.info(f"Epoch loss: {epoch_loss}")
        # np.float was removed in NumPy 1.24; the builtin float is the
        # documented drop-in replacement (np.float was an alias for it).
        fedml.mlops.log(
            {
                f"round_idx": self.round_idx,
                f"train_box_loss": float(epoch_loss[-1, 0]),
                f"train_obj_loss": float(epoch_loss[-1, 1]),
                f"train_cls_loss": float(epoch_loss[-1, 2]),
                f"train_total_loss": float(epoch_loss[-1, 3]),
            }
        )
        self.round_loss.append(epoch_loss[-1, :])
        if self.round_idx == args.comm_round:
            self.round_loss = np.array(self.round_loss)
            # logging.info(f"round_loss shape: {self.round_loss.shape}")
            logging.info(
                f"Trainer {self.id} round {self.round_idx} finished, round loss: {self.round_loss}"
            )
        return
from datetime import timedelta
from django.contrib.auth.models import AnonymousUser
from django.contrib.sessions.backends.base import SessionBase
from django.http import HttpRequest
from django.urls import reverse
import sentry.utils.auth
from sentry.models import User
from sentry.testutils.cases import TestCase
from sentry.testutils.silo import control_silo_test
from sentry.utils.auth import (
EmailAuthBackend,
SsoSession,
construct_link_with_query,
get_login_redirect,
login,
)
@control_silo_test(stable=True)
class EmailAuthBackendTest(TestCase):
    """EmailAuthBackend must accept username or email, case-insensitively,
    and reject wrong passwords."""
    def setUp(self):
        # one known user with username "foo", email "baz@example.com",
        # password "bar"
        self.user = User(username="foo", email="baz@example.com")
        self.user.set_password("bar")
        self.user.save()
    @property
    def backend(self):
        # fresh backend per access; it holds no state
        return EmailAuthBackend()
    def test_can_authenticate_with_username(self):
        result = self.backend.authenticate(HttpRequest(), username="foo", password="bar")
        self.assertEqual(result, self.user)
    def test_can_authenticate_with_username_case_insensitive(self):
        result = self.backend.authenticate(HttpRequest(), username="FOO", password="bar")
        self.assertEqual(result, self.user)
    def test_can_authenticate_with_email(self):
        result = self.backend.authenticate(
            HttpRequest(), username="baz@example.com", password="bar"
        )
        self.assertEqual(result, self.user)
    def test_can_authenticate_with_email_case_insensitive(self):
        result = self.backend.authenticate(
            HttpRequest(), username="BAZ@example.com", password="bar"
        )
        self.assertEqual(result, self.user)
    def test_does_not_authenticate_with_invalid_password(self):
        # wrong password yields None rather than raising
        result = self.backend.authenticate(HttpRequest(), username="foo", password="pizza")
        self.assertEqual(result, None)
@control_silo_test(stable=True)
class GetLoginRedirectTest(TestCase):
    """get_login_redirect must honour session '_next'/2FA state while
    rejecting unsafe schemes and rewriting org-subdomain URLs."""
    def make_request(self, next=None):
        """Build a minimal request for testserver, optionally with a
        session '_next' target."""
        request = HttpRequest()
        request.META["SERVER_NAME"] = "testserver"
        request.META["SERVER_PORT"] = "80"
        request.session = SessionBase()
        request.user = self.user
        if next:
            request.session["_next"] = next
        return request
    def test_schema_uses_default(self):
        # non-http(s) or foreign-host targets fall back to the login page
        result = get_login_redirect(self.make_request("http://example.com"))
        assert result == reverse("sentry-login")
        result = get_login_redirect(self.make_request("ftp://testserver"))
        assert result == reverse("sentry-login")
    def test_next(self):
        """Valid same-host '_next' targets are used; subdomain requests
        get the org subdomain prefixed onto relative paths."""
        result = get_login_redirect(self.make_request("http://testserver/foobar/"))
        assert result == "http://testserver/foobar/"
        result = get_login_redirect(self.make_request("ftp://testserver/foobar/"))
        assert result == reverse("sentry-login")
        request = self.make_request("/foobar/")
        request.subdomain = "orgslug"
        result = get_login_redirect(request)
        assert result == "http://orgslug.testserver/foobar/"
        request = self.make_request("http://testserver/foobar/")
        request.subdomain = "orgslug"
        result = get_login_redirect(request)
        assert result == "http://testserver/foobar/"
        request = self.make_request("ftp://testserver/foobar/")
        request.subdomain = "orgslug"
        result = get_login_redirect(request)
        assert result == f"http://orgslug.testserver{reverse('sentry-login')}"
    def METHOD_NAME(self):
        """A stored '_after_2fa' URL wins over the default redirect."""
        request = self.make_request()
        request.session["_after_2fa"] = "http://testserver/foobar/"
        result = get_login_redirect(request)
        assert result == "http://testserver/foobar/"
        request = self.make_request()
        request.subdomain = "orgslug"
        request.session["_after_2fa"] = "/foobar/"
        result = get_login_redirect(request)
        assert result == "http://orgslug.testserver/foobar/"
    def test_pending_2fa(self):
        """A pending 2FA challenge always redirects to the 2FA dialog."""
        request = self.make_request()
        request.session["_pending_2fa"] = [1234, 1234, 1234]
        result = get_login_redirect(request)
        assert result == reverse("sentry-2fa-dialog")
        request = self.make_request()
        request.subdomain = "orgslug"
        request.session["_pending_2fa"] = [1234, 1234, 1234]
        result = get_login_redirect(request)
        assert result == f"http://orgslug.testserver{reverse('sentry-2fa-dialog')}"
    def test_login_uses_default(self):
        # '_next' pointing at the login page itself is treated as default
        result = get_login_redirect(self.make_request(reverse("sentry-login")))
        assert result == reverse("sentry-login")
    def test_no_value_uses_default(self):
        result = get_login_redirect(self.make_request())
        assert result == reverse("sentry-login")
        request = self.make_request()
        request.subdomain = "orgslug"
        result = get_login_redirect(request)
        assert result == f"http://orgslug.testserver{reverse('sentry-login')}"
@control_silo_test(stable=True)
class LoginTest(TestCase):
    """login() must attach the user, record SSO sessions per organization,
    and persist the user's session nonce when one exists."""
    def make_request(self, next=None):
        """Anonymous request with a real session, optionally carrying a
        '_next' target."""
        request = HttpRequest()
        request.META["REMOTE_ADDR"] = "127.0.0.1"
        request.session = self.session
        request.user = AnonymousUser()
        if next:
            request.session["_next"] = next
        return request
    def test_simple(self):
        request = self.make_request()
        assert login(request, self.user)
        assert request.user == self.user
        # no nonce on the user -> none stored in the session
        assert "_nonce" not in request.session
    def test_with_organization(self):
        """Logging in with an organization records an SSO session key."""
        org = self.create_organization(name="foo", owner=self.user)
        request = self.make_request()
        assert login(request, self.user, organization_id=org.id)
        assert request.user == self.user
        assert f"{SsoSession.SSO_SESSION_KEY}:{org.id}" in request.session
    def test_with_nonce(self):
        """A user-level session nonce is copied into the session on login."""
        self.user.refresh_session_nonce()
        self.user.save()
        assert self.user.session_nonce is not None
        request = self.make_request()
        assert login(request, self.user)
        assert request.user == self.user
        assert request.session["_nonce"] == self.user.session_nonce
def test_sso_expiry_default():
    """A missing env value must fall back to the fixed 7-day SSO expiry."""
    expiry = sentry.utils.auth._sso_expiry_from_env(None)
    # make sure no accidental changes affect sso timeout
    assert expiry == timedelta(days=7)
def test_sso_expiry_from_env():
    """An env value is interpreted as a number of seconds."""
    expiry = sentry.utils.auth._sso_expiry_from_env("20")
    assert expiry == timedelta(seconds=20)
def test_construct_link_with_query():
    """construct_link_with_query appends encoded params and omits the '?'
    entirely when the query dict is empty."""
    cases = [
        # (path, query_params, expected)
        ("foobar", {"biz": "baz"}, "foobar?biz=baz"),
        # no excess '?' appended if query params are empty
        ("foobar", {}, "foobar"),
    ]
    for path, query_params, expected_path in cases:
        assert construct_link_with_query(path=path, query_params=query_params) == expected_path
import pytest
import salt.config
import salt.daemons.masterapi as masterapi
import salt.utils.platform
from tests.support.mock import MagicMock, patch
pytestmark = [
pytest.mark.slow_test,
]
class FakeCache:
    """In-memory stand-in for the salt master cache used by RemoteFuncs.

    Values are held in a plain dict keyed by the ``(bank, key)`` pair.
    """

    def __init__(self):
        # Maps (bank, key) -> stored value.
        self.data = {}

    def store(self, bank, key, value):
        """Remember *value* under the (bank, key) slot, replacing any prior value."""
        self.data[(bank, key)] = value

    def fetch(self, bank, key):
        """Return the value stored under (bank, key); raises KeyError if absent."""
        return self.data[(bank, key)]
@pytest.fixture
def funcs(temp_salt_master):
    """RemoteFuncs wired to a fresh FakeCache, built per test from the
    temp master's config."""
    opts = temp_salt_master.config.copy()
    # Clear the process-wide MemCache so state does not leak between tests.
    # NOTE(review): relies on ``salt.cache`` being reachable via the ``salt``
    # package import above — confirm.
    salt.cache.MemCache.data.clear()
    funcs = masterapi.RemoteFuncs(opts)
    funcs.cache = FakeCache()
    return funcs
def METHOD_NAME(funcs, tgt_type_key="tgt_type"):
    """
    Asserts that ``mine_get`` gives the expected results.

    Actually this only tests that:
    - the correct check minions method is called
    - the correct cache key is subsequently used

    :param funcs: the ``RemoteFuncs`` fixture with a FakeCache attached.
    :param tgt_type_key: request key carrying the target type; callers pass
        the legacy ``expr_form`` spelling to exercise backwards compatibility.
    """
    funcs.cache.store("minions/webserver", "mine", dict(ip_addr="2001:db8::1:3"))
    # Patch the compound matcher so the "compound" tgt_type resolves to the
    # minion whose cache entry was stored above.
    with patch(
        "salt.utils.minions.CkMinions._check_compound_minions",
        MagicMock(return_value=(dict(minions=["webserver"], missing=[]))),
    ):
        ret = funcs._mine_get(
            {
                "id": "requester_minion",
                "tgt": "G@roles:web",
                "fun": "ip_addr",
                tgt_type_key: "compound",
            }
        )
    assert ret == dict(webserver="2001:db8::1:3")
def test_mine_get_pre_nitrogen_compat(funcs):
    """
    Asserts that pre-Nitrogen API key ``expr_form`` is still accepted.
    This is what minions before Nitrogen would issue.
    """
    # Re-run the shared mine_get scenario with the legacy key name.
    METHOD_NAME(funcs, tgt_type_key="expr_form")
def test_mine_get_dict_str(funcs, tgt_type_key="tgt_type"):
    """
    Asserts that ``mine_get`` gives the expected results when request
    is a comma-separated list.

    Actually this only tests that:
    - the correct check minions method is called
    - the correct cache key is subsequently used
    """
    funcs.cache.store(
        "minions/webserver",
        "mine",
        dict(ip_addr="2001:db8::1:3", ip4_addr="127.0.0.1"),
    )
    # Patch the compound matcher so the tgt resolves to the cached minion.
    with patch(
        "salt.utils.minions.CkMinions._check_compound_minions",
        MagicMock(return_value=(dict(minions=["webserver"], missing=[]))),
    ):
        ret = funcs._mine_get(
            {
                "id": "requester_minion",
                "tgt": "G@roles:web",
                # Comma-separated string requests multiple mine functions.
                "fun": "ip_addr,ip4_addr",
                tgt_type_key: "compound",
            }
        )
    # Multiple functions are returned keyed by function name.
    assert ret == dict(
        ip_addr=dict(webserver="2001:db8::1:3"),
        ip4_addr=dict(webserver="127.0.0.1"),
    )
def test_mine_get_dict_list(funcs, tgt_type_key="tgt_type"):
    """
    Asserts that ``mine_get`` gives the expected results when request
    is a list.

    Actually this only tests that:
    - the correct check minions method is called
    - the correct cache key is subsequently used
    """
    funcs.cache.store(
        "minions/webserver",
        "mine",
        dict(ip_addr="2001:db8::1:3", ip4_addr="127.0.0.1"),
    )
    # Patch the compound matcher so the tgt resolves to the cached minion.
    with patch(
        "salt.utils.minions.CkMinions._check_compound_minions",
        MagicMock(return_value=(dict(minions=["webserver"], missing=[]))),
    ):
        ret = funcs._mine_get(
            {
                "id": "requester_minion",
                "tgt": "G@roles:web",
                # A proper list of function names, as opposed to the
                # comma-separated form covered by test_mine_get_dict_str.
                "fun": ["ip_addr", "ip4_addr"],
                tgt_type_key: "compound",
            }
        )
    assert ret == dict(
        ip_addr=dict(webserver="2001:db8::1:3"),
        ip4_addr=dict(webserver="127.0.0.1"),
    )
def test_mine_get_acl_allowed(funcs):
    """
    Asserts that ``mine_get`` gives the expected results when this is allowed
    in the client-side ACL that was stored in the mine data.
    """
    # Store a mine entry wrapped in an ACL envelope that explicitly allows
    # the requesting minion.
    funcs.cache.store(
        "minions/webserver",
        "mine",
        {
            "ip_addr": {
                salt.utils.mine.MINE_ITEM_ACL_DATA: "2001:db8::1:4",
                salt.utils.mine.MINE_ITEM_ACL_ID: salt.utils.mine.MINE_ITEM_ACL_VERSION,
                "allow_tgt": "requester_minion",
                "allow_tgt_type": "glob",
            },
        },
    )
    # The glob check is for the resolution of the allow_tgt
    # The compound check is for the resolution of the tgt in the mine_get request.
    with patch(
        "salt.utils.minions.CkMinions._check_glob_minions",
        MagicMock(return_value={"minions": ["requester_minion"], "missing": []}),
    ), patch(
        "salt.utils.minions.CkMinions._check_compound_minions",
        MagicMock(return_value={"minions": ["webserver"], "missing": []}),
    ):
        ret = funcs._mine_get(
            {
                "id": "requester_minion",
                "tgt": "anything",
                "tgt_type": "compound",
                "fun": ["ip_addr"],
            }
        )
    # The ACL envelope is stripped: the caller sees only the raw data.
    assert ret == {"ip_addr": {"webserver": "2001:db8::1:4"}}
def test_mine_get_acl_rejected(funcs):
    """
    Asserts that ``mine_get`` gives the expected results when this is rejected
    in the client-side ACL that was stored in the mine data. This results in
    no data being sent back (just as if the entry wouldn't exist).
    """
    # ACL envelope allows a *different* minion than the requester.
    funcs.cache.store(
        "minions/webserver",
        "mine",
        {
            "ip_addr": {
                salt.utils.mine.MINE_ITEM_ACL_DATA: "2001:db8::1:4",
                salt.utils.mine.MINE_ITEM_ACL_ID: salt.utils.mine.MINE_ITEM_ACL_VERSION,
                "allow_tgt": "not_requester_minion",
                "allow_tgt_type": "glob",
            }
        },
    )
    # The glob check is for the resolution of the allow_tgt
    # The compound check is for the resolution of the tgt in the mine_get request.
    with patch(
        "salt.utils.minions.CkMinions._check_glob_minions",
        MagicMock(return_value={"minions": ["not_requester_minion"], "missing": []}),
    ), patch(
        "salt.utils.minions.CkMinions._check_compound_minions",
        MagicMock(return_value={"minions": ["webserver"], "missing": []}),
    ):
        ret = funcs._mine_get(
            {
                "id": "requester_minion",
                "tgt": "anything",
                "tgt_type": "compound",
                "fun": ["ip_addr"],
            }
        )
    # Rejected entries are silently omitted, not reported as errors.
    assert ret == {}
6,293 | test clean permission | """Tests for the Schema endpoint on the API of the Marsha project."""
from django.test import TestCase, override_settings
from rest_framework.permissions import BasePermission
from marsha.core.api.schema import (
clean_permission,
extract_permission_docstring,
format_permissions_and_docstring,
)
from marsha.core.tests.testing_utils import reload_urlconf
@override_settings(DEBUG=True)
class SchemaAPITest(TestCase):
    """Test the API route for the schema."""

    @classmethod
    def setUpClass(cls):
        """Reload the URL conf so the DEBUG-only schema route is registered."""
        super().setUpClass()
        # Force URLs reload to use DEBUG=true settings in this test suite.
        reload_urlconf()

    def test_api_schema(self):
        """The API has a schema route that answers."""
        response = self.client.get("/api/schema/")
        self.assertEqual(response.status_code, 200)
        # The schema is served as OpenAPI YAML, displayed inline.
        self.assertEqual(
            response.get("Content-Type"), "application/vnd.oai.openapi; charset=utf-8"
        )
        self.assertEqual(
            response.get("Content-Disposition"), 'inline; filename="Marsha API.yaml"'
        )
class PermissionA(BasePermission):
    """Permission A."""
    # Intentionally empty: only its name/docstring are used by the
    # permission-formatting tests below.
class PermissionB(BasePermission):
    """Permission B."""
    # Intentionally empty test fixture, see PermissionA.
class PermissionC(BasePermission):
    """Permission C."""
    # Intentionally empty test fixture, see PermissionA.
class PermissionForSchemaAPITest(TestCase):
    """Test case dedicated to the permission formatting/display in the Swagger UI."""

    def METHOD_NAME(self):
        """Test the `clean_permission` expected behavior."""
        # Each case pairs a (possibly composed) permission with the exact
        # markdown string clean_permission is expected to produce.
        for permission, expected_string in [
            (
                PermissionA & PermissionB,
                " **(** PermissionA **AND** PermissionB **)** ",
            ),
            (
                PermissionA | PermissionB,
                " **(** PermissionA **OR** PermissionB **)** ",
            ),
            (
                ~PermissionA,
                " **(NOT** PermissionA **)** ",
            ),
            (
                PermissionA,
                "PermissionA",
            ),
            (
                (PermissionA & PermissionB) | ~PermissionC,
                (
                    " **(** **(** PermissionA **AND** PermissionB **)** "
                    "**OR** **(NOT** PermissionC **)** **)** "
                ),
            ),
        ]:
            with self.subTest(permission=permission):
                self.assertEqual(
                    # mimic `get_permissions` by calling permission
                    clean_permission(permission()),
                    expected_string,
                )

    def test_extract_permission_docstring(self):
        """Test the `extract_permission_docstring` expected behavior."""
        # The extraction must deduplicate classes and cover every operand of
        # composed permissions (AND/OR/NOT).
        for permission, expected_dict in [
            (
                PermissionA & PermissionB,
                {
                    "PermissionA": "Permission A.",
                    "PermissionB": "Permission B.",
                },
            ),
            (
                PermissionA | PermissionB,
                {
                    "PermissionA": "Permission A.",
                    "PermissionB": "Permission B.",
                },
            ),
            (
                ~PermissionA,
                {
                    "PermissionA": "Permission A.",
                },
            ),
            (
                PermissionA,
                {
                    "PermissionA": "Permission A.",
                },
            ),
            (
                (PermissionA & PermissionB) | ~PermissionA,
                {
                    "PermissionA": "Permission A.",
                    "PermissionB": "Permission B.",
                },
            ),
            (
                (PermissionA & PermissionB) | ~PermissionC,
                {
                    "PermissionA": "Permission A.",
                    "PermissionB": "Permission B.",
                    "PermissionC": "Permission C.",
                },
            ),
        ]:
            with self.subTest(permission=permission):
                self.assertEqual(
                    # mimic `get_permissions` by calling permission
                    extract_permission_docstring(permission()),
                    expected_dict,
                )

    def test_format_permissions_and_docstring(self):
        """Test the `format_permissions_and_docstring` expected behavior."""
        # Single permission: rendered without a leading list dash.
        self.assertEqual(
            format_permissions_and_docstring(
                ["permission formatted string"],
                {"some": "docstring"},
            ),
            (
                "## Permissions\n\n"
                "permission formatted string\n"
                "### Permission description\n\n"
                "- **some** : docstring"
            ),
        )
        # Several permissions: each rendered as a markdown list item.
        self.assertEqual(
            format_permissions_and_docstring(
                ["permission formatted string", "another permission"],
                {"some": "docstring", "another": "docstring"},
            ),
            (
                "## Permissions\n\n"
                "- permission formatted string\n"
                "- another permission\n"
                "### Permission description\n\n"
                "- **some** : docstring\n"
                "- **another** : docstring"
            ),
        )
6,294 | transform | """Extract LIEF features from PE files"""
from h2oaicore.transformer_utils import CustomTransformer
import datatable as dt
import numpy as np
class PEDataDirectoryFeatures(CustomTransformer):
    """Extract PE data-directory features (size and virtual address of each
    data directory) from PE files using LIEF.

    Input is a single text column of file paths; output is one numeric column
    per data-directory attribute. Files that cannot be parsed as PE yield
    all-zero features.
    """

    _unsupervised = True

    _modules_needed_by_name = ['lief==0.11.4']
    _regression = True
    _binary = True
    _multiclass = True
    _is_reproducible = True
    _parallel_task = True  # if enabled, params_base['n_jobs'] will be >= 1 (adaptive to system), otherwise 1
    _can_use_gpu = True  # if enabled, will use special job scheduler for GPUs
    _can_use_multi_gpu = True  # if enabled, can get access to multiple GPUs for single transformer (experimental)
    _numeric_output = True

    # Data-directory names used for the all-zero fallback, in the same order
    # as the keys of the original hard-coded fallback dict.
    _DIRECTORY_NAMES = (
        'EXPORT_TABLE', 'IMPORT_TABLE', 'RESOURCE_TABLE', 'EXCEPTION_TABLE',
        'CERTIFICATE_TABLE', 'BASE_RELOCATION_TABLE', 'DEBUG', 'ARCHITECTURE',
        'GLOBAL_PTR', 'TLS_TABLE', 'LOAD_CONFIG_TABLE', 'BOUND_IMPORT',
        'IAT', 'DELAY_IMPORT_DESCRIPTOR', 'CLR_RUNTIME_HEADER',
    )

    @staticmethod
    def get_default_properties():
        return dict(col_type="text", min_cols=1, max_cols=1, relative_importance=1)

    @staticmethod
    def do_acceptance_test():
        return False

    def fit_transform(self, X: dt.Frame, y: np.array = None):
        # Stateless transformer: fitting is identical to transforming.
        return self.METHOD_NAME(X)

    def load_pe(self, file_path):
        """Read the file at *file_path* and return its raw bytes."""
        with open(file_path, 'rb') as f:
            return bytearray(f.read())

    def data_directory_features(self, lief_binary):
        """Return {Data_Directory_<NAME>_size/virtual_address: value} for each
        data directory of the parsed PE binary."""
        features = {}
        for data_directory in lief_binary.data_directories:
            # str(type) looks like "DATA_DIRECTORY.EXPORT_TABLE"; keep the tail.
            name = str(data_directory.type).split(".")[1]
            features['Data_Directory_{}_size'.format(name)] = data_directory.size
            features['Data_Directory_{}_virtual_address'.format(name)] = data_directory.rva
        return features

    def get_data_directory_features(self, file_path):
        """Parse *file_path* as a PE and extract its features; on any parsing
        failure fall back to all-zero features so the output schema is stable."""
        import lief
        try:
            pe_bytez = self.load_pe(file_path)
            lief_binary = lief.PE.parse(list(pe_bytez))
            return self.data_directory_features(lief_binary)
        except Exception:
            # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
            # are not swallowed. Not a PE / unreadable: emit zeros, preserving
            # the original key order (size then virtual_address per directory).
            X = {}
            for name in self._DIRECTORY_NAMES:
                X['Data_Directory_{}_size'.format(name)] = 0
                X['Data_Directory_{}_virtual_address'.format(name)] = 0
            return X

    def METHOD_NAME(self, X: dt.Frame):
        """Transform a one-column frame of file paths into the feature frame."""
        import pandas as pd
        ret_df = pd.DataFrame(
            [
                self.get_data_directory_features(x)
                for x in X.to_pandas().values[:, 0]
            ]
        )
        self._output_feature_names = ret_df.columns.to_list()
        self._feature_desc = self._output_feature_names
        return ret_df
6,295 | decorate | # event/api.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Public API functions for the event system.
"""
from __future__ import absolute_import
from .. import util, exc
from .base import _registrars
from .registry import _EventKey
CANCEL = util.symbol('CANCEL')
NO_RETVAL = util.symbol('NO_RETVAL')
def _event_key(target, identifier, fn):
    """Locate the first registrar for *identifier* that accepts *target*
    and build an :class:`._EventKey` for it.

    Raises :class:`.InvalidRequestError` when no registrar accepts the
    given target.
    """
    for registrar in _registrars[identifier]:
        accepted_target = registrar._accept_with(target)
        if accepted_target is None:
            continue
        return _EventKey(target, identifier, fn, accepted_target)
    raise exc.InvalidRequestError("No such event '%s' for target '%s'" %
                                  (identifier, target))
def listen(target, identifier, fn, *args, **kw):
    """Register a listener function for the given target.

    e.g.::

        from sqlalchemy import event
        from sqlalchemy.schema import UniqueConstraint

        def unique_constraint_name(const, table):
            const.name = "uq_%s_%s" % (
                table.name,
                list(const.columns)[0].name
            )
        event.listen(
            UniqueConstraint,
            "after_parent_attach",
            unique_constraint_name)

    Pass ``once=True`` to invoke the listener only for the first
    occurrence of the event (added in 0.9.4).

    .. note::

        :func:`.listen` cannot be called while the target event is being
        run: the event list is a mutable collection that disallows
        mutation during iteration (a ``collections.deque()`` as of
        1.0.0). Registration and removal are configurational operations,
        not "high velocity" ones; systems needing rapid association /
        deassociation should multiplex through a single listener.

    .. seealso::

        :func:`.listens_for`

        :func:`.remove`

    """
    event_key = _event_key(target, identifier, fn)
    event_key.listen(*args, **kw)
def listens_for(target, identifier, *args, **kw):
    """Decorate a function as a listener for the given target + identifier.

    e.g.::

        from sqlalchemy import event
        from sqlalchemy.schema import UniqueConstraint

        @event.listens_for(UniqueConstraint, "after_parent_attach")
        def unique_constraint_name(const, table):
            const.name = "uq_%s_%s" % (
                table.name,
                list(const.columns)[0].name
            )

    Pass ``once=True`` to invoke the listener only for the first
    occurrence of the event (added in 0.9.4).

    .. seealso::

        :func:`.listen` - general description of event listening

    """
    def register(fn):
        # Delegate to listen() with the decorated function, then return the
        # function unchanged so the decorator is transparent.
        listen(target, identifier, fn, *args, **kw)
        return fn

    return register
def remove(target, identifier, fn):
    """Remove an event listener.

    The arguments must match exactly those which were sent to
    :func:`.listen`; every registration that proceeded from that call
    (including propagation to subclasses) is reverted.

    e.g.::

        # if a function was registered like this...
        @event.listens_for(SomeMappedClass, "before_insert", propagate=True)
        def my_listener_function(*arg):
            pass

        # ... it's removed like this
        event.remove(SomeMappedClass, "before_insert", my_listener_function)

    .. versionadded:: 0.9.0

    .. note::

        :func:`.remove` cannot be called while the target event is being
        run: the event list is a mutable collection that disallows
        mutation during iteration (a ``collections.deque()`` as of
        1.0.0). Registration and removal are configurational operations,
        not "high velocity" ones.

    .. seealso::

        :func:`.listen`

    """
    event_key = _event_key(target, identifier, fn)
    event_key.remove()
def contains(target, identifier, fn):
    """Return True if the given target/ident/fn is set up to listen.

    .. versionadded:: 0.9.0

    """
    event_key = _event_key(target, identifier, fn)
    return event_key.contains()
6,296 | setup | from __future__ import annotations
import ast
import importlib
import inspect
import re
from functools import cache
from pathlib import Path
from typing import TYPE_CHECKING, Any, Generator
from docutils.utils import get_source_line
if TYPE_CHECKING:
from docutils.nodes import Element, Node
from sphinx.addnodes import pending_xref
from sphinx.application import Sphinx
from sphinx.environment import BuildEnvironment
@cache
def _get_module_ast(source_file: str) -> ast.AST | ast.Module:
    """Parse *source_file* and return its module AST, memoized per path."""
    source_text = Path(source_file).read_text()
    return ast.parse(source_text)
def _get_import_nodes(nodes: list[ast.stmt]) -> Generator[ast.Import | ast.ImportFrom, None, None]:
    """Yield every import statement in *nodes*, descending into
    ``if TYPE_CHECKING:`` blocks (but not other conditionals)."""
    for statement in nodes:
        if isinstance(statement, (ast.Import, ast.ImportFrom)):
            yield statement
            continue
        is_type_checking_block = (
            isinstance(statement, ast.If)
            and getattr(statement.test, "id", None) == "TYPE_CHECKING"
        )
        if is_type_checking_block:
            yield from _get_import_nodes(statement.body)
@cache
def get_module_global_imports(module_import_path: str, reference_target_source_obj: str) -> set[str]:
    """Return a set of names that are imported globally within the containing
    module of ``reference_target_source_obj``, including imports in
    ``if TYPE_CHECKING`` blocks.
    """
    module = importlib.import_module(module_import_path)
    source_obj = getattr(module, reference_target_source_obj)

    module_tree = _get_module_ast(inspect.getsourcefile(source_obj))
    imported_names: set[str] = set()
    for import_node in _get_import_nodes(module_tree.body):
        for alias in import_node.names:
            # An "import x as y" exposes y; a plain import exposes x.
            imported_names.add(alias.asname or alias.name)
    return imported_names
def on_warn_missing_reference(app: Sphinx, domain: str, node: Node) -> bool | None:
    """Suppress missing-reference warnings covered by ``ignore_missing_refs``.

    Returns True to suppress the warning, None to let Sphinx emit it.
    ``ignore_missing_refs`` maps a source (exact string or compiled regex)
    to either a set of target names or a regex matching target names.
    """
    ignore_refs: dict[str | re.Pattern, set[str] | re.Pattern] = app.config["ignore_missing_refs"]
    if node.tagname != "pending_xref":  # type: ignore[attr-defined]
        return None

    if not hasattr(node, "attributes"):
        return None

    attributes = node.attributes  # type: ignore[attr-defined]
    target = attributes["reftarget"]

    if reference_target_source_obj := attributes.get("py:class", attributes.get("py:meth", attributes.get("py:func"))):
        global_names = get_module_global_imports(attributes["py:module"], reference_target_source_obj)

        if target in global_names:
            # autodoc has issues with if TYPE_CHECKING imports, and randomly with type aliases in annotations,
            # so we ignore those errors if we can validate that such a name exists in the containing modules global
            # scope or an if TYPE_CHECKING block.
            # see: https://github.com/sphinx-doc/sphinx/issues/11225 and https://github.com/sphinx-doc/sphinx/issues/9813
            # for reference
            return True

    # for various other autodoc issues that can't be resolved automatically, we check the exact path to be able
    # to suppress specific warnings
    source_line = get_source_line(node)[0]
    source = source_line.split(" ")[-1]
    if target in ignore_refs.get(source, []):
        return True
    ignore_ref_rgs = {rg: targets for rg, targets in ignore_refs.items() if isinstance(rg, re.Pattern)}
    for pattern, targets in ignore_ref_rgs.items():
        if not pattern.match(source):
            continue
        # BUG FIX: previously a set of targets that did NOT contain the target
        # fell through to ``targets.match(target)``, raising AttributeError
        # (sets have no .match). Branch on the container type instead.
        if isinstance(targets, set):
            if target in targets:
                return True
        elif targets.match(target):
            return True
    return None
def on_missing_reference(app: Sphinx, env: BuildEnvironment, node: pending_xref, contnode: Element) -> Any:
    """Try to resolve a missing cross-reference before Sphinx gives up.

    Attempts a ``py:data`` resolution first, then falls back to "any"
    resolution; returns the resolved node or None.
    """
    if not hasattr(node, "attributes"):
        return None

    attributes = node.attributes  # type: ignore[attr-defined]
    target = attributes["reftarget"]
    py_domain = env.domains["py"]

    # autodoc sometimes incorrectly resolves these types, so we try to resolve them as py:data fist and fall back to any
    new_node = py_domain.resolve_xref(env, node["refdoc"], app.builder, "data", target, node, contnode)
    if new_node is None:
        resolved_xrefs = py_domain.resolve_any_xref(env, node["refdoc"], app.builder, target, node, contnode)
        for ref in resolved_xrefs:
            if ref:
                # Take the first non-empty resolution.
                return ref[1]
    return new_node
def on_env_before_read_docs(app: Sphinx, env: BuildEnvironment, docnames: set[str]) -> None:
    """Create the temporary examples directory and expose it on the build env."""
    examples_dir = Path.cwd() / "docs/_build/_tmp_examples"
    examples_dir.mkdir(parents=True, exist_ok=True)
    env.tmp_examples_path = examples_dir
def METHOD_NAME(app: Sphinx) -> dict[str, bool]:
    """Register this extension's event handlers and config values."""
    # Dict order matches the original registration order.
    handlers = {
        "env-before-read-docs": on_env_before_read_docs,
        "missing-reference": on_missing_reference,
        "warn-missing-reference": on_warn_missing_reference,
    }
    for event_name, handler in handlers.items():
        app.connect(event_name, handler)

    app.add_config_value("ignore_missing_refs", default={}, rebuild=False)

    return {"parallel_read_safe": True, "parallel_write_safe": True}
6,297 | test tensordot list neg dim | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import OrderedDict
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.test_util import GenArgList
from oneflow.test_utils.automated_test_util import *
@flow.unittest.skip_unless_1n1d()
class TestTensordot(flow.unittest.TestCase):
    """Checks oneflow's tensordot against torch.tensordot via the autotest
    framework, covering the different forms the ``dims`` argument can take."""

    @autotest(n=5, rtol=1e-2, atol=1e-3)
    def test_tensordot_intdim(test_case):
        # Integer ``dims``: the trailing axes of x are contracted with the
        # leading axes of y; sizes are randomized but kept compatible via
        # dims_list offsets.
        device = random_device()
        dims = random()
        dims_list = [random().to(int).value() for i in range(dims.to(int).value() + 3)]
        x = random_tensor(
            ndim=3, dim0=dims_list[0], dim1=dims_list[1], dim2=dims_list[2],
        ).to(device)
        y = random_tensor(
            ndim=3,
            dim0=dims_list[0 + dims.to(int).value()],
            dim1=dims_list[1 + dims.to(int).value()],
            dim2=dims_list[2 + dims.to(int).value()],
        ).to(device)
        z = torch.tensordot(x, y, dims=3 - dims.to(int).value())
        return z

    @autotest(n=5, rtol=1e-2, atol=1e-3)
    def test_tensordot_list_dim(test_case):
        # ``dims`` given as a pair of axis lists.
        device = random_device()
        x = random_tensor(4, 1, 3, 2, 5).to(device)
        y = random_tensor(4, 4, 2, 3, 5).to(device)
        z = torch.tensordot(x, y, dims=[[1, 2, 0], [2, 1, 0]])
        return z

    @autotest(n=5, rtol=1e-2, atol=1e-2)
    def test_tensordot_tuple_dim(test_case):
        # ``dims`` given as a tuple of axis lists.
        device = random_device()
        x = random_tensor(4, 1, 3, 2, 5).to(device)
        y = random_tensor(4, 4, 2, 3, 5).to(device)
        z = torch.tensordot(x, y, dims=([1, 2, 0], [2, 1, 0]))
        return z

    @autotest(n=5, rtol=1e-2, atol=1e-3)
    def METHOD_NAME(test_case):
        # Negative axis indices must resolve like their positive equivalents.
        device = random_device()
        x = random_tensor(4, 1, 3, 2, 5).to(device)
        y = random_tensor(4, 4, 2, 3, 5).to(device)
        z = torch.tensordot(x, y, dims=[[-3, -2, -4], [-2, -3, -4]])
        return z

    @autotest(check_graph=False, rtol=1e-2, atol=1e-3)
    def test_tensordot_backward(test_case):
        # Gradients must flow through tensordot.
        device = random_device()
        x = random_tensor(3, 3, 4, 5).to(device)
        y = random_tensor(2, 4, 5).to(device)
        z = torch.tensordot(x, y, dims=[[1, 2], [0, 1]])
        z.sum().backward()

    @autotest(check_graph=False)
    def test_tensordot_tensor_dim(test_case):
        # ``dims`` supplied as a 2xN integer tensor; compared manually against
        # torch since the autotest wrapper cannot generate tensor dims.
        def _test_tensor_dim(test_case, device):
            np_dim = np.array([[1, 2, 3], [1, 2, 3]], dtype=int)
            flow_dim = flow.tensor(np_dim).to(device)
            torch_dim = torch.tensor(np_dim).to(device)
            np_random_array = np.random.randn(2, 3, 4, 5)
            flow_tensor = flow.tensor(np_random_array).to(device)
            torch_tensor = torch.tensor(np_random_array).to(device)
            flow_result = flow.tensordot(flow_tensor, flow_tensor, dims=flow_dim)
            torch_result = torch.tensordot(torch_tensor, torch_tensor, dims=torch_dim)
            test_case.assertTrue(
                np.allclose(
                    flow_result.numpy(),
                    torch_result.cpu().numpy(),
                    rtol=0.0001,
                    atol=0.0001,
                )
            )

        arg_dict = OrderedDict()
        arg_dict["device"] = ["cpu", "cuda"]
        for arg in GenArgList(arg_dict):
            _test_tensor_dim(test_case, arg[0])

    @autotest(n=5, check_graph=False, rtol=1e-2, atol=1e-2)
    def test_tensordot_single_item_tensor_dim(test_case):
        # A single-element tensor as ``dims`` behaves like a plain integer.
        device = random_device()
        dims = random_tensor(1, dim0=1, low=0, high=4, dtype=int).to(device)
        x = random_tensor(3, dim0=4, dim1=4, dim2=4).to(device)
        y = random_tensor(3, dim0=4, dim1=4, dim2=4).to(device)
        z = torch.tensordot(x, y, dims=dims)
        return z

    @autotest(n=5, rtol=1e-3, atol=1e-4)
    def test_tensordot_broadcast(test_case):
        # All-size-1 axes in x exercise broadcasting against y's full shape.
        device = random_device()
        x = random_tensor(4, 1, 1, 1, 1).to(device)
        y = random_tensor(4, 2, 3, 4, 5).to(device)
        z = torch.tensordot(x, y, dims=random(high=5).to(int).value())
        return z
# Allow running this test file directly (outside the pytest runner).
if __name__ == "__main__":
    unittest.main()
6,298 | wrap inputs | # Copyright (c) 2023 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, Optional, Tuple
import torch
from nncf.common.quantization.structs import QuantizationPreset
from nncf.data import Dataset
from nncf.parameters import ModelType
from nncf.parameters import TargetDevice
from nncf.quantization.advanced_parameters import AdvancedQuantizationParameters
from nncf.quantization.algorithms.post_training.algorithm import PostTrainingQuantization
from nncf.scopes import IgnoredScope
from nncf.torch.dynamic_graph.context import no_nncf_trace
from nncf.torch.dynamic_graph.io_handling import replicate_same_tensors
from nncf.torch.dynamic_graph.io_handling import wrap_nncf_model_inputs_with_objwalk
from nncf.torch.dynamic_graph.io_handling import wrap_nncf_model_outputs_with_objwalk
from nncf.torch.nested_objects_traversal import objwalk
from nncf.torch.nncf_network import NNCFNetwork
from nncf.torch.utils import get_model_device
from nncf.torch.utils import is_tensor
from nncf.torch.utils import training_mode_switcher
def create_nncf_network(model: torch.nn.Module, dataset: Dataset) -> NNCFNetwork:
    """
    Creates NNCFNetwork instance for the PyTorch model where the first item of dataset
    is used for model tracing.

    :param model: PyTorch model
    :param dataset: Dataset for model tracing
    :return: NNCFNetwork instance for the input model
    """

    def get_inputs(dataloader_output: Any) -> Tuple[Tuple, Dict]:
        # Normalize a dataloader item into (args, kwargs) form; a bare item
        # becomes a single positional argument.
        if not isinstance(dataloader_output, tuple):
            dataloader_output = (dataloader_output,)
        return dataloader_output, {}

    def METHOD_NAME(args, kwargs):
        # Wrap model inputs so NNCF can trace them through nested containers.
        return wrap_nncf_model_inputs_with_objwalk(args, kwargs)

    def wrap_outputs(retval):
        # Symmetric wrapping for model outputs.
        return wrap_nncf_model_outputs_with_objwalk(retval)

    def create_dummy_forward_fn(dataset, device):
        # Build a forward fn that feeds the first dataset item through the
        # model; data preparation happens with tracing disabled.
        def dummy_forward(model):
            with no_nncf_trace():
                args = next(iter(dataset.get_inference_data()))
                args, kwargs = get_inputs(args)

                def send_to_device(tensor):
                    return tensor.to(device)

                args = objwalk(args, is_tensor, send_to_device)
                kwargs = objwalk(kwargs, is_tensor, send_to_device)

            # Input wrapping must happen OUTSIDE no_nncf_trace so the wrapped
            # inputs participate in graph tracing.
            args, kwargs = METHOD_NAME(args, kwargs)
            retval = model(*args, **kwargs)
            retval = replicate_same_tensors(retval)
            return wrap_outputs(retval)

        return dummy_forward

    device = get_model_device(model)
    dummy_forward_fn = create_dummy_forward_fn(dataset, device)
    # Trace with training mode switched off.
    with training_mode_switcher(model, is_training=False):
        nncf_network = NNCFNetwork(
            model, dummy_forward_fn=dummy_forward_fn, wrap_inputs_fn=METHOD_NAME, wrap_outputs_fn=wrap_outputs
        )

        nncf_network.nncf.get_tracing_context().disable_trace_dynamic_graph()

    return nncf_network
def quantize_impl(
    model: torch.nn.Module,
    calibration_dataset: Dataset,
    preset: QuantizationPreset,
    target_device: TargetDevice,
    subset_size: int,
    fast_bias_correction: bool,
    model_type: Optional[ModelType] = None,
    ignored_scope: Optional[IgnoredScope] = None,
    advanced_parameters: Optional[AdvancedQuantizationParameters] = None,
) -> torch.nn.Module:
    """
    Experimental implementation of the `quantize()` method for the PyTorch backend.

    :param model: Model to quantize; switched to eval mode and traced using the
        first item of ``calibration_dataset``.
    :param calibration_dataset: Dataset used for tracing and statistics collection.
    :param preset: Quantization preset forwarded to the algorithm.
    :param target_device: Target hardware; ``CPU_SPR`` is not supported here.
    :param subset_size: Number of calibration samples.
    :param fast_bias_correction: Only ``True`` is supported by this backend.
    :param model_type: Optional model-type hint.
    :param ignored_scope: Scope to exclude from quantization.
    :param advanced_parameters: Advanced algorithm parameters.
    :return: The quantized model with dynamic graph building disabled.
    :raises ValueError: If ``fast_bias_correction`` is False.
    :raises RuntimeError: If ``target_device`` is ``TargetDevice.CPU_SPR``.
    """
    if fast_bias_correction is False:
        raise ValueError(f"fast_bias_correction={fast_bias_correction} is not supported")
    if target_device == TargetDevice.CPU_SPR:
        raise RuntimeError("target_device == CPU_SPR is not supported")

    nncf_network = create_nncf_network(model.eval(), calibration_dataset)

    quantization_algorithm = PostTrainingQuantization(
        preset=preset,
        target_device=target_device,
        subset_size=subset_size,
        fast_bias_correction=fast_bias_correction,
        model_type=model_type,
        ignored_scope=ignored_scope,
        advanced_parameters=advanced_parameters,
    )
    quantized_model = quantization_algorithm.apply(
        nncf_network, nncf_network.nncf.get_graph(), dataset=calibration_dataset
    )

    # Freeze the traced graph: further forward passes must not extend it.
    quantized_model.nncf.disable_dynamic_graph_building()
    return quantized_model
6,299 | resource apply sparse | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exponentiated Gradient Delta-Delta optimizer."""
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import optimizer
# pylint: enable=g-direct-tensorflow-import
class EGDD(optimizer.Optimizer):
"""A version of GD Momentum with adaptive gain and learning rate.
Exponentiated Gradient Delta-delta optimizer starts with a local gain of 1.0
for every weight and a lr_scale of 1.0 for all weights. The EGDD update rule
applies:
momentum <- mu * momentum + learning_rate * gain * grad
var <- var - lr_scale * momentum
The gain as well as the lr_scale are updated using the unnormalized
exponentiated gradient algorithm [KW97].
Reference: TBA
[KW97] Kivinen, J., & Warmuth, M. K. Exponentiated gradient versus gradient
descent for linear predictors. Information and Computation, 1997.
"""
    def __init__(self,
                 learning_rate,
                 momentum,
                 beta=0.9,
                 gain_learning_rate=0.01,
                 scale_learning_rate=0.001,
                 initial_gain=1.0,
                 min_gain=1e-2,
                 max_gain=1e2,
                 initial_scale=1.0,
                 min_scale=1e-1,
                 max_scale=1e1,
                 use_directions=True,
                 use_signs=True,
                 name="EGDD"):
        """Construct a new EG-DD optimizer.

        Args:
          learning_rate: A `Tensor` or a floating point value. The learning rate.
          momentum: A `Tensor` or a floating point value.
          beta: `float` decay rate of the gradient EMA.
          gain_learning_rate: `float` gain learning rate.
          scale_learning_rate: `float` scale learning rate.
          initial_gain: `float` initial gain.
          min_gain: `float` minimum gain.
          max_gain: `float` maximum gain.
          initial_scale: `float` initial scale.
          min_scale: `float` minimum learning rate scale.
          max_scale: `float` maximum learning rate scale.
          use_directions: `bool` whether to use directions only for scale updates.
          use_signs: `bool` whether to use the signs for updating gains.
          name: Optional name prefix for the operations created when applying
            gradients.

        Raises:
          ValueError: If the `initial_accumulator_value` is invalid.
        """
        # use_locking=False: slot updates are not guarded by locks.
        super().__init__(False, name)
        self._learning_rate = learning_rate
        self._momentum = momentum
        self._beta = beta
        self._gain_learning_rate = gain_learning_rate
        self._scale_learning_rate = scale_learning_rate
        self._initial_gain = initial_gain
        # Gains and lr_scale are clipped into [min, max] after each update.
        self._min_gain = min_gain
        self._max_gain = max_gain
        self._initial_scale = initial_scale
        self._min_scale = min_scale
        self._max_scale = max_scale
        self._use_directions = use_directions
        self._use_signs = use_signs
def _create_slots(self, var_list):
  """Create the per-variable optimizer state slots.

  For each variable: "momentum" and "gbar" (zeros, same shape as the
  variable), a per-weight "gain" initialized to initial_gain, a length-1
  "lr_scale" initialized to initial_scale, and a length-1 "counter"
  initialized to zero.
  """
  for var in var_list:
    self._zeros_slot(var, "momentum", self._name)
    self._zeros_slot(var, "gbar", self._name)
    per_weight_gain = self._initial_gain * array_ops.ones_like(
        ops.convert_to_tensor(var))
    scale_init = self._initial_scale * array_ops.ones((1))
    self._get_or_make_slot(var, scale_init, "lr_scale", self._name)
    self._get_or_make_slot(var, per_weight_gain, "gain", self._name)
    self._get_or_make_slot(var, array_ops.zeros((1)), "counter", self._name)
def _prepare(self):
  """Resolve (possibly callable) hyperparameters and convert to tensors."""
  lr = self._call_if_callable(self._learning_rate)
  mu = self._call_if_callable(self._momentum)
  self._learning_rate_tensor = ops.convert_to_tensor(
      lr, name="learning_rate")
  self._momentum_tensor = ops.convert_to_tensor(mu, name="momentum")
def _apply_dense(self, grad, var):
  """Apply one EG-DD step to `var` for a dense gradient.

  Updates, in order: the step counter, the scalar lr_scale slot (EG
  update), the per-weight gain slot (EG update), the momentum and gbar
  slots, and finally the variable itself via
      var <- var - lr_scale * momentum.
  Returns the op assigning the new value to `var`.
  """
  lr_scale = self.get_slot(var, "lr_scale")   # scalar learning-rate scale
  momentum = self.get_slot(var, "momentum")   # heavy-ball momentum buffer
  gbar = self.get_slot(var, "gbar")           # EMA of past gradients
  gain = self.get_slot(var, "gain")           # per-weight multiplicative gain
  counter = self.get_slot(var, "counter")     # step count, for EMA bias fix
  counter_updated = state_ops.assign(counter, counter + 1)
  # lr_scale update uses normalized grad and momentum to be independent of dim
  normalized_grad = grad / (linalg_ops.norm(grad) + 1e-10)
  normalized_momentum = momentum / (linalg_ops.norm(momentum) + 1e-10)
  # Apply EG updates on lr_scale:
  #   grad_lr_scale = -inner_product(current_grad, old_momentum)
  #   lr_scale <- lr_scale * exp(-scale_learning_rate * grad_lr_scale)
  # (clipped to [min_scale, max_scale] to keep the multiplicative update
  # from collapsing or exploding)
  lr_scale_unnormalized_updated = clip_ops.clip_by_value(
      lr_scale * math_ops.exp(
          self._scale_learning_rate * math_ops.reduce_sum(grad * momentum)),
      self._min_scale, self._max_scale)
  lr_scale_normalized_updated = clip_ops.clip_by_value(
      lr_scale * math_ops.exp(self._scale_learning_rate * math_ops.reduce_sum(
          normalized_grad * normalized_momentum)), self._min_scale,
      self._max_scale)
  # Choose the normalized (direction-only) variant iff use_directions is set.
  lr_scale_updated = state_ops.assign(
      lr_scale,
      array_ops.where(self._use_directions, lr_scale_normalized_updated,
                      lr_scale_unnormalized_updated))
  # remove the bias of zero initialization in gbar
  corrected_gbar = gbar / (
      1.0 - self._beta**math_ops.maximum(counter_updated - 1, 1))
  # Apply EG updates on gain:
  #   grad_gain = - current_grad * old_gbar
  #   gain <- gain * exp(-gain_learning_rate * grad_gain)
  gain_unnormalized_updated = clip_ops.clip_by_value(
      gain * math_ops.exp(self._gain_learning_rate * grad * corrected_gbar),
      self._min_gain, self._max_gain)
  # Normalized update uses sign(grad) * sign(gbar) as a proxy for grad_gain.
  gain_normalized_updated = clip_ops.clip_by_value(
      gain * math_ops.exp(self._gain_learning_rate * math_ops.sign(grad) *
                          math_ops.sign(gbar)), self._min_gain,
      self._max_gain)
  gain_updated = state_ops.assign(
      gain,
      array_ops.where(self._use_signs, gain_normalized_updated,
                      gain_unnormalized_updated))
  # scaled_g reads gain_updated, so the momentum step sees the new gains.
  scaled_g = self._learning_rate_tensor * gain_updated * grad
  # Ordering matters: the final var update must observe the freshly
  # assigned lr_scale and momentum; gbar must be updated before we return.
  with ops.control_dependencies([lr_scale_updated, scaled_g]):
    momentum_updated = state_ops.assign(
        momentum, self._momentum_tensor * momentum + scaled_g)
    gbar_updated = state_ops.assign(
        gbar, self._beta * gbar + (1.0 - self._beta) * grad)
  with ops.control_dependencies([gbar_updated]):
    return state_ops.assign_sub(var, lr_scale_updated * momentum_updated)
def _resource_apply_dense(self, grad, var):
  """Resource-variable path: reuse the ref-variable dense update."""
  return self._apply_dense(grad, var)
# Sparse gradients are not handled currently and is part of future work.
def _resource_apply_sparse(self, grad_values, var, grad_indices):
  """Sparse resource-variable update: intentionally a no-op.

  EG-DD does not support sparse gradients yet; returning no_op() silently
  skips the update for variables that receive IndexedSlices gradients.
  """
  # Fixed: the override name was a `METHOD_NAME` placeholder; the TF v1
  # Optimizer hook for sparse resource variables is _resource_apply_sparse.
  return control_flow_ops.no_op()
def _apply_sparse(self, grad, var):
  """Sparse (IndexedSlices) update: intentionally a no-op (unsupported)."""
  # Fixed: removed a stray trailing "|" artifact that made this line a
  # syntax error.
  return control_flow_ops.no_op()