id int64 0 300k | label stringlengths 1 74 ⌀ | text stringlengths 4k 8k |
|---|---|---|
#!/usr/bin/env python3 -u
# -*- coding: utf-8 -*-
__author__ = ["fkiraly"]
__all__ = []
from aeon.utils._testing.scenarios import TestScenario
class TestedMockClass:
    """Mock class used as the call target in TestScenario tests.

    Stores a string in ``a`` and grows it through ``foo``/``bar`` calls so
    the call order made by a scenario is observable in the result.
    """

    def __init__(self, a):
        self.a = a

    def foo(self, b):
        """Append ``b`` to the stored string and return the result."""
        self.a = self.a + b
        return self.a

    def bar(self, c, d="0"):
        """Append ``c`` then ``d`` to the stored string and return the result."""
        self.a = self.a + c
        self.a = self.a + d
        return self.a

    @classmethod
    def baz(cls):
        """Return a fixed string; exercises classmethod handling."""
        return "foo"
def METHOD_NAME():
    """Test basic workflow: construct only with args, call run with minimal args."""
    scenario = TestScenario(
        args={"foo": {"b": "cali"}, "bar": {"c": "fragi", "d": "listic"}}
    )
    mock_instance = TestedMockClass(a="super")
    outcome = scenario.run(mock_instance, method_sequence=["foo", "bar"])
    assert outcome == "supercalifragilistic"
def test_testscenario_object_default_method_sequence():
    """Test basic workflow: construct with args and default method sequence."""
    scenario = TestScenario(
        default_method_sequence=["foo", "bar"],
        args={"foo": {"b": "cali"}, "bar": {"c": "fragi", "d": "listic"}},
    )
    mock_instance = TestedMockClass(a="super")
    # run() should fall back to the default method sequence given above.
    assert scenario.run(mock_instance) == "supercalifragilistic"
def test_testscenario_object_default_arg_sequence():
    """Test basic workflow: construct with args and default arg sequence."""
    scenario = TestScenario(
        default_arg_sequence=["foo", "bar"],
        args={"foo": {"b": "cali"}, "bar": {"c": "fragi", "d": "listic"}},
    )
    mock_instance = TestedMockClass(a="super")
    # run() should fall back to the default arg sequence given above.
    assert scenario.run(mock_instance) == "supercalifragilistic"
def test_testscenario_object_return_all():
    """Test workflow with return_all=True: run returns results of all method calls."""
    obj = TestedMockClass(a="super")
    scenario = TestScenario(
        args={"foo": {"b": "cali"}, "bar": {"c": "fragi", "d": "listic"}},
        default_arg_sequence=["foo", "bar"],
    )
    result = scenario.run(obj, return_all=True)
    # One intermediate result per method call, in call order.
    assert result == ["supercali", "supercalifragilistic"]
def test_testscenario_object_multi_call_defaults():
    """Test basic workflow: default args where methods are called multiple times."""
    call_args = {
        "foo": {"b": "cali"},
        "bar": {"c": "fragi", "d": "listic"},
        "foo-2nd": {"b": "expi"},
        "bar-2nd": {"c": "ali", "d": "docious"},
    }
    # The "-2nd" arg keys pair with repeated entries in the method sequence.
    scenario = TestScenario(
        args=call_args,
        default_arg_sequence=["foo", "bar", "foo-2nd", "bar-2nd"],
        default_method_sequence=["foo", "bar", "foo", "bar"],
    )
    mock_instance = TestedMockClass(a="super")
    assert scenario.run(mock_instance) == "supercalifragilisticexpialidocious"
def test_testscenario_object_multi_call_in_run():
    """Test advanced workflow: run args where methods are called multiple times."""
    call_args = {
        "foo": {"b": "cali"},
        "bar": {"c": "fragi", "d": "listic"},
        "foo-2nd": {"b": "expi"},
        "bar-2nd": {"c": "ali", "d": "docious"},
    }
    scenario = TestScenario(args=call_args)
    mock_instance = TestedMockClass(a="super")
    # Sequences passed to run() directly rather than via constructor defaults.
    outcome = scenario.run(
        mock_instance,
        arg_sequence=["foo", "bar", "foo-2nd", "bar-2nd"],
        method_sequence=["foo", "bar", "foo", "bar"],
    )
    assert outcome == "supercalifragilisticexpialidocious"
def test_testscenario_class_full_options():
    """Test advanced workflow: constructor and methods called multiple times."""
    call_args = {
        "__init__": {"a": "super"},
        "foo": {"b": "cali"},
        "bar": {"c": "fragi", "d": "listic"},
        "foo-2nd": {"b": "expi"},
        "bar-2nd": {"c": "ali", "d": "docious"},
    }
    scenario = TestScenario(args=call_args)
    # Pass the class itself; the scenario constructs it via the "__init__" entry.
    outcome = scenario.run(
        TestedMockClass,
        arg_sequence=["__init__", "foo", "bar", "foo-2nd", "bar-2nd"],
        method_sequence=["__init__", "foo", "bar", "foo", "bar"],
    )
    assert outcome == "supercalifragilisticexpialidocious"
def test_testscenario_class_simple():
    """Test advanced workflow: constructor, but only simple function calls."""
    call_args = {
        "__init__": {"a": "super"},
        "foo": {"b": "cali"},
        "bar": {"c": "fragi", "d": "listic"},
    }
    scenario = TestScenario(args=call_args)
    # Pass the class itself; the scenario constructs it via the "__init__" entry.
    outcome = scenario.run(
        TestedMockClass,
        method_sequence=["__init__", "foo", "bar"],
    )
    assert outcome == "supercalifragilistic"
"""Generic thread tests.
Meant to be used by dummy_thread and thread. To allow for different modules
to be used, test_main() can be called with the module to use as the thread
implementation as its sole argument.
"""
import dummy_thread as _thread
import time
import Queue
import random
import unittest
from test import test_support
DELAY = 0 # Set > 0 when testing a module other than dummy_thread, such as
# the 'thread' module.
class LockTests(unittest.TestCase):
    """Test lock objects."""

    def setUp(self):
        # Create a fresh lock for every test.
        self.lock = _thread.allocate_lock()

    def test_initlock(self):
        # Make sure locks start unlocked.
        self.assertTrue(not self.lock.locked(),
                        "Lock object is not initialized unlocked.")

    def test_release(self):
        # Test self.lock.release()
        self.lock.acquire()
        self.lock.release()
        self.assertTrue(not self.lock.locked(),
                        "Lock object did not release properly.")

    def test_improper_release(self):
        # Make sure release of an unlocked lock raises _thread.error.
        self.assertRaises(_thread.error, self.lock.release)

    def METHOD_NAME(self):
        # Make sure the conditional acquiring of the lock works.
        self.assertTrue(self.lock.acquire(0),
                        "Conditional acquiring of the lock failed.")

    def test_cond_acquire_fail(self):
        # Test that conditionally acquiring a locked lock returns False.
        self.lock.acquire(0)
        self.assertTrue(not self.lock.acquire(0),
                        "Conditional acquiring of a locked lock incorrectly "
                        "succeeded.")

    def test_uncond_acquire_success(self):
        # Make sure unconditional acquiring of a lock works.
        self.lock.acquire()
        self.assertTrue(self.lock.locked(),
                        "Uncondional locking failed.")

    def test_uncond_acquire_return_val(self):
        # Make sure that an unconditional locking returns True.
        self.assertTrue(self.lock.acquire(1) is True,
                        "Unconditional locking did not return True.")
        self.assertTrue(self.lock.acquire() is True)

    def test_uncond_acquire_blocking(self):
        # Make sure that unconditional acquiring of a locked lock blocks.
        def delay_unlock(to_unlock, delay):
            """Hold on to lock for a set amount of time before unlocking."""
            time.sleep(delay)
            to_unlock.release()

        self.lock.acquire()
        start_time = int(time.time())
        # The second thread releases the lock after DELAY seconds; the second
        # acquire() below must block until that happens.
        _thread.start_new_thread(delay_unlock, (self.lock, DELAY))
        if test_support.verbose:
            print
            print "*** Waiting for thread to release the lock "\
                  "(approx. %s sec.) ***" % DELAY
        self.lock.acquire()
        end_time = int(time.time())
        if test_support.verbose:
            print "done"
        self.assertTrue((end_time - start_time) >= DELAY,
                        "Blocking by unconditional acquiring failed.")
class MiscTests(unittest.TestCase):
    """Miscellaneous tests."""

    def test_exit(self):
        # Make sure _thread.exit() raises SystemExit.
        self.assertRaises(SystemExit, _thread.exit)

    def test_ident(self):
        # Test sanity of _thread.get_ident().
        self.assertIsInstance(_thread.get_ident(), int,
                              "_thread.get_ident() returned a non-integer")
        self.assertTrue(_thread.get_ident() != 0,
                        "_thread.get_ident() returned 0")

    def test_LockType(self):
        # Make sure _thread.LockType is the same type as _thread.allocate_lock().
        self.assertIsInstance(_thread.allocate_lock(), _thread.LockType,
                              "_thread.LockType is not an instance of what "
                              "is returned by _thread.allocate_lock()")

    def test_interrupt_main(self):
        # Calling start_new_thread with a function that executes interrupt_main
        # should raise KeyboardInterrupt upon completion.
        def call_interrupt():
            _thread.interrupt_main()

        self.assertRaises(KeyboardInterrupt, _thread.start_new_thread,
                          call_interrupt, tuple())

    def test_interrupt_in_main(self):
        # Make sure that if interrupt_main is called in the main thread that
        # KeyboardInterrupt is raised instantly.
        self.assertRaises(KeyboardInterrupt, _thread.interrupt_main)
class ThreadTests(unittest.TestCase):
    """Test thread creation."""

    def test_arg_passing(self):
        # Make sure that parameter passing works.
        def arg_tester(queue, arg1=False, arg2=False):
            """Use to test _thread.start_new_thread() passes args properly."""
            queue.put((arg1, arg2))

        testing_queue = Queue.Queue(1)
        # Positional args supplied as a tuple.
        _thread.start_new_thread(arg_tester, (testing_queue, True, True))
        result = testing_queue.get()
        self.assertTrue(result[0] and result[1],
                        "Argument passing for thread creation using tuple failed")
        # Keyword args only.
        _thread.start_new_thread(arg_tester, tuple(), {'queue': testing_queue,
                                                       'arg1': True, 'arg2': True})
        result = testing_queue.get()
        self.assertTrue(result[0] and result[1],
                        "Argument passing for thread creation using kwargs failed")
        # Mixed positional and keyword args.
        _thread.start_new_thread(arg_tester, (testing_queue, True), {'arg2': True})
        result = testing_queue.get()
        self.assertTrue(result[0] and result[1],
                        "Argument passing for thread creation using both tuple"
                        " and kwargs failed")

    def test_multi_creation(self):
        # Make sure multiple threads can be created.
        def queue_mark(queue, delay):
            """Wait for ``delay`` seconds and then put something into ``queue``"""
            time.sleep(delay)
            queue.put(_thread.get_ident())

        thread_count = 5
        testing_queue = Queue.Queue(thread_count)
        if test_support.verbose:
            print
            print "*** Testing multiple thread creation "\
                  "(will take approx. %s to %s sec.) ***" % (DELAY, thread_count)
        for count in xrange(thread_count):
            if DELAY:
                # Stagger threads with a small random delay when testing a
                # real thread module (DELAY > 0).
                local_delay = round(random.random(), 1)
            else:
                local_delay = 0
            _thread.start_new_thread(queue_mark,
                                     (testing_queue, local_delay))
        time.sleep(DELAY)
        if test_support.verbose:
            print 'done'
        self.assertTrue(testing_queue.qsize() == thread_count,
                        "Not all %s threads executed properly after %s sec." %
                        (thread_count, DELAY))
def test_main(imported_module=None):
    """Run the test suite, optionally against a supplied thread module.

    When ``imported_module`` is given it replaces dummy_thread as the
    implementation under test, and DELAY is raised so real blocking
    behaviour can be observed.
    """
    global _thread, DELAY
    if imported_module:
        _thread = imported_module
        DELAY = 2
    if test_support.verbose:
        print
        print "*** Using %s as _thread module ***" % _thread
    test_support.run_unittest(LockTests, MiscTests, ThreadTests)


if __name__ == '__main__':
    test_main()
# MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2018
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import unittest
import numpy as np
from art.estimators.classification.classifier import ClassGradientsMixin, ClassifierMixin
from art.estimators.estimator import BaseEstimator, LossGradientsMixin, NeuralNetworkMixin
from tests.utils import TestBase, master_seed
logger = logging.getLogger(__name__)
class ClassifierInstance(ClassifierMixin, BaseEstimator):
    """Minimal concrete ClassifierMixin/BaseEstimator used to test base-class behaviour."""

    estimator_params = BaseEstimator.estimator_params + ClassifierMixin.estimator_params

    def __init__(self, clip_values=None, channels_first=True):
        # NOTE(review): channels_first is accepted but never stored or forwarded
        # to the base class — confirm whether this is intentional.
        super(ClassifierInstance, self).__init__(model=None, clip_values=clip_values)

    def fit(self, x, y, **kwargs):
        """No-op stub for the abstract fit method."""
        pass

    def predict(self, x, **kwargs):
        """No-op stub for the abstract predict method."""
        pass

    def nb_classes(self):
        """No-op stub for the abstract nb_classes accessor."""
        pass

    def save(self, filename, path=None):
        """No-op stub for the abstract save method."""
        pass

    def input_shape(self):
        """No-op stub for the abstract input_shape accessor."""
        pass
class ClassifierNeuralNetworkInstance(
    ClassGradientsMixin, ClassifierMixin, NeuralNetworkMixin, LossGradientsMixin, BaseEstimator
):
    """Minimal concrete neural-network classifier used to test base-class behaviour."""

    estimator_params = (
        BaseEstimator.estimator_params + NeuralNetworkMixin.estimator_params + ClassifierMixin.estimator_params
    )

    def __init__(self, clip_values, channels_first=True):
        super(ClassifierNeuralNetworkInstance, self).__init__(
            model=None, clip_values=clip_values, channels_first=channels_first
        )

    def METHOD_NAME(self, x, label=None, **kwargs):
        """No-op stub for the abstract class-gradient method."""
        pass

    def fit(self, x, y, batch_size=128, nb_epochs=20, **kwargs):
        """No-op stub for the abstract fit method."""
        pass

    def get_activations(self, x, layer, batch_size):
        """No-op stub for the abstract get_activations method."""
        pass

    def compute_loss(self, x, y, **kwargs):
        """No-op stub for the abstract compute_loss method."""
        pass

    def loss_gradient(self, x, y, **kwargs):
        """No-op stub for the abstract loss_gradient method."""
        pass

    def predict(self, x, batch_size=128, **kwargs):
        """No-op stub for the abstract predict method."""
        pass

    def nb_classes(self):
        """No-op stub for the abstract nb_classes accessor."""
        pass

    def save(self, filename, path=None):
        """No-op stub for the abstract save method."""
        pass

    def layer_names(self):
        """No-op stub for the abstract layer_names accessor."""
        pass

    def input_shape(self):
        """No-op stub for the abstract input_shape accessor."""
        pass
class TestClassifier(TestBase):
    """Tests for the plain ClassifierMixin/BaseEstimator combination."""

    @classmethod
    def setUpClass(cls):
        # Seed before class-level fixtures so any random data is reproducible.
        master_seed(seed=1234)
        super().setUpClass()

    def setUp(self):
        # Re-seed before every test for deterministic np.random draws.
        master_seed(seed=1234)
        super().setUp()

    def test_preprocessing_normalisation(self):
        """Check preprocessing output matches expected values for seeded input."""
        classifier = ClassifierInstance()
        x = np.random.rand(2, 3)
        x_new, _ = classifier._apply_preprocessing(x=x, y=None, fit=False)
        x_new_expected = np.asarray([[0.19151945, 0.62210877, 0.43772774], [0.78535858, 0.77997581, 0.27259261]])
        np.testing.assert_array_almost_equal(x_new, x_new_expected)

    def test_repr(self):
        """Check repr() contains the class name and default estimator params."""
        classifier = ClassifierInstance()
        repr_ = repr(classifier)
        self.assertIn("ClassifierInstance", repr_)
        self.assertIn("clip_values=None", repr_)
        self.assertIn("defences=None", repr_)
        self.assertIn(
            "preprocessing=StandardisationMeanStd(mean=0.0, std=1.0, apply_fit=True, apply_predict=True)", repr_
        )
class TestClassifierNeuralNetwork(TestBase):
    """Tests for the neural-network classifier mixin combination."""

    @classmethod
    def setUpClass(cls):
        # Seed before class-level fixtures so any random data is reproducible.
        master_seed(seed=1234)
        super().setUpClass()

    def setUp(self):
        # Re-seed before every test for deterministic np.random draws.
        master_seed(seed=1234)
        super().setUp()

    def test_preprocessing_normalisation(self):
        """Check preprocessing output matches expected values for seeded input."""
        classifier = ClassifierNeuralNetworkInstance((0, 1))
        x = np.random.rand(2, 3)
        x_new_expected = np.asarray([[0.19151945, 0.62210877, 0.43772774], [0.78535858, 0.77997581, 0.27259261]])
        x_new, _ = classifier._apply_preprocessing(x, y=None, fit=False)
        np.testing.assert_array_almost_equal(x_new, x_new_expected, decimal=4)

    def test_repr(self):
        """Check repr() contains the class name and constructor-derived params."""
        classifier = ClassifierNeuralNetworkInstance((0, 1))
        repr_ = repr(classifier)
        self.assertIn("ClassifierNeuralNetworkInstance", repr_)
        self.assertIn("channels_first=True", repr_)
        self.assertIn("clip_values=[0. 1.]", repr_)
        self.assertIn("defences=None", repr_)
        self.assertIn(
            "preprocessing=StandardisationMeanStd(mean=0.0, std=1.0, apply_fit=True, apply_predict=True)", repr_
        )


if __name__ == "__main__":
    unittest.main()
#pylint: disable=missing-docstring
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import glob
import shutil
import subprocess
import numpy as np
import vtk
import mooseutils
from .Options import Option, Options
from . import AxisOptions
from . import FontOptions
from . import LegendOptions
def get_active_filenames(basename, pattern=None):
    """
    Return a list of tuples containing 'active' filenames and modified times.

    Inputs:
        basename[str]: The base filename (e.g., file_out.e)
        pattern[str]: (Optional) Additional files to consider via glob pattern (e.g., file_out.e-s*)
    """
    def file_number(fname):
        # Sort key: the integer suffix after '.e-s', or 0 for the base file.
        marker = fname.find('.e-s')
        if marker > 0:
            return int(fname[marker + 4:])
        return 0

    def mtime(fname):
        # Missing files get a modified time of 0.
        return os.path.getmtime(fname) if os.path.exists(fname) else 0

    # Gather the base file plus any adaptivity steps, sorted by step number.
    filenames = [basename]
    if pattern:
        filenames += glob.glob(pattern)
    filenames.sort(key=file_number)

    # Keep files at least as new as the base file.
    stats = [(fname, mtime(fname)) for fname in filenames]
    threshold = stats[0][1]
    return [pair for pair in stats if pair[1] >= threshold]
def METHOD_NAME(testbase):
    """
    A helper for copying test Exodus files.

    Copies step10_micro_out.e (and its adaptivity-step siblings) from the
    package test inputs into the current working directory under the
    ``testbase`` name, returning the sorted list of copied filenames.
    """
    src_base = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'tests', 'input',
                                            'step10_micro_out.e'))
    copied = []
    for source in [src_base] + glob.glob(src_base + '-s*'):
        extension = os.path.splitext(source)[1]
        destination = os.path.join(os.getcwd(), testbase + extension)
        shutil.copy(source, destination)
        copied.append(destination)
    return sorted(copied)
def get_bounds_min_max(*all_bounds):
    """
    Returns min,max bounds arrays provided a list of bounds sets.

    Each bounds set follows the VTK convention
    (xmin, xmax, ymin, ymax, zmin, zmax).
    """
    xmin = [float('inf')] * 3
    xmax = [float('-inf')] * 3
    for bounds in all_bounds:
        for axis in range(3):
            xmin[axis] = min(xmin[axis], bounds[2 * axis])
            xmax[axis] = max(xmax[axis], bounds[2 * axis + 1])
    return xmin, xmax
def get_bounds(*sources):
    """
    Returns the bounding box for all supplied sources.
    """
    all_bounds = [src.getVTKMapper().GetBounds() for src in sources]
    return get_bounds_min_max(*all_bounds)
def compute_distance(*sources):
    """
    Returns the distance across the bounding box for all supplied sources.
    """
    low, high = get_bounds(*sources)
    return np.linalg.norm(np.array(high) - np.array(low))
def get_min_max(*pairs):
    """
    Returns the overall (min, max) from a set of (min, max) pairs.
    """
    overall_min = float('inf')
    overall_max = float('-inf')
    for low, high in pairs:
        if low < overall_min:
            overall_min = low
        if high > overall_max:
            overall_max = high
    return overall_min, overall_max
def print_camera(camera, prefix='camera', precision=10):
    """
    Returns a list of strings that reproduce the camera view settings.

    Despite the name, nothing is printed for a valid camera; the returned
    strings are statements of the form ``camera.SetViewUp(...)`` etc. that
    can be pasted into a script to restore the view.

    Inputs:
        camera[vtk.vtkCamera]: The camera to dump.
        prefix[str]: Variable name used in the generated statements.
        precision[int]: Digits used when formatting vector components.
    """
    if not isinstance(camera, vtk.vtkCamera):
        # Fixed typo in the user-facing message ("vtkCarmera").
        print("You must supply a vtkCamera object.")
        return
    view_up = camera.GetViewUp()
    position = camera.GetPosition()
    focal = camera.GetFocalPoint()

    def dump(precision, vec):
        """
        Helper for formatting a 3-vector with the requested precision.
        """
        p = str(precision)
        frmt = ''.join(['{:', p, '.', p, 'f}'])
        d = ''.join(['(', frmt, ', ', frmt, ', ', frmt, ')'])
        return d.format(*vec)

    return [prefix + '.SetViewUp' + dump(precision, view_up),
            prefix + '.SetPosition' + dump(precision, position),
            prefix + '.SetFocalPoint' + dump(precision, focal)]
def animate(pattern, output, delay=20, restart_delay=500, loop=True):
    """
    Runs ImageMagick convert to create an animated gif from a series of images.

    Inputs:
        pattern[str]: Glob pattern selecting the input frames.
        output[str]: The output gif filename.
        delay[int]: Per-frame delay passed to convert.
        restart_delay[int]: Delay applied to the final frame before looping.
        loop[bool]: When True the gif loops forever.
    """
    filenames = sorted(glob.glob(pattern))
    if not filenames:
        # Nothing to animate; previously this raised IndexError on delay[-1]
        # and would have invoked convert with no input frames.
        return
    delay = [delay] * len(filenames)
    delay[-1] = restart_delay
    cmd = ['convert']
    for d, f in zip(delay, filenames):
        cmd += ['-delay', str(d), f]
    if loop:
        cmd += ['-loop', '0']
    cmd += [output]
    subprocess.call(cmd)
def img2mov(pattern, output, ffmpeg='ffmpeg', duration=60, framerate=None, bitrate='10M',
            num_threads=1, quality=1, dry_run=False, output_framerate_increase=0, overwrite=False):
    """
    Use ffmpeg to convert a series of images to a movie.

    Args:
        pattern[str]: The glob pattern defining the files to be converted.
        output[str]: The name of the output file, including the extension.
        ffmpeg[str]: The ffmpeg executable.
        duration[int]: The desired duration of the movie (in seconds)
        framerate[int]: Ignores the duration and sets the movie framerate directly.
        bitrate[str]: The ffmpeg "-b:v" setting.
        num_threads[int]: The number of threads to utilize in rendering.
        quality[int]: The ffmpeg quality setting (ranges from 1 to 31).
        dry_run[bool]: When True the command is printed but not executed.
        output_framerate_increase[float]: Output framerate adjustment to help guarantee no
            dropped frames; if you see dropped frames in ffmpeg output, increase this number.
        overwrite[bool]: When True pass '-y' to ffmpeg to overwrite an existing output file.
    """
    # Compute framerate from the duration if framerate is not given. Clamp to a
    # minimum of 1: with fewer images than seconds the old int(n/duration) was 0,
    # which is an invalid ffmpeg framerate.
    if not framerate:
        n = len(glob.glob(pattern))
        framerate = max(1, int(n / duration))
    # Build the command
    cmd = [ffmpeg]
    cmd += ['-pattern_type', 'glob']
    cmd += ['-framerate', str(framerate)]
    cmd += ['-i', pattern]
    cmd += ['-b:v', bitrate]
    cmd += ['-pix_fmt', 'yuv420p']
    cmd += ['-q:v', str(quality)]
    cmd += ['-threads', str(num_threads)]
    cmd += ['-framerate', str(framerate + output_framerate_increase)]
    if overwrite:
        cmd += ['-y']
    cmd += [output]
    # Echo the full command for reproducibility.
    c = ' '.join(cmd)
    print('{0}\n{1}\n{0}'.format('-'*(len(c)), c))
    if not dry_run:
        subprocess.call(cmd)
"""Test runway.cfngin.hooks.awslambda.source_code."""
# pylint: disable=protected-access, unnecessary-dunder-call
from __future__ import annotations
from pathlib import Path
from typing import TYPE_CHECKING
import pytest
from mock import Mock, call
from runway.cfngin.hooks.awslambda.source_code import SourceCode
if TYPE_CHECKING:
from pytest_mock import MockerFixture
MODULE = "runway.cfngin.hooks.awslambda.source_code"
class TestSourceCode:
    """Test SourceCode."""

    def test___eq___other(self, tmp_path: Path) -> None:
        """Test __eq__ against non-SourceCode objects (never equal)."""
        assert SourceCode(tmp_path) != tmp_path
        assert SourceCode(tmp_path) != str(tmp_path)

    def test___eq___source_code(self, tmp_path: Path) -> None:
        """Test __eq__ against other SourceCode objects (path-based equality)."""
        assert SourceCode(tmp_path) == SourceCode(tmp_path)
        assert SourceCode(tmp_path) == SourceCode(str(tmp_path))
        assert SourceCode(tmp_path) != SourceCode(tmp_path / "foo")

    def test___fspath__(self, tmp_path: Path) -> None:
        """Test __fspath__ returns the root directory as a string."""
        assert SourceCode(tmp_path).__fspath__() == str(tmp_path)

    def METHOD_NAME(self, mocker: MockerFixture, tmp_path: Path) -> None:
        """Test __init__ defaults (gitignore filter built from rule files)."""
        gitignore_filter = mocker.patch("igittigitt.IgnoreParser", Mock())
        gitignore_filter.return_value = gitignore_filter
        obj = SourceCode(tmp_path)
        assert obj._include_files_in_hash == []
        assert obj.gitignore_filter == gitignore_filter
        gitignore_filter.assert_called_once_with()
        assert obj.project_root == tmp_path
        assert obj.root_directory == tmp_path
        # The default filter parses rule files and always excludes git data.
        gitignore_filter.parse_rule_files.assert_called_once_with(tmp_path)
        gitignore_filter.add_rule.assert_has_calls(
            [call(".git/", tmp_path), call(".gitignore", tmp_path)]
        )

    def test___init___gitignore_filter_provided(self, tmp_path: Path) -> None:
        """Test __init__ with an explicit gitignore_filter (no rule parsing)."""
        gitignore_filter = Mock()
        obj = SourceCode(
            tmp_path,
            gitignore_filter=gitignore_filter,
            include_files_in_hash=[tmp_path],
        )
        assert obj._include_files_in_hash == [tmp_path]
        assert obj.gitignore_filter == gitignore_filter
        gitignore_filter.parse_rule_files.assert_not_called()
        gitignore_filter.add_rule.assert_not_called()

    def test___init___handle_str(self, mocker: MockerFixture, tmp_path: Path) -> None:
        """Test __init__ converts str paths to pathlib.Path."""
        gitignore_filter = mocker.patch("igittigitt.IgnoreParser", Mock())
        gitignore_filter.return_value = gitignore_filter
        src_path = tmp_path / "src"
        obj = SourceCode(str(src_path), project_root=str(tmp_path))
        assert obj.project_root == tmp_path
        assert obj.root_directory == src_path
        assert isinstance(obj.root_directory, Path)

    def test___iter__(self, tmp_path: Path) -> None:
        """Test __iter__ yields only files not matched by the gitignore filter."""
        src_path = tmp_path / "src"
        src_path.mkdir()
        file0 = src_path / "foo0.txt"
        file0.touch()
        file1 = src_path / "foo1.txt"
        file1.touch()
        # Directories should not be yielded, only files.
        (src_path / "dir").mkdir()
        # First match call returns False (kept), second True (filtered out).
        gitignore_filter = Mock(match=Mock(side_effect=[False, True]))
        assert (
            len(
                list(
                    iter(
                        SourceCode(
                            src_path,
                            gitignore_filter=gitignore_filter,
                            project_root=tmp_path,
                        )
                    )
                )
            )
            == 1
        )
        gitignore_filter.match.assert_has_calls(
            [call(file0), call(file1)], any_order=True
        )

    def test___str__(self, tmp_path: Path) -> None:
        """Test __str__ returns the root directory as a string."""
        assert str(SourceCode(tmp_path)) == str(tmp_path)

    def test___truediv__(self, tmp_path: Path) -> None:
        """Test __truediv__ joins paths like pathlib.Path."""
        assert SourceCode(tmp_path) / "foo" == tmp_path / "foo"

    def test_add_filter_rule(self, tmp_path: Path) -> None:
        """Test add_filter_rule forwards to the gitignore filter."""
        gitignore_filter = Mock()
        pattern = "foobar/"
        src_path = tmp_path / "src"
        obj = SourceCode(
            src_path, gitignore_filter=gitignore_filter, project_root=tmp_path
        )
        assert not obj.add_filter_rule(pattern)
        gitignore_filter.add_rule.assert_called_once_with(
            pattern=pattern, base_path=src_path
        )

    def test_md5_hash(self, mocker: MockerFixture, tmp_path: Path) -> None:
        """Test md5_hash hashes files relative to the project root."""
        file_hash = Mock(hexdigest="success")
        mock_file_hash_class = mocker.patch(
            f"{MODULE}.FileHash", return_value=file_hash
        )
        mock_md5 = mocker.patch("hashlib.md5")
        src_path = tmp_path / "src"
        src_path.mkdir()
        test_file = src_path / "test.txt"
        assert (
            SourceCode(
                src_path,
                gitignore_filter=Mock(),
                include_files_in_hash=[test_file],
                project_root=tmp_path,
            ).md5_hash
            == file_hash.hexdigest
        )
        mock_file_hash_class.assert_called_once_with(mock_md5.return_value)
        file_hash.add_files.assert_called_once_with([test_file], relative_to=tmp_path)

    @pytest.mark.parametrize("reverse", [False, True])
    def test_sorted(self, mocker: MockerFixture, reverse: bool, tmp_path: Path) -> None:
        """Test sorted delegates to the builtin sorted with reverse forwarded."""
        mock_sorted = mocker.patch(f"{MODULE}.sorted", return_value="success")
        obj = SourceCode(tmp_path)
        assert obj.sorted(reverse=reverse)
        mock_sorted.assert_called_once_with(obj, reverse=reverse)
import platform
from unittest.mock import patch
from uuid import uuid4
import responses
import sentry
from sentry import options
from sentry.models import Broadcast
from sentry.tasks.beacon import BEACON_URL, send_beacon, send_beacon_metric
from sentry.testutils.cases import TestCase
from sentry.testutils.silo import no_silo_test
from sentry.utils import json
@no_silo_test(stable=True)
class SendBeaconTest(TestCase):
    """Tests for the send_beacon / send_beacon_metric tasks."""

    @patch("sentry.tasks.beacon.get_all_package_versions")
    @patch("sentry.tasks.beacon.safe_urlopen")
    @patch("sentry.tasks.beacon.safe_urlread")
    @responses.activate
    def test_simple(self, safe_urlread, safe_urlopen, mock_get_all_package_versions):
        """Non-anonymous beacon includes the admin email in the payload."""
        # Touch the fixtures so the counts in the payload are all 1.
        self.organization
        self.project
        self.team
        mock_get_all_package_versions.return_value = {"foo": "1.0"}
        safe_urlread.return_value = json.dumps({"notices": [], "version": {"stable": "1.0.0"}})
        assert options.set("system.admin-email", "foo@example.com")
        assert options.set("beacon.anonymous", False)
        send_beacon()
        install_id = options.get("sentry:install-id")
        assert install_id and len(install_id) == 40
        safe_urlopen.assert_called_once_with(
            BEACON_URL,
            json={
                "install_id": install_id,
                "version": sentry.get_version(),
                "docker": sentry.is_docker(),
                "python_version": platform.python_version(),
                "data": {
                    "organizations": 1,
                    "users": 1,
                    "projects": 1,
                    "teams": 1,
                    "events.24h": 0,
                },
                "anonymous": False,
                "admin_email": "foo@example.com",
                "packages": mock_get_all_package_versions.return_value,
            },
            timeout=5,
        )
        safe_urlread.assert_called_once_with(safe_urlopen.return_value)
        # The latest-version option should be updated from the response.
        assert options.get("sentry:latest_version") == "1.0.0"

    @patch("sentry.tasks.beacon.get_all_package_versions")
    @patch("sentry.tasks.beacon.safe_urlopen")
    @patch("sentry.tasks.beacon.safe_urlread")
    @responses.activate
    def METHOD_NAME(self, safe_urlread, safe_urlopen, mock_get_all_package_versions):
        """Anonymous beacon omits the admin email from the payload."""
        # Touch the fixtures so the counts in the payload are all 1.
        self.organization
        self.project
        self.team
        mock_get_all_package_versions.return_value = {"foo": "1.0"}
        safe_urlread.return_value = json.dumps({"notices": [], "version": {"stable": "1.0.0"}})
        assert options.set("system.admin-email", "foo@example.com")
        assert options.set("beacon.anonymous", True)
        send_beacon()
        install_id = options.get("sentry:install-id")
        assert install_id and len(install_id) == 40
        safe_urlopen.assert_called_once_with(
            BEACON_URL,
            json={
                "install_id": install_id,
                "version": sentry.get_version(),
                "docker": sentry.is_docker(),
                "python_version": platform.python_version(),
                "data": {
                    "organizations": 1,
                    "users": 1,
                    "projects": 1,
                    "teams": 1,
                    "events.24h": 0,
                },
                "anonymous": True,
                "packages": mock_get_all_package_versions.return_value,
            },
            timeout=5,
        )
        safe_urlread.assert_called_once_with(safe_urlopen.return_value)
        assert options.get("sentry:latest_version") == "1.0.0"

    @patch("sentry.tasks.beacon.get_all_package_versions")
    @patch("sentry.tasks.beacon.safe_urlopen")
    @patch("sentry.tasks.beacon.safe_urlread")
    @responses.activate
    def test_with_broadcasts(self, safe_urlread, safe_urlopen, mock_get_all_package_versions):
        """Broadcast notices in the response are synced, deduped and deactivated."""
        broadcast_id = uuid4().hex
        mock_get_all_package_versions.return_value = {}
        safe_urlread.return_value = json.dumps(
            {
                "notices": [
                    {
                        "id": broadcast_id,
                        "title": "Hello!",
                        "message": "Hello world",
                        "active": True,
                    }
                ],
                "version": {"stable": "1.0.0"},
            }
        )
        with self.settings():
            send_beacon()
        assert Broadcast.objects.count() == 1
        broadcast = Broadcast.objects.get(upstream_id=broadcast_id)
        assert broadcast.title == "Hello!"
        assert broadcast.message == "Hello world"
        assert broadcast.is_active
        # ensure we arent duplicating the broadcast
        with self.settings():
            send_beacon()
        assert Broadcast.objects.count() == 1
        broadcast = Broadcast.objects.get(upstream_id=broadcast_id)
        assert broadcast.title == "Hello!"
        assert broadcast.message == "Hello world"
        assert broadcast.is_active
        # now remove it and it should become inactive
        safe_urlread.return_value = json.dumps({"notices": [], "version": {"stable": "1.0.0"}})
        with self.settings():
            send_beacon()
        assert Broadcast.objects.count() == 1
        broadcast = Broadcast.objects.get(upstream_id=broadcast_id)
        assert not broadcast.is_active

    @patch("sentry.tasks.beacon.get_all_package_versions")
    @patch("sentry.tasks.beacon.safe_urlopen")
    @patch("sentry.tasks.beacon.safe_urlread")
    @responses.activate
    def test_disabled(self, safe_urlread, safe_urlopen, mock_get_all_package_versions):
        """No beacon request is made when SENTRY_BEACON is disabled."""
        mock_get_all_package_versions.return_value = {"foo": "1.0"}
        with self.settings(SENTRY_BEACON=False):
            send_beacon()
            assert not safe_urlopen.mock_calls

    @patch("sentry.tasks.beacon.get_all_package_versions")
    @patch("sentry.tasks.beacon.safe_urlopen")
    @patch("sentry.tasks.beacon.safe_urlread")
    @responses.activate
    def test_debug(self, safe_urlread, safe_urlopen, mock_get_all_package_versions):
        """No beacon request is made when DEBUG is enabled."""
        mock_get_all_package_versions.return_value = {"foo": "1.0"}
        with self.settings(DEBUG=True):
            send_beacon()
            assert not safe_urlopen.mock_calls

    @patch("sentry.tasks.beacon.safe_urlopen")
    @responses.activate
    def test_metrics(self, safe_urlopen):
        """send_beacon_metric posts a metric-type payload with the install id."""
        metrics = [
            {
                "description": "SentryApp",
                "component": "Foo",
            },
            {
                "description": "SentryApp",
                "component": "Bar",
            },
        ]
        send_beacon_metric(metrics=metrics)
        install_id = options.get("sentry:install-id")
        assert install_id and len(install_id) == 40
        assert safe_urlopen.call_count == 1
        safe_urlopen.assert_called_once_with(
            BEACON_URL,
            json={
                "type": "metric",
                "install_id": install_id,
                "version": sentry.get_version(),
                "data": {"metrics": metrics},
            },
            timeout=5,
        )
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.models import (
FairseqEncoder,
FairseqEncoderModel,
register_model,
register_model_architecture,
)
from fairseq.modules.fairseq_dropout import FairseqDropout
default_conv_enc_config = """[
(400, 13, 170, 0.2),
(440, 14, 0, 0.214),
(484, 15, 0, 0.22898),
(532, 16, 0, 0.2450086),
(584, 17, 0, 0.262159202),
(642, 18, 0, 0.28051034614),
(706, 19, 0, 0.30014607037),
(776, 20, 0, 0.321156295296),
(852, 21, 0, 0.343637235966),
(936, 22, 0, 0.367691842484),
(1028, 23, 0, 0.393430271458),
(1130, 24, 0, 0.42097039046),
(1242, 25, 0, 0.450438317792),
(1366, 26, 0, 0.481969000038),
(1502, 27, 0, 0.51570683004),
(1652, 28, 0, 0.551806308143),
(1816, 29, 0, 0.590432749713),
]"""
@register_model("asr_w2l_conv_glu_encoder")
class W2lConvGluEncoderModel(FairseqEncoderModel):
    """Wav2Letter-style encoder-only ASR model built from Conv1d + GLU layers."""

    def __init__(self, encoder):
        super().__init__(encoder)

    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser."""
        parser.add_argument(
            "--input-feat-per-channel",
            type=int,
            metavar="N",
            help="encoder input dimension per input channel",
        )
        parser.add_argument(
            "--in-channels",
            type=int,
            metavar="N",
            help="number of encoder input channels",
        )
        parser.add_argument(
            "--conv-enc-config",
            type=str,
            metavar="EXPR",
            help="""
    an array of tuples each containing the configuration of one conv layer
    [(out_channels, kernel_size, padding, dropout), ...]
            """,
        )

    @classmethod
    def build_model(cls, args, task):
        """Build a new model instance."""
        conv_enc_config = getattr(args, "conv_enc_config", default_conv_enc_config)
        # NOTE: eval() on a user-supplied --conv-enc-config string executes
        # arbitrary code; only safe with trusted command lines.
        encoder = W2lConvGluEncoder(
            vocab_size=len(task.target_dictionary),
            input_feat_per_channel=args.input_feat_per_channel,
            in_channels=args.in_channels,
            conv_enc_config=eval(conv_enc_config),
        )
        return cls(encoder)

    def get_normalized_probs(self, net_output, log_probs, sample=None):
        """Get (log-)probabilities from the net output; marks them time-major."""
        lprobs = super().get_normalized_probs(net_output, log_probs, sample)
        lprobs.batch_first = False
        return lprobs
class W2lConvGluEncoder(FairseqEncoder):
    """Wav2letter-style encoder: a stack of weight-normalized 1d convolutions,
    each followed by a GLU (halving channels) and dropout, topped by two
    weight-normalized linear layers projecting to the vocabulary size.
    """

    def __init__(
        self, vocab_size, input_feat_per_channel, in_channels, conv_enc_config
    ):
        super().__init__(None)

        self.input_dim = input_feat_per_channel
        if in_channels != 1:
            raise ValueError("only 1 input channel is currently supported")

        self.conv_layers = nn.ModuleList()
        self.linear_layers = nn.ModuleList()
        # FIX: use a ModuleList (not a plain python list) so the dropout
        # modules are registered on this encoder and follow train()/eval()
        # mode switches; with a plain list, model.eval() would leave dropout
        # active at inference time. Dropout has no parameters, so checkpoint
        # state_dict keys are unchanged.
        self.dropouts = nn.ModuleList()
        cur_channels = input_feat_per_channel

        for out_channels, kernel_size, padding, dropout in conv_enc_config:
            layer = nn.Conv1d(cur_channels, out_channels, kernel_size, padding=padding)
            layer.weight.data.mul_(math.sqrt(3))  # match wav2letter init
            self.conv_layers.append(nn.utils.weight_norm(layer))
            self.dropouts.append(
                FairseqDropout(dropout, module_name=self.__class__.__name__)
            )
            if out_channels % 2 != 0:
                raise ValueError("odd # of out_channels is incompatible with GLU")
            cur_channels = out_channels // 2  # halved by GLU

        for out_channels in [2 * cur_channels, vocab_size]:
            layer = nn.Linear(cur_channels, out_channels)
            layer.weight.data.mul_(math.sqrt(3))
            self.linear_layers.append(nn.utils.weight_norm(layer))
            cur_channels = out_channels // 2

    def METHOD_NAME(self, src_tokens, src_lengths, **kwargs):
        """
        src_tokens: padded tensor (B, T, C * feat)
        src_lengths: tensor of original lengths of input utterances (B,)
        """
        B, T, _ = src_tokens.size()
        x = src_tokens.transpose(1, 2).contiguous()  # (B, feat, T) assuming C == 1

        for layer_idx in range(len(self.conv_layers)):
            x = self.conv_layers[layer_idx](x)
            x = F.glu(x, dim=1)
            x = self.dropouts[layer_idx](x)

        x = x.transpose(1, 2).contiguous()  # (B, T, 908)
        x = self.linear_layers[0](x)
        x = F.glu(x, dim=2)
        # reuse the last configured dropout rate for the linear projection
        x = self.dropouts[-1](x)
        x = self.linear_layers[1](x)
        assert x.size(0) == B
        assert x.size(1) == T

        encoder_out = x.transpose(0, 1)  # (T, B, vocab_size)
        # need to debug this -- find a simpler/elegant way in pytorch APIs
        # Positions >= utterance length are padding; transposed to (T, B).
        encoder_padding_mask = (
            torch.arange(T).view(1, T).expand(B, -1).to(x.device)
            >= src_lengths.view(B, 1).expand(-1, T)
        ).t()  # (B x T) -> (T x B)

        return {
            "encoder_out": encoder_out,  # (T, B, vocab_size)
            "encoder_padding_mask": encoder_padding_mask,  # (T, B)
        }

    def reorder_encoder_out(self, encoder_out, new_order):
        """Reorder the batch dimension (dim 1) of encoder output for beam search."""
        encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select(
            1, new_order
        )
        encoder_out["encoder_padding_mask"] = encoder_out[
            "encoder_padding_mask"
        ].index_select(1, new_order)
        return encoder_out

    def max_positions(self):
        """Maximum input length supported by the encoder."""
        return (1e6, 1e6)  # an arbitrary large number
@register_model_architecture("asr_w2l_conv_glu_encoder", "w2l_conv_glu_enc")
def w2l_conv_glu_enc(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.in_channels = getattr(args, "in_channels", 1)
args.conv_enc_config = getattr(args, "conv_enc_config", default_conv_enc_config) |
1,907 | create | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Interface for Dynamips virtual Cisco 1700 instances module ("c1700")
http://github.com/GNS3/dynamips/blob/master/README.hypervisor#L428
"""
import asyncio
from .router import Router
from ..adapters.c1700_mb_1fe import C1700_MB_1FE
from ..adapters.c1700_mb_wic1 import C1700_MB_WIC1
import logging
log = logging.getLogger(__name__)
class C1700(Router):

    """
    Dynamips c1700 router.

    :param name: The name of this router
    :param node_id: Node instance identifier
    :param project: Project instance
    :param manager: Parent VM Manager
    :param dynamips_id: ID to use with Dynamips
    :param console: console port
    :param console_type: console type (default "telnet")
    :param aux: auxiliary console port
    :param chassis: chassis for this router:
    1720, 1721, 1750, 1751 or 1760 (default = 1720).
    1710 is not supported.
    """

    def __init__(self, name, node_id, project, manager, dynamips_id, console=None, console_type="telnet", aux=None, chassis="1720"):

        super().__init__(name, node_id, project, manager, dynamips_id, console, console_type, aux, platform="c1700")

        # Set default values for this platform (must be the same as Dynamips)
        self._ram = 64
        self._nvram = 32
        self._disk0 = 0
        self._disk1 = 0
        self._chassis = chassis
        self._iomem = 15  # percentage
        self._clock_divisor = 8
        self._sparsemem = False  # never activate sparsemem for c1700 (unstable)

    def __json__(self):
        """
        Returns a JSON-serializable dict describing this router,
        merging the c1700-specific settings into the base router info.
        """

        c1700_router_info = {"iomem": self._iomem,
                             "chassis": self._chassis,
                             "sparsemem": self._sparsemem}

        router_info = Router.__json__(self)
        router_info.update(c1700_router_info)
        return router_info

    async def METHOD_NAME(self):
        """
        Creates this router instance, applying the configured chassis.

        A chassis command is only sent when the chassis differs from the
        Dynamips default ("1720").
        """

        await Router.METHOD_NAME(self)
        if self._chassis != "1720":
            await self.set_chassis(self._chassis)
        # NOTE(review): when a non-default chassis was set above, set_chassis
        # already ran _setup_chassis(); this second call rebuilds the slots.
        # Appears idempotent — confirm before removing.
        self._setup_chassis()

    def _setup_chassis(self):
        """
        Sets up the router with the corresponding chassis
        (create slots and insert default adapters).
        """

        # With 1751 and 1760, WICs in WIC slot 1 show up as in slot 1, not 0
        # e.g. s1/0 not s0/2
        if self._chassis in ['1751', '1760']:
            self._create_slots(2)
            self._slots[1] = C1700_MB_WIC1()
        else:
            self._create_slots(1)
        self._slots[0] = C1700_MB_1FE()

    @property
    def chassis(self):
        """
        Returns the chassis.

        :returns: chassis string
        """

        return self._chassis

    async def set_chassis(self, chassis):
        """
        Sets the chassis.

        :param: chassis string:
        1720, 1721, 1750, 1751 or 1760
        """

        await self._hypervisor.send('c1700 set_chassis "{name}" {chassis}'.format(name=self._name, chassis=chassis))

        log.info('Router "{name}" [{id}]: chassis set to {chassis}'.format(name=self._name,
                                                                           id=self._id,
                                                                           chassis=chassis))

        self._chassis = chassis
        self._setup_chassis()

    @property
    def iomem(self):
        """
        Returns I/O memory size for this router.

        :returns: I/O memory size (integer)
        """

        return self._iomem

    async def set_iomem(self, iomem):
        """
        Sets I/O memory size for this router.

        :param iomem: I/O memory size
        """

        await self._hypervisor.send('c1700 set_iomem "{name}" {size}'.format(name=self._name, size=iomem))

        log.info('Router "{name}" [{id}]: I/O memory updated from {old_iomem}% to {new_iomem}%'.format(name=self._name,
                                                                                                       id=self._id,
                                                                                                       old_iomem=self._iomem,
                                                                                                       new_iomem=iomem))
        self._iomem = iomem
1,908 | exeption handler | from __future__ import print_function
import sys
from qtpy import QtWidgets
from pprint import pformat
import atexit
import openpype.hosts.flame.api as opfapi
from openpype.pipeline import (
install_host,
registered_host,
)
def openpype_install():
    """Register OpenPype's Flame integration as the active host."""
    install_host(opfapi)
    host = registered_host()
    print("Registered host: {}".format(host))
# Exception handler
def METHOD_NAME(exctype, value, _traceback):
    """Exception handler for improving UX.

    Shows the exception in a Qt message box (with the full traceback in the
    detail pane) before delegating to the default hook.

    Args:
        exctype (type): exception class
        value (BaseException): exception instance
        _traceback (traceback): traceback object to format and display
    """
    import traceback
    msg = "OpenPype: Python exception {} in {}".format(value, exctype)
    mbox = QtWidgets.QMessageBox()
    mbox.setText(msg)
    mbox.setDetailedText(
        pformat(traceback.format_exception(exctype, value, _traceback)))
    # widen the dialog so long tracebacks stay readable
    mbox.setStyleSheet('QLabel{min-width: 800px;}')
    mbox.exec_()
    # keep default behaviour (prints to stderr) after showing the dialog
    sys.__excepthook__(exctype, value, _traceback)


# add exception handler into sys module
sys.excepthook = METHOD_NAME
# register clean up logic to be called at Flame exit
def cleanup():
    """Tear down the Flame framework context: drop loaded apps and
    persist/clear the framework preferences."""
    ctx = opfapi.CTX
    if ctx.flame_apps:
        print('`{}` cleaning up flame_apps:\n {}\n'.format(
            __file__, pformat(ctx.flame_apps)))
        while ctx.flame_apps:
            app = ctx.flame_apps.pop()
            print('`{}` removing : {}'.format(__file__, app.name))
            del app
        ctx.flame_apps = []

    if ctx.app_framework:
        print('openpype\t: {} cleaning up'.format(
            ctx.app_framework.bundle_name)
        )
        ctx.app_framework.save_prefs()
        ctx.app_framework = None


atexit.register(cleanup)
def load_apps():
    """Instantiate all OpenPype menu apps and attach them to the framework."""
    framework = opfapi.CTX.app_framework
    for app_class in (
        opfapi.FlameMenuProjectConnect,
        opfapi.FlameMenuTimeline,
        opfapi.FlameMenuUniversal,
    ):
        opfapi.CTX.flame_apps.append(app_class(framework))
    framework.log.info("Apps are loaded")
def project_changed_dict(info):
    """Flame hook: fired when the current project changes.

    Tears down the current framework context so it can be rebuilt for the
    new project.

    Args:
        info: project-change payload from Flame (name suggests a dict —
            unused here; confirm against the Flame hook API)
    """
    cleanup()
def app_initialized(parent=None):
    """Initialize the Flame app framework and load the menu apps.

    Args:
        parent (obj, optional): Parent object. Defaults to None.
    """
    framework = opfapi.FlameAppFramework()
    opfapi.CTX.app_framework = framework
    print("{} initializing".format(framework.bundle_name))
    load_apps()
"""
Initialisation of the hook is starting from here
First it needs to test if it can import the flame module.
This will happen only in case a project has been loaded.
Then `app_initialized` will load main Framework which will load
all menu objects as flame_apps.
"""
try:
    # `flame` is only importable from inside a loaded Flame project;
    # outside that context we skip framework initialization entirely.
    import flame  # noqa
    app_initialized(parent=None)
except ImportError:
    print("!!!! not able to import flame module !!!!")
def rescan_hooks():
    """Ask Flame to rescan its python hooks (refreshes registered menus)."""
    import flame  # noqa
    flame.execute_shortcut('Rescan Python Hooks')
def _build_app_menu(app_name):
    """Build the menu object for the app whose class name is ``app_name``.

    Args:
        app_name (str): class name of the menu app to build

    Returns:
        list: menu object (empty when no matching app is loaded)
    """
    menu = []

    # keep original semantics: when several apps match, the last one wins
    app = next(
        (candidate for candidate in reversed(opfapi.CTX.flame_apps)
         if candidate.__class__.__name__ == app_name),
        None,
    )
    if app:
        menu.append(app.build_menu())

    framework = opfapi.CTX.app_framework
    if framework:
        menu_auto_refresh = framework.prefs_global.get('menu_auto_refresh', {})
        if menu_auto_refresh.get('timeline_menu', True):
            try:
                import flame  # noqa
                flame.schedule_idle_event(rescan_hooks)
            except ImportError:
                print("!-!!! not able to import flame module !!!!")

    return menu
""" Flame hooks are starting here
"""
def project_saved(project_name, save_time, is_auto_save):
    """Hook to activate when project is saved.

    Persists the framework preferences on every (auto)save.

    Args:
        project_name (str): name of project
        save_time (str): time when it was saved
        is_auto_save (bool): autosave is on or off
    """
    if opfapi.CTX.app_framework:
        opfapi.CTX.app_framework.save_prefs()
def get_main_menu_custom_ui_actions():
    """Flame hook: build the OpenPype submenu for the start (main) menu.

    Returns:
        list: menu object from the FlameMenuProjectConnect app
    """
    # install openpype and the host
    openpype_install()

    return _build_app_menu("FlameMenuProjectConnect")
def get_timeline_custom_ui_actions():
    """Flame hook: build the OpenPype submenu for the timeline view.

    Returns:
        list: menu object from the FlameMenuTimeline app
    """
    # install openpype and the host
    openpype_install()

    return _build_app_menu("FlameMenuTimeline")
def get_batch_custom_ui_actions():
    """Flame hook: build the OpenPype submenu for the batch view.

    Returns:
        list: menu object from the FlameMenuUniversal app
    """
    # install openpype and the host
    openpype_install()

    return _build_app_menu("FlameMenuUniversal")
def get_media_panel_custom_ui_actions():
    """Flame hook: build the OpenPype submenu for the desktop/media panel.

    Returns:
        list: menu object from the FlameMenuUniversal app
    """
    # install openpype and the host
    openpype_install()

    return _build_app_menu("FlameMenuUniversal")
1,909 | read table | """
Tool for training the EnergyRegressor
"""
import numpy as np
from ctapipe.core import Tool
from ctapipe.core.traits import Int, IntTelescopeParameter, Path
from ctapipe.exceptions import TooFewEvents
from ctapipe.io import TableLoader
from ctapipe.reco import CrossValidator, EnergyRegressor
from ctapipe.reco.preprocessing import check_valid_rows
__all__ = [
"TrainEnergyRegressor",
]
class TrainEnergyRegressor(Tool):
    """
    Tool to train a `~ctapipe.reco.EnergyRegressor` on dl1b/dl2 data.

    The tool first performs a cross validation to give an initial estimate
    on the quality of the estimation and then finally trains one model
    per telescope type on the full dataset.
    """

    name = "ctapipe-train-energy-regressor"
    description = __doc__

    examples = """
    ctapipe-train-energy-regressor \\
        --config train_energy_regressor.yaml \\
        --input gamma.dl2.h5 \\
        --output energy_regressor.pkl
    """

    output_path = Path(
        default_value=None,
        allow_none=False,
        directory_ok=False,
        help=(
            "Output path for the trained reconstructor."
            " At the moment, pickle is the only supported format."
        ),
    ).tag(config=True)

    n_events = IntTelescopeParameter(
        default_value=None,
        allow_none=True,
        help=(
            "Number of events for training the model."
            " If not given, all available events will be used."
        ),
    ).tag(config=True)

    random_seed = Int(
        default_value=0, help="Random seed for sampling and cross validation"
    ).tag(config=True)

    aliases = {
        ("i", "input"): "TableLoader.input_url",
        ("o", "output"): "TrainEnergyRegressor.output_path",
        "n-events": "TrainEnergyRegressor.n_events",
        "cv-output": "CrossValidator.output_path",
    }

    classes = [
        TableLoader,
        EnergyRegressor,
        CrossValidator,
    ]

    def setup(self):
        """
        Initialize components from config
        """
        # loader is registered with enter_context so it is closed with the tool
        self.loader = self.enter_context(
            TableLoader(
                parent=self,
                load_dl1_images=False,
                load_dl1_parameters=True,
                load_dl2=True,
                load_simulated=True,
                load_instrument=True,
            )
        )
        self.n_events.attach_subarray(self.loader.subarray)
        self.regressor = EnergyRegressor(self.loader.subarray, parent=self)
        self.cross_validate = CrossValidator(
            parent=self, model_component=self.regressor
        )
        self.rng = np.random.default_rng(self.random_seed)
        self.check_output(self.output_path, self.cross_validate.output_path)

    def start(self):
        """
        Train models per telescope type.
        """
        types = self.loader.subarray.telescope_types
        self.log.info("Inputfile: %s", self.loader.input_url)

        self.log.info("Training models for %d types", len(types))
        for tel_type in types:
            self.log.info("Loading events for %s", tel_type)
            table = self.METHOD_NAME(tel_type)

            self.log.info("Train on %s events", len(table))
            self.cross_validate(tel_type, table)

            self.log.info("Performing final fit for %s", tel_type)
            self.regressor.fit(tel_type, table)
            self.log.info("done")

    def METHOD_NAME(self, telescope_type):
        """Read, quality-filter, feature-generate and subsample the training
        table for one telescope type.

        Raises ``TooFewEvents`` when no events survive reading or the
        quality query.
        """
        table = self.loader.read_telescope_events([telescope_type])
        self.log.info("Events read from input: %d", len(table))
        if len(table) == 0:
            raise TooFewEvents(
                f"Input file does not contain any events for telescope type {telescope_type}"
            )

        mask = self.regressor.quality_query.get_table_mask(table)
        table = table[mask]
        self.log.info("Events after applying quality query: %d", len(table))
        if len(table) == 0:
            raise TooFewEvents(
                f"No events after quality query for telescope type {telescope_type}"
            )

        table = self.regressor.feature_generator(table, subarray=self.loader.subarray)

        # keep only the model features plus the regression target
        feature_names = self.regressor.features + [self.regressor.target]
        table = table[feature_names]

        valid = check_valid_rows(table)
        if np.any(~valid):
            self.log.warning("Dropping non-predictable events.")
            table = table[valid]

        n_events = self.n_events.tel[telescope_type]
        if n_events is not None:
            if n_events > len(table):
                self.log.warning(
                    "Number of events in table (%d) is less than requested number of events %d",
                    len(table),
                    n_events,
                )
            else:
                self.log.info("Sampling %d events", n_events)
                # sorted indices keep the original event order after sampling
                idx = self.rng.choice(len(table), n_events, replace=False)
                idx.sort()
                table = table[idx]

        return table

    def finish(self):
        """
        Write-out trained models and cross-validation results.
        """
        self.log.info("Writing output")
        # NOTE(review): `overwrite` is not defined in this class — presumably
        # inherited from the Tool base class; confirm.
        self.regressor.write(self.output_path, overwrite=self.overwrite)
        if self.cross_validate.output_path:
            self.cross_validate.write(overwrite=self.overwrite)
        self.loader.close()
def main():
    """Console-script entry point: run the energy-regressor training tool."""
    tool = TrainEnergyRegressor()
    tool.run()


if __name__ == "__main__":
    main()
1,910 | test add instructions | #!/usr/bin/env python
import logging
import sys
from pathlib import Path
from unittest.mock import Mock
from pyscaffold import shell
from pyscaffold.api import create_project
from pyscaffold.cli import run
from pyscaffold.extensions import pre_commit
from pyscaffold.templates import get_template
def assert_in_logs(caplog, *expected):
    """Assert that every expected fragment appears in the captured log text."""
    captured = caplog.text
    for fragment in expected:
        assert fragment in captured
# ---- "Isolated" tests ----
def test_find_executable(monkeypatch):
    """find_executable stores a found pre-commit command in opts, otherwise
    schedules pre-commit for installation into the venv."""
    # When an executable can be found
    exec = Mock()
    monkeypatch.setattr(shell, "get_command", Mock(return_value=exec))
    _, opts = pre_commit.find_executable({}, {})
    # then pre-commit should not be added to venv_install
    assert "pre-commit" not in opts.get("venv_install", [])
    # and the command should be stored in opts
    assert opts[pre_commit.CMD_OPT] == exec

    # When an executable can not be found
    monkeypatch.setattr(shell, "get_command", Mock(return_value=None))
    _, opts = pre_commit.find_executable({}, {})
    # then pre-commit should be added to venv_install
    assert "pre-commit" in opts.get("venv_install", [])
    # and the command should not be stored in opts
    assert pre_commit.CMD_OPT not in opts
def test_install(monkeypatch, caplog):
    """pre_commit.install runs the hook installation, logs when the binary is
    missing, and survives shell errors."""
    caplog.set_level(logging.WARNING)

    # When an executable can be found
    exec = Mock()
    monkeypatch.setattr(shell, "get_command", Mock(return_value=exec))
    pre_commit.install({}, {})
    # then `pre-commit` install should run
    assert exec.called
    args, _ = exec.call_args
    assert "install" in args

    # When no executable can be found
    monkeypatch.setattr(shell, "get_command", Mock(return_value=None))
    pre_commit.install({}, {})
    # then the proper log message should be displayed
    msg = pre_commit.INSTALL_MSG.format(project_path="PROJECT_DIR")
    assert_in_logs(caplog, msg)

    # When an error occurs during installation
    caplog.set_level(logging.ERROR)
    exec = Mock(side_effect=shell.ShellCommandException)
    monkeypatch.setattr(shell, "get_command", Mock(return_value=exec))
    # then PyScaffold should not stop, only log the error.
    pre_commit.install({}, {})
    assert_in_logs(caplog, pre_commit.ERROR_MSG)

    # When a command is available in opts
    cmd = Mock()
    exec = Mock()
    monkeypatch.setattr(shell, "get_command", Mock(return_value=exec))
    # then it should be used, and get_command not called
    pre_commit.install({}, {pre_commit.CMD_OPT: cmd})
    assert cmd.called
    args, _ = cmd.call_args
    assert "install" in args
    assert not exec.called
def METHOD_NAME():
    """The pre-commit note should be appended to the readme template text."""
    template = get_template("readme")
    opts = {"title": "proj", "name": "proj", "description": "desc", "version": "99.9"}
    updated_text, _ = pre_commit.add_instructions(opts, template, Mock())
    expected_note = pre_commit.README_NOTE.format(**opts)
    assert expected_note in updated_text
# ---- Integration tests ----
def test_create_project_with_pre_commit(tmpfolder, caplog):
    """Creating a project with the extension generates pre-commit config
    files, a readme note, and an update reminder in the logs."""
    caplog.set_level(logging.WARNING)

    # Given options with the pre-commit extension,
    opts = dict(project_path="proj", extensions=[pre_commit.PreCommit("pre-commit")])

    # when the project is created,
    create_project(opts)

    # then pre-commit files should exist
    assert Path("proj/.pre-commit-config.yaml").exists()
    assert Path("proj/.isort.cfg").exists()
    note = pre_commit.README_NOTE.format(name="proj")
    assert note in Path("proj/README.rst").read_text()

    # and the user should be instructed to update pre-commit
    assert_in_logs(caplog, pre_commit.UPDATE_MSG)
def test_create_project_without_pre_commit(tmpfolder):
    """Without the extension, no pre-commit files are generated."""
    # Given options without the pre-commit extension,
    opts = dict(project_path="proj")

    # when the project is created,
    create_project(opts)

    # then pre-commit files should not exist
    assert not Path("proj/.pre-commit-config.yaml").exists()
    assert not Path("proj/.isort.cfg").exists()
def test_cli_with_pre_commit(tmpfolder):
    """The --pre-commit CLI flag generates the pre-commit config files."""
    # Given the command line with the pre-commit option,
    sys.argv = ["pyscaffold", "--pre-commit", "proj"]

    # when pyscaffold runs,
    run()

    # then pre-commit files should exist
    assert Path("proj/.pre-commit-config.yaml").exists()
    assert Path("proj/.isort.cfg").exists()
def test_cli_with_pre_commit_or_pretend(tmpfolder):
    """--pretend wins over --pre-commit: nothing is written to disk."""
    # Given the command line with the pre-commit option and pretend
    sys.argv = ["pyscaffold", "--pretend", "--pre-commit", "proj"]

    # when pyscaffold runs,
    run()

    # then pre-commit files should not exist (or the project itself)
    assert not Path("proj/.pre-commit-config.yaml").exists()
    assert not Path("proj").exists()
def test_cli_without_pre_commit(tmpfolder):
    """Without --pre-commit, the CLI must not generate pre-commit files."""
    # Given the command line without the pre-commit option,
    sys.argv = ["pyscaffold", "proj"]

    # when pyscaffold runs,
    run()

    # then pre-commit files should not exist
    for generated in (".pre-commit-config.yaml", ".isort.cfg"):
        assert not (Path("proj") / generated).exists()
1,911 | get sql hash | # Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import hashlib
from typing import (
Iterator, List, Optional,
)
from databuilder.models.graph_node import GraphNode
from databuilder.models.graph_relationship import GraphRelationship
from databuilder.models.query.base import QueryBase
from databuilder.models.table_metadata import TableMetadata
from databuilder.models.user import User as UserMetadata
class QueryMetadata(QueryBase):
    """
    Query model. This creates a Query object as well as relationships
    between the Query and the Table(s) that are used within the query.
    The list of TableMetadata corresponding to the tables used in the
    query must be provided. Optionally, the ID of the user that executed
    the query can be provided as well.

    By default, all tables and users must already exist in the database
    before this QueryMetadata object in order to create the relationships.
    Implementers have the option to set `yield_relation_nodes` = True
    in order to create all user and table nodes on the fly at the time
    that this QueryMetadata is created.
    """
    NODE_LABEL = 'Query'
    KEY_FORMAT = '{sql_hash}'

    # Relation between entity and query
    TABLE_QUERY_RELATION_TYPE = 'HAS_QUERY'
    INVERSE_TABLE_QUERY_RELATION_TYPE = 'QUERY_FOR'
    USER_QUERY_RELATION_TYPE = 'EXECUTED_QUERY'
    INVERSE_USER_QUERY_RELATION_TYPE = 'EXECUTED_BY'

    # Attributes
    SQL = 'sql'
    TABLES = 'tables'

    def __init__(
        self,
        sql: str,
        tables: List[TableMetadata],
        clean_sql: Optional[str] = None,   # normalized SQL used for hashing, if given
        user: Optional[UserMetadata] = None,  # Key for the user that executed the query
        yield_relation_nodes: bool = False  # Allow creation of related nodes if they do not exist
    ):
        """
        :param sql: Full, raw SQL for a given Query
        :param tables: List of table meteadata objects corresponding to tables in the query
        :param clean_sql: A modified sql that should be used to create the hash if available. This
            may be used if you have a query that is run on a set schedule but the where clause has
            a new date or hour value injected before the query is run. You can "clean" that value
            and pass in a SQL string that corresponds to the underlying query - which would should
            remain the same across executions.
        :param user: The user that executed the query.
        :param yield_relation_nodes: A boolean, indicating whether or not all tables and users
            associated to this query should have nodes created if they do not already exist.
        """
        self.sql = sql
        self.clean_sql = clean_sql
        self.sql_hash = self.METHOD_NAME(clean_sql or sql)
        self.tables = tables
        self.table_keys = [tm._get_table_key() for tm in tables]
        self.user = user
        self.yield_relation_nodes = yield_relation_nodes
        self._sql_begin = sql[:25] + '...'  # truncated SQL, used only by __repr__
        self._node_iter = self._create_next_node()
        self._relation_iter = self._create_relation_iterator()

    def __repr__(self) -> str:
        return f'QueryMetadata(SQL: {self._sql_begin}, Tables: {self.table_keys})'

    def METHOD_NAME(self, sql: str) -> str:
        """
        Generates a deterministic SQL hash. Attempts to remove any formatting from the
        SQL code where possible.
        """
        sql_no_fmt = self._normalize(sql)
        return hashlib.md5(sql_no_fmt.encode('utf-8')).hexdigest()

    def create_next_node(self) -> Optional[GraphNode]:
        """Return the next GraphNode, or None when all nodes were emitted."""
        try:
            return next(self._node_iter)
        except StopIteration:
            return None

    def create_next_relation(self) -> Optional[GraphRelationship]:
        """Return the next GraphRelationship, or None when exhausted."""
        try:
            return next(self._relation_iter)
        except StopIteration:
            return None

    @staticmethod
    def get_key(sql_hash: str) -> str:
        """Build the node key for a given SQL hash."""
        return QueryMetadata.KEY_FORMAT.format(sql_hash=sql_hash)

    def get_key_self(self) -> str:
        """Node key for this query (derived from its SQL hash)."""
        return QueryMetadata.get_key(self.sql_hash)

    def get_query_relations(self) -> List[GraphRelationship]:
        """Build Table->Query relationships (and User->Query when a user is set)."""
        relations = []

        for table_key in self.table_keys:
            table_relation = GraphRelationship(
                start_label=TableMetadata.TABLE_NODE_LABEL,
                end_label=self.NODE_LABEL,
                start_key=table_key,
                end_key=self.get_key_self(),
                type=self.TABLE_QUERY_RELATION_TYPE,
                reverse_type=self.INVERSE_TABLE_QUERY_RELATION_TYPE,
                attributes={}
            )
            relations.append(table_relation)

        if self.user:
            user_relation = GraphRelationship(
                start_label=UserMetadata.USER_NODE_LABEL,
                end_label=self.NODE_LABEL,
                start_key=self.user.get_user_model_key(email=self.user.email),
                end_key=self.get_key_self(),
                type=self.USER_QUERY_RELATION_TYPE,
                reverse_type=self.INVERSE_USER_QUERY_RELATION_TYPE,
                attributes={}
            )
            relations.append(user_relation)

        return relations

    def _create_next_node(self) -> Iterator[GraphNode]:
        """
        Create query nodes
        :return:
        """
        yield GraphNode(
            key=self.get_key_self(),
            label=self.NODE_LABEL,
            attributes={
                self.SQL: self.sql
            }
        )
        # Optionally also emit nodes for related tables and the user
        # so they do not have to pre-exist in the database.
        if self.yield_relation_nodes:
            for table in self.tables:
                for tbl_item in table._create_next_node():
                    yield tbl_item
            if self.user:
                usr = self.user.create_next_node()
                while usr:
                    yield usr
                    usr = self.user.create_next_node()

    def _create_relation_iterator(self) -> Iterator[GraphRelationship]:
        """Yield this query's relationships, plus related table/user
        relationships when yield_relation_nodes is enabled."""
        relations = self.get_query_relations()
        for relation in relations:
            yield relation
        if self.yield_relation_nodes:
            for table in self.tables:
                for tbl_rel in table._create_next_relation():
                    yield tbl_rel
            if self.user:
                for usr_rel in self.user._create_relation_iterator():
                    yield usr_rel
1,912 | is shortcut | import os
import sys
sys.setrecursionlimit(10000)
# Operator types that may carry trainable (persistable) weight variables.
ALL_WEIGHT_OP = [
    'conv2d', 'mul', 'matmul', 'embedding', 'conv2d_transpose',
    'depthwise_conv2d', 'batch_norm', 'layer_norm', 'instance_norm',
    'sync_batch_norm', 'matmul_v2'
]
def traversal_ops(op, graph, target_op_idx):
    """BFS from ``op`` collecting every operator on paths toward the target.

    Traversal stops along a branch when an optimizer/backward op is met, and
    expansion skips the target op itself and already-visited ops.

    Returns:
        tuple(list, list): the collected operators and their type strings,
        in visit order (the first entry is ``op`` itself).

    Note: the dead per-level bookkeeping of the original implementation
    (``cnt``/``level`` were built and discarded) has been removed; the FIFO
    visit order is unchanged.
    """
    pattern_ops = []
    pattern_ops_type = []
    visited = []
    queue = [op]
    while queue:
        cur = queue.pop(0)
        if cur.idx() not in visited:
            pattern_ops.append(cur)
            pattern_ops_type.append(cur.type())
            visited.append(cur.idx())
        for n_op in graph.next_ops(cur):
            # stop expanding this branch at optimizer/backward ops
            if n_op.is_opt_op() or n_op.is_bwd_op():
                break
            if n_op.idx() == target_op_idx or n_op.idx() in visited:
                continue
            queue.append(n_op)
    return pattern_ops, pattern_ops_type
def find_weight_op(op, graph):
    """Depth-first search downstream of ``op`` for the first operator that
    owns a trainable variable; returns None when no such op exists.

    FIX: the original ``else: return`` inside the loop recursed only into
    the FIRST successor and never examined sibling branches; now all
    successors are explored (in sorted order) until a weight op is found.
    """
    for next_op in sorted(graph.next_ops(op)):
        if has_trainable_var(next_op):
            return next_op
        found = find_weight_op(next_op, graph)
        if found is not None:
            return found
    return None
def get_weight(op, return_name=True):
    """Return the first persistable (weight) input of ``op``.

    Args:
        op: wrapped operator exposing ``all_inputs()``.
        return_name (bool): when True return the variable's name, otherwise
            the variable wrapper itself.

    Returns:
        The weight name or variable, or None when no input is persistable.
    """
    for candidate in op.all_inputs():
        if candidate._var.persistable:
            return candidate.name() if return_name else candidate
    return None
def has_trainable_var(op):
    """Return True when ``op`` is a weight-bearing operator type and at
    least one of its inputs is a persistable (trainable) variable."""
    if op.type() not in ALL_WEIGHT_OP:
        return False
    return any(inp._var.persistable for inp in sorted(op.all_inputs()))
def is_final_op_with_trainable_var(op, graph):
    """Return True when no operator downstream of ``op`` owns a trainable
    variable (i.e. ``op`` is the last weight-relevant op on its paths).

    FIX: the original ``return`` inside the loop decided based only on the
    FIRST successor branch; a weight op on any sibling branch was missed.
    All successor branches are now checked.
    """
    for next_op in sorted(graph.next_ops(op)):
        if has_trainable_var(next_op):
            return False
        if not is_final_op_with_trainable_var(next_op, graph):
            return False
    return True
def has_bias(op, graph):
    """Return the elementwise_add op carrying the bias for weight op ``op``,
    or None when the conv/fc + bias-add pattern is not present."""
    # NOTE(review): assumes `op` has at least one successor — an op at the
    # very end of the graph would raise IndexError here; confirm callers
    # guarantee this.
    n_op = graph.next_ops(op)[0]
    if op.type() in ALL_WEIGHT_OP:
        # weight op followed by an elementwise_add with a persistable input
        # is the bias-add pattern; return the add op owning the bias.
        if n_op.type() == 'elementwise_add':
            for inp in n_op.all_inputs():
                if inp._var.persistable == True:
                    return n_op
    return None
def _find_next_target_op(op, graph, target_op_idx, sc_path):
    """Walk downstream from ``op`` looking for the target op, recording the
    visited op types in ``sc_path``. Returns True when the target is reached."""
    if op.idx() == target_op_idx:
        return True
    n_ops = graph.next_ops(op)
    for n_op in n_ops:
        sc_path.append(n_op.type())
        # NOTE(review): the unconditional return inside this loop means only
        # the FIRST successor branch is ever followed; remaining siblings are
        # never explored. Confirm whether multi-branch graphs need a full
        # search here.
        return _find_next_target_op(n_op, graph, target_op_idx, sc_path)
    return False
def _is_identity_op(op):
if op.type() == 'scale' and op.attr('scale') == 1:
return True
return False
def METHOD_NAME(op, graph, sc_path, shortcut_start_op):
    """Detect whether ``op`` (an add-like junction) closes a residual shortcut:

    op /```````````````````\\ add
       \\____op1___op2__..._/

    On success the shortcut's split op is appended to ``shortcut_start_op``
    and the non-shortcut branch's op types to ``sc_path``. Returns a tuple
    ``(found, op_idx)``; ``(False, -1)`` when no shortcut junction is found.
    """
    inps = op.all_inputs()  # NOTE(review): unused — candidate for removal
    pre_ops = graph.pre_ops(op)
    for p_op in pre_ops:
        # step over identity scale ops when walking upstream
        if _is_identity_op(p_op):
            p_op = graph.pre_ops(p_op)[0]
        n_ops = graph.next_ops(p_op)
        if len(n_ops) == 1:
            continue  # no fan-out here, cannot be the start of a shortcut
        ### note: only support one branch donnot have op or has one scale op
        has_sc = False
        for n_op in n_ops:
            if _is_identity_op(n_op):
                n_op = graph.next_ops(n_op)[0]
            if n_op.idx() == op.idx():
                # p_op feeds `op` directly (or via identity) -> shortcut edge
                shortcut_start_op.append(p_op)
                has_sc = True
        if has_sc:
            for n_op in n_ops:
                if n_op.idx() != op.idx():
                    sc_path.append(p_op.type())
                    sc_path.append(n_op.type())
                    # follow the computational branch back to `op`
                    return _find_next_target_op(n_op, graph, op.idx(),
                                                sc_path), op.idx()
    return False, -1
1,913 | get next | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(**kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2020-11-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/providers/Microsoft.AppPlatform/operations")
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class Operations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.appplatform.v2020_11_01_preview.AppPlatformManagementClient`'s
        :attr:`operations` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs):
        # Accepts the client plumbing either positionally (in this order) or
        # by keyword: client, config, serializer, deserializer, api_version.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
        self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version")

    @distributed_trace
    def list(self, **kwargs: Any) -> Iterable["_models.OperationDetail"]:
        """Lists all of the available REST API operations of the Microsoft.AppPlatform provider.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either OperationDetail or the result of cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.appplatform.v2020_11_01_preview.models.OperationDetail]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        # API-version precedence: explicit kwarg, caller-supplied query
        # parameter, client-level version, then this module's default.
        api_version: str = kwargs.pop(
            "api_version", _params.pop("api-version", self._api_version or "2020-11-01-preview")
        )
        cls: ClsType[_models.AvailableOperations] = kwargs.pop("cls", None)

        # Map well-known HTTP status codes onto azure-core exception types;
        # callers may extend/override via the `error_map` kwarg.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page uses the templated URL; later pages follow the
            # service-provided next_link, re-applying the client api-version
            # to its query string.
            if not next_link:
                request = build_list_request(
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # One deserialized page -> (next page link or None, element iterator).
            deserialized = self._deserialize("AvailableOperations", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, iter(list_of_elem)

        def METHOD_NAME(next_link=None):
            # Fetch one page; only HTTP 200 is treated as success.
            request = prepare_request(next_link)

            _stream = False
            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(METHOD_NAME, extract_data)

    list.metadata = {"url": "/providers/Microsoft.AppPlatform/operations"}
###############################################################################
# ilastik: interactive learning and segmentation toolkit
#
# Copyright (C) 2011-2019, the ilastik team
# <team@ilastik.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# In addition, as a special exception, the copyright holders of
# ilastik give you permission to combine ilastik with applets,
# workflows and plugins which are not covered under the GNU
# General Public License.
#
# See the LICENSE file for details. License information is also available
# on the ilastik web site at:
# http://ilastik.org/license.html
###############################################################################
import logging
import os
import socket
from PyQt5 import uic, QtCore
from PyQt5.QtWidgets import QWidget, QStackedWidget, QListWidgetItem, QListWidget
from PyQt5.QtCore import QStateMachine, QState, QSignalTransition, pyqtSignal
logger = logging.getLogger(__name__)
from PyQt5.Qt import (
QIcon,
QStringListModel,
QAbstractItemModel,
QAbstractItemDelegate,
Qt,
QModelIndex,
QDataWidgetMapper,
pyqtProperty,
QItemDelegate,
QAbstractListModel,
QListWidgetItem,
pyqtSignal,
)
from PyQt5.QtWidgets import QWidget, QComboBox, QToolButton, QHBoxLayout, QVBoxLayout, QLabel, QLineEdit, QListWidget
from .serverConfigForm import ServerConfigForm, ServerFormWorkflow
from .serverListWidget import ServerListWidget, ServerListModel
from .configStorage import SERVER_CONFIG
from . import types
from ilastik import config
import tiktorch
class ServerConfigGui(QWidget):
    """Applet GUI for selecting and editing server connection configurations.

    Hosts a :class:`ServerConfigurationEditor` as its central widget and keeps
    the top-level operator's ``ServerId`` slot in sync with the user's choice.
    """

    # Emitted when device information has been received from a server.
    gotDevices = pyqtSignal()

    def centralWidget(self):
        return self._centralWidget

    def appletDrawer(self):
        return self._drawer

    def METHOD_NAME(self):
        # This applet contributes no menus to the shell.
        return []

    def viewerControlWidget(self):
        return None

    def getServerIdFromOp(self):
        # Current server id from the operator slot, or None if unset.
        if self.topLevelOp.ServerId.ready():
            return self.topLevelOp.ServerId.value
        return None

    def __init__(self, parentApplet, topLevelOperatorView):
        super().__init__()
        self.parentApplet = parentApplet
        self.topLevelOp = topLevelOperatorView
        self._centralWidget = self._makeServerConfigWidget(self.getServerIdFromOp())
        # When the user saves a server config, publish its id on the operator.
        self._centralWidget.saved.connect(self._serverSelected)
        self._initAppletDrawer()

    def _serverSelected(self):
        # Disconnect first so setValue always triggers downstream updates.
        self.topLevelOp.ServerId.disconnect()
        self.topLevelOp.ServerId.setValue(self._centralWidget.currentServerId())

    def _makeServerConfigWidget(self, serverId):
        # Build the editor, back it with the persisted server list, and
        # pre-select the server currently configured on the operator.
        w = ServerConfigurationEditor(self.parentApplet.connectionFactory)
        w.setModel(ServerListModel(conf_store=SERVER_CONFIG))
        w.selectServer(serverId)
        return w

    def _initAppletDrawer(self):
        """
        Load the ui file for the applet drawer.
        """
        local_dir = os.path.split(__file__)[0] + "/"
        self._drawer = uic.loadUi(local_dir + "/serverConfigDrawer.ui")

    def stopAndCleanUp(self):
        pass

    def setEnabled(self, enabled):
        pass

    def setImageIndex(self, index):
        pass

    def imageLaneAdded(self, laneIndex):
        pass

    def imageLaneRemoved(self, laneIndex, finalLength):
        pass

    def allowLaneSelectionChange(self):
        return False
class ServerFormItemDelegate(QItemDelegate):
    """Delegate that moves whole server-config objects between the model
    (stored under ``Qt.EditRole``) and a form editor's ``config`` attribute."""

    def setEditorData(self, editor: QWidget, index: QModelIndex) -> None:
        # Model -> editor: hand the config object to the form.
        conf = index.data(role=Qt.EditRole)
        editor.config = conf
        super().setEditorData(editor, index)

    def setModelData(self, editor: QWidget, model: QAbstractItemModel, index: QModelIndex) -> None:
        # Editor -> model: write the (possibly edited) config object back.
        conf = editor.config
        model.setData(index, conf)
class ServerConfigurationEditor(QWidget):
    """Composite widget: a server list above an editable config form, kept in
    sync via a :class:`QDataWidgetMapper`."""

    # Emitted with the config object when the selected server changes.
    currentConfigChanged = pyqtSignal(object)
    # Emitted after the user clicks the form's save button.
    saved = pyqtSignal()

    def __init__(self, connectionFactory, parent=None) -> None:
        super().__init__(parent)
        self._srv_list = ServerListWidget()
        self._srv_form = ServerConfigForm(connectionFactory)
        self._workflow = ServerFormWorkflow(self._srv_form)
        self._model = None
        layout = QVBoxLayout(self)
        layout.addWidget(self._srv_list)
        layout.addWidget(self._srv_form)

    def selectServer(self, serverId):
        self._srv_list.selectServer(serverId)

    def _selectedServer(self, idx):
        # Re-emit the config object for the newly selected row.
        data = self._model.index(idx).data(role=Qt.EditRole)
        self.currentConfigChanged.emit(data)

    def currentServerId(self):
        return self._srv_list.currentServerId()

    def setModel(self, model):
        # NOTE(review): the mapper is fully configured before the list's
        # signals are connected to it -- presumably order-sensitive; keep
        # this sequence when modifying.
        self._model = model
        self._srv_list.setModel(model)
        self._mapper = QDataWidgetMapper(self)
        self._mapper.setModel(model)
        self._mapper.setItemDelegate(ServerFormItemDelegate(self))
        # Column 1 of the model holds the config object edited by the form.
        self._mapper.addMapping(self._srv_form, 1)
        self._mapper.currentIndexChanged.connect(self._workflow.restart)
        self._mapper.setCurrentIndex(self._srv_list.currentIndex())
        # Manual submit: data flows back to the model only on "save".
        self._mapper.setSubmitPolicy(QDataWidgetMapper.ManualSubmit)
        self._srv_form.saveBtn.clicked.connect(self._mapper.submit)
        self._srv_form.saveBtn.clicked.connect(self.saved)
        self._srv_list.currentIndexChanged.connect(self._mapper.setCurrentIndex)
        self._srv_list.currentIndexChanged.connect(self._selectedServer)
import os
import socket
import struct
from urllib.parse import unquote, urlparse
"""
_url.py
websocket - WebSocket client library for Python
Copyright 2023 engn33r
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Public API of this module.
__all__ = ["parse_url", "get_proxy_info"]
def parse_url(url: str) -> tuple:
    """
    Split a ws/wss URL into its connection components.

    Returns ``(hostname, port, resource, is_secure)`` where ``resource`` is
    the path plus any query string, the port defaults to 80 for ``ws`` and
    443 for ``wss``, and ``is_secure`` is True only for ``wss``.

    Parameters
    ----------
    url: str
        url string.
    """
    if ":" not in url:
        raise ValueError("url is invalid")

    scheme, remainder = url.split(":", 1)
    parsed = urlparse(remainder, scheme="http")

    hostname = parsed.hostname
    if not hostname:
        raise ValueError("hostname is invalid")

    port = parsed.port or 0
    is_secure = False
    if scheme == "ws":
        port = port or 80
    elif scheme == "wss":
        is_secure = True
        port = port or 443
    else:
        raise ValueError("scheme %s is invalid" % scheme)

    resource = parsed.path or "/"
    if parsed.query:
        resource += "?" + parsed.query

    return hostname, port, resource, is_secure
# Hosts that bypass the proxy when no no_proxy configuration is supplied
# either as an argument or through the environment.
DEFAULT_NO_PROXY_HOST = ["localhost", "127.0.0.1"]
def _is_ip_address(addr: str) -> bool:
try:
socket.inet_aton(addr)
except socket.error:
return False
else:
return True
def METHOD_NAME(hostname: str) -> bool:
    """Return True if *hostname* is an IPv4 CIDR subnet such as "10.0.0.0/8".

    Accepts prefix lengths 0 through 32 inclusive.  The previous upper bound
    (``< 32``) wrongly rejected ``/32``, which is a valid single-host subnet
    commonly used in no_proxy lists.

    Anything that is not ``<ipv4>/<int>`` returns False.
    """
    try:
        addr, netmask = hostname.split("/")
        # Raises socket.error if `addr` is not a valid IPv4 address.
        socket.inet_aton(addr)
        return 0 <= int(netmask) <= 32
    except (ValueError, socket.error):
        # No "/" separator, multiple separators, a non-numeric prefix,
        # or a non-IPv4 address part.
        return False
def _is_address_in_network(ip: str, net: str) -> bool:
ipaddr = struct.unpack('!I', socket.inet_aton(ip))[0]
netaddr, netmask = net.split('/')
netaddr = struct.unpack('!I', socket.inet_aton(netaddr))[0]
netmask = (0xFFFFFFFF << (32 - int(netmask))) & 0xFFFFFFFF
return ipaddr & netmask == netaddr
def _is_no_proxy_host(hostname: str, no_proxy: list) -> bool:
    """Return True if *hostname* should bypass the proxy.

    The bypass list comes from the *no_proxy* argument, falling back to the
    ``no_proxy``/``NO_PROXY`` environment variables, then to
    ``DEFAULT_NO_PROXY_HOST``.  Entries may be exact host names, leading-dot
    domain suffixes, or (for IP hostnames) CIDR subnets; ``*`` matches all.
    """
    if not no_proxy:
        env_value = os.environ.get("no_proxy", os.environ.get("NO_PROXY", "")).replace(" ", "")
        if env_value:
            no_proxy = env_value.split(",")
    if not no_proxy:
        no_proxy = DEFAULT_NO_PROXY_HOST

    if "*" in no_proxy or hostname in no_proxy:
        return True

    if _is_ip_address(hostname):
        # IP hostnames additionally match against any CIDR entries.
        return any(
            _is_address_in_network(hostname, entry)
            for entry in no_proxy
            if METHOD_NAME(entry)
        )

    # Leading-dot entries act as domain suffixes.
    return any(
        hostname.endswith(domain) for domain in no_proxy if domain.startswith(".")
    )
def get_proxy_info(
        hostname: str, is_secure: bool, proxy_host: str = None, proxy_port: int = 0, proxy_auth: tuple = None,
        no_proxy: list = None, proxy_type: str = 'http') -> tuple:
    """
    Try to retrieve proxy host and port from environment
    if not provided in options.
    Result is (proxy_host, proxy_port, proxy_auth).
    proxy_auth is tuple of username and password
    of proxy authentication information.

    Parameters
    ----------
    hostname: str
        Websocket server name.
    is_secure: bool
        Is the connection secure? (wss) looks for "https_proxy" in env
        instead of "http_proxy"
    proxy_host: str
        http proxy host name.
    proxy_port: str or int
        http proxy port.
    no_proxy: list
        Whitelisted host names that don't use the proxy.
    proxy_auth: tuple
        HTTP proxy auth information. Tuple of username and password. Default is None.
    proxy_type: str
        Specify the proxy protocol (http, socks4, socks4a, socks5, socks5h). Default is "http".
        Use socks4a or socks5h if you want to send DNS requests through the proxy.
    """
    # Bypass entirely for whitelisted hosts.
    if _is_no_proxy_host(hostname, no_proxy):
        return None, 0, None

    # Explicit options take precedence over the environment.
    if proxy_host:
        return proxy_host, proxy_port, proxy_auth

    env_key = "https_proxy" if is_secure else "http_proxy"
    env_value = os.environ.get(env_key, os.environ.get(env_key.upper(), "")).replace(" ", "")
    if not env_value:
        return None, 0, None

    proxy = urlparse(env_value)
    if proxy.username:
        auth = (unquote(proxy.username), unquote(proxy.password))
    else:
        auth = None
    return proxy.hostname, proxy.port, auth
"""
stl_export
==========
Autogenerated DPF operator classes.
"""
from warnings import warn
from ansys.dpf.core.dpf_operator import Operator
from ansys.dpf.core.inputs import Input, _Inputs
from ansys.dpf.core.outputs import Output, _Outputs
from ansys.dpf.core.operators.specification import PinSpecification, Specification
class stl_export(Operator):
    """export a mesh into a stl file.

    Parameters
    ----------
    mesh : MeshedRegion
    file_path : str

    Examples
    --------
    >>> from ansys.dpf import core as dpf

    >>> # Instantiate operator
    >>> op = dpf.operators.mesh.stl_export()

    >>> # Make input connections
    >>> my_mesh = dpf.MeshedRegion()
    >>> op.inputs.mesh.connect(my_mesh)
    >>> my_file_path = str()
    >>> op.inputs.file_path.connect(my_file_path)

    >>> # Instantiate operator and connect inputs in one line
    >>> op = dpf.operators.mesh.stl_export(
    ...     mesh=my_mesh,
    ...     file_path=my_file_path,
    ... )

    >>> # Get output data
    >>> result_data_sources = op.outputs.data_sources()
    """

    def __init__(self, METHOD_NAME=None, file_path=None, config=None, server=None):
        # Inputs given to the constructor are connected immediately; others
        # can be connected later through `self.inputs`.
        super().__init__(name="stl_export", config=config, server=server)
        self._inputs = InputsStlExport(self)
        self._outputs = OutputsStlExport(self)
        if METHOD_NAME is not None:
            self.inputs.METHOD_NAME.connect(METHOD_NAME)
        if file_path is not None:
            self.inputs.file_path.connect(file_path)

    @staticmethod
    def _spec():
        # Pin layout: input 0 = mesh, input 1 = target file path;
        # output 0 = data sources referencing the written file.
        description = """export a mesh into a stl file."""
        spec = Specification(
            description=description,
            map_input_pin_spec={
                0: PinSpecification(
                    name="mesh",
                    type_names=["abstract_meshed_region"],
                    optional=False,
                    document="""""",
                ),
                1: PinSpecification(
                    name="file_path",
                    type_names=["string"],
                    optional=False,
                    document="""""",
                ),
            },
            map_output_pin_spec={
                0: PinSpecification(
                    name="data_sources",
                    type_names=["data_sources"],
                    optional=False,
                    document="""""",
                ),
            },
        )
        return spec

    @staticmethod
    def default_config(server=None):
        """Returns the default config of the operator.

        This config can then be changed to the user needs and be used to
        instantiate the operator. The Configuration allows to customize
        how the operation will be processed by the operator.

        Parameters
        ----------
        server : server.DPFServer, optional
            Server with channel connected to the remote or local instance. When
            ``None``, attempts to use the global server.
        """
        return Operator.default_config(name="stl_export", server=server)

    @property
    def inputs(self):
        """Enables to connect inputs to the operator

        Returns
        --------
        inputs : InputsStlExport
        """
        return super().inputs

    @property
    def outputs(self):
        """Enables to get outputs of the operator by evaluating it

        Returns
        --------
        outputs : OutputsStlExport
        """
        return super().outputs
class InputsStlExport(_Inputs):
    """Intermediate class used to connect user inputs to
    stl_export operator.

    Examples
    --------
    >>> from ansys.dpf import core as dpf
    >>> op = dpf.operators.mesh.stl_export()
    >>> my_mesh = dpf.MeshedRegion()
    >>> op.inputs.mesh.connect(my_mesh)
    >>> my_file_path = str()
    >>> op.inputs.file_path.connect(my_file_path)
    """

    def __init__(self, op: Operator):
        super().__init__(stl_export._spec().inputs, op)
        # Pin 0: the mesh to export; pin 1: destination file path.
        self._mesh = Input(stl_export._spec().input_pin(0), 0, op, -1)
        self._inputs.append(self._mesh)
        self._file_path = Input(stl_export._spec().input_pin(1), 1, op, -1)
        self._inputs.append(self._file_path)

    @property
    def METHOD_NAME(self):
        """Allows to connect mesh input to the operator.

        Parameters
        ----------
        my_mesh : MeshedRegion

        Examples
        --------
        >>> from ansys.dpf import core as dpf
        >>> op = dpf.operators.mesh.stl_export()
        >>> op.inputs.mesh.connect(my_mesh)
        >>> # or
        >>> op.inputs.mesh(my_mesh)
        """
        return self._mesh

    @property
    def file_path(self):
        """Allows to connect file_path input to the operator.

        Parameters
        ----------
        my_file_path : str

        Examples
        --------
        >>> from ansys.dpf import core as dpf
        >>> op = dpf.operators.mesh.stl_export()
        >>> op.inputs.file_path.connect(my_file_path)
        >>> # or
        >>> op.inputs.file_path(my_file_path)
        """
        return self._file_path
class OutputsStlExport(_Outputs):
    """Intermediate class used to get outputs from
    stl_export operator.

    Examples
    --------
    >>> from ansys.dpf import core as dpf
    >>> op = dpf.operators.mesh.stl_export()
    >>> # Connect inputs : op.inputs. ...
    >>> result_data_sources = op.outputs.data_sources()
    """

    def __init__(self, op: Operator):
        super().__init__(stl_export._spec().outputs, op)
        # Pin 0: data sources referencing the written STL file.
        self._data_sources = Output(stl_export._spec().output_pin(0), 0, op)
        self._outputs.append(self._data_sources)

    @property
    def data_sources(self):
        """Allows to get data_sources output of the operator

        Returns
        ----------
        my_data_sources : DataSources

        Examples
        --------
        >>> from ansys.dpf import core as dpf
        >>> op = dpf.operators.mesh.stl_export()
        >>> # Connect inputs : op.inputs. ...
        >>> result_data_sources = op.outputs.data_sources()
        """  # noqa: E501
        return self._data_sources
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import errno
import os
import time
from collections import defaultdict, deque
import paddle
from paddlenlp.transformers import (
CosineDecayWithWarmup,
LinearDecayWithWarmup,
PolyDecayWithWarmup,
)
class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.
    """

    def __init__(self, window_size=20, fmt=None):
        # Default rendering: windowed median plus running global average.
        self.deque = deque(maxlen=window_size)
        self.total = 0.0
        self.count = 0
        self.fmt = "{median:.4f} ({global_avg:.4f})" if fmt is None else fmt

    def update(self, value, n=1):
        # `n` weights the value in the global statistics; the window keeps
        # only the raw value once.
        self.deque.append(value)
        self.count += n
        self.total += value * n

    def synchronize_between_processes(self):
        """
        Warning: does not synchronize the deque!
        """
        stats = paddle.to_tensor([self.count, self.total], dtype="float64").numpy().tolist()
        self.count = int(stats[0])
        self.total = stats[1]

    @property
    def median(self):
        # Median over the current window.
        return paddle.to_tensor(list(self.deque)).median().item()

    @property
    def METHOD_NAME(self):
        # Mean over the current window.
        return paddle.to_tensor(list(self.deque), dtype="float32").mean().item()

    @property
    def global_avg(self):
        return self.total / self.count

    @property
    def max(self):
        return max(self.deque)

    @property
    def value(self):
        return self.deque[-1]

    def __str__(self):
        return self.fmt.format(
            median=self.median,
            METHOD_NAME=self.METHOD_NAME,
            global_avg=self.global_avg,
            max=self.max,
            value=self.value,
        )
class MetricLogger(object):
    """Collects named SmoothedValue meters and periodically prints progress."""

    def __init__(self, delimiter="\t"):
        # Unknown meter names get a fresh default SmoothedValue automatically.
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        # One meter per keyword argument; tensors are unwrapped to scalars.
        for k, v in kwargs.items():
            if isinstance(v, paddle.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)

    def __getattr__(self, attr):
        # Allows `logger.loss`-style access to meters by name.
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr))

    def __str__(self):
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append("{}: {}".format(name, str(meter)))
        return self.delimiter.join(loss_str)

    def synchronize_between_processes(self):
        for meter in self.meters.values():
            meter.synchronize_between_processes()

    def add_meter(self, name, meter):
        self.meters[name] = meter

    def log_every(self, iterable, print_freq, header=None):
        # Generator: yields the items of `iterable` while printing progress,
        # ETA and per-step timing every `print_freq` iterations.
        i = 0
        if not header:
            header = ""
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt="{avg:.4f}")
        data_time = SmoothedValue(fmt="{avg:.4f}")
        space_fmt = ":" + str(len(str(len(iterable)))) + "d"
        # NOTE(review): both branches build the identical message; the CUDA
        # branch presumably once carried extra (memory) fields -- confirm
        # against upstream before removing the duplication.
        if paddle.device.is_compiled_with_cuda():
            log_msg = self.delimiter.join(
                [
                    header,
                    "[{0" + space_fmt + "}/{1}]",
                    "eta: {eta}",
                    "{meters}",
                    "time: {time}",
                    "data: {data}",
                ]
            )
        else:
            log_msg = self.delimiter.join(
                [
                    header,
                    "[{0" + space_fmt + "}/{1}]",
                    "eta: {eta}",
                    "{meters}",
                    "time: {time}",
                    "data: {data}",
                ]
            )
        for obj in iterable:
            # Time spent fetching the item vs. total time for the iteration.
            data_time.update(time.time() - end)
            yield obj
            iter_time.update(time.time() - end)
            if i % print_freq == 0:
                eta_seconds = iter_time.global_avg * (len(iterable) - i)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                print(
                    log_msg.format(
                        i,
                        len(iterable),
                        eta=eta_string,
                        meters=str(self),
                        time=str(iter_time),
                        data=str(data_time),
                    )
                )
            i += 1
            end = time.time()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print("{} Total time: {}".format(header, total_time_str))
# Maps the public scheduler_type names accepted by get_scheduler to their
# PaddleNLP warmup-scheduler implementations.
scheduler_type2cls = {
    "linear": LinearDecayWithWarmup,
    "cosine": CosineDecayWithWarmup,
    "polynomial": PolyDecayWithWarmup,
}
def get_scheduler(
    learning_rate,
    scheduler_type,
    num_warmup_steps=None,
    num_training_steps=None,
    **scheduler_kwargs,
):
    """Create a learning-rate scheduler with warmup.

    Args:
        learning_rate: Peak learning rate handed to the scheduler.
        scheduler_type: One of the keys of ``scheduler_type2cls``
            ("linear", "cosine" or "polynomial").
        num_warmup_steps: Number of warmup steps; required.
        num_training_steps: Total number of training steps; required.
        **scheduler_kwargs: Extra keyword arguments forwarded to the
            scheduler class.

    Returns:
        An instance of the selected warmup scheduler class.

    Raises:
        ValueError: If ``scheduler_type`` is unknown or either step count
            is missing.
    """
    if scheduler_type not in scheduler_type2cls:
        # Fixed typo in the error message ("choson" -> "chosen").
        choices = " ".join(scheduler_type2cls)
        raise ValueError(f"scheduler_type must be chosen from {choices}")
    if num_warmup_steps is None:
        raise ValueError("requires `num_warmup_steps`, please provide that argument.")
    if num_training_steps is None:
        raise ValueError("requires `num_training_steps`, please provide that argument.")
    return scheduler_type2cls[scheduler_type](
        learning_rate=learning_rate,
        total_steps=num_training_steps,
        warmup=num_warmup_steps,
        **scheduler_kwargs,
    )
def mkdir(path):
    """Create *path* (including parents); silently succeed if it already exists."""
    try:
        os.makedirs(path)
    except OSError as exc:
        # Racing with another process creating the same directory is fine;
        # any other failure is re-raised.
        if exc.errno != errno.EEXIST:
            raise
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
# Public exports of this generated module.
__all__ = [
    'GetConfigurationProfilesVersionResult',
    'AwaitableGetConfigurationProfilesVersionResult',
    'get_configuration_profiles_version',
    'get_configuration_profiles_version_output',
]
@pulumi.output_type
class GetConfigurationProfilesVersionResult:
    """
    Definition of the configuration profile.
    """
    def __init__(__self__, METHOD_NAME=None, location=None, name=None, properties=None, system_data=None, tags=None, type=None):
        # Each argument is type-checked and stored via pulumi.set so the
        # @pulumi.output_type machinery can serve the typed getters below.
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", METHOD_NAME)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if properties and not isinstance(properties, dict):
            raise TypeError("Expected argument 'properties' to be a dict")
        pulumi.set(__self__, "properties", properties)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def METHOD_NAME(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def location(self) -> str:
        """
        The geo-location where the resource lives
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def properties(self) -> 'outputs.ConfigurationProfilePropertiesResponse':
        """
        Properties of the configuration profile.
        """
        return pulumi.get(self, "properties")

    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> 'outputs.SystemDataResponse':
        """
        Azure Resource Manager metadata containing createdBy and modifiedBy information.
        """
        return pulumi.get(self, "system_data")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
class AwaitableGetConfigurationProfilesVersionResult(GetConfigurationProfilesVersionResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Makes the already-resolved result awaitable: the unreachable
        # `yield` marks this as a generator, and awaiting returns a plain
        # GetConfigurationProfilesVersionResult copy immediately.
        if False:
            yield self
        return GetConfigurationProfilesVersionResult(
            METHOD_NAME=self.METHOD_NAME,
            location=self.location,
            name=self.name,
            properties=self.properties,
            system_data=self.system_data,
            tags=self.tags,
            type=self.type)
def get_configuration_profiles_version(configuration_profile_name: Optional[str] = None,
                                       resource_group_name: Optional[str] = None,
                                       version_name: Optional[str] = None,
                                       opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetConfigurationProfilesVersionResult:
    """
    Get information about a configuration profile version


    :param str configuration_profile_name: The configuration profile name.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str version_name: The configuration profile version name.
    """
    __args__ = dict()
    __args__['configurationProfileName'] = configuration_profile_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['versionName'] = version_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # Invoke the provider function and unwrap the typed result.
    __ret__ = pulumi.runtime.invoke('azure-native:automanage/v20220504:getConfigurationProfilesVersion', __args__, opts=opts, typ=GetConfigurationProfilesVersionResult).value

    return AwaitableGetConfigurationProfilesVersionResult(
        METHOD_NAME=pulumi.get(__ret__, 'id'),
        location=pulumi.get(__ret__, 'location'),
        name=pulumi.get(__ret__, 'name'),
        properties=pulumi.get(__ret__, 'properties'),
        system_data=pulumi.get(__ret__, 'system_data'),
        tags=pulumi.get(__ret__, 'tags'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_configuration_profiles_version)
def get_configuration_profiles_version_output(configuration_profile_name: Optional[pulumi.Input[str]] = None,
                                              resource_group_name: Optional[pulumi.Input[str]] = None,
                                              version_name: Optional[pulumi.Input[str]] = None,
                                              opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetConfigurationProfilesVersionResult]:
    """
    Get information about a configuration profile version


    :param str configuration_profile_name: The configuration profile name.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str version_name: The configuration profile version name.
    """
    # Body is supplied by the lift_output_func decorator, which wraps the
    # plain invoke above so Output-typed arguments can be passed.
    ...
# =================================================================
#
# Authors: Francesco Bartoli <xbartolone@gmail.com>
# Authors: Tom Kralidis <tomkralidis@gmail.com>
#
# Copyright (c) 2020 Francesco Bartoli
# Copyright (c) 2022 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
# Needs to be run like: python3 -m pytest
# https://sampleserver6.arcgisonline.com/arcgis/rest/services/CommunityAddressing/FeatureServer/0
import logging
import random
import pytest
from pygeoapi.provider.base import ProviderItemNotFoundError
from pygeoapi.provider.ogr import OGRProvider
LOGGER = logging.getLogger(__name__)
@pytest.fixture()
def config_ArcGIS_ESRIJSON():
    # Provider configuration pointing at the public ArcGIS Online sample
    # Feature Server; FeatureServer paging is enabled so OGR fetches
    # results in batches.
    return {
        'name': 'OGR',
        'type': 'feature',
        'data': {
            'source_type': 'ESRIJSON',
            'source': 'https://sampleserver6.arcgisonline.com/arcgis/rest/services/CommunityAddressing/FeatureServer/0/query?where=objectid+%3D+objectid&outfields=*&orderByFields=objectid+ASC&f=json',  # noqa
            # 'source_srs': 'EPSG:4326',
            # 'target_srs': 'EPSG:4326',
            'source_capabilities': {
                'paging': True
            },
            'open_options': {
                'FEATURE_SERVER_PAGING': 'YES',
            },
            'gdal_ogr_options': {
                'EMPTY_AS_NULL': 'NO',
                'GDAL_CACHEMAX': '64',
                'CPL_DEBUG': 'NO'
            },
        },
        'id_field': 'objectid',
        'layer': 'ESRIJSON'
    }
@pytest.fixture()
def METHOD_NAME(config_ArcGIS_ESRIJSON):
    # Picks a random feature id from the first page of results so the
    # `get` tests do not depend on a hard-coded object id.
    p = OGRProvider(config_ArcGIS_ESRIJSON)
    # Get bunch of features to randomly have an id
    feature_collection = p.query(offset=0, limit=10, resulttype='results')
    features = feature_collection.get('features')
    features_list = []
    for feature in features:
        features_list.append(feature['id'])
    selected_id = random.choice(features_list)
    fulladdr = p.get(selected_id)['properties']['fulladdr']
    # Return the id together with the leading street number of its address.
    return (selected_id, fulladdr.split(' ')[0])
def test_get_fields_agol(config_ArcGIS_ESRIJSON):
    """Testing field types"""
    p = OGRProvider(config_ArcGIS_ESRIJSON)
    results = p.get_fields()
    # Spot-check two known string columns of the sample layer.
    assert results['fulladdr']['type'] == 'string'
    assert results['municipality']['type'] == 'string'
def test_get_agol(config_ArcGIS_ESRIJSON, METHOD_NAME):
    """Testing query for a specific object"""
    p = OGRProvider(config_ArcGIS_ESRIJSON)
    # Renamed the local from `id` to `feature_id`: it shadowed the builtin.
    feature_id, addr_number = METHOD_NAME
    result = p.get(feature_id)
    assert result['id'] == feature_id
    assert addr_number in result['properties']['fulladdr']
def test_get_agol_not_existing_feature_raise_exception(
    config_ArcGIS_ESRIJSON
):
    """Testing query for a not existing object"""
    p = OGRProvider(config_ArcGIS_ESRIJSON)
    # -1 is never a valid objectid, so the provider must raise.
    with pytest.raises(ProviderItemNotFoundError):
        p.get(-1)
def test_query_hits_agol(config_ArcGIS_ESRIJSON):
    """Testing query on entire collection for hits"""
    p = OGRProvider(config_ArcGIS_ESRIJSON)
    feature_collection = p.query(resulttype='hits')
    assert feature_collection.get('type') == 'FeatureCollection'
    # A hits-only query returns a count but no features.
    features = feature_collection.get('features')
    assert len(features) == 0
    hits = feature_collection.get('numberMatched')
    assert hits is not None
    assert hits > 100
# def test_query_bbox_hits_agol(config_ArcGIS_ESRIJSON):
# """Testing query for a valid JSON object with geometry"""
# p = OGRProvider(config_ArcGIS_ESRIJSON)
# feature_collection = p.query(
# bbox=[-9822165.181154, 5112669.004249,
# -9807305.104750, 5133712.297986],
# resulttype='hits')
# assert feature_collection.get('type') == 'FeatureCollection'
# features = feature_collection.get('features')
# assert len(features) == 0
# hits = feature_collection.get('numberMatched')
# assert hits is not None
# assert hits > 1
def test_query_with_limit_agol(config_ArcGIS_ESRIJSON):
    """Testing query for a valid JSON object with geometry"""
    p = OGRProvider(config_ArcGIS_ESRIJSON)
    feature_collection = p.query(limit=2, resulttype='results')
    assert feature_collection.get('type') == 'FeatureCollection'
    features = feature_collection.get('features')
    assert len(features) == 2
    # numberMatched is only reported for hits-type queries.
    hits = feature_collection.get('numberMatched')
    assert hits is None
    feature = features[0]
    properties = feature.get('properties')
    assert properties is not None
    geometry = feature.get('geometry')
    assert geometry is not None
def test_query_with_offset(config_ArcGIS_ESRIJSON):
    """Paging: offset + limit returns the requested page of features.

    Fixes: removed a stray trailing '|' artifact that broke the final
    assertion line, and replaced the docstring copied from another test.
    """
    p = OGRProvider(config_ArcGIS_ESRIJSON)
    feature_collection = p.query(offset=10, limit=10, resulttype='results')
    assert feature_collection.get('type') == 'FeatureCollection'
    features = feature_collection.get('features')
    assert len(features) == 10
    # 'results' responses do not include a match count
    hits = feature_collection.get('numberMatched')
    assert hits is None
    feature = features[0]
    properties = feature.get('properties')
    assert properties is not None
    assert properties['fulladdr'] is not None
    geometry = feature.get('geometry')
    assert geometry is not None
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Features extractor."""
import copy
from typing import Mapping, Optional, Text, Tuple
import apache_beam as beam
import numpy as np
import pyarrow as pa
from tensorflow_model_analysis import constants
from tensorflow_model_analysis.api import types
from tensorflow_model_analysis.extractors import extractor
from tensorflow_model_analysis.proto import config_pb2
from tensorflow_model_analysis.utils import util
from tensorflow_metadata.proto.v0 import schema_pb2
# Name of the Beam stage created by this extractor.
_FEATURES_EXTRACTOR_STAGE_NAME = 'ExtractFeatures'
# Public extract key names. NOTE(review): the code below reads the
# equivalents from `constants`; these module-level aliases are presumably
# for external callers — confirm.
FEATURES_KEY = 'features'
ARROW_RECORD_BATCH_KEY = 'arrow_record_batch'
def FeaturesExtractor(  # pylint: disable=invalid-name
    eval_config: config_pb2.EvalConfig,
    tensor_representations: Optional[Mapping[
        Text, schema_pb2.TensorRepresentation]] = None) -> extractor.Extractor:
  """Creates an extractor for extracting features.

  Behavior depends on which keys are present in the incoming extracts:

  1) tfma.ARROW_RECORD_BATCH_KEY present: features stored in the RecordBatch
     are extracted under tfma.FEATURES_KEY and the raw serialized inputs are
     added under tfma.INPUT_KEY; existing extracts are merged with RecordBatch
     values taking precedence on duplicate keys. The
     tfma.ARROW_RECORD_BATCH_KEY key is removed from the output.
  2) tfma.FEATURES_KEY present (but no tfma.ARROW_RECORD_BATCH_KEY): no-op,
     extracts are passed through unchanged.
  3) Neither key present: an exception is raised.

  Args:
    eval_config: Eval config (unused, kept for API compatibility).
    tensor_representations: Optional tensor representations to use when parsing
      the data. When a representation is missing for a feature name a default
      is used where possible, otherwise an exception is raised.

  Returns:
    Extractor for extracting features.
  """
  del eval_config  # Unused.
  # pylint: disable=no-value-for-parameter
  ptransform = _ExtractFeatures(tensor_representations or {})
  return extractor.Extractor(
      stage_name=_FEATURES_EXTRACTOR_STAGE_NAME, ptransform=ptransform)
# TODO(b/214273030): Move to tfx-bsl.
def _is_list_like(arrow_type: pa.DataType) -> bool:
  """Returns True for list and large_list Arrow types."""
  predicates = (pa.types.is_list, pa.types.is_large_list)
  return any(predicate(arrow_type) for predicate in predicates)
# TODO(b/214273030): Move to tfx-bsl.
def _is_binary_like(arrow_type: pa.DataType) -> bool:
  """Returns True for (large_)binary and (large_)string Arrow types."""
  predicates = (pa.types.is_binary, pa.types.is_large_binary,
                pa.types.is_string, pa.types.is_large_string)
  return any(predicate(arrow_type) for predicate in predicates)
# TODO(b/214273030): Move to tfx-bsl.
def _is_supported_arrow_value_type(arrow_type: pa.DataType) -> bool:
  """Returns True for value types convertible to tensor values."""
  if pa.types.is_integer(arrow_type):
    return True
  if pa.types.is_floating(arrow_type):
    return True
  return _is_binary_like(arrow_type)
def METHOD_NAME(
    record_batch: pa.RecordBatch
) -> Tuple[pa.RecordBatch, Optional[np.ndarray]]:
  """Drops unsupported columns and fetches the raw data column.

  Currently, types that are not binary_like or ListArray[primitive types] are
  dropped.

  Args:
    record_batch: An Arrow RecordBatch.

  Returns:
    A tuple of (Arrow RecordBatch containing only the supported columns,
    serialized raw examples as an np.ndarray — or None when the input
    column `constants.ARROW_INPUT_COLUMN` is absent).
  """
  column_names, column_arrays = [], []
  serialized_examples = None
  for column_name, column_array in zip(record_batch.schema.names,
                                       record_batch.columns):
    column_type = column_array.type
    if column_name == constants.ARROW_INPUT_COLUMN:
      # The raw input column must be list<binary-like>; flatten it into a
      # 1-D array of serialized examples.
      assert (_is_list_like(column_type) and
              _is_binary_like(column_type.value_type)), (
                  'Invalid type for batched input key: {}. '
                  'Expected binary like.'.format(column_type))
      serialized_examples = np.asarray(column_array.flatten())
    # Currently we only handle columns of type list<primitive|binary_like>.
    # We ignore other columns as we cannot efficiently convert them into an
    # instance dict format.
    elif (_is_list_like(column_type) and
          _is_supported_arrow_value_type(column_type.value_type)):
      column_names.append(column_name)
      column_arrays.append(column_array)
  return (pa.RecordBatch.from_arrays(column_arrays,
                                     column_names), serialized_examples)
@beam.ptransform_fn
@beam.typehints.with_input_types(types.Extracts)
@beam.typehints.with_output_types(types.Extracts)
def _ExtractFeatures(  # pylint: disable=invalid-name
    extracts: beam.pvalue.PCollection,
    tensor_representations: Mapping[str, schema_pb2.TensorRepresentation]
) -> beam.pvalue.PCollection:
  """Extracts features from extracts.

  Fix: removed a stray trailing '|' artifact on the final return statement
  which made the module unparsable.

  Args:
    extracts: PCollection containing features under tfma.ARROW_RECORD_BATCH_KEY
      or tfma.FEATURES_KEY.
    tensor_representations: Tensor representations.

  Returns:
    PCollection of extracts with additional features added under the key
    tfma.FEATURES_KEY and optionally inputs added under the tfma.INPUTS_KEY.

  Raises:
    ValueError: If incoming extracts contains neither tfma.FEATURES_KEY nor
      tfma.ARROW_RECORD_BATCH_KEY.
  """

  def extract_features(extracts: types.Extracts) -> types.Extracts:
    """Extract features from extracts containing arrow table."""
    # Shallow copy so the incoming extracts dict is not mutated.
    result = copy.copy(extracts)
    if constants.ARROW_RECORD_BATCH_KEY in extracts:
      (record_batch, serialized_examples) = (
          METHOD_NAME(
              extracts[constants.ARROW_RECORD_BATCH_KEY]
          )
      )
      del result[constants.ARROW_RECORD_BATCH_KEY]
      features = (
          result[constants.FEATURES_KEY]
          if constants.FEATURES_KEY in result
          else {}
      )
      # RecordBatch values take precedence over pre-existing features on
      # duplicate keys.
      features.update(
          util.record_batch_to_tensor_values(
              record_batch, tensor_representations
          )
      )
      result[constants.FEATURES_KEY] = features
      result[constants.INPUT_KEY] = serialized_examples
    elif constants.FEATURES_KEY not in extracts:
      raise ValueError(
          'Incoming extracts must contain either tfma.ARROW_RECORD_BATCH_KEY '
          f'or tfma.FEATURES_KEY, but extracts={extracts}'
      )
    return result

  return extracts | 'ExtractFeatures' >> beam.Map(extract_features)
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "network manager scope-connection delete",
    confirmation="Are you sure you want to perform this operation?",
)
class Delete(AAZCommand):
    """Delete the pending scope connection created by this network manager.

    :example: Delete network manager scope connection
        az network manager scope-connection delete --name "TestScopeConnect" --network-manager-name "testNetworkManager" --resource-group "rg1"
    """

    # Metadata consumed by the aaz framework: API version and the ARM
    # resource id template this command operates on.
    _aaz_info = {
        "version": "2022-01-01",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.network/networkmanagers/{}/scopeconnections/{}", "2022-01-01"],
        ]
    }

    def _handler(self, command_args):
        # Entry point invoked by the CLI framework; DELETE returns no body.
        super()._handler(command_args)
        self._execute_operations()
        return None

    # Built lazily and cached on the class by _build_arguments_schema.
    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""
        _args_schema = cls._args_schema
        _args_schema.network_manager_name = AAZStrArg(
            options=["--network-manager", "--network-manager-name"],
            help="The name of the network manager.",
            required=True,
            id_part="name",
        )
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        _args_schema.scope_connection_name = AAZStrArg(
            options=["-n", "--name", "--connection-name", "--scope-connection-name"],
            help="Name for the cross-tenant connection.",
            required=True,
            id_part="child_name_1",
        )
        return cls._args_schema

    def _execute_operations(self):
        # pre/post hooks allow subclasses/extensions to customize behavior.
        self.pre_operations()
        self.ScopeConnectionsDelete(ctx=self.ctx)()
        self.post_operations()

    @register_callback
    def pre_operations(self):
        pass

    @register_callback
    def post_operations(self):
        pass

    class ScopeConnectionsDelete(AAZHttpOperation):
        # HTTP DELETE operation against the scopeConnections child resource.
        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            # 200: deleted; 204: already absent; anything else is an error.
            if session.http_response.status_code in [200]:
                return self.on_200(session)
            if session.http_response.status_code in [204]:
                return self.on_204(session)
            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkManagers/{networkManagerName}/scopeConnections/{scopeConnectionName}",
                **self.url_parameters
            )

        @property
        def method(self):
            return "DELETE"

        @property
        def error_format(self):
            return "ODataV4Format"

        @property
        def url_parameters(self):
            # Fills the placeholders of the url template above from ctx.
            parameters = {
                **self.serialize_url_param(
                    "networkManagerName", self.ctx.args.network_manager_name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "scopeConnectionName", self.ctx.args.scope_connection_name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
            }
            return parameters

        @property
        def METHOD_NAME(self):
            # Query string parameters: only the API version.
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2022-01-01",
                    required=True,
                ),
            }
            return parameters

        def on_200(self, session):
            pass

        def on_204(self, session):
            pass
class _DeleteHelper:
"""Helper class for Delete"""
__all__ = ["Delete"] |
# Copyright © Michal Čihař <michal@weblate.org>
#
# SPDX-License-Identifier: GPL-3.0-or-later
"""Tests for unitdata models."""
from django.urls import reverse
from django.utils.html import format_html
from weblate.checks.models import Check
from weblate.checks.tasks import batch_update_checks
from weblate.trans.models import Unit
from weblate.trans.tasks import auto_translate
from weblate.trans.tests.test_views import FixtureTestCase, ViewTestCase
class CheckModelTestCase(FixtureTestCase):
    """Tests for Check model helpers (description, doc URL, rendering)."""

    def create_check(self, name):
        # Attach a check with the given name to the fixture's first unit.
        return Check.objects.create(unit=self.get_unit(), name=name)

    def test_check(self):
        check = self.create_check("same")
        self.assertEqual(
            str(check.get_description()), "Source and translation are identical"
        )
        self.assertTrue(check.get_doc_url().endswith("user/checks.html#check-same"))
        self.assertEqual(str(check), "Unchanged translation")

    def test_check_nonexisting(self):
        # Unknown check names fall back to the raw name and an empty doc URL.
        check = self.create_check("-invalid-")
        self.assertEqual(check.get_description(), "-invalid-")
        self.assertEqual(check.get_doc_url(), "")

    def test_check_render(self):
        unit = self.get_unit()
        # max-size turns the description into a rendered thumbnail link.
        unit.source_unit.extra_flags = "max-size:1:1"
        unit.source_unit.save()
        check = self.create_check("max-size")
        url = reverse(
            "render-check", kwargs={"check_id": check.name, "unit_id": unit.id}
        )
        self.assertEqual(
            str(check.get_description()),
            format_html(
                '<a href="{0}?pos=0" class="thumbnail">'
                '<img class="img-responsive" src="{0}?pos=0" /></a>',
                url,
            ),
        )
        self.assert_png(self.client.get(url))
class BatchUpdateTest(ViewTestCase):
    """Test for complex manipulating translation.

    Fix: removed a stray trailing '|' artifact on the final assertion line.
    """

    def setUp(self):
        super().setUp()
        self.translation = self.get_translation()

    def do_base(self):
        """Create an inconsistency between two linked components.

        Returns the linked component.
        """
        # Single unit should have no consistency check
        self.edit_unit("Hello, world!\n", "Nazdar svete!\n")
        unit = self.get_unit()
        self.assertEqual(unit.all_checks_names, set())

        # Add linked project
        other = self.create_link_existing()

        # Now the inconsistent check should be there
        unit = self.get_unit()
        self.assertEqual(unit.all_checks_names, {"inconsistent"})
        return other

    def test_autotranslate(self):
        # Auto-translating the linked component resolves the inconsistency.
        other = self.do_base()
        translation = other.translation_set.get(language_code="cs")
        auto_translate(
            None,
            translation.pk,
            "translate",
            "todo",
            "others",
            self.component.pk,
            [],
            99,
            translation=translation,
        )
        unit = self.get_unit()
        self.assertEqual(unit.all_checks_names, set())

    def METHOD_NAME(self):
        other = self.do_base()
        # The batch update should not remove it
        batch_update_checks(self.component.id, ["inconsistent"])
        batch_update_checks(other.id, ["inconsistent"])
        unit = self.get_unit()
        self.assertEqual(unit.all_checks_names, {"inconsistent"})

    def test_toggle(self):
        # Flip targets on both sides and verify the check toggles correctly,
        # regardless of which component the batch update runs on.
        other = self.do_base()
        one_unit = self.get_unit()
        other_unit = Unit.objects.get(
            translation__language_code=one_unit.translation.language_code,
            translation__component=other,
            id_hash=one_unit.id_hash,
        )
        translated = one_unit.target
        combinations = (
            (translated, "", {"inconsistent"}),
            ("", translated, {"inconsistent"}),
            ("", "", set()),
            (translated, translated, set()),
            ("", translated, {"inconsistent"}),
        )
        for update_one, update_other, expected in combinations:
            Unit.objects.filter(pk=one_unit.pk).update(target=update_one)
            Unit.objects.filter(pk=other_unit.pk).update(target=update_other)
            batch_update_checks(self.component.id, ["inconsistent"])
            unit = self.get_unit()
            self.assertEqual(unit.all_checks_names, expected)
        for update_one, update_other, expected in combinations:
            Unit.objects.filter(pk=one_unit.pk).update(target=update_one)
            Unit.objects.filter(pk=other_unit.pk).update(target=update_other)
            batch_update_checks(other.id, ["inconsistent"])
            unit = self.get_unit()
            self.assertEqual(unit.all_checks_names, expected)
# Author: Hubert Kario, (c) 2015
# Released under Gnu GPL v2.0, see LICENSE file for details
"""Minimal test for verifying DHE_RSA key exchange support"""
from __future__ import print_function
import traceback
import sys
import getopt
import re
from itertools import chain
from random import sample
from tlsfuzzer.runner import Runner
from tlsfuzzer.messages import Connect, ClientHelloGenerator, \
ClientKeyExchangeGenerator, ChangeCipherSpecGenerator, \
FinishedGenerator, ApplicationDataGenerator, AlertGenerator
from tlsfuzzer.expect import ExpectServerHello, ExpectCertificate, \
ExpectServerHelloDone, ExpectChangeCipherSpec, ExpectFinished, \
ExpectAlert, ExpectClose, ExpectServerKeyExchange, \
ExpectApplicationData
from tlslite.constants import CipherSuite, AlertLevel, AlertDescription, \
ExtensionType
from tlsfuzzer.utils.lists import natural_sort_keys
# Script version printed in the summary output by main().
version = 3
def METHOD_NAME():
    """Print the command-line usage/help message for this script."""
    lines = (
        "Usage: <script-name> [-h hostname] [-p port] [[probe-name] ...]",
        " -h hostname name of the host to run the test against",
        " localhost by default",
        " -p port port number to use for connection, 4433 by default",
        " probe-name if present, will run only the probes with given",
        " names and not all of them, e.g \"sanity\"",
        " -e probe-name exclude the probe from the list of the ones run",
        " may be specified multiple times",
        " -x probe-name expect the probe to fail. When such probe passes despite being marked like this",
        " it will be reported in the test summary and the whole script will fail.",
        " May be specified multiple times.",
        " -X message expect the `message` substring in exception raised during",
        " execution of preceding expected failure probe",
        " usage: [-x probe-name] [-X exception], order is compulsory!",
        " -n num run 'num' or all(if 0) tests instead of default(all)",
        " (excluding \"sanity\" tests)",
        " --help this message",
    )
    # One joined print emits exactly the same stdout as line-by-line prints.
    print("\n".join(lines))
def main():
    """Test if server supports the DHE_RSA key exchange"""
    host = "localhost"
    port = 4433
    num_limit = None
    run_exclude = set()
    # Maps probe name -> expected exception substring (or None for "any").
    expected_failures = {}
    last_exp_tmp = None

    # Parse the command line; see the help function above for semantics.
    argv = sys.argv[1:]
    opts, args = getopt.getopt(argv, "h:p:e:x:X:n:", ["help"])
    for opt, arg in opts:
        if opt == '-h':
            host = arg
        elif opt == '-p':
            port = int(arg)
        elif opt == '-e':
            run_exclude.add(arg)
        elif opt == '-x':
            expected_failures[arg] = None
            last_exp_tmp = str(arg)
        elif opt == '-X':
            # -X qualifies the most recent -x with an expected message.
            if not last_exp_tmp:
                raise ValueError("-x has to be specified before -X")
            expected_failures[last_exp_tmp] = str(arg)
        elif opt == '-n':
            num_limit = int(arg)
        elif opt == '--help':
            METHOD_NAME()
            sys.exit(0)
        else:
            raise ValueError("Unknown option: {0}".format(opt))

    # Positional arguments select a subset of probes to run.
    if args:
        run_only = set(args)
    else:
        run_only = None

    conversations = {}

    # Build the "sanity" conversation: a full DHE_RSA handshake followed by
    # an HTTP GET and a clean close_notify shutdown.
    conversation = Connect(host, port)
    node = conversation
    ciphers = [CipherSuite.TLS_DHE_RSA_WITH_AES_128_CBC_SHA]
    node = node.add_child(ClientHelloGenerator(ciphers,
                                               extensions={ExtensionType.
                                                   renegotiation_info: None}))
    node = node.add_child(ExpectServerHello(extensions={ExtensionType.
                                                renegotiation_info: None}))
    node = node.add_child(ExpectCertificate())
    node = node.add_child(ExpectServerKeyExchange())
    node = node.add_child(ExpectServerHelloDone())
    node = node.add_child(ClientKeyExchangeGenerator())
    node = node.add_child(ChangeCipherSpecGenerator())
    node = node.add_child(FinishedGenerator())
    node = node.add_child(ExpectChangeCipherSpec())
    node = node.add_child(ExpectFinished())
    node = node.add_child(ApplicationDataGenerator(
        bytearray(b"GET / HTTP/1.0\n\n")))
    node = node.add_child(ExpectApplicationData())
    node = node.add_child(AlertGenerator(AlertLevel.warning,
                                         AlertDescription.close_notify))
    node = node.add_child(ExpectAlert())
    node.next_sibling = ExpectClose()
    conversations["sanity"] = conversation

    # run the conversation
    good = 0
    bad = 0
    xfail = 0
    xpass = 0
    failed = []
    xpassed = []
    if not num_limit:
        num_limit = len(conversations)

    # make sure that sanity test is run first and last
    # to verify that server was running and kept running throughout
    sanity_tests = [('sanity', conversations['sanity'])]
    if run_only:
        if num_limit > len(run_only):
            num_limit = len(run_only)
        regular_tests = [(k, v) for k, v in conversations.items() if
                         k in run_only]
    else:
        regular_tests = [(k, v) for k, v in conversations.items() if
                         (k != 'sanity') and k not in run_exclude]
    sampled_tests = sample(regular_tests, min(num_limit, len(regular_tests)))
    ordered_tests = chain(sanity_tests, sampled_tests, sanity_tests)

    for c_name, c_test in ordered_tests:
        if run_only and c_name not in run_only or c_name in run_exclude:
            continue
        print("{0} ...".format(c_name))

        runner = Runner(c_test)

        res = True
        exception = None
        try:
            runner.run()
        except Exception as exp:
            exception = exp
            print("Error while processing")
            print(traceback.format_exc())
            res = False

        # Classify the outcome, honouring expected failures (-x/-X).
        if c_name in expected_failures:
            if res:
                xpass += 1
                xpassed.append(c_name)
                print("XPASS-expected failure but test passed\n")
            else:
                if expected_failures[c_name] is not None and \
                        expected_failures[c_name] not in str(exception):
                    bad += 1
                    failed.append(c_name)
                    print("Expected error message: {0}\n"
                          .format(expected_failures[c_name]))
                else:
                    xfail += 1
                    print("OK-expected failure\n")
        else:
            if res:
                good += 1
                print("OK\n")
            else:
                bad += 1
                failed.append(c_name)

    # Summary report.
    print("Test end")
    print(20 * '=')
    print("version: {0}".format(version))
    print(20 * '=')
    print("TOTAL: {0}".format(len(sampled_tests) + 2 * len(sanity_tests)))
    print("SKIP: {0}".format(len(run_exclude.intersection(conversations.keys()))))
    print("PASS: {0}".format(good))
    print("XFAIL: {0}".format(xfail))
    print("FAIL: {0}".format(bad))
    print("XPASS: {0}".format(xpass))
    print(20 * '=')
    sort = sorted(xpassed, key=natural_sort_keys)
    if len(sort):
        print("XPASSED:\n\t{0}".format('\n\t'.join(repr(i) for i in sort)))
    sort = sorted(failed, key=natural_sort_keys)
    if len(sort):
        print("FAILED:\n\t{0}".format('\n\t'.join(repr(i) for i in sort)))

    # Non-zero exit status on any failure or unexpected pass.
    if bad or xpass:
        sys.exit(1)
# Script entry point: run the test suite only when executed directly.
if __name__ == "__main__":
    main()
# This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
A dialog to edit the keyboard shortcuts for an action.
"""
from PyQt5.QtCore import Qt, QTimer
from PyQt5.QtGui import QKeySequence
from PyQt5.QtWidgets import (
QDialog, QDialogButtonBox, QGridLayout, QHBoxLayout, QLabel, QRadioButton,
QVBoxLayout)
import app
from . import Separator
from .keysequencewidget import KeySequenceWidget
class ShortcutEditDialog(QDialog):
    """A modal dialog to view and/or edit keyboard shortcuts."""

    def __init__(self, parent=None, conflictCallback=None, *cbArgs):
        """conflictCallback is an optional method called when a shortcut is changed.

        cbArgs is optional arguments of the conflictCallback method.
        it should return the name of the potential conflict or a null value
        """
        super().__init__(parent)
        self.conflictCallback = conflictCallback
        self.cbArgs = cbArgs
        self.setMinimumWidth(400)
        # create gui
        layout = QVBoxLayout()
        layout.setSpacing(10)
        self.setLayout(layout)

        # Top row: action icon and explanatory label.
        top = QHBoxLayout()
        top.setSpacing(4)
        p = self.toppixmap = QLabel()
        l = self.toplabel = QLabel()
        top.addWidget(p)
        top.addWidget(l, 1)
        layout.addLayout(top)

        grid = QGridLayout()
        grid.setSpacing(4)
        grid.setColumnStretch(1, 2)
        layout.addLayout(grid)

        # Three mutually exclusive choices: default / none / custom.
        self.buttonDefault = QRadioButton(self, toggled=self.slotButtonDefaultToggled)
        self.buttonNone = QRadioButton(self)
        self.lconflictDefault = QLabel('test')
        self.lconflictDefault.setStyleSheet("color : red;")
        self.lconflictDefault.setVisible(False)
        self.buttonCustom = QRadioButton(self)
        grid.addWidget(self.buttonDefault, 0, 0, 1, 2)
        grid.addWidget(self.lconflictDefault, 1, 0, 1, 2)
        grid.addWidget(self.buttonNone, 2, 0, 1, 2)
        grid.addWidget(self.buttonCustom, 3, 0, 1, 2)

        # Up to four custom shortcuts, each with its own conflict label.
        self.keybuttons = []
        self.keylabels = []
        self.conflictlabels = []
        for num in range(4):
            l = QLabel(self)
            l.setStyleSheet("margin-left: 2em;")
            l.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
            b = KeySequenceWidget(self, num)
            b.keySequenceChanged.connect(self.slotKeySequenceChanged)
            l.setBuddy(b)
            self.keylabels.append(l)
            self.keybuttons.append(b)
            # Rows interleave shortcut widgets and conflict labels.
            grid.addWidget(l, num + 4 + num, 0)
            grid.addWidget(b, num + 4 + num, 1)
            lconflict = QLabel()
            lconflict.setStyleSheet("color : red;")
            self.conflictlabels.append(lconflict)
            lconflict.setVisible(False)
            grid.addWidget(lconflict, num + 5 + num, 0, 1, 2, Qt.AlignHCenter)

        layout.addWidget(Separator(self))

        b = QDialogButtonBox(self)
        b.setStandardButtons(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
        layout.addWidget(b)
        b.accepted.connect(self.accept)
        b.rejected.connect(self.reject)
        app.METHOD_NAME(self)

    def METHOD_NAME(self):
        # Set/refresh all translatable texts (called via the app module).
        self.setWindowTitle(app.caption(_("window title", "Edit Shortcut")))
        self.buttonNone.setText(_("&No shortcut"))
        self.buttonCustom.setText(_("Use a &custom shortcut:"))
        for num in range(4):
            self.keylabels[num].setText(_("Alternative #{num}:").format(num=num) if num else _("Primary shortcut:"))

    def slotKeySequenceChanged(self, num):
        """Called when one of the keysequence buttons has changed."""
        self.checkConflict(num)
        self.buttonCustom.setChecked(True)

    def slotButtonDefaultToggled(self, val):
        # Show a conflict warning when the default shortcut collides with
        # another action (as reported by conflictCallback).
        if self.conflictCallback is not None:
            if not val:
                self.lconflictDefault.setVisible(False)
            else:
                if self._default:
                    conflictList = []
                    for s in self._default:
                        conflictName = self.conflictCallback(s, *self.cbArgs)
                        if conflictName:
                            conflictList.append(conflictName)
                    if conflictList:
                        text = _("Conflict with: {name}").format(
                            name="<b>{}</b>".format(', '.join(conflictList)))
                        self.lconflictDefault.setText(text)
                        self.lconflictDefault.setVisible(True)
        # Resize after the label visibility change has been processed.
        QTimer.singleShot(0, self.adjustSize)

    def checkConflict(self, num):
        # Update the conflict label for custom shortcut `num`.
        if self.conflictCallback is not None:
            conflictName = self.conflictCallback(self.keybuttons[num].shortcut(), *self.cbArgs)
            if conflictName:
                text = _("Conflict with: {name}").format(
                    name=f"<b>{conflictName}</b>")
                self.conflictlabels[num].setText(text)
                self.conflictlabels[num].setVisible(True)
            else:
                self.conflictlabels[num].setVisible(False)
        QTimer.singleShot(0, self.adjustSize)

    def editAction(self, action, default=None):
        # load the action
        self._action = action
        self._default = default
        self.toplabel.setText('<p>{}</p>'.format(
            _("Here you can edit the shortcuts for {name}").format(
                name=f'<br/><b>{action.text()}</b>:')))
        self.toppixmap.setPixmap(action.icon().pixmap(32))
        shortcuts = action.shortcuts()
        self.buttonDefault.setVisible(bool(default))
        # Pre-select the radio button matching the action's current state.
        if default is not None and shortcuts == default:
            self.buttonDefault.setChecked(True)
        else:
            if shortcuts:
                self.buttonCustom.setChecked(True)
                for num, key in enumerate(shortcuts[:4]):
                    self.keybuttons[num].setShortcut(key)
                    self.checkConflict(num)
            else:
                self.buttonNone.setChecked(True)
        if default:
            ds = "; ".join(key.toString(QKeySequence.NativeText) for key in default)
        else:
            ds = _("no keyboard shortcut", "none")
        self.buttonDefault.setText(_("Use &default shortcut ({name})").format(name=ds))
        return self.exec_()

    def done(self, result):
        # On accept, write the chosen shortcuts back to the action.
        if result:
            shortcuts = []
            if self.buttonDefault.isChecked():
                shortcuts = self._default
            elif self.buttonCustom.isChecked():
                for num in range(4):
                    seq = self.keybuttons[num].shortcut()
                    if not seq.isEmpty():
                        shortcuts.append(seq)
            self._action.setShortcuts(shortcuts)
        super().done(result)
|
import json
import falcon
from mock import PropertyMock, MagicMock, patch
from ddt import ddt, data, unpack
from tests import RestTestBase
from monitorrent.rest.settings_proxy import SettingsProxyEnabled, SettingsProxy
from monitorrent.settings_manager import SettingsManager
@ddt
class SettingsProxyEnabledTest(RestTestBase):
    """REST tests for the /api/settings/proxy/enabled endpoint."""

    api_url = '/api/settings/proxy/enabled'

    @data(True, False)
    def test_get_is_proxy_enabled(self, value):
        # GET returns {'enabled': <bool>} from SettingsManager.
        settings_manager = SettingsManager()
        get_is_proxy_enabled_mock = MagicMock(return_value=value)
        settings_manager.get_is_proxy_enabled = get_is_proxy_enabled_mock
        settings_proxy_enabled_resource = SettingsProxyEnabled(settings_manager)
        self.api.add_route(self.api_url, settings_proxy_enabled_resource)

        body = self.simulate_request(self.api_url, decode='utf-8')
        self.assertEqual(self.srmock.status, falcon.HTTP_OK)
        self.assertTrue('application/json' in self.srmock.headers_dict['Content-Type'])
        result = json.loads(body)
        self.assertEqual(result, {'enabled': value})
        get_is_proxy_enabled_mock.assert_called_once_with()

    @data(True, False)
    def test_set_is_proxy_enabled(self, value):
        # PUT forwards the boolean to SettingsManager and returns 204.
        settings_manager = SettingsManager()
        set_is_proxy_enabled_mock = MagicMock()
        settings_manager.set_is_proxy_enabled = set_is_proxy_enabled_mock
        settings_proxy_enabled_resource = SettingsProxyEnabled(settings_manager)
        self.api.add_route(self.api_url, settings_proxy_enabled_resource)

        request = {'enabled': value}
        self.simulate_request(self.api_url, method="PUT", body=json.dumps(request))
        self.assertEqual(self.srmock.status, falcon.HTTP_NO_CONTENT)
        set_is_proxy_enabled_mock.assert_called_once_with(value)

    @data({'enabled': 'random_text'},
          {'enabled': 'True'},
          {'wrong_param': 'Value'},
          None)
    def test_bad_request(self, body):
        # Non-boolean, wrongly keyed, or empty payloads must yield 400.
        settings_manager = SettingsManager()
        settings_proxy_enabled_resource = SettingsProxyEnabled(settings_manager)
        self.api.add_route(self.api_url, settings_proxy_enabled_resource)

        self.simulate_request(self.api_url, method="PUT", body=json.dumps(body) if body else None)
        self.assertEqual(self.srmock.status, falcon.HTTP_BAD_REQUEST)
@ddt
class SettingsProxyTest(RestTestBase):
    """REST tests for the /api/settings/proxy resource (GET/PUT/DELETE).

    Fix: removed a stray trailing '|' artifact on the final assertion line.
    """

    api_url = '/api/settings/proxy'

    @data(('http', 'http://1.1.1.1:8888'), ('https', 'http://2.2.2.2:8888'))
    @unpack
    def test_get_proxy(self, key, proxy):
        # GET with ?key=... returns {'url': <proxy>} from SettingsManager.
        settings_manager = SettingsManager()
        get_proxy_mock = MagicMock(return_value=proxy)
        settings_manager.get_proxy = get_proxy_mock
        settings_proxy_resource = SettingsProxy(settings_manager)
        self.api.add_route(self.api_url, settings_proxy_resource)

        body = self.simulate_request(self.api_url, decode='utf-8', query_string="key="+key)
        self.assertEqual(self.srmock.status, falcon.HTTP_OK)
        self.assertTrue('application/json' in self.srmock.headers_dict['Content-Type'])
        result = json.loads(body)
        self.assertEqual(result, {'url': proxy})
        get_proxy_mock.assert_called_once_with(key)

    def test_get_proxy_not_found(self):
        # An unset proxy yields 404.
        settings_manager = SettingsManager()
        get_proxy_mock = MagicMock(return_value=None)
        settings_manager.get_proxy = get_proxy_mock
        settings_proxy_resource = SettingsProxy(settings_manager)
        self.api.add_route(self.api_url, settings_proxy_resource)

        self.simulate_request(self.api_url, decode='utf-8', query_string="key=http")
        self.assertEqual(self.srmock.status, falcon.HTTP_NOT_FOUND)
        get_proxy_mock.assert_called_once_with('http')

    @data(('http', 'http://1.1.1.1:8888'), ('https', 'http://2.2.2.2:8888'))
    @unpack
    def test_put_proxy(self, key, proxy):
        # PUT stores the url under the given key and returns 204.
        settings_manager = SettingsManager()
        set_proxy_mock = MagicMock()
        settings_manager.set_proxy = set_proxy_mock
        settings_proxy_resource = SettingsProxy(settings_manager)
        self.api.add_route(self.api_url, settings_proxy_resource)

        request = {'url': proxy}
        self.simulate_request(self.api_url, decode='utf-8', method='PUT', query_string="key="+key,
                              body=json.dumps(request))
        self.assertEqual(self.srmock.status, falcon.HTTP_NO_CONTENT)
        set_proxy_mock.assert_called_once_with(key, proxy)

    def test_put_proxy_bad_request_1(self):
        # PUT without a body must yield 400.
        settings_proxy_resource = SettingsProxy(SettingsManager())
        self.api.add_route(self.api_url, settings_proxy_resource)

        self.simulate_request(self.api_url, decode='utf-8', method='PUT', query_string="key=http")
        self.assertEqual(self.srmock.status, falcon.HTTP_BAD_REQUEST)

    def test_put_proxy_bad_request_2(self):
        # PUT with a null or missing 'url' must yield 400.
        settings_proxy_resource = SettingsProxy(SettingsManager())
        self.api.add_route(self.api_url, settings_proxy_resource)

        self.simulate_request(self.api_url, decode='utf-8', method='PUT', query_string="key=http",
                              body=json.dumps({'url': None}))
        self.assertEqual(self.srmock.status, falcon.HTTP_BAD_REQUEST)

        self.simulate_request(self.api_url, decode='utf-8', method='PUT', query_string="key=http",
                              body=json.dumps({'nourl': 'http://1.1.1.1:8888'}))
        self.assertEqual(self.srmock.status, falcon.HTTP_BAD_REQUEST)

    @data('http', 'https')
    def METHOD_NAME(self, key):
        # DELETE clears the proxy by storing None and returns 204.
        settings_manager = SettingsManager()
        set_proxy_mock = MagicMock()
        settings_manager.set_proxy = set_proxy_mock
        settings_proxy_resource = SettingsProxy(settings_manager)
        self.api.add_route(self.api_url, settings_proxy_resource)

        self.simulate_request(self.api_url, decode='utf-8', method='DELETE', query_string="key="+key)
        self.assertEqual(self.srmock.status, falcon.HTTP_NO_CONTENT)
        set_proxy_mock.assert_called_once_with(key, None)
"""SCons.Tool.rpm
Tool-specific initialization for rpm.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
The rpm tool calls the rpmbuild command. The first and only argument should a
tar.gz consisting of the source file and a specfile.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import os
import re
import shutil
import subprocess
import SCons.Builder
import SCons.Node.FS
import SCons.Util
import SCons.Action
import SCons.Defaults
def get_cmd(source, env):
    """Build the rpmbuild command line for *source* under *env*."""
    # A list-valued source means the tarball is its first element.
    tarball = source[0] if SCons.Util.is_List(source) else source
    return "%s %s %s" % (env['RPM'], env['RPMFLAGS'], tarball.get_abspath())
def build_rpm(target, source, env):
    """Action function: run rpmbuild on *source* to produce *target*.

    *source* must be (or start with) a tar.gz archive containing the
    sources plus the specfile.  rpmbuild runs inside a throwaway topdir
    created next to the first target, and every package it reports
    writing is copied onto the corresponding target node.

    Returns the rpmbuild exit status (0 on success); raises
    SCons.Errors.BuildError when rpmbuild fails.
    """
    # Create a fresh, temporary rpm build root next to the first target.
    tmpdir = os.path.join(os.path.dirname(target[0].get_abspath()), 'rpmtemp')
    if os.path.exists(tmpdir):
        shutil.rmtree(tmpdir)

    # rpmbuild insists on this directory layout inside the topdir.
    for d in ['RPMS', 'SRPMS', 'SPECS', 'BUILD']:
        os.makedirs(os.path.join(tmpdir, d))

    # Point rpmbuild at the temporary topdir via an rpm flag.
    env.Prepend(RPMFLAGS = '--define \'_topdir %s\'' % tmpdir)

    # Run rpmbuild, capturing stdout and stderr together.
    handle = subprocess.Popen(get_cmd(source, env),
                              stdout=subprocess.PIPE,
                              stderr=subprocess.STDOUT,
                              shell=True)
    with handle.stdout:
        output = SCons.Util.to_str(handle.stdout.read())
    status = handle.wait()

    if status:
        # Surface rpmbuild's combined output as the build error text.
        # NOTE(review): tmpdir is left behind on failure so the broken
        # build root can be inspected -- confirm this is intentional.
        raise SCons.Errors.BuildError(node=target[0],
                                      errstr=output,
                                      filename=str(target[0]))

    # XXX: assume that LC_ALL=C is set while running rpmbuild, so the
    # "Wrote:" lines are not localized.
    output_files = re.compile('Wrote: (.*)').findall(output)
    # Loop vars renamed from "output"/"input": the old names clobbered
    # the captured rpmbuild output and shadowed the input() builtin.
    for written, node in zip(output_files, target):
        rpm_output = os.path.basename(written)
        expected = os.path.basename(node.get_path())
        assert expected == rpm_output, "got %s but expected %s" % (rpm_output, expected)
        shutil.copy(written, node.get_abspath())
    # cleanup before leaving.
    shutil.rmtree(tmpdir)
    return status
def string_rpm(target, source, env):
    """Return the display string for the rpm build action."""
    # Prefer the user-supplied RPMCOMSTR; fall back to the raw command.
    try:
        comstr = env['RPMCOMSTR']
    except KeyError:
        comstr = get_cmd(source, env)
    return comstr
# Action object pairing the build function with its display string.
rpmAction = SCons.Action.Action(build_rpm, string_rpm)

# NOTE(review): the builder's action is '$RPMCOM' (which generate() points
# at rpmAction), not rpmAction directly, so users can override RPMCOM.
RpmBuilder = SCons.Builder.Builder(action = SCons.Action.Action('$RPMCOM', '$RPMCOMSTR'),
                                   source_scanner = SCons.Defaults.DirScanner,
                                   suffix = '$RPMSUFFIX')
def METHOD_NAME(env):
    """Add Builders and construction variables for rpm to an Environment."""
    # Install the Rpm builder only if the environment lacks one.
    builders = env['BUILDERS']
    if 'Rpm' not in builders:
        builders['Rpm'] = RpmBuilder
    # Defaults only; values already set by the user win.
    env.SetDefault(RPM='LC_ALL=C rpmbuild',
                   RPMFLAGS=SCons.Util.CLVar('-ta'),
                   RPMCOM=rpmAction,
                   RPMSUFFIX='.rpm')
def exists(env):
    """Return a truthy value if the rpmbuild executable can be found."""
    return env.Detect('rpmbuild')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
import unittest
from test import audiotests
from test import support
from audioop import byteswap
import io
import struct
import sys
import wave
class WaveTest(audiotests.AudioWriteTests,
               audiotests.AudioTestsWithSourceFile):
    """Shared audio read/write test machinery, bound to the wave module."""
    module = wave
class WavePCM8Test(WaveTest, unittest.TestCase):
    """8-bit PCM fixture parameters and expected frame data."""
    sndfilename = 'pluck-pcm8.wav'
    sndfilenframes = 3307
    nchannels = 2
    sampwidth = 1
    framerate = 11025
    nframes = 48
    comptype = 'NONE'
    compname = 'not compressed'
    # First 48 frames of the fixture file (whitespace ignored by fromhex).
    frames = bytes.fromhex("""\
      827F CB80 B184 0088 4B86 C883 3F81 837E 387A 3473 A96B 9A66 \
      6D64 4662 8E60 6F60 D762 7B68 936F 5877 177B 757C 887B 5F7B \
      917A BE7B 3C7C E67F 4F84 C389 418E D192 6E97 0296 FF94 0092 \
      C98E D28D 6F8F 4E8F 648C E38A 888A AB8B D18E 0B91 368E C48A \
      """)
class WavePCM16Test(WaveTest, unittest.TestCase):
    """16-bit PCM fixture parameters and expected frame data."""
    sndfilename = 'pluck-pcm16.wav'
    sndfilenframes = 3307
    nchannels = 2
    sampwidth = 2
    framerate = 11025
    nframes = 48
    comptype = 'NONE'
    compname = 'not compressed'
    frames = bytes.fromhex("""\
      022EFFEA 4B5C00F9 311404EF 80DC0843 CBDF06B2 48AA03F3 BFE701B2 036BFE7C \
      B857FA3E B4B2F34F 2999EBCA 1A5FE6D7 EDFCE491 C626E279 0E05E0B8 EF27E02D \
      5754E275 FB31E843 1373EF89 D827F72C 978BFB7A F5F7FC11 0866FB9C DF30FB42 \
      117FFA36 3EE4FB5D BC75FCB6 66D5FF5F CF16040E 43220978 C1BC0EC8 511F12A4 \
      EEDF1755 82061666 7FFF1446 80001296 499C0EB2 52BA0DB9 EFB70F5C CE400FBC \
      E4B50CEB 63440A5A 08CA0A1F 2BBA0B0B 51460E47 8BCB113C B6F50EEA 44150A59 \
      """)
    # Fixture data is little-endian; swap on big-endian hosts.
    if sys.byteorder != 'big':
        frames = byteswap(frames, 2)
class WavePCM24Test(WaveTest, unittest.TestCase):
    """24-bit PCM fixture parameters and expected frame data."""
    sndfilename = 'pluck-pcm24.wav'
    sndfilenframes = 3307
    nchannels = 2
    sampwidth = 3
    framerate = 11025
    nframes = 48
    comptype = 'NONE'
    compname = 'not compressed'
    frames = bytes.fromhex("""\
      022D65FFEB9D 4B5A0F00FA54 3113C304EE2B 80DCD6084303 \
      CBDEC006B261 48A99803F2F8 BFE82401B07D 036BFBFE7B5D \
      B85756FA3EC9 B4B055F3502B 299830EBCB62 1A5CA7E6D99A \
      EDFA3EE491BD C625EBE27884 0E05A9E0B6CF EF2929E02922 \
      5758D8E27067 FB3557E83E16 1377BFEF8402 D82C5BF7272A \
      978F16FB7745 F5F865FC1013 086635FB9C4E DF30FCFB40EE \
      117FE0FA3438 3EE6B8FB5AC3 BC77A3FCB2F4 66D6DAFF5F32 \
      CF13B9041275 431D69097A8C C1BB600EC74E 5120B912A2BA \
      EEDF641754C0 8207001664B7 7FFFFF14453F 8000001294E6 \
      499C1B0EB3B2 52B73E0DBCA0 EFB2B20F5FD8 CE3CDB0FBE12 \
      E4B49C0CEA2D 6344A80A5A7C 08C8FE0A1FFE 2BB9860B0A0E \
      51486F0E44E1 8BCC64113B05 B6F4EC0EEB36 4413170A5B48 \
      """)
    # Fixture data is little-endian; swap on big-endian hosts.
    if sys.byteorder != 'big':
        frames = byteswap(frames, 3)
class WavePCM32Test(WaveTest, unittest.TestCase):
    """32-bit PCM fixture parameters and expected frame data."""
    sndfilename = 'pluck-pcm32.wav'
    sndfilenframes = 3307
    nchannels = 2
    sampwidth = 4
    framerate = 11025
    nframes = 48
    comptype = 'NONE'
    compname = 'not compressed'
    frames = bytes.fromhex("""\
      022D65BCFFEB9D92 4B5A0F8000FA549C 3113C34004EE2BC0 80DCD680084303E0 \
      CBDEC0C006B26140 48A9980003F2F8FC BFE8248001B07D92 036BFB60FE7B5D34 \
      B8575600FA3EC920 B4B05500F3502BC0 29983000EBCB6240 1A5CA7A0E6D99A60 \
      EDFA3E80E491BD40 C625EB80E27884A0 0E05A9A0E0B6CFE0 EF292940E0292280 \
      5758D800E2706700 FB3557D8E83E1640 1377BF00EF840280 D82C5B80F7272A80 \
      978F1600FB774560 F5F86510FC101364 086635A0FB9C4E20 DF30FC40FB40EE28 \
      117FE0A0FA3438B0 3EE6B840FB5AC3F0 BC77A380FCB2F454 66D6DA80FF5F32B4 \
      CF13B980041275B0 431D6980097A8C00 C1BB60000EC74E00 5120B98012A2BAA0 \
      EEDF64C01754C060 820700001664B780 7FFFFFFF14453F40 800000001294E6E0 \
      499C1B000EB3B270 52B73E000DBCA020 EFB2B2E00F5FD880 CE3CDB400FBE1270 \
      E4B49CC00CEA2D90 6344A8800A5A7CA0 08C8FE800A1FFEE0 2BB986C00B0A0E00 \
      51486F800E44E190 8BCC6480113B0580 B6F4EC000EEB3630 441317800A5B48A0 \
      """)
    # Fixture data is little-endian; swap on big-endian hosts.
    if sys.byteorder != 'big':
        frames = byteswap(frames, 4)
class MiscTestCase(unittest.TestCase):
    """Module-surface sanity checks."""
    def test__all__(self):
        # __all__ must cover the public names; WAVE_FORMAT_PCM is internal.
        support.check__all__(self, wave, not_exported={'WAVE_FORMAT_PCM'})
class WaveLowLevelTest(unittest.TestCase):
    """Error handling for structurally invalid WAV data."""

    def _open(self, payload):
        # Parse *payload* as an in-memory WAV stream.
        return wave.open(io.BytesIO(payload))

    def _check_error(self, payload, regex):
        # Opening *payload* must raise wave.Error matching *regex*.
        with self.assertRaisesRegex(wave.Error, regex):
            self._open(payload)

    def METHOD_NAME(self):
        # Too short for even a single chunk header: plain EOF.
        with self.assertRaises(EOFError):
            self._open(b'SPAM')

    def test_read_no_riff_chunk(self):
        self._check_error(b'SPAM' + struct.pack('<L', 0),
                          'file does not start with RIFF id')

    def test_read_not_wave(self):
        self._check_error(b'RIFF' + struct.pack('<L', 4) + b'SPAM',
                          'not a WAVE file')

    def test_read_no_fmt_no_data_chunk(self):
        self._check_error(b'RIFF' + struct.pack('<L', 4) + b'WAVE',
                          'fmt chunk and/or data chunk missing')

    def test_read_no_data_chunk(self):
        payload = b'RIFF' + struct.pack('<L', 28) + b'WAVE'
        payload += b'fmt ' + struct.pack('<LHHLLHH', 16, 1, 1, 11025, 11025, 1, 8)
        self._check_error(payload, 'fmt chunk and/or data chunk missing')

    def test_read_no_fmt_chunk(self):
        payload = b'RIFF' + struct.pack('<L', 12) + b'WAVE'
        payload += b'data' + struct.pack('<L', 0)
        self._check_error(payload, 'data chunk before fmt chunk')

    def test_read_wrong_form(self):
        payload = b'RIFF' + struct.pack('<L', 36) + b'WAVE'
        payload += b'fmt ' + struct.pack('<LHHLLHH', 16, 2, 1, 11025, 11025, 1, 1)
        payload += b'data' + struct.pack('<L', 0)
        self._check_error(payload, 'unknown format: 2')

    def test_read_wrong_number_of_channels(self):
        payload = b'RIFF' + struct.pack('<L', 36) + b'WAVE'
        payload += b'fmt ' + struct.pack('<LHHLLHH', 16, 1, 0, 11025, 11025, 1, 8)
        payload += b'data' + struct.pack('<L', 0)
        self._check_error(payload, 'bad # of channels')

    def test_read_wrong_sample_width(self):
        payload = b'RIFF' + struct.pack('<L', 36) + b'WAVE'
        payload += b'fmt ' + struct.pack('<LHHLLHH', 16, 1, 1, 11025, 11025, 1, 0)
        payload += b'data' + struct.pack('<L', 0)
        self._check_error(payload, 'bad sample width')
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the coverage_uploader module."""
import datetime
import os
from pyfakefs import fake_filesystem_unittest
from clusterfuzz._internal.fuzzing import coverage_uploader
from clusterfuzz._internal.tests.test_libs import helpers as test_helpers
from clusterfuzz._internal.tests.test_libs import test_utils
def _mock_config_get(_, param):
"""Handle test configuration options."""
if param == 'coverage.fuzzer-testcases.bucket':
return 'test-coverage-testcases'
return None
class FakeGSUtilRunner(object):
    """Fake gsutil runner for testing."""

    # Class-level so tests can inspect every rsync invocation, across
    # however many runner instances the code under test creates.
    rsync_calls = []

    def rsync(self, source, destination, exclusion_pattern=None):
        # Record the call instead of shelling out to gsutil.
        call = (source, destination, exclusion_pattern)
        FakeGSUtilRunner.rsync_calls.append(call)
class UploadTestsToCloudStorageTest(fake_filesystem_unittest.TestCase):
    """Tests for upload_tests_to_cloud_storage."""

    def setUp(self):
        """Setup for upload tests to cloud storage test."""
        test_helpers.patch_environ(self)
        test_helpers.patch(self, [
            'clusterfuzz._internal.base.utils.utcnow',
            'clusterfuzz._internal.config.local_config.ProjectConfig.get',
            'clusterfuzz._internal.datastore.locks.acquire_lock',
            'clusterfuzz._internal.datastore.locks.release_lock',
            'clusterfuzz._internal.google_cloud_utils.gsutil.GSUtilRunner',
            'clusterfuzz._internal.google_cloud_utils.storage.list_blobs',
            'clusterfuzz._internal.google_cloud_utils.storage.read_data',
            'clusterfuzz._internal.google_cloud_utils.storage.write_data',
        ])
        test_utils.set_up_pyfakefs(self)
        self.mock.write_data.return_value = True
        # Pin "now" so the dated GCS paths in the expectations stay stable.
        self.mock.utcnow.side_effect = lambda: datetime.datetime(2018, 11, 1, 0, 0)
        FakeGSUtilRunner.rsync_calls = []
        self.mock.GSUtilRunner.side_effect = FakeGSUtilRunner
        self.mock.get.side_effect = _mock_config_get
        os.environ['BOT_NAME'] = 'test-bot'
        os.environ['BOT_TMPDIR'] = '/tmp'
        os.environ['FAIL_RETRIES'] = '1'

    def test_tests_created_in_correct_bucket(self):
        """Ensure that we invoke gsutil correctly to store tests."""
        files = [
            '/testcases/a/file1.txt', '/testcases/file2.txt',
            '/something/b/file3.txt', '/data/f/g/file4.txt'
        ]
        coverage_uploader.upload_testcases_if_needed('test_fuzzer', files,
                                                     '/testcases', '/data')
        # file3.txt lives outside both roots, so it must not be listed.
        self.mock.write_data.assert_called_with(
            b'a/file1.txt\nfile2.txt\nf/g/file4.txt',
            'gs://test-coverage-testcases/2018-11-01/test_fuzzer/'
            '5b680a295e1f3a81160a0bd71ca2abbcb8d19521/file_list.txt')
        self.assertEqual(
            FakeGSUtilRunner.rsync_calls,
            [('/testcases', 'gs://test-coverage-testcases/'
              '2018-11-01/test_fuzzer/5b680a295e1f3a81160a0bd71ca2abbcb8d19521',
              None),
             ('/data', 'gs://test-coverage-testcases/'
              '2018-11-01/test_fuzzer/5b680a295e1f3a81160a0bd71ca2abbcb8d19521',
              '(?!.*fuzz-)'),
             ('/data', 'gs://test-coverage-testcases/'
              '2018-11-01/test_fuzzer/5b680a295e1f3a81160a0bd71ca2abbcb8d19521',
              '(?!.*resource)')])

    def test_empty_testcases_list(self):
        """Ensure that we do nothing when we have no testcases."""
        coverage_uploader.upload_testcases_if_needed('test_fuzzer', [],
                                                     '/testcases', '/data')
        self.assertEqual(self.mock.write_data.call_count, 0)
        self.assertEqual(FakeGSUtilRunner.rsync_calls, [])

    def METHOD_NAME(self):
        """Ensure that we cap number of uploaded testcases."""
        files = ['/testcases/file%s' % i for i in range(20000)]
        coverage_uploader.upload_testcases_if_needed('test_fuzzer', files,
                                                     '/testcases', '/data')
        # Only the first 1000 of the 20000 files should be listed.
        filtered_files_list = '\n'.join(
            ['file%s' % i for i in range(1000)]).encode('utf-8')
        self.mock.write_data.assert_called_with(
            filtered_files_list,
            'gs://test-coverage-testcases/2018-11-01/test_fuzzer/'
            '5b680a295e1f3a81160a0bd71ca2abbcb8d19521/file_list.txt')
        self.assertEqual(
            FakeGSUtilRunner.rsync_calls,
            [('/testcases', 'gs://test-coverage-testcases/'
              '2018-11-01/test_fuzzer/5b680a295e1f3a81160a0bd71ca2abbcb8d19521',
              None)])
import copy
import operator
from functools import reduce
from typing import List
from colossalai.auto_parallel.tensor_shard.node_handler.strategy.strategy_generator import FollowingStrategyGenerator
from colossalai.auto_parallel.tensor_shard.sharding_strategy import (
CommAction,
CommType,
MemoryCost,
ShardingStrategy,
TrainCycleItem,
)
from colossalai.auto_parallel.tensor_shard.utils import (
check_keep_sharding_status,
detect_reshape_mapping,
infer_output_dim_partition_dict,
)
from colossalai.tensor.shape_consistency import CollectiveCommPattern
__all__ = ['SoftmaxGenerator']
class SoftmaxGenerator(FollowingStrategyGenerator):
    """
    SoftmaxGenerator is used to generate strategies for torch.nn.Softmax or F.softmax.
    """

    def validate(self) -> bool:
        return super().validate()

    def update_compute_cost(self, strategy: ShardingStrategy):
        '''
        Compute the computation cost per device with this specific strategy.
        '''
        sharded_input_shape = strategy.sharding_specs[self.op_data['input']].get_sharded_shape_per_device()
        sharded_output_shape = strategy.sharding_specs[self.op_data['output']].get_sharded_shape_per_device()
        input_size_product = reduce(operator.mul, sharded_input_shape)
        output_size_product = reduce(operator.mul, sharded_output_shape)
        # Forward counts ~2 ops per output element; backward ~1 per input.
        forward_compute_cost = output_size_product * 2
        backward_compute_cost = input_size_product
        total_compute_cost = forward_compute_cost + backward_compute_cost
        compute_cost = TrainCycleItem(fwd=forward_compute_cost, bwd=backward_compute_cost, total=total_compute_cost)
        strategy.compute_cost = compute_cost

    def METHOD_NAME(self, strategy: ShardingStrategy):
        '''
        Compute the memory cost per device with this specific strategy.
        '''
        forward_size_mapping = {
            'input': self._compute_size_in_bytes(strategy, "input"),
            'output': self._compute_size_in_bytes(strategy, "output")
        }
        # Backward only materializes the input gradient, not the output.
        backward_size_mapping = copy.deepcopy(forward_size_mapping)
        backward_size_mapping.pop("output")
        # compute fwd cost incurred
        # fwd_cost = input + output
        fwd_activation_cost = sum([v for k, v in forward_size_mapping.items() if not self.is_param(k)])
        fwd_parameter_cost = sum([v for k, v in forward_size_mapping.items() if self.is_param(k)])
        fwd_mem_cost = MemoryCost(activation=fwd_activation_cost, parameter=fwd_parameter_cost)
        # compute bwd cost incurred
        # bwd_cost = input_grad
        bwd_activation_cost = sum([v for k, v in backward_size_mapping.items() if not self.is_param(k)])
        bwd_parameter_cost = sum([v for k, v in backward_size_mapping.items() if self.is_param(k)])
        bwd_mem_cost = MemoryCost(activation=bwd_activation_cost, parameter=bwd_parameter_cost)
        # compute total cost
        total_mem_cost = MemoryCost(activation=fwd_activation_cost + bwd_activation_cost,
                                    parameter=fwd_parameter_cost + bwd_parameter_cost)
        memory_cost = TrainCycleItem(fwd=fwd_mem_cost, bwd=bwd_mem_cost, total=total_mem_cost)
        strategy.memory_cost = memory_cost

    def collate_strategies(self) -> List[ShardingStrategy]:
        """Derive one strategy per predecessor strategy, dropping any
        sharding on the softmax dimension (softmax must see that whole dim).
        """
        strategy_list = []
        for index, strategy in enumerate(self.predecessor_node.strategies_vector):
            dim_partition_dict_mapping = {}
            communication_action_mapping = {}
            input_sharding_spec = strategy.output_sharding_specs[self.op_data["input"]]
            dim_partition_dict_for_input = copy.deepcopy(input_sharding_spec.dim_partition_dict)
            softmax_dim = self.op_data['softmax_dim'].data
            # The softmax dimension cannot stay sharded; un-shard it.
            if softmax_dim in dim_partition_dict_for_input:
                recover_dims = dim_partition_dict_for_input.pop(softmax_dim)
            dim_partition_dict_for_output = copy.deepcopy(dim_partition_dict_for_input)
            dim_partition_dict_mapping = {
                "input": dim_partition_dict_for_input,
                "output": dim_partition_dict_for_output,
            }
            sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping)
            # add index into name to pass the duplicated check
            # we keep same strategies with different name for node merging, and it will not increase the searching space,
            # because in solver, this node will be merged into other nodes, and solver will not create a new variable for this node.
            name = f'{sharding_spec_mapping["input"].sharding_sequence} -> {sharding_spec_mapping["output"].sharding_sequence}_{index}'
            strategy = self.get_sharding_strategy(name=name,
                                                  sharding_spec_mapping=sharding_spec_mapping,
                                                  communication_action_mapping=communication_action_mapping)
            strategy_list.append(strategy)
        return strategy_list
import logging
import threading
log = logging.getLogger(__name__)
class LockError(Exception):
    """Raised when the read/write lock protocol is violated."""
    pass
class ReadWriteMutex(object):
    """A mutex which allows multiple readers, single writer.

    :class:`.ReadWriteMutex` uses a Python ``threading.Condition``
    to provide this functionality across threads within a process.

    The Beaker package also contained a file-lock based version
    of this concept, so that readers/writers could be synchronized
    across processes with a common filesystem.  A future Dogpile
    release may include this additional class at some point.
    """

    def __init__(self):
        """Initialize the reader count, writer slot and condition var."""
        # counts how many asynchronous methods are executing
        self.async_ = 0
        # pointer to thread that is the current sync operation
        self.current_sync_operation = None
        # condition object to lock on
        self.condition = threading.Condition(threading.Lock())

    def acquire_read_lock(self, wait=True):
        """Acquire the 'read' lock.

        With ``wait=False``, returns ``False`` instead of blocking when a
        writer holds (or is waiting for) the lock; returns ``True`` on a
        successful non-blocking acquire.
        """
        self.condition.acquire()
        try:
            # see if a synchronous operation is waiting to start
            # or is already running, in which case we wait (or just
            # give up and return)
            if wait:
                while self.current_sync_operation is not None:
                    self.condition.wait()
            else:
                if self.current_sync_operation is not None:
                    return False
            self.async_ += 1
            log.debug("%s acquired read lock", self)
        finally:
            self.condition.release()
        if not wait:
            return True

    def METHOD_NAME(self):
        """Release the 'read' lock."""
        self.condition.acquire()
        try:
            self.async_ -= 1
            # check if we are the last asynchronous reader thread
            # out the door.
            if self.async_ == 0:
                # yes. so if a sync operation is waiting, notify_all to wake
                # it up
                if self.current_sync_operation is not None:
                    self.condition.notify_all()
            elif self.async_ < 0:
                # more releases than acquires: caller bug.
                raise LockError(
                    "Synchronizer error - too many "
                    "release_read_locks called"
                )
            log.debug("%s released read lock", self)
        finally:
            self.condition.release()

    def acquire_write_lock(self, wait=True):
        """Acquire the 'write' lock.

        With ``wait=False``, returns ``False`` instead of blocking when the
        lock cannot be taken immediately; ``True`` on success.
        """
        self.condition.acquire()
        try:
            # here, we are not a synchronous reader, and after returning,
            # assuming waiting or immediate availability, we will be.
            if wait:
                # if another sync is working, wait
                while self.current_sync_operation is not None:
                    self.condition.wait()
            else:
                # if another sync is working,
                # we dont want to wait, so forget it
                if self.current_sync_operation is not None:
                    return False
            # establish ourselves as the current sync
            # this indicates to other read/write operations
            # that they should wait until this is None again
            self.current_sync_operation = threading.current_thread()
            # now wait again for asyncs to finish
            if self.async_ > 0:
                if wait:
                    # wait
                    self.condition.wait()
                else:
                    # we dont want to wait, so forget it
                    self.current_sync_operation = None
                    return False
            log.debug("%s acquired write lock", self)
        finally:
            self.condition.release()
        if not wait:
            return True

    def release_write_lock(self):
        """Release the 'write' lock.

        Raises :class:`.LockError` if called by a thread other than the
        one that acquired the write lock.
        """
        self.condition.acquire()
        try:
            if self.current_sync_operation is not threading.current_thread():
                raise LockError(
                    "Synchronizer error - current thread doesn't "
                    "have the write lock"
                )
            # reset the current sync operation so
            # another can get it
            self.current_sync_operation = None
            # tell everyone to get ready
            self.condition.notify_all()
            log.debug("%s released write lock", self)
        finally:
            # everyone go !!
            self.condition.release()
# -------------------------------------------------------------------------
#
# Part of the CodeChecker project, under the Apache License v2.0 with
# LLVM Exceptions. See LICENSE for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# -------------------------------------------------------------------------
""" Unit tests for understanding product URLs. """
import unittest
from codechecker_client.product import expand_whole_protocol_and_port, \
split_server_url, split_product_url
def METHOD_NAME(protocol=None, port=None):
    """Return the protocol that expansion yields for the given inputs."""
    expanded_protocol, _ = expand_whole_protocol_and_port(protocol, port)
    return expanded_protocol
def expected_port(protocol=None, port=None):
    """Return the port that expansion yields for the given inputs."""
    _, expanded_port = expand_whole_protocol_and_port(protocol, port)
    return expanded_port
class product_urlTest(unittest.TestCase):
    """
    Testing the product and server URL splitter.
    """

    def testFullURL(self):
        """
        Whole product URL understanding.
        """
        def test(host, port, name, protocol=None):
            # Build "[protocol://]host:port/name" and verify the round trip.
            url = ''.join([protocol + "://" if protocol else "",
                           host, ":", str(port), "/", name])
            sprotocol, shost, sport, sname = split_product_url(url)
            self.assertEqual(sprotocol, METHOD_NAME(protocol, port))
            self.assertEqual(shost, host)
            self.assertEqual(sport, expected_port(protocol, port))
            self.assertEqual(sname, name)

        test("localhost", 8001, "Default")
        test("[::1]", 8001, "Default")
        test("[f000:baa5::f00d]", 8080, "Foo")
        test("localhost", 8002, "MyProduct")
        test("another.server", 80, "CodeChecker", "http")
        test("very-secure.another.server", 443, "CodeChecker", "https")
        test("contains-a-port-overri.de", 1234, "PRD", "https")

    def testProductName(self):
        """
        Understanding only a product name specified.
        """
        def test(name, protocol=None):
            # Bare product name: host/port fall back to defaults.
            url = ''.join([protocol + "://" if protocol else "", name])
            sprotocol, shost, sport, sname = split_product_url(url)
            self.assertEqual(sprotocol, METHOD_NAME(protocol, None))
            self.assertEqual(shost, "localhost")
            self.assertEqual(sport, expected_port(protocol, None))
            self.assertEqual(sname, name)

        test("Default")
        test("Default", "http")
        test("MyProduct")
        test("Enterprise-Product", "https")

    def testHostAndProductName(self):
        """
        Understanding a host and a product name specified.
        """
        def test(host, name, protocol=None):
            url = ''.join([protocol + "://" if protocol else "",
                           host, "/", name])
            sprotocol, shost, sport, sname = split_product_url(url)
            self.assertEqual(sprotocol, METHOD_NAME(protocol, None))
            self.assertEqual(shost, host)
            self.assertEqual(sport, expected_port(protocol, None))
            self.assertEqual(sname, name)

        test("localhost", "Default")
        test("[::1]", "Default")
        test("[f000:baa5::f00d]", "Default")
        test("otherhost", "awesome123", "http")
        # 8080/MyProduct as if 8080 was a host name.
        test("8080", "MyProduct")
        test("super", "verygood", "https")

    def testBadProductNames(self):
        """
        Parser throws on bad product URLs?
        """
        with self.assertRaises(ValueError):
            split_product_url("123notaproductname")
        with self.assertRaises(ValueError):
            split_product_url("localhost//containsExtraChar")
        with self.assertRaises(ValueError):
            split_product_url("in:valid:format/product")
        with self.assertRaises(ValueError):
            split_product_url("localhost:12PortIsANumber34/Foo")
        with self.assertRaises(ValueError):
            split_product_url("codechecker://codecheker.com/Baz")
        with self.assertRaises(ValueError):
            # Make sure we don't understand "https://foobar.baz:1234" as
            # HTTPS, localhost, 443, and "foobar.baz:1234" product name.
            split_product_url("https://foobar.bar:1234")
        with self.assertRaises(ValueError):
            split_product_url("http://::1:8080/Default")

    def testFullServerURL(self):
        """
        Whole server URL understanding.
        """
        def test(host, port, protocol=None):
            url = ''.join([protocol + "://" if protocol else "",
                           host, ":", str(port)])
            sprotocol, shost, sport = split_server_url(url)
            self.assertEqual(sprotocol, METHOD_NAME(protocol, port))
            self.assertEqual(shost, host)
            self.assertEqual(sport, expected_port(protocol, port))

        test("localhost", 8001)
        test("localhost", 8002)
        test("1hostname.can.begin.with.digits", 9999)
        test("another.server", 80, "http")
        test("very-secure.another.server", 443, 'https')

        # A product path after the port must be ignored by the server split.
        sprotocol, shost, sport = \
            split_server_url('https://someserver:1234/Product')
        self.assertEqual(sprotocol, 'https')
        self.assertEqual(shost, 'someserver')
        self.assertEqual(sport, 1234)

        sprotocol, shost, sport = \
            split_server_url('http://[::1]:1234/Product')
        self.assertEqual(sprotocol, 'http')
        self.assertEqual(shost, '[::1]')
        self.assertEqual(sport, 1234)

    def testHostname(self):
        """
        Understanding only a hostname specified for server URLs.
        """
        def test(host, protocol=None):
            # Bare hostname: port derives from the protocol's default.
            url = ''.join([protocol + "://" if protocol else "", host])
            sprotocol, shost, sport = split_server_url(url)
            self.assertEqual(sprotocol, METHOD_NAME(protocol, None))
            self.assertEqual(shost, host)
            self.assertEqual(sport, expected_port(protocol, None))

        test("codechecker")  # Port: 8001
        test("[::1]")  # Port: 8001
        test("codechecker", "http")  # Port: 80
        test("codechecker.local")  # Port: 8001
        test("www.example.org", "https")  # Port: 443

    def testBadServerURLs(self):
        """
        Parser throws on bad server URLs?
        """
        with self.assertRaises(ValueError):
            split_server_url("in:valid:format")
        with self.assertRaises(ValueError):
            split_server_url("localhost:12PortIsANumber34")
        with self.assertRaises(ValueError):
            split_server_url("whatever://whatev.er")
        with self.assertRaises(ValueError):
            split_server_url("http://::1:8081")
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Tuple
import torch
from ..transforms import matrix_to_rotation_6d
from .cameras import PerspectiveCameras
LOGGER = logging.getLogger(__name__)
def _cameras_from_opencv_projection(
    R: torch.Tensor,
    tvec: torch.Tensor,
    camera_matrix: torch.Tensor,
    image_size: torch.Tensor,
) -> PerspectiveCameras:
    """Build PyTorch3D `PerspectiveCameras` from OpenCV-style parameters.

    Args:
        R: batch of 3x3 rotation matrices (OpenCV world-to-camera).
        tvec: batch of translation vectors.
        camera_matrix: batch of 3x3 intrinsic matrices K.
        image_size: batch of image sizes, ordered (height, width).
    """
    # fx, fy come from K's diagonal; the principal point from its last column.
    focal_length = torch.stack([camera_matrix[:, 0, 0], camera_matrix[:, 1, 1]], dim=-1)
    principal_point = camera_matrix[:, :2, 2]

    # Retype the image_size correctly and flip to width, height.
    image_size_wh = image_size.to(R).flip(dims=(1,))

    # Screen to NDC conversion:
    # For non square images, we scale the points such that smallest side
    # has range [-1, 1] and the largest side has range [-u, u], with u > 1.
    # This convention is consistent with the PyTorch3D renderer, as well as
    # the transformation function `get_ndc_to_screen_transform`.
    scale = image_size_wh.to(R).min(dim=1, keepdim=True)[0] / 2.0
    scale = scale.expand(-1, 2)
    c0 = image_size_wh / 2.0

    # Get the PyTorch3D focal length and principal point.
    focal_pytorch3d = focal_length / scale
    p0_pytorch3d = -(principal_point - c0) / scale

    # For R, T we flip x, y axes (opencv screen space has an opposite
    # orientation of screen axes).
    # We also transpose R (opencv multiplies points from the opposite=left side).
    R_pytorch3d = R.clone().permute(0, 2, 1)
    T_pytorch3d = tvec.clone()
    R_pytorch3d[:, :, :2] *= -1
    T_pytorch3d[:, :2] *= -1

    return PerspectiveCameras(
        R=R_pytorch3d,
        T=T_pytorch3d,
        focal_length=focal_pytorch3d,
        principal_point=p0_pytorch3d,
        image_size=image_size,
        device=R.device,
    )
def _opencv_from_cameras_projection(
    cameras: PerspectiveCameras,
    image_size: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """Convert `PerspectiveCameras` to OpenCV-convention (R, tvec, K).

    Inverse of `_cameras_from_opencv_projection`: undoes the axis flips and
    the transpose of R, and rescales intrinsics from NDC back to pixels.
    `image_size` is ordered (height, width) per batch element.
    """
    # Clone before mutating so the input cameras are left untouched.
    R_pytorch3d = cameras.R.clone()
    T_pytorch3d = cameras.T.clone()
    focal_pytorch3d = cameras.focal_length
    p0_pytorch3d = cameras.principal_point
    T_pytorch3d[:, :2] *= -1
    R_pytorch3d[:, :, :2] *= -1
    tvec = T_pytorch3d
    R = R_pytorch3d.permute(0, 2, 1)

    # Retype the image_size correctly and flip to width, height.
    image_size_wh = image_size.to(R).flip(dims=(1,))

    # NDC to screen conversion.
    scale = image_size_wh.to(R).min(dim=1, keepdim=True)[0] / 2.0
    scale = scale.expand(-1, 2)
    c0 = image_size_wh / 2.0

    principal_point = -p0_pytorch3d * scale + c0
    focal_length = focal_pytorch3d * scale

    # Reassemble the 3x3 intrinsic matrix K from fx, fy, cx, cy.
    camera_matrix = torch.zeros_like(R)
    camera_matrix[:, :2, 2] = principal_point
    camera_matrix[:, 2, 2] = 1.0
    camera_matrix[:, 0, 0] = focal_length[:, 0]
    camera_matrix[:, 1, 1] = focal_length[:, 1]
    return R, tvec, camera_matrix
def METHOD_NAME(
    R: torch.Tensor,
    tvec: torch.Tensor,
    camera_matrix: torch.Tensor,
    image_size: torch.Tensor,
    znear: float = 0.1,
) -> torch.Tensor:
    """Convert batched OpenCV camera parameters to Pulsar parameter vectors.

    Each output row concatenates: camera position (3), rotation as a 6D
    representation (6), focal length (1), sensor width (1) and principal
    point offsets cx, cy (2).

    Args:
        R: (N, 3, 3) rotation matrices.
        tvec: (N, 3) or (N, 3, 1) translation vectors.
        camera_matrix: (N, 3, 3) intrinsic matrices.
        image_size: (N, 2) image sizes as (height, width); all images in
            the batch must share one size.
        znear: near clipping plane used to place the virtual sensor.
    """
    assert len(camera_matrix.size()) == 3, "This function requires batched inputs!"
    assert len(R.size()) == 3, "This function requires batched inputs!"
    assert len(tvec.size()) in (2, 3), "This function reuqires batched inputs!"

    # Validate parameters.
    image_size_wh = image_size.to(R).flip(dims=(1,))
    assert torch.all(
        image_size_wh > 0
    ), "height and width must be positive but min is: %s" % (
        str(image_size_wh.min().item())
    )
    assert (
        camera_matrix.size(1) == 3 and camera_matrix.size(2) == 3
    ), "Incorrect camera matrix shape: expected 3x3 but got %dx%d" % (
        camera_matrix.size(1),
        camera_matrix.size(2),
    )
    assert (
        R.size(1) == 3 and R.size(2) == 3
    ), "Incorrect R shape: expected 3x3 but got %dx%d" % (
        R.size(1),
        R.size(2),
    )
    # Normalize tvec to (N, 3, 1) for the batched matmul below.
    if len(tvec.size()) == 2:
        tvec = tvec.unsqueeze(2)
    assert (
        tvec.size(1) == 3 and tvec.size(2) == 1
    ), "Incorrect tvec shape: expected 3x1 but got %dx%d" % (
        tvec.size(1),
        tvec.size(2),
    )
    # Check batch size.
    batch_size = camera_matrix.size(0)
    assert R.size(0) == batch_size, "Expected R to have batch size %d. Has size %d." % (
        batch_size,
        R.size(0),
    )
    assert (
        tvec.size(0) == batch_size
    ), "Expected tvec to have batch size %d. Has size %d." % (
        batch_size,
        tvec.size(0),
    )
    # Check image sizes.
    image_w = image_size_wh[0, 0]
    image_h = image_size_wh[0, 1]
    assert torch.all(
        image_size_wh[:, 0] == image_w
    ), "All images in a batch must have the same width!"
    assert torch.all(
        image_size_wh[:, 1] == image_h
    ), "All images in a batch must have the same height!"
    # Focal length.
    fx = camera_matrix[:, 0, 0].unsqueeze(1)
    fy = camera_matrix[:, 1, 1].unsqueeze(1)
    # Check that we introduce less than 1% error by averaging the focal lengths.
    fx_y = fx / fy
    if torch.any(fx_y > 1.01) or torch.any(fx_y < 0.99):
        LOGGER.warning(
            "Pulsar only supports a single focal lengths. For converting OpenCV "
            "focal lengths, we average them for x and y directions. "
            "The focal lengths for x and y you provided differ by more than 1%, "
            "which means this could introduce a noticeable error."
        )
    f = (fx + fy) / 2
    # Normalize f into normalized device coordinates.
    focal_length_px = f / image_w
    # Transfer into focal_length and sensor_width.
    # The focal length is pinned just inside znear; the sensor width is
    # derived so the field of view stays consistent.
    focal_length = torch.tensor([znear - 1e-5], dtype=torch.float32, device=R.device)
    focal_length = focal_length[None, :].repeat(batch_size, 1)
    sensor_width = focal_length / focal_length_px
    # Principal point.
    cx = camera_matrix[:, 0, 2].unsqueeze(1)
    cy = camera_matrix[:, 1, 2].unsqueeze(1)
    # Transfer principal point offset into centered offset.
    cx = -(cx - image_w / 2)
    cy = cy - image_h / 2
    # Concatenate to final vector.
    param = torch.cat([focal_length, sensor_width, cx, cy], dim=1)
    R_trans = R.permute(0, 2, 1)
    # Camera position in world coordinates: -R^T @ t.
    cam_pos = -torch.bmm(R_trans, tvec).squeeze(2)
    cam_rot = matrix_to_rotation_6d(R_trans)
    cam_params = torch.cat([cam_pos, cam_rot, param], dim=1)
    return cam_params
def _pulsar_from_cameras_projection(
    cameras: PerspectiveCameras,
    image_size: torch.Tensor,
) -> torch.Tensor:
    """Convert PyTorch3D `PerspectiveCameras` to Pulsar camera parameters.

    Round-trips through the OpenCV convention: first converts the cameras
    to OpenCV (R, tvec, K), then packs those into Pulsar parameter vectors.
    """
    opencv_R, opencv_T, opencv_K = _opencv_from_cameras_projection(cameras, image_size)
    return METHOD_NAME(opencv_R, opencv_T, opencv_K, image_size)
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
# from mathutils import Vector
from bpy.props import EnumProperty, BoolProperty
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode, repeat_last_for_length
from sverchok.data_structure import match_long_repeat as mlrepeat
from sverchok.utils.sv_bmesh_utils import bmesh_from_pydata
from sverchok.utils.nodes_mixins.sockets_config import ModifierNode
def flip_from_mask(mask, geom, reverse):
    """
    Flip polygons selected by a per-face mask.

    Expects one mask element per polygon; a shorter mask is extended by
    repeating its last value. When ``reverse`` is True the mask meaning is
    inverted. Returns (vertices, edges, new_faces) with vertices/edges
    passed through unchanged.
    """
    vertices, edge_list, polygons = geom
    full_mask = repeat_last_for_length(mask, len(polygons))
    # keep orientation when (mask XOR reverse) is truthy, otherwise flip
    flipped = [
        poly if (bool(flag) != bool(reverse)) else poly[::-1]
        for flag, poly in zip(full_mask, polygons)
    ]
    return vertices, edge_list, flipped
def flip_to_match_1st(geom, reverse):
    """
    Orient every polygon to agree with the first polygon's normal.

    All faces are expected to be coplanar; otherwise generate an explicit
    flip mask and use the mask mode instead. ``reverse`` inverts the
    per-face decision. Returns (vertices, edges, new_faces).
    """
    vertices, edge_list, polygons = geom
    bm = bmesh_from_pydata(vertices, faces=polygons, normal_update=True)
    bm.faces.ensure_lookup_table()
    reference_normal = bm.faces[0].normal
    oriented = []
    for bm_face in bm.faces:
        # A face counts as aligned when its normal is (nearly) the reference normal.
        aligned = (bm_face.normal - reference_normal).length < 0.004
        keep = aligned if not reverse else not aligned
        indices = [v.index for v in bm_face.verts]
        oriented.append(indices if keep else indices[::-1])
    bm.free()
    return vertices, edge_list, oriented
class SvFlipNormalsNode(ModifierNode, SverchCustomTreeNode, bpy.types.Node):
    ''' Flip face normals '''
    bl_idname = 'SvFlipNormalsNode'
    bl_label = 'Flip Normals'
    bl_icon = 'OUTLINER_OB_EMPTY'
    sv_icon = 'SV_FLIP_NORMALS'

    # 'mask': flip faces selected by the Mask input; 'match': flip to agree with face 0.
    mode_options = [(mode, mode, '', idx) for idx, mode in enumerate(['mask', 'match'])]

    selected_mode: EnumProperty(
        items=mode_options, description="offers flip options", default="match", update=updateNode)

    # Inverts the flip decision in both modes.
    reverse: BoolProperty(update=updateNode)

    def sv_init(self, context):
        # Geometry inputs plus an optional per-face mask (used in 'mask' mode only).
        self.inputs.new('SvVerticesSocket', "Vertices")
        self.inputs.new('SvStringsSocket', 'Edges')
        self.inputs.new('SvStringsSocket', 'Polygons')
        self.inputs.new('SvStringsSocket', 'Mask')
        self.outputs.new('SvVerticesSocket', 'Vertices')
        self.outputs.new('SvStringsSocket', 'Edges')
        self.outputs.new('SvStringsSocket', 'Polygons')

    def METHOD_NAME(self, context, layout):
        # One row: 'reverse' toggle on the left, mode selector expanded on the right.
        r = layout.row(align=True)
        r1 = r.split(factor=0.35)
        r1.prop(self, 'reverse', text='reverse', toggle=True)
        r2 = r1.split().row()
        r2.prop(self, "selected_mode", expand=True)

    def process(self):
        # Skip all work when no geometry output is connected.
        if not any(self.outputs[idx].is_linked for idx in range(3)):
            return

        vertices_s = self.inputs['Vertices'].sv_get(default=[[]], deepcopy=False)
        edges_s = self.inputs['Edges'].sv_get(default=[[]], deepcopy=False)
        faces_s = self.inputs['Polygons'].sv_get(default=[[]], deepcopy=False)

        # geom accumulates [vertices, edges, polygons] lists, one entry per object.
        geom = [[], [], []]
        if self.selected_mode == 'mask':
            mask_s = self.inputs['Mask'].sv_get(default=[[True]], deepcopy=False)
            for *single_geom, mask in zip(*mlrepeat([vertices_s, edges_s, faces_s, mask_s])):
                for idx, d in enumerate(flip_from_mask(mask, single_geom, self.reverse)):
                    geom[idx].append(d)
        elif self.selected_mode == 'match':
            for single_geom in zip(*mlrepeat([vertices_s, edges_s, faces_s])):
                for idx, d in enumerate(flip_to_match_1st(single_geom, self.reverse)):
                    geom[idx].append(d)

        self.set_output(geom)

    def set_output(self, geom):
        # Push vertices/edges/polygons to the matching output sockets by index.
        _ = [self.outputs[idx].sv_set(data) for idx, data in enumerate(geom)]
def register():
    """Register SvFlipNormalsNode with Blender (called when the add-on is enabled)."""
    bpy.utils.register_class(SvFlipNormalsNode)


def unregister():
    """Remove SvFlipNormalsNode from Blender's registry (add-on disable)."""
    bpy.utils.unregister_class(SvFlipNormalsNode)
1,934 | build list request | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def METHOD_NAME(**kwargs: Any) -> HttpRequest:
    """Build the GET request for the provider-level operations listing.

    Consumes ``headers``, ``params``, ``api_version`` and ``template_url``
    from ``kwargs``; anything left over is forwarded to :class:`HttpRequest`.
    """
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: str = kwargs.pop("api_version", params.pop("api-version", "2020-06-02-preview"))
    accept = headers.pop("Accept", "application/json")
    url = kwargs.pop("template_url", "/providers/microsoft.insights/operations")

    # Serialize the query string and headers through the shared serializer.
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=url, params=params, headers=headers, **kwargs)
class Operations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.applicationinsights.v2020_06_02_preview.ApplicationInsightsManagementClient`'s
        :attr:`operations` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs):
        # Generated clients pass (client, config, serializer, deserializer) positionally.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace
    def list(self, **kwargs: Any) -> Iterable["_models.OperationLive"]:
        """List available operations.

        List the available operations supported by the resource provider.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either OperationLive or the result of cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.applicationinsights.v2020_06_02_preview.models.OperationLive]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2020-06-02-preview"))
        cls: ClsType[_models.OperationsListResult] = kwargs.pop("cls", None)

        # Map well-known status codes to azure-core exception types.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page: build the templated request; later pages: follow next_link.
            if not next_link:
                request = METHOD_NAME(
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and hand (next_link, items) back to ItemPaged.
            deserialized = self._deserialize("OperationsListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Execute one page request through the client's pipeline.
            request = prepare_request(next_link)

            _stream = False
            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(get_next, extract_data)

    list.metadata = {"url": "/providers/microsoft.insights/operations"}
1,935 | type | # Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
#
# This module contains all the classes representing the Resources objects.
# These objects are obtained from the configuration file through a conversion based on the Schema classes.
#
import asyncio
import functools
from abc import ABC, abstractmethod
from enum import Enum
from typing import List
ASYNC_TIMED_VALIDATORS_DEFAULT_TIMEOUT_SEC = 10
class FailureLevel(Enum):
    """Severity of a validation failure (values mirror ``logging`` levels)."""

    INFO = 20
    WARNING = 30
    ERROR = 40

    def __str__(self):
        # Render as the bare level name, e.g. "ERROR".
        return self.name
class ValidationResult:
    """Outcome of a single validator check: a message, its severity and origin."""

    def __init__(self, message: str, level: FailureLevel, validator_type: str):
        # Human-readable failure description.
        self.message = message
        # FailureLevel severity of this result.
        self.level = level
        # Name of the validator class that produced the result.
        self.validator_type = validator_type

    def __repr__(self):
        return "ValidationResult(level={}, message={})".format(self.level, self.message)
class Validator(ABC):
    """Abstract validator. The children must implement the _validate method."""

    def __init__(self):
        # Accumulated ValidationResult objects; populated via _add_failure.
        self._failures = []

    def _add_failure(self, message: str, level: FailureLevel):
        """Record one failure with the given message and severity."""
        result = ValidationResult(message, level, self.METHOD_NAME)
        self._failures.append(result)

    @property
    def METHOD_NAME(self):
        """Identify the type of validator (the concrete class name)."""
        return self.__class__.__name__

    def execute(self, *arg, **kwargs) -> List[ValidationResult]:
        """Entry point of all validators to verify all input params are valid.

        Returns the accumulated list of failures (empty when everything passed).
        """
        self._validate(*arg, **kwargs)
        return self._failures

    @abstractmethod
    def _validate(self, *args, **kwargs):
        """
        Must be implemented with specific validation logic.

        Use _add_failure to add failures to the list of failures returned by execute.
        """
        pass
class AsyncValidator(Validator):
    """Abstract validator that supports *also* async execution. Children must implement the _validate_async method."""

    def __init__(self):
        super().__init__()

    def _validate(self, *arg, **kwargs):
        # Bridge to the async implementation so sync callers of execute() keep working.
        # NOTE(review): asyncio.get_event_loop() is deprecated outside a running loop
        # since Python 3.10 and run_until_complete fails inside one -- consider
        # asyncio.run(); confirm the call sites first.
        asyncio.get_event_loop().run_until_complete(self._validate_async(*arg, **kwargs))
        return self._failures

    async def execute_async(self, *arg, **kwargs) -> List[ValidationResult]:
        """Entry point of all async validators to verify all input params are valid.

        Returns the accumulated list of failures (empty when everything passed).
        """
        await self._validate_async(*arg, **kwargs)
        return self._failures

    @abstractmethod
    async def _validate_async(self, *args, **kwargs):
        """
        Must be implemented with specific validation logic.

        Use _add_failure to add failures to the list of failures returned by execute or execute_async when awaited.
        """
        pass
def get_async_timed_validator_type_for(validator_type: METHOD_NAME) -> AsyncValidator:
    """
    Return the type decorating the given validator with timeout support.

    The enriched _validate_async will accept an additional timeout parameter.
    If not provided will default to ASYNC_TIMED_VALIDATORS_DEFAULT_TIMEOUT_SEC.

    Since validators async execution is coroutine based with preemptive multitasking,
    the effective time to fail the validator for timeout may exceed the requested one.
    """
    # Generated classes are cached in globals() so repeated calls reuse one type.
    class_name = f"AsyncTimed{validator_type.__name__}"
    if class_name not in globals():
        class_bases = validator_type.__bases__
        class_dict = dict(validator_type.__dict__)

        def _async_timed_validate(original_method):
            # Wrap the original coroutine so it is cancelled after `timeout` seconds.
            @functools.wraps(original_method)
            async def _validate_async(self: AsyncValidator, *args, **kwargs):
                # Pop the extra kwarg so the wrapped validator never sees it.
                timeout = kwargs.pop("timeout", ASYNC_TIMED_VALIDATORS_DEFAULT_TIMEOUT_SEC)
                try:
                    await asyncio.wait_for(original_method(self, *args, **kwargs), timeout=timeout)
                except asyncio.TimeoutError:
                    # A timeout is reported as a WARNING failure, not an exception.
                    self._add_failure(  # pylint: disable=protected-access
                        f"Validation of ({kwargs}) timed out after {timeout} seconds.", FailureLevel.WARNING
                    )

            return _validate_async

        # Swap in the timeout-aware coroutine and build the new class dynamically.
        class_dict["_validate_async"] = _async_timed_validate(class_dict["_validate_async"])
        schema_class_type = METHOD_NAME(class_name, class_bases, class_dict)
        globals()[class_name] = schema_class_type
    else:
        schema_class_type = globals()[class_name]
    return schema_class_type
class ValidatorContext:
    """Cluster-environment information handed to validators at execution time."""

    def __init__(self, head_node_instance_id: str = None, during_update: bool = None):
        # EC2 instance id of the cluster head node, when known.
        self.head_node_instance_id = head_node_instance_id
        # True while a cluster update is in progress; None when unknown.
        self.during_update = during_update
1,936 | post | from pathlib import PurePath
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Q
from django.http.response import HttpResponseRedirect
from drf_spectacular.utils import extend_schema, extend_schema_view
from qfieldcloud.core import exceptions, permissions_utils, serializers, utils
from qfieldcloud.core.models import PackageJob, Project
from qfieldcloud.core.permissions_utils import check_supported_regarding_owner_account
from rest_framework import permissions, views
from rest_framework.response import Response
class PackageViewPermissions(permissions.BasePermission):
    """Allow access only to users who may read files of the requested project."""

    def has_permission(self, request, view):
        projectid = permissions_utils.get_param_from_request(request, "projectid")
        try:
            project = Project.objects.get(id=projectid)
        except ObjectDoesNotExist:
            # Unknown project id: deny rather than surface a server error.
            return False

        user = request.user
        return permissions_utils.can_read_files(user, project)
@extend_schema(
    deprecated=True,
    summary="This endpoint is deprecated and will be removed in the future. Please use `/jobs/` endpoint instead.",
)
@extend_schema_view(
    METHOD_NAME=extend_schema(description="Launch QField packaging project"),
    get=extend_schema(description="Get QField packaging status"),
)
class PackageView(views.APIView):
    """Deprecated trigger/status endpoint for QField project packaging."""

    permission_classes = [permissions.IsAuthenticated, PackageViewPermissions]

    def METHOD_NAME(self, request, projectid):
        """Start packaging for the project, or return an existing active/fresh job."""
        project_obj = Project.objects.get(id=projectid)

        check_supported_regarding_owner_account(project_obj)

        if not project_obj.project_filename:
            raise exceptions.NoQGISProjectError()

        # Check if active packaging job already exists
        # TODO: !!!!!!!!!!!! cache results for some minutes
        query = Q(project=project_obj) & (
            Q(status=PackageJob.Status.PENDING)
            | Q(status=PackageJob.Status.QUEUED)
            | Q(status=PackageJob.Status.STARTED)
        )

        # NOTE uncomment to enforce job creation
        # PackageJob.objects.filter(query).delete()

        if not project_obj.needs_repackaging:
            # Reuse the newest finished job when the project content has not changed.
            # NOTE(review): .latest() raises DoesNotExist when no finished job exists;
            # presumably needs_repackaging guarantees one -- confirm.
            export_job = (
                PackageJob.objects.filter(status=PackageJob.Status.FINISHED)
                .filter(project=project_obj)
                .latest("started_at")
            )
            if export_job:
                serializer = serializers.ExportJobSerializer(export_job)
                return Response(serializer.data)

        if PackageJob.objects.filter(query).exists():
            # An active (pending/queued/started) job exists: report it instead of queueing.
            serializer = serializers.ExportJobSerializer(PackageJob.objects.get(query))
            return Response(serializer.data)

        export_job = PackageJob.objects.create(
            project=project_obj, created_by=self.request.user
        )

        # TODO: check if user is allowed otherwise ERROR 403
        serializer = serializers.ExportJobSerializer(export_job)
        return Response(serializer.data)

    def get(self, request, projectid):
        """Return the status of the most recently updated packaging job."""
        project_obj = Project.objects.get(id=projectid)

        export_job = (
            PackageJob.objects.filter(project=project_obj).order_by("updated_at").last()
        )
        serializer = serializers.ExportJobSerializer(export_job)
        return Response(serializer.data)
@extend_schema(
    deprecated=True,
    summary="This endpoint is deprecated and will be removed in the future. Please use `/packages/{project_id}/latest/` endpoint instead.",
)
@extend_schema_view(
    get=extend_schema(description="List QField project files"),
)
class ListFilesView(views.APIView):
    """Deprecated listing of files produced by the latest finished package job."""

    permission_classes = [permissions.IsAuthenticated, PackageViewPermissions]

    def get(self, request, projectid):
        """Return file names/sizes/hashes and layer-check data of the last package."""
        project_obj = Project.objects.get(id=projectid)

        # Check if the project was exported at least once
        if not PackageJob.objects.filter(
            project=project_obj, status=PackageJob.Status.FINISHED
        ):
            raise exceptions.InvalidJobError(
                "Project files have not been exported for the provided project id"
            )

        package_job = project_obj.last_package_job
        assert package_job

        # Obtain the bucket object
        bucket = utils.get_s3_bucket()

        export_prefix = f"projects/{projectid}/packages/{package_job.id}/"

        files = []
        for obj in bucket.objects.filter(Prefix=export_prefix):
            path = PurePath(obj.key)

            # We cannot be sure of the metadata's first letter case
            # https://github.com/boto/boto3/issues/1709
            metadata = obj.Object().metadata
            if "sha256sum" in metadata:
                sha256sum = metadata["sha256sum"]
            else:
                sha256sum = metadata["Sha256sum"]

            files.append(
                {
                    # Get the path of the file relative to the export directory
                    "name": str(path.relative_to(*path.parts[:4])),
                    "size": obj.size,
                    "sha256": sha256sum,
                }
            )

        # Layer-check data lives in different places depending on feedback version.
        if package_job.feedback.get("feedback_version") == "2.0":
            layers = package_job.feedback["outputs"]["qgis_layers_data"]["layers_by_id"]

            # Map new field names onto the legacy response keys.
            for data in layers.values():
                data["valid"] = data["is_valid"]
                data["status"] = data["error_code"]
        else:
            # Legacy feedback: layer checks are the output of pipeline step 2, when present.
            steps = package_job.feedback.get("steps", [])
            layers = (
                steps[1]["outputs"]["layer_checks"]
                if len(steps) > 2 and steps[1].get("stage", 1) == 2
                else None
            )

        return Response(
            {
                "files": files,
                "layers": layers,
                "exported_at": package_job.updated_at,
                "export_id": package_job.pk,
            }
        )
@extend_schema(
    deprecated=True,
    summary="This endpoint is deprecated and will be removed in the future. Please use `/packages/{project_id}/latest/files/(unknown)/` endpoint instead.",
)
@extend_schema_view(
    get=extend_schema(description="Download file for QField"),
)
class DownloadFileView(views.APIView):
    """Deprecated download endpoint: redirects to a presigned S3 URL for a packaged file."""

    permission_classes = [permissions.IsAuthenticated, PackageViewPermissions]

    def get(self, request, projectid, filename):
        project_obj = Project.objects.get(id=projectid)

        package_job = project_obj.last_package_job

        # Check if the project was exported at least once
        if not package_job:
            raise exceptions.InvalidJobError(
                "Project files have not been exported for the provided project id"
            )

        # safe_join guards against path traversal in the user-supplied filename.
        filekey = utils.safe_join(
            f"projects/{projectid}/packages/{package_job.id}/", filename
        )
        url = utils.get_s3_client().generate_presigned_url(
            "get_object",
            Params={
                "Key": filekey,
                "Bucket": utils.get_s3_bucket().name,
                "ResponseContentType": "application/force-download",
                "ResponseContentDisposition": f'attachment;filename="(unknown)"',
            },
            ExpiresIn=60,
            HttpMethod="GET",
        )

        # Short-lived (60 s) presigned URL; the client follows the redirect to S3.
        return HttpResponseRedirect(url)
1,937 | delete cols | import wx
import wx.grid
from cellprofiler_core.preferences import get_error_color
from cellprofiler_core.setting import Table
class TableController(wx.grid.GridTableBase):
    """Adapter exposing a cellprofiler ``Table`` setting as a wx.grid table.

    Feeds cell values, labels and attributes from the setting's data to the
    grid, middle-truncating long values and highlighting error rows/cells.
    """

    DEFAULT_ATTR = wx.grid.GridCellAttr()

    # Cells flagged with ATTR_ERROR are rendered in the preference error color.
    ERROR_ATTR = wx.grid.GridCellAttr()
    ERROR_ATTR.SetTextColour(get_error_color())

    def __init__(self, v):
        super(self.__class__, self).__init__()
        assert isinstance(v, Table)
        self.v = v
        # Per-column display width, in characters; starts at the setting's max.
        self.column_size = [v.max_field_size] * len(v.column_names)

    def bind_to_grid(self, grid):
        """Bind to intercept events on the grid

        Binds on_mouse_motion and on_column_resize in order to do tooltips.
        Sets up editing / auto size and other to customize for table type.
        """
        self.grid = grid
        grid.AutoSize()
        grid.EnableEditing(False)
        grid.SetDefaultCellOverflow(False)
        if self.v.corner_button is None:
            grid.fn_clicked = None
        else:
            fn_clicked = self.v.corner_button["fn_clicked"]

            def on_corner_button_clicked(e):
                # Run the user callback, then refresh the grid to show new data.
                fn_clicked(e)
                self.update_grid()
                grid.ForceRefresh()
                grid.Parent.Layout()

            grid.fn_clicked = on_corner_button_clicked
            grid.label = self.v.corner_button.get("label", "Update")
            grid.tooltip = self.v.corner_button.get("tooltip", "")
        #
        # Below largely taken from
        # http://wiki.wxpython.org/wxGrid%20ToolTips
        #
        self.last_pos = (None, None)
        grid.GetGridWindow().Bind(wx.EVT_MOTION, self.on_mouse_motion)
        grid.Bind(wx.grid.EVT_GRID_COL_SIZE, self.on_column_resize)

    def update_grid(self):
        """Update the grid after the table data has changed"""
        need_column_layout = False
        grid = self.grid
        v = self.v
        # Tell the grid about column-count changes (deletions, then insertions).
        if len(v.column_names) < grid.GetNumberCols():
            tm = wx.grid.GridTableMessage(
                grid.Table,
                wx.grid.GRIDTABLE_NOTIFY_COLS_DELETED,
                0,
                grid.GetNumberCols() - len(v.column_names),
            )
            grid.ProcessTableMessage(tm)
            need_column_layout = True
        elif grid.GetNumberCols() < len(v.column_names):
            tm = wx.grid.GridTableMessage(
                grid.Table,
                wx.grid.GRIDTABLE_NOTIFY_COLS_INSERTED,
                0,
                len(v.column_names) - grid.GetNumberCols(),
            )
            grid.ProcessTableMessage(tm)
            need_column_layout = True
        # Same for row-count changes.
        if len(v.data) < grid.GetNumberRows():
            tm = wx.grid.GridTableMessage(
                grid.Table,
                wx.grid.GRIDTABLE_NOTIFY_ROWS_DELETED,
                0,
                grid.GetNumberRows() - len(v.data),
            )
            grid.ProcessTableMessage(tm)
        elif grid.GetNumberRows() < len(v.data):
            tm = wx.grid.GridTableMessage(
                grid.Table,
                wx.grid.GRIDTABLE_NOTIFY_ROWS_INSERTED,
                0,
                len(v.data) - grid.GetNumberRows(),
            )
            grid.ProcessTableMessage(tm)
        if need_column_layout:
            grid.AutoSizeColumns()

    def on_mouse_motion(self, event):
        """Show the full (untruncated) cell value as a tooltip under the mouse."""
        x, y = self.grid.CalcUnscrolledPosition(event.GetPosition())
        row = self.grid.YToRow(y)
        col = self.grid.XToCol(x)
        this_pos = (row, col)
        if (
            this_pos != self.last_pos
            and row >= 0
            and col >= 0
            and row < len(self.v.data)
            and col < len(self.v.data[row])
        ):
            self.last_pos = this_pos
            s = self.v.data[row][col]
            if s is None:
                s = ""
            elif not isinstance(s, str):
                s = str(s)
            self.grid.GetGridWindow().SetToolTip(s)
        event.Skip()

    def on_column_resize(self, event):
        """Track the user-chosen column width so GetValue truncates accordingly."""
        grid = self.grid
        col = event.GetRowOrCol()
        width = grid.GetColSize(col)
        table = grid.GetTable()
        # NOTE(review): 'table' above is unused. Width is converted from pixels
        # to an approximate character count (with 10% slack).
        self.column_size[col] = int(width * 1.1) / grid.CharWidth
        tm = wx.grid.GridTableMessage(self, wx.grid.GRIDTABLE_REQUEST_VIEW_GET_VALUES)
        grid.ProcessTableMessage(tm)
        grid.ForceRefresh()

    def GetAttr(self, row, col, kind):
        """Return the cell attribute: error styling when the cell is flagged."""
        attrs = self.v.get_cell_attributes(row, self.v.column_names[col])
        attr = self.DEFAULT_ATTR
        if attrs is not None and self.v.ATTR_ERROR in attrs:
            attr = self.ERROR_ATTR
        attr.IncRef()  # OH so bogus, don't refcount = bus error
        return attr

    def CanHaveAttributes(self):
        return True

    def GetNumberRows(self):
        return len(self.v.data)

    def GetNumberCols(self):
        return len(self.v.column_names)

    def IsEmptyCell(self, row, col):
        # A cell is empty when it is out of range or holds None.
        return (
            len(self.v.data) <= row
            or len(self.v.data[row]) <= col
            or self.v.data[row][col] is None
        )

    def GetValue(self, row, col):
        """Return the display string for a cell, middle-truncated to the column width."""
        if self.IsEmptyCell(row, col):
            return None
        s = str(self.v.data[row][col])
        if len(self.column_size) <= col:
            # Grow the per-column width list lazily for newly appeared columns.
            self.column_size += [self.v.max_field_size] * (
                col - len(self.column_size) + 1
            )
        field_size = self.column_size[col]
        if len(s) > field_size:
            half = int(field_size - 3) // 2
            s = s[:half] + "..." + s[-half:]
        return s

    def GetRowLabelValue(self, row):
        """Label rows 1-based; mark rows carrying the error attribute."""
        attrs = self.v.get_row_attributes(row)
        if attrs is not None and self.v.ATTR_ERROR in attrs:
            return "%d: Error" % (row + 1)
        return str(row + 1)

    def GetColLabelValue(self, col):
        return self.v.column_names[col]

    # Structure-change notifications: the underlying Table setting is the
    # source of truth, so these simply report success to the grid.
    def AppendCols(self, numCols=1):
        return True

    def AppendRows(self, numRows=1, updateLabels=True):
        return True

    def InsertCols(self, pos=0, numCols=1, updateLabels=True):
        return True

    def InsertRows(self, pos=0, numRows=1, updateLabels=True):
        return True

    def METHOD_NAME(self, pos=0, numCols=1, updateLabels=True):
        return True

    def DeleteRows(self, pos=0, numRows=1, updateLabels=True):
        return True
1,938 | util neg inf | #
# @file TestXMLAttributes.py
# @brief TestXMLAttributes unit tests
#
# @author Akiya Jouraku (Python conversion)
# @author Ben Bornstein
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/xml/test/TestXMLAttributes.cpp
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
def util_NaN():
    """Return NaN portably: 1e300*1e300 overflows to +inf, and inf - inf is NaN."""
    huge = 1e300 * 1e300  # +inf
    return huge - huge
def util_PosInf():
    """Return +inf portably by overflowing a large float product."""
    return 1e300 * 1e300
def METHOD_NAME():
    """Return -inf portably by negating an overflowed float product."""
    return -(1e300 * 1e300)
def wrapString(s):
    """Identity wrapper kept for parity with the C++-generated test sources."""
    # The auto-generated version had an unreachable `pass` after the return;
    # it has been removed.
    return s
class TestXMLAttributes(unittest.TestCase):
    """Unit tests for libsbml.XMLAttributes (auto-converted from the C++ suite)."""

    def test_XMLAttributes_add_get(self):
        """add() grows the collection; name/index/value lookups behave as expected."""
        attrs = libsbml.XMLAttributes()
        self.assertTrue( attrs.getLength() == 0 )
        self.assertEqual( True, attrs.isEmpty() )
        attrs.add("xmlns", "http://foo.org/")
        self.assertTrue( attrs.getLength() == 1 )
        self.assertTrue( attrs.isEmpty() == False )
        attrs.add("foo", "bar")
        self.assertTrue( attrs.getLength() == 2 )
        self.assertTrue( attrs.isEmpty() == False )
        # Missing names yield index -1, empty value and empty name.
        self.assertTrue( attrs.getIndex("xmlns") == 0 )
        self.assertTrue( attrs.getIndex("foo" ) == 1 )
        self.assertTrue( attrs.getIndex("bar" ) == -1 )
        self.assertTrue( attrs.getValue("xmlns") == "http://foo.org/" )
        self.assertTrue( attrs.getValue("foo" ) == "bar" )
        self.assertTrue( attrs.getValue("bar" ) == "" )
        self.assertTrue( attrs.getName(0) == "xmlns" )
        self.assertTrue( attrs.getName(1) == "foo" )
        self.assertTrue( attrs.getName(2) == "" )
        pass

    def test_XMLAttributes_assignment(self):
        """Assignment yields an object with equal contents."""
        att1 = libsbml.XMLAttributes()
        att1.add("xmlns", "http://foo.org/")
        self.assertTrue( att1.getLength() == 1 )
        self.assertTrue( att1.isEmpty() == False )
        self.assertTrue( att1.getIndex("xmlns") == 0 )
        self.assertTrue( att1.getName(0) == "xmlns" )
        self.assertTrue( att1.getValue("xmlns") == "http://foo.org/" )
        att2 = libsbml.XMLAttributes()
        att2 = att1
        self.assertTrue( att2.getLength() == 1 )
        self.assertTrue( att2.isEmpty() == False )
        self.assertTrue( att2.getIndex("xmlns") == 0 )
        self.assertTrue( att2.getName(0) == "xmlns" )
        self.assertTrue( att2.getValue("xmlns") == "http://foo.org/" )
        att2 = None
        att1 = None
        pass

    def test_XMLAttributes_clone(self):
        """clone() yields an independent object with equal contents."""
        att1 = libsbml.XMLAttributes()
        att1.add("xmlns", "http://foo.org/")
        self.assertTrue( att1.getLength() == 1 )
        self.assertTrue( att1.isEmpty() == False )
        self.assertTrue( att1.getIndex("xmlns") == 0 )
        self.assertTrue( att1.getName(0) == "xmlns" )
        self.assertTrue( att1.getValue("xmlns") == "http://foo.org/" )
        att2 = att1.clone()
        self.assertTrue( att2.getLength() == 1 )
        self.assertTrue( att2.isEmpty() == False )
        self.assertTrue( att2.getIndex("xmlns") == 0 )
        self.assertTrue( att2.getName(0) == "xmlns" )
        self.assertTrue( att2.getValue("xmlns") == "http://foo.org/" )
        att2 = None
        att1 = None
        pass

    def test_XMLAttributes_copy(self):
        """Copy construction yields an object with equal contents."""
        att1 = libsbml.XMLAttributes()
        att1.add("xmlns", "http://foo.org/")
        self.assertTrue( att1.getLength() == 1 )
        self.assertTrue( att1.isEmpty() == False )
        self.assertTrue( att1.getIndex("xmlns") == 0 )
        self.assertTrue( att1.getName(0) == "xmlns" )
        self.assertTrue( att1.getValue("xmlns") == "http://foo.org/" )
        att2 = libsbml.XMLAttributes(att1)
        self.assertTrue( att2.getLength() == 1 )
        self.assertTrue( att2.isEmpty() == False )
        self.assertTrue( att2.getIndex("xmlns") == 0 )
        self.assertTrue( att2.getName(0) == "xmlns" )
        self.assertTrue( att2.getValue("xmlns") == "http://foo.org/" )
        att2 = None
        att1 = None
        pass
def suite():
    """Build the unittest suite containing all TestXMLAttributes cases."""
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(TestXMLAttributes))
    return suite
if __name__ == "__main__":
    # Exit 0 on success, 1 on any test failure so callers/CI can detect breakage.
    if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
        sys.exit(0)
    else:
        sys.exit(1)
|
1,939 | definition | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright (c) 2022 ScyllaDB
# pylint: disable=redefined-outer-name
import uuid
from datetime import datetime, timezone
import pytest
from sdcm.keystore import KeyStore
from sdcm.provision.provisioner import (
InstanceDefinition,
provisioner_factory,
ProvisionerError,
)
from sdcm.provision.user_data import UserDataObject
class PrintingTestUserDataObject(UserDataObject):
    """User-data object whose startup script just echoes two lines (test helper)."""

    @property
    def script_to_run(self) -> str:
        return """echo OK
echo another command"""
# fixture defines which provisioners to test
@pytest.fixture(scope="module", params=["azure", "fake"])
def backend(request):
    """Provisioner backend under test; the whole module runs once per param."""
    return request.param


@pytest.fixture(scope="module")
def image_id(backend):
    # Azure needs a real marketplace image reference; the fake backend accepts anything.
    if backend == "azure":
        return "OpenLogic:CentOS:7_9:latest"
    return "some-image-id"


@pytest.fixture(scope="module")
def image_type(backend):
    # VM size/flavor matching the chosen backend.
    if backend == "azure":
        return "Standard_D2s_v3"
    return "test-image-type"


@pytest.fixture(scope='module')
def test_id():
    """Unique test-run id so created cloud resources do not collide."""
    return f"unit-test-{str(uuid.uuid4())}"


@pytest.fixture(scope='module')
def region(backend):
    if backend == "azure":
        return "eastus"
    return "some-region"


@pytest.fixture(scope='module')
def METHOD_NAME(image_id, image_type):
    """Instance definition shared by all tests in this module."""
    return InstanceDefinition(
        name="test-vm-1",
        image_id=image_id,
        type=image_type,
        user_name="tester",
        ssh_key=KeyStore().get_ec2_ssh_key_pair(),
        tags={'test-tag': 'test_value'},
        user_data=[PrintingTestUserDataObject()]
    )


@pytest.fixture(scope='module')
def provisioner_params(test_id, region, azure_service):
    # NOTE(review): the `azure_service` fixture is not defined in this module --
    # presumably provided by a conftest.py; confirm.
    return {"test_id": test_id, "region": region, "availability_zone": "a", "azure_service": azure_service}


@pytest.fixture(scope="function")
def provisioner(backend, provisioner_params):
    """Fresh provisioner per test so caching effects can be asserted explicitly."""
    return provisioner_factory.create_provisioner(backend, **provisioner_params)
def test_can_provision_scylla_vm(region, METHOD_NAME, provisioner, backend, provisioner_params):
creation_time = datetime.utcnow().replace(microsecond=0).replace(tzinfo=timezone.utc)
v_m = provisioner.get_or_create_instances(definitions=[METHOD_NAME])[0]
assert v_m.name == METHOD_NAME.name
assert v_m.region == region
assert v_m.user_name == METHOD_NAME.user_name
assert v_m.public_ip_address
assert v_m.private_ip_address
assert v_m.tags == METHOD_NAME.tags
assert v_m.creation_time >= creation_time
assert v_m == provisioner.list_instances()[0]
v_m_2 = provisioner.get_or_create_instance(METHOD_NAME)
assert v_m is v_m_2, 'provisioner should not not recreate vm with the same name'
provisioner = provisioner_factory.create_provisioner(backend=backend, **provisioner_params)
v_m._provisioner = provisioner # pylint: disable=protected-access
assert v_m == provisioner.list_instances(
)[0], 'provisioner with the same params should rediscover created resources'
def test_can_discover_regions(region, test_id, backend, provisioner_params):
    """Discovery must find the provisioner created for this test id/region."""
    discovered = provisioner_factory.discover_provisioners(backend=backend, **provisioner_params)
    first = discovered[0]
    assert first.region == region
    assert first.test_id == test_id
def test_discover_provisioners_wrong_backend(provisioner_params):
    """An unregistered backend name must raise a clear ProvisionerError."""
    wrong_backend = "absent-name-in-backend-mapping"
    # f-string instead of dated %-formatting; resulting message is identical
    match_err_msg = f"Provisioner class was not registered for the '{wrong_backend}' backend"
    with pytest.raises(ProvisionerError, match=match_err_msg):
        provisioner_factory.discover_provisioners(backend=wrong_backend, **provisioner_params)
def test_can_add_tags(provisioner, METHOD_NAME, backend, provisioner_params):
    """Tags added via the provisioner are visible in cache and on the real resource."""
    provisioner.add_instance_tags(METHOD_NAME.name, {"tag_key": "tag_value"})
    assert provisioner.get_or_create_instance(METHOD_NAME).tags.get("tag_key") == "tag_value"
    # validate real tags change: a fresh provisioner rediscovers state from the backend
    provisioner = provisioner_factory.create_provisioner(backend=backend, **provisioner_params)
    assert provisioner.get_or_create_instance(METHOD_NAME).tags.get("tag_key") == "tag_value"
def test_null_tag_value_is_replaced_with_empty_string(provisioner, METHOD_NAME, backend, provisioner_params):
    """On Azure a "null" tag value must be stored as an empty string."""
    if backend != "azure":
        pytest.skip("Only Azure does not support null tags")
    provisioner.add_instance_tags(METHOD_NAME.name, {"tag_key": "null"})
    assert provisioner.get_or_create_instance(METHOD_NAME).tags.get("tag_key") == ""
    # validate real tags change: fresh provisioner reads from the backend, not the cache
    provisioner = provisioner_factory.create_provisioner(backend=backend, **provisioner_params)
    assert provisioner.get_or_create_instance(METHOD_NAME).tags.get("tag_key") == ""
def test_can_terminate_vm_instance(provisioner, METHOD_NAME, backend, provisioner_params):
    """Terminating the instance removes it from the cache and from the real backend."""
    provisioner.terminate_instance(METHOD_NAME.name, wait=True)
    # validate cache has been cleaned up
    assert not provisioner.list_instances()
    # validate real termination: a fresh provisioner must not rediscover the instance
    provisioner = provisioner_factory.create_provisioner(backend=backend, **provisioner_params)
    assert not provisioner.list_instances()
def test_can_trigger_cleanup(METHOD_NAME, provisioner, backend, provisioner_params): # pylint: disable=no-self-use
    """cleanup() terminates every instance owned by this test id."""
    provisioner.get_or_create_instance(METHOD_NAME)
    assert len(provisioner.list_instances()) == 1
    provisioner.cleanup(wait=True)
    # validate cache has been cleaned up
    assert not provisioner.list_instances()
    # validate real termination
    provisioner = provisioner_factory.create_provisioner(backend=backend, **provisioner_params)
    assert not provisioner.list_instances(), "failed cleaning up resources"
1,940 | compute output shape | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras zero-padding layer for 2D input."""
import tensorflow.compat.v2 as tf
from keras import backend
from keras.engine.base_layer import Layer
from keras.engine.input_spec import InputSpec
from keras.utils import conv_utils
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.layers.ZeroPadding2D")
class ZeroPadding2D(Layer):
    """Zero-padding layer for 2D input (e.g. picture).

    This layer can add rows and columns of zeros
    at the top, bottom, left and right side of an image tensor.

    Examples:

    >>> input_shape = (1, 1, 2, 2)
    >>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
    >>> print(x)
    [[[[0 1]
       [2 3]]]]
    >>> y = tf.keras.layers.ZeroPadding2D(padding=1)(x)
    >>> print(y)
    tf.Tensor(
      [[[[0 0]
         [0 0]
         [0 0]
         [0 0]]
        [[0 0]
         [0 1]
         [2 3]
         [0 0]]
        [[0 0]
         [0 0]
         [0 0]
         [0 0]]]], shape=(1, 3, 4, 2), dtype=int64)

    Args:
      padding: Int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.
        - If int: the same symmetric padding
          is applied to height and width.
        - If tuple of 2 ints:
          interpreted as two different
          symmetric padding values for height and width:
          `(symmetric_height_pad, symmetric_width_pad)`.
        - If tuple of 2 tuples of 2 ints:
          interpreted as
          `((top_pad, bottom_pad), (left_pad, right_pad))`
      data_format: A string,
        one of `channels_last` (default) or `channels_first`.
        The ordering of the dimensions in the inputs.
        `channels_last` corresponds to inputs with shape
        `(batch_size, height, width, channels)` while `channels_first`
        corresponds to inputs with shape
        `(batch_size, channels, height, width)`.
        When unspecified, uses
        `image_data_format` value found in your Keras config file at
        `~/.keras/keras.json` (if exists) else 'channels_last'.
        Defaults to 'channels_last'.

    Input shape:
      4D tensor with shape:
      - If `data_format` is `"channels_last"`:
          `(batch_size, rows, cols, channels)`
      - If `data_format` is `"channels_first"`:
          `(batch_size, channels, rows, cols)`

    Output shape:
      4D tensor with shape:
      - If `data_format` is `"channels_last"`:
          `(batch_size, padded_rows, padded_cols, channels)`
      - If `data_format` is `"channels_first"`:
          `(batch_size, channels, padded_rows, padded_cols)`
    """

    def __init__(self, padding=(1, 1), data_format=None, **kwargs):
        super().__init__(**kwargs)
        self.data_format = conv_utils.normalize_data_format(data_format)
        if isinstance(padding, int):
            # one symmetric amount for both spatial dims
            self.padding = ((padding, padding), (padding, padding))
        elif hasattr(padding, "__len__"):
            if len(padding) != 2:
                raise ValueError(
                    f"`padding` should have two elements. Received: {padding}."
                )
            # each entry may be an int or an (before, after) pair
            self.padding = (
                conv_utils.normalize_tuple(
                    padding[0], 2, "1st entry of padding", allow_zero=True
                ),
                conv_utils.normalize_tuple(
                    padding[1], 2, "2nd entry of padding", allow_zero=True
                ),
            )
        else:
            raise ValueError(
                "`padding` should be either an int, "
                "a tuple of 2 ints "
                "(symmetric_height_pad, symmetric_width_pad), "
                "or a tuple of 2 tuples of 2 ints "
                "((top_pad, bottom_pad), (left_pad, right_pad)). "
                f"Received: {padding}."
            )
        self.input_spec = InputSpec(ndim=4)

    def METHOD_NAME(self, input_shape):
        """Return the padded output shape for a given 4D input shape."""
        dims = tf.TensorShape(input_shape).as_list()

        def _padded(size, pads):
            # Unknown (None) dimensions stay unknown.
            return None if size is None else size + pads[0] + pads[1]

        if self.data_format == "channels_first":
            rows = _padded(dims[2], self.padding[0])
            cols = _padded(dims[3], self.padding[1])
            return tf.TensorShape([dims[0], dims[1], rows, cols])
        elif self.data_format == "channels_last":
            rows = _padded(dims[1], self.padding[0])
            cols = _padded(dims[2], self.padding[1])
            return tf.TensorShape([dims[0], rows, cols, dims[3]])

    def call(self, inputs):
        return backend.spatial_2d_padding(
            inputs, padding=self.padding, data_format=self.data_format
        )

    def get_config(self):
        config = super().get_config()
        config.update(
            {"padding": self.padding, "data_format": self.data_format}
        )
        return config
1,941 | get metric | # pylint: disable=redefined-outer-name
# pylint: disable=unused-argument
# pylint: disable=unused-variable
# pylint: disable=too-many-arguments
import json
import re
from collections.abc import AsyncIterator, Callable, Iterator
from pathlib import Path
from random import choice
from typing import Any
from unittest import mock
import httpx
import pytest
import requests_mock
from asgi_lifespan import LifespanManager
from faker import Faker
from fakeredis.aioredis import FakeRedis
from fastapi import FastAPI
from fastapi.testclient import TestClient
from pytest import MonkeyPatch
from pytest_mock import MockerFixture
from pytest_simcore.helpers.typing_env import EnvVarsDict
from pytest_simcore.helpers.utils_envs import setenvs_from_dict
from settings_library.rabbit import RabbitSettings
from simcore_service_resource_usage_tracker.core.application import create_app
from simcore_service_resource_usage_tracker.core.settings import ApplicationSettings
# pytest-simcore plugin suite: provides the docker/postgres/rabbit/etc. fixtures
# used by the fixtures below.
pytest_plugins = [
    "pytest_simcore.cli_runner",
    "pytest_simcore.docker_compose",
    "pytest_simcore.docker_registry",
    "pytest_simcore.docker_swarm",
    "pytest_simcore.environment_configs",
    "pytest_simcore.postgres_service",
    "pytest_simcore.pydantic_models",
    "pytest_simcore.pytest_global_environs",
    "pytest_simcore.rabbit_service",
    "pytest_simcore.repository_paths",
    "pytest_simcore.tmp_path_extra",
]
@pytest.fixture(scope="session")
def project_slug_dir(osparc_simcore_root_dir: Path) -> Path:
    """Path of the resource-usage-tracker service folder inside the repo."""
    # osparc_simcore_root_dir comes from pytest_simcore.repository_paths
    folder = osparc_simcore_root_dir / "services" / "resource-usage-tracker"
    assert folder.exists()
    assert any(folder.glob("src/simcore_service_resource_usage_tracker"))
    return folder
@pytest.fixture
def app_environment(
    mock_env_devel_environment: EnvVarsDict, monkeypatch: MonkeyPatch, faker: Faker
) -> EnvVarsDict:
    """Dev environment overlaid with random postgres/prometheus credentials."""
    envs = setenvs_from_dict(
        monkeypatch,
        {
            "POSTGRES_HOST": faker.domain_name(),
            "POSTGRES_USER": faker.user_name(),
            "POSTGRES_PASSWORD": faker.password(special_chars=False),
            "POSTGRES_DB": faker.pystr(),
            "PROMETHEUS_URL": f"{choice(['http', 'https'])}://{faker.domain_name()}",
            "PROMETHEUS_USERNAME": faker.user_name(),
            "PROMETHEUS_PASSWORD": faker.password(special_chars=False),
        },
    )
    # later keys win: the random values override the devel defaults
    return mock_env_devel_environment | envs
@pytest.fixture
def disabled_prometheus(
    app_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch
) -> None:
    """Unset all PROMETHEUS_* env vars for this test."""
    for env_var in ("PROMETHEUS_URL", "PROMETHEUS_USERNAME", "PROMETHEUS_PASSWORD"):
        monkeypatch.delenv(env_var)
@pytest.fixture
def disabled_database(
    app_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch
) -> None:
    """Unset all POSTGRES_* env vars for this test."""
    for env_var in ("POSTGRES_HOST", "POSTGRES_USER", "POSTGRES_PASSWORD", "POSTGRES_DB"):
        monkeypatch.delenv(env_var)
@pytest.fixture
def disabled_rabbitmq(app_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch):
    """Unset all RABBIT_* env vars for this test."""
    for env_var in ("RABBIT_HOST", "RABBIT_USER", "RABBIT_SECURE", "RABBIT_PASSWORD"):
        monkeypatch.delenv(env_var)
@pytest.fixture
def enabled_rabbitmq(
    app_environment: EnvVarsDict, rabbit_service: RabbitSettings
) -> RabbitSettings:
    """RabbitMQ settings from the `pytest_simcore.rabbit_service` plugin fixture."""
    return rabbit_service
@pytest.fixture
def app_settings(
    app_environment: EnvVarsDict, monkeypatch: pytest.MonkeyPatch
) -> ApplicationSettings:
    """Application settings built from the test environment variables."""
    return ApplicationSettings.create_from_envs()
@pytest.fixture
async def initialized_app(app_settings: ApplicationSettings) -> AsyncIterator[FastAPI]:
    """App instance with startup/shutdown events executed via LifespanManager."""
    app = create_app(app_settings)
    async with LifespanManager(app):
        yield app
@pytest.fixture
def client(app_settings: ApplicationSettings) -> Iterator[TestClient]:
    """Synchronous test client over a freshly created app."""
    app = create_app(app_settings)
    with TestClient(app, base_url="http://testserver.test") as client:
        yield client
@pytest.fixture
async def async_client(initialized_app: FastAPI) -> AsyncIterator[httpx.AsyncClient]:
    """Async client talking directly to the initialized ASGI app (no sockets)."""
    async with httpx.AsyncClient(
        app=initialized_app,
        base_url=f"http://{initialized_app.title}.testserver.io",
        headers={"Content-Type": "application/json"},
    ) as client:
        yield client
@pytest.fixture
def mocked_prometheus(
    requests_mock: requests_mock.Mocker, app_settings: ApplicationSettings
) -> requests_mock.Mocker:
    """Mock a GET on the prometheus API root URL."""
    assert app_settings.RESOURCE_USAGE_TRACKER_PROMETHEUS
    requests_mock.get(f"{app_settings.RESOURCE_USAGE_TRACKER_PROMETHEUS.api_url}/")
    return requests_mock
@pytest.fixture
def get_metric_response(faker: Faker) -> Callable[..., dict[str, Any]]:
    """Factory of fake prometheus query responses with a single 'cpu' result."""
    def METHOD_NAME(request, context) -> dict[str, Any]:
        # signature matches the requests_mock json-callback convention
        return {
            "data": {
                "result": [
                    {
                        "metric": {
                            "id": "cpu",
                            "container_label_uuid": faker.uuid4(),
                            "container_label_simcore_service_settings": json.dumps(
                                [
                                    {
                                        "name": "Resources",
                                        "type": "Resources",
                                        "resources": faker.pystr(),
                                        "value": {
                                            "Limits": {
                                                "NanoCPUs": faker.pyint(min_value=1000)
                                            }
                                        },
                                    }
                                ]
                            ),
                        },
                        "value": faker.pylist(allowed_types=(int,)),
                    }
                ]
            }
        }
    return METHOD_NAME
@pytest.fixture
def mocked_prometheus_with_query(
    mocked_prometheus: requests_mock.Mocker,
    app_settings: ApplicationSettings,
    faker: Faker,
    get_metric_response,
) -> requests_mock.Mocker:
    """Extends mocked_prometheus: any /api/v1/query call returns a fake metric."""
    assert app_settings.RESOURCE_USAGE_TRACKER_PROMETHEUS
    # match /api/v1/query with any query string
    pattern = re.compile(
        rf"^{re.escape(app_settings.RESOURCE_USAGE_TRACKER_PROMETHEUS.api_url)}/api/v1/query\?.*$"
    )
    mocked_prometheus.get(pattern, json=get_metric_response)
    return mocked_prometheus
@pytest.fixture
def disabled_tracker_background_task(mocker: MockerFixture) -> dict[str, mock.Mock]:
    """Patch start/stop of the prometheus-containers periodic task."""
    plugin = (
        "simcore_service_resource_usage_tracker.modules.prometheus_containers.plugin"
    )
    return {
        "start_task": mocker.patch(f"{plugin}.start_periodic_task", autospec=True),
        "stop_task": mocker.patch(f"{plugin}.stop_periodic_task", autospec=True),
    }
@pytest.fixture
async def mocked_redis_server(mocker: MockerFixture) -> None:
    """Replace redis connections with an in-memory FakeRedis instance."""
    mock_redis = FakeRedis()
    mocker.patch("redis.asyncio.from_url", return_value=mock_redis)
@pytest.fixture
def mocked_setup_rabbitmq(mocker: MockerFixture):
    """Patch setup_rabbitmq in the application module; returns the mock."""
    return mocker.patch(
        "simcore_service_resource_usage_tracker.core.application.setup_rabbitmq",
        autospec=True,
    )
1,942 | test assert logs invalid multiple logs | # Copyright 2020 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import warnings
import pytest
import cirq.testing
def test_assert_logs_valid_single_logs():
    """assert_logs passes when every expected fragment occurs in one message."""
    # any subset of fragments from the message is accepted
    with cirq.testing.assert_logs('apple'):
        logging.error('orange apple fruit')
    with cirq.testing.assert_logs('apple', 'orange'):
        logging.error('orange apple fruit')
    with cirq.testing.assert_logs():
        logging.error('orange apple fruit')
    with cirq.testing.assert_logs('apple', 'fruit'):
        logging.error('orange apple fruit')
    # captured records are returned for further inspection
    with cirq.testing.assert_logs('apple') as logs:
        logging.error('orange apple fruit')
    assert len(logs) == 1
    assert logs[0].getMessage() == 'orange apple fruit'
    assert logs[0].levelno == logging.ERROR
    # warnings are captured as log records by default
    with cirq.testing.assert_logs('apple'):
        warnings.warn('orange apple fruit')
def test_assert_logs_invalid_single_logs():
match = (
'^dog expected to appear in log messages but it was not found. '
'Log messages: \\[\'orange apple fruit\'\\].$'
)
with pytest.raises(AssertionError, match=match):
with cirq.testing.assert_logs('dog'):
logging.error('orange apple fruit')
with pytest.raises(AssertionError, match='dog'):
with cirq.testing.assert_logs('dog', 'cat'):
logging.error('orange apple fruit')
def test_assert_logs_valid_multiple_logs():
    """assert_logs with `count` matches fragments across several messages."""
    with cirq.testing.assert_logs('apple', count=2):
        logging.error('orange apple fruit')
        logging.error('other')
    # order of the matching message must not matter
    # (two byte-identical copies of this case were removed — copy-paste duplication)
    with cirq.testing.assert_logs('apple', count=2):
        logging.error('other')
        logging.error('orange apple fruit')
    with cirq.testing.assert_logs('apple', 'other', count=2):
        logging.error('other')
        logging.error('orange apple fruit')
    with cirq.testing.assert_logs('apple', count=3):
        logging.error('orange apple fruit')
        logging.error('other')
        logging.warning('other two')
def METHOD_NAME():
    """assert_logs fails when the number of messages differs from `count`."""
    # default count is 1
    with pytest.raises(AssertionError, match='^Expected 1 log message but got 2. Log messages.*$'):
        with cirq.testing.assert_logs('dog'):
            logging.error('orange apple fruit')
            logging.error('dog')
    with pytest.raises(AssertionError, match='^Expected 2 log message but got 3. Log messages.*$'):
        with cirq.testing.assert_logs('dog', count=2):
            logging.error('orange apple fruit')
            logging.error('other')
            logging.error('dog')
    # correct count but missing fragment must still fail
    match = (
        '^dog expected to appear in log messages but it was not found. '
        'Log messages: \\[\'orange\', \'other\', \'whatever\'\\].$'
    )
    with pytest.raises(AssertionError, match=match):
        with cirq.testing.assert_logs('dog', count=3):
            logging.error('orange')
            logging.error('other')
            logging.error('whatever')
def test_assert_logs_log_level():
    """Only records within [min_level, max_level] are captured and counted."""
    # Default minlevel is WARNING, max level CRITICAL
    with cirq.testing.assert_logs('apple'):
        logging.error('orange apple fruit')
        logging.debug('should not')
        logging.info('count')
    with cirq.testing.assert_logs('apple', 'critical', count=2):
        logging.critical('critical')
        logging.error('orange apple fruit')
        logging.debug('should not')
        logging.info('count')
    with cirq.testing.assert_logs('apple', min_level=logging.INFO, count=2):
        logging.error('orange apple fruit')
        logging.debug('should not')
        logging.info('count')
    # nested captures: each context only sees records in its own level window
    with cirq.testing.assert_logs('info only 1', min_level=logging.INFO, max_level=logging.INFO):
        with cirq.testing.assert_logs(
            'info warning 1', min_level=logging.WARNING, max_level=logging.WARNING
        ):
            logging.info("info only 1")
            logging.warning("info warning 1")
def test_invalid_levels():
    """A min_level above max_level is rejected with a ValueError."""
    with pytest.raises(ValueError, match="min_level.*max_level"):
        with cirq.testing.assert_logs(
            "test", min_level=logging.CRITICAL, max_level=logging.WARNING
        ):
            pass
def test_assert_logs_warnings():
    """Warnings count as captured messages unless capture_warnings=False."""
    # Capture all warnings in one context, so that test cases that will
    # display a warning do not do so when the test is run.
    with warnings.catch_warnings(record=True):
        with cirq.testing.assert_logs('apple'):
            warnings.warn('orange apple fruit')
        with cirq.testing.assert_logs('apple', count=2):
            warnings.warn('orange apple fruit')
            logging.error('other')
        # with capture_warnings=False only real log records are seen
        with cirq.testing.assert_logs('apple', capture_warnings=False):
            logging.error('orange apple fruit')
            warnings.warn('warn')
        with pytest.raises(
            AssertionError, match='^Expected 1 log message but got 0. Log messages.*$'
        ):
            with cirq.testing.assert_logs('apple', capture_warnings=False):
                warnings.warn('orange apple fruit')
1,943 | is event loop running wx | # coding: utf-8
"""
Support for creating GUI apps and starting event loops.
IPython's GUI integration allows interactive plotting and GUI usage in IPython
session. IPython has two different types of GUI integration:
1. The terminal based IPython supports GUI event loops through Python's
PyOS_InputHook. PyOS_InputHook is a hook that Python calls periodically
whenever raw_input is waiting for a user to type code. We implement GUI
support in the terminal by setting PyOS_InputHook to a function that
iterates the event loop for a short while. It is important to note that
in this situation, the real GUI event loop is NOT run in the normal
manner, so you can't use the normal means to detect that it is running.
2. In the two process IPython kernel/frontend, the GUI event loop is run in
the kernel. In this case, the event loop is run in the normal manner by
calling the function or method of the GUI toolkit that starts the event
loop.
In addition to starting the GUI event loops in one of these two ways, IPython
will *always* create an appropriate GUI application object when GUI
integration is enabled.
If you want your GUI apps to run in IPython you need to do two things:
1. Test to see if there is already an existing main application object. If
there is, you should use it. If there is not an existing application object
you should create one.
2. Test to see if the GUI event loop is running. If it is, you should not
start it. If the event loop is not running you may start it.
This module contains functions for each toolkit that perform these things
in a consistent manner. Because of how PyOS_InputHook runs the event loop
you cannot detect if the event loop is running using the traditional calls
(such as ``wx.GetApp.IsMainLoopRunning()`` in wxPython). If PyOS_InputHook is
set, these methods will return a false negative. That is, they will say the
event loop is not running, when it actually is. To work around this limitation
we proposed the following informal protocol:
* Whenever someone starts the event loop, they *must* set the ``_in_event_loop``
attribute of the main application object to ``True``. This should be done
regardless of how the event loop is actually run.
* Whenever someone stops the event loop, they *must* set the ``_in_event_loop``
attribute of the main application object to ``False``.
* If you want to see if the event loop is running, you *must* use ``hasattr``
to see if ``_in_event_loop`` attribute has been set. If it is set, you
*must* use its value. If it has not been set, you can query the toolkit
in the normal manner.
* If you want GUI support and no one else has created an application or
started the event loop you *must* do this. We don't want projects to
attempt to defer these things to someone else if they themselves need it.
The functions below implement this logic for each GUI toolkit. If you need
to create custom application subclasses, you will likely have to modify this
code for your own purposes. This code can be copied into your own project
so you don't have to depend on IPython.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from IPython.core.getipython import get_ipython
#-----------------------------------------------------------------------------
# wx
#-----------------------------------------------------------------------------
def get_app_wx(*args, **kwargs):
    """Create a new wx app or return an existing one."""
    import wx
    app = wx.GetApp()
    if app is None:
        if 'redirect' not in kwargs:
            # by default, do not redirect stdout/stderr into a wx window
            kwargs['redirect'] = False
        # NOTE(review): wx.PySimpleApp is only available in classic wxPython;
        # it was removed in wxPython 4 (Phoenix) — confirm supported versions
        # or migrate to wx.App.
        app = wx.PySimpleApp(*args, **kwargs)
    return app
def METHOD_NAME(app=None):
    """Is the wx event loop running."""
    # New way: check attribute on shell instance
    ip = get_ipython()
    if ip is not None:
        if ip.active_eventloop and ip.active_eventloop == 'wx':
            return True
        # Fall through to checking the application, because Wx has a native way
        # to check if the event loop is running, unlike Qt.
    # Old way: check Wx application
    if app is None:
        app = get_app_wx()
    if hasattr(app, '_in_event_loop'):
        # informal protocol from the module docstring: the explicit flag wins
        return app._in_event_loop
    else:
        return app.IsMainLoopRunning()
def start_event_loop_wx(app=None):
    """Start the wx event loop in a consistent manner."""
    if app is None:
        app = get_app_wx()
    if not METHOD_NAME(app):
        app._in_event_loop = True
        app.MainLoop()  # blocks until the loop exits
        app._in_event_loop = False
    else:
        # loop already running (e.g. via PyOS_InputHook): just record the fact
        app._in_event_loop = True
#-----------------------------------------------------------------------------
# Qt
#-----------------------------------------------------------------------------
def get_app_qt4(*args, **kwargs):
    """Create a new Qt app or return an existing one."""
    from IPython.external.qt_for_kernel import QtGui
    app = QtGui.QApplication.instance()
    if app is None:
        # QApplication requires an argv-like list; default to one empty string
        app = QtGui.QApplication(*(args or ([""],)), **kwargs)
    return app
def is_event_loop_running_qt4(app=None):
    """Is the qt event loop running."""
    # New way: check attribute on shell instance
    ip = get_ipython()
    if ip is not None:
        # matches 'qt', 'qt4', 'qt5', ...
        return ip.active_eventloop and ip.active_eventloop.startswith('qt')
    # Old way: check attribute on QApplication singleton
    if app is None:
        app = get_app_qt4([""])
    if hasattr(app, '_in_event_loop'):
        return app._in_event_loop
    else:
        # Does qt provide another way to detect this?
        return False
def start_event_loop_qt4(app=None):
    """Start the qt event loop in a consistent manner."""
    if app is None:
        app = get_app_qt4([""])
    if not is_event_loop_running_qt4(app):
        app._in_event_loop = True
        # NOTE(review): exec_() is the PyQt4/PySide1-era spelling; newer
        # bindings also offer exec() — confirm which bindings are supported.
        app.exec_()
        app._in_event_loop = False
    else:
        app._in_event_loop = True
#-----------------------------------------------------------------------------
# Tk
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# gtk
#----------------------------------------------------------------------------- |
1,944 | sosdict | #!/usr/bin/env python3
#
# Copyright (c) Bo Peng and the University of Texas MD Anderson Cancer Center
# Distributed under the terms of the 3-clause BSD License.
# This file must have been copied to ~/.ipython/extensions/ during
# the installation of SoS (python setup.py install), with a sos
# profile with pre-loaded sos magics. You can use
#
# ipython --profile sos
#
# to use sos_magic in a ipython session with sos profile, or run
#
# %load_ext sos_magic
#
# after you load ipython, or you can add 'sos_magic' to
#
# c.InteractiveShellApp.extensions
#
# in ~/.ipython/profile_default/ipython_config.py, something like:
#
# c.InteractiveShellApp.extensions = [
# 'autoreload',
# 'sos_magic'
# ]
#
import copy
from IPython.core.error import UsageError
from IPython.core.magic import (Magics, line_cell_magic, line_magic,
magics_class)
from IPython.lib.clipboard import ClipboardEmpty
# cannot use relative import because the module will be copied to
# ~/.ipython/extensions
from sos_notebook.workflow_executor import runfile
from sos.eval import SoS_eval, SoS_exec
from sos.utils import WorkflowDict, env
# The class MUST call this class decorator at creation time
@magics_class
class SoS_Magics(Magics):
    '''IPython magics (%sos, %sospaste, %sosset, %sosget, %sosput, %sosdict)
    that run Script-of-Scripts (SoS) code and move variables between the
    IPython namespace and the SoS dictionary.'''
    def __init__(self, shell):
        super(SoS_Magics, self).__init__(shell)
        self._reset()
    def _reset(self):
        # Fresh SoS dictionary with the sos runtime pre-loaded.
        env.sos_dict = WorkflowDict()
        SoS_exec('from sos.runtime import *', None)
        env.sos_dict.set('__interactive__', True)
        # Remember the initial keys so %sosdict can show only user variables.
        self.original_keys = set(env.sos_dict._dict.keys())
        self.original_keys.add('__builtins__')
        self.options = ''
    @line_cell_magic
    def sos(self, line, cell=None):
        'Magic that executes SoS expressions and statements'
        # if in line mode, no command line
        if cell is None:
            if not self.options:
                return SoS_exec(line)
            else:
                # persistent options set via %sosset force workflow execution
                return runfile(code=line, args=self.options)
        else:
            # cell mode: the line holds extra command-line arguments
            return runfile(code=cell, args=line.strip() + ' ' + self.options)
    @line_magic
    def sospaste(self, line):
        'Magic that executes SoS expressions and statements from the clipboard'
        # get and print clipboard content
        try:
            block = self.shell.hooks.clipboard_get()
        except ClipboardEmpty:
            raise UsageError("The clipboard appears to be empty")
        #
        print(block.strip())
        print('## -- End pasted text --')
        try:
            # is it an expression?
            compile(block, '<string>', 'eval')
            return SoS_eval(block)
        except Exception:
            # is it a list of statements?
            try:
                compile(block, '<string>', 'exec')
                return SoS_exec(block)
            except Exception:
                # neither: run it as a full SoS workflow
                return runfile(code=block, args=self.options + line.strip())
    @line_magic
    def sosset(self, line):
        'Magic that sets permanent options for %sos and %sospaste'
        if line.strip():
            print('sos options set to "{}"'.format(line.strip()))
            self.options = line.strip() + ' '
        else:
            # NOTE(review): with no argument this runs runfile with no script
            # or code — confirm this is the intended "show options" behavior
            return runfile(script=None, code=None)
    @line_magic
    def sosget(self, line):
        'Magic that copies variables from the SoS dictionary into the Python namespace'
        for var in [x.strip() for x in line.split() if x.strip()]:
            if var not in env.sos_dict:
                raise RuntimeError('{} not exist in sos dict.'.format(var))
            # deep copy so later SoS-side mutation does not leak into Python
            self.shell.user_ns[var] = copy.deepcopy(env.sos_dict[var])
    @line_magic
    def sosput(self, line):
        'Magic that copies variables from the Python namespace into the SoS dictionary'
        for var in [x.strip() for x in line.split() if x.strip()]:
            if var not in self.shell.user_ns:
                raise RuntimeError('{} not exist in python dict.'.format(var))
            env.sos_dict[var] = copy.deepcopy(self.shell.user_ns[var])
    @line_magic
    def METHOD_NAME(self, line):
        'Magic that displays content of the SoS dictionary'
        # do not return __builtins__ because it is too long...
        actions = line.strip().split()
        # positional arguments are variable names; dash arguments are options
        keys = [x for x in actions if not x.startswith('-')]
        for x in keys:
            if not x in env.sos_dict:
                raise RuntimeError(
                    'Unrecognized sosdict option or variable name {}'.format(x))
        for x in [x for x in actions if x.startswith('-')]:
            if not x in ['-r', '--reset', '-k', '--keys', '-a', '--all']:
                raise RuntimeError(
                    'Unrecognized option {} for magic %sosdict'.format(x))
        if '--reset' in actions or '-r' in actions:
            return self._reset()
        if '--keys' in actions or '-k' in actions:
            if '--all' in actions or '-a' in actions:
                return env.sos_dict._dict.keys()
            elif keys:
                return set(keys)
            else:
                # user-defined keys only: hide dunders and the initial runtime
                return {
                    x for x in env.sos_dict._dict.keys()
                    if not x.startswith('__')
                } - self.original_keys
        else:
            if '--all' in actions or '-a' in actions:
                return env.sos_dict._dict
            elif keys:
                return {
                    x: y for x, y in env.sos_dict._dict.items() if x in keys
                }
            else:
                return {
                    x: y
                    for x, y in env.sos_dict._dict.items()
                    if x not in self.original_keys and not x.startswith('__')
                }
def load_ipython_extension(ipython):
    """Entry point used by `%load_ext sos_magic` to register the magics."""
    ipython.register_magics(SoS_Magics)
1,945 | validate key value | import asyncio
import aiohttp
from aiohttp import ClientConnectorError
from tracardi.service.notation.dict_traverser import DictTraverser
from tracardi.service.plugin.domain.register import Plugin, Spec, MetaData, Form, FormGroup, FormField, FormComponent
from tracardi.service.plugin.domain.result import Result
from tracardi.service.plugin.runner import ActionRunner
from .model.configuration import Configuration
from tracardi.service.tracardi_http_client import HttpClient
def validate(config: dict) -> Configuration:
    """Parse and validate the raw plugin init dict into a Configuration."""
    return Configuration(**config)
class HtmlPageFetchAction(ActionRunner):
    """Workflow action that performs an HTTP(S) request and returns its body."""
    # populated in set_up() from the validated plugin init
    config: Configuration
    async def set_up(self, init):
        self.config = validate(init)
    @staticmethod
    def METHOD_NAME(values, label):
        """Raise ValueError unless every value in `values` is a string."""
        for name, value in values.items():
            if not isinstance(value, str):
                raise ValueError(
                    "{} values must be strings, `{}` given for {} `{}`".format(label, type(value), label.lower(),
                                                                name))
    async def run(self, payload: dict, in_edge=None) -> Result:
        """Execute the configured HTTP call; route to `response` or `error` port."""
        try:
            dot = self._get_dot_accessor(payload)
            traverser = DictTraverser(dot)
            # resolve dot-notation placeholders in configured cookies/headers
            cookies = traverser.reshape(reshape_template=self.config.cookies)
            headers = traverser.reshape(reshape_template=self.config.headers)
            self.METHOD_NAME(headers, "Header")
            self.METHOD_NAME(cookies, "Cookie")
            timeout = aiohttp.ClientTimeout(total=self.config.timeout)
            async with HttpClient(
                self.node.on_connection_error_repeat,
                [200, 201, 202, 203],
                timeout=timeout
            ) as client:
                params = self.config.get_params(dot)
                async with client.request(
                        method=self.config.method,
                        url=str(self.config.url),
                        headers=headers,
                        cookies=cookies,
                        ssl=self.config.ssl_check,
                        **params
                ) as response:
                    result = {
                        "status": response.status,
                        "content": await response.text(),
                        "cookies": response.cookies
                    }
                    # success statuses go to the "response" port, rest to "error"
                    if response.status in [200, 201, 202, 203]:
                        return Result(port="response", value=result)
                    else:
                        return Result(port="error", value=result)
        except ClientConnectorError as e:
            return Result(port="error", value=str(e))
        except asyncio.exceptions.TimeoutError:
            return Result(port="error", value="Remote call timed out.")
def register() -> Plugin:
    """Return the plugin registration descriptor consumed by Tracardi.

    Declares the plugin entry point, default init values, the settings form
    rendered in the workflow editor, and the catalog metadata.
    """
    return Plugin(
        start=False,
        spec=Spec(
            module=__name__,
            className='HtmlPageFetchAction',
            inputs=['payload'],
            outputs=["response", "error"],
            # Defaults shown in the editor before the node is configured.
            init={
                "method": "get",
                "url": None,
                "timeout": 30,
                "headers": {},
                "cookies": {},
                "ssl_check": True,
                "body": ""
            },
            form=Form(groups=[
                FormGroup(
                    name="Remote call settings",
                    fields=[
                        FormField(
                            id="method",
                            name="Method",
                            description="Select API request method.",
                            component=FormComponent(type="select", props={
                                "label": "Method",
                                "items": {
                                    "get": "GET",
                                    "post": "POST",
                                    "put": "PUT",
                                    "delete": "DELETE"
                                }
                            })
                        ),
                        FormField(
                            id="url",
                            name="URL",
                            description="Type URL to be called.",
                            component=FormComponent(type="text", props={"label": "Url"})
                        ),
                        FormField(
                            id="body",
                            name="Content",
                            description="Type content to be sent. For replacing some part of content with data use "
                                        "double curly braces, e.g. {{profile@id}}.",
                            component=FormComponent(type="textarea", props={"label": "Content", "rows": 13})
                        ),
                    ]),
                FormGroup(
                    name="Advanced settings",
                    description="Set additional settings of remote request. Such as timeout, headers, etc.",
                    fields=[
                        FormField(
                            id="timeout",
                            name="Timeout",
                            description="Type value in seconds for call time-out.",
                            component=FormComponent(type="text", props={"label": "Time-out"})
                        ),
                        FormField(
                            id="ssl_check",
                            name="Validate SSL certificate",
                            description="Type if the SSL certificate must be checked.",
                            component=FormComponent(type="bool", props={"label": "Check and validate SSL certificate."})
                        ),
                        FormField(
                            id="headers",
                            name="Request headers",
                            description="Type key and value for request headers.",
                            component=FormComponent(type="keyValueList", props={"label": "Request headers"})
                        ),
                        FormField(
                            id="cookies",
                            name="Cookies",
                            description="Type key and value for cookies.",
                            component=FormComponent(type="keyValueList", props={"label": "Cookies"})
                        )
                    ]
                ),
            ]),
            version="0.6.1",
            author="Risto Kowaczewski",
            license="MIT"
        ),
        metadata=MetaData(
            name='HTML fetcher',
            desc='Fetches HTML page.',
            icon='globe',
            group=["Connectors"]
        )
    )
# Copyright Tomer Figenblat.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Switcher integration pretty next run tool test cases."""
from binascii import hexlify, unhexlify
from datetime import datetime, timedelta
from struct import pack, unpack
import time_machine
from assertpy import assert_that
from pytest import fixture, mark
from aioswitcher.schedule import Days, tools
# Lookup table from Days.weekday value to the Days member itself, so a
# datetime.weekday() result can be mapped back to the enum.
days_by_weekdays = dict(map(lambda d: (d.weekday, d), Days))
@fixture()
def today():
    """Current UTC datetime (naive).

    NOTE(review): datetime.utcnow() is naive and deprecated since Python
    3.12; consider datetime.now(timezone.utc) — confirm the tools under
    test accept aware datetimes before changing.
    """
    return datetime.utcnow()
@fixture
def todays_day(today):
    """Days enum member corresponding to today's weekday."""
    return days_by_weekdays[today.weekday()]
@fixture
def one_hour_from_now(today):
    """HH:MM string one hour ahead of the 'today' fixture."""
    return datetime.strftime(today + timedelta(hours=1), "%H:%M")
def test_pretty_next_run_with_no_selected_days_should_return_due_today(one_hour_from_now):
    """With no day set supplied, the run is expected today."""
    expected_return = f"Due today at {one_hour_from_now}"
    assert_that(tools.pretty_next_run(one_hour_from_now)).is_equal_to(expected_return)
def test_pretty_next_run_with_todays_day_should_return_due_today(todays_day, one_hour_from_now):
    """A future time on today's own day is still 'today'."""
    expected_return = f"Due today at {one_hour_from_now}"
    assert_that(tools.pretty_next_run(one_hour_from_now, {todays_day})).is_equal_to(expected_return)
def METHOD_NAME(today):
    """With several selected days, the nearest upcoming day must win."""
    two_days_from_now = today + timedelta(days=2)
    four_days_from_now = today + timedelta(days=4)
    two_days_from_now_day = days_by_weekdays[two_days_from_now.weekday()]
    four_days_from_now_day = days_by_weekdays[four_days_from_now.weekday()]
    # The closer of the two selected days (two days ahead) should be reported.
    expected_return = f"Due next {two_days_from_now_day.value} at 13:00"
    assert_that(tools.pretty_next_run("13:00", {four_days_from_now_day, two_days_from_now_day})).is_equal_to(expected_return)
def test_pretty_next_run_on_yesterday_with_todays_day_should_return_due_tomorrow(today, todays_day):
    """From yesterday's viewpoint, today's weekday is 'tomorrow'."""
    expected_return = "Due tomorrow at 13:00"
    yesterday = today - timedelta(days=1)
    # time_machine freezes the clock at 'yesterday' for the assertion.
    with time_machine.travel(yesterday):
        assert_that(tools.pretty_next_run("13:00", {todays_day})).is_equal_to(expected_return)
def test_pretty_next_run_on_two_days_ago_with_todays_day_should_return_due_on_next_day(today, todays_day):
    """More than one day ahead should use the 'Due next <day>' phrasing."""
    expected_return = f"Due next {todays_day.value} at 13:00"
    two_days_ago = today - timedelta(days=2)
    with time_machine.travel(two_days_ago):
        assert_that(tools.pretty_next_run("13:00", {todays_day})).is_equal_to(expected_return)
def test_pretty_next_run_on_last_sunday_with_monday_selected_should_return_due_tomorrow(today):
    """Week rollover: from Sunday, a Monday schedule is 'tomorrow'."""
    expected_return = "Due tomorrow at 13:00"
    # datetime.weekday(): Monday == 0, so Sunday is (weekday + 1) days back.
    last_sunday = today - timedelta(days=((today.weekday() + 1) % 7))
    with time_machine.travel(last_sunday):
        assert_that(tools.pretty_next_run("13:00", {Days.MONDAY})).is_equal_to(expected_return)
def test_calc_duration_with_valid_start_and_end_time_should_return_the_duration():
    """One hour between 13:00 and 14:00."""
    assert_that(tools.calc_duration("13:00", "14:00")).is_equal_to("1:00:00")
def test_calc_duration_with_reveresed_start_and_end_time_should_throw_an_error():
    """End before start must raise ValueError.

    NOTE: the 'reveresed' typo is in the published test id; renaming would
    change test selection, so it is left as-is.
    """
    assert_that(tools.calc_duration).raises(
        ValueError
    ).when_called_with("14:00", "13:00").is_equal_to("end_time should be greater the start_time")
def test_hexadecimale_timestamp_to_localtime_with_the_current_timestamp_should_return_a_time_string():
    """Round-trip: pack now() as little-endian seconds, expect HH:MM back."""
    sut_datetime = datetime.now()
    hex_timestamp = hexlify(pack("<I", round(sut_datetime.timestamp())))
    assert_that(
        tools.hexadecimale_timestamp_to_localtime(hex_timestamp)
    ).is_equal_to(sut_datetime.time().strftime("%H:%M"))
def test_hexadecimale_timestamp_to_localtime_with_wrong_value_should_throw_an_error():
    """Non-hex input should surface int()'s base-16 parsing error."""
    assert_that(tools.hexadecimale_timestamp_to_localtime).raises(
        ValueError
    ).when_called_with("wrongvalue".encode()).starts_with("invalid literal for int() with base 16")
@mark.parametrize("sum, expected_weekdays", [
    (2, {Days.MONDAY}),
    (6, {Days.MONDAY, Days.TUESDAY}),
    (14, {Days.MONDAY, Days.TUESDAY, Days.WEDNESDAY},),
    (30, {Days.MONDAY, Days.TUESDAY, Days.WEDNESDAY, Days.THURSDAY}),
    (62, {Days.MONDAY, Days.TUESDAY, Days.WEDNESDAY, Days.THURSDAY, Days.FRIDAY}),
    (126, {Days.MONDAY, Days.TUESDAY, Days.WEDNESDAY, Days.THURSDAY, Days.FRIDAY, Days.SATURDAY}),
    (254, {Days.MONDAY, Days.TUESDAY, Days.WEDNESDAY, Days.THURSDAY, Days.FRIDAY, Days.SATURDAY, Days.SUNDAY}),
])
def test_bit_summary_to_days_with_parameterized_sum_should_return_the_expected_weekday_set(sum, expected_weekdays):
    """Each bit sum decodes to its weekday set.

    NOTE: the parameter name 'sum' shadows the builtin; it is part of the
    parametrize id so it is kept unchanged here.
    """
    assert_that(tools.bit_summary_to_days(sum)).is_equal_to(expected_weekdays)
@mark.parametrize("wrong_bit_sum", [1, 255])
def test_bit_summary_to_days_with_wrong_bit_sum_parameterized_value(wrong_bit_sum):
    """Sums outside the valid 2..254 range must raise ValueError."""
    assert_that(tools.bit_summary_to_days).raises(
        ValueError
    ).when_called_with(wrong_bit_sum).is_equal_to("weekdays bit sum should be between 2 and 254")
@mark.parametrize("weekdays, expected_sum", [
    (Days.MONDAY, 2),
    ({Days.MONDAY, Days.TUESDAY}, 6),
    ({Days.MONDAY, Days.TUESDAY, Days.WEDNESDAY}, 14),
    ({Days.MONDAY, Days.TUESDAY, Days.WEDNESDAY, Days.THURSDAY}, 30),
    ({Days.MONDAY, Days.TUESDAY, Days.WEDNESDAY, Days.THURSDAY, Days.FRIDAY}, 62),
    ({Days.MONDAY, Days.TUESDAY, Days.WEDNESDAY, Days.THURSDAY, Days.FRIDAY, Days.SATURDAY}, 126),
    ({Days.MONDAY, Days.TUESDAY, Days.WEDNESDAY, Days.THURSDAY, Days.FRIDAY, Days.SATURDAY, Days.SUNDAY}, 254),
])
def test_weekdays_to_hexadecimal_with_parameterized_weekday_set_should_return_the_expected_sum(weekdays, expected_sum):
    """Encoding a weekday set must yield the hex form of its bit sum."""
    sut_hex = tools.weekdays_to_hexadecimal(weekdays)
    sut_int = int(sut_hex, 16)
    assert_that(sut_int).is_equal_to(expected_sum)
# Note: the third parameter, {}, is an empty dict (not a set literal).
@mark.parametrize("empty_collection", [set(), (), {}, []])
def test_weekdays_to_hexadecimal_with_empty_collections_should_throw_an_error(empty_collection):
    """Empty day collections must be rejected."""
    assert_that(tools.weekdays_to_hexadecimal).raises(
        ValueError
    ).when_called_with(empty_collection).is_equal_to("no days requested")
@mark.parametrize("duplicate_members", [(Days.MONDAY, Days.MONDAY), [Days.MONDAY, Days.MONDAY]])
def test_weekdays_to_hexadecimal_with_duplicate_members_should_throw_an_encoding_error(duplicate_members):
    """Duplicate days in a non-set collection must also be rejected."""
    assert_that(tools.weekdays_to_hexadecimal).raises(
        ValueError
    ).when_called_with(duplicate_members).is_equal_to("no days requested")
def test_time_to_hexadecimal_timestamp_with_correct_time_should_return_the_expected_timestamp():
    """Encoded '21:00' should unpack to today's date at 21:00 local time."""
    hex_timestamp = tools.time_to_hexadecimal_timestamp("21:00")
    binary_timestamp = unhexlify(hex_timestamp.encode())
    unpacked_timestamp = unpack("<I", binary_timestamp)
    sut_datetime = datetime.fromtimestamp(unpacked_timestamp[0])
    assert_that(
        sut_datetime
    ).is_equal_to_ignoring_time(datetime.now()).has_hour(21).has_minute(0)
def test_time_to_hexadecimal_timestamp_with_incorrect_time_should_throw_an_error():
    """A time string without ':' fails the internal split with IndexError."""
    assert_that(tools.time_to_hexadecimal_timestamp).raises(
        IndexError
    ).when_called_with("2100").is_equal_to("list index out of range")
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import ctypes
import math
import sys
from dataclasses import dataclass, field
import torch
from fairseq.dataclass import FairseqDataclass
from fairseq.scoring import BaseScorer, register_scorer
from fairseq.scoring.tokenizer import EvaluationTokenizer
class BleuStat(ctypes.Structure):
    """C-compatible struct mirroring libbleu's BLEU statistics accumulator.

    Holds reference/prediction token lengths plus match/count pairs for
    n-grams of order 1 through 4, all as ``size_t``.
    """

    _fields_ = (
        [("reflen", ctypes.c_size_t), ("predlen", ctypes.c_size_t)]
        + [
            ("{}{}".format(kind, order), ctypes.c_size_t)
            for order in range(1, 5)
            for kind in ("match", "count")
        ]
    )
@dataclass
class SacrebleuConfig(FairseqDataclass):
    """Configuration for the sacrebleu-backed scorer."""

    # Pre-tokenizer applied before scoring; "13a" is sacrebleu's default.
    sacrebleu_tokenizer: EvaluationTokenizer.ALL_TOKENIZER_TYPES = field(
        default="13a", metadata={"help": "tokenizer"}
    )
    # Lowercase both references and hypotheses before scoring.
    sacrebleu_lowercase: bool = field(
        default=False, metadata={"help": "apply lowercasing"}
    )
    # Score at character level instead of token level.
    sacrebleu_char_level: bool = field(
        default=False, metadata={"help": "evaluate at character level"}
    )
@register_scorer("sacrebleu", dataclass=SacrebleuConfig)
class SacrebleuScorer(BaseScorer):
    """BLEU scorer that delegates the metric computation to sacrebleu.

    Tokenization and lowercasing are applied by ``self.tokenizer`` as pairs
    are added, so sacrebleu itself runs with ``tokenize="none"``.
    """

    def __init__(self, cfg):
        super(SacrebleuScorer, self).__init__(cfg)
        import sacrebleu

        self.sacrebleu = sacrebleu
        self.tokenizer = EvaluationTokenizer(
            tokenizer_type=cfg.sacrebleu_tokenizer,
            lowercase=cfg.sacrebleu_lowercase,
            character_tokenization=cfg.sacrebleu_char_level,
        )

    def METHOD_NAME(self, ref, pred):
        """Tokenize and store one reference/hypothesis pair."""
        self.ref.append(self.tokenizer.tokenize(ref))
        self.pred.append(self.tokenizer.tokenize(pred))

    def _corpus_bleu(self, order=4):
        """Compute and return sacrebleu's BLEU result object.

        Raises:
            NotImplementedError: if order != 4 (sacrebleu only supports 4).
        """
        if order != 4:
            raise NotImplementedError
        # tokenization and lowercasing are performed by self.tokenizer instead.
        return self.sacrebleu.corpus_bleu(self.pred, [self.ref], tokenize="none")

    def score(self, order=4):
        """Return the BLEU score as a float.

        Bug fix: the original returned ``self.result_string(order).score``,
        but result_string returns the *formatted string* (``.format()``),
        which has no ``.score`` attribute and raised AttributeError. The
        score is now read from the BLEU result object directly.
        """
        return self._corpus_bleu(order).score

    def result_string(self, order=4):
        """Return sacrebleu's human-readable BLEU summary string."""
        return self._corpus_bleu(order).format()
@dataclass
class BleuConfig(FairseqDataclass):
    """Special-token indices used by the libbleu-backed Scorer."""

    pad: int = field(default=1, metadata={"help": "padding index"})
    eos: int = field(default=2, metadata={"help": "eos index"})
    unk: int = field(default=3, metadata={"help": "unk index"})
@register_scorer("bleu", dataclass=BleuConfig)
class Scorer(object):
    """BLEU scorer backed by the compiled libbleu C extension.

    Operates on integer token tensors rather than strings; statistics are
    accumulated in a BleuStat struct filled by the C library.
    """

    def __init__(self, cfg):
        self.stat = BleuStat()
        self.pad = cfg.pad
        self.eos = cfg.eos
        self.unk = cfg.unk
        try:
            from fairseq import libbleu
        except ImportError as e:
            sys.stderr.write(
                "ERROR: missing libbleu.so. run `pip install --editable .`\n"
            )
            raise e
        # Load the compiled shared object so its functions can fill self.stat.
        self.C = ctypes.cdll.LoadLibrary(libbleu.__file__)
        self.reset()

    def reset(self, one_init=False):
        """Reset accumulated statistics.

        NOTE(review): bleu_one_init presumably seeds counts at one (add-one
        smoothing) — confirm against libbleu's C source.
        """
        if one_init:
            self.C.bleu_one_init(ctypes.byref(self.stat))
        else:
            self.C.bleu_zero_init(ctypes.byref(self.stat))

    def add(self, ref, pred):
        """Accumulate BLEU statistics for one (ref, pred) token-id pair."""
        if not isinstance(ref, torch.IntTensor):
            raise TypeError("ref must be a torch.IntTensor (got {})".format(type(ref)))
        if not isinstance(pred, torch.IntTensor):
            raise TypeError("pred must be a torch.IntTensor(got {})".format(type(pred)))
        # don't match unknown words
        rref = ref.clone()
        assert not rref.lt(0).any()
        # Replace unk ids with a sentinel no prediction token can equal.
        rref[rref.eq(self.unk)] = -999
        # The C function reads flat contiguous buffers via raw pointers.
        rref = rref.contiguous().view(-1)
        pred = pred.contiguous().view(-1)
        self.C.bleu_add(
            ctypes.byref(self.stat),
            ctypes.c_size_t(rref.size(0)),
            ctypes.c_void_p(rref.data_ptr()),
            ctypes.c_size_t(pred.size(0)),
            ctypes.c_void_p(pred.data_ptr()),
            ctypes.c_int(self.pad),
            ctypes.c_int(self.eos),
        )

    def score(self, order=4):
        """Corpus BLEU in [0, 100] from up to `order`-gram precisions."""
        psum = sum(
            math.log(p) if p > 0 else float("-Inf") for p in self.precision()[:order]
        )
        return self.brevity() * math.exp(psum / order) * 100

    def precision(self):
        """n-gram precisions for n=1..4 (0 where no n-grams were counted)."""
        def ratio(a, b):
            return a / b if b > 0 else 0

        return [
            ratio(self.stat.match1, self.stat.count1),
            ratio(self.stat.match2, self.stat.count2),
            ratio(self.stat.match3, self.stat.count3),
            ratio(self.stat.match4, self.stat.count4),
        ]

    def brevity(self):
        """Brevity penalty: exp(1 - reflen/predlen), capped at 1."""
        r = self.stat.reflen / self.stat.predlen
        return min(1, math.exp(1 - r))

    def result_string(self, order=4):
        """Human-readable summary, e.g. 'BLEU4 = ..., p1/p2/p3/p4 (BP=...)'."""
        assert order <= 4, "BLEU scores for order > 4 aren't supported"
        fmt = "BLEU{} = {:2.2f}, {:2.1f}"
        for _ in range(1, order):
            fmt += "/{:2.1f}"
        fmt += " (BP={:.3f}, ratio={:.3f}, syslen={}, reflen={})"
        bleup = [p * 100 for p in self.precision()[:order]]
        return fmt.format(
            order,
            self.score(order=order),
            *bleup,
            self.brevity(),
            self.stat.predlen / self.stat.reflen,
            self.stat.predlen,
            self.stat.reflen
        )
import unittest
from pathlib import PurePath
import click
from linkml.generators import jsonldcontextgen
from tests.test_scripts.environment import env
from tests.utils.clicktestcase import ClickTestCase
from tests.utils.filters import ldcontext_metadata_filter
class GenContextTestCase(ClickTestCase):
    """CLI tests for the gen-jsonld-context generator."""

    testdir = "gencontext"
    click_ep = jsonldcontextgen.cli
    prog_name = "gen-jsonld-context"
    env = env

    def test_help(self):
        """--help should print usage information."""
        self.do_test("--help", "help")

    def METHOD_NAME(self):
        """Generate the metamodel context and exercise CLI error paths."""
        self.maxDiff = None
        self.do_test([], "meta.context.jsonld", filtr=ldcontext_metadata_filter)
        self.do_test("--metauris", "meta_contextn.jsonld", filtr=ldcontext_metadata_filter)
        # Unknown format and unknown option must surface click errors.
        self.do_test("-f xsv", "meta_error", expected_error=click.exceptions.BadParameter)
        self.do_test("--niggles", "meta2_error", expected_error=click.exceptions.NoSuchOption)

    def test_prefix_options(self):
        """Test various prefix emission options"""
        # prefixes only, no-merge
        self.do_test(
            [
                self.env.input_path("simple_uri_test.yaml"),
                "--no-metadata",
                "--no-mergeimports",
                "--no-model",
            ],
            "simple_uri_test.no_merge.prefixes_only.context.jsonld",
            add_yaml=False,
        )
        # flat prefixes only, no-merge
        self.do_test(
            [
                self.env.input_path("simple_uri_test.yaml"),
                "--no-metadata",
                "--no-mergeimports",
                "--no-model",
                "--flatprefixes",
            ],
            "simple_uri_test.no_merge.flatprefixes_only.context.jsonld",
            add_yaml=False,
        )
        # model only, no-merge
        self.do_test(
            [
                self.env.input_path("simple_uri_test.yaml"),
                "--no-metadata",
                "--no-mergeimports",
                "--no-prefixes",
            ],
            "simple_uri_test.no_merge.model_only.context.jsonld",
            add_yaml=False,
        )
        # both, no-merge
        self.do_test(
            [
                self.env.input_path("simple_uri_test.yaml"),
                "--no-metadata",
                "--no-mergeimports",
                "--model",
                "--prefixes",
            ],
            "simple_uri_test.no_merge.context.jsonld",
            add_yaml=False,
        )
        # prefixes only, merge
        self.do_test(
            [
                self.env.input_path("simple_uri_test.yaml"),
                "--no-metadata",
                "--mergeimports",
                "--no-model",
            ],
            "simple_uri_test.merge.prefixes_only.context.jsonld",
            add_yaml=False,
        )
        # flat prefixes only, merge
        self.do_test(
            [
                self.env.input_path("simple_uri_test.yaml"),
                "--no-metadata",
                "--mergeimports",
                "--no-model",
                "--flatprefixes",
            ],
            "simple_uri_test.merge.flatprefixes_only.context.jsonld",
            add_yaml=False,
        )
        # model only, merge
        self.do_test(
            [
                self.env.input_path("simple_uri_test.yaml"),
                "--no-metadata",
                "--mergeimports",
                "--no-prefixes",
            ],
            "simple_uri_test.merge.model_only.context.jsonld",
            add_yaml=False,
        )
        # both, merge
        self.do_test(
            [
                self.env.input_path("simple_uri_test.yaml"),
                "--no-metadata",
                "--mergeimports",
                "--model",
                "--prefixes",
            ],
            "simple_uri_test.merge.context.jsonld",
            add_yaml=False,
        )

    def test_slot_class_uri(self):
        """Contexts for slot/class URIs without declared namespaces."""
        # Note: two warnings are expected below:
        # WARNING:ContextGenerator:No namespace defined for URI: http://example.org/slot/su
        # WARNING:ContextGenerator:No namespace defined for URI: http://example.org/class/cu
        self.do_test(
            PurePath(env.input_path("uri_tests.yaml")).as_posix(),
            "uri_tests.jsonld",
            filtr=ldcontext_metadata_filter,
            add_yaml=False,
        )
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly parses environment variables."""
import os
from googletest.test import gtest_test_utils
IS_WINDOWS = os.name == 'nt'
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'

# Path of the googletest-env-var-test_ binary exercised by these tests.
COMMAND = gtest_test_utils.GetTestExecutablePath('googletest-env-var-test_')

# Mutable copy of the parent environment; tests tweak GTEST_* vars in here.
environ = os.environ.copy()
def AssertEq(expected, actual):
  """Asserts that 'expected' equals 'actual', printing both on mismatch."""
  if expected != actual:
    print('Expected: %s' % (expected,))
    print(' Actual: %s' % (actual,))
    # Carry the values in the exception itself so failures are
    # self-describing even when stdout is not captured.
    raise AssertionError('Expected: %r, Actual: %r' % (expected, actual))
def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""
  if value is None:
    # Remove the variable if present; no-op otherwise.
    environ.pop(env_var, None)
  else:
    environ[env_var] = value
def GetFlag(flag):
  """Runs googletest-env-var-test_ and returns its output."""
  args = [COMMAND]
  # 'flag' is passed straight to the binary; None means no extra argument.
  if flag is not None:
    args += [flag]
  return gtest_test_utils.Subprocess(args, env=environ).output
def TestFlag(flag, test_val, default_val):
  """Verifies that the given flag is affected by the corresponding env var."""
  env_var = 'GTEST_' + flag.upper()
  # With the env var set, the binary must report the overridden value...
  SetEnvVar(env_var, test_val)
  AssertEq(test_val, GetFlag(flag))
  # ...and with it unset, the built-in default.
  SetEnvVar(env_var, None)
  AssertEq(default_val, GetFlag(flag))
class GTestEnvVarTest(gtest_test_utils.TestCase):
  """End-to-end checks of GTEST_* environment-variable handling."""

  def testEnvVarAffectsFlag(self):
    """Tests that environment variable should affect the corresponding flag."""
    TestFlag('break_on_failure', '1', '0')
    TestFlag('color', 'yes', 'auto')
    SetEnvVar('TESTBRIDGE_TEST_RUNNER_FAIL_FAST', None)  # For 'fail_fast' test
    TestFlag('fail_fast', '1', '0')
    TestFlag('filter', 'FooTest.Bar', '*')
    SetEnvVar('XML_OUTPUT_FILE', None)  # For 'output' test
    TestFlag('output', 'xml:tmp/foo.xml', '')
    TestFlag('brief', '1', '0')
    TestFlag('print_time', '0', '1')
    TestFlag('repeat', '999', '1')
    TestFlag('throw_on_failure', '1', '0')
    TestFlag('death_test_style', 'threadsafe', 'fast')
    TestFlag('catch_exceptions', '0', '1')
    # These flags only exist on Linux builds.
    if IS_LINUX:
      TestFlag('death_test_use_fork', '1', '0')
      TestFlag('stack_trace_depth', '0', '100')

  def METHOD_NAME(self):
    """Tests that $XML_OUTPUT_FILE affects the output flag."""
    SetEnvVar('GTEST_OUTPUT', None)
    SetEnvVar('XML_OUTPUT_FILE', 'tmp/bar.xml')
    AssertEq('xml:tmp/bar.xml', GetFlag('output'))

  def testXmlOutputFileOverride(self):
    """Tests that $XML_OUTPUT_FILE is overridden by $GTEST_OUTPUT."""
    SetEnvVar('GTEST_OUTPUT', 'xml:tmp/foo.xml')
    SetEnvVar('XML_OUTPUT_FILE', 'tmp/bar.xml')
    AssertEq('xml:tmp/foo.xml', GetFlag('output'))
# Allow running this test module directly.
if __name__ == '__main__':
  gtest_test_utils.Main()
# Copyright (c) 2023 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from typing import Dict
import pytest
from nncf.common.initialization.batchnorm_adaptation import BatchnormAdaptationAlgorithm
from nncf.experimental.torch.nas.bootstrapNAS.elasticity.elastic_depth import ElasticDepthHandler
from nncf.experimental.torch.nas.bootstrapNAS.elasticity.elastic_width import ElasticWidthHandler
from nncf.experimental.torch.nas.bootstrapNAS.elasticity.multi_elasticity_handler import MultiElasticityHandler
from nncf.experimental.torch.nas.bootstrapNAS.training.progressive_shrinking_controller import (
ProgressiveShrinkingController,
)
from nncf.experimental.torch.nas.bootstrapNAS.training.scheduler import BootstrapNASScheduler
from tests.shared.paths import TEST_ROOT
from tests.torch.sample_test_validator import SampleType
from tests.torch.sample_test_validator import SanitySampleValidator
from tests.torch.sample_test_validator import SanityTestCaseDescriptor
class NASSampleTestDescriptor(SanityTestCaseDescriptor):
    """Describes a BootstrapNAS classification sample run on a mock dataset."""

    def __init__(self):
        super().__init__()
        self.sample_type(SampleType.CLASSIFICATION_NAS)
        self.mock_dataset("mock_32x32")
        self.batch_size(2)

    @property
    def config_directory(self) -> Path:
        """Directory holding the NAS sample JSON configs."""
        return TEST_ROOT / "torch" / "data" / "configs" / "nas"

    def get_validator(self) -> "NASSampleValidator":
        return NASSampleValidator(self)

    def get_compression_section(self):
        # NAS descriptors carry no compression section; intentionally a no-op.
        pass

    def get_config_update(self) -> Dict:
        """Sample parameter overrides keeping the e2e run tiny and fast."""
        sample_params = self.get_sample_params()
        sample_params["num_mock_images"] = 2
        sample_params["epochs"] = 5
        return sample_params
class NASSampleValidator(SanitySampleValidator):
    """Validates a NAS sample run by spying on key elasticity/training calls."""

    def __init__(self, desc: NASSampleTestDescriptor):
        super().__init__(desc)
        self._desc = desc
        # Spies installed in setup_spy(); each must have been hit by the run.
        self._all_spies = []

    def setup_spy(self, mocker):
        # Need to mock SafeMLFLow to prevent starting a not closed mlflow session due to memory leak of config and
        # SafeMLFLow, which happens with a mocked train function
        self._sample_handler.mock_mlflow(mocker)
        self._all_spies = [
            mocker.spy(ElasticWidthHandler, "get_random_config"),
            mocker.spy(ElasticWidthHandler, "reorganize_weights"),
            mocker.spy(ElasticDepthHandler, "get_random_config"),
            mocker.spy(MultiElasticityHandler, "activate_random_subnet"),
            mocker.spy(MultiElasticityHandler, "activate_minimum_subnet"),
            mocker.spy(MultiElasticityHandler, "activate_subnet_for_config"),
            mocker.spy(BootstrapNASScheduler, "epoch_step"),
            mocker.spy(BootstrapNASScheduler, "step"),
            mocker.spy(ProgressiveShrinkingController, "step"),
            mocker.spy(ProgressiveShrinkingController, "_run_batchnorm_adaptation"),
            mocker.spy(BatchnormAdaptationAlgorithm, "run"),
        ]

    def validate_spy(self):
        """Assert every spied method was invoked at least once."""
        for spy in self._all_spies:
            spy.assert_called()
# One sanity-test descriptor per supported supernet architecture config.
NAS_TEST_CASE_DESCRIPTORS = [
    NASSampleTestDescriptor().config_name("resnet50_cifar10_nas.json"),
    NASSampleTestDescriptor().config_name("mobilenet_v2_cifar10_nas.json"),
    NASSampleTestDescriptor().config_name("efficient_net_b1_cifar10_nas.json"),
    NASSampleTestDescriptor().config_name("mobilenet_v3_cifar10_nas.json"),
]
@pytest.fixture(name="nas_desc", params=NAS_TEST_CASE_DESCRIPTORS, ids=map(str, NAS_TEST_CASE_DESCRIPTORS))
def METHOD_NAME(request, dataset_dir):
    """Parametrized fixture yielding each finalized NAS test descriptor."""
    desc: NASSampleTestDescriptor = request.param
    return desc.finalize(dataset_dir)
def test_e2e_supernet_training(nas_desc: NASSampleTestDescriptor, tmp_path, mocker):
    """End-to-end supernet training sanity run; all spied calls must be hit."""
    validator = nas_desc.get_validator()
    args = validator.get_default_args(tmp_path)
    validator.validate_sample(args, mocker)
'''
copyright: Copyright (C) 2015-2022, Wazuh Inc.
Created by Wazuh, Inc. <info@wazuh.com>.
This program is free software; you can redistribute it and/or modify it under the terms of GPLv2
type: integration
brief: File Integrity Monitoring (FIM) system watches selected files and triggering alerts
when these files are modified. Specifically, these tests will check if FIM detects
moving files from one directory using the 'whodata' monitoring mode to another using
the 'realtime' monitoring mode and vice versa.
The FIM capability is managed by the 'wazuh-syscheckd' daemon, which checks configured
files for changes to the checksums, permissions, and ownership.
components:
- fim
suite: files_moving_files
targets:
- agent
- manager
daemons:
- wazuh-syscheckd
os_platform:
- linux
- windows
os_version:
- Arch Linux
- Amazon Linux 2
- Amazon Linux 1
- CentOS 8
- CentOS 7
- Debian Buster
- Red Hat 8
- Ubuntu Focal
- Ubuntu Bionic
- Windows 10
- Windows Server 2019
- Windows Server 2016
references:
- https://documentation.wazuh.com/current/user-manual/capabilities/file-integrity/index.html
- https://documentation.wazuh.com/current/user-manual/reference/ossec-conf/syscheck.html#synchronization
pytest_args:
- fim_mode:
realtime: Enable real-time monitoring on Linux (using the 'inotify' system calls) and Windows systems.
whodata: Implies real-time monitoring but adding the 'who-data' information.
- tier:
0: Only level 0 tests are performed, they check basic functionalities and are quick to perform.
1: Only level 1 tests are performed, they check functionalities of medium complexity.
2: Only level 2 tests are performed, they check advanced functionalities and are slow to perform.
tags:
- fim_moving_files
'''
import os
import sys
import pytest
from wazuh_testing import global_parameters
from wazuh_testing.fim import (LOG_FILE_PATH, REGULAR, callback_detect_event, create_file)
from wazuh_testing.tools import PREFIX
from wazuh_testing.tools.configuration import load_wazuh_configurations
from wazuh_testing.tools.monitoring import FileMonitor
# Marks
pytestmark = [pytest.mark.linux, pytest.mark.win32, pytest.mark.tier(level=1)]
# Variables
test_directories = [os.path.join(PREFIX, 'testdir1'), os.path.join(PREFIX, 'testdir2')]
testdir1, testdir2 = test_directories
testfile1 = 'file1'
testfile2 = 'file2'
whodata = 'whodata'
realtime = 'realtime'
added = 'added'
deleted = 'deleted'
test_data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')
configurations_path = os.path.join(test_data_path, 'wazuh_conf.yaml')
wazuh_log_monitor = FileMonitor(LOG_FILE_PATH)
mark_skip_agentWindows = pytest.mark.skipif(sys.platform == 'win32', reason="It will be blocked by wazuh/wazuh-qa#2174")
# Configurations
configurations = load_wazuh_configurations(configurations_path, __name__)
# Internal functions
def extra_configuration_before_yield():
    """
    Create /testdir1/file1 and /testdir2/file2 before execute test
    """
    # Seed one empty regular file in each monitored directory so the tests
    # can move them between monitoring modes.
    create_file(REGULAR, testdir1, testfile1, content='')
    create_file(REGULAR, testdir2, testfile2, content='')
def check_event(dirsrc, dirdst, filename, mod_del_event, mod_add_event):
    """
    Check that the next FIM event is one of the two expected events.

    The next event read from the log must be either the 'deleted' event for
    the file in the source directory (mode `mod_del_event`) or the 'added'
    event for the file in the target directory (mode `mod_add_event`).

    Parameters
    ----------
    dirsrc : str
        Source directory.
    dirdst : str
        Target directory.
    filename : str
        File name.
    mod_del_event : str
        Mode of deleted event.
    mod_add_event : str
        Mode of added event.

    Raises
    ------
    AssertionError
        If the event matches neither expected event.
    """
    event = wazuh_log_monitor.start(timeout=global_parameters.default_timeout, callback=callback_detect_event).result()
    data = event['data']
    is_expected_deletion = (data['mode'] == mod_del_event and data['type'] == deleted and
                            os.path.join(dirsrc, filename) in data['path'])
    is_expected_addition = (data['mode'] == mod_add_event and data['type'] == added and
                            os.path.join(dirdst, filename) in data['path'])
    # Bug fix: the original fallback negated only parts of the 'added'
    # condition (a botched De Morgan), so events matching neither pattern
    # could pass silently. Require a full match of one of the two patterns.
    if not (is_expected_deletion or is_expected_addition):
        raise AssertionError('Event not found')
# Fixture
@pytest.fixture(scope='module', params=configurations)
def METHOD_NAME(request):
    """
    Get configurations from the module.
    """
    # Each param is one rendered wazuh_conf.yaml test-case configuration.
    return request.param
# Test
@pytest.mark.parametrize('dirsrc, dirdst, filename, mod_del_event, mod_add_event', [
    (testdir1, testdir2, testfile1, whodata, realtime),
    (testdir2, testdir1, testfile2, realtime, whodata)
])
@mark_skip_agentWindows
def test_moving_file_to_whodata(dirsrc, dirdst, filename, mod_del_event, mod_add_event, METHOD_NAME,
                                configure_environment, restart_syscheckd, wait_for_fim_start):
    '''
    description: Check if the 'wazuh-syscheckd' daemon detects events when moving files from a directory
                 monitored by 'whodata' to another monitored by 'realtime' and vice versa. For this purpose,
                 the test will monitor two folders using both FIM monitoring modes and create a testing file
                 inside each one. Then, it will rename the testing file of the target folder using the name
                 of the one inside the source folder. Finally, the test will verify that the FIM events
                 generated to match the monitoring mode used in the folders.

    wazuh_min_version: 4.2.0

    tier: 1

    parameters:
        - dirsrc:
            type: str
            brief: Path to the source directory where the testing file will be deleted.
        - dirdst:
            type: str
            brief: Path to the target directory where the testing file will be added.
        - filename:
            type: str
            brief: Name of the testing file.
        - mod_del_event:
            type: str
            brief: Monitoring mode of FIM 'deleted' event.
        - mod_add_event:
            type: str
            brief: Monitoring mode of FIM 'added' event.
        - get_configuration:
            type: fixture
            brief: Get configurations from the module.
        - configure_environment:
            type: fixture
            brief: Configure a custom environment for testing.
        - restart_syscheckd:
            type: fixture
            brief: Clear the 'ossec.log' file and start a new monitor.
        - wait_for_fim_start:
            type: fixture
            brief: Wait for realtime start, whodata start, or end of initial FIM scan.

    assertions:
        - Verify that the 'mode' field in FIM 'deleted' events match with one used
          in the source folder of moved files.
        - Verify that the 'mode' field in FIM 'added' events match with one used
          in the target folder of moved files.

    input_description: A test case (monitoring_realtime) is contained in external YAML file (wazuh_conf.yaml)
                       which includes configuration settings for the 'wazuh-syscheckd' daemon and, these are
                       combined with the testing directories to be monitored defined in the module.

    expected_output:
        - r'.*Sending FIM event: (.+)$' ('added' and 'deleted' events)

    tags:
        - realtime
        - who_data
    '''
    os.rename(os.path.join(dirsrc, filename), os.path.join(dirdst, filename))
    # The move emits two events (deleted in source, added in target);
    # call check_event twice to consume and validate both.
    check_event(dirsrc, dirdst, filename, mod_del_event, mod_add_event)
    check_event(dirsrc, dirdst, filename, mod_del_event, mod_add_event)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-few-public-methods, too-many-arguments
from __future__ import annotations
from typing import Any, Callable, TYPE_CHECKING
from flask_babel import gettext as __, ngettext
from jinja2 import TemplateError
from jinja2.meta import find_undeclared_variables
from superset import is_feature_enabled
from superset.errors import SupersetErrorType
from superset.sqllab.commands.execute import SqlQueryRender
from superset.sqllab.exceptions import SqlLabException
from superset.utils import core as utils
MSG_OF_1006 = "Issue 1006 - One or more parameters specified in the query are missing."
if TYPE_CHECKING:
from superset.jinja_context import BaseTemplateProcessor
from superset.sqllab.sqllab_execution_context import SqlJsonExecutionContext
PARAMETER_MISSING_ERR = __(
"Please check your template parameters for syntax errors and make sure "
"they match across your SQL query and Set Parameters. Then, try running "
"your query again."
)
class SqlQueryRenderImpl(SqlQueryRender):
    """Render SQL Lab queries through the Jinja template processor.

    Wraps a template-processor factory so each render gets a processor
    bound to the query's database, and converts Jinja failures into
    ``SqlQueryRenderException`` with user-facing messages.
    """
    # Factory producing a BaseTemplateProcessor for a given database/query.
    _sql_template_processor_factory: Callable[..., BaseTemplateProcessor]
    def __init__(
        self, sql_template_factory: Callable[..., BaseTemplateProcessor]
    ) -> None:
        self._sql_template_processor_factory = sql_template_factory
    def render(self, execution_context: SqlJsonExecutionContext) -> str:
        """Return the query's SQL with template parameters substituted.

        :param execution_context: context holding the query model and its
            template parameters
        :raises SqlQueryRenderException: when the template is malformed, or
            (with templating enabled) references undefined parameters
        """
        query_model = execution_context.query
        try:
            sql_template_processor = self._sql_template_processor_factory(
                database=query_model.database, query=query_model
            )
            rendered_query = sql_template_processor.process_template(
                query_model.sql, **execution_context.template_params
            )
            self._validate(execution_context, rendered_query, sql_template_processor)
            return rendered_query
        except TemplateError as ex:
            self._raise_template_exception(ex, execution_context)
        # _raise_template_exception always raises; this return only exists to
        # satisfy static checkers that require a str on every code path.
        return "NOT_REACHABLE_CODE"
    def _validate(
        self,
        execution_context: SqlJsonExecutionContext,
        rendered_query: str,
        sql_template_processor: BaseTemplateProcessor,
    ) -> None:
        """Reject rendered SQL that still contains undeclared Jinja variables.

        Only runs when the ENABLE_TEMPLATE_PROCESSING feature flag is on.
        """
        if is_feature_enabled("ENABLE_TEMPLATE_PROCESSING"):
            # pylint: disable=protected-access
            # Re-parse the rendered SQL with the processor's own Jinja
            # environment to find variables that were never supplied.
            syntax_tree = sql_template_processor._env.parse(rendered_query)
            undefined_parameters = find_undeclared_variables(syntax_tree)
            if undefined_parameters:
                self._raise_undefined_parameter_exception(
                    execution_context, undefined_parameters
                )
    def _raise_undefined_parameter_exception(
        self, execution_context: SqlJsonExecutionContext, undefined_parameters: Any
    ) -> None:
        """Raise a MISSING_TEMPLATE_PARAMS_ERROR listing the missing names."""
        raise SqlQueryRenderException(
            sql_json_execution_context=execution_context,
            error_type=SupersetErrorType.MISSING_TEMPLATE_PARAMS_ERROR,
            reason_message=ngettext(
                "The parameter %(parameters)s in your query is undefined.",
                "The following parameters in your query are undefined: %(parameters)s.",
                len(undefined_parameters),
                parameters=utils.format_list(undefined_parameters),
            ),
            suggestion_help_msg=PARAMETER_MISSING_ERR,
            # METHOD_NAME is a generated placeholder for the exception's
            # 'extra' payload (stored as _extra, serialized under "extra").
            METHOD_NAME={
                "undefined_parameters": list(undefined_parameters),
                "template_parameters": execution_context.template_params,
                "issue_codes": [
                    {
                        "code": 1006,
                        "message": MSG_OF_1006,
                    }
                ],
            },
        )
    def _raise_template_exception(
        self, ex: Exception, execution_context: SqlJsonExecutionContext
    ) -> None:
        """Wrap a Jinja TemplateError in an INVALID_TEMPLATE_PARAMS_ERROR."""
        raise SqlQueryRenderException(
            sql_json_execution_context=execution_context,
            error_type=SupersetErrorType.INVALID_TEMPLATE_PARAMS_ERROR,
            reason_message=__(
                "The query contains one or more malformed template parameters."
            ),
            suggestion_help_msg=__(
                "Please check your query and confirm that all template "
                "parameters are surround by double braces, for example, "
                '"{{ ds }}". Then, try running your query again.'
            ),
        ) from ex
class SqlQueryRenderException(SqlLabException):
    """SQL Lab exception raised when query rendering fails.

    In addition to the base exception data it can carry an extra mapping
    (undefined parameters, issue codes, ...) supplied via the generated
    METHOD_NAME parameter; the payload is merged into ``to_dict()`` under
    the "extra" key.
    """

    # Optional extra error details; None when none were supplied.
    _extra: dict[str, Any] | None

    def __init__(
        self,
        sql_json_execution_context: SqlJsonExecutionContext,
        error_type: SupersetErrorType,
        reason_message: str | None = None,
        exception: Exception | None = None,
        suggestion_help_msg: str | None = None,
        METHOD_NAME: dict[str, Any] | None = None,
    ) -> None:
        super().__init__(
            sql_json_execution_context,
            error_type,
            reason_message,
            exception,
            suggestion_help_msg,
        )
        self._extra = METHOD_NAME

    @property
    def METHOD_NAME(self) -> dict[str, Any] | None:
        """Extra error details, or None when none were supplied."""
        return self._extra

    def to_dict(self) -> dict[str, Any]:
        """Serialize the exception, appending extra details when present."""
        rv = super().to_dict()
        if self._extra:
            # BUGFIX: removed stray '|' residue that made this line a
            # syntax error in the checked-in file.
            rv["extra"] = self._extra
        return rv
#!/usr/bin/env python3
import os, sys, re, subprocess, shutil
# TODO: add support for other basic preprocessor checks (c/kamcfg)
# TODO: add support for missing semi-colon / dangling curly brace (c/kamcfg)
# TODO: add support for recursing through kamcfg include files (kamcfg)
# global config variables
# project root: prefer the enclosing git checkout; fall back to the
# grandparent directory of this script when not inside a repo
project_root = subprocess.Popen(['git', 'rev-parse', '--show-toplevel'], universal_newlines=True,
                                stdout=subprocess.PIPE, stderr=subprocess.DEVNULL).communicate()[0].strip()
if len(project_root) == 0:
    project_root = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))
# find C src files in project
matched_csrc_files = subprocess.Popen(['find', project_root, '-type', 'f', '-regextype', 'posix-extended', '-regex', '.*\.(cpp|hpp|c|h)$'],
                                      universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL).communicate()[0].strip().split()
# find kamailio .cfg files in project
# (a .cfg file is treated as kamailio config when its first line is #!KAMAILIO)
shell_pipe = subprocess.Popen(['find', project_root, '-type', 'f', '-name', '*.cfg', '-print0'],
                              stdout=subprocess.PIPE, stderr=subprocess.DEVNULL).stdout
matched_kamcfg_files = subprocess.Popen(['xargs', '-0', 'sh', '-c', 'for arg do sed -n "/^\#\!KAMAILIO/q 0;q 1" ${arg} && echo "${arg}"; done', '_'],
                                        universal_newlines=True, stdin=shell_pipe, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL
                                        ).communicate()[0].strip().split()
files_found = len(matched_csrc_files) + len(matched_kamcfg_files)
# terminal width used when rendering the result banners (80x24 fallback)
term_width = shutil.get_terminal_size((80, 24))[0]
# global constants
# byte-regexes matching preprocessor directives and curly-brace lines for
# the two supported syntax flavours (C sources and kamailio configs)
CSRC_STYLE_IFDEF_REGEX = re.compile(rb'^[ \t]*#(?:ifdef|ifndef).*')
CSRC_STYLE_ELSE_REGEX = re.compile(rb'^[ \t]*#else.*')
CSRC_STYLE_ENDIF_REGEX = re.compile(rb'^[ \t]*#endif.*')
CSRC_CURLYBRACE_OPEN_REGEX = re.compile(rb'^[ \t]*(?!//|/\*).*\{[ \t]*')
CSRC_CURLYBRACE_CLOSE_REGEX = re.compile(rb'^[ \t]*(?!//|/\*)\}[ \t]*')
KAMCFG_STYLE_IFDEF_REGEX = re.compile(rb'^[ \t]*#\!(?:ifdef|ifndef).*')
KAMCFG_STYLE_ELSE_REGEX = re.compile(rb'^[ \t]*#\!else.*')
KAMCFG_STYLE_ENDIF_REGEX = re.compile(rb'^[ \t]*#\!endif.*')
KAMCFG_CURLYBRACE_OPEN_REGEX = re.compile(rb'^[ \t]*(?!//|#|/\*).*\{[ \t]*')
KAMCFG_CURLYBRACE_CLOSE_REGEX = re.compile(rb'^[ \t]*(?!//|#|/\*)\}[ \t]*')
# holds state for entire test
test_succeeded = True
files_checked = 0
# holds state for current file check
# each entry below is a [file, line-number, line-bytes] record
unmatched_ifdefs = []
unmatched_elses = []
outoforder_elses = []
unmatched_endifs = []
unmatched_lcurly_braces = []
unmatched_rcurly_braces = []
# check for common syntax errors, currently supported checks:
# + preprocessor statement closure
# + curly brace closure
def METHOD_NAME(test_files, syntax='c-src'):
    """Scan each file in test_files for unbalanced constructs.

    Problems found are appended to the module-level result lists
    (unmatched_ifdefs, outoforder_elses, ...) as [file, line, text]
    records.  Returns True when no problem was recorded, False when at
    least one was, or when the syntax flavour is unknown.
    """
    global files_checked
    global unmatched_ifdefs, unmatched_elses, outoforder_elses, unmatched_endifs
    global unmatched_lcurly_braces, unmatched_rcurly_braces
    # select the regex set for the requested syntax flavour
    if syntax == 'c-src':
        ifdef_regex = CSRC_STYLE_IFDEF_REGEX
        else_regex = CSRC_STYLE_ELSE_REGEX
        endif_regex = CSRC_STYLE_ENDIF_REGEX
        lcurly_regex = CSRC_CURLYBRACE_OPEN_REGEX
        rcurly_regex = CSRC_CURLYBRACE_CLOSE_REGEX
    elif syntax == 'kam-cfg':
        ifdef_regex = KAMCFG_STYLE_IFDEF_REGEX
        else_regex = KAMCFG_STYLE_ELSE_REGEX
        endif_regex = KAMCFG_STYLE_ENDIF_REGEX
        lcurly_regex = KAMCFG_CURLYBRACE_OPEN_REGEX
        rcurly_regex = KAMCFG_CURLYBRACE_CLOSE_REGEX
    else:
        return False
    for test_file in test_files:
        with open(test_file, 'rb') as fp:
            i = 1
            for line in fp:
                if ifdef_regex.match(line):
                    # conditional opened; expect a matching #endif later
                    unmatched_ifdefs.append([test_file, i, line])
                elif else_regex.match(line):
                    if len(unmatched_ifdefs) == 0:
                        # else with no conditional currently open
                        outoforder_elses.append([test_file, i, line])
                    else:
                        unmatched_elses.append([test_file, i, line])
                elif endif_regex.match(line):
                    try:
                        unmatched_elses.pop()
                    except IndexError:
                        pass
                    try:
                        unmatched_ifdefs.pop()
                    except IndexError:
                        unmatched_endifs.append([test_file, i, line])
                elif lcurly_regex.match(line):
                    unmatched_lcurly_braces.append([test_file, i, line])
                elif rcurly_regex.match(line):
                    # BUGFIX: a closing brace must cancel a pending open
                    # brace; previously both sides only accumulated, so any
                    # file containing braces was reported as unbalanced.
                    try:
                        unmatched_lcurly_braces.pop()
                    except IndexError:
                        unmatched_rcurly_braces.append([test_file, i, line])
                i += 1
        files_checked += 1
    if len(unmatched_ifdefs) + len(outoforder_elses) + len(unmatched_elses) + len(unmatched_endifs) + \
            len(unmatched_lcurly_braces) + len(unmatched_rcurly_braces) != 0:
        return False
    return True
# print summary of test results
def printSummary():
    """Print the overall pass/fail banner plus file counts."""
    border = '|' + '=' * (term_width - 2) + '|'
    print(border)
    if test_succeeded:
        print('Test Result: PASSED')
    else:
        print('Test Result: FAILED')
    print(f'Number Of Files Tested: {files_checked}')
    print(f'Number Of Files Matched: {files_found}')
    print(border)
# print error results for a single test
def printErrorBlock(header, test_results):
    """Print one titled block of failure records.

    The title banner goes to stdout; the records and closing banner go
    to stderr, matching the original reporting behaviour.
    """
    avail_space = term_width - 4 - len(header)
    fill = '=' * (int(avail_space / 2))
    pad = '=' * (avail_space % 2)
    print('|{0} {1} {2}{3}|'.format(fill, header, fill, pad))
    for fname, lineno, text in test_results:
        print('[{0}] line {1}: {2}'.format(fname, str(lineno), text), file=sys.stderr)
    print('|' + '=' * (term_width - 2) + '|', file=sys.stderr)
# print detailed failure info
def printErrorInfo():
    """Print a titled error block for every non-empty result list."""
    report_blocks = (
        ('unmatched preprocessor ifdef statements', unmatched_ifdefs),
        ('out of order preprocessor else statements', outoforder_elses),
        ('unmatched preprocessor else statements', unmatched_elses),
        ('unmatched preprocessor endif statements', unmatched_endifs),
        ('unmatched left curly braces', unmatched_lcurly_braces),
        ('unmatched right curly braces', unmatched_rcurly_braces),
    )
    for header, results in report_blocks:
        if results:
            printErrorBlock(header, results)
# wrapper for the final cleanup
def printResultsAndExit():
    """Print the summary (and details on failure), then exit 0/1."""
    printSummary()
    if not test_succeeded:
        printErrorInfo()
    sys.exit(0 if test_succeeded else 1)
# main testing logic
if __name__ == "__main__":
    # BUGFIX: check both file sets independently -- the previous `elif`
    # skipped the kamailio config check entirely whenever the C source
    # check had already failed, hiding config problems from the report.
    if not METHOD_NAME(matched_csrc_files, syntax='c-src'):
        test_succeeded = False
    if not METHOD_NAME(matched_kamcfg_files, syntax='kam-cfg'):
        test_succeeded = False
    printResultsAndExit()
# test_line_ending.py -- Tests for the line ending functions
# Copyright (C) 2018-2019 Boris Feld <boris.feld@comet.ml>
#
# Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU
# General Public License as public by the Free Software Foundation; version 2.0
# or (at your option) any later version. You can redistribute it and/or
# modify it under the terms of either of these two licenses.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# You should have received a copy of the licenses; if not, see
# <http://www.gnu.org/licenses/> for a copy of the GNU General Public License
# and <http://www.apache.org/licenses/LICENSE-2.0> for a copy of the Apache
# License, Version 2.0.
#
"""Tests for the line ending conversion."""
from dulwich.tests import TestCase
from ..line_ending import (
convert_crlf_to_lf,
convert_lf_to_crlf,
get_checkin_filter_autocrlf,
get_checkout_filter_autocrlf,
normalize_blob,
)
from ..objects import Blob
class LineEndingConversion(TestCase):
    """Exercise the CRLF<->LF conversion helpers on typical inputs."""

    def test_convert_crlf_to_lf_no_op(self):
        data = b"foobar"
        self.assertEqual(convert_crlf_to_lf(data), data)

    def test_convert_crlf_to_lf(self):
        self.assertEqual(convert_crlf_to_lf(b"line1\r\nline2"), b"line1\nline2")

    def test_convert_crlf_to_lf_mixed(self):
        # a lone LF must survive while CRLF pairs are collapsed
        self.assertEqual(convert_crlf_to_lf(b"line1\r\n\nline2"), b"line1\n\nline2")

    def METHOD_NAME(self):
        data = b"foobar"
        self.assertEqual(convert_lf_to_crlf(data), data)

    def test_convert_lf_to_crlf(self):
        self.assertEqual(convert_lf_to_crlf(b"line1\nline2"), b"line1\r\nline2")

    def test_convert_lf_to_crlf_mixed(self):
        # an existing CRLF must not be doubled; a bare LF is expanded
        self.assertEqual(convert_lf_to_crlf(b"line1\r\n\nline2"), b"line1\r\n\r\nline2")
class GetLineEndingAutocrlfFilters(TestCase):
    """Check which filters the core.autocrlf setting selects."""

    def test_get_checkin_filter_autocrlf_default(self):
        # autocrlf=false applies no checkin filter
        self.assertEqual(get_checkin_filter_autocrlf(b"false"), None)

    def test_get_checkin_filter_autocrlf_true(self):
        self.assertEqual(get_checkin_filter_autocrlf(b"true"), convert_crlf_to_lf)

    def test_get_checkin_filter_autocrlf_input(self):
        self.assertEqual(get_checkin_filter_autocrlf(b"input"), convert_crlf_to_lf)

    def test_get_checkout_filter_autocrlf_default(self):
        self.assertEqual(get_checkout_filter_autocrlf(b"false"), None)

    def test_get_checkout_filter_autocrlf_true(self):
        self.assertEqual(get_checkout_filter_autocrlf(b"true"), convert_lf_to_crlf)

    def test_get_checkout_filter_autocrlf_input(self):
        # autocrlf=input converts only on checkin, never on checkout
        self.assertEqual(get_checkout_filter_autocrlf(b"input"), None)
class NormalizeBlobTestCase(TestCase):
    """Tests for normalize_blob: conversion, no-ops, and binary skip.

    The original file repeated the blob construction and verification in
    every test and carried stray dataset residue (' |') on its final
    line; both are fixed here with two private helpers.
    """

    # shared content/sha fixtures
    LF_CONTENT = b"line1\nline2"
    LF_SHA = "f8be7bb828880727816015d21abcbc37d033f233"
    CRLF_CONTENT = b"line1\r\nline2"
    CRLF_SHA = "3a1bd7a52799fe5cf6411f1d35f4c10bacb1db96"
    BIN_CONTENT = b"line1\r\nline2\0"
    BIN_SHA = "b44504193b765f7cd79673812de8afb55b372ab2"

    def _make_blob(self, content, sha):
        """Build a Blob from content and sanity-check its chunks/sha."""
        blob = Blob()
        blob.set_raw_string(content)
        self.assertEqual(blob.as_raw_chunks(), [content])
        self.assertEqual(blob.sha().hexdigest(), sha)
        return blob

    def _check_normalize(
        self, conversion, binary_detection, in_content, in_sha, out_content, out_sha
    ):
        """Normalize a blob and verify the filtered content and sha."""
        blob = self._make_blob(in_content, in_sha)
        filtered_blob = normalize_blob(
            blob, conversion, binary_detection=binary_detection
        )
        self.assertEqual(filtered_blob.as_raw_chunks(), [out_content])
        self.assertEqual(filtered_blob.sha().hexdigest(), out_sha)

    def test_normalize_to_lf_no_op(self):
        self._check_normalize(
            convert_crlf_to_lf, False,
            self.LF_CONTENT, self.LF_SHA, self.LF_CONTENT, self.LF_SHA,
        )

    def test_normalize_to_lf(self):
        self._check_normalize(
            convert_crlf_to_lf, False,
            self.CRLF_CONTENT, self.CRLF_SHA, self.LF_CONTENT, self.LF_SHA,
        )

    def test_normalize_to_lf_binary(self):
        # binary detection must leave blobs containing NUL bytes untouched
        self._check_normalize(
            convert_crlf_to_lf, True,
            self.BIN_CONTENT, self.BIN_SHA, self.BIN_CONTENT, self.BIN_SHA,
        )

    def test_normalize_to_crlf_no_op(self):
        self._check_normalize(
            convert_lf_to_crlf, False,
            self.CRLF_CONTENT, self.CRLF_SHA, self.CRLF_CONTENT, self.CRLF_SHA,
        )

    def test_normalize_to_crlf(self):
        self._check_normalize(
            convert_lf_to_crlf, False,
            self.LF_CONTENT, self.LF_SHA, self.CRLF_CONTENT, self.CRLF_SHA,
        )

    def test_normalize_to_crlf_binary(self):
        self._check_normalize(
            convert_lf_to_crlf, True,
            self.BIN_CONTENT, self.BIN_SHA, self.BIN_CONTENT, self.BIN_SHA,
        )
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetServerAzureADOnlyAuthenticationResult',
'AwaitableGetServerAzureADOnlyAuthenticationResult',
'get_server_azure_ad_only_authentication',
'get_server_azure_ad_only_authentication_output',
]
@pulumi.output_type
class GetServerAzureADOnlyAuthenticationResult:
    """
    Azure Active Directory only authentication.
    """
    def __init__(__self__, azure_ad_only_authentication=None, id=None, name=None, type=None):
        # NOTE(review): the isinstance checks only fire for truthy values, so
        # a wrong-typed falsy argument (e.g. 0, "") slips through unchecked --
        # this matches the pulumi code generator's standard output (see the
        # generated-file warning at the top of this module).
        if azure_ad_only_authentication and not isinstance(azure_ad_only_authentication, bool):
            raise TypeError("Expected argument 'azure_ad_only_authentication' to be a bool")
        pulumi.set(__self__, "azure_ad_only_authentication", azure_ad_only_authentication)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="azureADOnlyAuthentication")
    def azure_ad_only_authentication(self) -> bool:
        """
        Azure Active Directory only Authentication enabled.
        """
        return pulumi.get(self, "azure_ad_only_authentication")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
class AwaitableGetServerAzureADOnlyAuthenticationResult(GetServerAzureADOnlyAuthenticationResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Generator-based awaitable shim: the unreachable `yield` makes this
        # method a generator, so `await` completes immediately and returns a
        # plain copy of the already-resolved result without suspending.
        if False:
            yield self
        return GetServerAzureADOnlyAuthenticationResult(
            azure_ad_only_authentication=self.azure_ad_only_authentication,
            id=self.id,
            name=self.name,
            type=self.type)
def get_server_azure_ad_only_authentication(authentication_name: Optional[str] = None,
                                            resource_group_name: Optional[str] = None,
                                            server_name: Optional[str] = None,
                                            opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetServerAzureADOnlyAuthenticationResult:
    """
    Gets a specific Azure Active Directory only authentication property.

    :param str authentication_name: The name of server azure active directory only authentication.
    :param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
    :param str server_name: The name of the server.
    """
    # assemble the provider invoke payload from the plain arguments
    __args__ = dict()
    __args__['authenticationName'] = authentication_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['serverName'] = server_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:sql/v20230201preview:getServerAzureADOnlyAuthentication', __args__, opts=opts, typ=GetServerAzureADOnlyAuthenticationResult).value
    # repackage the raw invoke result as the awaitable wrapper type
    return AwaitableGetServerAzureADOnlyAuthenticationResult(
        azure_ad_only_authentication=pulumi.get(__ret__, 'azure_ad_only_authentication'),
        id=pulumi.get(__ret__, 'id'),
        name=pulumi.get(__ret__, 'name'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_server_azure_ad_only_authentication)
def METHOD_NAME(authentication_name: Optional[pulumi.Input[str]] = None,
                resource_group_name: Optional[pulumi.Input[str]] = None,
                server_name: Optional[pulumi.Input[str]] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetServerAzureADOnlyAuthenticationResult]:
    """
    Gets a specific Azure Active Directory only authentication property.

    :param str authentication_name: The name of server azure active directory only authentication.
    :param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
    :param str server_name: The name of the server.
    """
    # Body is intentionally empty: lift_output_func dispatches to the plain
    # getter above.  BUGFIX: removed stray '|' residue after the Ellipsis
    # that made the module unparseable.
    ...
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The plugin module provides classes for implementation
of suds plugins.
"""
from logging import getLogger
log = getLogger(__name__)
class Context(object):
    """
    Base class for all plugin contexts.  Attributes are attached
    dynamically by the plugin machinery (see L{Method.__call__}); the
    concrete subclasses only document which attributes to expect.
    """
class InitContext(Context):
    """
    Context handed to init plugins.
    @ivar wsdl: The loaded wsdl object.
    @type wsdl: L{wsdl.Definitions}
    """
class DocumentContext(Context):
    """
    Context handed to document plugins during XML document loading.
    @ivar url: The URL.
    @type url: str
    @ivar document: Either the XML text or the B{parsed} document root.
    @type document: (str|L{sax.element.Element})
    """
class MessageContext(Context):
    """
    Context handed to message plugins while a soap envelope is in flight.
    @ivar envelope: The soap envelope to be sent.
    @type envelope: (str|L{sax.element.Element})
    @ivar reply: The reply.
    @type reply: (str|L{sax.element.Element}|object)
    """
class Plugin:
    """
    Marker base class shared by all plugin flavors.
    """
class InitPlugin(Plugin):
    """
    The base class for suds I{init} plugins.
    """

    def initialized(self, context):
        """
        Hook invoked once the wsdl has been loaded, giving the plugin a
        chance to inspect or modify the WSDL before the client is used.
        @param context: The init context.
        @type context: L{InitContext}
        """
class DocumentPlugin(Plugin):
    """
    The base class for suds I{document} plugins.  Both hooks are no-ops
    intended to be overridden.
    """

    def loaded(self, context):
        """
        Hook invoked after each WSDL/XSD document is loaded, before
        parsing, so the raw document text can be inspected or modified.
        @param context: The document context.
        @type context: L{DocumentContext}
        """

    def parsed(self, context):
        """
        Hook invoked after each WSDL/XSD document is parsed, so the
        parsed document can be inspected or modified.
        @param context: The document context.
        @type context: L{DocumentContext}
        """
class MessagePlugin(Plugin):
    """
    The base class for suds I{soap message} plugins.  Subclasses override
    any of the no-op hooks below to observe or rewrite a message at the
    corresponding stage of its life cycle.
    """

    def marshalled(self, context):
        """
        Called just before sending, with the envelope as a Document, so
        it can be inspected/modified prior to transmission.
        @param context: The send context.
            The I{envelope} is the envelope docuemnt.
        @type context: L{MessageContext}
        """

    def sending(self, context):
        """
        Called with the outbound envelope rendered as text; the last
        chance to alter what goes on the wire.
        @param context: The send context.
            The I{envelope} is the envelope text.
        @type context: L{MessageContext}
        """

    def METHOD_NAME(self, context):
        """
        Called with the received raw reply text before it is SAX parsed.
        @param context: The reply context.
            The I{reply} is the raw text.
        @type context: L{MessageContext}
        """

    def parsed(self, context):
        """
        Called with the SAX-parsed DOM tree of the reply, before it is
        unmarshalled.
        @param context: The reply context.
            The I{reply} is DOM tree.
        @type context: L{MessageContext}
        """

    def unmarshalled(self, context):
        """
        Called with the unmarshalled reply object before it is returned
        to the caller.
        @param context: The reply context.
            The I{reply} is unmarshalled suds object.
        @type context: L{MessageContext}
        """
class PluginContainer:
    """
    Plugin container provides easy method invocation.
    @ivar plugins: A list of plugin objects.
    @type plugins: [L{Plugin},]
    @cvar domains: Maps a domain name to its (context class, plugin base
        class) pair.
    @type domains: dict
    """

    domains = {
        'init': (InitContext, InitPlugin),
        'document': (DocumentContext, DocumentPlugin),
        'message': (MessageContext, MessagePlugin),
    }

    def __init__(self, plugins):
        """
        @param plugins: A list of plugin objects.
        @type plugins: [L{Plugin},]
        """
        self.plugins = plugins

    def __getattr__(self, name):
        # attribute access by domain name yields a PluginDomain holding
        # only the plugins of that domain's flavor
        domain = self.domains.get(name)
        if domain is None:
            raise Exception('plugin domain (%s), invalid' % name)
        ctx, pclass = domain
        members = [p for p in self.plugins if isinstance(p, pclass)]
        return PluginDomain(ctx, members)
class PluginDomain:
    """
    The plugin domain.
    @ivar ctx: The context class used for calls into this domain.
    @type ctx: L{Context}
    @ivar plugins: The plugins (targets) belonging to this domain.
    @type plugins: list
    """

    def __init__(self, ctx, plugins):
        self.ctx = ctx
        self.plugins = plugins

    def __getattr__(self, name):
        # any unknown attribute resolves to a broadcast-method wrapper
        return Method(name, self)
class Method:
    """
    Plugin method.
    @ivar name: The method name.
    @type name: str
    @ivar domain: The plugin domain.
    @type domain: L{PluginDomain}
    """

    def __init__(self, name, domain):
        """
        @param name: The method name.
        @type name: str
        @param domain: A plugin domain.
        @type domain: L{PluginDomain}
        """
        self.name = name
        self.domain = domain

    def __call__(self, **kwargs):
        """
        Broadcast the call to every plugin in the domain.

        A fresh context is built from the domain's context class and
        populated with the keyword arguments; each plugin implementing a
        callable with this method's name is invoked with it.  Plugin
        errors are logged and swallowed so one misbehaving plugin cannot
        break the others.
        @return: The (possibly plugin-mutated) context.
        """
        ctx = self.domain.ctx()
        ctx.__dict__.update(kwargs)
        for plugin in self.domain.plugins:
            try:
                method = getattr(plugin, self.name, None)
                if method and callable(method):
                    method(ctx)
            except Exception as pe:
                log.exception(pe)
        # BUGFIX: removed stray '|' residue that corrupted this return
        return ctx
# Copyright 2017 Workiva
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from nats.aio.client import Client
from thrift.transport.TTransport import TTransportException
from thrift.transport.TTransport import TMemoryBuffer
from frugal import _NATS_MAX_MESSAGE_SIZE
from frugal.exceptions import TTransportExceptionType
from frugal.transport import FPublisherTransport
from frugal.transport import FSubscriberTransport
from frugal.transport import FPublisherTransportFactory
from frugal.transport import FSubscriberTransportFactory
class FNatsPublisherTransportFactory(FPublisherTransportFactory):
    """
    Factory producing FNatsPublisherTransport instances that all share
    one NATS client.
    """

    def __init__(self, nats_client: Client):
        self._nats_client = nats_client

    def get_transport(self) -> FPublisherTransport:
        """
        Build a publisher transport bound to this factory's NATS client.
        """
        return FNatsPublisherTransport(self._nats_client)
class FNatsPublisherTransport(FPublisherTransport):
    """
    FNatsPublisherTransport is used exclusively for pub/sub scopes.
    Publishers use it to publish to a topic. NATS is used as the
    underlying bus.
    """
    def __init__(self, nats_client: Client):
        # cap outgoing payloads at the shared NATS max-message-size constant
        super().__init__(_NATS_MAX_MESSAGE_SIZE)
        self._nats_client = nats_client
    async def open(self):
        """
        Verify the shared NATS client is connected.  This transport does
        not establish the connection itself; it only checks it.
        """
        if not self._nats_client.is_connected:
            raise TTransportException(TTransportExceptionType.NOT_OPEN,
                                      'NATS is not connected')
    async def close(self):
        """
        Flush any pending published messages.  Note: this does not
        disconnect the shared NATS client.
        """
        if not self.is_open():
            return
        await self._nats_client.flush()
    def is_open(self) -> bool:
        """
        Check to see if the transport is open (i.e. NATS is connected).
        """
        return self._nats_client.is_connected
    async def publish(self, topic: str, data):
        """
        Publish a message to NATS on a given topic.
        Args:
            topic: string
            data: bytearray
        Raises a TTransportException when disconnected or when the
        payload is rejected by the size check (_check_publish_size
        presumably returns True for oversized data -- confirm in the
        FPublisherTransport base class).
        """
        if not self.is_open():
            raise TTransportException(TTransportExceptionType.NOT_OPEN,
                                      'Transport is not connected')
        if self._check_publish_size(data):
            raise TTransportException(
                type=TTransportExceptionType.REQUEST_TOO_LARGE,
                message='Message exceeds max message size'
            )
        # topics are namespaced under the 'frugal.' subject prefix
        await self._nats_client.publish('frugal.{0}'.format(topic), data)
class FNatsSubscriberTransportFactory(FSubscriberTransportFactory):
    """
    Factory producing FNatsSubscriberTransport instances that share one
    NATS client and optional queue group.
    """

    def __init__(self, nats_client: Client, queue=''):
        self._nats_client = nats_client
        self._queue = queue

    def get_transport(self) -> FSubscriberTransport:
        """
        Build a subscriber transport bound to this factory's client/queue.
        """
        return FNatsSubscriberTransport(self._nats_client, self._queue)
class FNatsSubscriberTransport(FSubscriberTransport):
    """
    FSubscriberTransport is used exclusively for pub/sub scopes.
    Subscribers use it to subscribe to a pub/sub topic. Nats is
    used as the underlying bus.
    """

    def __init__(self, nats_client: Client, queue=''):
        self._nats_client = nats_client
        self._queue = queue
        self._is_subscribed = False
        self._subscription = None

    async def METHOD_NAME(self, topic: str, callback):
        """
        Subscribe to the given topic and register a callback to
        invoke when a message is received.

        Args:
            topic: str
            callback: func

        Raises a TTransportException when NATS is disconnected or a
        subscription is already active.
        """
        if not self._nats_client.is_connected:
            raise TTransportException(TTransportExceptionType.NOT_OPEN,
                                      'Nats is not connected')
        if self.is_subscribed():
            raise TTransportException(TTransportExceptionType.ALREADY_OPEN,
                                      'Already subscribed to nats topic')

        async def nats_callback(message):
            # the first 4 bytes are skipped before handing the payload to
            # the callback (presumably a frugal frame-size prefix -- confirm)
            ret = callback(TMemoryBuffer(message.data[4:]))
            if inspect.iscoroutine(ret):
                ret = await ret
            return ret

        # topics are namespaced under the 'frugal.' subject prefix
        self._subscription = await self._nats_client.METHOD_NAME(
            'frugal.{0}'.format(topic),
            queue=self._queue,
            cb=nats_callback,
        )
        self._is_subscribed = True

    async def unsubscribe(self):
        """
        Unsubscribe from the currently subscribed topic.
        """
        await self._subscription.unsubscribe()
        self._subscription = None
        self._is_subscribed = False

    def is_subscribed(self) -> bool:
        """
        Check whether the client is subscribed or not.

        Returns:
            bool
        """
        # BUGFIX: removed stray '|' residue that corrupted this return
        return self._is_subscribed
#
# Copyright 2002-2010 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Merges XLIFF and Gettext PO localization files.
Snippet file produced by e.g. :doc:`pogrep </commands/pogrep>` and updated by a
translator can be merged back into the original files.
See: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/pomerge.html
for examples and usage instructions.
"""
import logging
from translate.storage import factory
from translate.storage.poheader import poheader
def mergestores(store1, store2, mergeblanks, mergefuzzy, mergecomments):
    """Merge translated units from *store2* into the template *store1*.

    Header units are merged via the poheader protocol when the template
    supports it.  Units not found in the template are logged and skipped;
    blank or fuzzy translations are skipped according to the flags.
    """
    for incoming in store2.units:
        if incoming.isheader():
            # Headers are merged separately from ordinary translation units.
            if isinstance(store1, poheader):
                store1.mergeheaders(store2)
            continue
        # Match by unit id first, then fall back to matching by source text.
        target_unit = store1.findid(incoming.getid())
        if target_unit is None:
            target_unit = store1.findunit(incoming.source)
        if target_unit is None:
            logging.error(
                "The template does not contain the following unit:\n%s", str(incoming)
            )
            continue
        if not mergeblanks and len(incoming.target.strip()) == 0:
            continue
        if not mergefuzzy and incoming.isfuzzy():
            continue
        target_unit.merge(incoming, overwrite=True, comments=mergecomments)
    return store1
def str2bool(option):
    """Convert a string value to boolean

    :param option: yes, true, 1, no, false, 0
    :type option: String
    :rtype: Boolean
    """
    normalized = option.lower()
    if normalized in {"yes", "true", "1"}:
        return True
    if normalized in {"no", "false", "0"}:
        return False
    # Note: the lowered value is reported, matching historical behaviour.
    raise ValueError("invalid boolean value: %r" % normalized)
def mergestore(
    inputfile,
    outputfile,
    templatefile,
    mergeblanks="no",
    mergefuzzy="no",
    mergecomments="yes",
):
    """Merge translations from *inputfile* into *templatefile* and serialize.

    :param inputfile: open file (or path) with the translated snippet store
    :param outputfile: open file the merged store is serialized into
    :param templatefile: the original store to merge into, or None
    :param mergeblanks: yes/no-style flag - merge empty translations
    :param mergefuzzy: yes/no-style flag - merge fuzzy translations
    :param mergecomments: yes/no-style flag - merge comments too
    :returns: 1 if output was written, 0 if the merged store is empty
    :raises ValueError: if one of the flags is not a valid yes/no value
    """

    def _parse_flag(value, name):
        # Normalise one yes/no option, re-raising with the option name so the
        # user knows which flag was invalid.  (Replaces three copies of the
        # same try/except block.)
        try:
            return str2bool(value)
        except ValueError:
            raise ValueError("invalid %s value: %r" % (name, value))

    # Parse in the original order so error precedence is unchanged.
    mergecomments = _parse_flag(mergecomments, "mergecomments")
    mergeblanks = _parse_flag(mergeblanks, "mergeblanks")
    mergefuzzy = _parse_flag(mergefuzzy, "mergefuzzy")
    inputstore = factory.getobject(inputfile)
    if templatefile is None:
        # No template given: merge into an empty store of the same type.
        templatestore = type(inputstore)()
    else:
        templatestore = factory.getobject(templatefile)
    outputstore = mergestores(
        templatestore, inputstore, mergeblanks, mergefuzzy, mergecomments
    )
    if outputstore.isempty():
        return 0
    outputstore.serialize(outputfile)
    return 1
def METHOD_NAME():
    """Command-line entry point: register formats and options, then run the merge."""
    from translate.convert import convert
    # Maps (input, template) extension pairs -> (output extension, converter).
    formats = {
        ("po", "po"): ("po", mergestore),
        ("po", "pot"): ("po", mergestore),
        ("pot", "po"): ("po", mergestore),
        ("pot", "pot"): ("pot", mergestore),
        "po": ("po", mergestore),
        "pot": ("po", mergestore),
        ("xlf", "po"): ("po", mergestore),
        ("xlf", "pot"): ("po", mergestore),
        ("xlf", "xlf"): ("xlf", mergestore),
        ("po", "xlf"): ("xlf", mergestore),
        ("xliff", "po"): ("po", mergestore),
        ("xliff", "pot"): ("po", mergestore),
        ("xliff", "xliff"): ("xliff", mergestore),
        ("po", "xliff"): ("xliff", mergestore),
    }
    mergeblanksoption = convert.optparse.Option(
        "",
        "--mergeblanks",
        dest="mergeblanks",
        action="store",
        default="yes",
        help="whether to overwrite existing translations with blank translations (yes/no). Default is yes.",
    )
    mergefuzzyoption = convert.optparse.Option(
        "",
        "--mergefuzzy",
        dest="mergefuzzy",
        action="store",
        default="yes",
        help="whether to consider fuzzy translations from input (yes/no). Default is yes.",
    )
    mergecommentsoption = convert.optparse.Option(
        "",
        "--mergecomments",
        dest="mergecomments",
        action="store",
        default="yes",
        help="whether to merge comments as well as translations (yes/no). Default is yes.",
    )
    parser = convert.ConvertOptionParser(
        formats, usetemplates=True, description=__doc__
    )
    # Each option must also be listed in `passthrough` so its value is
    # forwarded to mergestore() as a keyword argument.
    parser.add_option(mergeblanksoption)
    parser.passthrough.append("mergeblanks")
    parser.add_option(mergefuzzyoption)
    parser.passthrough.append("mergefuzzy")
    parser.add_option(mergecommentsoption)
    parser.passthrough.append("mergecomments")
    parser.run()
if __name__ == "__main__":
    METHOD_NAME()
1,959 | xml node name | ## @file
# This is an XML API that uses a syntax similar to XPath, but it is written in
# standard python so that no extra python packages are required to use it.
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import print_function
import xml.dom.minidom
import codecs
from edk2basetools.Common.LongFilePathSupport import OpenLongFilePath as open
## Create a element of XML
#
# @param Name
# @param String
# @param NodeList
# @param AttributeList
#
# @revel Element
#
def CreateXmlElement(Name, String, NodeList, AttributeList):
    """Build a DOM element named *Name*.

    *String* becomes a text child when non-empty.  Each *NodeList* entry is
    either a [key, value] pair (turned into a simple child element) or a
    ready-made DOM node appended as-is.  Each *AttributeList* entry is a
    [key, value] attribute pair; empty keys or values are skipped.
    """
    document = xml.dom.minidom.Document()
    element = document.createElement(Name)
    if String != '' and String is not None:
        element.appendChild(document.createTextNode(String))
    for entry in NodeList:
        if isinstance(entry, list):
            key, value = entry[0], entry[1]
            if key != '' and key is not None and value != '' and value is not None:
                child = document.createElement(key)
                child.appendChild(document.createTextNode(value))
                element.appendChild(child)
        else:
            # Already a DOM node: attach it directly.
            element.appendChild(entry)
    for entry in AttributeList:
        key, value = entry[0], entry[1]
        if key != '' and key is not None and value != '' and value is not None:
            element.setAttribute(key, value)
    return element
## Get a list of XML nodes using XPath style syntax.
#
# Return a list of XML DOM nodes from the root Dom specified by XPath String.
# If the input Dom or String is not valid, then an empty list is returned.
#
# @param Dom The root XML DOM node.
# @param String An XPath style path.
#
# @retval Nodes A list of XML nodes matching the XPath style String.
#
def XmlList(Dom, String):
    """Return all DOM nodes under *Dom* matching the XPath-style *String*.

    Returns an empty list when either argument is missing/empty.  Matching is
    exact tag-name matching level by level, starting at the document element.
    """
    if String is None or String == "" or Dom is None or Dom == "":
        return []
    if Dom.nodeType == Dom.DOCUMENT_NODE:
        Dom = Dom.documentElement
    if String[0] == "/":
        String = String[1:]
    tags = String.split('/')
    last = len(tags) - 1
    current = [Dom]
    for depth, tag in enumerate(tags):
        matched = []
        for node in current:
            if node.nodeType == node.ELEMENT_NODE and node.tagName == tag:
                if depth == last:
                    # Final path component: collect the node itself.
                    matched.append(node)
                else:
                    # Intermediate component: descend into its children.
                    matched.extend(node.childNodes)
        current = matched
    return current
## Get a single XML node using XPath style syntax.
#
# Return a single XML DOM node from the root Dom specified by XPath String.
# If the input Dom or String is not valid, then an empty string is returned.
#
# @param Dom The root XML DOM node.
# @param String An XPath style path.
#
# @retval Node A single XML node matching the XPath style String.
#
def XmlNode(Dom, String):
    """Return the first DOM node under *Dom* matching the XPath-style *String*.

    Returns the empty string when either argument is missing/empty or no node
    matches.  At each path level only the first matching element is followed.
    """
    if String is None or String == "" or Dom is None or Dom == "":
        return ""
    if Dom.nodeType == Dom.DOCUMENT_NODE:
        Dom = Dom.documentElement
    if String[0] == "/":
        String = String[1:]
    tags = String.split('/')
    last = len(tags) - 1
    candidates = [Dom]
    for depth, tag in enumerate(tags):
        for node in candidates:
            if node.nodeType == node.ELEMENT_NODE and node.tagName == tag:
                if depth == last:
                    return node
                # Follow only the first match at this level.
                candidates = node.childNodes
                break
    return ""
## Get a single XML element using XPath style syntax.
#
# Return a single XML element from the root Dom specified by XPath String.
# If the input Dom or String is not valid, then an empty string is returned.
#
# @param Dom The root XML DOM object.
# @param String An XPath style path.
#
# @retval Element An XML element matching the XPath style String.
#
def XmlElement(Dom, String):
    """Return the stripped text content of the node at XPath-style *String*.

    Returns "" when the path does not resolve or the node has no text child.
    """
    try:
        return XmlNode(Dom, String).firstChild.data.strip()
    except Exception:
        # Narrowed from a bare ``except``: a bare clause would also swallow
        # KeyboardInterrupt/SystemExit.
        return ""
## Get a single XML element of the current node.
#
# Return a single XML element specified by the current root Dom.
# If the input Dom is not valid, then an empty string is returned.
#
# @param Dom The root XML DOM object.
#
# @revel Element An XML element in current root Dom.
#
def XmlElementData(Dom):
    """Return the stripped text of *Dom*'s first child, or "" when unavailable."""
    try:
        return Dom.firstChild.data.strip()
    except Exception:
        # Narrowed from a bare ``except``: a bare clause would also swallow
        # KeyboardInterrupt/SystemExit.
        return ""
## Get a list of XML elements using XPath style syntax.
#
# Return a list of XML elements from the root Dom specified by XPath String.
# If the input Dom or String is not valid, then an empty list is returned.
#
# @param Dom The root XML DOM object.
# @param String A XPath style path.
#
# @revel Elements A list of XML elements matching XPath style Sting.
#
def XmlElementList(Dom, String):
    """Return a list of stripped text contents for all nodes matching *String*.

    Python 3 fix: ``map`` returns a lazy iterator, which silently broke
    callers that index, take ``len()`` of, or re-iterate the result; return a
    real list as the Python 2 original did.
    """
    return [XmlElementData(Node) for Node in XmlList(Dom, String)]
## Get the XML attribute of the current node.
#
# Return a single XML attribute named Attribute from the current root Dom.
# If the input Dom or Attribute is not valid, then an empty string is returned.
#
# @param Dom The root XML DOM object.
# @param Attribute The name of Attribute.
#
# @revel Element A single XML element matching XPath style Sting.
#
def XmlAttribute(Dom, Attribute):
    """Return the stripped value of *Attribute* on *Dom*, or '' when absent."""
    try:
        return Dom.getAttribute(Attribute).strip()
    except Exception:
        # Narrowed from a bare ``except``: a bare clause would also swallow
        # KeyboardInterrupt/SystemExit.
        return ''
## Get the XML node name of the current node.
#
# Return a single XML node name from the current root Dom.
# If the input Dom is not valid, then an empty string is returned.
#
# @param Dom The root XML DOM object.
#
# @revel Element A single XML element matching XPath style Sting.
#
def METHOD_NAME(Dom):
    """Return the stripped node name of *Dom*, or '' when unavailable."""
    try:
        return Dom.nodeName.strip()
    except Exception:
        # Narrowed from a bare ``except``: a bare clause would also swallow
        # KeyboardInterrupt/SystemExit.
        return ''
## Parse an XML file.
#
# Parse the input XML file named FileName and return a XML DOM it stands for.
# If the input File is not a valid XML file, then an empty string is returned.
#
# @param FileName The XML file name.
#
# @revel Dom The Dom object achieved from the XML file.
#
def XmlParseFile(FileName):
    """Parse the XML file *FileName* and return its DOM.

    Returns "" (after printing the error) when the file cannot be opened or
    parsed.  Fixed: the file handle is now closed even when parsing raises,
    by using the file as a context manager.
    """
    try:
        with codecs.open(FileName, encoding='utf_8_sig') as XmlFile:
            return xml.dom.minidom.parse(XmlFile)
    except Exception as X:
        print(X)
        return ""
# This acts like the main() function for the script, unless it is 'import'ed
# into another script.
if __name__ == '__main__':
    # Smoke-test CreateXmlElement with nested elements and attributes.
    A = CreateXmlElement('AAA', 'CCC', [['AAA', '111'], ['BBB', '222']], [['A', '1'], ['B', '2']])
    B = CreateXmlElement('ZZZ', 'CCC', [['XXX', '111'], ['YYY', '222']], [['A', '1'], ['B', '2']])
    # Fixed: ``CreateXmlList`` does not exist (NameError); this demo should
    # call CreateXmlElement, and the attribute list must be key/value pairs.
    C = CreateXmlElement('DDD', 'EEE', [A, B], [['FFF', '1'], ['GGG', '2']])
    print(C.toprettyxml(indent=" "))
1,960 | get data type sync | """Create dynamic types from entity, property, and data types via pydantic."""
from uuid import UUID
from graph_types import (
DataTypeSchema,
EntityTypeSchema,
PropertyTypeSchema,
)
from yarl import URL
from graph_sdk.client.concurrent import HASHClient, with_actor
from graph_sdk.filter import (
DataTypeQueryPath,
EntityTypeQueryPath,
PropertyTypeQueryPath,
)
from graph_sdk.options import Options
from graph_sdk.query import Parameter
from graph_sdk.utils import (
async_to_sync,
filter_latest_ontology_types_from_subgraph,
)
class TypeAPI:
    """GraphAPI for use with hash-graph-types."""
    # Underlying concurrent client; all public methods delegate to it.
    inner: HASHClient
    def __init__(self, base: URL) -> None:
        """Initialize the client with the base URL."""
        self.inner = HASHClient(base)
    async def load_data_type(
        self,
        data_type_id: str,
        *,
        actor_id: UUID,
    ) -> DataTypeSchema:
        """Load an external data type."""
        with with_actor(self.inner, actor_id):
            await self.inner.load_external_data_type(
                URL(data_type_id),
            )
        # is_after_load=True breaks the load -> get -> load recursion if the
        # type still cannot be found after loading.
        return await self.get_data_type(
            data_type_id,
            actor_id=actor_id,
            is_after_load=True,
        )
    async def get_data_type(
        self,
        data_type_id: str,
        *,
        actor_id: UUID,
        is_after_load: bool = False,
    ) -> DataTypeSchema:
        """Returns the data type schema for the given data type ID.

        If the data type is not found it will attempt to fetch it and use
        the actor ID to authenticate the request.

        TODO: remove this once H-136 is resolved.
        """
        with with_actor(self.inner, actor_id):
            subgraph = await self.inner.query_data_types(
                DataTypeQueryPath().versioned_url() == Parameter(data_type_id),
                Options(),
            )
        # Keep only the latest version of each ontology type in the subgraph.
        latest = filter_latest_ontology_types_from_subgraph(subgraph)
        if not latest:
            if is_after_load:
                msg = f"Could not find data type {data_type_id}"
                raise ValueError(msg)
            return await self.load_data_type(data_type_id, actor_id=actor_id)
        vertex = latest[0]
        data_type = vertex.root.inner.schema_
        # Round-trip through a JSON-mode dump to rebuild the typed schema.
        return DataTypeSchema(**data_type.model_dump(by_alias=True, mode="json"))
    def METHOD_NAME(
        self,
        data_type_id: str,
        *,
        actor_id: UUID,
    ) -> DataTypeSchema:
        """Returns the data type schema for the given data type ID."""
        # Blocking wrapper around the async get_data_type.
        return async_to_sync(self.get_data_type(data_type_id, actor_id=actor_id))
    async def load_property_type(
        self,
        property_type_id: str,
        *,
        actor_id: UUID,
    ) -> PropertyTypeSchema:
        """Load an external property type."""
        with with_actor(self.inner, actor_id):
            await self.inner.load_external_property_type(
                URL(property_type_id),
            )
        return await self.get_property_type(
            property_type_id,
            actor_id=actor_id,
            is_after_load=True,
        )
    async def get_property_type(
        self,
        property_type_id: str,
        *,
        actor_id: UUID,
        is_after_load: bool = False,
    ) -> PropertyTypeSchema:
        """Returns the property type schema for the given property type ID.

        If the property type is not found it will attempt to fetch it and use
        the actor ID to authenticate the request.

        TODO: remove this once H-136 is resolved.
        """
        with with_actor(self.inner, actor_id):
            subgraph = await self.inner.query_property_types(
                PropertyTypeQueryPath().versioned_url() == Parameter(property_type_id),
                Options(),
            )
        latest = filter_latest_ontology_types_from_subgraph(subgraph)
        if not latest:
            if is_after_load:
                msg = f"Could not find property type {property_type_id}"
                raise ValueError(msg)
            return await self.load_property_type(property_type_id, actor_id=actor_id)
        vertex = latest[0]
        property_type = vertex.root.inner.schema_
        return PropertyTypeSchema(
            **property_type.model_dump(by_alias=True, mode="json"),
        )
    def get_property_type_sync(
        self,
        property_type_id: str,
        *,
        actor_id: UUID,
    ) -> PropertyTypeSchema:
        """Returns the property type schema for the given property type ID."""
        return async_to_sync(
            self.get_property_type(property_type_id, actor_id=actor_id),
        )
    async def load_entity_type(
        self,
        entity_type_id: str,
        *,
        actor_id: UUID,
    ) -> EntityTypeSchema:
        """Load an external entity type."""
        with with_actor(self.inner, actor_id):
            await self.inner.load_external_entity_type(
                URL(entity_type_id),
            )
        return await self.get_entity_type(
            entity_type_id,
            actor_id=actor_id,
            is_after_load=True,
        )
    async def get_entity_type(
        self,
        entity_type_id: str,
        *,
        actor_id: UUID,
        is_after_load: bool = False,
    ) -> EntityTypeSchema:
        """Returns the entity type schema for the given entity type ID.

        If the entity type is not found it will attempt to fetch it and use
        the actor ID to authenticate the request.

        TODO: remove this once H-136 is resolved.
        """
        with with_actor(self.inner, actor_id):
            subgraph = await self.inner.query_entity_types(
                EntityTypeQueryPath().versioned_url() == Parameter(entity_type_id),
                Options(),
            )
        latest = filter_latest_ontology_types_from_subgraph(subgraph)
        if not latest:
            if is_after_load:
                msg = f"Could not find entity type {entity_type_id}"
                raise ValueError(msg)
            return await self.load_entity_type(entity_type_id, actor_id=actor_id)
        vertex = latest[0]
        entity_type = vertex.root.inner.schema_
        return EntityTypeSchema(**entity_type.model_dump(by_alias=True, mode="json"))
    def get_entity_type_sync(
        self,
        entity_type_id: str,
        *,
        actor_id: UUID,
    ) -> EntityTypeSchema:
        """Returns the entity type schema for the given entity type ID."""
        return async_to_sync(self.get_entity_type(entity_type_id, actor_id=actor_id))
1,961 | handle job update | import json
import select
import os
import eventlet
eventlet.monkey_patch()
from eventlet.hubs import trampoline
import urllib.request, urllib.parse, urllib.error
import requests
import psycopg2
import urllib3
urllib3.disable_warnings()
from pyinfraboxutils import get_logger, get_env, get_root_url
from pyinfraboxutils.db import connect_db
from pyinfraboxutils.leader import elect_leader, is_leader, is_active
from pyinfraboxutils import dbpool
logger = get_logger("github")
def execute_sql(conn, stmt, params): # pragma: no cover
    """Execute *stmt* with *params* on *conn* and return all result rows.

    Rows are dict-like records (DictCursor), so callers can index columns
    by name.
    """
    # ``import psycopg2`` alone does not bind the ``extras`` submodule;
    # import it explicitly instead of relying on another module having done so.
    import psycopg2.extras
    # The cursor context manager guarantees close() even if execute/fetch raises.
    with conn.cursor(cursor_factory=psycopg2.extras.DictCursor) as c:
        c.execute(stmt, params)
        return c.fetchall()
def main(): # pragma: no cover
    """Worker entry point: listen for job_update notifications and dispatch them."""
    # Fail fast if any required configuration is missing.
    get_env('INFRABOX_VERSION')
    get_env('INFRABOX_DATABASE_DB')
    get_env('INFRABOX_DATABASE_USER')
    get_env('INFRABOX_DATABASE_PASSWORD')
    get_env('INFRABOX_DATABASE_HOST')
    get_env('INFRABOX_DATABASE_PORT')
    cluster_name = get_env('INFRABOX_CLUSTER_NAME')
    conn = connect_db()
    # Autocommit so LISTEN/NOTIFY works without explicit transaction management.
    conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
    logger.info("Connected to database")
    elect_leader(conn, 'github-review', cluster_name)
    curs = conn.cursor()
    curs.execute("LISTEN job_update;")
    logger.info("Waiting for job updates")
    pool = eventlet.GreenPool()
    while True:
        # Block the greenlet until the connection becomes readable.
        trampoline(conn, read=True)
        conn.poll()
        while conn.notifies:
            notify = conn.notifies.pop(0)
            event = json.loads(notify.payload)
            # Only the elected leader posts statuses; others drop the event.
            if not is_leader(conn, 'github-review', cluster_name, exit=False):
                logger.info("skip job: %s because I'm not leader", event.get('job_id'))
                continue
            pool.spawn_n(handle, event)
def handle(event):
    """Process one job-update event using a connection from the pool."""
    pooled = dbpool.get()
    try:
        METHOD_NAME(pooled.conn, event)
    except Exception as exc:
        # Never let a single bad event kill the worker greenlet.
        logger.error(exc)
    finally:
        # Always return the connection, even on failure.
        dbpool.put(pooled)
def METHOD_NAME(conn, event):
    """Push the state of one job to the GitHub commit-status API.

    Looks up the job, its project and build, maps the InfraBox job state to a
    GitHub commit state, and POSTs it to the stored status URL.

    Returns True when the status was posted, False when the event is not
    applicable (missing rows, non-github project, no token) or the POST failed.
    """
    job_id = event['job_id']
    jobs = execute_sql(conn, '''
        SELECT id, state, name, project_id, build_id
        FROM job
        WHERE id = %s
    ''', [job_id])
    if not jobs:
        return False
    job = jobs[0]
    project_id = job['project_id']
    build_id = job['build_id']
    projects = execute_sql(conn, '''
        SELECT id, name, type
        FROM project
        WHERE id = %s
    ''', [project_id])
    if not projects:
        return False
    project = projects[0]
    # Only github-backed projects get commit statuses.
    if project['type'] != 'github':
        return False
    builds = execute_sql(conn, '''
        SELECT id, build_number, restart_counter, commit_id
        FROM build
        WHERE id = %s
    ''', [build_id])
    if not builds:
        return False
    build = builds[0]
    project_name = project['name']
    job_state = job['state']
    job_name = job['name']
    commit_sha = build['commit_id']
    build_id = build['id']
    build_number = build['build_number']
    build_restartCounter = build['restart_counter']
    # determine github commit state
    # (default 'success'; later checks override in order of severity)
    state = 'success'
    if job_state in ('scheduled', 'running', 'queued'):
        state = 'pending'
    if job_state in ('failure', 'skipped', 'killed', 'unstable'):
        state = 'failure'
    if job_state == 'error':
        state = 'error'
    logger.info("")
    logger.info("Handle job %s", job_id)
    logger.info("Setting jobs %s state to %s", job_id, state)
    # Use the project owner's API token to authenticate the status update.
    token = execute_sql(conn, '''
        SELECT github_api_token FROM "user" u
        INNER JOIN collaborator co
            ON co.role = 'Owner'
            AND co.project_id = %s
            AND co.user_id = u.id
    ''', [project_id])
    if not token:
        logger.warn("No API token, not updating status")
        return False
    github_api_token = token[0]['github_api_token']
    github_status_url = execute_sql(conn, '''
        SELECT github_status_url
        FROM "commit"
        WHERE id = %s
        AND project_id = %s
    ''', [commit_sha, project_id])[0]['github_status_url']
    # In HA mode the global root URL is used; otherwise the master cluster's.
    ha_mode = os.environ.get('INFRABOX_HA_ENABLED') == 'true'
    if ha_mode:
        dashboard_url = get_root_url('global')
    else:
        dashboard_url = execute_sql(conn, '''
            SELECT root_url
            FROM cluster
            WHERE name = 'master'
        ''', [])[0]['root_url']
    target_url = '%s/dashboard/#/project/%s/build/%s/%s/job/%s' % (dashboard_url,
                                                                   urllib.parse.quote(project_name, safe=''),
                                                                   build_number,
                                                                   build_restartCounter,
                                                                   urllib.parse.quote_plus(job_name).replace('+', '%20'))
    # Status context shows only the first dotted component of the job name.
    job_name = job_name.split(".")[0]
    payload = {
        "state": state,
        "target_url": target_url,
        "description": "project_id:%s job_id:%s" % (project_id, job_id),
        "context": "Job: %s" % job_name
    }
    headers = {
        "Authorization": "token " + github_api_token,
        "User-Agent": "InfraBox"
    }
    # TODO(ib-steffen): support ca bundles
    try:
        r = requests.post(github_status_url,
                          data=json.dumps(payload),
                          headers=headers,
                          timeout=10,
                          verify=False)
        # GitHub returns 201 Created on a successful status post.
        if r.status_code != 201:
            logger.warn("[job: %s] Failed to update github status: %s", job_id, r.text)
            logger.warn(github_status_url)
        else:
            logger.info("[job: %s] Successfully updated github status", job_id)
    except Exception as e:
        logger.warn("[job: %s] Failed to update github status: %s", job_id, e)
        return False
    return True
if __name__ == "__main__": # pragma: no cover
    main()
1,962 | test remove removes | # TODO(mc, 2022-07-07): these tests are very coupled to the implementation
# both the tests and the code under test likely needs to be rewritten
# if we want useful unit test coverage
from mock import patch, AsyncMock # type: ignore[attr-defined]
import pytest
from robot_server.service.session.errors import SessionCreationException
from robot_server.service.session.manager import SessionMetaData, BaseSession
from robot_server.service.session.models.common import create_identifier
from robot_server.service.session.models.session import SessionType
@pytest.fixture
async def session(session_manager) -> BaseSession:
    """An added session"""
    # Adding a session also makes it the active one (exercised by tests below).
    return await session_manager.add(
        session_type=SessionType.calibration_check, session_meta_data=SessionMetaData()
    )
@pytest.fixture(autouse=True)
def mock_check_session_create():
    """Patch of Session.create"""
    # Autouse: every test gets CheckSession.create replaced by an AsyncMock
    # so no real calibration-check session is constructed.
    with patch("robot_server.service.session.manager.CheckSession.create") as m:
        mock_session = AsyncMock()
        mock_session.session_type = SessionType.calibration_check
        m.return_value = mock_session
        yield m
@pytest.fixture(autouse=True)
def mock_tip_length_session_create():
    """Patch of Session.create"""
    # Same pattern for tip-length calibration sessions.
    with patch("robot_server.service.session.manager.TipLengthCalibration.create") as m:
        mock_session = AsyncMock()
        mock_session.session_type = SessionType.tip_length_calibration
        m.return_value = mock_session
        yield m
@pytest.fixture(autouse=True)
def mock_deck_calibration_session_create():
    """Patch of Session.create"""
    # Same pattern for deck calibration sessions.
    with patch(
        "robot_server.service.session.manager.DeckCalibrationSession.create"
    ) as m:
        mock_session = AsyncMock()
        mock_session.session_type = SessionType.deck_calibration
        m.return_value = mock_session
        yield m
async def test_add_calls_session_create(
    session_manager, mock_check_session_create, session
):
    """Adding a session delegates to the session class's create() factory."""
    mock_check_session_create.assert_called_once()
    assert (
        mock_check_session_create.call_args[1]["configuration"]
        == session_manager._session_common
    )
    assert isinstance(
        mock_check_session_create.call_args[1]["instance_meta"], SessionMetaData
    )
async def test_add_no_class_doesnt_call_create(
    session_manager, mock_check_session_create
):
    # Patch the type to class dict
    # An empty mapping means no session class is registered for the type,
    # so add() must fail without calling any create() factory.
    with patch("robot_server.service.session.manager.SessionTypeToClass", new={}):
        with pytest.raises(SessionCreationException):
            await session_manager.add(SessionType.calibration_check, SessionMetaData())
    mock_check_session_create.assert_not_called()
async def test_add_stores_session(session_manager, session):
    """A newly added session is stored by its identifier."""
    assert session_manager._sessions[session.meta.identifier] == session
async def test_add_activates_session(session_manager, session):
    """Test that adding a session also makes that new session active"""
    assert session_manager._active.active_id == session.meta.identifier
async def METHOD_NAME(session_manager, session):
    """remove() returns the session and deletes it from the store."""
    assert await session_manager.remove(session.meta.identifier) is session
    assert session.meta.identifier not in session_manager._sessions
async def test_remove_calls_cleanup(session_manager):
    """remove() invokes the session's clean_up() hook exactly once."""
    session = await session_manager.add(
        SessionType.calibration_check, SessionMetaData()
    )
    session.clean_up = AsyncMock()
    await session_manager.remove(session.meta.identifier)
    session.clean_up.assert_called_once()
async def test_remove_active_session(session_manager, session):
    """Removing the active session clears the active id."""
    session_manager._active.active_id = session.meta.identifier
    await session_manager.remove(session.meta.identifier)
    assert session_manager._active.active_id is None
async def test_remove_inactive_session(session_manager, session):
    """Removing a non-active session leaves the active session untouched."""
    # Adding a second session makes it the active one.
    active_session = await session_manager.add(
        SessionType.tip_length_calibration, SessionMetaData()
    )
    await session_manager.remove(session.meta.identifier)
    assert session_manager._active.active_id is active_session.meta.identifier
async def test_remove_unknown_session(session_manager):
    """Removing an unknown identifier returns None rather than raising."""
    assert await session_manager.remove(create_identifier()) is None
def test_get_by_id_not_found(session_manager):
    """get_by_id() returns None for an unknown identifier."""
    assert session_manager.get_by_id(create_identifier()) is None
# fixme(mm, 2022-01-14): This looks like a flaky test
# because the session_manager.add() tasks will run and return
# in a nondeterministic order.
# @pytest.mark.xfail(strict=False)
async def test_get_by_type(session_manager):
    """get() filters sessions by type; no filter returns all in add order."""
    tip_length_session = await session_manager.add(
        SessionType.tip_length_calibration, SessionMetaData()
    )
    deck_cal_session = await session_manager.add(
        SessionType.deck_calibration, SessionMetaData()
    )
    assert session_manager.get(SessionType.tip_length_calibration) == (
        tip_length_session,
    )
    assert session_manager.get(SessionType.deck_calibration) == (deck_cal_session,)
    assert session_manager.get() == (tip_length_session, deck_cal_session)
    assert session_manager.get(SessionType.calibration_check) == tuple()
async def test_get_active(session_manager, session):
    """get_active() returns the session whose id matches the active id."""
    session_manager._active.active_id = session.meta.identifier
    assert session_manager.get_active() is session
async def test_is_active(session_manager, session):
    """is_active() is True for the currently active identifier."""
    session_manager._active.active_id = session.meta.identifier
    assert session_manager.is_active(session.meta.identifier) is True
def test_is_active_not_active(session_manager):
    """is_active() is False for an unknown identifier."""
    assert session_manager.is_active(create_identifier()) is False
async def test_activate(session_manager, session):
    """activate() returns the session and records it as active."""
    assert session_manager.activate(session.meta.identifier) is session
    assert session_manager._active.active_id == session.meta.identifier
def test_activate_unknown_session(session_manager):
    """activate() on an unknown id returns None and activates nothing."""
    assert session_manager.activate(create_identifier()) is None
    assert session_manager._active.active_id is None
async def test_deactivate(session_manager, session):
    """deactivate() returns the session and clears the active id."""
    session_manager._active.active_id = session.meta.identifier
    assert session_manager.deactivate(session.meta.identifier) is session
    assert session_manager._active.active_id is None
async def test_deactivate_unknown_session(session_manager, session):
    """deactivate() on an unknown id leaves the active session untouched."""
    session_manager._active.active_id = session.meta.identifier
    assert session_manager.deactivate(create_identifier()) is None
    assert session_manager._active.active_id is session.meta.identifier
def test_deactivate_non_active(session_manager):
    """deactivate() with no active session returns None."""
    session_manager._active.active_id = None
    assert session_manager.deactivate(create_identifier()) is None
1,963 | blue button pressed long | from enigma import eServiceReference
from Components.ActionMap import HelpableActionMap
from Screens.EpgSelectionChannel import EPGSelectionChannel
from Screens.EpgSelectionBase import EPGServiceZap
from Screens.InfoBar import InfoBar
from Screens.TimerEntry import addTimerFromEventSilent
from Tools.Directories import isPluginInstalled
# Keep for backwards compatibility with plugins, including the parameter naming.
# This class assumes that EPGSelection is only used in the SingleEPG sense.
class EPGSelection(EPGSelectionChannel, EPGServiceZap):
    """Single-service EPG screen kept for backwards compatibility with plugins.

    Wraps EPGSelectionChannel and rewires button actions through legacy
    method names that third-party plugins may still call or override.
    """
    def __init__(self, session, service=None, zapFunc=None, eventid=None, bouquetChangeCB=None, serviceChangeCB=None, EPGtype="similar", StartBouquet=None, StartRef=None, bouquets=None):
        # Accept a service reference string for backwards compatibility.
        if service is not None and not isinstance(service, eServiceReference):
            service = eServiceReference(service)
        if EPGtype not in ("similar", "single"):
            print("[EPGSelection] Warning: EPGSelection does not support type '%s'" % EPGtype)
            print(" Attempting to continue in single EPG mode")
        EPGSelectionChannel.__init__(self, session, service)
        EPGServiceZap.__init__(self, zapFunc or InfoBar.instance.zapToService)
        # Rewrite the EPG actions to invoke the compatibility functions.
        helpDescription = _("EPG Commands")
        self["epgactions"] = HelpableActionMap(self, "EPGSelectActions", {
            "info": (self.Info, _("Show detailed event info")),
            "epg": (self.epgButtonPressed, _("Show detailed event info")),
            "menu": (self.createSetup, _("Setup menu"))
        }, prio=-1, description=helpDescription)
        self["colouractions"] = HelpableActionMap(self, "ColorActions", {
            "red": (self.redButtonPressed, self.redButtonDescription()),
            "redlong": (self.redButtonPressedLong, _("Sort EPG list")),
            "green": (self.greenButtonPressed, _("Add/Remove timer for current event")),
            "greenlong": (self.greenButtonPressedLong, _("Show timer list")),
            "yellow": (self.yellowButtonPressed, _("Search for similar events")),
            "blue": (self.blueButtonPressed, _("Add an autotimer for current event")),
            "bluelong": (self.METHOD_NAME, _("Show autotimer list"))
        }, prio=-1, description=helpDescription)
    # EPGSearch bypasses base class initialisation
    # try to limit the scope of its quirkyness by providing a limited
    # initialisation path
    def EPGSearch_init(self, session):
        EPGServiceZap.__init__(self, InfoBar.instance.zapToService)
    # Backwards compatibility properties for plugins.
    @property
    def ChoiceBoxDialog(self):
        # Legacy capitalised alias for choiceBoxDialog.
        return self.choiceBoxDialog
    @ChoiceBoxDialog.setter
    def ChoiceBoxDialog(self, value):
        self.choiceBoxDialog = value
    # Backwards compatibility functions for plugins.
    # Button names.
    def redButtonPressed(self):
        # Prefer TMDb when both lookup plugins are installed.
        if isPluginInstalled("tmdb"):
            self.openTMDb()
        elif isPluginInstalled("IMDb"):
            self.openIMDb()
    def redButtonText(self):
        if isPluginInstalled("tmdb"):
            return _("TMDb search")
        elif isPluginInstalled("IMDb"):
            return _("IMDb search")
        return ""
    def redButtonDescription(self):
        if isPluginInstalled("tmdb"):
            return _("TMDb search for highlighted event")
        elif isPluginInstalled("IMDb"):
            return _("IMDb search for highlighted event")
        return ""
    def redButtonPressedLong(self):
        self.sortEpg()
    def greenButtonPressed(self):
        self.addEditTimer()
    def greenButtonPressedLong(self):
        self.showTimerList()
    def yellowButtonPressed(self):
        self.openEPGSearch()
    def blueButtonPressed(self):
        self.addAutoTimer()
    def METHOD_NAME(self):
        self.showAutoTimerList()
    def Info(self):
        self.infoKeyPressed()
    def InfoLong(self):
        self.OpenSingleEPG()
    def infoKeyPressed(self):
        self.openEventView()
    def eventSelected(self):  # used by EPG Search plugin
        self.openEventView()
    def epgButtonPressed(self):
        self.openEventView()
    # Actions
    def showTimerList(self):
        self.openTimerList()
    def showAutoTimerList(self):
        self.openAutoTimerList()
    def OpenSingleEPG(self):
        self.openSingleEPG()
    def sortEpg(self):
        self.sortEPG(self)
    def timerAdd(self):
        self.addEditTimerMenu()
    def doRecordTimer(self):
        self.doInstantTimer(0)
    def doZapTimer(self):
        self.doInstantTimer(1)
    def RecordTimerQuestion(self, manual=False):
        if manual:
            self.addEditTimer()
        else:
            self.addEditTimerMenu()
    def doInstantTimer(self, zap=0):
        # zap=0 creates a record timer, zap=1 a zap timer.
        event, service = self["list"].getCurrent()[:2]
        addTimerFromEventSilent(self.session, self.refreshTimerActionButton, event, service, zap)
    # Things that need to be able to be overridden.
    def refreshList(self):
        try:
            # Allow plugins to override using the old all lowercase method name.
            self.refreshlist()
        except AttributeError:
            EPGSelectionChannel.refreshList(self)
1,964 | is workspace file | import logging
import os
import pathlib
import typing as t
from colorama import Fore
from tmuxp.cli.utils import tmuxp_echo
from tmuxp.types import StrPath
from tmuxp.workspace.constants import VALID_WORKSPACE_DIR_FILE_EXTENSIONS
logger = logging.getLogger(__name__)
if t.TYPE_CHECKING:
from typing_extensions import Literal, TypeAlias
ValidExtensions: TypeAlias = Literal[".yml", ".yaml", ".json"]
def METHOD_NAME(
    filename: str,
    extensions: t.Union["ValidExtensions", t.List["ValidExtensions"], None] = None,
) -> bool:
    """
    Return True if file has a valid workspace file type.

    Parameters
    ----------
    filename : str
        filename to check (e.g. ``mysession.json``).
    extensions : str or list
        filetypes to check (e.g. ``['.yaml', '.json']``).

    Returns
    -------
    bool
    """
    if extensions is None:
        extensions = [".yml", ".yaml", ".json"]
    if isinstance(extensions, str):
        extensions = [extensions]
    # str.endswith accepts a tuple of suffixes, replacing the any() loop.
    return filename.endswith(tuple(extensions))
def in_dir(
    workspace_dir: t.Union[pathlib.Path, str, None] = None,
    extensions: t.Optional[t.List["ValidExtensions"]] = None,
) -> t.List[str]:
    """
    Return a list of workspace_files in ``workspace_dir``.

    Parameters
    ----------
    workspace_dir : str
        directory to search
    extensions : list
        filetypes to check (e.g. ``['.yaml', '.json']``).

    Returns
    -------
    list
    """
    if workspace_dir is None:
        workspace_dir = os.path.expanduser("~/.tmuxp")
    if extensions is None:
        extensions = [".yml", ".yaml", ".json"]
    found: t.List[str] = []
    for entry in os.listdir(workspace_dir):
        # Hidden files are never treated as workspace files.
        if entry.startswith("."):
            continue
        if METHOD_NAME(entry, extensions):
            found.append(entry)
    return found
def in_cwd() -> t.List[str]:
    """
    Return list of workspace_files in current working directory.

    Only filenames such as ``.tmuxp.py``, ``.tmuxp.json``, ``.tmuxp.yaml`` match.

    Returns
    -------
    list
        workspace_files in current working directory

    Examples
    --------
    >>> sorted(in_cwd())
    ['.tmuxp.json', '.tmuxp.yaml']
    """
    return [
        entry
        for entry in os.listdir(os.getcwd())
        if entry.startswith(".tmuxp") and METHOD_NAME(entry)
    ]
def get_workspace_dir() -> str:
    """
    Return tmuxp workspace directory.

    ``TMUXP_CONFIGDIR`` environmental variable has precedence if set. We also
    evaluate XDG default directory from XDG_CONFIG_HOME environmental variable
    if set or its default. Then the old default ~/.tmuxp is returned for
    compatibility.

    Returns
    -------
    str :
        absolute path to tmuxp config directory
    """
    candidates = []
    if "TMUXP_CONFIGDIR" in os.environ:
        candidates.append(os.environ["TMUXP_CONFIGDIR"])
    if "XDG_CONFIG_HOME" in os.environ:
        candidates.append(os.path.join(os.environ["XDG_CONFIG_HOME"], "tmuxp"))
    else:
        candidates.append("~/.config/tmuxp/")
    candidates.append("~/.tmuxp")

    expanded = ""
    for candidate in candidates:
        expanded = os.path.expanduser(candidate)
        # First candidate that exists on disk wins.
        if os.path.isdir(expanded):
            return expanded
    # None exist: fall back to the last expanded candidate (~/.tmuxp).
    return expanded
def find_workspace_file(
    workspace_file: StrPath,
    workspace_dir: t.Optional[StrPath] = None,
) -> str:
    """
    Return the real config path or raise an exception.

    If workspace file is a directory, scan for .tmuxp.{yaml,yml,json} in that
    directory. If one or more found, it will warn and pick the first.

    If workspace file is ".", "./" or None, it will scan current directory.

    If workspace file has no path and only a filename, e.g. "my_workspace.yaml",
    it will search the workspace dir.

    If workspace file has no path and no extension, e.g. "my_workspace", it will
    scan for a file name with yaml, yml and json extension. If multiple exist,
    it will warn and pick the first.

    Parameters
    ----------
    workspace_file : str
        workspace file, valid examples:

        - a file name, my_workspace.yaml
        - relative path, ../my_workspace.yaml or ../project
        - a period, .

    Returns
    -------
    str
        resolved path of the workspace file.

    Raises
    ------
    FileNotFoundError
        If no matching workspace file could be resolved.
    """
    if not workspace_dir:
        workspace_dir = get_workspace_dir()
    # Bind frequently-used os.path helpers to locals for brevity below.
    path = os.path
    exists, join, isabs = path.exists, path.join, path.isabs
    dirname, normpath, splitext = path.dirname, path.normpath, path.splitext
    cwd = os.getcwd()
    is_name = False
    file_error = None
    workspace_file = os.path.expanduser(workspace_file)
    # if pure name (no path, no extension), resolve against the config dir
    if is_pure_name(workspace_file):
        is_name = True
    elif (
        not isabs(workspace_file)
        or len(dirname(workspace_file)) > 1
        or workspace_file == "."
        or workspace_file == ""
        or workspace_file == "./"
    ):  # if relative, fill in full path
        workspace_file = normpath(join(cwd, workspace_file))
    # no extension (or a directory): scan for candidate files
    if path.isdir(workspace_file) or not splitext(workspace_file)[1]:
        if is_name:
            # Try <workspace_dir>/<name> with each supported extension.
            candidates = [
                x
                for x in [
                    f"{join(workspace_dir, workspace_file)}{ext}"
                    for ext in VALID_WORKSPACE_DIR_FILE_EXTENSIONS
                ]
                if exists(x)
            ]
            if not len(candidates):
                file_error = (
                    "workspace-file not found in workspace dir (yaml/yml/json) %s "
                    "for name" % (workspace_dir)
                )
        else:
            # Treat workspace_file as a directory: look for hidden tmuxp files.
            candidates = [
                x
                for x in [
                    join(workspace_file, ext)
                    for ext in [".tmuxp.yaml", ".tmuxp.yml", ".tmuxp.json"]
                ]
                if exists(x)
            ]
            if len(candidates) > 1:
                # Multiple matches is ambiguous; warn but continue with first.
                tmuxp_echo(
                    Fore.RED
                    + "Multiple .tmuxp.{yml,yaml,json} workspace_files in %s"
                    % dirname(workspace_file)
                    + Fore.RESET
                )
                tmuxp_echo(
                    "This is undefined behavior, use only one. "
                    "Use file names e.g. myproject.json, coolproject.yaml. "
                    "You can load them by filename."
                )
            elif not len(candidates):
                file_error = "No tmuxp files found in directory"
        if len(candidates):
            workspace_file = candidates[0]
    elif not exists(workspace_file):
        file_error = "file not found"
    if file_error:
        raise FileNotFoundError(file_error, workspace_file)
    return workspace_file
def is_pure_name(path: str) -> bool:
    """
    Return True if path is a name and not a file path.

    Parameters
    ----------
    path : str
        Path (can be absolute, relative, etc.)

    Returns
    -------
    bool
        True if path is a name of workspace in workspace dir, not file path.
    """
    # A "pure name" is non-empty, not ".", carries no directory component,
    # no extension, and is not absolute.
    if os.path.isabs(path):
        return False
    if path in (".", ""):
        return False
    if os.path.dirname(path):
        return False
    _root, ext = os.path.splitext(path)
    return not ext
"""
:codeauthor: Rahul Handay <rahulha@saltstack.com>
"""
import pytest
import salt.modules.bluez_bluetooth as bluez
import salt.utils.validate.net
from salt.exceptions import CommandExecutionError
from tests.support.mock import MagicMock, patch
class MockBluetooth:
    """
    Stand-in for the ``bluetooth`` (PyBluez) module used by these tests.
    """

    def __init__(self):
        """No state required; the mock only exposes static helpers."""

    @staticmethod
    def discover_devices(lookup_names):
        """
        Return a canned list of discoverable devices, mimicking PyBluez's
        ``discover_devices`` output shape.
        """
        return [["a", "b", "c"], ["d", "e", "f"]]
@pytest.fixture
def configure_loader_modules():
    # Inject the bluetooth mock into the module under test so the suite
    # runs without a real PyBluez installation.
    return {bluez: {"bluetooth": MockBluetooth()}}
def test_version():
    """
    Test that version() reports both the Bluez and PyBluez versions.
    """
    # ``cmd.run`` is mocked to return the Bluez daemon's version string.
    mock = MagicMock(return_value="5.7")
    with patch.dict(bluez.__salt__, {"cmd.run": mock}):
        assert bluez.version() == {
            "PyBluez": "<= 0.18 (Unknown, but installed)",
            "Bluez": "5.7",
        }
def METHOD_NAME():
    """
    Test of getting address of bluetooth adapter
    """
    # The mocked ``cmd.run`` output is parsed by address_(); note the raw
    # token (including its trailing space) is kept as the device key.
    mock = MagicMock(return_value="hci : hci0")
    with patch.dict(bluez.__salt__, {"cmd.run": mock}):
        assert bluez.address_() == {
            "hci ": {"device": "hci ", "path": "/sys/class/bluetooth/hci "}
        }
def test_power():
    """
    Test powering a bluetooth adapter on and off.
    """
    # Unknown device: address_() has no entry, so power() must raise.
    mock = MagicMock(return_value={})
    with patch.object(bluez, "address_", mock):
        pytest.raises(CommandExecutionError, bluez.power, "hci0", "on")
    # Known device: powering on returns truthy.
    mock = MagicMock(return_value={"hci0": {"device": "hci0", "power": "on"}})
    with patch.object(bluez, "address_", mock):
        mock = MagicMock(return_value="")
        with patch.dict(bluez.__salt__, {"cmd.run": mock}):
            assert bluez.power("hci0", "on")
    # Known device: powering off returns falsy.
    mock = MagicMock(return_value={"hci0": {"device": "hci0", "power": "on"}})
    with patch.object(bluez, "address_", mock):
        mock = MagicMock(return_value="")
        with patch.dict(bluez.__salt__, {"cmd.run": mock}):
            assert not bluez.power("hci0", "off")
def test_discoverable():
    """
    Test of enabling bluetooth device
    """
    # address_() is consulted once per call: first the device is missing
    # (raises), then present for the two cmd.run-driven outcomes below.
    mock = MagicMock(
        side_effect=[
            {},
            {"hci0": {"device": "hci0", "power": "on"}},
            {"hci0": {"device": "hci0", "power": "on"}},
        ]
    )
    with patch.object(bluez, "address_", mock):
        pytest.raises(CommandExecutionError, bluez.discoverable, "hci0")

        # "ISCAN" in the hciconfig output means discoverable succeeded.
        mock = MagicMock(return_value="UP RUNNING ISCAN")
        with patch.dict(bluez.__salt__, {"cmd.run": mock}):
            assert bluez.discoverable("hci0")

        # Empty output: discoverable mode was not enabled.
        mock = MagicMock(return_value="")
        with patch.dict(bluez.__salt__, {"cmd.run": mock}):
            assert not bluez.discoverable("hci0")
def test_noscan():
    """
    Test of turning off of scanning modes
    """
    # address_() side effects: missing device (raises), then present for
    # the two cmd.run-driven outcomes below.
    mock = MagicMock(
        side_effect=[
            {},
            {"hci0": {"device": "hci0", "power": "on"}},
            {"hci0": {"device": "hci0", "power": "on"}},
        ]
    )
    with patch.object(bluez, "address_", mock):
        pytest.raises(CommandExecutionError, bluez.noscan, "hci0")

        # "SCAN" still present in output: scanning was NOT disabled.
        mock = MagicMock(return_value="SCAN")
        with patch.dict(bluez.__salt__, {"cmd.run": mock}):
            assert not bluez.noscan("hci0")

        # No "SCAN" in output: scanning successfully disabled.
        mock = MagicMock(return_value="")
        with patch.dict(bluez.__salt__, {"cmd.run": mock}):
            assert bluez.noscan("hci0")
def test_scan():
    """
    Test of scanning of bluetooth devices
    """
    # MockBluetooth.discover_devices returns [["a","b","c"], ["d","e","f"]];
    # scan() maps the first field to the second as {address: name}.
    assert bluez.scan() == [{"a": "b"}, {"d": "e"}]
def test_block():
    """
    Test of blocking specific bluetooth device
    """
    # mac() validation: first call False (invalid MAC -> raises),
    # second call True (valid MAC -> block proceeds).
    mock = MagicMock(side_effect=[False, True])
    with patch.object(salt.utils.validate.net, "mac", mock):
        pytest.raises(CommandExecutionError, bluez.block, "DE:AD:BE:EF:CA:ZE")

        mock = MagicMock(return_value="")
        with patch.dict(bluez.__salt__, {"cmd.run": mock}):
            assert bluez.block("DE:AD:BE:EF:CA:FE") is None
def test_unblock():
    """
    Test to unblock specific bluetooth device
    """
    # mac() validation: first call False (invalid MAC -> raises),
    # second call True (valid MAC -> unblock proceeds).
    mock = MagicMock(side_effect=[False, True])
    with patch.object(salt.utils.validate.net, "mac", mock):
        # Fixed: exercise ``unblock`` on the invalid-MAC path. The original
        # called ``bluez.block`` here (copy-paste from test_block), leaving
        # unblock's validation error branch untested.
        pytest.raises(CommandExecutionError, bluez.unblock, "DE:AD:BE:EF:CA:ZE")

        mock = MagicMock(return_value="")
        with patch.dict(bluez.__salt__, {"cmd.run": mock}):
            assert bluez.unblock("DE:AD:BE:EF:CA:FE") is None
def test_pair():
    """
    Test to pair bluetooth adapter with a device
    """
    # mac() validation: False (bad MAC -> raises), then True twice; the
    # second raise is triggered by the non-numeric PIN "abcd".
    mock = MagicMock(side_effect=[False, True, True])
    with patch.object(salt.utils.validate.net, "mac", mock):
        pytest.raises(CommandExecutionError, bluez.pair, "DE:AD:BE:EF:CA:FE", "1234")
        pytest.raises(CommandExecutionError, bluez.pair, "DE:AD:BE:EF:CA:FE", "abcd")

        mock = MagicMock(return_value={"device": "hci0"})
        with patch.object(bluez, "address_", mock):
            mock = MagicMock(return_value="Ok")
            with patch.dict(bluez.__salt__, {"cmd.run": mock}):
                assert bluez.pair("DE:AD:BE:EF:CA:FE", "1234") == ["Ok"]
def test_unpair():
    """
    Test to unpair bluetooth adaptor with a device
    """
    # mac() validation: first call False (raises), second True (succeeds).
    mock = MagicMock(side_effect=[False, True])
    with patch.object(salt.utils.validate.net, "mac", mock):
        pytest.raises(CommandExecutionError, bluez.unpair, "DE:AD:BE:EF:CA:FE")

        mock = MagicMock(return_value="Ok")
        with patch.dict(bluez.__salt__, {"cmd.run": mock}):
            assert bluez.unpair("DE:AD:BE:EF:CA:FE") == ["Ok"]
def test_start():
    """
    Test to start bluetooth service
    """
    # start() simply delegates to the ``service.start`` execution module.
    mock = MagicMock(return_value="Ok")
    with patch.dict(bluez.__salt__, {"service.start": mock}):
        assert bluez.start() == "Ok"
def test_stop():
    """
    Test to stop bluetooth service
    """
    # stop() simply delegates to the ``service.stop`` execution module.
    mock = MagicMock(return_value="Ok")
    with patch.dict(bluez.__salt__, {"service.stop": mock}):
        assert bluez.stop() == "Ok"
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=unused-argument
"""A module for the job watcher"""
from IPython.core.magic import line_magic, Magics, magics_class
from qiskit.tools.events.pubsub import Subscriber
from qiskit.utils import optionals as _optionals
from .job_widgets import build_job_viewer, make_clear_button, make_labels, create_job_widget
from .watcher_monitor import _job_monitor
@_optionals.HAS_IBMQ.require_in_instance
class JobWatcher(Subscriber):
    """An IBM Q job watcher.

    Subscribes to ``ibmq.job.start`` events and maintains a list of job
    widgets that a Jupyter viewer widget renders and refreshes.
    """

    def __init__(self):
        super().__init__()
        # List of job widgets; each widget's ``children`` indices are used
        # positionally below (3=status, 4=queue, 5=message).
        self.jobs = []
        self._init_subscriber()
        self.job_viewer = None
        self._clear_button = make_clear_button(self)
        self._labels = make_labels()
        self.refresh_viewer()

    def refresh_viewer(self):
        """Refreshes the job viewer."""
        if self.job_viewer is not None:
            # Newest jobs are shown first, after the clear button and labels.
            self.job_viewer.children[0].children = [self._clear_button, self._labels] + list(
                reversed(self.jobs)
            )

    def stop_viewer(self):
        """Stops the job viewer."""
        if self.job_viewer:
            self.job_viewer.close()
        self.job_viewer = None

    def start_viewer(self):
        """Starts the job viewer."""
        self.job_viewer = build_job_viewer()
        self.refresh_viewer()

    def METHOD_NAME(self, update_info):
        """Update a single job instance.

        Args:
            update_info (tuple): Updated job info as
                ``(job_id, status_name, queue_position, message)``.
        """
        job_id = update_info[0]
        found_job = False
        ind = None
        # Locate the widget tracking this job id, if any.
        for idx, job in enumerate(self.jobs):
            if job.job_id == job_id:
                found_job = True
                ind = idx
                break
        if found_job:
            job_wid = self.jobs[ind]
            # update status (color-coded HTML for terminal states)
            if update_info[1] == "DONE":
                stat = f"<font style='color:#34BC6E'>{update_info[1]}</font>"
            elif update_info[1] == "ERROR":
                stat = f"<font style='color:#DC267F'>{update_info[1]}</font>"
            elif update_info[1] == "CANCELLED":
                stat = f"<font style='color:#FFB000'>{update_info[1]}</font>"
            else:
                stat = update_info[1]
            job_wid.children[3].value = stat
            # update queue ("-" means not queued)
            if update_info[2] == 0:
                queue = "-"
            else:
                queue = str(update_info[2])
            job_wid.children[4].value = queue
            # update msg
            job_wid.children[5].value = update_info[3]

    def cancel_job(self, job_id):
        """Cancels a job in the watcher.

        Args:
            job_id (str): Job id to remove.

        Raises:
            Exception: Job id not found.
        """
        # Imported lazily so the module loads without the IBMQ provider.
        from qiskit.providers.ibmq.job.exceptions import (  # pylint: disable=no-name-in-module
            IBMQJobApiError,
        )

        do_pop = False
        ind = None
        for idx, job in enumerate(self.jobs):
            if job.job_id == job_id:
                do_pop = True
                ind = idx
                break
        if not do_pop:
            raise Exception("job_id not found")
        # Only attempt cancellation if the widget isn't already CANCELLED.
        if "CANCELLED" not in self.jobs[ind].children[3].value:
            try:
                self.jobs[ind].job.cancel()
                status = self.jobs[ind].job.status()
            except IBMQJobApiError:
                # Best effort: API failures during cancel are ignored.
                pass
            else:
                self.METHOD_NAME((self.jobs[ind].job_id, status.name, 0, status.value))

    def clear_done(self):
        """Clears the done jobs from the list."""
        _temp_jobs = []
        do_refresh = False
        for job in self.jobs:
            job_str = job.children[3].value
            # Keep only jobs that are not in a terminal state.
            if not (("DONE" in job_str) or ("CANCELLED" in job_str) or ("ERROR" in job_str)):
                _temp_jobs.append(job)
            else:
                job.close()
                do_refresh = True
        if do_refresh:
            self.jobs = _temp_jobs
            self.refresh_viewer()

    def _init_subscriber(self):
        # Register a callback that adds a widget (and a monitor thread) for
        # every job started through the IBMQ provider.
        def _add_job(job):
            status = job.status()
            job_widget = create_job_widget(
                self, job, job.backend(), status.name, job.queue_position(), status.value
            )
            self.jobs.append(job_widget)
            self.refresh_viewer()
            _job_monitor(job, status, self)

        self.subscribe("ibmq.job.start", _add_job)
@magics_class
class JobWatcherMagic(Magics):
    """A class for enabling/disabling the job watcher."""

    @line_magic
    def qiskit_job_watcher(self, line="", cell=None):
        """A Jupyter magic function to enable job watcher."""
        # Restart the viewer so repeated invocations show a single widget.
        _JOB_WATCHER.stop_viewer()
        _JOB_WATCHER.start_viewer()

    @line_magic
    def qiskit_disable_job_watcher(self, line="", cell=None):
        """A Jupyter magic function to disable job watcher."""
        _JOB_WATCHER.stop_viewer()
if _optionals.HAS_IBMQ:
    # Module-level singleton Jupyter job watcher shared by the magics above;
    # only created when the IBMQ provider is importable.
    _JOB_WATCHER = JobWatcher()
import os
import common_tests
class TestRenameSoundDynamic(common_tests.CommonTests):
    """Tests for the ``--refactor-sound-dynamic`` hh_client command, which
    reports ``upcast dynamic`` positions for a named function or class."""

    @classmethod
    def get_test_driver(cls) -> common_tests.CommonTestDriver:
        return common_tests.CommonTestDriver()

    def write_load_config(self):
        # Enable sound dynamic types for the test repo via .hhconfig.
        with open(os.path.join(self.test_driver.repo_dir, ".hhconfig"), "w") as f:
            f.write(
                """
enable_sound_dynamic_type = true
"""
            )

    def write_and_test_one_file(
        self, file_input, command_type, element_name, upcast_locations, using_sd=True
    ) -> None:
        # Write a single source file, start the server, and verify the
        # reported upcast locations for the given element.
        with open(os.path.join(self.test_driver.repo_dir, "a.php"), "w") as f:
            f.write(file_input)
        if using_sd:
            self.write_load_config()
        self.test_driver.start_hh_server(changed_files=["a.php"], args=["--no-load"])
        self.check_upcast_cmd(command_type, element_name, upcast_locations, using_sd)

    def check_upcast_cmd(
        self, command_type, element_name, upcast_locations, using_sd=True
    ):
        # Build the expected CLI output, which differs depending on whether
        # sound dynamic is enabled in the server config.
        if using_sd:
            expected_output = [
                "Server is using sound dynamic. ",
                f"Number of upcast positions for \\{element_name} is {len(upcast_locations)}",
            ]
            expected_output.extend(upcast_locations)
        else:
            expected_output = [
                "Server is NOT using sound dynamic. Change the .hhconfig file to enable sound dynamic. ",
            ]
        self.test_driver.check_cmd(
            expected_output=expected_output,
            options=["--refactor-sound-dynamic", command_type, element_name],
        )

    def test_no_sd(self) -> None:
        # Without sound dynamic enabled, the command reports nothing useful.
        self.write_and_test_one_file(
            """<?hh
            <<file:__EnableUnstableFeatures('upcast_expression')>>
            <<__SupportDynamicType>>
            function h(shape(...) $m): int {
              $x = h<> upcast dynamic;
            }
            """,
            "Function",
            "h",
            [],
            using_sd=False,
        )

    def METHOD_NAME(self) -> None:
        # A single upcast of function h is reported at its exact position.
        self.write_and_test_one_file(
            """<?hh
            <<file:__EnableUnstableFeatures('upcast_expression')>>
            <<__SupportDynamicType>>
            function h(shape(...) $m): int {
              $x = h<> upcast dynamic;
            }
            """,
            "Function",
            "h",
            [
                f'File "{self.test_driver.repo_dir}/a.php", line 5, characters 20-37:',
            ],
        )

    def test_one_upcast_one_class(self) -> None:
        # Both class-name and instance upcasts of Counter are reported.
        self.write_and_test_one_file(
            """
<?hh
<<file:__EnableUnstableFeatures('upcast_expression')>>
<<__SupportDynamicType>>
class Counter {
  private int $i = 0;
}

function c(dynamic $d): void {
  $g = Counter::class upcast dynamic;
  $h = new Counter() upcast dynamic;
}
""",
            "Class",
            "Counter",
            [
                f'File "{self.test_driver.repo_dir}/a.php", line 10, characters 8-36:',
                f'File "{self.test_driver.repo_dir}/a.php", line 11, characters 8-35:',
            ],
        )

    def test_one_upcast_multiple_function(self) -> None:
        # Only h's upcast should be reported, not g's.
        self.write_and_test_one_file(
            """<?hh
            <<file:__EnableUnstableFeatures('upcast_expression')>>
            <<__SupportDynamicType>>
            function h(shape(...) $m): int {
              $x = h<> upcast dynamic;
            }

            <<__SupportDynamicType>>
            function g(int $m): int {
              $x = g<> upcast dynamic;
            }
            """,
            "Function",
            "h",
            [
                f'File "{self.test_driver.repo_dir}/a.php", line 5, characters 20-37:',
            ],
        )

    def test_multiple_files(self) -> None:
        # Upcasts of h spread across several files are all reported.
        with open(os.path.join(self.test_driver.repo_dir, "a.php"), "w") as f:
            f.write(
                """<?hh
            <<file:__EnableUnstableFeatures('upcast_expression')>>
            <<__SupportDynamicType>>
            function h(shape(...) $m): int {
              $x = h<> upcast dynamic;
            }

            <<__SupportDynamicType>>
            function g(int $m): int {
              $x = g<> upcast dynamic;
            }
            """
            )
        with open(os.path.join(self.test_driver.repo_dir, "b.php"), "w") as f:
            f.write(
                """<?hh
                <<file:__EnableUnstableFeatures('upcast_expression')>>
                <<__SupportDynamicType>>
                function b(dynamic $d): void {
                    $y = h<> upcast dynamic;
                    $d(3);
                }
            """
            )
        with open(os.path.join(self.test_driver.repo_dir, "c.php"), "w") as f:
            f.write(
                """<?hh
                <<file:__EnableUnstableFeatures('upcast_expression')>>
                <<__SupportDynamicType>>
                function c(int $i): void {
                    $z = h<> upcast dynamic;
                }
            """
            )
        self.write_load_config()
        self.test_driver.start_hh_server(
            changed_files=["a.php", "b.php", "c.php"], args=["--no-load"]
        )
        self.check_upcast_cmd(
            "Function",
            "h",
            [
                f'File "{self.test_driver.repo_dir}/a.php", line 5, characters 20-37:',
                f'File "{self.test_driver.repo_dir}/b.php", line 5, characters 24-41:',
                f'File "{self.test_driver.repo_dir}/c.php", line 5, characters 24-41:',
            ],
        )
# -*- coding: utf-8 -*-
# Copyright (c) 2020 Nekokatt
# Copyright (c) 2021-present davfsa
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import mock
import pytest
from hikari import audit_logs
from hikari import channels
from hikari import snowflakes
@pytest.mark.asyncio()
class TestMessagePinEntryInfo:
    """Tests for MessagePinEntryInfo's fetch helpers."""

    async def test_fetch_channel(self):
        app = mock.AsyncMock()
        app.rest.fetch_channel.return_value = mock.Mock(spec_set=channels.GuildTextChannel)
        model = audit_logs.MessagePinEntryInfo(app=app, channel_id=123, message_id=456)
        # fetch_channel proxies to rest.fetch_channel with the channel id.
        assert await model.fetch_channel() is model.app.rest.fetch_channel.return_value
        model.app.rest.fetch_channel.assert_awaited_once_with(123)

    async def test_fetch_message(self):
        model = audit_logs.MessagePinEntryInfo(app=mock.AsyncMock(), channel_id=123, message_id=456)
        # fetch_message proxies to rest.fetch_message with channel and message ids.
        assert await model.fetch_message() is model.app.rest.fetch_message.return_value
        model.app.rest.fetch_message.assert_awaited_once_with(123, 456)
@pytest.mark.asyncio()
class TestMessageDeleteEntryInfo:
    """Tests for MessageDeleteEntryInfo's fetch helpers."""

    async def test_fetch_channel(self):
        app = mock.AsyncMock()
        app.rest.fetch_channel.return_value = mock.Mock(spec_set=channels.GuildTextChannel)
        model = audit_logs.MessageDeleteEntryInfo(app=app, count=1, channel_id=123)
        # fetch_channel proxies to rest.fetch_channel with the channel id.
        assert await model.fetch_channel() is model.app.rest.fetch_channel.return_value
        model.app.rest.fetch_channel.assert_awaited_once_with(123)
@pytest.mark.asyncio()
class TestMemberMoveEntryInfo:
    """Tests for MemberMoveEntryInfo's fetch helpers."""

    async def test_fetch_channel(self):
        app = mock.AsyncMock()
        # Member moves target voice channels, hence the GuildVoiceChannel spec.
        app.rest.fetch_channel.return_value = mock.Mock(spec_set=channels.GuildVoiceChannel)
        model = audit_logs.MemberMoveEntryInfo(app=app, count=1, channel_id=123)
        assert await model.fetch_channel() is model.app.rest.fetch_channel.return_value
        model.app.rest.fetch_channel.assert_awaited_once_with(123)
class TestAuditLogEntry:
    """Tests for AuditLogEntry.fetch_user behavior."""

    @pytest.mark.asyncio()
    async def test_fetch_user_when_no_user(self):
        model = audit_logs.AuditLogEntry(
            app=mock.AsyncMock(),
            id=123,
            target_id=None,
            changes=[],
            user_id=None,
            action_type=0,
            options=None,
            reason=None,
            guild_id=snowflakes.Snowflake(34123123),
        )
        # With no user_id, fetch_user short-circuits to None without a REST call.
        assert await model.fetch_user() is None
        model.app.rest.fetch_user.assert_not_called()

    @pytest.mark.asyncio()
    async def test_fetch_user_when_user(self):
        model = audit_logs.AuditLogEntry(
            app=mock.AsyncMock(),
            id=123,
            target_id=None,
            changes=[],
            user_id=456,
            action_type=0,
            options=None,
            reason=None,
            guild_id=snowflakes.Snowflake(123321123),
        )
        # With a user_id present, fetch_user proxies to rest.fetch_user.
        assert await model.fetch_user() is model.app.rest.fetch_user.return_value
        model.app.rest.fetch_user.assert_awaited_once_with(456)
class TestAuditLog:
    """Tests for AuditLog's sequence protocol (iter, index/slice access, len)."""

    def test_iter(self):
        entry_1 = object()
        entry_2 = object()
        entry_3 = object()
        audit_log = audit_logs.AuditLog(
            entries={
                snowflakes.Snowflake(432123): entry_1,
                snowflakes.Snowflake(432654): entry_2,
                snowflakes.Snowflake(432888): entry_3,
            },
            integrations={},
            users={},
            threads={},
            webhooks={},
        )
        # Iteration yields entry values in mapping (insertion) order.
        assert list(audit_log) == [entry_1, entry_2, entry_3]

    def test_get_item_with_index(self):
        entry = object()
        entry_2 = object()
        audit_log = audit_logs.AuditLog(
            entries={
                snowflakes.Snowflake(432123): object(),
                snowflakes.Snowflake(432654): entry,
                snowflakes.Snowflake(432888): object(),
                snowflakes.Snowflake(677777): object(),
                snowflakes.Snowflake(999999): entry_2,
            },
            integrations={},
            threads={},
            users={},
            webhooks={},
        )
        # Integer indexing addresses entries positionally, not by snowflake.
        assert audit_log[1] is entry
        assert audit_log[4] is entry_2

    def METHOD_NAME(self):
        entry_1 = object()
        entry_2 = object()
        audit_log = audit_logs.AuditLog(
            entries={
                snowflakes.Snowflake(432123): object(),
                snowflakes.Snowflake(432654): entry_1,
                snowflakes.Snowflake(432888): object(),
                snowflakes.Snowflake(666666): entry_2,
                snowflakes.Snowflake(783452): object(),
            },
            integrations={},
            threads={},
            users={},
            webhooks={},
        )
        # Slicing with a step returns a tuple of the selected entries.
        assert audit_log[1:5:2] == (entry_1, entry_2)

    def test_len(self):
        audit_log = audit_logs.AuditLog(
            entries={
                snowflakes.Snowflake(432123): object(),
                snowflakes.Snowflake(432654): object(),
                snowflakes.Snowflake(432888): object(),
                snowflakes.Snowflake(783452): object(),
            },
            integrations={},
            threads={},
            users={},
            webhooks={},
        )
        assert len(audit_log) == 4
from datetime import date
from django.conf import settings
from django.test import TestCase
from django.urls import reverse
from geocity.apps.submissions import models as submissions_models
from geocity.tests import factories
from geocity.tests.utils import LoggedInUserMixin, get_parser
class SubmissionUpdateTestCase(LoggedInUserMixin, TestCase):
    """Tests for updating an existing submission through the wizard views
    (form selection step and fields step)."""

    def setUp(self):
        super().setUp()
        # A submission owned by the logged-in user with three selected forms,
        # all of which are allowed by the administrative entity.
        self.submission = factories.SubmissionFactory(author=self.user)
        factories.SelectedFormFactory.create_batch(3, submission=self.submission)
        self.submission.administrative_entity.forms.set(self.submission.forms.all())

    def test_form_step_submit_updates_submission(self):
        # Adding a new form through the select-forms step should extend,
        # not replace, the submission's forms.
        new_form = factories.FormFactory()
        self.submission.administrative_entity.forms.add(new_form)
        current_forms = list(self.submission.forms.all())
        self.client.post(
            (
                reverse(
                    "submissions:submission_select_forms",
                    kwargs={"submission_id": self.submission.pk},
                )
            ),
            data={
                "forms-selected_forms": [form.pk for form in current_forms + [new_form]]
            },
        )
        self.submission.refresh_from_db()
        # No new submission is created; only its forms change.
        self.assertEqual(submissions_models.Submission.objects.count(), 1)
        self.assertEqual(
            set(self.submission.forms.all()),
            set(current_forms + [new_form]),
        )

    def test_fields_step_submit_updates_submission(self):
        # Posting one value per (form, field) should persist all values.
        new_field = factories.FieldFactory()
        new_field.forms.set(self.submission.forms.all())
        data = {
            "fields-{}_{}".format(form.pk, new_field.pk): "value-{}".format(form.pk)
            for form in self.submission.forms.all()
        }
        self.client.post(
            reverse(
                "submissions:submission_fields",
                kwargs={"submission_id": self.submission.pk},
            ),
            data=data,
        )
        self.assertEqual(
            set(
                item["val"]
                for item in self.submission.get_fields_values().values_list(
                    "value", flat=True
                )
            ),
            set(data.values()),
        )

    def test_missing_mandatory_address_field_gives_invalid_feedback(self):
        # An empty mandatory address field must produce exactly one
        # Bootstrap ``invalid-feedback`` element in the response.
        submission = factories.SubmissionFactory(author=self.user)
        factories.SelectedFormFactory(submission=submission)
        submission.administrative_entity.forms.set(submission.forms.all())
        field = factories.FieldFactoryTypeAddress(
            input_type=submissions_models.Field.INPUT_TYPE_ADDRESS, is_mandatory=True
        )
        field.forms.set(submission.forms.all())
        data = {
            "fields-{}_{}".format(form.pk, field.pk): ""
            for form in submission.forms.all()
        }
        response = self.client.post(
            reverse(
                "submissions:submission_fields",
                kwargs={"submission_id": submission.pk},
            ),
            data=data,
        )
        parser = get_parser(response.content)
        self.assertEqual(1, len(parser.select(".invalid-feedback")))

    def test_fields_step_submit_updates_submission_with_address(self):
        # A plain address field stores the raw address string as its value.
        address_field = factories.FieldFactoryTypeAddress(
            input_type=submissions_models.Field.INPUT_TYPE_ADDRESS
        )
        address_field.forms.set(self.submission.forms.all())
        form = self.submission.forms.first()
        data = {f"fields-{form.pk}_{address_field.pk}": "Hôtel Martinez, Cannes"}
        self.client.post(
            reverse(
                "submissions:submission_fields",
                kwargs={"submission_id": self.submission.pk},
            ),
            data=data,
        )
        self.submission.refresh_from_db()
        field_val = self.submission.get_fields_values().get(
            field__input_type=submissions_models.Field.INPUT_TYPE_ADDRESS
        )
        self.assertEqual(field_val.value, {"val": "Hôtel Martinez, Cannes"})

    def test_fields_step_submit_updates_geotime_with_address_store_geometry_for_address_field(
        self,
    ):
        # When the field opts into geometry storage, the address is geocoded
        # and a SubmissionGeoTime row is created automatically.
        address_field = factories.FieldFactoryTypeAddress(
            input_type=submissions_models.Field.INPUT_TYPE_ADDRESS,
            store_geometry_for_address_field=True,
        )
        address_field.forms.set(self.submission.forms.all())
        form = self.submission.forms.first()
        data = {
            f"fields-{form.pk}_{address_field.pk}": "Place pestalozzi 2, 1400 Yverdon-les-Bains"
        }
        self.client.post(
            reverse(
                "submissions:submission_fields",
                kwargs={"submission_id": self.submission.pk},
            ),
            data=data,
        )
        self.submission.refresh_from_db()
        field_val = self.submission.get_fields_values().get(
            field__input_type=submissions_models.Field.INPUT_TYPE_ADDRESS
        )
        self.assertEqual(
            field_val.value, {"val": "Place pestalozzi 2, 1400 Yverdon-les-Bains"}
        )
        geocoded_geotime_row = submissions_models.SubmissionGeoTime.objects.filter(
            submission=self.submission, comes_from_automatic_geocoding=True
        ).count()
        self.assertEqual(1, geocoded_geotime_row)

    def METHOD_NAME(self):
        # A date field posted in the configured input format is stored in
        # ISO format.
        date_field = factories.FieldFactory(
            input_type=submissions_models.Field.INPUT_TYPE_DATE, name="datum"
        )
        today = date.today()
        form = self.submission.forms.first()
        date_field.forms.set([form])
        data = {
            f"fields-{form.pk}_{date_field.pk}": today.strftime(
                settings.DATE_INPUT_FORMAT
            )
        }
        self.client.post(
            reverse(
                "submissions:submission_fields",
                kwargs={"submission_id": self.submission.pk},
            ),
            data=data,
        )
        field_val = self.submission.get_fields_values().get(field__name="datum")
        self.assertEqual(
            field_val.value,
            {"val": today.isoformat()},
        )
        self.assertEqual(
            field_val.field.input_type,
            submissions_models.Field.INPUT_TYPE_DATE,
        )
import math
from pyaedt.generic.general_methods import ET
from pyaedt.generic.general_methods import pyaedt_function_handler
class Polygon(object):
    """IPC2581 polygon: an outline built from poly steps, plus optional
    cutouts (voids)."""

    def __init__(self, ipc):
        # ``ipc`` provides unit conversion (``from_meter_to_units``) and the
        # target ``units`` used when serializing to XML.
        self._ipc = ipc
        self.is_void = False
        self.poly_steps = []
        self.solid_fill_id = ""
        self.cutout = []

    @pyaedt_function_handler()
    def add_poly_step(self, polygon=None):  # pragma no cover
        """Populate ``poly_steps`` (and ``cutout`` for voids) from an EDB
        polygon primitive.

        NOTE(review): ``polygon`` appears to be an EDB primitive exposing
        ``GetPolygonData()``/``voids`` — confirm against the caller.
        """
        if polygon:
            polygon_data = polygon.GetPolygonData()
            if polygon_data.IsClosed():
                arcs = polygon_data.GetArcData()
                if not arcs:
                    return
                # begin: seed the outline with the start point of the first arc
                new_segment_tep = PolyStep()
                new_segment_tep.poly_type = PolyType.Segment
                new_segment_tep.x = arcs[0].Start.X.ToDouble()
                new_segment_tep.y = arcs[0].Start.Y.ToDouble()
                self.poly_steps.append(new_segment_tep)
                for arc in arcs:
                    # Height == 0 means a straight segment; otherwise a curve.
                    if arc.Height == 0:
                        new_segment_tep = PolyStep()
                        new_segment_tep.poly_type = PolyType.Segment
                        new_segment_tep.x = arc.End.X.ToDouble()
                        new_segment_tep.y = arc.End.Y.ToDouble()
                        self.poly_steps.append(new_segment_tep)
                    else:
                        arc_center = arc.GetCenter()
                        new_poly_step = PolyStep()
                        new_poly_step.poly_type = PolyType.Curve
                        new_poly_step.center_X = arc_center.X.ToDouble()
                        new_poly_step.center_y = arc_center.Y.ToDouble()
                        new_poly_step.x = arc.End.X.ToDouble()
                        new_poly_step.y = arc.End.Y.ToDouble()
                        new_poly_step.clock_wise = not arc.IsCCW()
                        self.poly_steps.append(new_poly_step)
                # Each void becomes a Cutout built with the same logic.
                for void in polygon.voids:
                    void_polygon_data = void.GetPolygonData()
                    if void_polygon_data.IsClosed():
                        void_arcs = void_polygon_data.GetArcData()
                        if not void_arcs:
                            return
                        void_polygon = Cutout(self._ipc)
                        self.cutout.append(void_polygon)
                        # begin: seed the cutout with the first arc's start
                        new_segment_tep = PolyStep()
                        new_segment_tep.poly_type = PolyType.Segment
                        new_segment_tep.x = void_arcs[0].Start.X.ToDouble()
                        new_segment_tep.y = void_arcs[0].Start.Y.ToDouble()
                        void_polygon.poly_steps.append(new_segment_tep)
                        for void_arc in void_arcs:
                            if void_arc.Height == 0:
                                new_segment_tep = PolyStep()
                                new_segment_tep.poly_type = PolyType.Segment
                                new_segment_tep.x = void_arc.End.X.ToDouble()
                                new_segment_tep.y = void_arc.End.Y.ToDouble()
                                void_polygon.poly_steps.append(new_segment_tep)
                            else:
                                arc_center = void_arc.GetCenter()
                                new_poly_step = PolyStep()
                                new_poly_step.poly_type = PolyType.Curve
                                new_poly_step.center_X = arc_center.X.ToDouble()
                                new_poly_step.center_y = arc_center.Y.ToDouble()
                                new_poly_step.x = void_arc.End.X.ToDouble()
                                new_poly_step.y = void_arc.End.Y.ToDouble()
                                new_poly_step.clock_wise = not void_arc.IsCCW()
                                void_polygon.poly_steps.append(new_poly_step)

    @pyaedt_function_handler()
    def METHOD_NAME(self, cutout):  # pragma no cover
        """Append a Cutout to this polygon; return False for invalid input.

        NOTE(review): on success the method returns None (falsy), so callers
        cannot distinguish success from failure by truthiness — verify intent.
        """
        if not isinstance(cutout, Cutout):
            return False
        self.cutout.append(cutout)

    @pyaedt_function_handler()
    def write_xml(self, root_net):  # pragma no cover
        """Serialize this polygon (outline + cutouts) under ``root_net``."""
        if not self.poly_steps:
            return
        feature = ET.SubElement(root_net, "Features")
        location = ET.SubElement(feature, "Location")
        location.set("x", str(0))
        location.set("y", str(0))
        contour = ET.SubElement(feature, "Contour")
        polygon = ET.SubElement(contour, "Polygon")
        # The first step becomes the PolyBegin anchor; the rest are steps.
        polygon_begin = ET.SubElement(polygon, "PolyBegin")
        polygon_begin.set("x", str(self._ipc.from_meter_to_units(self.poly_steps[0].x, self._ipc.units)))
        polygon_begin.set("y", str(self._ipc.from_meter_to_units(self.poly_steps[0].y, self._ipc.units)))
        for poly_step in self.poly_steps[1:]:
            poly_step.write_xml(polygon, self._ipc)
        for cutout in self.cutout:
            cutout.write_xml(contour, self._ipc)
class Cutout(object):
    """A void inside a Polygon, serialized as an IPC2581 ``Cutout`` element."""

    def __init__(self, ipc):
        # ``ipc`` provides unit conversion for serialization.
        self._ipc = ipc
        self.poly_steps = []

    @pyaedt_function_handler()
    def write_xml(self, contour, ipc):  # pragma no cover
        """Serialize this cutout under the given ``contour`` element."""
        cutout = ET.SubElement(contour, "Cutout")
        # First step anchors the outline; remaining steps are segments/curves.
        cutout_begin = ET.SubElement(cutout, "PolyBegin")
        cutout_begin.set("x", str(ipc.from_meter_to_units(self.poly_steps[0].x, ipc.units)))
        cutout_begin.set("y", str(ipc.from_meter_to_units(self.poly_steps[0].y, ipc.units)))
        for poly_step in self.poly_steps[1:]:
            if poly_step.poly_type == 0:
                poly = ET.SubElement(cutout, "PolyStepSegment")
                poly.set("x", str(ipc.from_meter_to_units(poly_step.x, ipc.units)))
                poly.set("y", str(ipc.from_meter_to_units(poly_step.y, ipc.units)))
            elif poly_step.poly_type == 1:
                poly = ET.SubElement(cutout, "PolyStepCurve")
                poly.set("x", str(ipc.from_meter_to_units(poly_step.x, ipc.units)))
                poly.set("y", str(ipc.from_meter_to_units(poly_step.y, ipc.units)))
                poly.set("centerX", str(ipc.from_meter_to_units(poly_step.center_X, ipc.units)))
                poly.set("centerY", str(ipc.from_meter_to_units(poly_step.center_y, ipc.units)))
                poly.set("clockwise", str(poly_step.clock_wise).lower())
class PolyStep(object):
    """One step of a polygon outline: a straight segment or an arc curve."""

    def __init__(self):
        # Step kind (PolyType.Segment or PolyType.Curve) and its end point;
        # center/clock_wise are only meaningful for curves.
        self.poly_type = PolyType().Segment
        self.x = 0.0
        self.y = 0.0
        self.center_X = 0.0
        self.center_y = 0.0
        self.clock_wise = False

    @pyaedt_function_handler()
    def write_xml(self, polygon, ipc):  # pragma no cover
        """Serialize this step under ``polygon`` using ``ipc`` unit conversion."""
        if self.poly_type == 0:
            poly = ET.SubElement(polygon, "PolyStepSegment")
            poly.set("x", str(ipc.from_meter_to_units(self.x, ipc.units)))
            poly.set("y", str(ipc.from_meter_to_units(self.y, ipc.units)))
        elif self.poly_type == 1:
            poly = ET.SubElement(polygon, "PolyStepCurve")
            poly.set("x", str(ipc.from_meter_to_units(self.x, ipc.units)))
            poly.set("y", str(ipc.from_meter_to_units(self.y, ipc.units)))
            poly.set("centerX", str(ipc.from_meter_to_units(self.center_X, ipc.units)))
            poly.set("centerY", str(ipc.from_meter_to_units(self.center_y, ipc.units)))
            poly.set("clockwise", str(self.clock_wise).lower())
class PolyType(object):
    """Enumeration of polygon step kinds used by the XML writers."""
    # Integer codes: 0 -> straight segment, 1 -> curve/arc.
    Segment = 0
    Curve = 1
class Curve(object):
    """Arc parameters for a curved polygon step."""
    def __init__(self):
        # Arc center defaults to the origin; mixed-case attribute names are
        # historical and preserved for compatibility with the XML writers.
        self.center_X, self.center_y = 0.0, 0.0
        # Direction of traversal around the center.
        self.clock_wise = False
class Arc(object):
    """Geometry helpers for circular arcs."""
    @staticmethod
    def get_arc_radius_angle(h, c): # pragma no cover
        """Return ``(radius, angle)`` of a circular arc from its sagitta and chord.

        Parameters
        ----------
        h : float
            Sagitta (arc height), in consistent length units.
        c : float
            Chord length, in the same units.

        Returns
        -------
        tuple or bool
            ``(radius, angle_in_radians)``, or ``False`` if either argument
            is not a float.
        """
        # Bug fix: the original check `not isinstance(h, float) and
        # isinstance(c, float)` only rejected a non-float *h* (and only when
        # *c* was a float); both arguments must be validated together.
        if not (isinstance(h, float) and isinstance(c, float)):
            return False
        # Standard sagitta relations: r = h/2 + c^2/(8h); theta = 2*asin(c/(2r)).
        r = h / 2 + math.pow(c, 2) / (8 * h)
        theta = 2 * math.asin(c / (2 * r))
        return r, theta
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
import json
import frappe
from frappe import _
from frappe.model.document import Document, get_controller
from frappe.utils import cint, quoted
from frappe.website.path_resolver import resolve_path
no_cache = 1
def get_context(context, **dict_params):
	"""Return the page context for a standard list page.

	Merges path parameters into the request's form_dict, pulls the doctype's
	``get_list_context`` customizations, and embeds the first page of
	rendered rows.
	"""
	# Path parameters (e.g. from the route) become request parameters.
	frappe.local.form_dict.update(dict_params)
	doctype = frappe.local.form_dict.doctype
	# Breadcrumb back to the user's account page.
	context.parents = [{"route": "me", "title": _("My Account")}]
	context.meta = frappe.get_meta(doctype)
	context.update(get_list_context(context, doctype) or {})
	context.doctype = doctype
	context.txt = frappe.local.form_dict.txt
	# Render the first page of rows into the context.
	context.update(METHOD_NAME(**frappe.local.form_dict))
@frappe.whitelist(allow_guest=True)
def METHOD_NAME(doctype, txt=None, limit_start=0, limit=20, pathname=None, **kwargs):
	"""Return rendered HTML rows for a standard listing, with pagination info.

	Returns a dict with ``result`` (rendered row HTML), ``raw_result``
	(JSON of the underlying records), ``show_more`` and ``next_start``.
	"""
	limit_start = cint(limit_start)
	# Fetch one extra record so we can tell whether a further page exists.
	raw_result = get_list_data(doctype, txt, limit_start, limit=limit + 1, **kwargs)
	show_more = len(raw_result) > limit
	if show_more:
		raw_result = raw_result[:-1]
	meta = frappe.get_meta(doctype)
	# get_list_data stashes the resolved list context on frappe.flags.
	list_context = frappe.flags.list_context
	if not raw_result:
		return {"result": []}
	if txt:
		list_context.default_subtitle = _('Filtered by "{0}"').format(txt)
	result = []
	row_template = list_context.row_template or "templates/includes/list/row_template.html"
	# Show at most the first four in-list-view fields in each rendered row.
	list_view_fields = [df for df in meta.fields if df.in_list_view][:4]
	for doc in raw_result:
		doc.doctype = doctype
		new_context = frappe._dict(doc=doc, meta=meta, list_view_fields=list_view_fields)
		if not list_context.get_list and not isinstance(new_context.doc, Document):
			# Plain dict rows: load the full document so templates can
			# access all fields.
			new_context.doc = frappe.get_doc(doc.doctype, doc.name)
			new_context.update(new_context.doc.as_dict())
		if not frappe.flags.in_test:
			pathname = pathname or frappe.local.request.path
			new_context["pathname"] = pathname.strip("/ ")
		new_context.update(list_context)
		set_route(new_context)
		rendered_row = frappe.render_template(row_template, new_context, is_path=True)
		result.append(rendered_row)
	from frappe.utils.response import json_handler
	return {
		"raw_result": json.dumps(raw_result, default=json_handler),
		"result": result,
		"show_more": show_more,
		"next_start": limit_start + limit,
	}
@frappe.whitelist(allow_guest=True)
def get_list_data(
	doctype, txt=None, limit_start=0, fields=None, cmd=None, limit=20, web_form_name=None, **kwargs
):
	"""Return the raw records for a standard listing (not rendered HTML).

	Also stores the resolved list context on ``frappe.flags.list_context``
	for use by the rendering endpoint.
	"""
	limit_start = cint(limit_start)
	if frappe.is_table(doctype):
		frappe.throw(_("Child DocTypes are not allowed"), title=_("Invalid DocType"))
	# Fall back to the generic ?search= request parameter for the text filter.
	if not txt and frappe.form_dict.search:
		txt = frappe.form_dict.search
		del frappe.form_dict["search"]
	controller = get_controller(doctype)
	meta = frappe.get_meta(doctype)
	filters = prepare_filters(doctype, controller, kwargs)
	list_context = get_list_context(frappe._dict(), doctype, web_form_name)
	list_context.title_field = getattr(controller, "website", {}).METHOD_NAME(
		"page_title_field", meta.title_field or "name"
	)
	if list_context.filters:
		filters.update(list_context.filters)
	# A doctype may supply its own list getter through the list context.
	_get_list = list_context.get_list or get_list
	kwargs = dict(
		doctype=doctype,
		txt=txt,
		filters=filters,
		limit_start=limit_start,
		limit_page_length=limit,
		order_by=list_context.order_by or "modified desc",
	)
	# allow guest if flag is set
	if not list_context.get_list and (list_context.allow_guest or meta.allow_guest_to_view):
		kwargs["ignore_permissions"] = True
	raw_result = _get_list(**kwargs)
	# list context to be used if called as rendered list
	frappe.flags.list_context = list_context
	return raw_result
def set_route(context):
	"""Set link for the list item"""
	doc = context.doc
	if context.web_form_name:
		# Web-form rows link back to the same page with the doc name in the query.
		context.route = f"{context.pathname}?name={quoted(doc.name)}"
		return
	explicit_route = getattr(doc, "route", None) if doc else None
	if explicit_route:
		# The document carries its own published route.
		context.route = explicit_route
		return
	# Fall back to <pathname-or-doctype>/<name>.
	base = context.pathname or quoted(doc.doctype)
	context.route = f"{base}/{quoted(doc.name)}"
def prepare_filters(doctype, controller, kwargs):
	"""Build the filters dict for the list query from request kwargs.

	JSON-decodes values where possible, applies the controller's website
	``condition_field``, resolves extra filters from the request path, and
	drops keys that are not actual fields of *doctype*.
	"""
	# Values arrive as strings; decode JSON-encoded ones in place.
	for key in kwargs.keys():
		try:
			kwargs[key] = json.loads(kwargs[key])
		except ValueError:
			pass
	filters = frappe._dict(kwargs)
	meta = frappe.get_meta(doctype)
	if hasattr(controller, "website") and controller.website.METHOD_NAME("condition_field"):
		# Only list documents marked as published/visible on the website.
		filters[controller.website["condition_field"]] = 1
	if filters.pathname:
		# resolve additional filters from path
		resolve_path(filters.pathname)
		for key, val in frappe.local.form_dict.items():
			if key not in filters and key != "flags":
				filters[key] = val
	# filter the filters to include valid fields only
	for fieldname, val in list(filters.items()):
		if not meta.has_field(fieldname):
			del filters[fieldname]
	return filters
def get_list_context(context, doctype, web_form_name=None):
	"""Assemble the list context for *doctype*.

	Sources, in order: the doctype's python module (``get_list_context``
	hook), ``webform_list_context`` hooks (custom doctypes with a web
	form), the web form's own module, and finally template defaults.
	"""
	from frappe.modules import load_doctype_module
	from frappe.website.doctype.web_form.web_form import get_web_form_module
	list_context = context or frappe._dict()
	meta = frappe.get_meta(doctype)
	def update_context_from_module(module, list_context):
		# call the user defined method `get_list_context`
		# from the python module
		if hasattr(module, "get_list_context"):
			out = frappe._dict(module.get_list_context(list_context) or {})
			if out:
				list_context = out
		return list_context
	# get context from the doctype module
	if not meta.custom:
		# custom doctypes don't have modules
		module = load_doctype_module(doctype)
		list_context = update_context_from_module(module, list_context)
	# get context for custom webform
	if meta.custom and web_form_name:
		webform_list_contexts = frappe.get_hooks("webform_list_context")
		if webform_list_contexts:
			out = frappe._dict(frappe.get_attr(webform_list_contexts[0])(meta.module) or {})
			if out:
				list_context = out
	# get context from web form module
	if web_form_name:
		web_form = frappe.get_doc("Web Form", web_form_name)
		list_context = update_context_from_module(get_web_form_module(web_form), list_context)
	# get path from '/templates/' folder of the doctype
	if not meta.custom and not list_context.row_template:
		list_context.row_template = meta.get_row_template()
	if not meta.custom and not list_context.list_template:
		# NOTE(review): the condition checks `list_template` but the
		# assignment sets `template` -- looks inconsistent; confirm which
		# key the renderer reads before changing either side.
		list_context.template = meta.get_list_template() or "www/list.html"
	return list_context
def get_list(
	doctype,
	txt,
	filters,
	limit_start,
	limit_page_length=20,
	ignore_permissions=False,
	fields=None,
	order_by=None,
):
	"""Fetch documents of *doctype* for the listing.

	When *txt* is given, it is matched (via ``like``) against the doctype's
	configured search fields, or against ``name`` when none are configured.
	"""
	meta = frappe.get_meta(doctype)
	filters = filters or []
	fields = fields or "distinct *"
	or_filters = []
	if txt:
		like_pattern = "%" + txt + "%"
		if meta.search_fields:
			# Only text-like fields (and `name`) are sensible `like` targets.
			text_fieldtypes = ("Data", "Text", "Small Text", "Text Editor")
			for fieldname in meta.get_search_fields():
				if fieldname == "name" or meta.get_field(fieldname).fieldtype in text_fieldtypes:
					or_filters.append([doctype, fieldname, "like", like_pattern])
		elif isinstance(filters, dict):
			filters["name"] = ("like", like_pattern)
		else:
			filters.append([doctype, "name", "like", like_pattern])
	return frappe.get_list(
		doctype,
		fields=fields,
		filters=filters,
		or_filters=or_filters,
		limit_start=limit_start,
		limit_page_length=limit_page_length,
		ignore_permissions=ignore_permissions,
		order_by=order_by,
	)
import unittest
import os
from unittest import mock
from unittest.mock import MagicMock
import aim.sdk.utils as utils
class TestUtils(unittest.TestCase):
    """Unit tests for helpers in ``aim.sdk.utils``: repo search, run-hash
    generation, object-type naming, type compatibility and path cleanup."""
    def METHOD_NAME(self):
        # NOTE(review): placeholder name; presumably unittest's setUp -- confirm.
        self.maxDiff = None
    def test_should_search_aim_repo_not_found_back_slash(self):
        # arrange
        path = '/'
        # act
        path, found = utils.search_aim_repo(path)
        # assert
        self.assertFalse(found)
        self.assertIsNone(path)
    @mock.patch('os.path.exists')
    def test_should_search_aim_repo_not_found_dot_back_slash(self, mock_os_path_exists: mock.MagicMock):
        # arrange
        path = '/'
        built_dir = os.path.dirname(__file__)
        build_dir_split = built_dir.split(os.sep)
        # Pretend no repo directory exists at any ancestor level.
        mock_os_path_exists.side_effect = [False] * len(build_dir_split)
        # act
        path, found = utils.search_aim_repo(path)
        # assert
        self.assertFalse(found)
        self.assertIsNone(path)
    def test_should_generate_run_hash_default_len(self):
        # arrange
        expected_hash_len = 24
        # act
        actual_hash = utils.generate_run_hash()
        # assert
        self.assertIsInstance(actual_hash, str)
        self.assertEqual(expected_hash_len, len(actual_hash))
    def test_should_generate_run_hash_twelve_char_long(self):
        # arrange
        hash_length = 12
        expected_hash_len = 12
        # act
        actual_hash = utils.generate_run_hash(hash_length)
        # assert
        self.assertIsInstance(actual_hash, str)
        self.assertEqual(expected_hash_len, len(actual_hash))
    @mock.patch('uuid.uuid4')
    def test_should_generate_run_hash_six_char_long(self, mock_uuid: MagicMock):
        # arrange
        expected_uuid_hex = '16710b81-ccab-4409-bd79-50a770b565a6'
        mock_uuid.return_value.hex = expected_uuid_hex
        hash_length = 6
        expected_hash_len = 6
        # act
        actual_hash = utils.generate_run_hash(hash_length)
        # assert: the hash is a prefix of uuid4().hex of the requested length
        self.assertIsInstance(actual_hash, str)
        self.assertEqual(expected_hash_len, len(actual_hash))
        self.assertEqual(expected_uuid_hex[:expected_hash_len], actual_hash)
        mock_uuid.assert_called_once()
    def test_should_get_obj_type_name_str(self):
        # arrange
        obj = 'hello aim!'
        expected_type = 'str'
        # act
        actual_type = utils.get_object_typename(obj)
        # assert
        self.assertEqual(expected_type, actual_type)
    def test_should_get_clean_repo_path(self):
        # arrange
        path = '/'
        expected_repo_path = os.getcwd()
        # act
        actual_repo_path = utils.clean_repo_path(path)
        # assert
        self.assertEqual(expected_repo_path, actual_repo_path)
    def test_should_get_clean_repo_path_empty_str(self):
        # arrange
        path = ''
        expected_repo_path = ''
        # act
        actual_repo_path = utils.clean_repo_path(path)
        # assert
        self.assertEqual(expected_repo_path, actual_repo_path)
    def test_should_get_obj_type_name_list_unknown(self):
        # arrange
        obj = [None]
        expected_type = 'list(unknown)'
        # act
        actual_type = utils.get_object_typename(obj)
        # assert
        self.assertEqual(expected_type, actual_type)
    def test_should_get_obj_type_name_bool(self):
        # arrange: booleans are reported as 'int' by aim's type naming
        obj = True
        expected_type = 'int'
        # act
        actual_type = utils.get_object_typename(obj)
        # assert
        self.assertEqual(expected_type, actual_type)
    def test_should_check_types_compatibility_int(self):
        # arrange
        data_type = 'int'
        base_data_type = 'int'
        # act
        actual_compatibility = utils.check_types_compatibility(data_type, base_data_type, None)
        # assert
        self.assertTrue(actual_compatibility)
    def test_should_check_types_compatibility_base_list(self):
        # arrange: a typed list is compatible with the plain 'list' base
        data_type = 'list(str)'
        base_data_type = 'list'
        # act
        actual_compatibility = utils.check_types_compatibility(data_type, base_data_type, None)
        # assert
        self.assertTrue(actual_compatibility)
    def test_should_check_types_compatibility_base_list_str(self):
        # arrange: plain 'list' is compatible with a typed list base too
        data_type = 'list'
        base_data_type = 'list(str)'
        # act
        actual_compatibility = utils.check_types_compatibility(data_type, base_data_type, None)
        # assert
        self.assertTrue(actual_compatibility)
"""Implements (a subset of) Sun XDR -- eXternal Data Representation.
See: RFC 1014
"""
import struct
try:
from cStringIO import StringIO as _StringIO
except ImportError:
from StringIO import StringIO as _StringIO
from functools import wraps
__all__ = ["Error", "Packer", "Unpacker", "ConversionError"]
# exceptions
class Error(Exception):
    """Base exception class for this module.

    The original message is available on the ``msg`` attribute.
    """
    def __init__(self, msg):
        self.msg = msg
    def __repr__(self):
        return "%r" % (self.msg,)
    def __str__(self):
        return "%s" % (self.msg,)
class ConversionError(Error):
    """Raised when a value cannot be packed/unpacked (wraps struct.error)."""
def raise_conversion_error(function):
    """ Wrap any raised struct.errors in a ConversionError. """
    @wraps(function)
    def wrapper(self, value):
        try:
            return function(self, value)
        except struct.error as exc:
            # Surface the low-level packing failure as the module's own error.
            raise ConversionError(exc.args[0])
    return wrapper
class Packer:
    """Pack various data representations into a buffer.

    Note: this is Python 2 code (``0x...L`` literals, ``raise X, msg``
    syntax, str written to StringIO); keep it on a Python 2 interpreter.
    """
    def __init__(self):
        self.reset()
    def reset(self):
        # Accumulates the packed bytes.
        self.__buf = _StringIO()
    def get_buffer(self):
        return self.__buf.getvalue()
    # backwards compatibility
    get_buf = get_buffer
    @raise_conversion_error
    def pack_uint(self, x):
        # Big-endian unsigned 32-bit word.
        self.__buf.write(struct.pack('>L', x))
    @raise_conversion_error
    def pack_int(self, x):
        # Big-endian signed 32-bit word.
        self.__buf.write(struct.pack('>l', x))
    pack_enum = pack_int
    def pack_bool(self, x):
        # XDR booleans are full 4-byte 0/1 words.
        if x: self.__buf.write('\0\0\0\1')
        else: self.__buf.write('\0\0\0\0')
    def pack_uhyper(self, x):
        """Pack a 64-bit unsigned value as two 32-bit words (high, then low)."""
        try:
            self.pack_uint(x>>32 & 0xffffffffL)
        except (TypeError, struct.error) as e:
            raise ConversionError(e.args[0])
        try:
            self.pack_uint(x & 0xffffffffL)
        except (TypeError, struct.error) as e:
            raise ConversionError(e.args[0])
    pack_hyper = pack_uhyper
    @raise_conversion_error
    def pack_float(self, x):
        self.__buf.write(struct.pack('>f', x))
    @raise_conversion_error
    def pack_double(self, x):
        self.__buf.write(struct.pack('>d', x))
    def pack_fstring(self, n, s):
        """Pack a fixed-length string, NUL-padded up to a 4-byte boundary."""
        if n < 0:
            raise ValueError, 'fstring size must be nonnegative'
        data = s[:n]
        n = ((n+3)//4)*4
        data = data + (n - len(data)) * '\0'
        self.__buf.write(data)
    pack_fopaque = pack_fstring
    def pack_string(self, s):
        # Variable-length string: length word followed by the padded payload.
        n = len(s)
        self.pack_uint(n)
        self.pack_fstring(n, s)
    pack_opaque = pack_string
    pack_bytes = pack_string
    def pack_list(self, list, pack_item):
        """Pack a list as (1, item) pairs terminated by a 0 word."""
        for item in list:
            self.pack_uint(1)
            pack_item(item)
        self.pack_uint(0)
    def pack_farray(self, n, list, pack_item):
        """Pack a fixed-length array (no length word); list must have n items."""
        if len(list) != n:
            raise ValueError, 'wrong array size'
        for item in list:
            pack_item(item)
    def pack_array(self, list, pack_item):
        """Pack a variable-length array: length word, then the items."""
        n = len(list)
        self.pack_uint(n)
        self.pack_farray(n, list, pack_item)
class Unpacker:
    """Unpacks various data representations from the given buffer.

    Note: this is Python 2 code (``long``, ``0x...L`` literals,
    ``raise X, msg`` syntax); keep it on a Python 2 interpreter.
    """
    def __init__(self, data):
        self.reset(data)
    def reset(self, data):
        self.__buf = data
        # Current read offset into the buffer.
        self.__pos = 0
    def get_position(self):
        return self.__pos
    def set_position(self, position):
        self.__pos = position
    def get_buffer(self):
        return self.__buf
    def done(self):
        """Raise Error if unconsumed bytes remain in the buffer."""
        if self.__pos < len(self.__buf):
            raise Error('unextracted data remains')
    def unpack_uint(self):
        """Unpack a big-endian unsigned 32-bit integer."""
        i = self.__pos
        self.__pos = j = i+4
        data = self.__buf[i:j]
        if len(data) < 4:
            raise EOFError
        x = struct.unpack('>L', data)[0]
        try:
            # Prefer a plain int where the value fits (Python 2 int vs long).
            return int(x)
        except OverflowError:
            return x
    def unpack_int(self):
        """Unpack a big-endian signed 32-bit integer."""
        i = self.__pos
        self.__pos = j = i+4
        data = self.__buf[i:j]
        if len(data) < 4:
            raise EOFError
        return struct.unpack('>l', data)[0]
    unpack_enum = unpack_int
    def unpack_bool(self):
        return bool(self.unpack_int())
    def unpack_uhyper(self):
        """Unpack a 64-bit unsigned integer from two 32-bit words (hi, lo)."""
        hi = self.unpack_uint()
        lo = self.unpack_uint()
        return long(hi)<<32 | lo
    def unpack_hyper(self):
        """Unpack a 64-bit signed integer (two's complement)."""
        x = self.unpack_uhyper()
        if x >= 0x8000000000000000L:
            x = x - 0x10000000000000000L
        return x
    def unpack_float(self):
        """Unpack a big-endian IEEE-754 single-precision float."""
        i = self.__pos
        self.__pos = j = i+4
        data = self.__buf[i:j]
        if len(data) < 4:
            raise EOFError
        return struct.unpack('>f', data)[0]
    def unpack_double(self):
        """Unpack a big-endian IEEE-754 double-precision float."""
        i = self.__pos
        self.__pos = j = i+8
        data = self.__buf[i:j]
        if len(data) < 8:
            raise EOFError
        return struct.unpack('>d', data)[0]
    def unpack_fstring(self, n):
        """Unpack a fixed-length string of n bytes (consumes 4-byte-aligned padding)."""
        if n < 0:
            raise ValueError, 'fstring size must be nonnegative'
        i = self.__pos
        j = i + (n+3)//4*4
        if j > len(self.__buf):
            raise EOFError
        self.__pos = j
        return self.__buf[i:i+n]
    unpack_fopaque = unpack_fstring
    def unpack_string(self):
        # Variable-length string: length word followed by the padded payload.
        n = self.unpack_uint()
        return self.unpack_fstring(n)
    unpack_opaque = unpack_string
    unpack_bytes = unpack_string
    def METHOD_NAME(self, unpack_item):
        """Unpack a list encoded as (1, item) pairs terminated by a 0 word.

        NOTE(review): placeholder name; presumably ``unpack_list`` -- the
        inverse of ``Packer.pack_list``.
        """
        list = []
        while 1:
            x = self.unpack_uint()
            if x == 0: break
            if x != 1:
                raise ConversionError, '0 or 1 expected, got %r' % (x,)
            item = unpack_item()
            list.append(item)
        return list
    def unpack_farray(self, n, unpack_item):
        """Unpack a fixed-length array of n items (no length word)."""
        list = []
        for i in range(n):
            list.append(unpack_item())
        return list
    def unpack_array(self, unpack_item):
        """Unpack a variable-length array: length word, then the items."""
        n = self.unpack_uint()
        return self.unpack_farray(n, unpack_item)
# The GPEN implementation is also open-sourced by the authors,
# and available at https://github.com/yangxy/GPEN/blob/main/face_detect/retinaface_detection.py
import os
import cv2
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
from .models.retinaface import RetinaFace
from .utils import PriorBox, decode, decode_landm, py_cpu_nms
# RetinaFace configuration for the ResNet-50 backbone: anchor sizes per FPN
# level, feature-map strides, bbox-decoding variances, and which backbone
# layers feed the FPN (with its channel widths).
cfg_re50 = {
    'name': 'Resnet50',
    'min_sizes': [[16, 32], [64, 128], [256, 512]],
    'steps': [8, 16, 32],
    'variance': [0.1, 0.2],
    'clip': False,
    'pretrain': False,
    'return_layers': {
        'layer2': 1,
        'layer3': 2,
        'layer4': 3
    },
    'in_channel': 256,
    'out_channel': 256
}
class RetinaFaceDetection(object):
    """RetinaFace face detector (ResNet-50 backbone).

    Loads pretrained weights from *model_path* and exposes ``detect`` (numpy
    image input) and ``detect_tensor`` (torch tensor input), both returning
    detections and 5-point landmarks.
    """
    def __init__(self, model_path, device='cuda'):
        cudnn.benchmark = True
        self.model_path = model_path
        self.device = device
        self.cfg = cfg_re50
        self.net = RetinaFace(cfg=self.cfg)
        self.METHOD_NAME()
        self.net = self.net.to(device)
        # Per-channel BGR mean, shaped (1, 3, 1, 1) for broadcasting.
        self.mean = torch.tensor([[[[104]], [[117]], [[123]]]]).to(device)
    def check_keys(self, pretrained_state_dict):
        """Assert that the checkpoint shares at least one key with the model."""
        ckpt_keys = set(pretrained_state_dict.keys())
        model_keys = set(self.net.state_dict().keys())
        used_pretrained_keys = model_keys & ckpt_keys
        assert len(
            used_pretrained_keys) > 0, 'load NONE from pretrained checkpoint'
        return True
    def remove_prefix(self, state_dict, prefix):
        """Return a copy of *state_dict* with *prefix* stripped from key names."""
        new_state_dict = dict()
        # remove unnecessary 'module.'
        for k, v in state_dict.items():
            if k.startswith(prefix):
                new_state_dict[k[len(prefix):]] = v
            else:
                new_state_dict[k] = v
        return new_state_dict
    def METHOD_NAME(self, load_to_cpu=False):
        """Load pretrained weights from ``self.model_path`` and set eval mode.

        NOTE(review): placeholder name; presumably ``load_model``.
        """
        pretrained_dict = torch.load(
            self.model_path, map_location=torch.device('cpu'))
        if 'state_dict' in pretrained_dict.keys():
            pretrained_dict = self.remove_prefix(pretrained_dict['state_dict'],
                                                 'module.')
        else:
            pretrained_dict = self.remove_prefix(pretrained_dict, 'module.')
        self.check_keys(pretrained_dict)
        self.net.load_state_dict(pretrained_dict, strict=False)
        self.net.eval()
    def detect(self,
               img_raw,
               resize=1,
               confidence_threshold=0.9,
               nms_threshold=0.4,
               top_k=5000,
               keep_top_k=750,
               save_image=False):
        """Detect faces in a numpy BGR image.

        Returns ``(dets, landms)``: dets is (N, 5) boxes+score, landms is
        (N, 10) flattened 5-point landmarks, both rescaled to the original
        image size.
        """
        img = np.float32(img_raw)
        im_height, im_width = img.shape[:2]
        ss = 1.0
        # tricky
        if max(im_height, im_width) > 1500:
            # Downscale very large images so the longest side is ~1000 px;
            # outputs are scaled back by 1/ss on return.
            ss = 1000.0 / max(im_height, im_width)
            img = cv2.resize(img, (0, 0), fx=ss, fy=ss)
            im_height, im_width = img.shape[:2]
        scale = torch.Tensor(
            [img.shape[1], img.shape[0], img.shape[1], img.shape[0]])
        img -= (104, 117, 123)
        img = img.transpose(2, 0, 1)
        img = torch.from_numpy(img).unsqueeze(0)
        img = img.to(self.device)
        scale = scale.to(self.device)
        loc, conf, landms = self.net(img)  # forward pass
        del img
        priorbox = PriorBox(self.cfg, image_size=(im_height, im_width))
        priors = priorbox.forward()
        priors = priors.to(self.device)
        prior_data = priors.data
        boxes = decode(loc.data.squeeze(0), prior_data, self.cfg['variance'])
        boxes = boxes * scale / resize
        boxes = boxes.cpu().numpy()
        scores = conf.squeeze(0).data.cpu().numpy()[:, 1]
        landms = decode_landm(
            landms.data.squeeze(0), prior_data, self.cfg['variance'])
        scale1 = torch.Tensor([
            im_width, im_height, im_width, im_height, im_width, im_height,
            im_width, im_height, im_width, im_height
        ])
        scale1 = scale1.to(self.device)
        landms = landms * scale1 / resize
        landms = landms.cpu().numpy()
        # ignore low scores
        inds = np.where(scores > confidence_threshold)[0]
        boxes = boxes[inds]
        landms = landms[inds]
        scores = scores[inds]
        # keep top-K before NMS
        order = scores.argsort()[::-1][:top_k]
        boxes = boxes[order]
        landms = landms[order]
        scores = scores[order]
        # do NMS
        dets = np.hstack((boxes, scores[:, np.newaxis])).astype(
            np.float32, copy=False)
        keep = py_cpu_nms(dets, nms_threshold)
        dets = dets[keep, :]
        landms = landms[keep]
        # keep top-K faster NMS
        dets = dets[:keep_top_k, :]
        landms = landms[:keep_top_k, :]
        landms = landms.reshape((-1, 5, 2))
        landms = landms.transpose((0, 2, 1))
        landms = landms.reshape(
            -1,
            10,
        )
        return dets / ss, landms / ss
    def detect_tensor(self,
                      img,
                      resize=1,
                      confidence_threshold=0.9,
                      nms_threshold=0.4,
                      top_k=5000,
                      keep_top_k=750,
                      save_image=False):
        """Detect faces in a torch image tensor (NCHW, BGR).

        Same return convention as :meth:`detect`. The image is always
        rescaled so the longest side is 1000 px before inference.
        """
        im_height, im_width = img.shape[-2:]
        ss = 1000 / max(im_height, im_width)
        img = F.interpolate(img, scale_factor=ss)
        im_height, im_width = img.shape[-2:]
        scale = torch.Tensor([im_width, im_height, im_width,
                              im_height]).to(self.device)
        img -= self.mean
        loc, conf, landms = self.net(img)  # forward pass
        priorbox = PriorBox(self.cfg, image_size=(im_height, im_width))
        priors = priorbox.forward()
        priors = priors.to(self.device)
        prior_data = priors.data
        boxes = decode(loc.data.squeeze(0), prior_data, self.cfg['variance'])
        boxes = boxes * scale / resize
        boxes = boxes.cpu().numpy()
        scores = conf.squeeze(0).data.cpu().numpy()[:, 1]
        landms = decode_landm(
            landms.data.squeeze(0), prior_data, self.cfg['variance'])
        scale1 = torch.Tensor([
            img.shape[3], img.shape[2], img.shape[3], img.shape[2],
            img.shape[3], img.shape[2], img.shape[3], img.shape[2],
            img.shape[3], img.shape[2]
        ])
        scale1 = scale1.to(self.device)
        landms = landms * scale1 / resize
        landms = landms.cpu().numpy()
        # ignore low scores
        inds = np.where(scores > confidence_threshold)[0]
        boxes = boxes[inds]
        landms = landms[inds]
        scores = scores[inds]
        # keep top-K before NMS
        order = scores.argsort()[::-1][:top_k]
        boxes = boxes[order]
        landms = landms[order]
        scores = scores[order]
        # do NMS
        dets = np.hstack((boxes, scores[:, np.newaxis])).astype(
            np.float32, copy=False)
        keep = py_cpu_nms(dets, nms_threshold)
        dets = dets[keep, :]
        landms = landms[keep]
        # keep top-K faster NMS
        dets = dets[:keep_top_k, :]
        landms = landms[:keep_top_k, :]
        landms = landms.reshape((-1, 5, 2))
        landms = landms.transpose((0, 2, 1))
        landms = landms.reshape(
            -1,
            10,
        )
        return dets / ss, landms / ss
"""
Unit tests for landlab.components.overland_flow.OverlandFlow
last updated: 3/14/16
"""
import numpy as np
from landlab import RasterModelGrid
from landlab.components.overland_flow import OverlandFlow
from landlab.components.overland_flow._links import horizontal_link_ids
# Default test grid: (rows, cols), cell spacing in meters, and origin (x, y).
(_SHAPE, _SPACING, _ORIGIN) = ((32, 240), (25, 25), (0.0, 0.0))
_ARGS = (_SHAPE, _SPACING, _ORIGIN)
def _left_edge_horizontal_ids(shape):
    """Return ids of the horizontal links along the grid's left edge."""
    all_horizontal = horizontal_link_ids(shape)
    # First column of each row of horizontal links is the left-edge link.
    return all_horizontal[:, 0]
def test_deAlm_name(deAlm):
    """Component advertises the expected name."""
    assert deAlm.name == "OverlandFlow"
def test_deAlm_input_var_names(deAlm):
    """Component declares its required input fields."""
    assert deAlm.input_var_names == ("surface_water__depth", "topographic__elevation")
def test_deAlm_output_var_names(deAlm):
    """Component declares its output fields."""
    assert deAlm.output_var_names == (
        "surface_water__depth",
        "surface_water__discharge",
        "water_surface__gradient",
    )
def test_deAlm_var_units(deAlm):
    """Every declared variable has units, and the units are as expected."""
    # The union of input and output names must exactly match the units table.
    assert set(deAlm.input_var_names) | set(deAlm.output_var_names) == set(
        dict(deAlm.units).keys()
    )
    assert deAlm.var_units("surface_water__depth") == "m"
    assert deAlm.var_units("surface_water__discharge") == "m3/s"
    assert deAlm.var_units("water_surface__gradient") == "-"
    assert deAlm.var_units("topographic__elevation") == "m"
def METHOD_NAME(deAlm):
    """Fixture grid dimensions match the module-level test shape."""
    assert deAlm.grid.number_of_node_rows == _SHAPE[0]
    assert deAlm.grid.number_of_node_columns == _SHAPE[1]
def test_deAlm_analytical():
    """Compare de Almeida overland flow to the analytical kinematic-wave
    profile on a plane, using the component's adaptive time step."""
    grid = RasterModelGrid((32, 240), xy_spacing=25)
    grid.add_zeros("surface_water__depth", at="node")
    grid.add_zeros("topographic__elevation", at="node")
    grid.set_closed_boundaries_at_grid_edges(True, True, True, True)
    left_inactive_ids = _left_edge_horizontal_ids(grid.shape)
    deAlm = OverlandFlow(grid, mannings_n=0.01, h_init=0.001)
    time = 0.0
    while time < 500.0:
        # Mirror discharge onto the closed left edge (zero-gradient boundary).
        grid.at_link["surface_water__discharge"][left_inactive_ids] = grid.at_link[
            "surface_water__discharge"
        ][left_inactive_ids + 1]
        dt = deAlm.calc_time_step()
        deAlm.overland_flow(dt)
        # Analytical inflow depth imposed on the upstream column
        # (Manning's n = 0.01, wave speed u = 0.4 m/s).
        h_boundary = ((7.0 / 3.0) * (0.01**2) * (0.4**3) * time) ** (3.0 / 7.0)
        grid.at_node["surface_water__depth"][grid.nodes[1:-1, 1]] = h_boundary
        time += dt
    x = np.arange(0, ((grid.shape[1]) * grid.dx), grid.dx)
    # Analytical depth profile at t = 500 s; clipped to zero ahead of the wave.
    h_analytical = -(7.0 / 3.0) * (0.01**2) * (0.4**2) * (x - (0.4 * 500))
    h_analytical[np.where(h_analytical > 0)] = h_analytical[
        np.where(h_analytical > 0)
    ] ** (3.0 / 7.0)
    h_analytical[np.where(h_analytical < 0)] = 0.0
    hdeAlm = deAlm.h.reshape(grid.shape)
    hdeAlm = hdeAlm[1][1:]
    hdeAlm = np.append(hdeAlm, [0])
    np.testing.assert_almost_equal(h_analytical, hdeAlm, decimal=1)
def test_deAlm_analytical_imposed_dt_short():
    """Same analytical comparison, but with a fixed short time step (10 s)
    instead of the component's adaptive step."""
    grid = RasterModelGrid((32, 240), xy_spacing=25)
    grid.add_zeros("surface_water__depth", at="node")
    grid.add_zeros("topographic__elevation", at="node")
    grid.set_closed_boundaries_at_grid_edges(True, True, True, True)
    left_inactive_ids = _left_edge_horizontal_ids(grid.shape)
    deAlm = OverlandFlow(grid, mannings_n=0.01, h_init=0.001)
    time = 0.0
    while time < 500.0:
        # Mirror discharge onto the closed left edge (zero-gradient boundary).
        grid.at_link["surface_water__discharge"][left_inactive_ids] = grid.at_link[
            "surface_water__discharge"
        ][left_inactive_ids + 1]
        dt = 10.0
        deAlm.overland_flow(dt)
        # Analytical inflow depth imposed on the upstream column.
        h_boundary = ((7.0 / 3.0) * (0.01**2) * (0.4**3) * time) ** (3.0 / 7.0)
        grid.at_node["surface_water__depth"][grid.nodes[1:-1, 1]] = h_boundary
        time += dt
    x = np.arange(0, ((grid.shape[1]) * grid.dx), grid.dx)
    h_analytical = -(7.0 / 3.0) * (0.01**2) * (0.4**2) * (x - (0.4 * 500))
    h_analytical[np.where(h_analytical > 0)] = h_analytical[
        np.where(h_analytical > 0)
    ] ** (3.0 / 7.0)
    h_analytical[np.where(h_analytical < 0)] = 0.0
    hdeAlm = deAlm.h.reshape(grid.shape)
    hdeAlm = hdeAlm[1][1:]
    hdeAlm = np.append(hdeAlm, [0])
    np.testing.assert_almost_equal(h_analytical, hdeAlm, decimal=1)
def test_deAlm_analytical_imposed_dt_long():
    """Same analytical comparison with a long imposed step (100 s), driven
    through ``run_one_step`` (which may internally subdivide the step)."""
    grid = RasterModelGrid((32, 240), xy_spacing=25)
    grid.add_zeros("surface_water__depth", at="node")
    grid.add_zeros("topographic__elevation", at="node")
    grid.set_closed_boundaries_at_grid_edges(True, True, True, True)
    left_inactive_ids = _left_edge_horizontal_ids(grid.shape)
    deAlm = OverlandFlow(grid, mannings_n=0.01, h_init=0.001)
    time = 0.0
    while time < 500.0:
        # Mirror discharge onto the closed left edge (zero-gradient boundary).
        grid.at_link["surface_water__discharge"][left_inactive_ids] = grid.at_link[
            "surface_water__discharge"
        ][left_inactive_ids + 1]
        dt = 100.0
        deAlm.run_one_step(dt)
        # Analytical inflow depth imposed on the upstream column.
        h_boundary = ((7.0 / 3.0) * (0.01**2) * (0.4**3) * time) ** (3.0 / 7.0)
        grid.at_node["surface_water__depth"][grid.nodes[1:-1, 1]] = h_boundary
        time += dt
    x = np.arange(0, ((grid.shape[1]) * grid.dx), grid.dx)
    h_analytical = -(7.0 / 3.0) * (0.01**2) * (0.4**2) * (x - (0.4 * 500))
    h_analytical[np.where(h_analytical > 0)] = h_analytical[
        np.where(h_analytical > 0)
    ] ** (3.0 / 7.0)
    h_analytical[np.where(h_analytical < 0)] = 0.0
    hdeAlm = deAlm.h.reshape(grid.shape)
    hdeAlm = hdeAlm[1][1:]
    hdeAlm = np.append(hdeAlm, [0])
    np.testing.assert_almost_equal(h_analytical, hdeAlm, decimal=1)
"""Authors: Heberto Mayorquin, Cody Baker and Ben Dichter."""
from typing import Optional
import numpy as np
from pynwb import NWBFile
from pynwb.device import Device
from pynwb.ophys import Fluorescence, ImageSegmentation, ImagingPlane, TwoPhotonSeries
from ...baseextractorinterface import BaseExtractorInterface
from ...utils import fill_defaults, get_base_schema, get_schema_from_hdmf_class
class BaseSegmentationExtractorInterface(BaseExtractorInterface):
    """Parent class for all SegmentationExtractorInterfaces.

    Wraps a roiextractors SegmentationExtractor and writes its ROI masks
    and fluorescence traces into an NWB file.
    """
    ExtractorModuleName = "roiextractors"
    def __init__(self, **source_data):
        super().__init__(**source_data)
        # Instantiate the concrete roiextractors extractor for this interface.
        self.segmentation_extractor = self.get_extractor()(**source_data)
    def get_metadata_schema(self) -> dict:
        """Build the metadata schema, adding the Ophys section (Device,
        ImageSegmentation, ImagingPlane, Fluorescence, TwoPhotonSeries)."""
        metadata_schema = super().get_metadata_schema()
        metadata_schema["required"] = ["Ophys"]
        metadata_schema["properties"]["Ophys"] = get_base_schema()
        metadata_schema["properties"]["Ophys"]["properties"] = dict(
            Device=dict(type="array", minItems=1, items=get_schema_from_hdmf_class(Device)),
        )
        metadata_schema["properties"]["Ophys"]["properties"].update(
            Fluorescence=get_schema_from_hdmf_class(Fluorescence),
            ImageSegmentation=get_schema_from_hdmf_class(ImageSegmentation),
            ImagingPlane=get_schema_from_hdmf_class(ImagingPlane),
            TwoPhotonSeries=get_schema_from_hdmf_class(TwoPhotonSeries),
        )
        metadata_schema["properties"]["Ophys"]["required"] = ["Device", "ImageSegmentation"]
        # Temporary fixes until centralized definition of metadata schemas
        metadata_schema["properties"]["Ophys"]["properties"]["ImagingPlane"].update(type="array")
        metadata_schema["properties"]["Ophys"]["properties"]["TwoPhotonSeries"].update(type="array")
        metadata_schema["properties"]["Ophys"]["properties"]["Fluorescence"]["properties"]["roi_response_series"][
            "items"
        ]["required"] = list()
        metadata_schema["properties"]["Ophys"]["properties"]["ImageSegmentation"]["additionalProperties"] = True
        metadata_schema["properties"]["Ophys"]["properties"]["Fluorescence"]["properties"]["roi_response_series"].pop(
            "maxItems"
        )
        # DfOverF reuses the Fluorescence schema shape.
        metadata_schema["properties"]["Ophys"]["properties"]["DfOverF"] = metadata_schema["properties"]["Ophys"][
            "properties"
        ]["Fluorescence"]
        fill_defaults(metadata_schema, self.METHOD_NAME())
        return metadata_schema
    def METHOD_NAME(self) -> dict:
        """Return interface metadata merged with segmentation metadata from
        the extractor. NOTE(review): placeholder name; presumably
        ``get_metadata`` -- confirm against callers."""
        from ...tools.roiextractors import get_nwb_segmentation_metadata
        metadata = super().METHOD_NAME()
        metadata.update(get_nwb_segmentation_metadata(self.segmentation_extractor))
        return metadata
    def get_original_timestamps(self) -> np.ndarray:
        """Return the unmodified timestamps by re-reading the source data
        (ignores any alignment applied to the in-memory extractor)."""
        reinitialized_extractor = self.get_extractor()(**self.source_data)
        return reinitialized_extractor.frame_to_time(frames=np.arange(stop=reinitialized_extractor.get_num_frames()))
    def get_timestamps(self) -> np.ndarray:
        """Return the (possibly aligned) timestamps of the current extractor."""
        return self.segmentation_extractor.frame_to_time(
            frames=np.arange(stop=self.segmentation_extractor.get_num_frames())
        )
    def set_aligned_timestamps(self, aligned_timestamps: np.ndarray):
        """Replace the extractor's timestamps with externally aligned ones."""
        self.segmentation_extractor.set_times(times=aligned_timestamps)
    def add_to_nwbfile(
        self,
        nwbfile: NWBFile,
        metadata: Optional[dict] = None,
        stub_test: bool = False,
        stub_frames: int = 100,
        include_roi_centroids: bool = True,
        include_roi_acceptance: bool = True,
        mask_type: Optional[str] = "image",  # Literal["image", "pixel", "voxel"]
        iterator_options: Optional[dict] = None,
        compression_options: Optional[dict] = None,
    ):
        """
        Parameters
        ----------
        nwbfile : NWBFile
            The NWBFile to add the plane segmentation to.
        metadata : dict, optional
            The metadata for the interface
        stub_test : bool, default: False
            If True, only write the first `stub_frames` frames.
        stub_frames : int, default: 100
        include_roi_centroids : bool, default: True
            Whether to include the ROI centroids on the PlaneSegmentation table.
            If there are a very large number of ROIs (such as in whole-brain recordings),
            you may wish to disable this for faster write speeds.
        include_roi_acceptance : bool, default: True
            Whether to include if the detected ROI was 'accepted' or 'rejected'.
            If there are a very large number of ROIs (such as in whole-brain recordings), you may wish to disable this for
            faster write speeds.
        mask_type : {'image', 'pixel', 'voxel'}, optional
            There are two types of ROI masks in NWB: ImageMasks and PixelMasks.
            Image masks have the same shape as the reference images the segmentation was applied to, and weight each pixel
            by its contribution to the ROI (typically boolean, with 0 meaning 'not in the ROI').
            Pixel masks are instead indexed by ROI, with the data at each index being the shape of the image by the number
            of pixels in each ROI.
            Voxel masks are instead indexed by ROI, with the data at each index being the shape of the volume by the number
            of voxels in each ROI.
            Specify your choice between these three as mask_type='image', 'pixel', 'voxel', or None.
            If None, the mask information is not written to the NWB file.
            Defaults to 'image'.
        iterator_options : dict, optional
            The options to use when iterating over the image masks of the segmentation extractor.
        compression_options : dict, optional
            The options to use when compressing the image masks of the segmentation extractor.
        """
        from ...tools.roiextractors import add_segmentation
        if stub_test:
            # Cap the stub at the actual number of frames available.
            stub_frames = min([stub_frames, self.segmentation_extractor.get_num_frames()])
            segmentation_extractor = self.segmentation_extractor.frame_slice(start_frame=0, end_frame=stub_frames)
        else:
            segmentation_extractor = self.segmentation_extractor
        add_segmentation(
            segmentation_extractor=segmentation_extractor,
            nwbfile=nwbfile,
            metadata=metadata,
            include_roi_centroids=include_roi_centroids,
            include_roi_acceptance=include_roi_acceptance,
            mask_type=mask_type,
            iterator_options=iterator_options,
            compression_options=compression_options,
        )
""" Utility functions for the tests.
Access: from test import test_utils.
"""
import os
import numpy as np
import scipy.sparse as sps
import porepy as pp
def permute_matrix_vector(A, rhs, block_dof, full_dof, grids, variables):
    """Permute the matrix and rhs from assembler order to a specified order.

    Args:
        A: global solution matrix as returned by Assembler.assemble_matrix_rhs.
        rhs: global rhs vector as returned by Assembler.assemble_matrix_rhs.
        block_dof: Map coupling a (grid, variable) pair to an block index of A, as
            returned by Assembler.assemble_matrix_rhs.
        full_dof: Number of DOFs for each pair in block_dof, as returned by
            Assembler.assemble_matrix_rhs.
        grids: Ordered list of grids defining the target block order; output
            block i corresponds to the pair (grids[i], variables[i]).
        variables: Ordered list of variable names matching ``grids``.

    Returns:
        sps.bmat(A.size): Permuted matrix.
        np.ndarray(b.size): Permuted rhs vector.
    """
    sz = len(block_dof)
    mat = np.empty((sz, sz), dtype=object)
    b = np.empty(sz, dtype=object)
    dof = np.empty(sz, dtype=object)
    # Initialize dof vector: dof[i] holds the global indices of assembler
    # block i (cumulative offsets of full_dof).
    dof[0] = np.arange(full_dof[0])
    for i in range(1, sz):
        dof[i] = dof[i - 1][-1] + 1 + np.arange(full_dof[i])
    for row in range(sz):
        # Assembler index 0
        i = block_dof[(grids[row], variables[row])]
        b[row] = rhs[dof[i]]
        for col in range(sz):
            # Assembler index 1
            j = block_dof[(grids[col], variables[col])]
            # Put the A block indexed by i and j in mat of running indexes row and col
            mat[row, col] = A[dof[i]][:, dof[j]]
    return sps.bmat(mat, format="csr"), np.concatenate(tuple(b))
def setup_flow_assembler(mdg, method, data_key=None, coupler=None):
    """Set up a standard assembler for a flow problem.

    The primary variable is named 'pressure' on subdomains and
    'mortar_flux' on interfaces.

    Parameters:
        mdg: Mixed-dimensional grid (GridBucket).
        method (EllipticDiscretization): subdomain discretization.
        data_key (str, optional): keyword identifying the data dictionary
            for subdomain and interface discretizations. Defaults to "flow".
        coupler (EllipticInterfaceLaw, optional): interface law. Defaults
            to pp.RobinCoupling.

    Returns:
        Assembler, ready to discretize and assemble the problem.
    """
    if data_key is None:
        data_key = "flow"
    if coupler is None:
        coupler = pp.RobinCoupling(data_key, method)
    # MVEM and RT0 are mixed methods: they carry face unknowns as well.
    mixed_form = isinstance(method, (pp.MVEM, pp.RT0))
    for _, data in mdg.subdomains(return_data=True):
        if mixed_form:
            data[pp.PRIMARY_VARIABLES] = {"pressure": {"cells": 1, "faces": 1}}
        else:
            data[pp.PRIMARY_VARIABLES] = {"pressure": {"cells": 1}}
        data[pp.DISCRETIZATION] = {"pressure": {"diffusive": method}}
    for intf, data in mdg.interfaces(return_data=True):
        g2, g1 = mdg.interface_to_subdomain_pair(intf)
        data[pp.PRIMARY_VARIABLES] = {"mortar_flux": {"cells": 1}}
        data[pp.COUPLING_DISCRETIZATION] = {
            "lambda": {
                g1: ("pressure", "diffusive"),
                g2: ("pressure", "diffusive"),
                intf: ("mortar_flux", coupler),
            }
        }
        data[pp.DISCRETIZATION_MATRICES] = {"flow": {}}
    return pp.Assembler(mdg)
def solve_and_distribute_pressure(mdg, assembler):
    """Discretize, assemble and solve the pressure equation, then distribute
    the solution back onto the grid.

    Parameters:
        mdg: Mixed-dimensional grid of the problem to be solved.
        assembler (Assembler): assembler wrapping the discretization.
    """
    assembler.discretize()
    matrix, rhs = assembler.assemble_matrix_rhs()
    # Dense solve is fine for the small test problems this helper serves.
    pressure = np.linalg.solve(matrix.A, rhs)
    assembler.distribute_variable(pressure)
def compare_arrays(a, b, tol=1e-4, sort=True):
    """Check that two arrays are equal up to a column permutation.

    Typical usage is to compare coordinate arrays.

    Parameters:
        a, b (np.array): Arrays to be compared.
        tol (double, optional): Tolerance on the squared column distance.
        sort (boolean, defaults to True): Sort arrays columnwise before
            comparing.

    Returns:
        True if every column of each array is within tol of some column of
        the other.
    """
    a = np.atleast_2d(a)
    b = np.atleast_2d(b)
    if a.shape != b.shape:
        return False
    if sort:
        a = np.sort(a, axis=0)
        b = np.sort(b, axis=0)

    def _cols_covered(x, y):
        # Every column of x must have a close counterpart among y's columns.
        for col in x.T:
            sq_dist = np.sum((y - col.reshape((-1, 1))) ** 2, axis=0)
            if sq_dist.min() > tol:
                return False
        return True

    return _cols_covered(a, b) and _cols_covered(b, a)
def METHOD_NAME(file_name):
    """Remove ``file_name`` from disk when it exists (test cleanup helper)."""
    if os.path.exists(file_name):
        # os.unlink is an alias of os.remove.
        os.unlink(file_name)
def compare_grids(g1, g2):
    """Return True when two grids share the same topology and geometry."""
    if g1.dim != g2.dim:
        return False
    counts_1 = (g1.num_cells, g1.num_faces, g1.num_nodes)
    counts_2 = (g2.num_cells, g2.num_faces, g2.num_nodes)
    if counts_1 != counts_2:
        return False
    # Topology: sparse incidence matrices must agree entry by entry.
    face_node_diff = g1.face_nodes - g2.face_nodes
    if face_node_diff.data.size > 0 and np.max(np.abs(face_node_diff.data)) > 0.1:
        return False
    cell_face_diff = g1.cell_faces - g2.cell_faces
    if cell_face_diff.data.size > 0 and np.max(np.abs(cell_face_diff.data)) > 0.1:
        return False
    # Geometry: 0d grids have no nodes, so compare cell centers instead.
    if g1.dim > 0:
        coord = g1.nodes - g2.nodes
    else:
        coord = g1.cell_centers - g2.cell_centers
    if np.sum(coord**2, axis=0).max() > 1e-16:
        return False
    # Other geometric quantities are derived from those already checked,
    # thus the grids are identical.
    return True
def compare_mortar_grids(mg1, mg2):
    """Return True when two mortar grids match in dimension, cell count and
    side grids (compared pairwise with compare_grids)."""
    if (mg1.dim, mg1.num_cells) != (mg2.dim, mg2.num_cells):
        return False
    for side, side_grid_1 in mg1.side_grids.items():
        if side not in mg2.side_grids:
            return False
        if not compare_grids(side_grid_1, mg2.side_grids[side]):
            return False
    return True
def compare_md_grids(mdg1, mdg2):
    """Compare two mixed-dimensional grids, dimension by dimension.

    Returns False as soon as a dimension has a differing number of
    subdomains or a subdomain pair that fails compare_grids.
    NOTE(review): as visible here the function returns None on full
    success and does not compare interface (mortar) grids -- confirm
    whether the definition continues beyond this excerpt.
    """
    for dim in range(4):
        subdomains_1 = mdg1.subdomains(dim=dim)
        subdomains_2 = mdg2.subdomains(dim=dim)
        # Two mdgs are considered equal only if the grids are returned in the same
        # order. This may be overly restrictive, but it will have to do.
        if len(subdomains_1) != len(subdomains_2):
            return False
        for sd1, sd2 in zip(subdomains_1, subdomains_2):
            if not compare_grids(sd1, sd2):
                return False
    # Not sure how to do testing on Mortar grids.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pyowm.commons import exceptions
def METHOD_NAME(weather, status, weather_code_registry):
    """
    Checks if the weather status code of a *Weather* object corresponds to the
    detailed status indicated, looked up in the provided
    *WeatherCodeRegistry* object.

    :param weather: the *Weather* object whose status code is to be checked
    :type weather: *Weather*
    :param status: a string indicating a detailed weather status
    :type status: str
    :param weather_code_registry: a *WeatherCodeRegistry* object
    :type weather_code_registry: *WeatherCodeRegistry*
    :returns: ``True`` if the check is positive, ``False`` otherwise
    """
    detailed_status = weather_code_registry.status_for(weather.weather_code)
    return detailed_status.lower() == status
def any_status_is(weather_list, status, weather_code_registry):
    """
    Checks if the weather status code of any of the *Weather* objects in the
    provided list corresponds to the detailed status indicated. The lookup is
    performed against the provided *WeatherCodeRegistry* object.

    :param weather_list: a list of *Weather* objects
    :type weather_list: list
    :param status: a string indicating a detailed weather status
    :type status: str
    :param weather_code_registry: a *WeatherCodeRegistry* object
    :type weather_code_registry: *WeatherCodeRegistry*
    :returns: ``True`` if the check is positive, ``False`` otherwise
    """
    for weather in weather_list:
        if METHOD_NAME(weather, status, weather_code_registry):
            return True
    return False
def filter_by_status(weather_list, status, weather_code_registry):
    """
    Filters out from the provided list of *Weather* objects a sublist of items
    having a status corresponding to the provided one. The lookup is performed
    against the provided *WeatherCodeRegistry* object.

    :param weather_list: a list of *Weather* objects
    :type weather_list: list
    :param status: a string indicating a detailed weather status
    :type status: str
    :param weather_code_registry: a *WeatherCodeRegistry* object
    :type weather_code_registry: *WeatherCodeRegistry*
    :returns: a sublist of the input *Weather* objects whose detailed
        status matches ``status`` (the original docstring wrongly claimed
        a boolean return)
    """
    return [
        weather
        for weather in weather_list
        if METHOD_NAME(weather, status, weather_code_registry)
    ]
def is_in_coverage(unixtime, weathers_list):
    """
    Checks if the supplied UNIX time is contained into the time range
    (coverage) defined by the most ancient and most recent *Weather* objects
    in the supplied list.

    :param unixtime: the UNIX time to be searched in the time range
    :type unixtime: int
    :param weathers_list: the list of *Weather* objects to be scanned for
        global time coverage
    :type weathers_list: list
    :returns: ``True`` if the UNIX time is contained into the time range,
        ``False`` otherwise
    """
    if not weathers_list:
        return False
    reference_times = [weather.reference_time() for weather in weathers_list]
    return min(reference_times) <= unixtime <= max(reference_times)
def find_closest_weather(weathers_list, unixtime):
    """
    Extracts from the provided list of Weather objects the item which is
    closest in time to the provided UNIX time.

    :param weathers_list: a list of *Weather* objects
    :type weathers_list: list
    :param unixtime: a UNIX time
    :type unixtime: int
    :returns: the *Weather* object which is closest in time or ``None`` if the
        list is empty
    :raises: *NotFoundError* when the time is outside the list's coverage
    """
    if not weathers_list:
        return None
    if not is_in_coverage(unixtime, weathers_list):
        raise exceptions.NotFoundError(
            'Error: the specified time is not included in the weather coverage range')
    # min() keeps the first item on ties, matching the original scan.
    return min(weathers_list,
               key=lambda weather: abs(weather.reference_time() - unixtime))
import sys, re, csv, cgi
from optparse import OptionParser
class CsvOutput:
    """Writes filter results to stdout as CSV rows."""

    def __init__(self):
        # Bind stdout once; every row goes through the same csv writer.
        self.writer = csv.writer(sys.stdout)

    def header(self, headerRow):
        """Emit the column-name row."""
        self.writer.writerow(headerRow)

    def footer(self):
        """CSV needs no trailer; present for interface symmetry with HtmlOutput."""
        pass

    def row(self, rowdata):
        """Emit one data row."""
        self.writer.writerow(rowdata)
class HtmlOutput:
    """Writes filter results to stdout as an XHTML table.

    The last column of each row is assumed to hold a (possibly empty)
    stacktrace; it is rendered as a full-width extra row under the data
    row rather than as a normal cell.
    """

    def header(self, headerRow):
        """Print the document preamble and the table header row.

        The final column (stacktrace) is deliberately omitted from the
        header; stacktraces are rendered separately by row().
        """
        print('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">')
        print('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">')
        print('<body><table><tr>')
        for header in headerRow[:-1]:
            print("<th>" + cgi.escape(header) + "</th>")
        print("</tr>")

    def footer(self):
        """Close the table and the document."""
        print("</table></body></html>")

    def row(self, rowdata):
        """Print one data row, then the stacktrace (last column) as an
        extra row spanning all data columns."""
        print("<tr>")
        for data in rowdata[:-1]:
            print("<td>" + cgi.escape(data) + "</td>")
        print("</tr>")
        stacktrace = rowdata[-1]
        if stacktrace:
            lines = stacktrace.splitlines()
            # BUG FIX: the colspan count must be converted with str();
            # concatenating an int to a str raised TypeError here.
            print('<tr><td colspan="' + str(len(rowdata) - 1) + '">')
            print(cgi.escape(lines[0]))
            for line in lines[1:]:
                print('<br/>&nbsp;    ')
                print(cgi.escape(line))
            print('</td></tr>')
def xor(a, b):
    """Exclusive-or of two truthiness values (truthy result when exactly
    one of the operands is truthy)."""
    if a:
        return not b
    return b and not a
def checkTimeFormat(optionsParser, time):
    """Validate a --before/--after time string.

    Accepts None/empty (meaning "no filter") or HH[:MM[:SS[.MMM]]].  On an
    invalid format, prints an error plus the option help and exits with
    status 2.

    BUG FIX: the original regex made the seconds group mandatory whenever
    minutes were present, so valid inputs such as "12:30" were rejected;
    the milliseconds part is now also correctly nested under seconds.
    """
    if time and not re.match(r"^[0-2]\d(?::[0-5]\d(?::[0-5]\d(?:\.\d\d\d)?)?)?$", time):
        print("\nERROR: Times must be in the format HH[:MM[:SS[.MMM]]]\n")
        optionsParser.print_help()
        sys.exit(2)
def parseArguments():
    """Parse command-line options; return (options, logfile_path).

    Exits with status 2 when no single FILE argument is given or when a
    time filter is malformed.

    FIX: sys.maxint exists only on Python 2; the portable sys.maxsize
    (available since 2.6) is used as the "no limit" default instead.
    """
    parser = OptionParser(usage="usage: %prog [options] FILE", version="%prog 3.0")
    parser.add_option("-m", "--maxrows", type="int", default=sys.maxsize,
                      help="maximum number of logging rows to return")
    parser.add_option("-a", "--after", type="string", metavar="TIME",
                      help="only show logs after the given time in the format HH[:MM[:SS[.MMM]]]")
    parser.add_option("-b", "--before", type="string", metavar="TIME",
                      help="only show logs before the given time in the format HH[:MM[:SS[.MMM]]]")
    parser.add_option("-e", "--equals", action="append", nargs=2, metavar="COLUMN VALUE",
                      help="only show logs where the given column (by name or zero-based number) matches the given value")
    parser.add_option("-n", "--notequals", action="append", nargs=2, metavar="COLUMN VALUE",
                      help="only show logs where the given column (by name or zero-based number) does not match the given value")
    parser.add_option("--empty", action="append", metavar="COLUMN",
                      help="only show logs where the given column (by name or zero-based number) is empty")
    parser.add_option("--notempty", action="append", metavar="COLUMN",
                      help="only show logs where the given column (by name or zero-based number) is not empty")
    parser.add_option("-v", "--invert", action="store_true", default=False,
                      help="print out results that do not match any specified filters")
    parser.add_option("--xhtml", action="store_true", default=False,
                      help="print out results in HTML format")
    (options, args) = parser.parse_args()
    if len(args) != 1:
        parser.print_help()
        sys.exit(2)
    checkTimeFormat(parser, options.after)
    checkTimeFormat(parser, options.before)
    return (options, args[0])
def createOutputter(options):
    """Choose the output backend requested on the command line."""
    return HtmlOutput() if options.xhtml else CsvOutput()
def getColumnIndex(headerRow, column):
    """Resolve a column given by name or zero-based number to its index.

    Prints an error and exits with status 2 when the column cannot be
    resolved.

    FIX: the name lookup now catches only ValueError (raised by
    list.index) instead of a bare except that would also swallow
    KeyboardInterrupt/SystemExit; py2-only print statement made portable.
    """
    columnNum = -1
    try:
        columnNum = int(column)
    except ValueError:
        # Not a number: fall back to a lookup by header name.
        try:
            columnNum = headerRow.index(column)
        except ValueError:
            pass
    if columnNum < 0 or columnNum >= len(headerRow):
        print("Column '" + column + "' does not exist in the data")
        sys.exit(2)
    return columnNum
def createFilters(options, headerRow):
    """Build a list of row predicates from the parsed options.

    Each filter is a callable taking one csv row (list of strings) and
    returning truthy when the row passes.

    BUG FIX: the original lambdas closed over the loop variables
    (columnNum, value).  Python closures capture variables, not values,
    so after the loop EVERY filter tested only the last column/value
    pair.  Binding them as default arguments freezes each iteration's
    values.
    """
    filters = []
    timeColumn = getColumnIndex(headerRow, "Time")
    if options.after:
        filters.append(lambda rowdata: rowdata[timeColumn] > options.after)
    if options.before:
        filters.append(lambda rowdata: rowdata[timeColumn] < options.before)
    if options.equals:
        for (column, value) in options.equals:
            columnNum = getColumnIndex(headerRow, column)
            filters.append(lambda rowdata, c=columnNum, v=value: rowdata[c] == v)
    if options.notequals:
        for (column, value) in options.notequals:
            columnNum = getColumnIndex(headerRow, column)
            filters.append(lambda rowdata, c=columnNum, v=value: rowdata[c] != v)
    if options.empty:
        for column in options.empty:
            columnNum = getColumnIndex(headerRow, column)
            filters.append(lambda rowdata, c=columnNum: not rowdata[c])
    if options.notempty:
        for column in options.notempty:
            columnNum = getColumnIndex(headerRow, column)
            filters.append(lambda rowdata, c=columnNum: rowdata[c])
    return filters
def METHOD_NAME():
    """Entry point: parse arguments, stream the CSV log, emit filtered rows.

    FIXES: reader.next() is Python-2-only (replaced by the portable
    next(reader)); the log file handle is now closed deterministically
    via a with-block instead of leaking.
    """
    (options, logfile) = parseArguments()
    with open(logfile) as logstream:
        reader = csv.reader(logstream)
        headerRow = next(reader)
        filters = createFilters(options, headerRow)
        outputter = createOutputter(options)
        outputter.header(headerRow)
        rowsOutput = 0
        for rowdata in reader:
            if rowsOutput >= options.maxrows:
                break
            filtersSayYes = all(f(rowdata) for f in filters)
            # --invert flips the filter decision (exclusive or).
            if xor(filtersSayYes, options.invert):
                rowsOutput += 1
                outputter.row(rowdata)
        outputter.footer()
# Script entry point.
if __name__ == "__main__":
    METHOD_NAME()
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import hashlib # for SHA-256
import hmac # HMAC–SHA-256
from struct import pack, unpack
from typing import Optional
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from magma.subscriberdb.crypto.EC import ECDH_SECP256R1, KDF, X25519
from magma.subscriberdb.crypto.utils import CMException
########################################################
# CryptoMobile python toolkit
#
# ECIES: Elliptic Curve Integrated Encryption Scheme
# as defined by 3GPP to protect subscriber's fixed identity SUPI into SUCI
# TS 33.501, section 6.12 and annex C
#
# Based on an elliptic curve algorithm:
# - profile A: Curve25519 / X25519
# - profile B: NIST secp256r1
# and ANSI-X9.63-KDF, SHA-256 hash function, HMAC–SHA-256 MAC function and
# AES–128 in CTR mode encryption function
#######################################################
# Module-wide cryptography backend, shared by every Cipher instance below.
_backend = default_backend()
class AES_CTR_cryptography(object):  # noqa: N801
    """AES in counter (CTR) mode.

    CTR mode turns AES into a stream cipher, so encryption and
    decryption are the same keystream-XOR operation.
    """

    # AES block size in bytes.
    block_size = 16

    def __init__(self, key, nonce, cnt=0):
        """Set up the CTR keystream generator.

        Args:
            key : 16 bytes buffer
            nonce: 8 most significant bytes buffer of the counter initial
                value; the counter is incremented starting at 0
            cnt : uint64, 8 least significant bytes value of the counter,
                default is 0

        Returns: None
        """
        # Full 16-byte initial counter block = nonce || big-endian uint64 cnt.
        initial_counter = nonce + pack('>Q', cnt)
        cipher = Cipher(
            algorithms.AES(key),
            modes.CTR(initial_counter),
            backend=_backend,
        )
        self.aes = cipher.encryptor()

    def encrypt(self, data):
        """XOR *data* with the keystream set up at initialization."""
        return self.aes.update(data)

    # In CTR mode decryption is identical to encryption.
    decrypt = encrypt
class ECIES_UE(object):  # noqa: N801
    """UE-side ECIES: conceals the subscriber's fixed identity SUPI
    into a SUCI."""

    def __init__(self, profile):
        """Select the elliptic curve for the given 3GPP profile
        ('A' -> X25519, 'B' -> NIST secp256r1)."""
        if profile == 'A':
            self.EC = X25519()
        elif profile == 'B':
            self.EC = ECDH_SECP256R1()
        else:
            raise CMException('unknown ECIES profile %s' % profile)
        self.ephemeral_key = None
        self.shared_key = None

    def METHOD_NAME(self, hn_pub_key, fresh=True):
        """Derive the shared keystream from a UE ephemeral keypair
        (regenerated when *fresh* is True) and the HN public key.

        Args:
            hn_pub_key: home-network public key
            fresh: regenerate the UE ephemeral keypair first (default True)

        Returns: None
        """
        if fresh:
            # Regenerate a new UE ephemeral keypair.
            self.EC.generate_keypair()
        # Publish the UE ephemeral pubkey alongside the ciphertext later.
        self.ephemeral_key = self.EC.get_pubkey()
        # KDF over the EC shared secret, bound to the ephemeral pubkey.
        self.shared_key = KDF(self.ephemeral_key, self.EC.METHOD_NAME(hn_pub_key))

    def protect(self, plaintext):
        """Protect *plaintext*; returns (UE ephemeral pubkey, ciphertext,
        8-byte truncated MAC).

        Args:
            plaintext: bytes to protect

        Returns:
            ephemeral_key: UE ephemeral public key
            ciphertext: encrypted payload
            mac: truncated HMAC-SHA-256 tag over the ciphertext
        """
        # Carve the KDF output into cipher and MAC material.
        k_enc = self.shared_key[:16]
        ctr_nonce = self.shared_key[16:24]
        ctr_start = unpack('>Q', self.shared_key[24:32])[0]
        k_mac = self.shared_key[32:64]
        # Encryption (AES-128 CTR), then MAC over the ciphertext.
        cipher = AES_CTR_cryptography(k_enc, ctr_nonce, ctr_start)
        ciphertext = cipher.encrypt(plaintext)
        mac = hmac.new(k_mac, ciphertext, hashlib.sha256).digest()
        return self.ephemeral_key, ciphertext, mac[0:8]  # noqa: WPS349
class ECIES_HN(object):  # noqa: N801
    """Home-network-side ECIES: recovers a subscriber's fixed identity
    SUPI from a SUCI."""

    def __init__(self, hn_priv_key, profile='A', raw_keypair=None):
        """Bind the HN private key for the given 3GPP profile; for profile
        'B' an explicit raw (priv, pub) pair takes precedence."""
        if profile == 'A':
            self.EC = X25519(loc_privkey=hn_priv_key)
        elif profile == 'B':
            if isinstance(raw_keypair, (tuple, list)) and len(raw_keypair) == 2:
                self.EC = ECDH_SECP256R1(raw_keypair=raw_keypair)
            else:
                self.EC = ECDH_SECP256R1(loc_privkey=hn_priv_key)
        else:
            raise CMException('unknown ECIES profile %s' % profile)

    def unprotect(self, ue_pubkey, ciphertext, mac) -> Optional[bytes]:
        """Verify the MAC and decrypt the ciphertext.

        Args:
            ue_pubkey: UE ephemeral public key
            ciphertext: payload to decrypt
            mac: 8-byte truncated MAC received with the ciphertext

        Returns:
            decrypted bytes, or ``None`` when MAC verification fails
        """
        shared_key = KDF(ue_pubkey, self.EC.METHOD_NAME(ue_pubkey))
        # Same KDF-output layout as on the UE side.
        k_enc = shared_key[:16]
        ctr_nonce = shared_key[16:24]
        ctr_start = unpack('>Q', shared_key[24:32])[0]
        k_mac = shared_key[32:64]
        # Constant-time comparison of the truncated MAC.
        expected_mac = hmac.new(k_mac, ciphertext, hashlib.sha256).digest()
        mac_ok = hmac.compare_digest(expected_mac[0:8], mac)  # noqa: WPS349
        # Decrypt unconditionally; release plaintext only on a valid MAC.
        cipher = AES_CTR_cryptography(k_enc, ctr_nonce, ctr_start)
        cleartext = cipher.decrypt(ciphertext)
        if mac_ok:
            return cleartext
        return None
"""Compare vis_cpu with pyuvsim visibilities."""
import pytest
import copy
import numpy as np
from astropy import units
from astropy.coordinates import Latitude, Longitude
from astropy.time import Time
from astropy.units import Quantity
from pyradiosky import SkyModel
from pyuvsim import AnalyticBeam, simsetup, uvsim
from pyuvsim.telescope import BeamList
from hera_sim import io
from hera_sim.beams import PolyBeam
from hera_sim.visibilities import ModelData, VisCPU, VisibilitySimulation
# Simulation dimensions shared by all helpers below.
nfreq = 3  # number of frequency channels
ntime = 20  # number of integration times
nants = 4  # number of antennas
def get_uvdata(pol_array=None):
    """Build an empty UVData object for a small random 4-antenna layout.

    The NumPy RNG is re-seeded here so the antenna positions are identical
    on every call; both simulators must see the same array.

    Parameters:
        pol_array: polarizations to simulate; defaults to all four linear
            polarizations.
    """
    # HERA site coordinates (deg, deg, m) used as the telescope location.
    hera_lat = -30.7215
    hera_lon = 21.4283
    hera_alt = 1073.0
    obstime = Time("2018-08-31T04:02:30.11", format="isot", scale="utc")
    if pol_array is None:
        pol_array = np.array(["XX", "YY", "XY", "YX"])
    np.random.seed(10)
    # Random antenna locations
    x = np.random.random(nants) * 400.0  # Up to 400 metres
    y = np.random.random(nants) * 400.0
    z = np.random.random(nants) * 0.0
    ants = {i: (x[i], y[i], z[i]) for i in range(nants)}
    # Observing parameters in a UVData object
    return io.empty_uvdata(
        Nfreqs=nfreq,
        start_freq=100e6,
        channel_width=97.3e3,
        start_time=obstime.jd,
        integration_time=20.0,
        Ntimes=ntime,
        array_layout=ants,
        polarization_array=pol_array,
        telescope_location=(hera_lat, hera_lon, hera_alt),
        telescope_name="test_array",
        x_orientation="east",
        phase_type="drift",
        vis_units="Jy",
        write_files=False,
    )
@pytest.fixture(scope="function")
def uvdata_allpols():
    """Fresh UVData object carrying all four linear polarizations."""
    return get_uvdata()
def get_sky_model(uvdata, nsource):
    """Build a point-source SkyModel: one fixed source near zenith plus
    ``nsource - 1`` random sources with flat spectra (index 0).

    NOTE: draws from the global NumPy RNG, so the call order relative to
    get_uvdata matters for reproducibility.
    """
    # One fixed source plus random other sources
    sources = [
        [125.7, -30.72, 2, 0],  # Fix a single source near zenith
    ]
    if nsource > 1:  # Add random other sources
        ra = np.random.uniform(low=0.0, high=360.0, size=nsource - 1)
        dec = -30.72 + np.random.random(nsource - 1) * 10.0
        flux = np.random.random(nsource - 1) * 4
        for i in range(nsource - 1):
            sources.append([ra[i], dec[i], flux[i], 0])
    sources = np.array(sources)
    # Source locations and frequencies
    ra_dec = np.deg2rad(sources[:, :2])
    freqs = np.unique(uvdata.freq_array)
    # Stokes for the first frequency only. Stokes for other frequencies
    # are calculated later.
    stokes = np.zeros((4, 1, ra_dec.shape[0]))
    stokes[0, 0] = sources[:, 2]
    reference_frequency = np.full(len(ra_dec), freqs[0])
    # Set up sky model
    sky_model = SkyModel(
        name=[str(i) for i in range(len(ra_dec))],
        ra=Longitude(ra_dec[:, 0], "rad"),
        dec=Latitude(ra_dec[:, 1], "rad"),
        spectral_type="spectral_index",
        spectral_index=sources[:, 3],
        stokes=stokes * units.Jy,
        reference_frequency=Quantity(reference_frequency, "Hz"),
        frame="icrs",
    )
    # Calculate stokes at all the frequencies.
    sky_model.at_frequencies(Quantity(freqs, "Hz"), inplace=True)
    return sky_model
def get_beams(beam_type, polarized):
    """Get a list of beam objects, one per antenna.

    Supported beam types are 'gaussian' (AnalyticBeam) and 'PolyBeam';
    anything else raises ValueError.
    """
    if beam_type == "gaussian":
        return [AnalyticBeam("gaussian", sigma=0.103)]
    if beam_type == "PolyBeam":
        # Polynomial coefficients of the HERA-like beam model.
        beam_coeffs = [
            2.35088101e-01,
            -4.20162599e-01,
            2.99189140e-01,
            -1.54189057e-01,
            3.38651457e-02,
            3.46936067e-02,
            -4.98838130e-02,
            3.23054464e-02,
            -7.56006552e-03,
            -7.24620596e-03,
            7.99563166e-03,
            -2.78125602e-03,
            -8.19945835e-04,
            1.13791191e-03,
            -1.24301372e-04,
            -3.74808752e-04,
            1.93997376e-04,
            -1.72012040e-05,
        ]
        return [
            PolyBeam(
                ref_freq=1e8,
                spectral_index=-0.6975,
                beam_coeffs=beam_coeffs,
                polarized=polarized,
            )
        ]
    raise ValueError("beam_type '%s' not recognized" % beam_type)
@pytest.mark.parametrize(
    "nsource,beam_type,polarized",
    [
        (1, "gaussian", False),
        (1, "PolyBeam", False),
        (1, "PolyBeam", True),
        (100, "gaussian", False),
        (100, "PolyBeam", False),
        (100, "PolyBeam", True),
    ],
)
def METHOD_NAME(uvdata_allpols, nsource, beam_type, polarized):
    """Compare vis_cpu and pyuvsim simulated visibilities.

    Runs the same sky model and beams through both simulators and checks
    that every baseline's 'xx' visibilities agree within a tolerance that
    loosens with the number of sources.
    """
    sky_model = get_sky_model(uvdata_allpols, nsource)
    # Beam models
    beams = get_beams(beam_type=beam_type, polarized=polarized)
    beam_dict = {str(i): 0 for i in range(nants)}
    # ---------------------------------------------------------------------------
    # (1) Run vis_cpu
    # ---------------------------------------------------------------------------
    # Trim unwanted polarizations: unpolarized vis_cpu only handles 'ee'.
    uvdata_viscpu = copy.deepcopy(uvdata_allpols)
    if not polarized:
        uvdata_viscpu.select(polarizations=["ee"], inplace=True)
    # Construct simulator object and run
    simulator = VisCPU(
        ref_time=Time("2018-08-31T04:02:30.11", format="isot", scale="utc"),
        use_gpu=False,
    )
    # TODO: if we update the PolyBeam API so that it doesn't *require* 2 feeds,
    # we can get rid of this.
    vis_cpu_beams = [copy.deepcopy(beam) for beam in beams]
    if not polarized:
        for beam in vis_cpu_beams:
            beam.efield_to_power()
    sim = VisibilitySimulation(
        data_model=ModelData(
            uvdata=uvdata_viscpu, sky_model=sky_model, beams=vis_cpu_beams
        ),
        simulator=simulator,
    )
    sim.simulate()
    uvd_viscpu = sim.uvdata
    # ---------------------------------------------------------------------------
    # (2) Run pyuvsim
    # ---------------------------------------------------------------------------
    uvd_uvsim = uvsim.run_uvdata_uvsim(
        uvdata_allpols,
        BeamList(beams),
        beam_dict=beam_dict,
        catalog=simsetup.SkyModelData(sky_model),
        quiet=True,
    )
    # ---------------------------------------------------------------------------
    # Compare results
    # ---------------------------------------------------------------------------
    # Set relative/absolute tolerances depending on no. of sources
    # (N.B. vis_cpu source position correction approximation degrades with time)
    if nsource < 10:
        # Very stringent for a few sources
        rtol = 1e-4
        atol = 1e-7
    else:
        # Within 0.1% or so for many sources
        rtol = 1e-3
        atol = 1e-5
    # Check every baseline, including autos (i == j).
    for i in range(nants):
        for j in range(nants):
            print("Baseline: ", i, j)
            np.testing.assert_allclose(
                uvd_uvsim.get_data((i, j, "xx")),
                uvd_viscpu.get_data((i, j, "xx")),
                atol=atol,
                rtol=rtol,
            )
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import abstractmethod
from typing import List, Dict, Set
from ....resource import Resource
from ....typing import BandType
from ...core import NodeRole
from ..core import NodeStatus
class AbstractClusterAPI:
@staticmethod
def _calc_statuses(
    statuses: Set[NodeStatus] = None, exclude_statuses: Set[NodeStatus] = None
) -> Set[NodeStatus]:
    """Resolve the effective set of node statuses to filter on.

    Priority: an explicit ``statuses`` wins; otherwise ``exclude_statuses``
    is subtracted from all known statuses; otherwise only READY is used.
    """
    if statuses:
        return statuses
    if exclude_statuses is not None:
        return set(NodeStatus.__members__.values()).difference(exclude_statuses)
    return {NodeStatus.READY}
@abstractmethod
async def get_supervisors(self, filter_ready: bool = True) -> List[str]:
    """
    Get supervisor addresses

    Parameters
    ----------
    filter_ready
        whether to restrict the result to ready supervisors
        (NOTE(review): inferred from the name; confirm in implementations)

    Returns
    -------
    out
        list of supervisors
    """
@abstractmethod
async def watch_supervisors(self):
    """
    Watch supervisor addresses for changes

    Returns
    -------
    out
        async generator of lists of supervisors
    """
@abstractmethod
async def watch_nodes(
    self,
    role: NodeRole,
    env: bool = False,
    resource: bool = False,
    detail: bool = False,
    statuses: Set[NodeStatus] = None,
    exclude_statuses: Set[NodeStatus] = None,
) -> List[Dict[str, Dict]]:
    """
    Watch changes of workers

    Parameters
    ----------
    role
        role of nodes to watch
    env
        receive env info
    resource
        receive resource info
    detail
        receive detail info
    statuses
        statuses to include; when absent, exclude_statuses (or READY
        only) determines the filter -- see _calc_statuses
    exclude_statuses
        statuses to filter out when ``statuses`` is not given

    Returns
    -------
    out: List[Dict[str, Dict]]
        dict of worker resources by addresses and bands
    """
@abstractmethod
async def get_nodes_info(
    self,
    nodes: List[str] = None,
    role: NodeRole = None,
    env: bool = False,
    resource: bool = False,
    detail: bool = False,
    statuses: Set[NodeStatus] = None,
    exclude_statuses: Set[NodeStatus] = None,
):
    """
    Get worker info

    Parameters
    ----------
    nodes
        address of nodes
    role
        roles of nodes
    env
        receive env info
    resource
        receive resource info
    detail
        receive detail info
    statuses
        statuses to include; when absent, exclude_statuses (or READY
        only) determines the filter -- see _calc_statuses
    exclude_statuses
        statuses to filter out when ``statuses`` is not given

    Returns
    -------
    out: Dict
        info of worker
    """
@abstractmethod
async def get_all_bands(
    self,
    role: NodeRole = None,
    statuses: Set[NodeStatus] = None,
    exclude_statuses: Set[NodeStatus] = None,
) -> Dict[BandType, Resource]:
    """
    Get all bands that can be used for computation.

    Parameters
    ----------
    role
        role of nodes to query
    statuses
        statuses to include; when absent, exclude_statuses (or READY
        only) determines the filter -- see _calc_statuses
    exclude_statuses
        statuses to filter out when ``statuses`` is not given

    Returns
    -------
    band_to_resource : dict
        Band to resource.
    """
@abstractmethod
async def watch_all_bands(
    self,
    role: NodeRole = None,
    statuses: Set[NodeStatus] = None,
    exclude_statuses: Set[NodeStatus] = None,
):
    """
    Watch all bands that can be used for computation.

    Parameters
    ----------
    role
        role of nodes to watch
    statuses
        statuses to include; when absent, exclude_statuses (or READY
        only) determines the filter -- see _calc_statuses
    exclude_statuses
        statuses to filter out when ``statuses`` is not given

    Returns
    -------
    band_to_resource : dict
        Band to resource.
    """
@abstractmethod
async def get_mars_versions(self) -> List[str]:
    """
    Get Mars versions in use in the current cluster

    Returns
    -------
    version_list : list
        List of versions
    """
@abstractmethod
async def get_node_pool_configs(self, address: str) -> List[Dict]:
    """
    Get pool configs of a Mars node

    Parameters
    ----------
    address
        address of the node to query

    Returns
    -------
    config_list : List[Dict]
        List of configs for all pool processes
    """
@abstractmethod
async def METHOD_NAME(self, address: str) -> List[Dict[int, List[str]]]:
    """
    Get current thread pool stacks of a Mars node

    FIX: declared @abstractmethod for consistency with every other
    method of this API; without it, subclasses silently inherited a
    no-op returning None.

    Parameters
    ----------
    address
        address of the node to query

    Returns
    -------
    stacks : List[Dict[int, List[str]]]
        per pool process, a mapping from thread id to that thread's
        stack lines (NOTE(review): element semantics inferred from the
        annotation; confirm in implementations)
    """
import logging
import unittest
import torch
from torch.testing._internal import common_utils
logging.getLogger("torch").setLevel(logging.WARNING)
from apex.transformer import parallel_state
from apex.transformer.pipeline_parallel import p2p_communication
from apex.transformer.testing.distributed_test_base import NcclDistributedTestBase
from apex.transformer.testing.distributed_test_base import UccDistributedTestBase
logging.getLogger("apex").setLevel(logging.DEBUG)
# [P2P Ops Involved in Pipeline Model Parallel forward/backward]
# **forward_backward_pipelining_without_interleaving**
# - send_forward / recv_forward
# - send_backward / recv_backward
# - send_forward_recv_backward
# - send_backward_recv_forward
# **forward_backward_pipelining_with_interleaving**
# - send_backward_recv_backward
# - recv_backward
# - recv_forward
# - send_forward_backward_recv_forward_backward
# - send_forward_recv_forward
class P2PCommTestBase:
    """Shared p2p_communication tests, parameterized by the distributed
    backend of the concrete subclass.

    Each test lays out all ranks as pipeline stages and exchanges small
    (2, 2) float32 CUDA tensors between adjacent stages.

    FIX: removed a leftover debug ``print(tensor)`` from
    test_no_interleaving_warmup.
    """

    # Payload layout used by every test.
    numel = 4
    shape = (2, 2)
    dtype = torch.float32

    @property
    def world_size(self):
        # Cap at 2 so the suite also runs on 2-GPU machines.
        return min(2, torch.cuda.device_count())

    def _init_model_parallel(self):
        """Lay out all ranks as pipeline stages (no tensor/virtual parallelism)."""
        parallel_state.initialize_model_parallel(
            tensor_model_parallel_size_=1,
            pipeline_model_parallel_size_=self.world_size,
            virtual_pipeline_model_parallel_size_=None,
        )

    def METHOD_NAME(self, value: int = None):
        """Return a (2, 2) float32 CUDA tensor filled with *value*."""
        return torch.tensor(
            [value] * self.numel).view(self.shape).to(device="cuda", dtype=self.dtype)

    # Brief: Simulate warm-up.
    # Brief: test `recv_forward` & `send_forward`.
    def test_no_interleaving_warmup(self):
        self.assertEqual(self.world_size, 2)
        self._init_model_parallel()
        input_tensor = None
        if parallel_state.is_pipeline_first_stage():
            tensor = self.METHOD_NAME(self.rank)
            p2p_communication.send_forward(output_tensor=tensor, tensor_shape=self.shape, dtype=self.dtype)
        else:
            input_tensor = p2p_communication.recv_forward(tensor_shape=self.shape, dtype=self.dtype)
        if parallel_state.is_pipeline_first_stage():
            self.assertIsNone(input_tensor)
        else:
            expected_input_tensor = self.METHOD_NAME(self.rank - 1)
            self.assertEqual(input_tensor, expected_input_tensor)

    # Brief: test `send_forward`, `send_forward_recv_forward`, and `recv_forward`.
    def test_send_forward_recv_forward(self):
        self._init_model_parallel()
        prev_tensor = None
        tensor = self.METHOD_NAME(self.rank)
        if parallel_state.is_pipeline_first_stage():
            p2p_communication.send_forward(output_tensor=tensor, tensor_shape=self.shape, dtype=self.dtype)
        elif parallel_state.is_pipeline_last_stage():
            prev_tensor = p2p_communication.recv_forward(tensor_shape=self.shape, dtype=self.dtype)
        else:
            prev_tensor = p2p_communication.send_forward_recv_forward(
                output_tensor=tensor,
                recv_prev=True,
                tensor_shape=self.shape,
                dtype=self.dtype,
            )
        if parallel_state.is_pipeline_first_stage():
            self.assertIsNone(prev_tensor)
        else:
            expected_prev_tensor = self.METHOD_NAME(self.rank - 1)
            self.assertEqual(prev_tensor, expected_prev_tensor)

    # Brief: test `send_backward`, `send_backward_recv_backward`, and `recv_backward`.
    def test_send_backward_recv_backward(self):
        self._init_model_parallel()
        tensor = self.METHOD_NAME(self.rank)
        next_tensor = None
        if parallel_state.is_pipeline_first_stage():
            next_tensor = p2p_communication.recv_backward(tensor_shape=self.shape, dtype=self.dtype)
        elif parallel_state.is_pipeline_last_stage():
            p2p_communication.send_backward(input_tensor_grad=tensor, tensor_shape=self.shape, dtype=self.dtype)
        else:
            next_tensor = p2p_communication.send_backward_recv_backward(
                input_tensor_grad=tensor,
                recv_next=True,
                tensor_shape=self.shape,
                dtype=self.dtype,
            )
        if parallel_state.is_pipeline_last_stage():
            self.assertIsNone(next_tensor)
        else:
            expected_next_tensor = self.METHOD_NAME(self.rank + 1)
            self.assertEqual(next_tensor, expected_next_tensor)
# n.b.(mkozuki): Intentionally skip NCCL backend tests as I trust pytorch/pytorch repo.
# UCC-backend variant: inherits all P2P test cases from P2PCommTestBase and
# only changes the distributed backend via UccDistributedTestBase.
@unittest.skipIf(torch.cuda.device_count() < 2, "Requires >= 2 GPUs")
class UccP2PCommTest(P2PCommTestBase, UccDistributedTestBase): pass
if __name__ == "__main__":
    # Delegate discovery/execution to the shared test harness entry point.
    common_utils.run_tests()
1,984 | get sample | import functools
import operator
import os
import os.path
import sys
import numpy as np
# Bamboo utilities
current_file = os.path.realpath(__file__)
current_dir = os.path.dirname(current_file)
# Make the shared test utilities package (tools) importable from the sibling
# 'common_python' directory.
sys.path.insert(0, os.path.join(os.path.dirname(current_dir), 'common_python'))
import tools
# ==============================================
# Objects for Python data reader
# ==============================================
# Note: The Python data reader imports this file as a module and calls
# the functions below to ingest data.
# Data
# Fixed seed so every process that imports this module generates the same
# sample matrix.
np.random.seed(201910213)
_num_samples = 15
_sample_size = 11
_samples = np.random.normal(size=(_num_samples,_sample_size)).astype(np.float32)
# Sample access functions
def METHOD_NAME(index):
    """Return sample `index` as a 1-D float32 array of length `_sample_size`."""
    return _samples[index,:]
def num_samples():
    """Return the total number of samples available to the data reader."""
    return _num_samples
def sample_dims():
    """Return the per-sample shape as a tuple, as required by the data reader."""
    return (_sample_size,)
# ==============================================
# NumPy softmax
# ==============================================
def numpy_log_softmax(x):
    """Log-softmax, computed with NumPy.

    The computation is performed with 64-bit floats, shifting by the maximum
    entry first for numerical stability.

    Args:
        x: 1-D array (or array-like) of values.

    Returns:
        np.ndarray: float64 array, same shape as `x`, holding log-softmax values.
    """
    # The original guard `x.dtype is not np.float64` compared a dtype
    # *instance* to the float64 *type* with `is`, which is always True, so it
    # never skipped the cast.  Convert unconditionally instead; asarray also
    # accepts plain Python sequences.
    x = np.asarray(x, dtype=np.float64)
    x = x - np.max(x)
    return x - np.log(np.sum(np.exp(x)))
# ==============================================
# Setup LBANN experiment
# ==============================================
def setup_experiment(lbann, weekly):
    """Construct LBANN experiment.

    Args:
        lbann (module): Module for LBANN Python frontend
        weekly: Unused here; accepted to match the common test-setup signature.

    Returns:
        tuple: (trainer, model, data_reader, optimizer, num_nodes) where
        num_nodes is None so the runner may pick any node count.
    """
    mini_batch_size = num_samples() // 2
    trainer = lbann.Trainer(mini_batch_size)
    model = construct_model(lbann)
    data_reader = construct_data_reader(lbann)
    optimizer = lbann.NoOptimizer()
    return trainer, model, data_reader, optimizer, None # Don't request any specific number of nodes
def construct_model(lbann):
    """Construct LBANN model.

    Builds one LogSoftmax branch per data layout plus, for each branch, a
    NumPy-computed reference metric bound enforced by a check callback.

    Args:
        lbann (module): Module for LBANN Python frontend

    Returns:
        lbann.Model: model with objectives, metrics and check callbacks.
    """
    # Input data
    # Note: Sum with a weights layer so that gradient checking will
    # verify that error signals are correct.
    x_weights = lbann.Weights(optimizer=lbann.SGD(),
                              initializer=lbann.ConstantInitializer(value=0.0),
                              name='input_weights')
    x = lbann.Sum(lbann.Reshape(lbann.Input(data_field='samples'),
                                dims=_sample_size),
                  lbann.WeightsLayer(weights=x_weights,
                                     dims=_sample_size))
    x_lbann = x

    # Objects for LBANN model
    obj = []
    metrics = []
    callbacks = []

    # The data-parallel and model-parallel sections were previously duplicated
    # inline; they differ only in the layout string and metric name, so both
    # are now produced by a shared helper.
    for data_layout in ('data_parallel', 'model_parallel'):
        _add_layout_branch(lbann, x_lbann, data_layout, obj, metrics, callbacks)

    # ------------------------------------------
    # Gradient checking
    # ------------------------------------------
    callbacks.append(lbann.CallbackCheckGradients(error_on_failure=True))

    # ------------------------------------------
    # Construct model
    # ------------------------------------------
    num_epochs = 0
    return lbann.Model(num_epochs,
                       layers=lbann.traverse_layer_graph(x_lbann),
                       objective_function=obj,
                       metrics=metrics,
                       callbacks=callbacks)

def _add_layout_branch(lbann, x_lbann, data_layout, obj, metrics, callbacks):
    """Append a LogSoftmax test branch for `data_layout` and its checked metric."""
    # 'data_parallel' -> 'data-parallel layout', matching the original names.
    metric_name = data_layout.replace('_', '-') + ' layout'
    # LBANN implementation
    y = lbann.LogSoftmax(x_lbann, data_layout=data_layout)
    z = lbann.L2Norm2(y)
    obj.append(z)
    metrics.append(lbann.Metric(z, name=metric_name))
    # NumPy implementation: expected metric is the mean squared L2 norm of the
    # log-softmax over all samples.
    vals = []
    for i in range(num_samples()):
        x = METHOD_NAME(i).astype(np.float64)
        y = numpy_log_softmax(x)
        z = tools.numpy_l2norm2(y)
        vals.append(z)
    val = np.mean(vals)
    # Tolerance scaled to float32 machine epsilon, as in the original check.
    tol = 8 * val * np.finfo(np.float32).eps
    callbacks.append(lbann.CallbackCheckMetric(
        metric=metrics[-1].name,
        lower_bound=val-tol,
        upper_bound=val+tol,
        error_on_failure=True,
        execution_modes='test'))
def construct_data_reader(lbann):
    """Construct Protobuf message for Python data reader.

    The Python data reader will import the current Python file to
    access the sample access functions.

    Args:
        lbann (module): Module for LBANN Python frontend

    Returns:
        lbann.reader_pb2.DataReader: message with one reader per role.
    """
    # Note: The training data reader should be removed when
    # https://github.com/LLNL/lbann/issues/1098 is resolved.
    message = lbann.reader_pb2.DataReader()
    # The two readers were previously written out twice verbatim; only the
    # role string differs, so build both in one pass.
    message.reader.extend([
        tools.create_python_data_reader(
            lbann,
            current_file,
            'get_sample',
            'num_samples',
            'sample_dims',
            role,
        )
        for role in ('train', 'test')
    ])
    return message
# ==============================================
# Setup PyTest
# ==============================================
# Create test functions that can interact with PyTest
# Each generated function is bound at module scope so pytest can collect it.
for _test_func in tools.create_tests(setup_experiment, __file__):
    globals()[_test_func.__name__] = _test_func
1,985 | skorch regressor explainer | import pytest
import numpy as np
import pandas as pd
from sklearn.datasets import make_classification, make_regression
from torch import nn
from skorch import NeuralNetClassifier, NeuralNetRegressor
from explainerdashboard.explainers import RegressionExplainer, ClassifierExplainer
@pytest.fixture(scope="session")
def skorch_regressor():
    """Fit a small skorch NeuralNetRegressor on synthetic regression data."""
    X, y = make_regression(100, 5, n_informative=3, random_state=0)
    X = X.astype(np.float32)
    y = y / np.std(y)
    y = y.reshape(-1, 1).astype(np.float32)
    X_df = pd.DataFrame(X, columns=['col'+str(i) for i in range(X.shape[1])])

    class MyModule(nn.Module):
        """Two-hidden-layer MLP with a single linear output unit."""

        def __init__(self, input_units=5, num_units=5, nonlin=nn.ReLU()):
            super(MyModule, self).__init__()
            self.dense0 = nn.Linear(input_units, num_units)
            self.nonlin = nonlin
            self.dense1 = nn.Linear(num_units, num_units)
            self.output = nn.Linear(num_units, 1)

        def forward(self, X, **kwargs):
            hidden = self.nonlin(self.dense0(X))
            hidden = self.nonlin(self.dense1(hidden))
            return self.output(hidden)

    model = NeuralNetRegressor(
        MyModule,
        max_epochs=20,
        lr=0.2,
        iterator_train__shuffle=True,
    )
    model.fit(X_df.values, y)
    return model, X_df, y
@pytest.fixture(scope="session")
def skorch_classifier():
    """Fit a small skorch NeuralNetClassifier on synthetic binary data."""
    X, y = make_classification(200, 5, n_informative=3, random_state=0)
    X = X.astype(np.float32)
    y = y.astype(np.int64)
    X_df = pd.DataFrame(X, columns=['col'+str(i) for i in range(X.shape[1])])

    class MyModule(nn.Module):
        """Two-hidden-layer MLP with a two-way softmax head."""

        def __init__(self, input_units=5, num_units=5, nonlin=nn.ReLU()):
            super(MyModule, self).__init__()
            self.dense0 = nn.Linear(input_units, num_units)
            self.nonlin = nonlin
            self.dense1 = nn.Linear(num_units, num_units)
            self.output = nn.Linear(num_units, 2)
            self.softmax = nn.Softmax(dim=-1)

        def forward(self, X, **kwargs):
            hidden = self.nonlin(self.dense0(X))
            hidden = self.nonlin(self.dense1(hidden))
            return self.softmax(self.output(hidden))

    model = NeuralNetClassifier(
        MyModule,
        max_epochs=20,
        lr=0.1,
    )
    model.fit(X_df.values, y)
    return model, X_df, y
@pytest.fixture(scope="session")
def METHOD_NAME(skorch_regressor):
    """RegressionExplainer built from the fitted skorch regressor fixture."""
    model, X, y = skorch_regressor
    return RegressionExplainer(model, X, y)
@pytest.fixture(scope="session")
def skorch_classifier_explainer(skorch_classifier):
    """ClassifierExplainer built from the fitted skorch classifier fixture."""
    model, X, y = skorch_classifier
    return ClassifierExplainer(model, X, y)
# --- Regression explainer tests --------------------------------------------
# These functions previously reused the exact names of the classifier tests
# defined later in this module (test_preds, test_permutation_importances, ...).
# Python keeps only the last definition of a name, so the regression variants
# were silently shadowed and never collected by pytest.  They carry a
# `_regression` suffix now so both sets actually run.
def test_preds_regression(METHOD_NAME):
    assert isinstance(METHOD_NAME.preds, np.ndarray)

def test_permutation_importances_regression(METHOD_NAME):
    assert isinstance(METHOD_NAME.get_permutation_importances_df(), pd.DataFrame)

def test_shap_base_value_regression(METHOD_NAME):
    assert isinstance(METHOD_NAME.shap_base_value(), (np.floating, float))

def test_shap_values_shape_regression(METHOD_NAME):
    assert (METHOD_NAME.get_shap_values_df().shape == (len(METHOD_NAME), len(METHOD_NAME.merged_cols)))

def test_shap_values_regression(METHOD_NAME):
    assert isinstance(METHOD_NAME.get_shap_values_df(), pd.DataFrame)

def test_mean_abs_shap_regression(METHOD_NAME):
    assert isinstance(METHOD_NAME.get_mean_abs_shap_df(), pd.DataFrame)

def test_calculate_properties_regression(METHOD_NAME):
    # Smoke test: must complete without raising.
    METHOD_NAME.calculate_properties(include_interactions=False)

def test_pdp_df_regression(METHOD_NAME):
    assert isinstance(METHOD_NAME.pdp_df("col1"), pd.DataFrame)
# --- Classifier explainer tests ---------------------------------------------
# Type-level smoke tests over the ClassifierExplainer public API.
def test_preds(skorch_classifier_explainer):
    assert isinstance(skorch_classifier_explainer.preds, np.ndarray)
def test_pred_probas(skorch_classifier_explainer):
    assert isinstance(skorch_classifier_explainer.pred_probas(), np.ndarray)
def test_permutation_importances(skorch_classifier_explainer):
    assert isinstance(skorch_classifier_explainer.get_permutation_importances_df(), pd.DataFrame)
def test_shap_base_value(skorch_classifier_explainer):
    assert isinstance(skorch_classifier_explainer.shap_base_value(), (np.floating, float))
def test_shap_values_shape(skorch_classifier_explainer):
    assert (skorch_classifier_explainer.get_shap_values_df().shape == (len(skorch_classifier_explainer), len(skorch_classifier_explainer.merged_cols)))
def test_shap_values(skorch_classifier_explainer):
    assert isinstance(skorch_classifier_explainer.get_shap_values_df(), pd.DataFrame)
def test_mean_abs_shap(skorch_classifier_explainer):
    assert isinstance(skorch_classifier_explainer.get_mean_abs_shap_df(), pd.DataFrame)
def test_calculate_properties(skorch_classifier_explainer):
    # Smoke test: must complete without raising.
    skorch_classifier_explainer.calculate_properties(include_interactions=False)
def test_pdp_df(skorch_classifier_explainer):
    assert isinstance(skorch_classifier_explainer.pdp_df("col1"), pd.DataFrame)
def test_metrics(skorch_classifier_explainer):
    assert isinstance(skorch_classifier_explainer.metrics(), dict)
def test_precision_df(skorch_classifier_explainer):
    assert isinstance(skorch_classifier_explainer.get_precision_df(), pd.DataFrame)
def test_lift_curve_df(skorch_classifier_explainer):
    assert isinstance(skorch_classifier_explainer.get_liftcurve_df(), pd.DataFrame)
def test_prediction_result_df(skorch_classifier_explainer):
    assert isinstance(skorch_classifier_explainer.prediction_result_df(0), pd.DataFrame)
1,986 | extract test key | # Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
# !/usr/bin/env python
"""
Provides "diff-like" comparison of images.
Currently relies on matplotlib for image processing so limited to PNG format.
"""
import argparse
from pathlib import Path
import re
import sys
import warnings
# Force iris.tests to use the ```tkagg``` backend by using the '-d'
# command-line argument as idiff is an interactive tool that requires a
# gui interface.
sys.argv.append("-d")
from PIL import Image # noqa
import matplotlib.image as mimg # noqa
import matplotlib.pyplot as plt # noqa
import matplotlib.testing.compare as mcompare # noqa
from matplotlib.testing.exceptions import ImageComparisonFailure # noqa
import matplotlib.widgets as mwidget # noqa
import iris.tests # noqa
import iris.tests.graphics as graphics # noqa
# Allows restoration of test id from result image name
_RESULT_NAME_PATTERN = re.compile(graphics.RESULT_PREFIX + r"(.*).png")
def METHOD_NAME(result_image_name):
    """
    Extracts the name of the test which a result image refers to
    """
    match = _RESULT_NAME_PATTERN.match(str(result_image_name))
    if match is None:
        # Guard clause: anything in the result dir must follow the naming scheme.
        raise ValueError(
            f"Incorrectly named image in result dir: {result_image_name}"
        )
    return match.group(1)
# Suffix matplotlib appends to generated difference images.
_POSTFIX_DIFF = "-failed-diff.png"
def diff_viewer(
    test_id,
    status,
    phash,
    expected_path,
    result_path,
    diff_fname,
):
    """Show expected/result/diff images side by side with Accept/Reject/Skip.

    Accept registers the result's perceptual hash and renames the result into
    place; Reject deletes the result; Skip leaves everything on disk.
    """
    fig = plt.figure(figsize=(14, 12))
    plt.suptitle(expected_path.name)
    ax = plt.subplot(221)
    ax.imshow(mimg.imread(expected_path))
    ax = plt.subplot(222, sharex=ax, sharey=ax)
    ax.imshow(mimg.imread(result_path))
    ax = plt.subplot(223, sharex=ax, sharey=ax)
    ax.imshow(mimg.imread(diff_fname))
    result_dir = result_path.parent
    repo = graphics.read_repo_json()
    def accept(event):
        # Register the new hash unless the test id is already recorded.
        if test_id not in repo:
            repo[test_id] = phash
            graphics.write_repo_json(repo)
            out_file = result_dir / (test_id + ".png")
            result_path.rename(out_file)
            msg = f"ACCEPTED: {result_path.name} -> {out_file.name}"
            print(msg)
        else:
            msg = f"DUPLICATE: {result_path.name} -> {expected_path.name} (ignored)"
            print(msg)
            result_path.unlink()
        diff_fname.unlink()
        plt.close()
    def reject(event):
        # Rejection always removes both the result and the diff image.
        if test_id not in repo:
            print(f"REJECTED: {result_path.name}")
        else:
            msg = f"DUPLICATE: {result_path.name} -> {expected_path.name} (ignored)"
            print(msg)
        result_path.unlink()
        diff_fname.unlink()
        plt.close()
    def skip(event):
        # Let's keep both the result and the diff files.
        print(f"SKIPPED: {result_path.name}")
        plt.close()
    # Button axes positioned along the bottom of the figure.
    ax_accept = plt.axes([0.59, 0.05, 0.1, 0.075])
    ax_reject = plt.axes([0.7, 0.05, 0.1, 0.075])
    ax_skip = plt.axes([0.81, 0.05, 0.1, 0.075])
    baccept = mwidget.Button(ax_accept, "Accept")
    baccept.on_clicked(accept)
    breject = mwidget.Button(ax_reject, "Reject")
    breject.on_clicked(reject)
    bskip = mwidget.Button(ax_skip, "Skip")
    bskip.on_clicked(skip)
    plt.text(0.59, 0.15, status, transform=fig.transFigure)
    plt.show()
def step_over_diffs(result_dir, display=True):
    """Yield comparison data for each test result PNG in `result_dir`.

    For every registered result image this generates the diff file (when the
    images differ) and yields either (phash, expected, result, diff) or, with
    `display=True`, that tuple prefixed with (test_key, status).
    """
    processed = False
    if display:
        msg = "\nComparing the expected image with the test result image."
        print(msg)
    # Remove old image diff results.
    for fname in result_dir.glob(f"*{_POSTFIX_DIFF}"):
        fname.unlink()
    reference_image_dir = Path(iris.tests.get_data_path("images"))
    repo = graphics.read_repo_json()
    # Filter out all non-test result image files.
    results = []
    for fname in sorted(result_dir.glob(f"{graphics.RESULT_PREFIX}*.png")):
        # We only care about PNG images.
        try:
            im = Image.open(fname)
            if im.format != "PNG":
                # Ignore - it's not a png image.
                continue
        except IOError:
            # Ignore - it's not an image.
            continue
        results.append(fname)
    count = len(results)
    for count_index, result_path in enumerate(results):
        test_key = METHOD_NAME(result_path.name)
        test_key = graphics.fully_qualify(test_key, repo)
        reference_image_path = reference_image_dir / (test_key + ".png")
        try:
            # Calculate the test result perceptual image hash.
            phash = graphics.get_phash(result_path)
            distance = graphics.get_phash(reference_image_path) - phash
        except FileNotFoundError:
            wmsg = "Ignoring unregistered test result {!r}."
            warnings.warn(wmsg.format(test_key))
            continue
        processed = True
        try:
            # Creates the diff file when the images aren't identical.
            mcompare.compare_images(reference_image_path, result_path, tol=0)
        except (ValueError, ImageComparisonFailure) as e:
            # Known comparison failures are reported and skipped; any other
            # exception propagates naturally with its stack trace.  (This was
            # previously `except Exception` plus isinstance checks and a
            # re-raise — equivalent but noisier.)
            print(f"Could not compare {result_path}: {e}")
            continue
        diff_path = result_dir / Path(f"{result_path.stem}{_POSTFIX_DIFF}")
        args = phash, reference_image_path, result_path, diff_path
        if display:
            status = f"Image {count_index + 1} of {count}: hamming distance = {distance}"
            prefix = test_key, status
            yield prefix + args
        else:
            yield args
    if display and not processed:
        print("\nThere are no iris test result images to process.\n")
if __name__ == "__main__":
    # Default result directory lives next to the iris.tests package.
    default = Path(iris.tests.__file__).parent / Path(
        "result_image_comparison"
    )
    description = "Iris graphic test difference tool."
    formatter_class = argparse.RawTextHelpFormatter
    parser = argparse.ArgumentParser(
        description=description, formatter_class=formatter_class
    )
    help = "path to iris tests result image directory (default: %(default)s)"
    parser.add_argument("--resultdir", "-r", default=default, help=help)
    help = 'force "iris.tests" to use the tkagg backend (default: %(default)s)'
    parser.add_argument("-d", action="store_true", default=True, help=help)
    args = parser.parse_args()
    result_dir = Path(args.resultdir)
    if not result_dir.is_dir():
        emsg = f"Invalid results directory: {result_dir}"
        raise ValueError(emsg)
    # NOTE(review): the loop variable deliberately rebinds `args` with the
    # tuples yielded by step_over_diffs, shadowing the parsed CLI namespace.
    for args in step_over_diffs(result_dir):
        diff_viewer(*args)
1,987 | output | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
# NOTE(review): this command class is emitted by aaz-dev-tools (see the file
# header).  Regenerate rather than hand-editing the logic; comments here only
# orient the reader.
@register_command(
    "mesh secret show",
    is_preview=True,
)
class Show(AAZCommand):
    """Get the details of a secret.
    """
    # Maps the command to its ARM resource and API version.
    _aaz_info = {
        "version": "2018-09-01-preview",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.servicefabricmesh/secrets/{}", "2018-09-01-preview"],
        ]
    }
    def _handler(self, command_args):
        super()._handler(command_args)
        self._execute_operations()
        return self.METHOD_NAME()
    _args_schema = None
    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        # Schema is built once and cached on the class.
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
        # define Arg Group ""
        _args_schema = cls._args_schema
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        _args_schema.name = AAZStrArg(
            options=["-n", "--name"],
            help="The name of the secret.",
            required=True,
            id_part="name",
        )
        return cls._args_schema
    def _execute_operations(self):
        self.pre_operations()
        self.SecretGet(ctx=self.ctx)()
        self.post_operations()
    @register_callback
    def pre_operations(self):
        pass
    @register_callback
    def post_operations(self):
        pass
    def METHOD_NAME(self, *args, **kwargs):
        # Flattens the deserialized response stored by SecretGet.on_200.
        result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
        return result
    # HTTP GET operation for the secret resource; stores the parsed response
    # in ctx.vars.instance.
    class SecretGet(AAZHttpOperation):
        CLIENT_TYPE = "MgmtClient"
        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            if session.http_response.status_code in [200]:
                return self.on_200(session)
            return self.on_error(session.http_response)
        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabricMesh/secrets/{secretResourceName}",
                **self.url_parameters
            )
        @property
        def method(self):
            return "GET"
        @property
        def error_format(self):
            return "ODataV4Format"
        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "secretResourceName", self.ctx.args.name,
                    skip_quote=True,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
            }
            return parameters
        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2018-09-01-preview",
                    required=True,
                ),
            }
            return parameters
        @property
        def header_parameters(self):
            parameters = {
                **self.serialize_header_param(
                    "Accept", "application/json",
                ),
            }
            return parameters
        def on_200(self, session):
            data = self.deserialize_http_content(session)
            self.ctx.set_var(
                "instance",
                data,
                schema_builder=self._build_schema_on_200
            )
        _schema_on_200 = None
        @classmethod
        def _build_schema_on_200(cls):
            # Response schema is built once and cached on the class.
            if cls._schema_on_200 is not None:
                return cls._schema_on_200
            cls._schema_on_200 = AAZObjectType()
            _schema_on_200 = cls._schema_on_200
            _schema_on_200.id = AAZStrType(
                flags={"read_only": True},
            )
            _schema_on_200.location = AAZStrType(
                flags={"required": True},
            )
            _schema_on_200.name = AAZStrType(
                flags={"read_only": True},
            )
            _schema_on_200.properties = AAZObjectType(
                flags={"required": True},
            )
            _schema_on_200.tags = AAZDictType()
            _schema_on_200.type = AAZStrType(
                flags={"read_only": True},
            )
            properties = cls._schema_on_200.properties
            properties.content_type = AAZStrType(
                serialized_name="contentType",
            )
            properties.description = AAZStrType()
            properties.kind = AAZStrType(
                flags={"required": True},
            )
            properties.provisioning_state = AAZStrType(
                serialized_name="provisioningState",
                flags={"read_only": True},
            )
            properties.status = AAZStrType()
            properties.status_details = AAZStrType(
                serialized_name="statusDetails",
                flags={"read_only": True},
            )
            tags = cls._schema_on_200.tags
            tags.Element = AAZStrType()
            return cls._schema_on_200
# Generated placeholder for command-specific (de)serialization helpers.
class _ShowHelper:
    """Helper class for Show"""
__all__ = ["Show"]
1,988 | log in | # Copyright 2017 Neural Networks and Deep Learning lab, MIPT
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from datetime import datetime
from logging import getLogger
from pathlib import Path
from typing import Any, Optional, Hashable
from deeppavlov.core.common.file import read_json
from deeppavlov.core.common.paths import get_settings_path
from deeppavlov.core.data.utils import jsonify_data
LOGGER_CONFIG_FILENAME = 'dialog_logger_config.json'
LOG_TIMESTAMP_FORMAT = '%Y-%m-%d_%H-%M-%S_%f'
log = getLogger(__name__)
class DialogLogger:
    """DeepPavlov dialog logging facility.

    DialogLogger is an entity which provides tools for dialogs logging.

    Args:
        enabled: DialogLogger on/off flag.
        logger_name: Dialog logger name that is used for organising log files.

    Attributes:
        logger_name: Dialog logger name which is used for organising log files.
        log_max_size: Maximum size of log file, kb.
        log_file: Current log file object.
    """
    def __init__(self, enabled: bool = False, logger_name: Optional[str] = None) -> None:
        self.config: dict = read_json(get_settings_path() / LOGGER_CONFIG_FILENAME)
        # Logging is on when requested by either the constructor or the settings file.
        self.enabled: bool = enabled or self.config['enabled']
        if self.enabled:
            self.logger_name: str = logger_name or self.config['logger_name']
            self.log_max_size: int = self.config['logfile_max_size_kb']
            self.log_file = self._get_log_file()
            self.log_file.write('"Dialog logger initiated"\n')

    @staticmethod
    def _get_timestamp_utc_str() -> str:
        """Returns str converted current UTC timestamp.

        Returns:
            utc_timestamp_str: str converted current UTC timestamp.
        """
        utc_timestamp_str = datetime.strftime(datetime.utcnow(), LOG_TIMESTAMP_FORMAT)
        return utc_timestamp_str

    def _get_log_file(self):
        """Returns opened file object for writing dialog logs.

        Returns:
            log_file: opened Python file object.
        """
        log_dir: Path = Path(self.config['log_path']).expanduser().resolve() / self.logger_name
        log_dir.mkdir(parents=True, exist_ok=True)
        log_file_path = Path(log_dir, f'{self._get_timestamp_utc_str()}_{self.logger_name}.log')
        # Line buffering (buffering=1) so each utterance reaches disk promptly.
        log_file = open(log_file_path, 'a', buffering=1, encoding='utf8')
        return log_file

    def _log(self, utterance: Any, direction: str, dialog_id: Optional[Hashable] = None) -> None:
        """Logs single dialog utterance to current dialog log file.

        Args:
            utterance: Dialog utterance.
            direction: 'in' or 'out' utterance direction.
            dialog_id: Dialog ID.
        """
        if isinstance(utterance, str):
            pass
        elif isinstance(utterance, (list, dict)):
            utterance = jsonify_data(utterance)
        else:
            utterance = str(utterance)
        dialog_id = str(dialog_id) if not isinstance(dialog_id, str) else dialog_id
        # Rotate to a fresh file once the size limit is reached, THEN write.
        # Previously rotation and writing sat in mutually exclusive branches,
        # so the utterance that triggered rotation was silently dropped.
        if self.log_file.tell() >= self.log_max_size * 1024:
            self.log_file.close()
            self.log_file = self._get_log_file()
        try:
            log_msg = {
                'timestamp': self._get_timestamp_utc_str(),
                'dialog_id': dialog_id,
                'direction': direction,
                'message': utterance,
            }
            log_str = json.dumps(log_msg, ensure_ascii=self.config['ensure_ascii'])
            self.log_file.write(f'{log_str}\n')
        except IOError:
            log.error('Failed to write dialog log.')

    def METHOD_NAME(self, utterance: Any, dialog_id: Optional[Hashable] = None) -> None:
        """Wraps _log method for all input utterances.

        Args:
            utterance: Dialog utterance.
            dialog_id: Dialog ID.
        """
        if self.enabled:
            self._log(utterance, 'in', dialog_id)

    def log_out(self, utterance: Any, dialog_id: Optional[Hashable] = None) -> None:
        """Wraps _log method for all output utterances.

        Args:
            utterance: Dialog utterance.
            dialog_id: Dialog ID.
        """
        if self.enabled:
            self._log(utterance, 'out', dialog_id)
1,989 | import data | import os
import subprocess
import time
from benchmark_base import BenchmarkBase
import pandas as pd
import csv
# Dont fail if not imported locally
try:
from neo4j import GraphDatabase
except ImportError:
pass
def create_graph_projection(tx):
    """Create the GDS in-memory projection 'social' over all nodes and FOLLOWS edges."""
    cypher = """
        CALL gds.graph.project.cypher(
        'social',
        'MATCH (n) RETURN id(n) AS id',
        'MATCH (n)-[r:FOLLOWS]->(m) RETURN id(n) AS source, id(m) AS target')
        YIELD
        graphName AS graph, nodeQuery, nodeCount AS nodes, relationshipQuery, relationshipCount AS rels
        """
    tx.run(cypher)
def query_degree(tx):
    """Return (node id, path count) records over outgoing FOLLOWS paths."""
    cypher = """
        MATCH p=(n)-[r:FOLLOWS]->() RETURN n.id, COUNT(p)
        """
    return list(tx.run(cypher))
def get_out_neighbors(tx):
    """Return (node id, neighbour count) records over outgoing FOLLOWS edges."""
    cypher = """
        MATCH p=(n)-[:FOLLOWS]->(neighbor)
        RETURN n.id, COUNT(p)
        """
    return list(tx.run(cypher))
def run_pagerank(tx):
    """Stream PageRank scores from the 'social' GDS projection."""
    return list(tx.run("""CALL gds.pageRank.stream("social")"""))
def run_connected_components(tx):
    """Stream weakly-connected-component assignments from the 'social' projection."""
    cypher = """
        CALL gds.wcc.stream("social")
        """
    return list(tx.run(cypher))
def execute_bash_command(command, background=False):
    """Run `command` through the shell.

    Returns a (stdout, stderr) string pair, or None when `background` is True
    (the process is left running and no output is collected).
    """
    print("Executing command: ", command)
    popen_kwargs = dict(shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if background:
        # Fire and forget; the caller gets no handles to the process output.
        subprocess.Popen(command, **popen_kwargs)
        return
    process = subprocess.Popen(command, **popen_kwargs)
    stdout, stderr = process.communicate()
    return stdout.decode("utf-8"), stderr.decode("utf-8")
def write_array_to_csv(arr, file_path):
    """Write the rows of `arr` to `file_path` as tab-separated values."""
    with open(file_path, "w", newline="") as handle:
        csv.writer(handle, delimiter="\t").writerows(arr)
def modify_data():
    """Generate Neo4j-import-ready headers and CSVs under ./data/ (idempotent).

    Skips generation entirely when the node header file already exists.
    """
    print("Generating data...")
    file_dir = os.path.abspath(os.getcwd()) + "/data/"
    print("File dir: ", file_dir)
    # Presence of the node header is used as the "already generated" marker.
    if "simple-profiles-header-neo4j.csv" not in os.listdir(file_dir):
        print("Generating node header")
        write_array_to_csv(
            [["node:ID", "name"]], file_dir + "simple-profiles-header-neo4j.csv"
        )
        print("Generating relationship header")
        write_array_to_csv(
            [["node:START_ID", "node:END_ID", ":TYPE"]],
            file_dir + "simple-relationships-headers-neo4j.csv",
        )
        print("Generating node data")
        # Duplicate the id column so the file matches the (node:ID, name) header.
        df = pd.read_csv(file_dir + "simple-profiles.csv", sep="\t", header=None)
        df["copy"] = df[0].copy()
        df.to_csv(
            file_dir + "simple-profiles-neo4j.csv", index=None, header=None, sep="\t"
        )
        print("Generating relationship data")
        # Tag every edge with the FOLLOWS relationship type for the importer.
        df = pd.read_csv(file_dir + "simple-relationships.csv", sep="\t", header=None)
        df["type"] = "FOLLOWS"
        df.to_csv(
            file_dir + "simple-relationships-neo4j.csv",
            sep="\t",
            index=None,
            header=None,
        )
    print("Done")
def METHOD_NAME():
    """Bulk-import the generated CSVs into the 'neo4j' database via neo4j-admin.

    Returns the (stdout, stderr) pair from execute_bash_command.
    """
    return execute_bash_command(
        "neo4j-admin database import full --overwrite-destination --delimiter='TAB' "
        "--nodes=/var/lib/neo4j/import/data2/data/simple-profiles-header-neo4j.csv,"
        "/var/lib/neo4j/import/data2/data/simple-profiles-neo4j.csv "
        "--relationships=/var/lib/neo4j/import/data2/data/simple-relationships-headers-neo4j.csv,"
        "/var/lib/neo4j/import/data2/data/simple-relationships-neo4j.csv neo4j"
    )
# tx.run("""
# LOAD CSV FROM 'file:///data2/data/simple-relationships.csv' AS row
# FIELDTERMINATOR '\t'
# WITH row[0] AS source, row[1] AS target
# MERGE (n1:Node {id: source})
# MERGE (n2:Node {id: target})
# MERGE (n1)-[:FOLLOWS]->(n2)
# """)
class Neo4jBench(BenchmarkBase):
    """Neo4j implementation of the graph benchmark suite.

    Runs inside a neo4j:5.8.0 container with the graph-data-science plugin and
    drives queries over the Bolt protocol.
    """
    def start_docker(self, **kwargs):
        """Prepare the data files and launch the benchmark container."""
        modify_data()
        image_name = "neo4j:5.8.0"
        container_folder = "/var/lib/neo4j/import/data2/"
        envs = {
            "NEO4J_AUTH": "neo4j/password",
            "NEO4J_PLUGINS": '["graph-data-science"]',
        }
        ports = {"7474": "7474", "7687": "7687"}
        # Commands executed inside the container: install the Python driver
        # stack, then re-run this driver without the docker wrapper.
        exec_commands = [
            '/bin/bash -c "apt update && apt install python3-pip -y"',
            '/bin/bash -c "python3 -m pip install neo4j requests tqdm pandas numpy docker"',
            # '/bin/bash -c "neo4j start"',
            # '/bin/bash -c "sleep 15"',
            '/bin/bash -c "cd /var/lib/neo4j/import/data2/; python3 benchmark_driver.py --no-docker --bench neo"',
        ]
        # image_path = 'DockerFiles/pyneo' image_path ports
        code, contents = super().start_docker(
            image_name=image_name,
            container_folder=container_folder,
            exec_commands=exec_commands,
            envs=envs,
            wait=35,
            ports=ports,
            start_cmd="tail -f /dev/null",
        )
        return code, contents
    def shutdown(self):
        """Close the Bolt driver connection."""
        self.driver.close()
    def __init__(self):
        # Driver is created lazily in setup(), once the server is reachable.
        self.driver = None
    def name(self):
        """Benchmark display name."""
        return "Neo4j"
    def setup(self):
        """Import the data, start the server, connect, and build the projection."""
        uri = "bolt://localhost:7687"
        username = "neo4j"
        password = "password"
        print("Logging into neo4j")
        # self.driver = GraphDatabase.driver(uri, auth=(username, password))
        print("Importing data")
        stout, sterr = METHOD_NAME()
        print("status: ", stout)
        print("error: ", sterr)
        print("Starting neo4j")
        # Start the server in the background via the image's entrypoint.
        execute_bash_command(
            'export NEO4J_AUTH="neo4j/password"; export NEO4J_PLUGINS=\'['
            '"graph-data-science"]\';/bin/bash -c "tini -s -g -- '
            '/startup/docker-entrypoint.sh neo4j start &"',
            background=True,
        )
        # Fixed grace period for the server to come up; no readiness polling.
        print("Sleeping for 50 seconds...")
        time.sleep(50)
        # print("Updating password")
        # change user password on neo4j login
        self.driver = GraphDatabase.driver(uri, auth=(username, password))
        # with self.driver.session(database="system") as session:
        #     session.run("ALTER CURRENT USER SET PASSWORD FROM 'neo4j' TO 'password'")
        # self.driver = GraphDatabase.driver(uri, auth=(username, 'password'))
        print("Creating graph projection")
        self.execute_write(create_graph_projection)
        print("Done")
    def execute_read(self, query):
        """Run a read transaction function and return its result."""
        with self.driver.session() as session:
            return session.execute_read(query)
    def execute_write(self, query):
        """Run a write transaction function (result discarded)."""
        with self.driver.session() as session:
            session.execute_write(query)
    def degree(self):
        return self.execute_read(query_degree)
    def out_neighbours(self):
        return self.execute_read(get_out_neighbors)
    def page_rank(self):
        return self.execute_read(run_pagerank)
    def connected_components(self):
        return self.execute_read(run_connected_components)
1,990 | test delete channel member no member | import datetime
from unittest.mock import ANY
import pytest
from quetz.metrics.db_models import PackageVersionMetric
def test_get_package_list(package_version, package_name, channel_name, client):
    """The versions endpoint lists exactly the one uploaded package version."""
    client.get("/api/dummylogin/bartosz")
    response = client.get(
        f"/api/channels/{channel_name}/packages/{package_name}/versions"
    )
    assert response.status_code == 200
    expected_version = {
        "id": ANY,
        "channel_name": "my-channel",
        "package_name": "my-package",
        "platform": "linux-64",
        "version": "0.1",
        "build_string": "",
        "build_number": 0,
        "filename": "test-package-0.1-0.tar.bz2",
        "info": {},
        "uploader": {"name": "Bartosz", "avatar_url": "http:///avatar"},
        "time_created": ANY,
        "download_count": 0,
    }
    assert response.json() == [expected_version]
def test_package_version_list_by_date(
    package_version, package_name, channel_name, client
):
    """time_created__ge excludes the version for future cutoffs, includes past ones."""
    now = datetime.datetime.utcnow()
    later = now + datetime.timedelta(minutes=1)
    earlier = now - datetime.timedelta(minutes=1)
    client.get("/api/dummylogin/bartosz")
    versions_url = f"/api/channels/{channel_name}/packages/{package_name}/versions"
    # Cutoff in the future: nothing was created after it.
    response = client.get(versions_url + "?time_created__ge=" + later.isoformat())
    assert response.status_code == 200
    assert response.json() == []
    # Cutoff in the past: the uploaded version qualifies.
    response = client.get(versions_url + "?time_created__ge=" + earlier.isoformat())
    assert response.status_code == 200
    assert len(response.json()) == 1
# Only owners and maintainers may create packages in a channel.
@pytest.mark.parametrize(
    "channel_role,expected_code",
    [("owner", 201), ("maintainer", 201), ("member", 403), (None, 403)],
)
def test_add_package_permissions(auth_client, public_channel, expected_code):
    response = auth_client.post(
        f"/api/channels/{public_channel.name}/packages",
        json={"name": "test-package", "summary": "none", "description": "none"},
    )
    assert response.status_code == expected_code
@pytest.mark.parametrize(
    "channel_role,expected_code",
    [("owner", 200), ("maintainer", 200), ("member", 403), (None, 403)],
)
def test_get_channel_members(auth_client, public_channel, expected_code):
    """Only owners and maintainers may list a channel's members."""
    response = auth_client.get(f"/api/channels/{public_channel.name}/members")
    assert response.status_code == expected_code
@pytest.mark.parametrize(
    "role,expected_code",
    [
        ("owner", 201),
        ("maintainer", 201),
        ("member", 201),
        ("invalid", 422),
    ],
)
def test_post_channel_member(
    auth_client, public_channel, other_user, role, expected_code
):
    """Add a member with each valid role; an unknown role must yield 422."""
    response = auth_client.post(
        f"/api/channels/{public_channel.name}/members",
        json={"username": other_user.username, "role": role},
    )
    assert response.status_code == expected_code
    if expected_code == 201:
        # On success, the member list must reflect the assigned role.
        response = auth_client.get(f"/api/channels/{public_channel.name}/members")
        response.raise_for_status()
        for element in response.json():
            if element["user"]["username"] == other_user.username:
                assert element["role"] == role
                break
        else:
            # for/else: reached only if the user was never found above.
            raise RuntimeError(f"User '{other_user.username}' not found.")
def test_post_channel_member_unknown_user(auth_client, public_channel):
    """Adding a user that does not exist must yield 404."""
    members_url = f"/api/channels/{public_channel.name}/members"
    payload = {"username": "unknown-user", "role": "member"}
    response = auth_client.post(members_url, json=payload)
    assert response.status_code == 404
def test_delete_channel_member(auth_client, public_channel, other_user):
    """A freshly added member can be removed; the API answers 200."""
    members_url = f"/api/channels/{public_channel.name}/members"
    # Arrange: make other_user a member first.
    auth_client.post(
        members_url, json={"username": other_user.username, "role": "member"}
    )
    # Act + assert: removing the member succeeds.
    response = auth_client.delete(
        members_url, params={"username": other_user.username}
    )
    assert response.status_code == 200
def test_delete_channel_member_no_member(auth_client, public_channel, other_user):
    """Deleting a user that is not a channel member must yield 404.

    Fix: the function was named ``METHOD_NAME`` (a placeholder), which
    pytest would never collect; restored the ``test_``-prefixed name.
    """
    response = auth_client.delete(
        f"/api/channels/{public_channel.name}/members",
        params={"username": other_user.username},
    )
    assert response.status_code == 404
def test_upload_wrong_file_type(auth_client, public_channel):
    """Uploading a file that is not a real bzip2 archive must yield 400."""
    # '.tar.bz' name with junk bytes: the server must reject the content,
    # not just the extension.
    files = {"files": ("my_package-0.1-0.tar.bz", b"dfdf")}
    response = auth_client.post(
        f"/api/channels/{public_channel.name}/files/", files=files
    )
    assert response.status_code == 400
    assert "not a bzip2 file" in response.json()['detail']
def test_increment_download_count(
    auth_client, public_channel, package_version, db, mocker
):
    """Each authenticated download bumps the per-file metric and the
    version's ``download_count``.

    Fix: removed a stray trailing '|' (extraction residue) after the last
    assertion, which made the original a syntax error.
    """

    def get_db(config):
        # Reuse the test session so assertions below see main's writes.
        yield db

    mocker.patch("quetz.main.get_db", get_db)
    # Precondition: no metrics recorded yet.
    assert not db.query(PackageVersionMetric).one_or_none()
    with auth_client:
        response = auth_client.get(
            f"/get/{public_channel.name}/linux-64/test-package-0.1-0.tar.bz2"
        )
    assert response.status_code == 200
    metrics = (
        db.query(PackageVersionMetric)
        .filter(PackageVersionMetric.channel_name == public_channel.name)
        .filter(PackageVersionMetric.platform == package_version.platform)
        .filter(PackageVersionMetric.filename == package_version.filename)
        .all()
    )
    # NOTE(review): one download appears to create metric rows for several
    # aggregation periods, hence "> 1" — confirm against the metrics model.
    assert len(metrics) > 1
    assert metrics[0].count == 1
    db.refresh(package_version)
    assert package_version.download_count == 1
    # A second download increments both counters again.
    with auth_client:
        response = auth_client.get(
            f"/get/{public_channel.name}/linux-64/test-package-0.1-0.tar.bz2"
        )
    assert response.status_code == 200
    db.refresh(metrics[0])
    assert metrics[0].count == 2
    db.refresh(package_version)
    assert package_version.download_count == 2
1,991 | er get local package path | #
# (c) 2008 Mandriva, http://www.mandriva.com/
#
# $Id: package_api.py 713 2009-02-27 14:06:11Z oroussy $
#
# This file is part of Pulse 2, http://pulse2.mandriva.org
#
# Pulse 2 is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Pulse 2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pulse 2; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""
This module define the package_api_get API
It provides methods to acces to package informations.
"""
from pulse2.apis.clients import Pulse2Api
import twisted.internet.defer
class PackageGetA(Pulse2Api):
def __init__(self, *attr):
self.name = "PackageGetApi"
Pulse2Api.__init__(self, *attr)
def getAllPackages(self, mirror = None):
try:
d = self.callRemote("getAllPackages", mirror)
d.addErrback(self.onError, "getAllPackages", mirror, [{'label':'A', 'version':'0', 'ERR':'PULSE2ERROR_GETALLPACKAGE', 'mirror':self.server_addr.replace(self.credentials, '')}])
return d
except Exception, e:
self.logger.error("getAllPackages %s"%(str(e)))
return [{'label':'A', 'version':'0', 'ERR':'PULSE2ERROR_GETALLPACKAGE', 'mirror':self.server_addr.replace(self.credentials, '')}]
def getAllPendingPackages(self, mirror = None):
try:
d = self.callRemote("getAllPendingPackages", mirror)
d.addErrback(self.onError, "getAllPendingPackages", mirror)
return d
except Exception, e:
self.logger.error("getAllPendingPackages %s"%(str(e)))
return []
# FIXME ! __convertDoReboot* shouldn't be needed
def __convertDoRebootList(self, pkgs):
ret = []
for pkg in pkgs:
ret.append(self.__convertDoReboot(pkg))
return ret
def __convertDoReboot(self, pkg):
if pkg:
try:
do_reboot = pkg['reboot']
if do_reboot == '' or do_reboot == '0' or do_reboot == 0 or do_reboot == u'0' or do_reboot == 'false' or do_reboot == u'false' or do_reboot == False or do_reboot == 'disable' or do_reboot == u'disable' or do_reboot == 'off' or do_reboot == u'off':
pkg['do_reboot'] = 'disable'
elif do_reboot == '1' or do_reboot == 1 or do_reboot == u'1' or do_reboot == 'true' or do_reboot == u'true' or do_reboot == True or do_reboot == 'enable' or do_reboot == u'enable' or do_reboot == 'on' or do_reboot == u'on':
pkg['do_reboot'] = 'enable'
else:
self.logger.warning("Dont know option '%s' for do_reboot, will use 'disable'"%(do_reboot))
del pkg['reboot']
except KeyError:
pkg['do_reboot'] = 'disable'
return pkg
def getPackageDetail(self, pid):
d = self.callRemote("getPackageDetail", pid)
d.addCallback(self.__convertDoReboot)
d.addErrback(self.onError, "getPackageDetail", pid, False)
return d
def getPackagesDetail(self, pids):
d = self.callRemote("getPackagesDetail", pids)
d.addCallback(self.__convertDoRebootList)
d.addErrback(self.onErrorGetPackageDetailCall, pids, False)
return d
def treatMultipleGetPackageDetailCall(self, results):
ret = []
for i in results:
ret.append(i[1])
return ret
def onErrorGetPackageDetailCall(self, error, pids, value = []):
# when the package server is old, this one call function does not exists
# so we call several time the existing function
self.logger.warn("one of your package server does not support getPackagesDetail, you should update it.")
ds = []
for pid in pids:
d = self.callRemote("getPackageDetail", pid)
d.addCallback(self.__convertDoReboot)
d.addErrback(self.onError, "getPackageDetail", pid, False)
ds.append(d)
dl = twisted.internet.defer.DeferredList(ds)
dl.addCallback(self.treatMultipleGetPackageDetailCall)
return dl
def getPackageLabel(self, pid):
d = self.callRemote("getPackageLabel", pid)
d.addErrback(self.onError, "getPackageLabel", pid, False)
return d
def METHOD_NAME(self):
return self.config.repopath
def getLocalPackagePath(self, pid):
d = self.callRemote("getLocalPackagePath", pid)
d.addErrback(self.METHOD_NAME)
return d
def getLocalPackagesPath(self, pids):
d = self.callRemote("getLocalPackagesPath", pids)
d.addErrback(self.onError, "getLocalPackagesPath", pids, False)
return d
def getPackageVersion(self, pid):
d = self.callRemote("getPackageVersion", pid)
d.addErrback(self.onError, "getPackageVersion", pid, False)
return d
def getPackageSize(self, pid):
d = self.callRemote("getPackageSize", pid)
d.addErrback(self.onError, "getPackageSize", pid, 0)
return d
def getPackageInstallInit(self, pid):
d = self.callRemote("getPackageInstallInit", pid)
d.addErrback(self.onError, "getPackageInstallInit", pid, False)
return d
def getPackagePreCommand(self, pid):
d = self.callRemote("getPackagePreCommand", pid)
d.addErrback(self.onError, "getPackagePreCommand", pid, False)
return d
def getPackageCommand(self, pid):
d = self.callRemote("getPackageCommand", pid)
d.addErrback(self.onError, "getPackageCommand", pid, False)
return d
def getPackagePostCommandSuccess(self, pid):
d = self.callRemote("getPackagePostCommandSuccess", pid)
d.addErrback(self.onError, "getPackagePostCommandSuccess", pid, False)
return d
def getPackagePostCommandFailure(self, pid):
d = self.callRemote("getPackagePostCommandFailure", pid)
d.addErrback(self.onError, "getPackagePostCommandFailure", pid, False)
return d
def getPackageHasToReboot(self, pid):
d = self.callRemote("getPackageHasToReboot", pid)
d.addErrback(self.onError, "getPackageHasToReboot", pid, False)
return d
def getPackageFiles(self, pid):
d = self.callRemote("getPackageFiles", pid)
d.addErrback(self.onError, "getPackageFiles", pid)
return d
def getFileChecksum(self, file):
d = self.callRemote("getFileChecksum", file)
d.addErrback(self.onError, "getFileChecksum", file, False)
return d
def getPackagesIds(self, label):
d = self.callRemote("getPackagesIds", label)
d.addErrback(self.onError, "getPackagesIds", label)
return d
def getPackageId(self, label, version):
d = self.callRemote("getPackageId", label, version)
d.addErrback(self.onError, "getPackageId", (label, version), False)
return d
def isAvailable(self, pid, mirror):
d = self.callRemote("isAvailable", pid, mirror)
d.addErrback(self.onError, "getPackageId", (pid, mirror), False)
return d |
1,992 | prepare | # Copyright 2023 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs FIO and Netperf benchmarks in parallel.
Fio_netperf benchmark specifies its own benchmark config, using the first 2
VM's to run both FIO (on each VM) and Netperf (between the VM's). The benchmark-
specific flags for each benchmark can still be used, such as netperf_test_length
and fio_runtime (which are both specified in this benchmark config), which
determine how long the run stage lasts.
"""
from typing import Any, Dict, List
from absl import flags
from perfkitbenchmarker import background_tasks
from perfkitbenchmarker import benchmark_spec as bm_spec
from perfkitbenchmarker import configs
from perfkitbenchmarker import errors
from perfkitbenchmarker import sample
from perfkitbenchmarker.linux_benchmarks import fio_benchmark
from perfkitbenchmarker.linux_benchmarks import netperf_benchmark
from perfkitbenchmarker.linux_packages import fio
BENCHMARK_NAME = 'fio_netperf'
BENCHMARK_CONFIG = """
fio_netperf:
description: Run FIO and Netperf benchmarks in parallel
vm_groups:
vm_1:
vm_spec: *default_single_core
disk_spec: *default_500_gb
vm_2:
vm_spec: *default_single_core
disk_spec: *default_500_gb
flags:
netperf_test_length: 300
fio_runtime: 300
placement_group_style: closest_supported
"""
FLAGS = flags.FLAGS
MIN_RUN_STAGE_DURATION = 60
RUN_STAGE_DELAY_THRESHOLD = 0.01
def GetConfig(user_config: Dict[Any, Any]) -> Dict[Any, Any]:
  """Merge BENCHMARK_CONFIG with user_config to create benchmark_spec.

  Args:
    user_config: user-define configs (through FLAGS.benchmark_config_file or
      FLAGS.config_override).

  Returns:
    merged configs
  """
  config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
  # Unless FIO runs against an unfilled file, disks must stay unmounted.
  if FLAGS.fio_target_mode != fio_benchmark.AGAINST_FILE_WITHOUT_FILL_MODE:
    for group_spec in config['vm_groups'].values():
      for cloud_spec in group_spec['disk_spec'].values():
        cloud_spec['mount_point'] = None
  return config
def Prepare(benchmark_spec: bm_spec.BenchmarkSpec) -> None:
  """Prepare both benchmarks on the target vm.

  Fix: the function was named ``METHOD_NAME`` (a placeholder); restored
  ``Prepare``, the module-level name PerfKitBenchmarker looks up for the
  prepare stage (alongside GetConfig/Run/Cleanup in this module).

  Args:
    benchmark_spec: The benchmark specification.

  Raises:
    errors.Setup.InvalidFlagConfigurationError: if a run stage would be
      shorter than MIN_RUN_STAGE_DURATION, or the two stages' total
      durations differ by more than RUN_STAGE_DELAY_THRESHOLD.
  """
  # Total run-stage duration per benchmark: one run per stream count /
  # generated FIO scenario.
  min_test_length = min(
      FLAGS.netperf_test_length * len(FLAGS.netperf_num_streams),
      FLAGS.fio_runtime * len(FLAGS.fio_generate_scenarios),
  )
  max_test_length = max(
      FLAGS.netperf_test_length * len(FLAGS.netperf_num_streams),
      FLAGS.fio_runtime * len(FLAGS.fio_generate_scenarios),
  )
  if min_test_length < MIN_RUN_STAGE_DURATION:
    raise errors.Setup.InvalidFlagConfigurationError(
        'Combo benchmark run stages must run for at least'
        f' {MIN_RUN_STAGE_DURATION} seconds.'
    )
  elif float(max_test_length) / min_test_length - 1 > RUN_STAGE_DELAY_THRESHOLD:
    raise errors.Setup.InvalidFlagConfigurationError(
        'Combo benchmark run stages must have similar runtimes.'
    )
  vms = benchmark_spec.vms[:2]
  # Prepare Netperf benchmark: install on both VMs, then configure the
  # client and the server concurrently.
  client_vm, server_vm = vms
  background_tasks.RunThreaded(
      netperf_benchmark.PrepareNetperf, [client_vm, server_vm]
  )
  background_tasks.RunParallelThreads(
      [
          (netperf_benchmark.PrepareClientVM, [client_vm], {}),
          (
              netperf_benchmark.PrepareServerVM,
              [server_vm, client_vm.internal_ip, client_vm.ip_address],
              {},
          ),
      ],
      2,
  )
  # Prepare FIO benchmark on both VMs with a shared fio executable path.
  exec_path = fio.GetFioExec()
  background_tasks.RunThreaded(
      lambda vm: fio_benchmark.PrepareWithExec(vm, exec_path), vms
  )
def Run(benchmark_spec: bm_spec.BenchmarkSpec) -> List[sample.Sample]:
  """Runs both benchmarks in parallel.

  Args:
    benchmark_spec: The benchmark specification.

  Returns:
    A list of sample.Sample objects with the performance results.

  Raises:
    RunError: A run-stage error raised by an individual benchmark.
  """
  vms = benchmark_spec.vms[:2]
  # Fan out: FIO on both VMs and netperf between them, concurrently.
  # output_samples_list[0] holds FIO samples, [1] holds netperf samples.
  output_samples_list = background_tasks.RunParallelThreads(
      [
          (fio_benchmark.RunFioOnVMs, [vms], {}),
          (netperf_benchmark.RunClientServerVMs, vms, {}),
      ],
      2,
  )
  # Both FIO and netperf benchmarks are guaranteed to have samples for
  # 'start time' and 'end time'.
  # FIO samples collected from client/server VM.
  sample_times = {}
  for fio_sample in output_samples_list[0]:
    if fio_sample.metric == 'start_time':
      # Per-VM key, e.g. 'fio_start_time0'; keep the earliest start seen.
      key = (
          'fio_start_time' + str(fio_sample.metadata['machine_instance'])
      )
      sample_times[key] = min(
          sample_times.get(key, float('inf')), fio_sample.value
      )
    elif fio_sample.metric == 'end_time':
      # Keep the latest end seen for this VM.
      key = (
          'fio_end_time' + str(fio_sample.metadata['machine_instance'])
      )
      sample_times[key] = max(
          sample_times.get(key, float('-inf')), fio_sample.value
      )
  # Netperf samples collected for each of num_streams.
  for netperf_sample in output_samples_list[1]:
    if netperf_sample.metric == 'start_time':
      sample_times['netperf_start_time'] = min(
          sample_times.get('netperf_start_time', float('inf')),
          netperf_sample.value,
      )
    elif netperf_sample.metric == 'end_time':
      sample_times['netperf_end_time'] = max(
          sample_times.get('netperf_end_time', float('-inf')),
          netperf_sample.value,
      )
  min_test_length = min(FLAGS.netperf_test_length, FLAGS.fio_runtime)
  # NOTE(review): `skew / min_test_length - 1 > threshold` only trips when
  # the start/end skew exceeds ~101% of the shorter runtime; a plain
  # `skew / min_test_length > threshold` looks like the intent — confirm.
  if (
      float(
          max(
              abs(
                  sample_times['fio_start_time0']
                  - sample_times['netperf_start_time']
              ),
              abs(
                  sample_times['fio_start_time1']
                  - sample_times['netperf_start_time']
              ),
          )
      )
      / min_test_length
      - 1
      > RUN_STAGE_DELAY_THRESHOLD
  ):
    raise errors.Benchmarks.RunError(
        'Run stage start delay threshold exceeded.'
    )
  if (
      float(
          max(
              abs(
                  sample_times['fio_end_time0']
                  - sample_times['netperf_end_time']
              ),
              abs(
                  sample_times['fio_end_time1']
                  - sample_times['netperf_end_time']
              ),
          )
      )
      / min_test_length
      - 1
      > RUN_STAGE_DELAY_THRESHOLD
  ):
    raise errors.Benchmarks.RunError('Run stage end delay threshold exceeded.')
  return output_samples_list[0] + output_samples_list[1]
def Cleanup(benchmark_spec: bm_spec.BenchmarkSpec) -> None:
  """Cleanup benchmarks on the target vm.

  Args:
    benchmark_spec: The benchmark specification.
  """
  client_vm, server_vm = benchmark_spec.vms[:2]
  # FIO cleanup runs on both VMs in parallel; netperf tears down the pair.
  background_tasks.RunThreaded(fio_benchmark.CleanupVM, [client_vm, server_vm])
  netperf_benchmark.CleanupClientServerVMs(client_vm, server_vm)
1,993 | test absent when trail does not exist | import logging
import random
import string
import pytest
import salt.loader
import salt.states.boto_cloudtrail as boto_cloudtrail
from tests.support.mock import MagicMock, patch
boto = pytest.importorskip("boto")
boto3 = pytest.importorskip("boto3", "1.2.1")
botocore = pytest.importorskip("botocore", "1.4.41")
log = logging.getLogger(__name__)
class GlobalConfig:
    """Shared boto connection parameters and canned CloudTrail responses.

    Instances are handed out by the ``global_config`` fixture; the values
    below emulate the AWS API payloads the state module consumes.
    """

    region = "us-east-1"
    # Dummy credentials; the conn_parameters 'key' entry is re-randomized
    # per test by the global_config fixture.
    access_key = "GKTADJGHEIQSXMKKRBJ08H"
    secret_key = "askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs"
    conn_parameters = {
        "region": region,
        "key": access_key,
        "keyid": secret_key,
        "profile": {},
    }
    error_message = (
        "An error occurred (101) when calling the {0} operation: Test-defined error"
    )
    # Raised by mocks to simulate a trail that does not exist.
    not_found_error = botocore.exceptions.ClientError(
        {"Error": {"Code": "TrailNotFoundException", "Message": "Test-defined error"}},
        "msg",
    )
    error_content = {"Error": {"Code": 101, "Message": "Test-defined error"}}
    # Canned DescribeTrails/CreateTrail payload.
    trail_ret = dict(
        Name="testtrail",
        IncludeGlobalServiceEvents=True,
        KmsKeyId=None,
        LogFileValidationEnabled=False,
        S3BucketName="auditinfo",
        TrailARN="arn:aws:cloudtrail:us-east-1:214351231622:trail/testtrail",
    )
    # Canned GetTrailStatus payload (logging disabled, no deliveries yet).
    status_ret = dict(
        IsLogging=False,
        LatestCloudWatchLogsDeliveryError=None,
        LatestCloudWatchLogsDeliveryTime=None,
        LatestDeliveryError=None,
        LatestDeliveryTime=None,
        LatestDigestDeliveryError=None,
        LatestDigestDeliveryTime=None,
        LatestNotificationError=None,
        LatestNotificationTime=None,
        StartLoggingTime=None,
        StopLoggingTime=None,
    )
@pytest.fixture
def session_instance():
    """Yield a mock boto3 session (``boto3.session.Session`` is patched)."""
    with patch("boto3.session.Session") as patched_session:
        yield patched_session()
@pytest.fixture
def global_config():
    """Return a GlobalConfig instance with a freshly randomized access key.

    NOTE(review): this mutates the class-level ``conn_parameters`` dict, so
    the random key is shared by all instances — confirm that is intended.
    """
    GlobalConfig.conn_parameters["key"] = "".join(
        random.choice(string.ascii_lowercase + string.digits) for _ in range(50)
    )
    params = GlobalConfig()
    return params
@pytest.fixture
def configure_loader_modules(minion_opts):
    """Wire up a minimal salt loader so boto_cloudtrail states can run.

    Builds real (whitelisted) utils, execution modules, states and
    serializers from minion_opts and injects them as the module's dunder
    globals.
    """
    minion_opts["grains"] = salt.loader.grains(minion_opts)
    # Shared loader context so modules/states see the same state.
    ctx = {}
    utils = salt.loader.utils(
        minion_opts,
        whitelist=["boto", "boto3", "args", "systemd", "path", "platform", "reg"],
        context=ctx,
    )
    serializers = salt.loader.serializers(minion_opts)
    funcs = salt.loader.minion_mods(
        minion_opts, context=ctx, utils=utils, whitelist=["boto_cloudtrail"]
    )
    salt_states = salt.loader.states(
        opts=minion_opts,
        functions=funcs,
        utils=utils,
        whitelist=["boto_cloudtrail"],
        serializers=serializers,
    )
    return {
        boto_cloudtrail: {
            "__opts__": minion_opts,
            "__salt__": funcs,
            "__utils__": utils,
            "__states__": salt_states,
            "__serializers__": serializers,
        }
    }
@pytest.mark.slow_test
def test_present_when_trail_does_not_exist(global_config, session_instance):
    """
    Tests present on a trail that does not exist.
    """
    conn = MagicMock()
    session_instance.client.return_value = conn
    # First status call: trail missing (triggers creation); second call
    # (after create) returns the canned status.
    conn.get_trail_status.side_effect = [
        global_config.not_found_error,
        global_config.status_ret,
    ]
    conn.create_trail.return_value = global_config.trail_ret
    conn.describe_trails.return_value = {"trailList": [global_config.trail_ret]}
    with patch.dict(
        boto_cloudtrail.__salt__,
        {"boto_iam.get_account_id": MagicMock(return_value="1234")},
    ):
        result = boto_cloudtrail.__states__["boto_cloudtrail.present"](
            "trail present",
            Name=global_config.trail_ret["Name"],
            S3BucketName=global_config.trail_ret["S3BucketName"],
        )
    assert result["result"]
    # Creation must be reported in the state's changes.
    assert result["changes"]["new"]["trail"]["Name"] == global_config.trail_ret["Name"]
@pytest.mark.slow_test
def test_present_when_trail_exists(global_config, session_instance):
    """present on an existing, matching trail must report no changes."""
    conn = MagicMock()
    session_instance.client.return_value = conn
    conn.get_trail_status.return_value = global_config.status_ret
    conn.create_trail.return_value = global_config.trail_ret
    conn.describe_trails.return_value = {"trailList": [global_config.trail_ret]}
    with patch.dict(
        boto_cloudtrail.__salt__,
        {"boto_iam.get_account_id": MagicMock(return_value="1234")},
    ):
        result = boto_cloudtrail.__states__["boto_cloudtrail.present"](
            "trail present",
            Name=global_config.trail_ret["Name"],
            S3BucketName=global_config.trail_ret["S3BucketName"],
            LoggingEnabled=False,
        )
    assert result["result"]
    # Idempotent: nothing to change.
    assert result["changes"] == {}
@pytest.mark.slow_test
def test_present_with_failure(global_config, session_instance):
    """A ClientError from create_trail must fail the state with a comment."""
    conn = MagicMock()
    session_instance.client.return_value = conn
    # Trail missing, so the state attempts creation...
    conn.get_trail_status.side_effect = [
        global_config.not_found_error,
        global_config.status_ret,
    ]
    # ...and creation blows up with a test-defined ClientError.
    conn.create_trail.side_effect = botocore.exceptions.ClientError(
        global_config.error_content, "create_trail"
    )
    with patch.dict(
        boto_cloudtrail.__salt__,
        {"boto_iam.get_account_id": MagicMock(return_value="1234")},
    ):
        result = boto_cloudtrail.__states__["boto_cloudtrail.present"](
            "trail present",
            Name=global_config.trail_ret["Name"],
            S3BucketName=global_config.trail_ret["S3BucketName"],
            LoggingEnabled=False,
        )
    assert not result["result"]
    assert "An error occurred" in result["comment"]
def test_absent_when_trail_does_not_exist(global_config, session_instance):
    """
    Tests absent on a trail that does not exist.

    Fix: the function was named ``METHOD_NAME`` (a placeholder), which
    pytest would never collect; restored the ``test_``-prefixed name
    matching its docstring and the sibling test_absent_* tests.
    """
    conn = MagicMock()
    session_instance.client.return_value = conn
    conn.get_trail_status.side_effect = global_config.not_found_error
    result = boto_cloudtrail.__states__["boto_cloudtrail.absent"]("test", "mytrail")
    assert result["result"]
    # Nothing existed, so nothing changed.
    assert result["changes"] == {}
def test_absent_when_trail_exists(global_config, session_instance):
    """absent on an existing trail deletes it and reports the change."""
    client = MagicMock()
    session_instance.client.return_value = client
    client.get_trail_status.return_value = global_config.status_ret
    trail_name = global_config.trail_ret["Name"]
    result = boto_cloudtrail.__states__["boto_cloudtrail.absent"](
        "test", trail_name
    )
    assert result["result"]
    assert result["changes"]["new"]["trail"] is None
def test_absent_with_failure(global_config, session_instance):
    """A ClientError from delete_trail must fail the state with a comment."""
    conn = MagicMock()
    session_instance.client.return_value = conn
    conn.get_trail_status.return_value = global_config.status_ret
    conn.delete_trail.side_effect = botocore.exceptions.ClientError(
        global_config.error_content, "delete_trail"
    )
    result = boto_cloudtrail.__states__["boto_cloudtrail.absent"](
        "test", global_config.trail_ret["Name"]
    )
    assert not result["result"]
    assert "An error occurred" in result["comment"]
1,994 | encode puzzle hash | # Copyright (c) 2017 Pieter Wuille
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import annotations
from typing import Iterable, List, Optional, Tuple
from chia.types.blockchain_format.sized_bytes import bytes32
# Based on this specification from Pieter Wuille:
# https://github.com/sipa/bips/blob/bip-bech32m/bip-bech32m.mediawiki
"""Reference implementation for Bech32m and segwit addresses."""
CHARSET = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"
def bech32_polymod(values: List[int]) -> int:
    """Internal function that computes the Bech32 checksum."""
    GEN = (0x3B6A57B2, 0x26508E6D, 0x1EA119FA, 0x3D4233DD, 0x2A1462B3)
    chk = 1
    for v in values:
        top = chk >> 25
        chk = ((chk & 0x1FFFFFF) << 5) ^ v
        # Fold in the generator term for every set bit of the shifted-out top.
        for bit, coeff in enumerate(GEN):
            if (top >> bit) & 1:
                chk ^= coeff
    return chk
def bech32_hrp_expand(hrp: str) -> List[int]:
    """Expand the HRP into values for checksum computation."""
    # High 3 bits of each char, a zero separator, then the low 5 bits.
    high_bits = [ord(ch) >> 5 for ch in hrp]
    low_bits = [ord(ch) & 31 for ch in hrp]
    return high_bits + [0] + low_bits
M = 0x2BC830A3
def bech32_verify_checksum(hrp: str, data: List[int]) -> bool:
    """Return True when *data* carries a valid Bech32m checksum for *hrp*."""
    checksum = bech32_polymod(bech32_hrp_expand(hrp) + data)
    return checksum == M
def bech32_create_checksum(hrp: str, data: List[int]) -> List[int]:
    """Compute the six 5-bit checksum values for *hrp* and *data*."""
    polymod = bech32_polymod(bech32_hrp_expand(hrp) + data + [0] * 6) ^ M
    # Split the 30-bit polymod into six 5-bit groups, most significant first.
    return [(polymod >> 5 * (5 - i)) & 31 for i in range(6)]
def bech32_encode(hrp: str, data: List[int]) -> str:
    """Compute a Bech32 string given HRP and data values."""
    payload = data + bech32_create_checksum(hrp, data)
    encoded_payload = "".join(CHARSET[value] for value in payload)
    return hrp + "1" + encoded_payload
def bech32_decode(bech: str, max_length: int = 90) -> Tuple[Optional[str], Optional[List[int]]]:
    """Validate a Bech32 string, and determine HRP and data."""
    bech = bech.strip()
    # Printable ASCII only, and no mixed case.
    if any(not 33 <= ord(ch) <= 126 for ch in bech):
        return (None, None)
    if bech.lower() != bech and bech.upper() != bech:
        return (None, None)
    bech = bech.lower()
    # The last '1' separates HRP from payload; HRP and checksum must fit.
    sep = bech.rfind("1")
    if sep < 1 or sep + 7 > len(bech) or len(bech) > max_length:
        return (None, None)
    payload = bech[sep + 1 :]
    if not all(ch in CHARSET for ch in payload):
        return (None, None)
    hrp = bech[:sep]
    data = [CHARSET.find(ch) for ch in payload]
    if not bech32_verify_checksum(hrp, data):
        return (None, None)
    # Strip the six checksum values from the returned data.
    return hrp, data[:-6]
def convertbits(data: Iterable[int], frombits: int, tobits: int, pad: bool = True) -> List[int]:
    """General power-of-2 base conversion."""
    out: List[int] = []
    acc = 0
    nbits = 0
    out_mask = (1 << tobits) - 1
    acc_mask = (1 << (frombits + tobits - 1)) - 1
    for value in data:
        if value < 0 or (value >> frombits):
            raise ValueError("Invalid Value")
        # Shift the new group into the accumulator, then drain full output
        # groups from its high end.
        acc = ((acc << frombits) | value) & acc_mask
        nbits += frombits
        while nbits >= tobits:
            nbits -= tobits
            out.append((acc >> nbits) & out_mask)
    if pad:
        if nbits:
            out.append((acc << (tobits - nbits)) & out_mask)
    elif nbits >= frombits or ((acc << (tobits - nbits)) & out_mask):
        # Without padding, leftovers must be a short, all-zero remainder.
        raise ValueError("Invalid bits")
    return out
def encode_puzzle_hash(puzzle_hash: bytes32, prefix: str) -> str:
    """Encode a 32-byte puzzle hash as a bech32m address with *prefix* as HRP.

    Fix: the function was named ``METHOD_NAME`` (a placeholder); restored
    ``encode_puzzle_hash``, the counterpart of ``decode_puzzle_hash`` below.
    """
    # Regroup 8-bit bytes into 5-bit values before bech32m encoding.
    encoded = bech32_encode(prefix, convertbits(puzzle_hash, 8, 5))
    return encoded
def decode_puzzle_hash(address: str) -> bytes32:
    """Decode a bech32m address back into its 32-byte puzzle hash.

    Fix: removed a stray trailing '|' (extraction residue) after the
    return statement, which made the original a syntax error.

    Raises:
        ValueError: if the address fails bech32m validation, has invalid
            5-bit padding, or does not decode to exactly 32 bytes
            (enforced by the bytes32 constructor).
    """
    hrpgot, data = bech32_decode(address)
    if data is None:
        raise ValueError("Invalid Address")
    # pad=False enforces canonical zero padding of the final 5-bit group.
    decoded = convertbits(data, 5, 8, False)
    decoded_bytes = bytes32(decoded)
    return decoded_bytes
1,995 | opencomplete | #!/usr/bin/python3 -OO
# Copyright 2007-2023 The SABnzbd-Team (sabnzbd.org)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
sabnzbd.sabtraylinux - System tray icon for Linux, inspired from the Windows one
"""
import gi
from gi.repository import Gtk, GLib
import logging
try:
gi.require_version("XApp", "1.0")
from gi.repository import XApp
if not hasattr(XApp, "StatusIcon"):
raise ImportError
HAVE_XAPP = True
logging.debug("XApp found: %s" % XApp)
except Exception:
HAVE_XAPP = False
logging.debug("XApp not available, falling back to Gtk.StatusIcon")
from time import sleep
import subprocess
from threading import Thread
from os.path import abspath
import sabnzbd
from sabnzbd.panic import launch_a_browser
import sabnzbd.api as api
import sabnzbd.cfg as cfg
from sabnzbd.misc import to_units
class StatusIcon(Thread):
sabicons = {
"default": abspath("icons/logo-arrow.svg"),
"green": abspath("icons/logo-arrow_green.svg"),
"pause": abspath("icons/logo-arrow_gray.svg"),
}
updatefreq = 1000 # ms
def __init__(self):
self.mythread = Thread(target=self.dowork)
self.mythread.start()
def dowork(self):
# Wait for translated texts to be loaded
while not sabnzbd.WEBUI_READY:
sleep(0.2)
logging.debug("language file not loaded, waiting")
self.sabpaused = False
if HAVE_XAPP:
self.statusicon = XApp.StatusIcon()
else:
self.statusicon = Gtk.StatusIcon()
self.statusicon.set_name("SABnzbd")
self.statusicon.set_visible(True)
self.icon = self.sabicons["default"]
self.refresh_icon()
self.tooltip = "SABnzbd %s" % sabnzbd.__version__
self.refresh_tooltip()
if HAVE_XAPP:
self.statusicon.connect("activate", self.right_click_event)
else:
self.statusicon.connect("popup-menu", self.right_click_event)
GLib.timeout_add(self.updatefreq, self.run)
Gtk.main()
def refresh_icon(self):
if HAVE_XAPP:
# icon path must be absolute in XApp
self.statusicon.set_icon_name(self.icon)
else:
self.statusicon.set_from_file(self.icon)
def refresh_tooltip(self):
self.statusicon.set_tooltip_text(self.tooltip)
# run this every updatefreq ms
def run(self):
self.sabpaused, bytes_left, bpsnow, time_left = api.fast_queue()
mb_left = to_units(bytes_left)
speed = to_units(bpsnow)
if self.sabpaused:
self.tooltip = T("Paused")
self.icon = self.sabicons["pause"]
elif bytes_left > 0:
self.tooltip = "%sB/s %s: %sB (%s)" % (speed, T("Remaining"), mb_left, time_left)
self.icon = self.sabicons["green"]
else:
self.tooltip = T("Idle")
self.icon = self.sabicons["default"]
self.refresh_icon()
self.tooltip = "SABnzbd %s\n%s" % (sabnzbd.__version__, self.tooltip)
self.refresh_tooltip()
return 1
def right_click_event(self, icon, button, time):
"""menu"""
menu = Gtk.Menu()
maddnzb = Gtk.MenuItem(label=T("Add NZB"))
mshowinterface = Gtk.MenuItem(label=T("Show interface"))
mopencomplete = Gtk.MenuItem(label=T("Open complete folder"))
mrss = Gtk.MenuItem(label=T("Read all RSS feeds"))
if self.sabpaused:
mpauseresume = Gtk.MenuItem(label=T("Resume"))
else:
mpauseresume = Gtk.MenuItem(label=T("Pause"))
mrestart = Gtk.MenuItem(label=T("Restart"))
mshutdown = Gtk.MenuItem(label=T("Shutdown"))
maddnzb.connect("activate", self.addnzb)
mshowinterface.connect("activate", self.browse)
mopencomplete.connect("activate", self.METHOD_NAME)
mrss.connect("activate", self.rss)
mpauseresume.connect("activate", self.pauseresume)
mrestart.connect("activate", self.restart)
mshutdown.connect("activate", self.shutdown)
menu.append(maddnzb)
menu.append(mshowinterface)
menu.append(mopencomplete)
menu.append(mrss)
menu.append(mpauseresume)
menu.append(mrestart)
menu.append(mshutdown)
menu.show_all()
menu.popup(None, None, None, self.statusicon, button, time)
def addnzb(self, icon):
"""menu handlers"""
dialog = Gtk.FileChooserDialog(title="SABnzbd - " + T("Add NZB"), action=Gtk.FileChooserAction.OPEN)
dialog.add_buttons(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL, Gtk.STOCK_OPEN, Gtk.ResponseType.OK)
dialog.set_select_multiple(True)
filter = Gtk.FileFilter()
filter.set_name("*.nzb,*.gz,*.bz2,*.zip,*.rar,*.7z")
filter.add_pattern("*.nzb")
filter.add_pattern("*.gz")
filter.add_pattern("*.bz2")
filter.add_pattern("*.zip")
filter.add_pattern("*.rar")
filter.add_pattern("*.7z")
dialog.add_filter(filter)
response = dialog.run()
if response == Gtk.ResponseType.OK:
for filename in dialog.get_filenames():
sabnzbd.nzbparser.add_nzbfile(filename)
dialog.destroy()
def METHOD_NAME(self, icon):
subprocess.Popen(["xdg-open", cfg.complete_dir.get_path()])
def browse(self, icon):
launch_a_browser(sabnzbd.BROWSER_URL, True)
def pauseresume(self, icon):
if self.sabpaused:
self.resume()
else:
self.pause()
def restart(self, icon):
self.hover_text = T("Restart")
sabnzbd.trigger_restart()
def shutdown(self, icon):
self.hover_text = T("Shutdown")
sabnzbd.shutdown_program()
def pause(self):
sabnzbd.Scheduler.plan_resume(0)
sabnzbd.Downloader.pause()
def resume(self):
    """Resume downloading."""
    # assumes plan_resume(0) cancels any scheduled automatic resume — TODO confirm
    sabnzbd.Scheduler.plan_resume(0)
    # NOTE(review): ``pause()`` uses ``sabnzbd.Downloader`` (capitalized) while
    # this uses the module-level ``sabnzbd.downloader.unpause_all()`` — verify
    # both names exist and this asymmetry is intentional.
    sabnzbd.downloader.unpause_all()
def rss(self, icon):
    """Menu handler: force an immediate read of all RSS feeds."""
    sabnzbd.Scheduler.force_rss()
1,996 | test add | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=invalid-name
"""Tests for :class:`aiida.orm.nodes.data.base.BaseType` classes."""
import operator
import pytest
from aiida.orm import Bool, Float, Int, NumericType, Str, load_node
@pytest.mark.parametrize(
    'node_type, default, value', [
        (Bool, False, True),
        (Int, 0, 5),
        (Float, 0.0, 5.5),
        (Str, '', 'a'),
    ]
)
def test_create(node_type, default, value):
    """Test the creation of the ``BaseType`` nodes."""
    # Constructing without arguments yields the type's default value.
    assert node_type().value == default
    # Constructing with an explicit value stores exactly that value.
    assert node_type(value).value == value
@pytest.mark.parametrize('node_type', [Bool, Float, Int, Str])
def test_store_load(node_type):
    """Test ``BaseType`` node storing and loading."""
    original = node_type()
    original.store()
    # Reloading by pk must give back an equivalent value.
    reloaded = load_node(original.pk)
    assert original.value == reloaded.value
def test_modulo():
    """Test ``Int`` modulus operation."""
    twelve = Int(12)
    ten = Int(10)
    # Node % Node, Node % native and native % Node all give the same value
    # and all wrap the result in a ``NumericType`` instance.
    for remainder in (twelve % ten, twelve % 10, 12 % ten):
        assert remainder == 2
        assert isinstance(remainder, NumericType)
@pytest.mark.parametrize('node_type, a, b', [
    (Int, 3, 5),
    (Float, 1.2, 5.5),
])
def METHOD_NAME(node_type, a, b):
    """Test addition for ``Int`` and ``Float`` nodes."""
    expected = a + b
    lhs = node_type(a)
    rhs = node_type(b)
    # Node + Node, Node + native and native + Node all behave the same.
    for total in (lhs + rhs, lhs + b, a + rhs):
        assert isinstance(total, node_type)
        assert total.value == expected
    # In-place addition.
    accumulator = node_type(a)
    accumulator += rhs
    assert isinstance(accumulator, node_type)
    assert accumulator.value == expected
@pytest.mark.parametrize('node_type, a, b', [
    (Int, 3, 5),
    (Float, 1.2, 5.5),
])
def test_multiplication(node_type, a, b):
    """Test floats multiplication."""
    expected = a * b
    lhs = node_type(a)
    rhs = node_type(b)
    # Node * Node, Node * native and native * Node all behave the same.
    for product in (lhs * rhs, lhs * b, a * rhs):
        assert isinstance(product, node_type)
        assert product.value == expected
    # In-place multiplication.
    accumulator = node_type(a)
    accumulator *= rhs
    assert isinstance(accumulator, node_type)
    assert accumulator.value == expected
@pytest.mark.parametrize('node_type, a, b', [
    (Int, 3, 5),
    (Float, 1.2, 5.5),
])
def test_division(node_type, a, b):
    """Test the ``BaseType`` normal division operator."""
    quotient = node_type(a) / node_type(b)
    assert quotient == a / b
    # True division always promotes to ``Float``, even for two ``Int`` nodes.
    assert isinstance(quotient, Float)
@pytest.mark.parametrize('node_type, a, b', [
    (Int, 3, 5),
    (Float, 1.2, 5.5),
])
def test_division_integer(node_type, a, b):
    """Test the ``Int`` integer division operator."""
    quotient = node_type(a) // node_type(b)
    assert quotient == a // b
    # Floor division keeps the operand node type.
    assert isinstance(quotient, node_type)
@pytest.mark.parametrize('node_type, base, power', [
    (Int, 5, 2),
    (Float, 3.5, 3),
])
def test_power(node_type, base, power):
    """Test power operator."""
    result = node_type(base) ** node_type(power)
    assert result == base ** power
    # Exponentiation keeps the operand node type.
    assert isinstance(result, node_type)
@pytest.mark.parametrize('node_type, a, b', [
    (Int, 5, 2),
    (Float, 3.5, 3),
])
def test_modulus(node_type, a, b):
    """Test modulus operator."""
    expected = a % b
    node_a = node_type(a)
    node_b = node_type(b)
    # Node % Node, Node % native and native % Node all behave the same.
    for remainder in (node_a % node_b, node_a % b, a % node_b):
        assert remainder == expected
        assert isinstance(remainder, node_type)
@pytest.mark.parametrize(
    'opera', [
        operator.add, operator.mul, operator.pow, operator.lt, operator.le, operator.gt, operator.ge, operator.iadd,
        operator.imul
    ]
)
def test_operator(opera):
    """Test operations between Int and Float objects."""
    node_float = Float(2.2)
    node_int = Int(3)
    # Exercise both operand orders.
    for left, right in ((node_float, node_int), (node_int, node_float)):
        outcome = opera(left, right)
        plain = opera(left.value, right.value)
        # The node result wraps the same Python type the plain operation yields.
        assert outcome._type == type(plain)  # pylint: disable=protected-access
        assert outcome == plain
@pytest.mark.parametrize('node_type, a, b', [
    (Bool, False, True),
    (Int, 2, 5),
    (Float, 2.5, 5.5),
    (Str, 'a', 'b'),
])
def test_equality(node_type, a, b):
    """Test equality comparison for the base types."""
    first = node_type(a)
    first_copy = node_type(a)
    second = node_type(b)
    # Comparison against plain Python values.
    assert first == a
    assert first != b
    # Comparison against other ``BaseType`` nodes.
    assert first == first_copy
    assert first != second
@pytest.mark.parametrize('numeric_type', (Float, Int))
def test_unary_pos(numeric_type):
    """Test the ``__pos__`` unary operator for all ``NumericType`` subclasses."""
    # Unary plus is the identity for values of either sign.
    for node in (numeric_type(1), numeric_type(-1)):
        assert +node == node
@pytest.mark.parametrize('numeric_type', (Float, Int))
def test_unary_neg(numeric_type):
    """Test the ``__neg__`` unary operator for all ``NumericType`` subclasses."""
    node_positive = numeric_type(1)
    node_negative = numeric_type(-1)
    # Negation changes the value of either sign.
    assert -node_positive != node_positive
    assert -node_negative != node_negative
    # Negation maps each value onto the other (duplicate assertion removed).
    assert -node_positive == node_negative
    assert -node_negative == node_positive
@pytest.mark.parametrize('numeric_type', (Float, Int))
def test_unary_abs(numeric_type):
    """Test the ``__abs__`` unary operator for all ``NumericType`` subclasses"""
    abs_positive = abs(numeric_type(1))
    abs_negative = abs(numeric_type(-1))
    # ``abs`` leaves the positive value untouched and flips the negative one,
    # so both results coincide.
    assert abs_positive == numeric_type(1)
    assert abs_negative != numeric_type(-1)
    assert abs_positive == abs_negative
1,997 | worker init fn | import itertools
import random
from functools import partial
from typing import Any, Sequence, Union
import numpy as np
from torch.utils.data import DataLoader
from typeguard import check_argument_types
from espnet2.iterators.abs_iter_factory import AbsIterFactory
from espnet2.samplers.abs_sampler import AbsSampler
def METHOD_NAME(worker_id, base_seed=0):
    """Set random seed for each worker in DataLoader.

    Each worker gets a distinct, reproducible seed derived from
    ``base_seed`` so that parallel workers do not share RNG state.
    """
    worker_seed = base_seed + worker_id
    random.seed(worker_seed)
    np.random.seed(worker_seed)
class RawSampler(AbsSampler):
    """Sampler that simply serves a pre-built sequence of batches."""

    def __init__(self, batches):
        # batches: a sequence of batches, each itself a sequence of sample ids.
        self.batches = batches

    def __len__(self):
        """Return the number of batches."""
        return len(self.batches)

    def __iter__(self):
        """Iterate over the batches in their fixed order."""
        return iter(self.batches)

    def generate(self, seed):
        """Return a fresh copy of the batches; ``seed`` is ignored.

        A copy is returned so that callers may shuffle the result in place
        without mutating this sampler's state.
        """
        return list(self.batches)
class SequenceIterFactory(AbsIterFactory):
    """Build an iterator (DataLoader) for each epoch.

    This class simply creates a pytorch DataLoader except for the following
    points:
    - The random seed is decided according to the epoch number, which
    guarantees reproducibility when resuming from the middle of training.
    - The number of samples per epoch can be restricted, which controls the
    interval between training and evaluation.
    """

    def __init__(
        self,
        dataset,
        batches: Union[AbsSampler, Sequence[Sequence[Any]]],
        num_iters_per_epoch: int = None,
        seed: int = 0,
        shuffle: bool = False,
        shuffle_within_batch: bool = False,
        num_workers: int = 0,
        collate_fn=None,
        pin_memory: bool = False,
    ):
        assert check_argument_types()

        # Wrap a plain sequence of batches in RawSampler so that both cases
        # expose the same ``generate(seed)`` interface.
        if not isinstance(batches, AbsSampler):
            self.sampler = RawSampler(batches)
        else:
            self.sampler = batches

        self.dataset = dataset
        # If None, every epoch iterates over the whole corpus once.
        self.num_iters_per_epoch = num_iters_per_epoch
        self.shuffle = shuffle
        self.shuffle_within_batch = shuffle_within_batch
        self.seed = seed
        self.num_workers = num_workers
        self.collate_fn = collate_fn
        # https://discuss.pytorch.org/t/what-is-the-disadvantage-of-using-pin-memory/1702
        self.pin_memory = pin_memory

    def build_iter(self, epoch: int, shuffle: bool = None) -> DataLoader:
        """Build the DataLoader for the given epoch.

        The batches served for a "virtual" epoch of ``num_iters_per_epoch``
        iterations are a deterministic slice of the (optionally shuffled)
        full batch list, so resuming at any epoch reproduces the same data.
        """
        if shuffle is None:
            shuffle = self.shuffle

        if self.num_iters_per_epoch is not None:
            N = len(self.sampler)
            # If corpus size is larger than the num_per_epoch
            if self.num_iters_per_epoch < N:
                N = len(self.sampler)
                # ``real_epoch``: which pass over the full corpus this virtual
                # epoch ends in; ``offset``: position within that pass.
                real_epoch, offset = divmod(self.num_iters_per_epoch * epoch, N)

                if offset >= self.num_iters_per_epoch:
                    # The whole slice lies inside one real epoch.
                    current_batches = self.sampler.generate(real_epoch + self.seed)
                    if shuffle:
                        np.random.RandomState(real_epoch + self.seed).shuffle(
                            current_batches
                        )
                    batches = current_batches[
                        offset - self.num_iters_per_epoch : offset
                    ]
                else:
                    # The slice straddles two consecutive real epochs: take the
                    # tail of the previous pass plus the head of the current one.
                    prev_batches = self.sampler.generate(real_epoch - 1 + self.seed)
                    current_batches = self.sampler.generate(real_epoch + self.seed)
                    if shuffle:
                        # Each pass is shuffled with its own epoch-derived seed
                        # so slices are reproducible independently of ``epoch``.
                        np.random.RandomState(real_epoch - 1 + self.seed).shuffle(
                            prev_batches
                        )
                        np.random.RandomState(real_epoch + self.seed).shuffle(
                            current_batches
                        )
                    batches = (
                        prev_batches[offset - self.num_iters_per_epoch :]
                        + current_batches[:offset]
                    )

            # If corpus size is less than the num_per_epoch
            else:
                # One virtual epoch spans several full passes over the corpus;
                # walk forward pass by pass until enough batches are collected.
                _epoch, _cursor = divmod(self.num_iters_per_epoch * (epoch - 1), N)
                _remain = self.num_iters_per_epoch
                batches = []
                current_batches = self.sampler.generate(_epoch + self.seed)
                if shuffle:
                    np.random.RandomState(_epoch + self.seed).shuffle(current_batches)
                while _remain > 0:
                    _batches = current_batches[_cursor : _cursor + _remain]
                    batches += _batches
                    if _cursor + _remain >= N:
                        # Exhausted this pass: advance to the next one.
                        _epoch += 1
                        _cursor = 0
                        current_batches = self.sampler.generate(_epoch + self.seed)
                        if shuffle:
                            np.random.RandomState(_epoch + self.seed).shuffle(
                                current_batches
                            )
                    else:
                        _cursor = _cursor + _remain
                    _remain -= len(_batches)

                assert len(batches) == self.num_iters_per_epoch

        else:
            # No per-epoch restriction: serve the whole corpus, reshuffled
            # deterministically for each epoch.
            batches = self.sampler.generate(epoch + self.seed)
            if shuffle:
                np.random.RandomState(epoch + self.seed).shuffle(batches)

        # For backward compatibility for pytorch DataLoader
        if self.collate_fn is not None:
            kwargs = dict(collate_fn=self.collate_fn)
        else:
            kwargs = {}

        # reshuffle whole 'batches' so that elements within a batch can move
        # between different batches
        if self.shuffle_within_batch:
            # Flatten, shuffle all samples, then re-chunk to the original
            # batch size (taken from the first batch).
            _bs = len(batches[0])
            batches = list(itertools.chain(*batches))
            np.random.RandomState(epoch + self.seed).shuffle(batches)
            _batches = []
            for ii in range(0, len(batches), _bs):
                _batches.append(batches[ii : ii + _bs])
            batches = _batches
            del _batches

        return DataLoader(
            dataset=self.dataset,
            batch_sampler=batches,
            num_workers=self.num_workers,
            pin_memory=self.pin_memory,
            # Seed each worker from the epoch so runs are reproducible.
            METHOD_NAME=partial(METHOD_NAME, base_seed=epoch + self.seed),
            **kwargs,
        )
1,998 | message | # (C) Copyright 2004-2023 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
""" Displays a message to the user as a modal window.
"""
from traits.api import HasPrivateTraits, Str, Float
from .view import View
from .group import HGroup
from .item import Item, spring
from pyface.timer.api import do_after
# -------------------------------------------------------------------------
# 'Message' class:
# -------------------------------------------------------------------------
class Message(HasPrivateTraits):
    """Model object holding the text shown in a modal message dialog."""

    # -------------------------------------------------------------------------
    # Trait definitions:
    # -------------------------------------------------------------------------

    #: The message to be displayed
    METHOD_NAME = Str()
# -------------------------------------------------------------------------
# Displays a user specified message:
# -------------------------------------------------------------------------
def METHOD_NAME(METHOD_NAME="", title="Message", buttons=["OK"], parent=None):
    """Displays a message to the user as a modal window with the specified
    title and buttons.

    If *buttons* is not specified, a single **OK** button is used, which is
    appropriate for notifications, where no further action or decision on the
    user's part is required.
    """
    dialog_view = View(
        ["message~", "|<>"], title=title, buttons=buttons, kind="modal"
    )
    ui = Message(METHOD_NAME=METHOD_NAME).edit_traits(parent=parent, view=dialog_view)
    return ui.result
def error(METHOD_NAME="", title="Message", buttons=["OK", "Cancel"], parent=None):
    """Displays a message to the user as a modal window with the specified
    title and buttons.

    If *buttons* is not specified, **OK** and **Cancel** buttons are used,
    which is appropriate for confirmations, where the user must decide whether
    to proceed. Be sure to word the message so that it is clear that clicking
    **OK** continues the operation.
    """
    confirm_view = View(
        ["message~", "|<>"], title=title, buttons=buttons, kind="modal"
    )
    ui = Message(METHOD_NAME=METHOD_NAME).edit_traits(parent=parent, view=confirm_view)
    return ui.result
# -------------------------------------------------------------------------
# 'AutoCloseMessage' class:
# -------------------------------------------------------------------------
class AutoCloseMessage(HasPrivateTraits):
    """Message model whose window dismisses itself after a delay."""

    #: The message to be shown:
    METHOD_NAME = Str("Please wait")

    #: The time (in seconds) to show the message:
    time = Float(2.0)

    def show(self, parent=None, title=""):
        """Display the wait message for a limited duration."""
        message_view = View(
            HGroup(
                spring,
                Item("message", show_label=False, style="readonly"),
                spring,
            ),
            title=title,
        )
        ui = self.edit_traits(parent=parent, view=message_view)
        # Schedule automatic disposal once the requested time has elapsed.
        do_after(int(self.time * 1000.0), ui.dispose)
# -------------------------------------------------------------------------
# Displays a user specified message that closes automatically after a
# specified time interval:
# -------------------------------------------------------------------------
def auto_close_message(
    METHOD_NAME="Please wait", time=2.0, title="Please wait", parent=None
):
    """Displays a message to the user as a modal window with no buttons. The
    window closes automatically after a specified time interval (specified
    in seconds).
    """
    # Delegate to the auto-closing model object, which schedules its own
    # disposal after ``time`` seconds.
    AutoCloseMessage(METHOD_NAME=METHOD_NAME, time=time).show(parent=parent, title=title)
1,999 | keys | # -*- coding:utf-8 -*-
"""
This file is part of OpenSesame.
OpenSesame is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenSesame is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OpenSesame. If not, see <http://www.gnu.org/licenses/>.
"""
from libopensesame.py3compat import *
from libopensesame.exceptions import FStringError, FStringSyntaxError
from libopensesame.item_stack import item_stack_singleton
import warnings
class BasePythonWorkspace:
    """Provides a basic Python workspace for use in the GUI. This avoids
    unnecessarily importing the entire runtime API.

    The workspace's namespace is the experiment's variable store, and the
    class also exposes a dict-like facade over it (see the properties at the
    bottom).

    Parameters
    ----------
    experiment : Experiment
    """

    def __init__(self, experiment):
        # The experiment whose variable store serves as the exec/eval globals.
        self.experiment = experiment

    @property
    def _globals(self):
        # The namespace in which all code and f-strings are executed.
        return self.experiment.var.__vars__

    def check_syntax(self, script):
        """Checks whether a Python script is syntactically correct.

        Parameters
        ----------
        script : unicode
            A Python script.

        Returns
        -------
        int
            0 if script is correct, 1 if there is a syntax warning, and 2 if
            there is a syntax error.
        """
        # Compile only; the script is never executed. Warnings (e.g.
        # SyntaxWarning) are captured rather than printed.
        with warnings.catch_warnings(record=True) as warning_list:
            try:
                self._compile(safe_decode(script))
            except Exception as e:
                # Any failure to compile is reported as a syntax error (2).
                return 2
        if warning_list:
            return 1
        return 0

    def run_file(self, path):
        """Reads and executes a file.

        Parameters
        ----------
        path : str
            The full path to a Python file.
        """
        with safe_open(path) as fd:
            script = fd.read()
        bytecode = self._compile(script)
        self._exec(bytecode)

    def _compile(self, script):
        """Compiles a script into bytecode.

        Parameters
        ----------
        script : unicode
            A Python script.

        Returns
        -------
        code
            The compiled script.
        """
        return compile(script, '<string>', 'exec')  # __ignore_traceback__

    def _exec(self, bytecode):
        """Executes bytecode in the workspace namespace.

        Parameters
        ----------
        bytecode : code
            A chunk of bytecode.
        """
        exec(bytecode, self._globals)  # __ignore_traceback__

    def _eval(self, bytecode):
        """Evaluates bytecode in the workspace namespace.

        Parameters
        ----------
        bytecode : code
            A chunk of bytecode.

        Returns
        -------
        The evaluated value of the bytecode
        """
        return eval(bytecode, self._globals)  # __ignore_traceback__

    def eval_fstring(self, fs, include_local=False):
        """Evaluates an f-string.

        Parameters
        ----------
        fs : str
            An f-string
        include_local : bool, optional
            If True, the variable store of the current item is merged into the
            Python workspace. This allows items to evaluate f-strings that
            take into account the item's local variables.

        Returns
        -------
        A string corresponding to the evaluated f-string.
        """
        if include_local:
            # Overlay the current item's variables on a copy of the global
            # namespace so the merge does not leak into the workspace.
            item_name, phase = item_stack_singleton[-1]
            _globals = self._globals.copy()
            _globals.update(self.experiment.items[item_name].var.__vars__)
        else:
            _globals = self._globals
        # Escape triple quotes so the text can be embedded in a
        # triple-quoted f-string literal without terminating it early.
        fs_escaped = fs.replace(r"'''", r"\'\'\'")
        try:
            return eval(f"f'''{fs_escaped}'''", _globals)  # __ignore_traceback__
        except SyntaxError:
            raise FStringSyntaxError(
                f'The following text contains invalid f-string expression:\n\n~~~ .text\n{fs}\n~~~\n\n')
        except Exception:
            raise FStringError(
                f'Failed to evaluate f-string expression in the following text:\n\n~~~ .text\n{fs}\n~~~\n\n')

    # The properties below emulate a dict interface by forwarding the
    # corresponding bound methods of the underlying globals dict.
    @property
    def __setitem__(self):
        return self._globals.__setitem__

    @property
    def __delitem__(self):
        return self._globals.__delitem__

    @property
    def __getitem__(self):
        return self._globals.__getitem__

    @property
    def __len__(self):
        return self._globals.__len__

    @property
    def __iter__(self):
        return self._globals.__iter__

    @property
    def items(self):
        return self._globals.items

    @property
    def METHOD_NAME(self):
        return self._globals.METHOD_NAME

    @property
    def values(self):
        return self._globals.values

    @property
    def copy(self):
        return self._globals.copy
# Alias for backwards compatibility with the older lowercase naming convention.
base_python_workspace = BasePythonWorkspace
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.