id int64 0 300k | label stringlengths 1 74 ⌀ | text stringlengths 4k 8k |
|---|---|---|
298,900 | test sample categorical copy | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import defaultdict
import sys
import numpy
import pytest
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
# pylint: disable=no-member,invalid-name,unused-variable
# TVMScript workload used by the sampling tests below.
# Computes B = A * 2.0 elementwise over a (128, 257, 1470) buffer.
# The extents are chosen deliberately so the perfect-tile tests can sample
# a power of two (128), a prime (257) and a composite number (1470).
@T.prim_func
def elementwise(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 257, 1470))
    B = T.match_buffer(b, (128, 257, 1470))
    for i, j, k in T.grid(128, 257, 1470):
        with T.block("B"):
            # All three axes are spatial ("SSS").
            vi, vj, vk = T.axis.remap("SSS", [i, j, k])
            B[vi, vj, vk] = A[vi, vj, vk] * 2.0
# Pre-tiled NHWC conv2d (stride 2, 7x7 kernel) with an explicit zero-padding
# stage. Used by test_sample_compute_location: the "PadInput" block has
# several candidate compute locations inside the tiled loop nest.
@T.prim_func
def tiled_conv2d_with_padding(
    inputs: T.Buffer((1, 224, 224, 3), "float32"),
    weight: T.Buffer((7, 7, 3, 64), "float32"),
    conv2d_nhwc: T.Buffer((1, 112, 112, 64), "float32"),
) -> None:
    # 3-pixel zero border on each side of the 224x224 spatial dims -> 230x230.
    PadInput = T.alloc_buffer([1, 230, 230, 3], dtype="float32")
    for i0, i1, i2, i3 in T.grid(1, 230, 230, 3):
        with T.block("PadInput"):
            i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
            T.reads(inputs[i0_1, i1_1 - 3, i2_1 - 3, i3_1])
            T.writes(PadInput[i0_1, i1_1, i2_1, i3_1])
            # Interior (indices 3..226 inclusive) copies the input; the
            # border is filled with zeros.
            PadInput[i0_1, i1_1, i2_1, i3_1] = T.if_then_else(
                3 <= i1_1 and i1_1 < 227 and 3 <= i2_1 and i2_1 < 227,
                inputs[i0_1, i1_1 - 3, i2_1 - 3, i3_1],
                T.float32(0),
                dtype="float32",
            )
    # 22-level pre-tiled loop nest over (n, h, w, co) spatial axes and
    # (rh, rw, rc) reduction axes.
    for (
        i0_0,
        i1_0,
        i2_0,
        i3_0,
        i0_1_1,
        i1_1_1,
        i2_1_1,
        i3_1_1,
        i4_0,
        i5_0,
        i6_0,
        i0_2,
        i1_2,
        i2_2,
        i3_2,
        i4_1,
        i5_1,
        i6_1,
        i0_3,
        i1_3,
        i2_3,
        i3_3,
    ) in T.grid(1, 1, 4, 1, 1, 2, 4, 1, 7, 7, 1, 1, 1, 1, 1, 1, 1, 3, 1, 56, 7, 64):
        with T.block("conv2d_nhwc"):
            n = T.axis.spatial(1, 0)
            h = T.axis.spatial(112, i1_1_1 * 56 + i1_3)
            w = T.axis.spatial(112, i2_0 * 28 + i2_1_1 * 7 + i2_3)
            # co is spatial; rh/rw/rc are reduction axes ("SRRR").
            co, rh, rw, rc = T.axis.remap("SRRR", [i3_3, i4_0, i5_0, i6_1])
            T.reads(
                conv2d_nhwc[n, h, w, co],
                PadInput[n, h * 2 + rh, w * 2 + rw, co // 64 * 3 + rc],
                weight[rh, rw, rc, co],
            )
            T.writes(conv2d_nhwc[n, h, w, co])
            with T.init():
                conv2d_nhwc[n, h, w, co] = T.float32(0)
            conv2d_nhwc[n, h, w, co] = (
                conv2d_nhwc[n, h, w, co]
                + PadInput[n, h * 2 + rh, w * 2 + rw, co // 64 * 3 + rc] * weight[rh, rw, rc, co]
            )
# pylint: enable=no-member,invalid-name,unused-variable
def test_sample_categorical():
    """Draw 1000 categorical samples and check the empirical frequencies."""
    num_samples = 1000
    sch = tir.Schedule(elementwise, seed=42, debug_mask="all")
    candidates = [5, 2, 7, 1]
    probs = [0.15, 0.55, 0.05, 0.25]
    counter = defaultdict(int)
    for _ in range(num_samples):
        sampled = sch.get(sch.sample_categorical(candidates, probs))
        counter[sampled] += 1
    # Each candidate should be drawn roughly in proportion to its weight,
    # within a 7% absolute tolerance.
    for candidate, prob in zip(candidates, probs):
        lower = (prob - 0.07) * num_samples
        upper = (prob + 0.07) * num_samples
        assert lower <= counter[candidate] <= upper
    verify_trace_roundtrip(sch, mod=elementwise)
def METHOD_NAME():
    """Check the random variable sampling results after schedule copy."""
    num_draws = 100
    sch = tir.Schedule(elementwise, seed=42, debug_mask="all")
    candidates = [1, 2, 3, 4]
    probs = [0.1, 0.2, 0.3, 0.4]
    recorded = []
    for _ in range(num_draws):
        random_var = sch.sample_categorical(candidates, probs)
        recorded.append((random_var, sch.get(random_var)))
    duplicate = sch.copy()
    # Every random variable must resolve to the same decision in the copy.
    for random_var, original_decision in recorded:
        assert int(original_decision) == int(duplicate.get(random_var))
def test_sample_categorical_serialize():
    """Check the random variable sampling results after schedule serialization."""
    num_draws = 100
    sch = tir.Schedule(elementwise, seed=42, debug_mask="all")
    candidates = [5, 6, 7, 8]
    probs = [0.23, 0.19, 0.37, 0.21]
    sampled_values = [
        sch.get(sch.sample_categorical(candidates, probs)) for _ in range(num_draws)
    ]
    new_sch = verify_trace_roundtrip(sch, mod=elementwise)
    # The round-tripped trace must record exactly the same decisions,
    # one instruction per draw.
    for recorded, inst in zip(sampled_values, new_sch.trace.insts):
        assert recorded == candidates[new_sch.trace.decisions[inst].value]
def test_sample_perfect_tile_power_of_two():
    """Perfect tiling of a power-of-two extent (128) into 4 factors."""
    sch = tir.Schedule(elementwise, debug_mask="all")
    loop_i, _, _ = sch.get_loops(sch.get_block("B"))
    sampled = sch.sample_perfect_tile(loop_i, n=4)
    f0, f1, f2, f3 = (sch.get(rv) for rv in sampled)
    # A perfect tile factorization must multiply back to the loop extent.
    assert f0 * f1 * f2 * f3 == 128
    verify_trace_roundtrip(sch, mod=elementwise)
def test_sample_perfect_tile_prime():
    """Perfect tiling of a prime extent (257) into 4 factors."""
    sch = tir.Schedule(elementwise, debug_mask="all")
    _, loop_j, _ = sch.get_loops(sch.get_block("B"))
    sampled = sch.sample_perfect_tile(loop_j, n=4)
    f0, f1, f2, f3 = (sch.get(rv) for rv in sampled)
    # A perfect tile factorization must multiply back to the loop extent.
    assert f0 * f1 * f2 * f3 == 257
    verify_trace_roundtrip(sch, mod=elementwise)
def test_sample_perfect_tile_composite():
    """Perfect tiling of a composite extent (1470) into 4 factors."""
    sch = tir.Schedule(elementwise, debug_mask="all")
    _, _, loop_k = sch.get_loops(sch.get_block("B"))
    sampled = sch.sample_perfect_tile(loop_k, n=4)
    f0, f1, f2, f3 = (sch.get(rv) for rv in sampled)
    # A perfect tile factorization must multiply back to the loop extent.
    assert f0 * f1 * f2 * f3 == 1470
    verify_trace_roundtrip(sch, mod=elementwise)
use_sugared_block = tvm.testing.parameter(by_dict={"block_obj": False, "block_name": True})
def test_sample_compute_location(use_sugared_block):
    """Sampled compute locations should be roughly uniform over the candidates."""
    num_samples = 100
    sch = tir.Schedule(tiled_conv2d_with_padding, seed=42, debug_mask="all")
    # The API accepts either a block object or the sugared block name.
    pad_input = "PadInput" if use_sugared_block else sch.get_block("PadInput")
    counts = {}
    for _ in range(num_samples):
        sch.sample_compute_location(pad_input)  # pylint: disable=invalid-name
        decision = sch.trace.decisions[sch.trace.insts[-1]]
        counts[decision] = counts.get(decision, 0) + 1
    n_candidates = 8
    expected_rate = 1.0 / n_candidates
    for cnt in counts.values():
        numpy.testing.assert_allclose(expected_rate, cnt / num_samples, atol=0.04)
def test_sample_perfect_tile_after_copy():
    """Sampling on a copied schedule must not dead-lock on the RNG."""
    sch = tir.Schedule(elementwise, debug_mask="all")
    duplicate = sch.copy()
    _, _, loop_k = sch.get_loops(sch.get_block("B"))
    sch.sample_perfect_tile(loop_k, n=4)
    _, _, loop_k_copy = duplicate.get_loops(duplicate.get_block("B"))
    # Hangs if ForkSeed is not invoked when copying a schedule
    duplicate.sample_perfect_tile(loop_k_copy, n=4)
# Allow running this test file directly (outside of pytest).
if __name__ == "__main__":
    tvm.testing.main() |
298,901 | test multiline center | # -*- coding: utf-8 -*-
# This file is part of the MapProxy project.
# Copyright (C) 2010 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from mapproxy.compat.image import Image, ImageDraw, ImageColor, ImageFont
from mapproxy.cache.tile import Tile
from mapproxy.image import ImageSource
from mapproxy.image.message import TextDraw, message_image
from mapproxy.image.opts import ImageOptions
from mapproxy.tilefilter import watermark_filter
PNG_FORMAT = ImageOptions(format="image/png")
class TestTextDraw(object):
    """Tests for TextDraw box layout and rendering.

    NOTE(review): the expected pixel boxes below are tied to the metrics of
    the default PIL bitmap font (ImageFont.load_default()) — confirm against
    the pinned Pillow version when upgrading.
    """

    def test_ul(self):
        # A single line of text yields exactly one box, equal to the
        # total bounding box.
        font = ImageFont.load_default()
        td = TextDraw("Hello", font)
        img = Image.new("RGB", (100, 100))
        draw = ImageDraw.Draw(img)
        total_box, boxes = td.text_boxes(draw, (100, 100))
        assert total_box == boxes[0]
        assert len(boxes) == 1

    def test_multiline_ul(self):
        # Two lines, default (upper-left) placement: per-line boxes stack
        # downward starting at the default padding.
        font = ImageFont.load_default()
        td = TextDraw("Hello\nWorld", font)
        img = Image.new("RGB", (100, 100))
        draw = ImageDraw.Draw(img)
        total_box, boxes = td.text_boxes(draw, (100, 100))
        assert total_box == (5, 5, 35, 30)
        assert boxes == [(5, 5, 35, 16), (5, 19, 35, 30)]

    def test_multiline_lr(self):
        # Lower-right placement anchors the text block at the bottom-right
        # corner (minus padding).
        font = ImageFont.load_default()
        td = TextDraw("Hello\nWorld", font, placement="lr")
        img = Image.new("RGB", (100, 100))
        draw = ImageDraw.Draw(img)
        total_box, boxes = td.text_boxes(draw, (100, 100))
        assert total_box == (65, 70, 95, 95)
        assert boxes == [(65, 70, 95, 81), (65, 84, 95, 95)]

    def METHOD_NAME(self):
        # Center-center placement ("cc"): the text block is centered on
        # both axes of the 100x100 image.
        font = ImageFont.load_default()
        td = TextDraw("Hello\nWorld", font, placement="cc")
        img = Image.new("RGB", (100, 100))
        draw = ImageDraw.Draw(img)
        total_box, boxes = td.text_boxes(draw, (100, 100))
        assert total_box == (35, 38, 65, 63)
        assert boxes == [(35, 38, 65, 49), (35, 52, 65, 63)]

    def test_unicode(self):
        # Non-ASCII text must lay out the same as its ASCII counterpart
        # (same glyph count, same boxes).
        font = ImageFont.load_default()
        td = TextDraw(u"Héllö\nWørld", font, placement="cc")
        img = Image.new("RGB", (100, 100))
        draw = ImageDraw.Draw(img)
        total_box, boxes = td.text_boxes(draw, (100, 100))
        assert total_box == (35, 38, 65, 63)
        assert boxes == [(35, 38, 65, 49), (35, 52, 65, 63)]

    # Leading underscore: not collected by the test runner. Interactive
    # visual check that yields check_placement over placement combinations.
    def _test_all(self):
        for x in "c":
            for y in "LR":
                yield self.check_placement, x, y

    def check_placement(self, x, y):
        # Helper for _test_all: renders the placement and opens an image
        # viewer (img.show()), so it is only meant for manual inspection.
        font = ImageFont.load_default()
        td = TextDraw(
            "Hello\nWorld\n%s %s" % (x, y),
            font,
            placement=x + y,
            padding=5,
            linespacing=2,
        )
        img = Image.new("RGB", (100, 100))
        draw = ImageDraw.Draw(img)
        td.draw(draw, img.size)
        img.show()

    def test_transparent(self):
        # Drawing on a fully transparent RGBA image must not disturb the
        # transparent background.
        font = ImageFont.load_default()
        td = TextDraw("Hello\nWorld", font, placement="cc")
        img = Image.new("RGBA", (100, 100), (0, 0, 0, 0))
        draw = ImageDraw.Draw(img)
        td.draw(draw, img.size)
        # Only two colors: background and text.
        assert len(img.getcolors()) == 2
        # top color (bg) is transparent
        assert sorted(img.getcolors())[1][1] == (0, 0, 0, 0)
class TestMessageImage(object):
    """Tests for message_image(): renders an optional text message onto a
    background image of the requested size."""

    def test_blank(self):
        # Empty message on an opaque background: a uniform RGB image.
        image_opts = PNG_FORMAT.copy()
        image_opts.bgcolor = "#113399"
        img = message_image("", size=(100, 150), image_opts=image_opts)
        assert isinstance(img, ImageSource)
        assert img.size == (100, 150)
        pil_img = img.as_image()
        assert pil_img.getpixel((0, 0)) == ImageColor.getrgb("#113399")
        # 3 values in histogram (RGB): one uniform bucket per band,
        # 100 * 150 = 15000 pixels each.
        assert [x for x in pil_img.histogram() if x > 0] == [
            15000,
            15000,
            15000,
        ]

    def test_message(self):
        image_opts = PNG_FORMAT.copy()
        image_opts.bgcolor = "#113399"
        img = message_image("test", size=(100, 150), image_opts=image_opts)
        assert isinstance(img, ImageSource)
        assert img.size == (100, 150)
        # 6 values in histogram (3xRGB for background, 3xRGB for text message)
        # NOTE(review): the 77-pixel text count depends on the default PIL
        # bitmap font metrics — confirm when upgrading Pillow.
        assert [x for x in img.as_image().histogram() if x > 10] == [
            14923,
            77,
            14923,
            77,
            14923,
            77,
        ]

    def test_transparent(self):
        image_opts = ImageOptions(transparent=True)
        # NOTE(review): debugging leftover — prints the opts to stdout.
        print(image_opts)
        img = message_image("", size=(100, 150), image_opts=image_opts)
        assert isinstance(img, ImageSource)
        assert img.size == (100, 150)
        pil_img = img.as_image()
        assert pil_img.getpixel((0, 0)) == (255, 255, 255, 0)
        # 4 non-zero histogram buckets: one per uniform RGBA band of the
        # fully transparent background (no text was drawn).
        assert [x for x in pil_img.histogram() if x > 0] == [
            15000,
            15000,
            15000,
            15000,
        ]
class TestWatermarkTileFilter(object):
    """Tests for the watermark tile filter."""

    # NOTE(review): nose-style `setup` is deprecated under modern pytest
    # (prefer setup_method) — confirm the pinned pytest version before
    # renaming.
    def setup(self):
        self.tile = Tile((0, 0, 0))
        self.filter = watermark_filter("Test")

    def test_filter(self):
        img = Image.new("RGB", (200, 200))
        orig_source = ImageSource(img)
        self.tile.source = orig_source
        filtered_tile = self.filter(self.tile)
        # The filter mutates and returns the same tile, with a new source.
        assert self.tile is filtered_tile
        assert orig_source != filtered_tile.source
        pil_img = filtered_tile.source.as_image()
        assert pil_img.getpixel((0, 0)) == (0, 0, 0)
        colors = pil_img.getcolors()
        colors.sort()
        # most but not all parts are bg color
        assert 39950 > colors[-1][0] > 39000
        assert colors[-1][1] == (0, 0, 0)

    def test_filter_with_alpha(self):
        img = Image.new("RGBA", (200, 200), (10, 15, 20, 0))
        orig_source = ImageSource(img)
        self.tile.source = orig_source
        filtered_tile = self.filter(self.tile)
        assert self.tile is filtered_tile
        assert orig_source != filtered_tile.source
        pil_img = filtered_tile.source.as_image()
        # Background pixels keep their original RGBA value.
        assert pil_img.getpixel((0, 0)) == (10, 15, 20, 0)
        colors = pil_img.getcolors()
        colors.sort()
        # most but not all parts are bg color
        assert 39950 > colors[-1][0] > 39000
        assert colors[-1][1] == (10, 15, 20, 0) |
298,902 | test calling grep returns result parsed from | # -*- coding: utf-8 -*-
"""
Testing of grep command.
"""
# Module metadata. `__email__` was previously misspelled `_email_`
# (single underscores), so tools that read the conventional dunder
# missed it.
__author__ = 'Julia Patacz'
__copyright__ = 'Copyright (C) 2018, Nokia'
__email__ = 'julia.patacz@nokia.com'
import pytest
from moler.cmd.unix.grep import Grep
def test_calling_grep_returns_result_parsed_from_command_output_with_path_and_lines_number_and_bytes(
        buffer_connection,
        command_output_and_expected_result_with_path_and_lines_number_and_bytes):
    """grep -bnH: output with path, line numbers and byte offsets is parsed."""
    output, expected = command_output_and_expected_result_with_path_and_lines_number_and_bytes
    buffer_connection.remote_inject_response([output])
    grep_cmd = Grep(connection=buffer_connection.moler_connection, options='-bnH PREROUTING /etc/iptables/rules.v4')
    # Disable failure detection so the injected output is parsed as-is.
    grep_cmd._re_fail = None
    assert grep_cmd() == expected
def test_calling_grep_returns_result_parsed_from_command_output_with_path_and_lines_number(
        buffer_connection,
        command_output_and_expected_result_with_path_and_lines_number):
    """grep -nH: output with path and line numbers is parsed."""
    output, expected = command_output_and_expected_result_with_path_and_lines_number
    buffer_connection.remote_inject_response([output])
    grep_cmd = Grep(connection=buffer_connection.moler_connection, options='-nH PREROUTING /etc/iptables/rules.v4')
    assert grep_cmd() == expected
def test_calling_grep_returns_result_parsed_from_command_output_with_path(
        buffer_connection,
        command_output_and_expected_result_with_path):
    """grep -H: output prefixed with the file path is parsed."""
    output, expected = command_output_and_expected_result_with_path
    buffer_connection.remote_inject_response([output])
    grep_cmd = Grep(connection=buffer_connection.moler_connection, options='-H PREROUTING /etc/iptables/rules.v4')
    assert grep_cmd() == expected
def test_calling_grep_returns_result_parsed_from_command_output_with_lines_number_and_bytes(
        buffer_connection,
        command_output_and_expected_result_with_lines_number_and_bytes):
    """grep -bn: output with line numbers and byte offsets is parsed."""
    output, expected = command_output_and_expected_result_with_lines_number_and_bytes
    buffer_connection.remote_inject_response([output])
    grep_cmd = Grep(connection=buffer_connection.moler_connection, options='-bn PREROUTING /etc/iptables/rules.v4')
    assert grep_cmd() == expected
def METHOD_NAME(
        buffer_connection,
        command_output_and_expected_result_with_lines_number):
    """grep -n: output with line numbers is parsed."""
    output, expected = command_output_and_expected_result_with_lines_number
    buffer_connection.remote_inject_response([output])
    grep_cmd = Grep(connection=buffer_connection.moler_connection, options='-n PREROUTING /etc/iptables/rules.v4')
    assert grep_cmd() == expected
def test_calling_grep_returns_result_parsed_from_command_output(buffer_connection, command_output_and_expected_result):
    """Plain grep (no flags): bare matching lines are parsed."""
    output, expected = command_output_and_expected_result
    buffer_connection.remote_inject_response([output])
    grep_cmd = Grep(connection=buffer_connection.moler_connection, options='Mode debconf.conf')
    assert grep_cmd() == expected
def test_grep_returns_proper_command_string(buffer_connection):
    """Grep builds the exact shell command string from its options.

    Fixed: the connection was previously passed positionally as the raw
    `buffer_connection` object; every other test in this module passes
    `buffer_connection.moler_connection` via the `connection` kwarg.
    """
    grep_cmd = Grep(connection=buffer_connection.moler_connection, options="Mode debconf.conf")
    assert "grep Mode debconf.conf" == grep_cmd.command_string
@pytest.fixture
def command_output_and_expected_result_with_path_and_lines_number_and_bytes():
    """Sample `grep -bnH` output and its expected parsed result."""
    from moler.cmd.unix.grep import (
        COMMAND_OUTPUT_with_file_path_and_lines_number_and_bytes,
        COMMAND_RESULT_with_file_path_and_lines_number_and_bytes,
    )
    return (
        COMMAND_OUTPUT_with_file_path_and_lines_number_and_bytes,
        COMMAND_RESULT_with_file_path_and_lines_number_and_bytes,
    )
@pytest.fixture
def command_output_and_expected_result_with_path_and_lines_number():
    """Sample `grep -nH` output and its expected parsed result."""
    from moler.cmd.unix.grep import (
        COMMAND_OUTPUT_with_file_path_and_lines_number_or_bytes,
        COMMAND_RESULT_with_file_path_and_lines_number_or_bytes,
    )
    return (
        COMMAND_OUTPUT_with_file_path_and_lines_number_or_bytes,
        COMMAND_RESULT_with_file_path_and_lines_number_or_bytes,
    )
@pytest.fixture
def command_output_and_expected_result_with_path():
    """Sample `grep -H` output and its expected parsed result."""
    from moler.cmd.unix.grep import (
        COMMAND_OUTPUT_with_file_path,
        COMMAND_RESULT_with_file_path,
    )
    return COMMAND_OUTPUT_with_file_path, COMMAND_RESULT_with_file_path
@pytest.fixture
def command_output_and_expected_result_with_lines_number_and_bytes():
    """Sample `grep -bn` output and its expected parsed result."""
    from moler.cmd.unix.grep import (
        COMMAND_OUTPUT_with_lines_number_and_bytes,
        COMMAND_RESULT_with_lines_number_and_bytes,
    )
    return (
        COMMAND_OUTPUT_with_lines_number_and_bytes,
        COMMAND_RESULT_with_lines_number_and_bytes,
    )
@pytest.fixture
def command_output_and_expected_result_with_lines_number():
    """Sample `grep -n` output and its expected parsed result."""
    from moler.cmd.unix.grep import (
        COMMAND_OUTPUT_with_lines_number_or_bytes,
        COMMAND_RESULT_with_lines_number_or_bytes,
    )
    return (
        COMMAND_OUTPUT_with_lines_number_or_bytes,
        COMMAND_RESULT_with_lines_number_or_bytes,
    )
# Sample plain `grep` output and its expected parsed result.
@pytest.fixture
def command_output_and_expected_result():
    from moler.cmd.unix.grep import COMMAND_OUTPUT_ver_human, COMMAND_RESULT_ver_human
    data = COMMAND_OUTPUT_ver_human
    result = COMMAND_RESULT_ver_human
    return data, result |
298,903 | fail | from __future__ import annotations
from typing import TYPE_CHECKING
from simpleflow.base import Submittable, SubmittableContainer
from simpleflow.signal import WaitForSignal
from simpleflow.task import CancelTimerTask, TaskFailureContext, TimerTask
from . import canvas, task
from ._decorators import deprecated
from .activity import Activity
from .utils import issubclass_
if TYPE_CHECKING:
from typing import Any
from .marker import Marker
class Workflow(Submittable):
    """
    Main interface to define a workflow by submitting tasks for asynchronous
    execution.
    The actual behavior depends on the executor backend.
    :type executor: simpleflow.executor.Executor
    """

    # These are needed for workflow on SWF
    name = None           # workflow type name
    version = None        # workflow type version
    task_list = None      # default SWF task list
    task_priority = None  # default SWF task priority
    retry = 0             # number of retries on failure
    raises_on_failure = True
    INHERIT_TAG_LIST = "INHERIT_TAG_LIST"

    def __init__(self, executor):
        self._executor = executor

    @property
    def executor(self):
        # Read-only access to the backend executor.
        return self._executor

    def submit(self, submittable, *args, **kwargs):
        """
        Submit a function for asynchronous execution.
        :param submittable: callable registered as an task.
        :type submittable: base.Submittable
        :param args: arguments passed to the task.
        :type args: Sequence.
        :param kwargs: keyword-arguments passed to the task.
        :type kwargs: Mapping (dict).
        :returns:
        :rtype: simpleflow.futures.Future | simpleflow.canvas.GroupFuture
        """
        # If the activity is a child workflow, call directly
        # the executor
        if issubclass_(submittable, Workflow):
            # Workflow *class* (not instance): forwarded with its arguments.
            return self._executor.submit(submittable, *args, **kwargs)
        elif isinstance(submittable, (Activity, Workflow)):
            return self._executor.submit(submittable, *args, **kwargs)
        elif isinstance(submittable, (task.Task, WaitForSignal)):
            # Already-bound tasks carry their own arguments.
            return self._executor.submit(submittable)
        elif isinstance(submittable, SubmittableContainer):
            # Containers (groups/chains) drive their own submission.
            return submittable.submit(self._executor)
        else:
            raise TypeError(f"Bad type for {submittable} activity ({type(submittable)})")

    def map(self, activity, iterable):
        """
        Submit an activity for asynchronous execution for each value of
        *iterable*.
        :param activity: activity.
        :type activity: Activity
        :param iterable: collections of arguments passed to the task.
        :type iterable: collection.Iterable[Any]
        :rtype: list[simpleflow.futures.Future]
        """
        # One single-argument task per item.
        group = canvas.Group(*[task.ActivityTask(activity, i) for i in iterable])
        return self.submit(group).futures

    def starmap(self, activity, iterable):
        """
        Submit an activity for asynchronous execution for each value of
        *iterable*.
        :param activity: activity.
        :type activity: Activity
        :param iterable: collections of multiple-arguments passed to the task
                         as positional arguments. They are destructured using
                         the ``*`` operator.
        :type iterable: collection.Iterable[Any]
        :rtype: list[simpleflow.futures.Future]
        """
        group = canvas.Group(*[task.ActivityTask(activity, *i) for i in iterable])
        return self.submit(group).futures

    def METHOD_NAME(self, reason, details=None):
        """
        Fail the workflow. User-called.
        :param reason:
        :param details:
        :return:
        """
        # NOTE(review): delegates to the executor's fail-workflow entry
        # point; this placeholder name must match the method actually
        # exposed by the executor — confirm against the executor API.
        self._executor.METHOD_NAME(reason, details)

    def before_replay(self, history):
        """
        Method called before playing the execution.
        :param history:
        :type history: simpleflow.history.History
        """
        pass

    def after_replay(self, history):
        """
        Method called after playing the execution.
        Either the replay is finished or the execution is blocked.
        :param history:
        :type history: simpleflow.history.History
        """
        pass

    def after_closed(self, history):
        """
        Method called after closing the execution.
        Either the replay is finished or it failed.
        :param history:
        :type history: simpleflow.history.History
        """
        pass

    @deprecated
    def before_run(self, history):
        # Deprecated alias of before_replay.
        return self.before_replay(history)

    @deprecated
    def after_run(self, history):
        # Deprecated alias of after_closed.
        return self.after_closed(history)

    def run(self, *args, **kwargs):
        # Subclasses must implement the workflow body here.
        raise NotImplementedError

    def on_failure(self, history, reason, details=None):
        """
        Method called after the workflow failed.
        :param history:
        :type history: simpleflow.history.History
        :param reason: failure reason
        :type reason: str
        :param details:
        :type details: Optional[str]
        """
        pass

    def on_completed(self, history):
        """
        Method called after successfully completing the execution.
        :param history:
        :type history: simpleflow.history.History
        """
        pass

    def on_canceled(self, history):
        """
        Method called on canceling the execution.
        :param history:
        :type history: simpleflow.history.History
        """
        pass

    def get_run_context(self):
        """
        Get a context from the executor.
        The content is specific to each executor.
        :return: context
        :rtype: dict
        """
        return self.executor.get_run_context()

    @deprecated
    def get_execution_context(self):
        """
        Get a context from the executor.
        The content is specific to each executor.
        FIXME should be get_run_context; the execution context is something else in SWF.
        :return: context
        :rtype: dict
        """
        return self.executor.get_run_context()

    def signal(self, name, *args, workflow_id: str | None = None, run_id: str | None = None,
               propagate: bool = True, **kwargs):
        # Send a signal, by default propagated to parent/child executions.
        return self.executor.signal(name, *args, workflow_id=workflow_id, run_id=run_id, propagate=propagate, **kwargs)

    def wait_signal(self, name):
        return self.executor.wait_signal(name)

    def record_marker(self, name: str, details: Any = None) -> Submittable:
        return self.executor.record_marker(name, details)

    def list_markers(self, all: bool = False) -> list[Marker]:
        return self.executor.list_markers(all)

    def get_event_details(self, event_type: str, event_name: str) -> dict | None:
        """
        Get details about an event.
        Backend-dependent.
        The SWF backend can handle 'marker' and 'signal' events, returning a dict
        with name, input/details, event_id, ...
        :param event_type:
        :param event_name:
        :return: backend-dependent details.
        """
        return self.executor.get_event_details(event_type, event_name)

    def start_timer(self, timer_id, timeout, control=None):
        # Returns a submittable timer task; actual scheduling happens on submit.
        return TimerTask(timer_id, timeout, control)

    def cancel_timer(self, timer_id):
        return CancelTimerTask(timer_id)

    def should_cancel(self, history):
        """
        Called by the executor if cancel requested.
        :param history:
        :return:
        """
        # Default policy: always accept cancellation requests.
        return True

    def continue_as_new(self, *args, **kwargs):
        # Restart this workflow type as a new execution with the given arguments.
        return self.executor.continue_as_new(workflow=type(self), *args, **kwargs)

    def on_task_failure(
        self,
        failure_context: TaskFailureContext,
    ):
        """
        Called by the executor if a task or workflow failed.
        :param failure_context:
        :return:
        """
        pass # no specific error handling |
298,904 | get object | import logging
from urllib.parse import urlencode
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.db import models
from django.db.models import Case, Q, Sum, Value, When
from django.shortcuts import get_object_or_404
from django.urls import reverse, reverse_lazy
from django.views.generic import CreateView, DeleteView, DetailView, ListView, UpdateView
from zentral.contrib.osquery.forms import QueryForm, QuerySearchForm
from zentral.contrib.osquery.models import PackQuery, Query
from zentral.core.stores.conf import stores
from zentral.core.stores.views import EventsView, FetchEventsView, EventsStoreRedirectView
from zentral.utils.text import encode_args
logger = logging.getLogger('zentral.contrib.osquery.views.queries')
class QueryListView(PermissionRequiredMixin, ListView):
    """Paginated, searchable list of osquery queries."""
    permission_required = "osquery.view_query"
    paginate_by = 50
    model = Query

    def get(self, request, *args, **kwargs):
        # Validate eagerly so cleaned_data is available to get_queryset.
        self.form = QuerySearchForm(request.GET)
        self.form.is_valid()
        return super().get(request, *args, **kwargs)

    def get_queryset(self):
        return self.form.get_queryset()

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context["form"] = self.form
        page = context["page_obj"]
        # Pagination links preserve the current search parameters.
        if page.has_next():
            params = self.request.GET.copy()
            params["page"] = page.next_page_number()
            context["next_url"] = f"?{params.urlencode()}"
        if page.has_previous():
            params = self.request.GET.copy()
            params["page"] = page.previous_page_number()
            context["previous_url"] = f"?{params.urlencode()}"
        if page.number > 1:
            params = self.request.GET.copy()
            params.pop("page", None)
            context["reset_link"] = f"?{params.urlencode()}"
        return context
class CreateQueryView(PermissionRequiredMixin, CreateView):
    """Creation form for a new osquery query."""
    permission_required = "osquery.add_query"
    form_class = QueryForm
    model = Query
class QueryView(PermissionRequiredMixin, DetailView):
    """Detail page for a single query: pack membership, event store links
    and a per-run summary of its distributed query executions."""
    permission_required = "osquery.view_query"

    def get_queryset(self):
        # The compliance check and the pack are shown on the page; fetch
        # them alongside the query.
        return Query.objects.select_related("compliance_check").prefetch_related("packquery__pack")

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        try:
            ctx["pack_query"] = self.object.packquery
        except PackQuery.DoesNotExist:
            # Query is not scheduled in any pack.
            ctx["pack_query"] = None
        # events
        # EventsMixin is defined later in this module; the attribute is
        # resolved at request time, so the forward reference is fine.
        if self.request.user.has_perm(EventsMixin.permission_required):
            ctx["events_url"] = reverse("osquery:query_events", args=(self.object.pk,))
            store_links = []
            for store in stores.iter_events_url_store_for_user("object", self.request.user):
                url = "{}?{}".format(
                    reverse("osquery:query_events_store_redirect", args=(self.object.pk,)),
                    urlencode({"es": store.name,
                               "tr": EventsView.default_time_range})
                )
                store_links.append((url, store.name))
            ctx["store_links"] = store_links
        # distributed queries
        # Conditional counts implemented as Sum(Case(...)): 1 for a row
        # matching the condition, 0 otherwise.
        match_value = Value(1, output_field=models.IntegerField())
        miss_value = Value(0, output_field=models.IntegerField())
        ctx["distributed_queries"] = (
            self.object.distributedquery_set
            # in flight: machine assigned (serial number set) but no status yet
            .annotate(in_flight_count=Sum(
                Case(When(Q(distributedquerymachine__serial_number__isnull=False) &
                          Q(distributedquerymachine__status__isnull=True),
                          then=match_value), default=miss_value)
            ))
            # ok: status 0
            .annotate(ok_count=Sum(
                Case(When(distributedquerymachine__status=0, then=match_value), default=miss_value)
            ))
            # error: any non-zero status
            .annotate(error_count=Sum(
                Case(When(distributedquerymachine__status__gte=1, then=match_value), default=miss_value)
            ))
            .order_by("-pk")
        )
        ctx["distributed_query_count"] = ctx["distributed_queries"].count()
        return ctx
class UpdateQueryView(PermissionRequiredMixin, UpdateView):
    """Edit form for an existing osquery query."""
    permission_required = "osquery.change_query"
    form_class = QueryForm
    model = Query
class DeleteQueryView(PermissionRequiredMixin, DeleteView):
    """Confirmation + deletion of a query, then back to the query list."""
    permission_required = "osquery.delete_query"
    success_url = reverse_lazy("osquery:queries")
    model = Query
class EventsMixin:
    """Shared plumbing for the query event views below: object lookup,
    event fetch parameters and the various redirect URLs."""
    permission_required = "osquery.view_query"
    store_method_scope = "object"

    def METHOD_NAME(self, **kwargs):
        # 404 when the pk does not match an existing query.
        return get_object_or_404(Query, pk=kwargs["pk"])

    def get_fetch_kwargs_extra(self):
        return {"key": "osquery_query", "val": encode_args((self.object.pk,))}

    def get_fetch_url(self):
        return reverse("osquery:fetch_query_events", args=(self.object.pk,))

    def get_redirect_url(self):
        return reverse("osquery:query_events", args=(self.object.pk,))

    def get_store_redirect_url(self):
        return reverse("osquery:query_events_store_redirect", args=(self.object.pk,))

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context["query"] = self.object
        return context
# Rendered events page for a single query.
class QueryEventsView(EventsMixin, EventsView):
    template_name = "osquery/query_events.html"
# AJAX endpoint backing the events page.
class FetchQueryEventsView(EventsMixin, FetchEventsView):
    pass
# Redirects to the selected event store's own UI for this query.
class QueryEventsStoreRedirectView(EventsMixin, EventsStoreRedirectView):
    pass |
298,905 | parse args | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import time
from pprint import pprint
import paddle
from paddlenlp.transformers import BartForConditionalGeneration, BartTokenizer
from paddlenlp.utils.log import logger
def postprocess_seq(seq, bos_idx, eos_idx, output_bos=False, output_eos=False):
    """Truncate a decoded id sequence at the first EOS and strip special tokens.

    Args:
        seq: sequence of token ids produced by the decoder.
        bos_idx: id of the begin-of-sequence token.
        eos_idx: id of the end-of-sequence token.
        output_bos: keep BOS tokens in the output when True.
        output_eos: keep EOS tokens in the output when True.

    Returns:
        list: ids up to (and including) the first EOS, with BOS/EOS tokens
        removed unless explicitly requested.
    """
    # Position of the first EOS; keep the whole sequence when none is found.
    cut = len(seq) - 1
    for position, token in enumerate(seq):
        if token == eos_idx:
            cut = position
            break
    kept = []
    for token in seq[: cut + 1]:
        if token == bos_idx and not output_bos:
            continue
        if token == eos_idx and not output_eos:
            continue
        kept.append(token)
    return kept
def prepare_input(tokenizer, sentences):
    """Tokenize *sentences* with padding and return the ids as an int64 tensor."""
    encoded = tokenizer(sentences, padding=True)
    return paddle.to_tensor(encoded["input_ids"], dtype="int64")
def METHOD_NAME():
    """Build and parse the command-line arguments for fast BART decoding.

    Returns:
        argparse.Namespace: the parsed decoding options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name_or_path",
        default="bart-base",
        type=str,
        help="The model name to specify the bart to use. Can be one of ['bart-base', 'bart-large',]. ",
    )
    parser.add_argument(
        "--decoding_strategy",
        default="beam_search",
        type=str,
        help="The decoding strategy. Can be one of [greedy_search, beam_search, sampling]",
    )
    parser.add_argument("--beam_size", default=5, type=int, help="The parameters for beam search. ")
    parser.add_argument("--top_k", default=4, type=int, help="The number of candidate to procedure beam search. ")
    parser.add_argument(
        "--top_p", default=1.0, type=float, help="The probability threshold to procedure topp sampling. "
    )
    parser.add_argument("--max_length", default=20, type=int, help="Maximum output length. ")
    parser.add_argument("--diversity_rate", default=0.0, type=float, help="The diversity of beam search. ")
    parser.add_argument(
        "--length_penalty", default=0.6, type=float, help="The power number in length penalty calculation"
    )
    parser.add_argument("--use_fp16_decoding", action="store_true", help="Whether to use fp16 decoding to predict. ")
    # Fixed: `parser.METHOD_NAME()` — argparse.ArgumentParser has no such
    # attribute; the parser's own method is `parse_args`.
    args = parser.parse_args()
    return args
def do_predict(args):
    """Run FastGeneration BART mask filling on a few demo sentences and
    report the average per-batch decoding latency.

    Args:
        args (argparse.Namespace): decoding options from the CLI parser.
    """
    # Requires a CUDA device: the timing below uses cuda.synchronize().
    place = "gpu"
    paddle.set_device(place)
    tokenizer = BartTokenizer.from_pretrained(args.model_name_or_path)
    logger.info("Loading the model parameters, please wait...")
    model = BartForConditionalGeneration.from_pretrained(args.model_name_or_path)
    # Set evaluate mode
    model.eval()
    sentences = [
        "I love that girl, but <mask> does not <mask> me.",
        "She is so <mask> that I can not help glance at <mask>.",
        "Nothing's gonna <mask> my love for you.",
        "Drop everything now. Meet me in the pouring <mask>. Kiss me on the sidewalk.",
    ]
    bos_id = model.bart.config["bos_token_id"]
    eos_id = model.bart.config["eos_token_id"]
    input_ids = prepare_input(tokenizer, sentences)
    # Define model
    fast_bart = model
    # Set evaluate mode
    fast_bart.eval()
    with paddle.no_grad():
        # 100 iterations total: the first 50 are warmup, the last 50 are timed.
        for i in range(100):
            # For warmup.
            if 50 == i:
                # PaddlePaddle >= 2.2
                paddle.device.cuda.synchronize()
                start = time.perf_counter()
            finished_seq, _ = fast_bart.generate(
                input_ids=input_ids,
                max_length=args.max_length,
                decode_strategy=args.decoding_strategy,
                top_k=args.top_k,
                top_p=args.top_p,
                num_beams=args.beam_size,
                diversity_rate=args.diversity_rate,
                length_penalty=args.length_penalty,
                use_fp16_decoding=args.use_fp16_decoding,
                use_fast=True,
            )
        paddle.device.cuda.synchronize()
        logger.info("Average test time for decoding is %f ms" % ((time.perf_counter() - start) / 50 * 1000))
    # Output
    # Decode only the final iteration's result.
    finished_seq = finished_seq.numpy()
    for ins in finished_seq:
        generated_ids = postprocess_seq(ins, bos_id, eos_id)
        print(tokenizer.convert_ids_to_string(generated_ids))
if __name__ == "__main__":
    args = METHOD_NAME()  # parse the CLI decoding options
    pprint(args)
    do_predict(args) |
298,906 | ngroups | # Copyright (c) 2015-2020 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
import os
import sys
import itertools
import numpy as np
from ._libtoast import Logger
from .pshmem import MPIShared, MPILock
# Module-level MPI state: use_mpi is a tri-state flag (None = not yet
# decided, then True/False after the detection below) and MPI is the
# imported mpi4py.MPI module, or None when MPI is disabled / unavailable.
use_mpi = None
MPI = None

if use_mpi is None:
    # See if the user has explicitly disabled MPI.
    if "MPI_DISABLE" in os.environ:
        use_mpi = False
    else:
        # Special handling for running on a NERSC login node. This is for convenience.
        # The same behavior could be implemented with environment variables set in a
        # shell resource file.
        at_nersc = False
        if "NERSC_HOST" in os.environ:
            at_nersc = True
        in_slurm = False
        if "SLURM_JOB_NAME" in os.environ:
            in_slurm = True
        # Only attempt the import when we are not on a NERSC login node
        # (i.e. not at NERSC at all, or inside a SLURM allocation).
        if (not at_nersc) or in_slurm:
            try:
                import mpi4py.MPI as MPI

                use_mpi = True
            except:
                # There could be many possible exceptions raised...
                from ._libtoast import Logger

                log = Logger.get()
                log.info("mpi4py not found- using serial operations only")
                use_mpi = False
def get_world():
    """Retrieve the default world communicator and its properties.

    If MPI is enabled, this returns MPI.COMM_WORLD and the process rank and
    number of processes.  If MPI is disabled, this returns None for the
    communicator, zero for the rank, and one for the number of processes.

    Returns:
        (tuple): The communicator, number of processes, and rank.

    """
    if not use_mpi:
        # Serial mode: no communicator, a single process with rank zero.
        return None, 1, 0
    comm = MPI.COMM_WORLD
    return comm, comm.size, comm.rank
class Comm(object):
    """Class which represents a two-level hierarchy of MPI communicators.

    A Comm object splits the full set of processes into groups of size
    "group".  If group_size does not divide evenly into the size of the given
    communicator, then those processes remain idle.

    A Comm object stores three MPI communicators:  The "world" communicator
    given here, which contains all processes to consider, a "group"
    communicator (one per group), and a "rank" communicator which contains the
    processes with the same group-rank across all groups.

    If MPI is not enabled, then all communicators are set to None.

    Args:
        world (mpi4py.MPI.Comm): the MPI communicator containing all processes.
        group (int): the size of each process group.

    """

    def __init__(self, world=None, groupsize=0):
        log = Logger.get()
        if world is None:
            if use_mpi:
                # Default is COMM_WORLD
                world = MPI.COMM_WORLD
            else:
                # MPI is disabled, leave world as None.
                pass
        else:
            if use_mpi:
                # We were passed a communicator to use. Check that it is
                # actually a communicator, otherwise fall back to COMM_WORLD.
                if not isinstance(world, MPI.Comm):
                    log.warning(
                        "Specified world communicator is not a valid "
                        "mpi4py.MPI.Comm object. Using COMM_WORLD."
                    )
                    world = MPI.COMM_WORLD
            else:
                log.warning(
                    "World communicator specified even though "
                    "MPI is disabled. Ignoring this constructor "
                    "argument."
                )
                world = None
        # Special case, MPI available but the user wants a serial
        # data object.  The use_mpi guard is required: when mpi4py was not
        # imported the module-level MPI is None, and evaluating
        # MPI.COMM_SELF unconditionally raised AttributeError.
        if use_mpi and world == MPI.COMM_SELF:
            world = None
        self._wcomm = world
        self._wrank = 0
        self._wsize = 1
        if self._wcomm is not None:
            self._wrank = self._wcomm.rank
            self._wsize = self._wcomm.size
        self._gsize = groupsize
        if (self._gsize < 0) or (self._gsize > self._wsize):
            log.warning(
                "Invalid groupsize ({}). Should be between {} "
                "and {}. Using single process group instead.".format(
                    groupsize, 0, self._wsize
                )
            )
            self._gsize = 0
        if self._gsize == 0:
            # Zero means "one group containing all processes".
            self._gsize = self._wsize
        self._ngroups = self._wsize // self._gsize
        if self._ngroups * self._gsize != self._wsize:
            msg = (
                "World communicator size ({}) is not evenly divisible "
                "by requested group size ({}).".format(self._wsize, self._gsize)
            )
            log.error(msg)
            raise RuntimeError(msg)
        # Index of the group containing this process, and our rank inside it.
        self._group = self._wrank // self._gsize
        self._grank = self._wrank % self._gsize
        if self._ngroups == 1:
            # We just have one group with all processes.
            self._gcomm = self._wcomm
            if use_mpi:
                self._rcomm = MPI.COMM_SELF
            else:
                self._rcomm = None
        else:
            # We need to split the communicator. This code is never executed
            # unless MPI is enabled and we have multiple groups.
            self._gcomm = self._wcomm.Split(self._group, self._grank)
            self._rcomm = self._wcomm.Split(self._grank, self._group)

    @property
    def world_size(self):
        """The size of the world communicator."""
        return self._wsize

    @property
    def world_rank(self):
        """The rank of this process in the world communicator."""
        return self._wrank

    @property
    def METHOD_NAME(self):
        """The number of process groups."""
        return self._ngroups

    @property
    def group(self):
        """The group containing this process."""
        return self._group

    @property
    def group_size(self):
        """The size of the group containing this process."""
        return self._gsize

    @property
    def group_rank(self):
        """The rank of this process in the group communicator."""
        return self._grank

    @property
    def comm_world(self):
        """The world communicator."""
        return self._wcomm

    @property
    def comm_group(self):
        """The communicator shared by processes within this group."""
        return self._gcomm

    @property
    def comm_rank(self):
        """The communicator shared by processes with the same group_rank."""
        return self._rcomm

    def __repr__(self):
        lines = [
            "  World MPI communicator = {}".format(self._wcomm),
            "  World MPI size = {}".format(self._wsize),
            "  World MPI rank = {}".format(self._wrank),
            "  Group MPI communicator = {}".format(self._gcomm),
            "  Group MPI size = {}".format(self._gsize),
            "  Group MPI rank = {}".format(self._grank),
            "  Rank MPI communicator = {}".format(self._rcomm),
        ]
        return "<toast.Comm\n{}\n>".format("\n".join(lines))
# Copyright 2005-2006 Sergey Fedoseev <fedoseev.sergey@gmail.com>
# Copyright 2007 Simon Morgan <zen84964@zen.co.uk>
# 2017 Nick Boultbee
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import os
import sys

# This plugin controls Gajim over D-Bus, which is not available on
# Windows or macOS, so bail out of plugin registration there.
if os.name == "nt" or sys.platform == "darwin":
    from quodlibet.plugins import PluginNotSupportedError
    raise PluginNotSupportedError

from gi.repository import GLib
from gi.repository import Gio
from gi.repository import Gtk

from quodlibet import _
from quodlibet.plugins.events import EventPlugin
from quodlibet.pattern import Pattern
from quodlibet.qltk import Frame, Icons
from quodlibet import config

# Translators: statuses relating to Instant Messenger apps
_STATUSES = {'online': _('online'),
             'offline': _('offline'),
             'chat': _('chat'),
             'away': _('away'),
             'xa': _('xa'),
             'invisible': _('invisible')}
class GajimStatusMessage(EventPlugin):
    """Mirror the currently playing song into the Gajim IM status message
    via Gajim's D-Bus remote interface."""

    PLUGIN_ID = 'Gajim status message'
    PLUGIN_NAME = _('Gajim Status Message')
    PLUGIN_DESC = _("Changes Gajim status message according to what "
                    "you are currently listening to.")
    PLUGIN_ICON = Icons.FACE_SMILE

    # Config keys, namespaced by the plugin module name.
    c_accounts = __name__ + '_accounts'
    c_paused = __name__ + '_paused'
    c_statuses = __name__ + '_statuses'
    c_pattern = __name__ + '_pattern'

    def __init__(self):
        # Load each setting, seeding the config with a default when the key
        # is missing or unreadable.  Catch Exception instead of using a bare
        # except so SystemExit / KeyboardInterrupt are never swallowed.
        try:
            self.accounts = config.get('plugins', self.c_accounts).split()
        except Exception:
            self.accounts = []
            config.set('plugins', self.c_accounts, '')

        try:
            self.paused = config.getboolean('plugins', self.c_paused)
        except Exception:
            self.paused = True
            config.set('plugins', self.c_paused, 'True')

        try:
            self.statuses = config.get('plugins', self.c_statuses).split()
        except Exception:
            self.statuses = ['online', 'chat']
            config.set('plugins', self.c_statuses, " ".join(self.statuses))

        try:
            self.pattern = config.get('plugins', self.c_pattern)
        except Exception:
            self.pattern = '<artist> - <title>'
            config.set('plugins', self.c_pattern, self.pattern)

    def enabled(self):
        # Lazily created D-Bus proxy and the message we last pushed.
        self.interface = None
        self.current = ''

    def disabled(self):
        # Clear any status message this plugin may have set.
        if self.current != '':
            self.METHOD_NAME(self.accounts, '')

    def METHOD_NAME(self, enabled_accounts, status_message):
        """Push status_message to every enabled Gajim account whose current
        status is one of the configured statuses."""
        if not self.interface:
            try:
                self.interface = Gio.DBusProxy.new_for_bus_sync(
                    Gio.BusType.SESSION, Gio.DBusProxyFlags.NONE, None,
                    'org.gajim.dbus', '/org/gajim/dbus/RemoteObject',
                    'org.gajim.dbus.RemoteInterface', None)
            except GLib.Error:
                self.interface = None

        if self.interface:
            try:
                for account in self.interface.list_accounts():
                    status = self.interface.get_status('(s)', account)
                    if enabled_accounts != [] and \
                            account not in enabled_accounts:
                        continue
                    if status in self.statuses:
                        self.interface.METHOD_NAME('(sss)',
                            status, status_message, account)
            except GLib.Error:
                # Gajim went away; drop the proxy so it is re-created later.
                self.interface = None

    def plugin_on_song_started(self, song):
        if song:
            self.current = Pattern(self.pattern) % song
        else:
            self.current = ''
        self.METHOD_NAME(self.accounts, self.current)

    def plugin_on_paused(self):
        if self.paused and self.current != '':
            self.METHOD_NAME(self.accounts,
                             "%s [%s]" % (self.current, _("paused")))

    def plugin_on_unpaused(self):
        self.METHOD_NAME(self.accounts, self.current)

    def accounts_changed(self, entry):
        self.accounts = entry.get_text().split()
        config.set('plugins', self.c_accounts, entry.get_text())

    def pattern_changed(self, entry):
        self.pattern = entry.get_text()
        config.set('plugins', self.c_pattern, self.pattern)

    def paused_changed(self, c):
        config.set('plugins', self.c_paused, str(c.get_active()))

    def statuses_changed(self, b):
        # Keep self.statuses in sync with the toggled checkbox and persist.
        if b.get_active() and b.get_name() not in self.statuses:
            self.statuses.append(b.get_name())
        elif not b.get_active() and b.get_name() in self.statuses:
            self.statuses.remove(b.get_name())
        config.set('plugins', self.c_statuses, " ".join(self.statuses))

    def PluginPreferences(self, parent):
        """Build the preferences UI: pattern entry, accounts entry, the
        '[paused]' toggle and the grid of status checkboxes."""
        vb = Gtk.VBox(spacing=6)

        pattern_box = Gtk.HBox(spacing=6)
        pattern_box.set_border_width(3)
        pattern = Gtk.Entry()
        pattern.set_text(self.pattern)
        pattern.connect('changed', self.pattern_changed)
        pattern_box.pack_start(Gtk.Label(label=_("Pattern:")), False, True, 0)
        pattern_box.pack_start(pattern, True, True, 0)

        accounts_box = Gtk.HBox(spacing=3)
        accounts_box.set_border_width(3)
        accounts = Gtk.Entry()
        accounts.set_text(" ".join(self.accounts))
        accounts.connect('changed', self.accounts_changed)
        accounts.set_tooltip_text(
            _("List accounts, separated by spaces, for "
              "changing status message. If none are specified, "
              "status message of all accounts will be changed."))
        accounts_box.pack_start(Gtk.Label(label=_("Accounts:")),
                                False, True, 0)
        accounts_box.pack_start(accounts, True, True, 0)

        c = Gtk.CheckButton(label=_("Add '[paused]'"))
        c.set_active(self.paused)
        c.connect('toggled', self.paused_changed)
        c.set_tooltip_text(_("If checked, '[paused]' will be added to "
                             "status message on pause"))

        # Lay the status checkboxes out in a three-column grid.
        table = Gtk.Table()
        self.list = []
        i = 0
        j = 0
        for status, translated in _STATUSES.items():
            button = Gtk.CheckButton(label=translated)
            button.set_name(status)
            if status in self.statuses:
                button.set_active(True)
            button.connect('toggled', self.statuses_changed)
            self.list.append(button)
            table.attach(button, i, i + 1, j, j + 1)
            if i == 2:
                i = 0
                j += 1
            else:
                i += 1

        vb.pack_start(pattern_box, True, True, 0)
        vb.pack_start(accounts_box, True, True, 0)
        vb.pack_start(c, True, True, 0)
        frame = Frame(label=_("Statuses for which message will be changed"),
                      child=table)
        vb.pack_start(frame, True, True, 6)
        return vb
from __future__ import annotations
import asyncio
import logging
import pickle
from contextlib import contextmanager
from typing import TYPE_CHECKING, Iterator, Set, Union
from uuid import UUID
from cloudevents.exceptions import DataUnmarshallerError
from cloudevents.http import CloudEvent, from_json
from google.protobuf.message import DecodeError
from websockets.legacy.server import WebSocketServerProtocol
from websockets.server import serve
from _ert_com_protocol import DispatcherMessage
from ert.serialization import evaluator_unmarshaller
from ._experiment_protocol import Experiment
from ._registry import Registry
if TYPE_CHECKING:
from ert.ensemble_evaluator.config import EvaluatorServerConfig
logger = logging.getLogger(__name__)
event_logger = logging.getLogger("ert.event_log")
class ExperimentServer:
    """:class:`ExperimentServer` implements the experiment server API, allowing
    creation, management and running of experiments as defined by the
    :class:`ert.experiment_server._experiment_protocol.Experiment` protocol.

    :class:`ExperimentServer` also runs a server to which clients and remote
    workers can connect.
    """

    def __init__(self, ee_config: EvaluatorServerConfig) -> None:
        self._config = ee_config
        self._registry = Registry()
        self._clients: Set[WebSocketServerProtocol] = set()
        # Future that, once resolved, tells the server task to shut down.
        self._server_done = asyncio.get_running_loop().create_future()
        self._server_task = asyncio.create_task(self._server())

    async def _handler(self, websocket: WebSocketServerProtocol, path: str) -> None:
        # Route incoming connections by the first path component.
        elements = path.split("/")
        if elements[1] == "client":
            await self.handle_client(websocket, path)
        elif elements[1] == "dispatch":
            logger.debug("dispatcher connected")
            await self.handle_dispatch(websocket, path)
        else:
            logger.info(f"Connection attempt to unknown path: {path}.")

    async def METHOD_NAME(self) -> None:
        """Stop the server."""
        logger.debug("stopping experiment server gracefully...")
        try:
            self._server_done.set_result(None)
        except asyncio.InvalidStateError:
            # Stopping twice is harmless: the future was already resolved.
            logger.debug("was already gracefully asked to stop.")
        await self._server_task

    async def handle_dispatch(
        self, websocket: WebSocketServerProtocol, path: str
    ) -> None:
        """Handle incoming "dispatch" connections, which refers to remote workers."""
        event: Union[CloudEvent, DispatcherMessage]
        async for msg in websocket:
            if isinstance(msg, bytes):
                # all Protobuf objects come in DispatcherMessage container
                # which needs to be parsed
                event = DispatcherMessage()
                try:
                    event.ParseFromString(msg)
                except DecodeError:
                    logger.error(f"Cannot parse pbuf event: {msg.decode()}")
                    raise
            else:
                try:
                    event = from_json(msg, data_unmarshaller=evaluator_unmarshaller)
                except DataUnmarshallerError:
                    # Fall back to pickled payloads from older senders.
                    event = from_json(msg, data_unmarshaller=pickle.loads)
            await self._registry.all_experiments[0].dispatch(event)

    @contextmanager
    def store_client(self, websocket: WebSocketServerProtocol) -> Iterator[None]:
        """Context manager for a client connection handler, allowing to know how
        many clients are connected.

        The try/finally guarantees the websocket is removed from
        ``self._clients`` even when the wrapped handler raises; without it a
        failing handler leaked the entry.
        """
        logger.debug("client %s connected", websocket)
        self._clients.add(websocket)
        try:
            yield
        finally:
            self._clients.remove(websocket)

    async def handle_client(
        self, websocket: WebSocketServerProtocol, path: str
    ) -> None:
        """Handle incoming client connections."""
        with self.store_client(websocket):
            async for message in websocket:
                client_event = from_json(
                    message, data_unmarshaller=evaluator_unmarshaller
                )
                logger.debug(f"got message from client: {client_event}")

    async def _server(self) -> None:
        try:
            async with serve(
                self._handler,
                sock=self._config.get_socket(),
                ssl=self._config.get_server_ssl_context(),
            ):
                logger.debug("Running experiment server")
                await self._server_done
            logger.debug("Async server exiting.")
        except Exception:  # pylint: disable=broad-except
            logger.exception("crash/burn")

    def add_experiment(self, experiment: Experiment) -> UUID:
        self._registry.add_experiment(experiment)
        return experiment.id

    async def run_experiment(self, experiment_id: UUID) -> None:
        """Run the experiment with the given experiment_id.

        This is a helper method for use by the CLI, where only one experiment
        at a time makes sense. This method therefore runs the experiment, and
        attempts to gracefully shut down the server when complete.
        """
        logger.debug("running experiment %s", experiment_id)
        experiment = self._registry.get_experiment(experiment_id)
        experiment_task = asyncio.create_task(experiment.run(self._config))
        done, pending = await asyncio.wait(
            [self._server_task, experiment_task], return_when=asyncio.FIRST_COMPLETED
        )
        if experiment_task in done:
            logger.debug("experiment %s was done", experiment_id)
            # raise experiment exception if any
            try:
                experiment_task.result()
                successful_reals = await experiment.successful_realizations(0)
                # This is currently API
                print(f"Successful realizations: {successful_reals}")
            except Exception as e:  # pylint: disable=broad-except
                print(f"Experiment failed: {str(e)}")
                raise
            finally:
                # wait for shutdown of server
                await self.METHOD_NAME()
            return
        # experiment is pending, but the server died, so try cancelling the experiment
        # then raise the server's exception
        for pending_task in pending:
            logger.debug("task %s was pending, cancelling...", pending_task)
            pending_task.cancel()
        for done_task in done:
            done_task.result()
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Copyright Red Hat
#
# SPDX-License-Identifier: GPL-2.0
# Author: Dan Zheng <dzheng@redhat.com>
#
"""
Test cases about memory snapshot deletion
"""
import os
from virttest import data_dir
from virttest import libvirt_version
from virttest import virsh
from virttest.libvirt_xml import vm_xml
from provider.snapshot import snapshot_base
from provider.virtual_disk import disk_base
def create_snapshots(test_obj):
    """
    Create snapshots for the test.

    Creates two snapshots (snap1, snap2) with external memory dump files
    built from the 'snapshot_dict' / 'snapshot_disk_list' parameters.  When
    the vm is configured as 'paused', it is resumed and re-suspended after
    the first snapshot.

    NOTE(review): snapshot parameters are constructed with eval() on config
    strings, which is only acceptable for trusted test configurations.

    :param test_obj: SnapshotTest object
    """
    for count in [1, 2]:
        snap_name = 'snap%d' % count
        mem_snap_file = os.path.join(data_dir.get_tmp_dir(), 'memory_%s.dump' % snap_name)
        snap_dict = eval(test_obj.params.get('snapshot_dict') % (snap_name, mem_snap_file))
        snap_disk_list = eval(test_obj.params.get('snapshot_disk_list'))
        test_obj.test.log.debug("Step: Create snapshot '%s'" % snap_name)
        test_obj.create_snapshot_by_xml(snap_dict, snap_disk_list)
        test_obj.check_snap_list(snap_name, options='', expect_exist=True)
        if count == 1 and test_obj.params.get('vm_status') == 'paused':
            virsh.resume(test_obj.vm.name, **test_obj.virsh_dargs)
            virsh.suspend(test_obj.vm.name, **test_obj.virsh_dargs)
def METHOD_NAME(test_obj):
    """
    Bring the vm into the state requested by the 'vm_status' parameter.

    The vm is started and waited for via a login; when 'paused' is
    requested it is suspended afterwards.

    :param test_obj: SnapshotTest object
    """
    name = test_obj.vm.name
    virsh.start(name, **test_obj.virsh_dargs)
    session = test_obj.vm.wait_for_login()
    session.close()
    if test_obj.params.get('vm_status') == 'paused':
        virsh.suspend(name, **test_obj.virsh_dargs)
def add_new_disk(test_obj):
    """
    Attach a second file-backed disk to the vm.

    :param test_obj: SnapshotTest object
    :return: the DiskBase helper owning the newly created disk image
    """
    disk_spec = eval(test_obj.params.get('disk_dict', '{}'))
    helper = disk_base.DiskBase(test_obj.test, test_obj.vm, test_obj.params)
    helper.new_image_path = helper.add_vm_disk('file', disk_spec)
    return helper
def setup_test(test_obj):
    """
    Prepare the test: attach a disk, set the vm state and create snapshots.

    :param test_obj: SnapshotTest object
    """
    test_obj.params['disk_base_obj'] = add_new_disk(test_obj)
    METHOD_NAME(test_obj)
    current_xml = vm_xml.VMXML.new_from_inactive_dumpxml(test_obj.vm.name)
    test_obj.test.log.debug("Now vm xml after adding the disk:\n%s", current_xml)
    create_snapshots(test_obj)
def run_test(test_obj):
    """
    Delete both snapshots and verify they and their memory files are gone.

    snap1 is deleted by name; snap2 is the current snapshot and is deleted
    via the --current option without an explicit name.

    :param test_obj: SnapshotTest object
    """
    for count in [1, 2]:
        snap_name = 'snap%d' % count
        test_obj.test.log.debug("Step: Delete snapshot '%s'" % snap_name)
        # The second snapshot is removed with --current, so no name is passed.
        options = '' if count == 1 else '--current'
        snap_name_delete = '' if options == '--current' else snap_name
        test_obj.delete_snapshot(snap_name_delete, options=options)
        test_obj.check_snap_list(snap_name, expect_exist=False)
        test_obj.test.log.debug("Checkpoint: The snapshot '%s' is deleted", snap_name)
        mem_snap_file = os.path.join(data_dir.get_tmp_dir(), 'memory_%s.dump' % snap_name)
        if os.path.exists(mem_snap_file):
            test_obj.test.fail("The memory snapshot file '%s' "
                               "should not exist any more after "
                               "the snapshot is deleted" % mem_snap_file)
        else:
            test_obj.test.log.debug("Checkpoint: The memory snapshot file '%s' "
                                    "does not exist any more as expected "
                                    "after the snapshot is deleted", mem_snap_file)
def teardown_test(test_obj):
    """
    Clean up: remove the added disk image, then restore the vm.

    :param test_obj: SnapshotTest object
    """
    test_obj.params['disk_base_obj'].cleanup_disk_preparation('file')
    test_obj.teardown_test()
def run(test, params, env):
    """
    Deletion test for memory only snapshot
    """
    libvirt_version.is_libvirt_feature_supported(params)
    name = params.get("main_vm")
    vm = env.get_vm(name)
    # Keep a pristine copy of the inactive xml so teardown can restore it.
    params['backup_vmxml'] = vm_xml.VMXML.new_from_inactive_dumpxml(name).copy()
    test_obj = snapshot_base.SnapshotTest(vm, test, params)
    try:
        setup_test(test_obj)
        run_test(test_obj)
    finally:
        teardown_test(test_obj)
#!/usr/bin/env python3
import gpt as g
import numpy as np
import os, sys
rng = g.random("test")

# cold start
U = g.qcd.gauge.unit(g.grid([24, 24, 24, 48], g.double))

# Resume from the most recent checkpoint under --root, if any exists.
latest_it = None
it0 = 0
dst = g.default.get("--root", None)
N = 4000
for it in range(N):
    if os.path.exists(f"{dst}/ckpoint_lat.{it}"):
        latest_it = it
if latest_it is not None:
    g.copy(U, g.load(f"{dst}/ckpoint_lat.{latest_it}"))
    # Re-seed the RNG deterministically from the checkpoint identity.
    rng = g.random(f"test{dst}{latest_it}", "vectorized_ranlux24_24_64")
    it0 = latest_it + 1

pc = g.qcd.fermion.preconditioner
inv = g.algorithms.inverter
eofa_ratio = g.qcd.pseudofermion.action.exact_one_flavor_ratio
def two_flavor_ratio(fermion, m1, m2, solver):
    """Two-flavor even-odd Schur ratio action det(D(m1))/det(D(m2))."""
    return g.qcd.pseudofermion.action.two_flavor_ratio_evenodd_schur(
        [fermion(m1, m1), fermion(m2, m2)], solver
    )
def quark(U0, m_plus, m_minus):
    """Mobius domain-wall fermion operator on gauge field U0 with the
    run's fixed kernel parameters (M5=1.8, b=1.5, c=0.5, Ls=32)."""
    mobius_params = {
        "mass_plus": m_plus,
        "mass_minus": m_minus,
        "M5": 1.8,
        "b": 1.5,
        "c": 0.5,
        "Ls": 32,
        "boundary_phases": [1, 1, 1, -1],
    }
    return g.qcd.fermion.mobius(U0, **mobius_params)
pc = g.qcd.fermion.preconditioner
inv = g.algorithms.inverter
sympl = g.algorithms.integrator.symplectic

F_grid_eo = quark(U, 1, 1).F_grid_eo

# Exact (tight tolerance) and sloppy solvers for action/force evaluation.
cg_e = inv.cg({"eps": 1e-10, "maxiter": 20000})
cg_s = inv.cg(
    {"eps": 1e-7, "maxiter": 20000}
)  # 1e-5 -> dH=O(5), 1e-6 -> dH=O(0.18), 1e-7 -> dH=O(0.048)
slv_e = inv.preconditioned(pc.eo2_ne(), cg_e)
slv_s = inv.mixed_precision(inv.preconditioned(pc.eo2_ne(), cg_s), g.single, g.double)

# conjugate momenta
U_mom = g.group.cartesian(U)
rng.normal_element(U_mom)

action_gauge_mom = g.qcd.scalar.action.mass_term()
action_gauge = g.qcd.gauge.action.iwasaki(2.13)

# Rational approximation of 1/sqrt(x) on [1, 11] used by the EOFA term.
rat = g.algorithms.rational.zolotarev_inverse_square_root(1.0**0.5, 11**0.5, 7)
rat_fnc = g.algorithms.rational.rational_function(rat.zeros, rat.poles, rat.norm)

# see params.py for parameter motivation
# Each entry: (m_numerator, m_denominator, rational_fnc, action_factory,
#              exact_solver, sloppy_solver)
hasenbusch_ratios = [  # Nf=2+1
    (0.45, 1.0, None, two_flavor_ratio, cg_e, cg_s),
    (0.18, 0.45, None, two_flavor_ratio, cg_e, cg_s),
    (0.07, 0.18, None, two_flavor_ratio, cg_e, cg_s),
    (0.017, 0.07, None, two_flavor_ratio, cg_e, cg_s),
    (0.0055, 0.017, None, two_flavor_ratio, cg_e, cg_s),
    (0.0368, 1.0, rat_fnc, eofa_ratio, slv_e, slv_s),
    # (0.001477, 0.1, None, two_flavor_ratio, cg_e, cg_s),
    # (1.0, 1.0, rat_fnc, eofa_ratio, slv_e, slv_s),
]

# Gauge links plus one pseudofermion field per action term.
fields = [
    (U + [g.vspincolor(F_grid_eo)]),
    (U + [g.vspincolor(F_grid_eo)]),
    (U + [g.vspincolor(F_grid_eo)]),
    (U + [g.vspincolor(F_grid_eo)]),
    (U + [g.vspincolor(F_grid_eo)]),
    (U + [g.vspincolor(U[0].grid)]),
    # (U + [g.vspincolor(F_grid_eo)]),
    # (U + [g.vspincolor(U[0].grid)])
]

# test test
# rat = g.algorithms.rational.zolotarev_inverse_square_root(1.0**0.5, 4**0.5, 2)
# rat_fnc = g.algorithms.rational.rational_function(rat.zeros, rat.poles, rat.norm)
# hasenbusch_ratios = [ # Nf=2+1
#     (0.6, 1.0, rat_fnc),
#     (0.6, 1.0, rat_fnc),
#     (0.6, 1.0, rat_fnc),
#     (0.3, 0.6, rat_fnc),
#     (0.3, 0.6, rat_fnc)
# ]
# test test end

# exact actions
action_fermions_e = [
    af(lambda m_plus, m_minus: quark(U, m_plus, m_minus), m1, m2, se)
    for m1, m2, rf, af, se, ss in hasenbusch_ratios
]
# sloppy actions
action_fermions_s = [
    af(lambda m_plus, m_minus: quark(U, m_plus, m_minus), m1, m2, ss)
    for m1, m2, rf, af, se, ss in hasenbusch_ratios
]

metro = g.algorithms.markov.metropolis(rng)

pure_gauge = True

# Independent RNG streams (one per action term), seeded from the main rng.
split_rng = [
    g.random(f"{[rng.cnormal() for i in range(4)]}") for j in range(len(hasenbusch_ratios))
]

# sd = g.split_map(
#     U[0].grid,
#     [
#         lambda dst, ii=i:
#         action_fermions_e[ii].draw(dst, split_rng[ii], hasenbusch_ratios[ii][2])
#         if hasenbusch_ratios[ii][3] is eofa_ratio else
#         action_fermions_e[ii].draw(dst, split_rng[ii])
#         for i in range(len(hasenbusch_ratios))
#     ],
#     [1,2,2,2]
# )
def hamiltonian(draw):
    """Evaluate the total Hamiltonian H = S(U, phi) + T(pi).

    With draw=True fresh momenta and pseudofermion fields are drawn first
    (heatbath step); for EOFA terms the rational approximation is checked
    against a direct evaluation of the action.  Returns (H, S) where S
    excludes the momentum term.
    """
    if draw:
        rng.normal_element(U_mom)
        s = action_gauge(U)
        if not pure_gauge:
            # sp = sd(fields)
            for i in range(len(hasenbusch_ratios)):
                if hasenbusch_ratios[i][3] is eofa_ratio:
                    si = action_fermions_e[i].draw(fields[i], rng, hasenbusch_ratios[i][2])
                    # si = sp[i]
                    # Cross-check the drawn action value against a direct
                    # evaluation to monitor the rational approximation error.
                    si_check = action_fermions_e[i](fields[i])
                    g.message("action", i, si_check)
                    r = f"{hasenbusch_ratios[i][0]}/{hasenbusch_ratios[i][1]}"
                    e = abs(si / si_check - 1)
                    g.message(f"Error of rational approximation for Hasenbusch ratio {r}: {e}")
                else:
                    si = action_fermions_e[i].draw(fields[i], rng)
                s += si
        h = s + action_gauge_mom(U_mom)
    else:
        s = action_gauge(U)
        if not pure_gauge:
            for i in range(len(hasenbusch_ratios)):
                s += action_fermions_e[i](fields[i])
        h = s + action_gauge_mom(U_mom)
    return h, s
# Collects per-force-term gradient norms and timings during integration.
log = sympl.log()

# sf = g.split_map(
#     U[0].grid,
#     [
#         lambda dst, src, ii=i: g.eval(dst, action_fermions_s[ii].gradient(src, src[0:len(U)]))
#         for i in range(len(hasenbusch_ratios))
#     ],
#     [1,2,2,2]
# )
def fermion_force():
    """Total pseudofermion force from the sloppy actions.

    Returns a list of momentum-shaped fields (one per gauge link direction);
    all zeros while pure_gauge is set.
    """
    x = [g.group.cartesian(u) for u in U]
    for y in x:
        y[:] = 0
    if not pure_gauge:
        forces = [[g.lattice(y) for y in x] for i in fields]
        log.time("fermion forces")
        for i in range(len(hasenbusch_ratios)):
            forces[i] = action_fermions_s[i].gradient(fields[i], fields[i][0 : len(U)])
        log.time()
        for i in range(len(hasenbusch_ratios)):
            # Record each term's force magnitude, then accumulate it.
            log.gradient(forces[i], f"{hasenbusch_ratios[i][0]}/{hasenbusch_ratios[i][1]} {i}")
            for j in range(len(x)):
                x[j] += forces[i][j]
    return x
# Symplectic update steps: iq moves the links, ip_* kick the momenta with
# the gauge and fermion forces respectively.
iq = sympl.update_q(U, log(lambda: action_gauge_mom.gradient(U_mom, U_mom), "gauge_mom"))
ip_gauge = sympl.update_p(U_mom, log(lambda: action_gauge.gradient(U, U), "gauge"))
ip_fermion = sympl.update_p(U_mom, fermion_force)

# Nested OMF2: 15 fermion steps, each with 4 gauge sub-steps.
# mdint = sympl.OMF4(1, ip_fermion, sympl.OMF2(4, ip_gauge, iq))
mdint = sympl.OMF2(15, ip_fermion, sympl.OMF2(4, ip_gauge, iq))
def METHOD_NAME(tau):
    """Run one HMC trajectory of length tau.

    Draws momenta/pseudofermions (heatbath), integrates the molecular
    dynamics, and applies a Metropolis accept/reject step.

    Returns [accept_flag, dS, dH].
    """
    accept_reject = metro(U)
    h_before, s_before = hamiltonian(True)
    mdint(tau)
    h_after, s_after = hamiltonian(False)
    delta_s = s_after - s_before
    delta_h = h_after - h_before
    return [accept_reject(h_after, h_before), delta_s, delta_h]
accept, total = 0, 0
for it in range(it0, N):
    # Thermalize with pure gauge updates for the first 10 iterations.
    pure_gauge = it < 10
    g.message(pure_gauge)
    a, dS, dH = METHOD_NAME(1.0)
    accept += a
    total += 1
    plaq = g.qcd.gauge.plaquette(U)
    g.message(f"HMC {it} has P = {plaq}, dS = {dS}, dH = {dH}, acceptance = {accept/total}")
    for x in log.grad:
        g.message(f"{x} force norm2/sites =", np.mean(log.get(x)), "+-", np.std(log.get(x)))
    g.message(f"Timing:\n{log.time}")
    if it % 10 == 0:
        # reset statistics
        log.reset()
        g.message("Reset log")
    g.save(f"{dst}/ckpoint_lat.{it}", U, g.format.nersc())
#!/usr/bin/python
# Import library functions we need
import sys
import time
# Prefer the maintained rpi_ws281x package; fall back to the legacy
# neopixel module when it is not installed.
try:
    from rpi_ws281x import __version__, PixelStrip, Adafruit_NeoPixel, Color
except ImportError:
    from neopixel import Adafruit_NeoPixel as PixelStrip, Color
    __version__ = "legacy"
# Python 2/3 compatibility shim for reading commands from stdin.
try:
    raw_input  # Python 2
except NameError:
    raw_input = input  # Python 3

# LED strip configuration:
LED_COUNT = 8  # Number of LED pixels.
LED_PIN = 18  # GPIO pin connected to the pixels (must support PWM!).
LED_FREQ_HZ = 800000  # LED signal frequency in hertz (usually 800khz)
LED_DMA = 10  # DMA channel to use for generating signal (try 10)
LED_BRIGHTNESS = 255  # Set to 0 for darkest and 255 for brightest
LED_INVERT = False  # True to invert the signal (when using NPN transistor level shift)
LED_CHANNEL = 0  # PWM channel
LED_GAMMA = [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2,
2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5,
6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 11, 11,
11, 12, 12, 13, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18,
19, 19, 20, 21, 21, 22, 22, 23, 23, 24, 25, 25, 26, 27, 27, 28,
29, 29, 30, 31, 31, 32, 33, 34, 34, 35, 36, 37, 37, 38, 39, 40,
40, 41, 42, 43, 44, 45, 46, 46, 47, 48, 49, 50, 51, 52, 53, 54,
55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
71, 72, 73, 74, 76, 77, 78, 79, 80, 81, 83, 84, 85, 86, 88, 89,
90, 91, 93, 94, 95, 96, 98, 99,100,102,103,104,106,107,109,110,
111,113,114,116,117,119,120,121,123,124,126,128,129,131,132,134,
135,137,138,140,142,143,145,146,148,150,151,153,155,157,158,160,
162,163,165,167,169,170,172,174,176,178,179,181,183,185,187,189,
191,193,194,196,198,200,202,204,206,208,210,212,214,216,218,220,
222,224,227,229,231,233,235,237,239,241,244,246,248,250,252,255]
# Override the defaults from the command line.
# argv: 1=count, 2=wait_ms, 3=mode, 4=brightness %, 5=gamma flag, 6=channel, 7=pin
LED_COUNT = max(0,int(sys.argv[1]))
WAIT_MS = max(0,int(sys.argv[2]))
MODE = sys.argv[3]
# Brightness is given as a percentage and scaled into 0-255.
LED_BRIGHTNESS = min(255,int(max(0,float(sys.argv[4])) * 255 / 100))
if (sys.argv[5].lower() != "true"):
    # Gamma correction disabled: use the identity mapping instead.
    LED_GAMMA = range(256)
LED_CHANNEL = int(sys.argv[6])
LED_PIN = int(sys.argv[7])
def getRGBfromI(RGBint):
    """Split a packed 0xRRGGBB integer into an (r, g, b) tuple."""
    return (RGBint >> 16) & 255, (RGBint >> 8) & 255, RGBint & 255
# Define functions which animate LEDs in various ways.
def setPixel(strip, i, color):
    """Set a single pixel and immediately push the update to the strip."""
    strip.setPixelColor(i, color)
    strip.show()
def setPixels(strip, s, e, color, wait_ms=30):
    """Set pixels from s(tart) to e(nd), inclusive.

    With wait_ms > 0 each pixel is shown individually with a delay (an
    animated fill); otherwise all pixels are set first and pushed with a
    single show() call.
    """
    if (wait_ms > 0):
        for i in range(s, e+1):
            strip.setPixelColor(i, color)
            strip.show()
            time.sleep(wait_ms/1000.0)
    else:
        for i in range(s, e+1):
            strip.setPixelColor(i, color)
        strip.show()
def setBrightness(strip, brightness, wait_ms=30):
    """Apply a new overall brightness, refresh the strip, pause wait_ms ms."""
    strip.setBrightness(brightness)
    strip.show()
    time.sleep(wait_ms/1000.0)
def colorWipe(strip, color, wait_ms=30):
    """Wipe color across display a pixel at a time.

    With wait_ms > 0 every pixel update is shown and delayed (animated
    wipe); otherwise the whole strip is set and shown once.
    """
    if (wait_ms > 0):
        for i in range(strip.numPixels()):
            strip.setPixelColor(i, color)
            strip.show()
            time.sleep(wait_ms/1000.0)
    else:
        for i in range(strip.numPixels()):
            strip.setPixelColor(i, color)
        strip.show()
def shiftUp(strip, color, wait_ms=30):
    """Shift all pixels one way.

    Pixel 0 takes the new color and every other pixel inherits its
    predecessor's previous color.  With wait_ms > 0 each step is delayed.
    """
    oldcolour = strip.getPixelColor(0)
    strip.setPixelColor(0, color)
    strip.show()
    if (wait_ms > 0):
        time.sleep(wait_ms/1000.0)
        for i in range(1,LED_COUNT):
            newcolour = oldcolour
            oldcolour = strip.getPixelColor(i)
            strip.setPixelColor(i, newcolour)
            strip.show()
            time.sleep(wait_ms/1000.0)
    else:
        for i in range(1,LED_COUNT):
            newcolour = oldcolour
            oldcolour = strip.getPixelColor(i)
            strip.setPixelColor(i, newcolour)
            strip.show()
def shiftDown(strip, color, wait_ms=30):
    """Shift all pixels the other way.

    The last pixel takes the new color and every earlier pixel inherits
    its successor's previous color.  With wait_ms > 0 each step is delayed.
    """
    oldcolour = strip.getPixelColor(LED_COUNT-1)
    strip.setPixelColor(LED_COUNT-1, color)
    strip.show()
    if (wait_ms > 0):
        time.sleep(wait_ms/1000.0)
        for i in range(LED_COUNT-2,-1,-1):
            newcolour = oldcolour
            oldcolour = strip.getPixelColor(i)
            strip.setPixelColor(i, newcolour)
            strip.show()
            time.sleep(wait_ms/1000.0)
    else:
        for i in range(LED_COUNT-2,-1,-1):
            newcolour = oldcolour
            oldcolour = strip.getPixelColor(i)
            strip.setPixelColor(i, newcolour)
            strip.show()
def wheel(pos):
    """Generate rainbow colors across 0-255 positions."""
    if pos < 85:
        # Red -> green segment.
        r, grn, b = pos * 3, 255 - pos * 3, 0
    elif pos < 170:
        # Green -> blue segment.
        p = pos - 85
        r, grn, b = 255 - p * 3, 0, p * 3
    else:
        # Blue -> red segment.
        p = pos - 170
        r, grn, b = 0, p * 3, 255 - p * 3
    return Color(r, grn, b)
def rainbow(strip, wait_ms=20, iterations=2):
    """Draw a rainbow that fades across all pixels at once."""
    for step in range(256 * iterations):
        for px in range(strip.numPixels()):
            strip.setPixelColor(px, wheel((px + step) & 255))
        strip.show()
        time.sleep(wait_ms / 1000.0)
def METHOD_NAME(strip, wait_ms=20, iterations=2):
    """Draw rainbow that uniformly distributes itself across all pixels.

    Fix: the color offset used true division (``/``), which yields a float
    under Python 3, and ``float & 255`` raises TypeError at runtime.  Floor
    division (``//``) restores the intended Python 2 integer semantics on
    both interpreter versions.
    """
    for j in range(256*iterations):
        for i in range(strip.numPixels()):
            strip.setPixelColor(i, wheel(((i * 256 // strip.numPixels()) + j) & 255))
        strip.show()
        time.sleep(wait_ms/1000.0)
# Main loop:
if __name__ == '__main__':
    # Create NeoPixel object with appropriate configuration.
    #strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS)
    # The legacy neopixel constructor does not accept a gamma table.
    if __version__ == "legacy":
        strip = PixelStrip(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL)
    else:
        strip = PixelStrip(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL, LED_GAMMA)# Intialize the library (must be called once before other functions).
    strip.begin()
    ## Color wipe animations.
    colorWipe(strip, Color(127, 0, 0), WAIT_MS) # Red wipe
    colorWipe(strip, Color(0, 127, 0), WAIT_MS) # Green wipe
    colorWipe(strip, Color(0, 0, 127), WAIT_MS) # Blue wipe
    colorWipe(strip, Color(0, 0, 0), WAIT_MS) # Off wipe
    ## Rainbow animations.
    #rainbow(strip)
    #rainbowCycle(strip)
    #colorWipe(strip, Color(0, 0, 0)) # Off wipe
    # Command loop: read comma-separated commands from stdin until EOF.
    #   "brightness,<n>"           set overall brightness
    #   "r,g,b"                    wipe or shift a colour (depends on MODE)
    #   "i,r,g,b"   (pixel modes)  set a single pixel
    #   "s,e,r,g,b" (pixel modes)  set a pixel range
    while True:
        try:
            data = raw_input()
            bits = data.split(',')
            if len(bits) == 2:
                if bits[0] == "brightness":
                    setBrightness(strip, min(255,max(0,int(bits[1]))), WAIT_MS)
            if len(bits) == 3:
                if MODE == "shiftu":
                    shiftUp(strip, Color(int(bits[0]), int(bits[1]), int(bits[2])), WAIT_MS)
                elif MODE == "shiftd":
                    shiftDown(strip, Color(int(bits[0]), int(bits[1]), int(bits[2])), WAIT_MS)
                else:
                    colorWipe(strip, Color(int(bits[0]), int(bits[1]), int(bits[2])), WAIT_MS)
            if (MODE[0] == 'p' and len(bits) == 4):
                setPixel(strip, int(bits[0]), Color(int(bits[1]), int(bits[2]), int(bits[3]) ))
            if (MODE[0] == 'p' and len(bits) == 5):
                setPixels(strip, int(bits[0]), int(bits[1]), Color(int(bits[2]), int(bits[3]), int(bits[4]) ), WAIT_MS)
        except (EOFError, SystemExit): # hopefully always caused by us sigint'ing the program
            sys.exit(0)
        except Exception as ex:
            print("bad data: "+data)
print(ex) |
298,912 | connect | # (C) Datadog, Inc. 2021-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from pymongo import MongoClient, ReadPreference
from pymongo.errors import (
ConfigurationError,
ConnectionFailure,
OperationFailure,
ProtocolError,
ServerSelectionTimeoutError,
)
from datadog_checks.mongo.common import MongosDeployment, ReplicaSetDeployment, StandaloneDeployment
# The name of the application that created this MongoClient instance. MongoDB 3.4 and newer will print this value in
# the server log upon establishing each connection. It is also recorded in the slow query log and profile collections.
DD_APP_NAME = 'datadog-agent'

# We collect here all pymongo exceptions that would result in a CRITICAL service check.
# The tuple is intended for use with `isinstance`/`except`, so ordering is irrelevant.
CRITICAL_FAILURE = (
    ConfigurationError,  # This occurs when TLS is misconfigured.
    ConnectionFailure,  # This is a generic exception for any problems when connecting to mongodb.
    OperationFailure,  # This occurs when authentication is incorrect.
    # This means either no server is available or a replicaset has not elected a primary in the timeout window.
    # In both cases it makes sense to submit a CRITICAL service check to Datadog.
    ServerSelectionTimeoutError,
    # Errors at the level of the protocol result in a lost/degraded connection. We can issue a CRITICAL check for this.
    ProtocolError,
)
class MongoApi(object):
    """Mongodb connection through pymongo.MongoClient

    :params config: MongoConfig object.
    :params log: Check log.
    :params replicaset: If replication is enabled, this parameter specifies the name of the replicaset.
        Valid for ReplicaSetDeployment deployments
    """

    def __init__(self, config, log, replicaset: str = None):
        self._config = config
        self._log = log
        # Base client options; directConnection pins the client to the single
        # configured node instead of discovering the whole topology.
        options = {
            'host': self._config.server if self._config.server else self._config.hosts,
            'socketTimeoutMS': self._config.timeout,
            'connectTimeoutMS': self._config.timeout,
            'serverSelectionTimeoutMS': self._config.timeout,
            'directConnection': True,
            'read_preference': ReadPreference.PRIMARY_PREFERRED,
            'appname': DD_APP_NAME,
        }
        if replicaset:
            options['replicaSet'] = replicaset
        # User-supplied options can override any of the defaults above.
        options.update(self._config.additional_options)
        options.update(self._config.tls_params)
        # Arbiters do not allow authenticated connections, so auth options are
        # only added when the target node is not an arbiter.
        if self._config.do_auth and not self._is_arbiter(options):
            self._log.info("Using '%s' as the authentication database", self._config.auth_source)
            if self._config.username:
                options['username'] = self._config.username
            if self._config.password:
                options['password'] = self._config.password
            if self._config.auth_source:
                options['authSource'] = self._config.auth_source
        self._log.debug("options: %s", options)
        self._cli = MongoClient(**options)
        # Set lazily by refresh_deployment_type().
        self.deployment_type = None

    def __getitem__(self, item):
        # Delegate subscripting to the underlying client (api['admin'] -> database).
        return self._cli[item]

    def METHOD_NAME(self):
        """Verify connectivity; re-raise ConnectionFailure after logging it."""
        try:
            # The ping command is cheap and does not require auth.
            self['admin'].command('ping')
        except ConnectionFailure as e:
            self._log.debug('ConnectionFailure: %s', e)
            raise

    def server_info(self, session=None):
        return self._cli.server_info(session)

    def list_database_names(self, session=None):
        return self._cli.list_database_names(session)

    def _is_arbiter(self, options):
        # Probe with a throwaway client (auth options are not yet set here).
        cli = MongoClient(**options)
        is_master_payload = cli['admin'].command('isMaster')
        return is_master_payload.get('arbiterOnly', False)

    @staticmethod
    def _get_rs_deployment_from_status_payload(repl_set_payload, cluster_role):
        replset_name = repl_set_payload["set"]
        replset_state = repl_set_payload["myState"]
        return ReplicaSetDeployment(replset_name, replset_state, cluster_role=cluster_role)

    def refresh_deployment_type(self):
        """Detect and cache whether this node is a mongos, replset member or standalone."""
        # getCmdLineOpts is the runtime configuration of the mongo instance. Helpful to know whether the node is
        # a mongos or mongod, if the mongod is in a shard, if it's in a replica set, etc.
        try:
            options = self['admin'].command("getCmdLineOpts")['parsed']
        except Exception as e:
            self._log.debug(
                "Unable to run `getCmdLineOpts`, got: %s. Assuming this is an Alibaba ApsaraDB instance.", str(e)
            )
            # `getCmdLineOpts` is forbidden on Alibaba ApsaraDB
            self.deployment_type = self._get_alibaba_deployment_type()
            return
        cluster_role = None
        if 'sharding' in options:
            if 'configDB' in options['sharding']:
                # Only mongos processes carry a configDB setting.
                self._log.debug("Detected MongosDeployment. Node is principal.")
                self.deployment_type = MongosDeployment()
                return
            elif 'clusterRole' in options['sharding']:
                cluster_role = options['sharding']['clusterRole']
        replication_options = options.get('replication', {})
        if 'replSetName' in replication_options or 'replSet' in replication_options:
            repl_set_payload = self['admin'].command("replSetGetStatus")
            replica_set_deployment = self._get_rs_deployment_from_status_payload(repl_set_payload, cluster_role)
            is_principal = replica_set_deployment.is_principal()
            is_principal_log = "" if is_principal else "not "
            self._log.debug("Detected ReplicaSetDeployment. Node is %sprincipal.", is_principal_log)
            self.deployment_type = replica_set_deployment
            return
        self._log.debug("Detected StandaloneDeployment. Node is principal.")
        self.deployment_type = StandaloneDeployment()

    def _get_alibaba_deployment_type(self):
        """Fallback detection path for Alibaba ApsaraDB, where getCmdLineOpts is forbidden."""
        is_master_payload = self['admin'].command('isMaster')
        if is_master_payload.get('msg') == 'isdbgrid':
            return MongosDeployment()
        # On alibaba cloud, a mongo node is either a mongos or part of a replica set.
        repl_set_payload = self['admin'].command("replSetGetStatus")
        if repl_set_payload.get('configsvr') is True:
            cluster_role = 'configsvr'
        elif self['admin'].command('shardingState').get('enabled') is True:
            # Use `shardingState` command to know whether or not the replicaset
            # is a shard or not.
            cluster_role = 'shardsvr'
        else:
            cluster_role = None
        return self._get_rs_deployment_from_status_payload(repl_set_payload, cluster_role)
298,913 | return text | """
Faraday Penetration Test IDE
Copyright (C) 2018 Infobyte LLC (http://www.infobytesec.com/)
See the file 'doc/LICENSE' for the license information
"""
import re
from faraday_plugins.plugins.plugin import PluginXMLFormat
from faraday_plugins.plugins.plugins_utils import get_vulnweb_url_fields
import xml.etree.ElementTree as ET
def cleanhtml(raw_html):
    """Strip every HTML/XML tag from *raw_html* and return the plain text."""
    # Non-greedy match so each tag is removed individually.
    tag_pattern = re.compile('<.*?>')
    return re.sub(tag_pattern, '', raw_html)
class WebInspectParser():
    """Parse a WebInspect XML report into plain dicts consumable by the plugin."""

    def __init__(self, output):
        self.xml = ET.fromstring(output)
        self.issues = self.xml.findall("Issues/Issue")

    def parse_severity(self, severity):
        """Map WebInspect's numeric severity string to a faraday severity name."""
        severity_dict = {
            "0": "info",
            "1": "low",
            "2": "med",
            "3": "high",
            "4": "critical"}
        # Unknown or missing values default to "info".
        return severity_dict.get(severity) or "info"

    def METHOD_NAME(self, tag, element):
        """Return the text of *tag* under *element*, or "" when the tag is absent."""
        try:
            return element.find(tag).text
        except AttributeError:
            # element.find() returned None: the tag does not exist. The
            # original bare `except:` hid unrelated failures.
            return ""

    def parse(self):
        """Convert every <Issue> into a {Host, Service, Interface, Vuln} dict."""
        # Maps WebInspect tag -> (faraday object, faraday field).
        map_objects_fields = {
            "Name": ["Vuln", "name"],
            "URL": ["Vuln", "website"],
            "Scheme": ["Service", "name"],
            "Host": ["Host", "name"],
            "Port": ["Service", "port"],
            "AttackMethod": ["Vuln", "method"],
            "VulnerableSession": ["Vuln", "request"],
            "VulnerabilityID": ["Vuln", "reference"],
            "RawResponse": ["Vuln", "response"],
            "Summary": ["Vuln", "description"],
            "Implication": ["Vuln", "data"],
            "Fix": ["Vuln", "resolution"],
            "Reference Info": ["Vuln", "reference"],
            "Severity": ["Vuln", "severity"]
        }
        result = []
        for issue in self.issues:
            obj = {
                "Host": {},
                "Service": {},
                "Interface": {},
                "Vuln": {"reference": []}
            }
            for tag, obj_property in map_objects_fields.items():
                value = self.METHOD_NAME(tag, issue)
                if value is not None:
                    faraday_obj_name, faraday_field = obj_property
                    if faraday_field == "reference":
                        obj[faraday_obj_name]["reference"].append(value)
                    else:
                        obj[faraday_obj_name][faraday_field] = value
            # This for loads Summary, Implication, Fix and Reference
            for section in issue.findall("ReportSection"):
                try:
                    field = section.find("Name").text
                    value = section.find("SectionText").text
                    faraday_obj_name, faraday_field = map_objects_fields[field]
                except (AttributeError, KeyError):  # nosec - missing tag or unmapped section
                    continue
                if faraday_field == "reference" and value != "":
                    obj[faraday_obj_name]["reference"].append(cleanhtml(value))
                else:
                    obj[faraday_obj_name][faraday_field] = value
            result.append(obj)
        return result
class WebInspectPlugin(PluginXMLFormat):
    """
    This plugin handles WebInspect reports.
    """

    def __init__(self, *arg, **kwargs):
        super().__init__(*arg, **kwargs)
        self.id = "Webinspect"
        self.name = "Webinspect"
        self.plugin_version = "0.0.1"
        self.version = "1.0.0"
        # Root XML tag the framework uses to recognise a WebInspect report.
        self.identifier_tag = ["Scan"]

    def parseOutputString(self, output):
        # Parse the raw XML report, then register one host/service/vuln
        # triple per issue through the framework's createAndAdd* helpers.
        parser = WebInspectParser(output)
        vulns = parser.parse()
        for vuln in vulns:
            host_id = self.createAndAddHost(vuln.get("Host").get("name"))
            # The scheme (http/https) doubles as both service name and protocol.
            service_id = self.createAndAddServiceToHost(host_id, vuln.get("Service").get("name"),
                                                        protocol=vuln.get("Service").get("name"),
                                                        ports=[vuln.get("Service").get("port")])
            self.createAndAddVulnWebToService(
                host_id, service_id,
                vuln.get("Vuln").get("name"),
                # The URL is split into website/path/query components.
                website=get_vulnweb_url_fields(vuln.get("Vuln").get("website")).get("website"),
                path=get_vulnweb_url_fields(vuln.get("Vuln").get("website")).get("path"),
                query=get_vulnweb_url_fields(vuln.get("Vuln").get("website")).get("query"),
                method=vuln.get("Vuln").get("method"),
                request=vuln.get("Vuln").get("request"),
                # filter(None, ...) drops empty reference strings.
                ref=list(filter(None, vuln.get("Vuln").get("reference"))),
                response=vuln.get("Vuln").get("response"),
                desc=cleanhtml(vuln.get("Vuln").get("description")),
                resolution=cleanhtml(vuln.get("Vuln").get("resolution")),
                severity=parser.parse_severity(vuln.get("Vuln").get("severity"))
            )
def createPlugin(*args, **kwargs):
    """Factory entry point used by faraday to instantiate this plugin."""
    # Removed the stray trailing `|` that corrupted the return statement.
    return WebInspectPlugin(*args, **kwargs)
298,914 | is user already registered | # -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2021-present Kaleidos Ventures SL
#
from typing import Callable
import uuid
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.models import update_last_login
from django.db import IntegrityError
from django.db import transaction as tx
from django.utils.translation import gettext_lazy as _
from taiga.base import exceptions as exc
from taiga.base.mails import mail_builder
from taiga.users.models import User
from taiga.users.serializers import UserAdminSerializer
from taiga.users.services import get_and_validate_user
from taiga.projects.services.invitations import get_membership_by_token
from .exceptions import AuthenticationFailed, InvalidToken, TokenError
from .settings import api_settings
from .tokens import RefreshToken, CancelToken, UntypedToken
from .signals import user_registered as user_registered_signal
#####################
## AUTH PLUGINS
#####################
# Registry of the available authentication backends, keyed by plugin name.
auth_plugins = {}


def register_auth_plugin(name: str, login_func: Callable):
    """Register *login_func* as the login entry point for plugin *name*."""
    auth_plugins[name] = {"login_func": login_func}


def get_auth_plugins():
    """Return the registry of every registered auth plugin."""
    return auth_plugins
#####################
## AUTH SERVICES
#####################
def make_auth_response_data(user):
    """Serialize *user* and attach a freshly issued refresh/access token pair."""
    refresh = RefreshToken.for_user(user)
    data = dict(UserAdminSerializer(user).data)
    data['refresh'] = str(refresh)
    data['auth_token'] = str(refresh.access_token)
    # Optionally mirror the behaviour of Django's session login.
    if api_settings.UPDATE_LAST_LOGIN:
        update_last_login(None, user)
    return data
def login(username: str, password: str):
    """Authenticate the given credentials and return the auth payload.

    :raises AuthenticationFailed: when no active account matches.
    """
    try:
        user = get_and_validate_user(username=username, password=password)
    except exc.WrongArguments:
        detail = _('No active account found with the given credentials')
        raise AuthenticationFailed(detail, 'invalid_credentials')
    # Generate data
    return make_auth_response_data(user)
def refresh_token(refresh_token: str):
    """Exchange a refresh token for a new access token.

    With rotation enabled, the refresh token itself is renewed (optionally
    denylisting the old one) and included in the response.

    :raises InvalidToken: if the supplied refresh token is invalid.
    """
    try:
        refresh = RefreshToken(refresh_token)
    except TokenError:
        raise InvalidToken()

    response = {'auth_token': str(refresh.access_token)}
    if not api_settings.ROTATE_REFRESH_TOKENS:
        return response

    if api_settings.DENYLIST_AFTER_ROTATION:
        try:
            # Attempt to denylist the given refresh token
            refresh.denylist()
        except AttributeError:
            # If denylist app not installed, `denylist` method will
            # not be present
            pass
    refresh.set_jti()
    refresh.set_exp()
    response['refresh'] = str(refresh)
    return response
def verify_token(token: str):
    """Validate *token* (of any token type); raises on failure."""
    UntypedToken(token)  # construction performs the validation
    return {}
#####################
## REGISTER SERVICES
#####################
def send_register_email(user) -> bool:
    """
    Given a user, send register welcome email
    message to specified user.

    The message embeds a cancel token so the account can be removed
    directly from the email. Returns True when a message was sent.
    """
    cancel_token = str(CancelToken.for_user(user))
    email = mail_builder.registered_user(user, {"user": user, "cancel_token": cancel_token})
    return bool(email.send())
def METHOD_NAME(*, username:str, email:str) -> (bool, str):
    """
    Check whether *username* or *email* already belongs to an account.

    Returns ``(True, reason)`` when a clash exists and ``(False, None)``
    otherwise. Both lookups are case-insensitive.
    """
    users = get_user_model().objects
    if users.filter(username__iexact=username).exists():
        return (True, _("Username is already in use."))
    if users.filter(email__iexact=email).exists():
        return (True, _("Email is already in use."))
    return (False, None)
@tx.atomic
def public_register(username: str, password: str, email: str, full_name: str):
    """
    Register a new user following the public (self-service) register flow.

    :raises exc.WrongArguments: if the username/email are taken or the user
        cannot be persisted because of a conflict.
    :returns: User
    """
    # Fast pre-check for duplicates; the IntegrityError handler below still
    # covers the race where another request registers in between.
    is_registered, reason = METHOD_NAME(username=username, email=email)
    if is_registered:
        raise exc.WrongArguments(reason)

    user_model = get_user_model()
    user = user_model(username=username,
                      email=email,
                      email_token=str(uuid.uuid4()),
                      new_email=email,
                      verified_email=False,
                      full_name=full_name,
                      read_new_terms=True)
    user.set_password(password)
    try:
        user.save()
    except IntegrityError:
        raise exc.WrongArguments(_("User is already registered."))

    # Welcome email + signal for any listeners (stats, onboarding, ...).
    send_register_email(user)
    user_registered_signal.send(sender=user.__class__, user=user)
    return user
@tx.atomic
def private_register_for_new_user(token:str, username:str, email:str,
                                  full_name:str, password:str):
    """
    Given an invitation token, register a new user and attach it to the
    membership the token points at.

    :raises exc.WrongArguments: if the username/email are taken or the user
        cannot be persisted.
    :returns: User
    """
    # Removed the stray trailing `|` that corrupted the final return line.
    is_registered, reason = METHOD_NAME(username=username, email=email)
    if is_registered:
        raise exc.WrongArguments(reason)

    user_model = get_user_model()
    user = user_model(username=username,
                      email=email,
                      full_name=full_name,
                      email_token=str(uuid.uuid4()),
                      new_email=email,
                      verified_email=False,
                      read_new_terms=True)
    user.set_password(password)
    try:
        user.save()
    except IntegrityError:
        raise exc.WrongArguments(_("Error while creating new user."))

    # Link the invited membership to the freshly created user.
    membership = get_membership_by_token(token)
    membership.user = user
    membership.save(update_fields=["user"])

    send_register_email(user)
    user_registered_signal.send(sender=user.__class__, user=user)
    return user
298,915 | get schedule | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
# Public API of this generated module.
__all__ = [
    'GetScheduleResult',
    'AwaitableGetScheduleResult',
    'get_schedule',
    'get_schedule_output',
]
@pulumi.output_type
class GetScheduleResult:
    """
    Azure Resource Manager resource envelope.
    """
    # NOTE: generated by pulumi codegen. The @pulumi.output_type decorator
    # introspects the getters below, so names and decorators must not change.
    def __init__(__self__, id=None, name=None, schedule_properties=None, system_data=None, type=None):
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if schedule_properties and not isinstance(schedule_properties, dict):
            raise TypeError("Expected argument 'schedule_properties' to be a dict")
        pulumi.set(__self__, "schedule_properties", schedule_properties)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="scheduleProperties")
    def schedule_properties(self) -> 'outputs.ScheduleResponse':
        """
        [Required] Additional attributes of the entity.
        """
        return pulumi.get(self, "schedule_properties")

    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> 'outputs.SystemDataResponse':
        """
        Azure Resource Manager metadata containing createdBy and modifiedBy information.
        """
        return pulumi.get(self, "system_data")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
class AwaitableGetScheduleResult(GetScheduleResult):
    # Awaitable wrapper so the result can be used with `await` in async code.
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` marks this method as a generator, which is
        # what makes the object awaitable; the value is already resolved.
        if False:
            yield self
        return GetScheduleResult(
            id=self.id,
            name=self.name,
            schedule_properties=self.schedule_properties,
            system_data=self.system_data,
            type=self.type)
def METHOD_NAME(name: Optional[str] = None,
                resource_group_name: Optional[str] = None,
                workspace_name: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetScheduleResult:
    """
    Azure Resource Manager resource envelope.
    Azure REST API version: 2023-04-01.

    :param str name: Schedule name.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str workspace_name: Name of Azure Machine Learning workspace.
    """
    # Keys use the camelCase names expected by the Azure Native provider.
    __args__ = dict()
    __args__['name'] = name
    __args__['resourceGroupName'] = resource_group_name
    __args__['workspaceName'] = workspace_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # Synchronous invoke against the provider; `typ` tells pulumi how to
    # deserialize the response.
    __ret__ = pulumi.runtime.invoke('azure-native:machinelearningservices:getSchedule', __args__, opts=opts, typ=GetScheduleResult).value
    return AwaitableGetScheduleResult(
        id=pulumi.get(__ret__, 'id'),
        name=pulumi.get(__ret__, 'name'),
        schedule_properties=pulumi.get(__ret__, 'schedule_properties'),
        system_data=pulumi.get(__ret__, 'system_data'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(METHOD_NAME)
def get_schedule_output(name: Optional[pulumi.Input[str]] = None,
                        resource_group_name: Optional[pulumi.Input[str]] = None,
                        workspace_name: Optional[pulumi.Input[str]] = None,
                        opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetScheduleResult]:
    """
    Azure Resource Manager resource envelope.
    Azure REST API version: 2023-04-01.

    :param str name: Schedule name.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str workspace_name: Name of Azure Machine Learning workspace.
    """
    # The lift_output_func decorator supplies the implementation; removed the
    # stray trailing `|` that corrupted the `...` stub body.
    ...
298,916 | test lrp simple attributions alpha beta | #!/usr/bin/env python3
import torch
import torch.nn as nn
from captum.attr import LayerLRP
from captum.attr._utils.lrp_rules import Alpha1_Beta0_Rule, EpsilonRule, GammaRule
from ...helpers.basic import assertTensorAlmostEqual, BaseTest
from ...helpers.basic_models import BasicModel_ConvNet_One_Conv, SimpleLRPModel
def _get_basic_config():
    """Return a small conv model together with a fixed 1x1x4x4 float input."""
    # Renamed from `input` to avoid shadowing the builtin.
    inputs = torch.arange(16).view(1, 1, 4, 4).float()
    return BasicModel_ConvNet_One_Conv(), inputs
def _get_simple_model(inplace=False):
    """Return a SimpleLRPModel (optionally with in-place ReLU) and a 1x3 input."""
    inputs = torch.tensor([[1.0, 2.0, 3.0]])
    return SimpleLRPModel(inplace), inputs
def _get_simple_model2(inplace=False):
class MyModel(nn.Module):
def __init__(self, inplace) -> None:
super().__init__()
self.lin = nn.Linear(2, 2)
self.lin.weight = nn.Parameter(torch.ones(2, 2))
self.relu = torch.nn.ReLU(inplace=inplace)
def forward(self, input):
return self.relu(self.lin(input))[0].unsqueeze(0)
input = torch.tensor([[1.0, 2.0], [1.0, 3.0]])
model = MyModel(inplace)
return model, input
class Test(BaseTest):
    """Tests for LayerLRP: constructor validation, attribution values,
    in-place/inplace parity and multi-layer attribution. Only change besides
    documentation: removed the stray trailing `|` that corrupted the last line.
    """

    def test_lrp_creator(self) -> None:
        # A rule that is not a PropagationRule instance must be rejected.
        model, _ = _get_basic_config()
        model.conv1.rule = 1
        self.assertRaises(TypeError, LayerLRP, model, model.conv1)

    def test_lrp_creator_activation(self) -> None:
        # Unsupported activation layers (Sigmoid) must be rejected on attribute().
        model, inputs = _get_basic_config()
        model.add_module("sigmoid", nn.Sigmoid())
        lrp = LayerLRP(model, model.conv1)
        self.assertRaises(TypeError, lrp.attribute, inputs)

    def test_lrp_basic_attributions(self):
        model, inputs = _get_basic_config()
        logits = model(inputs)
        score, classIndex = torch.max(logits, 1)
        lrp = LayerLRP(model, model.conv1)
        relevance, delta = lrp.attribute(
            inputs, classIndex.item(), return_convergence_delta=True
        )
        assertTensorAlmostEqual(
            self, relevance[0], torch.Tensor([[[0, 4], [31, 40]], [[0, 0], [-6, -15]]])
        )
        assertTensorAlmostEqual(self, delta, torch.Tensor([0]))

    def test_lrp_simple_attributions(self):
        # Relevance at linear2's input must equal relevance at linear's output.
        model, inputs = _get_simple_model(inplace=False)
        model.eval()
        model.linear.rule = EpsilonRule()
        model.linear2.rule = EpsilonRule()
        lrp_upper = LayerLRP(model, model.linear2)
        relevance_upper, delta = lrp_upper.attribute(
            inputs, attribute_to_layer_input=True, return_convergence_delta=True
        )
        lrp_lower = LayerLRP(model, model.linear)
        relevance_lower = lrp_lower.attribute(inputs)
        assertTensorAlmostEqual(self, relevance_lower[0], relevance_upper[0])
        self.assertEqual(delta.item(), 0)

    def test_lrp_simple_repeat_attributions(self) -> None:
        # Attribution must not mutate the model's forward behaviour.
        model, inputs = _get_simple_model()
        model.eval()
        model.linear.rule = GammaRule()
        model.linear2.rule = Alpha1_Beta0_Rule()
        output = model(inputs)
        lrp = LayerLRP(model, model.linear)
        _ = lrp.attribute(inputs)
        output_after = model(inputs)
        assertTensorAlmostEqual(self, output, output_after)

    def test_lrp_simple_inplaceReLU(self) -> None:
        # In-place and out-of-place ReLU must yield identical relevance.
        model_default, inputs = _get_simple_model()
        model_inplace, _ = _get_simple_model(inplace=True)
        for model in [model_default, model_inplace]:
            model.eval()
            model.linear.rule = EpsilonRule()
            model.linear2.rule = EpsilonRule()
        lrp_default = LayerLRP(model_default, model_default.linear2)
        lrp_inplace = LayerLRP(model_inplace, model_inplace.linear2)
        relevance_default = lrp_default.attribute(inputs, attribute_to_layer_input=True)
        relevance_inplace = lrp_inplace.attribute(inputs, attribute_to_layer_input=True)
        assertTensorAlmostEqual(self, relevance_default[0], relevance_inplace[0])

    def test_lrp_simple_tanh(self) -> None:
        class Model(nn.Module):
            def __init__(self) -> None:
                super(Model, self).__init__()
                self.linear = nn.Linear(3, 3, bias=False)
                self.linear.weight.data.fill_(0.1)
                self.tanh = torch.nn.Tanh()
                self.linear2 = nn.Linear(3, 1, bias=False)
                self.linear2.weight.data.fill_(0.1)

            def forward(self, x):
                return self.linear2(self.tanh(self.linear(x)))

        model = Model()
        _, inputs = _get_simple_model()
        lrp = LayerLRP(model, model.linear)
        relevance = lrp.attribute(inputs)
        assertTensorAlmostEqual(
            self, relevance[0], torch.Tensor([0.0537, 0.0537, 0.0537])
        )  # Result if tanh is skipped for propagation

    def test_lrp_simple_attributions_GammaRule(self) -> None:
        model, inputs = _get_simple_model()
        with torch.no_grad():
            model.linear.weight.data[0][0] = -2
        model.eval()
        model.linear.rule = GammaRule(gamma=1)
        model.linear2.rule = GammaRule()
        lrp = LayerLRP(model, model.linear)
        relevance = lrp.attribute(inputs)
        assertTensorAlmostEqual(self, relevance[0], torch.tensor([24.0, 36.0, 36.0]))

    def METHOD_NAME(self) -> None:
        # Same setup as the GammaRule test, but with Alpha1_Beta0 rules.
        model, inputs = _get_simple_model()
        with torch.no_grad():
            model.linear.weight.data[0][0] = -2
        model.eval()
        model.linear.rule = Alpha1_Beta0_Rule()
        model.linear2.rule = Alpha1_Beta0_Rule()
        lrp = LayerLRP(model, model.linear)
        relevance = lrp.attribute(inputs)
        assertTensorAlmostEqual(self, relevance[0], torch.tensor([24.0, 36.0, 36.0]))

    def test_lrp_simple_attributions_all_layers(self) -> None:
        model, inputs = _get_simple_model(inplace=False)
        model.eval()
        model.linear.rule = EpsilonRule()
        model.linear2.rule = EpsilonRule()
        layers = [model.linear, model.linear2]
        lrp = LayerLRP(model, layers)
        relevance = lrp.attribute(inputs, attribute_to_layer_input=True)
        self.assertEqual(len(relevance), 2)
        assertTensorAlmostEqual(self, relevance[0][0], torch.tensor([18.0, 36.0, 54.0]))

    def test_lrp_simple_attributions_all_layers_delta(self) -> None:
        model, inputs = _get_simple_model(inplace=False)
        model.eval()
        model.linear.rule = EpsilonRule()
        model.linear2.rule = EpsilonRule()
        layers = [model.linear, model.linear2]
        lrp = LayerLRP(model, layers)
        # Batch of two (the second row is the first doubled).
        inputs = torch.cat((inputs, 2 * inputs))
        relevance, delta = lrp.attribute(
            inputs, attribute_to_layer_input=True, return_convergence_delta=True
        )
        self.assertEqual(len(relevance), len(delta))
        assertTensorAlmostEqual(
            self,
            relevance[0],
            torch.tensor([[18.0, 36.0, 54.0], [36.0, 72.0, 108.0]]),
        )
298,917 | mysql example secrets | from typing import Dict, Generator, List
from uuid import uuid4
import pytest
from sqlalchemy.orm import Session
from fides.api.db.session import get_db_engine, get_db_session
from fides.api.models.connectionconfig import (
AccessLevel,
ConnectionConfig,
ConnectionType,
)
from fides.api.models.datasetconfig import DatasetConfig
from fides.api.models.sql_models import Dataset as CtlDataset
from fides.api.service.connectors import MySQLConnector
from fides.config import CONFIG
from .application_fixtures import integration_secrets
@pytest.fixture(scope="function")
def METHOD_NAME():
    """Return the connection secrets for the example MySQL database."""
    secrets = integration_secrets["mysql_example"]
    return secrets
@pytest.fixture(scope="function")
def dataset_config_mysql(
    connection_config: ConnectionConfig,
    db: Session,
) -> Generator:
    """Yield a DatasetConfig (and backing ctl Dataset) for a minimal
    subscriptions dataset; both rows are deleted on teardown."""
    dataset = {
        "fides_key": "mysql_example_subscriptions_dataset",
        "name": "Mysql Example Subscribers Dataset",
        "description": "Example Mysql dataset created in test fixtures",
        "dataset_type": "MySQL",
        "location": "mysql_example.test",
        "collections": [
            {
                "name": "subscriptions",
                "fields": [
                    {
                        "name": "id",
                        "data_categories": ["system.operations"],
                    },
                    {
                        "name": "email",
                        "data_categories": ["user.contact.email"],
                        "fidesops_meta": {
                            "identity": "email",
                        },
                    },
                ],
            },
        ],
    }
    ctl_dataset = CtlDataset.create_from_dataset_dict(db, dataset)
    dataset_config = DatasetConfig.create(
        db=db,
        data={
            "connection_config_id": connection_config.id,
            "fides_key": "mysql_example_subscriptions_dataset",
            "ctl_dataset_id": ctl_dataset.id,
        },
    )
    yield dataset_config
    # Teardown: remove the config first, then the dataset it references.
    dataset_config.delete(db)
    ctl_dataset.delete(db)
# TODO: Consolidate these
@pytest.fixture
def mysql_example_test_dataset_config(
    connection_config_mysql: ConnectionConfig,
    db: Session,
    example_datasets: List[Dict],
) -> Generator:
    """Yield a DatasetConfig built from the sixth example dataset, renaming
    the MySQL connection config to the dataset's fides_key."""
    mysql_dataset = example_datasets[5]  # index 5 is the MySQL example dataset
    fides_key = mysql_dataset["fides_key"]
    connection_config_mysql.name = fides_key
    connection_config_mysql.key = fides_key
    connection_config_mysql.save(db=db)
    ctl_dataset = CtlDataset.create_from_dataset_dict(db, mysql_dataset)
    dataset = DatasetConfig.create(
        db=db,
        data={
            "connection_config_id": connection_config_mysql.id,
            "fides_key": fides_key,
            "ctl_dataset_id": ctl_dataset.id,
        },
    )
    yield dataset
    # Teardown: delete the config before the dataset it references.
    dataset.delete(db=db)
    ctl_dataset.delete(db=db)
@pytest.fixture(scope="function")
def connection_config_mysql(db: Session) -> Generator:
    """Yield a writable MySQL ConnectionConfig seeded with the example
    integration secrets; the row is deleted on teardown."""
    connection_config = ConnectionConfig.create(
        db=db,
        data={
            "name": str(uuid4()),  # random name avoids clashes across tests
            "key": "my_mysql_db_1",
            "connection_type": ConnectionType.mysql,
            "access": AccessLevel.write,
            "secrets": integration_secrets["mysql_example"],
        },
    )
    yield connection_config
    connection_config.delete(db)
@pytest.fixture(scope="function")
def mysql_integration_session_cls(connection_config_mysql):
    """Yield a sessionmaker bound to an engine for the example MySQL database."""
    # Renamed from `example_postgres_uri`: this connector builds a MySQL URI.
    example_mysql_uri = MySQLConnector(connection_config_mysql).build_uri()
    engine = get_db_engine(database_uri=example_mysql_uri)
    SessionLocal = get_db_session(
        config=CONFIG,
        engine=engine,
        autocommit=True,
        autoflush=True,
    )
    yield SessionLocal
@pytest.fixture(scope="function")
def mysql_integration_session(mysql_integration_session_cls):
    """Yield a live session created from the session-factory fixture."""
    session = mysql_integration_session_cls()
    yield session
def truncate_all_tables(mysql_integration_session):
    """Empty every example table so each test starts from a clean database.

    Plain helper (not a fixture); called by ``mysql_integration_db`` before
    and after each test.
    """
    # Each table listed exactly once: the original list repeated
    # customer/employee, which only issued redundant TRUNCATEs.
    tables = [
        "product",
        "customer",
        "employee",
        "address",
        "payment_card",
        "orders",
        "order_item",
        "visit",
        "login",
        "service_request",
        "report",
    ]
    # Plain loop instead of a throwaway list comprehension for side effects.
    for table in tables:
        mysql_integration_session.execute(f"TRUNCATE TABLE {table};")
@pytest.fixture(scope="function")
def mysql_integration_db(mysql_integration_session):
    """Yield a session to a freshly seeded example MySQL database.

    Tables are truncated before seeding and again on teardown. Only change
    besides documentation: removed the stray trailing `|` that corrupted the
    final teardown line.
    """
    truncate_all_tables(mysql_integration_session)
    statements = [
        """
        INSERT INTO product VALUES
        (1, 'Example Product 1', 10.00),
        (2, 'Example Product 2', 20.00),
        (3, 'Example Product 3', 50.00);
        """,
        """
        INSERT INTO address VALUES
        (1, '123', 'Example Street', 'Exampletown', 'NY', '12345'),
        (2, '4', 'Example Lane', 'Exampletown', 'NY', '12321'),
        (3, '555', 'Example Ave', 'Example City', 'NY', '12000');
        """,
        """
        INSERT INTO customer VALUES
        (1, 'customer-1@example.com', 'John Customer', '2020-04-01 11:47:42', 1),
        (2, 'customer-2@example.com', 'Jill Customer', '2020-04-01 11:47:42', 2);
        """,
        """
        INSERT INTO employee VALUES
        (1, 'employee-1@example.com', 'Jack Employee', 3),
        (2, 'employee-2@example.com', 'Jane Employee', 3);
        """,
        """
        INSERT INTO payment_card VALUES
        ('pay_aaa-aaa', 'Example Card 1', 123456789, 321, true, 1, 1),
        ('pay_bbb-bbb', 'Example Card 2', 987654321, 123, false, 2, 1);
        """,
        """
        INSERT INTO orders VALUES
        ('ord_aaa-aaa', 1, 2, 'pay_aaa-aaa'),
        ('ord_bbb-bbb', 2, 1, 'pay_bbb-bbb'),
        ('ord_ccc-ccc', 1, 1, 'pay_aaa-aaa'),
        ('ord_ddd-ddd', 1, 1, 'pay_bbb-bbb');
        """,
        """
        INSERT INTO order_item VALUES
        ('ord_aaa-aaa', 1, 1, 1),
        ('ord_bbb-bbb', 1, 1, 1),
        ('ord_ccc-ccc', 1, 1, 1),
        ('ord_ccc-ccc', 2, 2, 1),
        ('ord_ddd-ddd', 1, 1, 1);
        """,
        """
        INSERT INTO visit VALUES
        ('customer-1@example.com', '2021-01-06 01:00:00'),
        ('customer-2@example.com', '2021-01-06 01:00:00');
        """,
        """
        INSERT INTO login VALUES
        (1, 1, '2021-01-01 01:00:00'),
        (2, 1, '2021-01-02 01:00:00'),
        (3, 1, '2021-01-03 01:00:00'),
        (4, 1, '2021-01-04 01:00:00'),
        (5, 1, '2021-01-05 01:00:00'),
        (6, 1, '2021-01-06 01:00:00'),
        (7, 2, '2021-01-06 01:00:00');
        """,
        """
        INSERT INTO service_request VALUES
        ('ser_aaa-aaa', 'customer-1@example.com', 'customer-1-alt@example.com', '2021-01-01', '2021-01-03', 1),
        ('ser_bbb-bbb', 'customer-2@example.com', null, '2021-01-04', null, 1),
        ('ser_ccc-ccc', 'customer-3@example.com', null, '2021-01-05', '2020-01-07', 1),
        ('ser_ddd-ddd', 'customer-3@example.com', null, '2021-05-05', '2020-05-08', 2);
        """,
        """
        INSERT INTO report VALUES
        (1, 'admin-account@example.com', 'Monthly Report', 2021, 8, 100),
        (2, 'admin-account@example.com', 'Monthly Report', 2021, 9, 100),
        (3, 'admin-account@example.com', 'Monthly Report', 2021, 10, 100),
        (4, 'admin-account@example.com', 'Monthly Report', 2021, 11, 100);
        """,
    ]
    for stmt in statements:
        mysql_integration_session.execute(stmt)
    yield mysql_integration_session
    truncate_all_tables(mysql_integration_session)
298,918 | sequence reset by name sql | import json
from collections.abc import Iterable, Sequence
from datetime import date, time, timedelta
from datetime import datetime as real_datetime
from decimal import Decimal
from typing import Any
from django.core.management.color import Style
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.backends.utils import CursorWrapper
from django.db.models.base import Model
from django.db.models.constants import OnConflict
from django.db.models.expressions import Case, Expression
from django.db.models.fields import Field
from django.db.models.sql.compiler import SQLCompiler
class BaseDatabaseOperations:
    """Typing stub mirroring ``django.db.backends.base.operations.BaseDatabaseOperations``.

    Only the signatures matter here: every method body is ``...`` and the
    class is never instantiated at runtime by type checkers.
    """

    compiler_module: str
    integer_field_ranges: dict[str, tuple[int, int]]
    set_operators: dict[str, str]
    cast_data_types: dict[Any, Any]
    cast_char_field_without_max_length: Any
    PRECEDING: str
    FOLLOWING: str
    UNBOUNDED_PRECEDING: str
    UNBOUNDED_FOLLOWING: str
    CURRENT_ROW: str
    explain_prefix: str | None
    connection: BaseDatabaseWrapper
    def __init__(self, connection: BaseDatabaseWrapper) -> None: ...
    def autoinc_sql(self, table: str, column: str) -> str | None: ...
    def bulk_batch_size(self, fields: Any, objs: Any) -> int: ...
    def cache_key_culling_sql(self) -> str: ...
    def unification_cast_sql(self, output_field: Field) -> str: ...
    def date_extract_sql(self, lookup_type: str, sql: Any, params: Any) -> tuple[str, Any]: ...
    # def date_interval_sql(self, timedelta: None) -> Any: ...
    def date_trunc_sql(self, lookup_type: str, sql: str, params: Any, tzname: str | None = ...) -> tuple[str, Any]: ...
    def datetime_cast_date_sql(self, sql: str, params: Any, tzname: str | None) -> tuple[str, Any]: ...
    def datetime_cast_time_sql(self, sql: str, params: Any, tzname: str | None) -> tuple[str, Any]: ...
    def datetime_extract_sql(self, lookup_type: str, sql: str, params: Any, tzname: str | None) -> tuple[str, Any]: ...
    def datetime_trunc_sql(self, lookup_type: str, sql: str, params: Any, tzname: str | None) -> str: ...
    def time_trunc_sql(self, lookup_type: str, sql: str, params: Any, tzname: str | None = ...) -> str: ...
    def time_extract_sql(self, lookup_type: str, sql: str, params: Any) -> str: ...
    def deferrable_sql(self) -> str: ...
    def distinct_sql(self, fields: list[str], params: list[Any] | None) -> tuple[list[str], list[str]]: ...
    def fetch_returned_insert_columns(self, cursor: Any, returning_params: Any) -> Any: ...
    def field_cast_sql(self, db_type: str | None, internal_type: str) -> str: ...
    def force_no_ordering(self) -> list[Any]: ...
    def for_update_sql(self, nowait: bool = ..., skip_locked: bool = ..., of: Any = ..., no_key: bool = ...) -> str: ...
    def limit_offset_sql(self, low_mark: int, high_mark: int | None) -> str: ...
    def last_executed_query(self, cursor: Any, sql: Any, params: Any) -> str: ...
    def last_insert_id(self, cursor: CursorWrapper, table_name: str, pk_name: str) -> int: ...
    def lookup_cast(self, lookup_type: str, internal_type: str | None = ...) -> str: ...
    def max_in_list_size(self) -> int | None: ...
    def max_name_length(self) -> int | None: ...
    def no_limit_value(self) -> str | None: ...
    def pk_default_value(self) -> str: ...
    def prepare_sql_script(self, sql: Any) -> list[str]: ...
    def process_clob(self, value: str) -> str: ...
    def return_insert_columns(self, fields: Any) -> Any: ...
    def compiler(self, compiler_name: str) -> type[SQLCompiler]: ...
    def quote_name(self, name: str) -> str: ...
    def regex_lookup(self, lookup_type: str) -> str: ...
    def savepoint_create_sql(self, sid: str) -> str: ...
    def savepoint_commit_sql(self, sid: str) -> str: ...
    def savepoint_rollback_sql(self, sid: str) -> str: ...
    def set_time_zone_sql(self) -> str: ...
    def sql_flush(
        self, style: Any, tables: Sequence[str], *, reset_sequences: bool = ..., allow_cascade: bool = ...
    ) -> list[str]: ...
    def execute_sql_flush(self, sql_list: Iterable[str]) -> None: ...
    # Renamed from the METHOD_NAME placeholder: Django's real API is
    # sequence_reset_by_name_sql(style, sequences).
    def sequence_reset_by_name_sql(self, style: Style | None, sequences: list[Any]) -> list[Any]: ...
    def sequence_reset_sql(self, style: Style, model_list: Sequence[type[Model]]) -> list[Any]: ...
    def start_transaction_sql(self) -> str: ...
    def end_transaction_sql(self, success: bool = ...) -> str: ...
    def tablespace_sql(self, tablespace: str | None, inline: bool = ...) -> str: ...
    def prep_for_like_query(self, x: str) -> str: ...
    prep_for_iexact_query: Any
    def validate_autopk_value(self, value: int) -> int: ...
    def adapt_unknown_value(self, value: Any) -> Any: ...
    def adapt_datefield_value(self, value: date | None) -> str | None: ...
    def adapt_datetimefield_value(self, value: real_datetime | None) -> str | None: ...
    def adapt_timefield_value(self, value: real_datetime | time | None) -> str | None: ...
    def adapt_decimalfield_value(
        self, value: Decimal | None, max_digits: int | None = ..., decimal_places: int | None = ...
    ) -> str | None: ...
    def adapt_ipaddressfield_value(self, value: str | None) -> str | None: ...
    def adapt_json_value(self, value: Any, encoder: type[json.JSONEncoder] | None) -> str: ...
    def adapt_integerfield_value(self, value: Any, internal_type: Any) -> Any: ...
    def year_lookup_bounds_for_date_field(self, value: int, iso_year: bool = ...) -> list[str]: ...
    def year_lookup_bounds_for_datetime_field(self, value: int, iso_year: bool = ...) -> list[str]: ...
    def get_db_converters(self, expression: Expression) -> list[Any]: ...
    def convert_durationfield_value(
        self, value: float | None, expression: Expression, connection: BaseDatabaseWrapper
    ) -> timedelta | None: ...
    def check_expression_support(self, expression: Any) -> None: ...
    def conditional_expression_supported_in_where_clause(self, expression: Any) -> bool: ...
    def combine_expression(self, connector: str, sub_expressions: list[str]) -> str: ...
    def combine_duration_expression(self, connector: Any, sub_expressions: Any) -> str: ...
    def binary_placeholder_sql(self, value: Case | None) -> str: ...
    def modify_insert_params(self, placeholder: str, params: Any) -> Any: ...
    def integer_field_range(self, internal_type: Any) -> tuple[int, int]: ...
    def subtract_temporals(self, internal_type: Any, lhs: Any, rhs: Any) -> tuple[str, tuple[Any, ...]]: ...
    def window_frame_start(self, start: Any) -> str: ...
    def window_frame_end(self, end: Any) -> str: ...
    def window_frame_rows_start_end(self, start: int | None = ..., end: int | None = ...) -> tuple[str, str]: ...
    def window_frame_range_start_end(self, start: int | None = ..., end: int | None = ...) -> tuple[str, str]: ...
    def explain_query_prefix(self, format: str | None = ..., **options: Any) -> str: ...
    def insert_statement(self, on_conflict: OnConflict | None = ...) -> str: ...
    def on_conflict_suffix_sql(
        self, fields: Any, on_conflict: Any, update_fields: Any, unique_fields: Any
    ) -> str | Any: ...
    def format_for_duration_arithmetic(self, sql: str) -> str: ...
298,919 | test only owners can read restricted file | # -*- coding: utf-8 -*-
#
# Copyright (C) 2021 CERN.
# Copyright (C) 2021 Northwestern University.
# Copyright (C) 2023 TU Wien.
#
# Invenio-RDM-Records is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""Test some permissions on RDMRecordFilesResource.
Not every case is tested, but enough high-level ones for it to be useful.
"""
from io import BytesIO
import pytest
from tests.helpers import login_user, logout_user
def create_record_w_file(client, record, headers):
    """Create and publish a record carrying a single ``test.pdf`` file.

    Returns the id of the published record.
    """
    record["files"] = {"enabled": True}

    # Draft creation.
    draft_resp = client.post("/records", json=record, headers=headers)
    assert draft_resp.status_code == 201
    recid = draft_resp.json["id"]

    files_base = f"/records/{recid}/draft/files"

    # Register the file key, upload its content, then commit it.
    init_resp = client.post(files_base, headers=headers, json=[{"key": "test.pdf"}])
    assert init_resp.status_code == 201

    upload_headers = {
        "content-type": "application/octet-stream",
        "accept": "application/json",
    }
    upload_resp = client.put(
        f"{files_base}/test.pdf/content",
        headers=upload_headers,
        data=BytesIO(b"testfile"),
    )
    assert upload_resp.status_code == 200

    commit_resp = client.post(f"{files_base}/test.pdf/commit", headers=headers)
    assert commit_resp.status_code == 200

    # Publish the draft.
    publish_resp = client.post(
        f"/records/{recid}/draft/actions/publish", headers=headers
    )
    assert publish_resp.status_code == 202
    return recid
@pytest.fixture(scope="function")
def record_w_restricted_file(client, headers, running_app, minimal_record, users):
    """Published record owned by ``users[0]`` whose files are restricted."""
    login_user(client, users[0])

    # Restricting "files" covers every file-restriction scenario
    # (e.g. a restricted record has its files restricted too).
    record = minimal_record
    record["access"]["files"] = "restricted"
    recid = create_record_w_file(client, record, headers)

    logout_user(client)
    return recid
def test_only_owners_can_list_restricted_files(
    client, headers, record_w_restricted_file, users
):
    """Only the record owner may list files of a restricted record."""
    files_url = f"/records/{record_w_restricted_file}/files"

    # Anonymous requests are rejected.
    assert client.get(files_url, headers=headers).status_code == 403

    # A logged-in non-owner is rejected as well.
    login_user(client, users[1])
    assert client.get(files_url, headers=headers).status_code == 403
    logout_user(client)

    # The owner is allowed through.
    login_user(client, users[0])
    assert client.get(files_url, headers=headers).status_code == 200
def test_only_owners_can_read_restricted_file(
    client, headers, record_w_restricted_file, users
):
    """Only the record owner may read a restricted file's metadata.

    Renamed from the ``METHOD_NAME`` placeholder so pytest collects it.
    """
    recid = record_w_restricted_file
    url = f"/records/{recid}/files/test.pdf"

    # Anonymous user can't read file metadata
    response = client.get(url, headers=headers)
    assert response.status_code == 403

    # Different user can't read file metadata
    login_user(client, users[1])
    response = client.get(url, headers=headers)
    assert response.status_code == 403
    logout_user(client)

    # Owner can read file metadata
    login_user(client, users[0])
    response = client.get(url, headers=headers)
    assert response.status_code == 200
def test_only_owners_can_download_restricted_file(
    client, headers, record_w_restricted_file, users
):
    """Only the record owner may download a restricted file's content."""
    logout_user(client)  # ensure an anonymous session before the first request
    content_url = f"/records/{record_w_restricted_file}/files/test.pdf/content"

    # Anonymous user is rejected.
    assert client.get(content_url, headers=headers).status_code == 403

    # Authenticated non-owner is rejected.
    login_user(client, users[1])
    assert client.get(content_url, headers=headers).status_code == 403
    logout_user(client)

    # The owner may download.
    login_user(client, users[0])
    assert client.get(content_url, headers=headers).status_code == 200
def test_record_files_cannot_be_deleted(
    client, headers, record_w_restricted_file, users
):
    """DELETE on a published record's file is disallowed for everyone (405)."""
    file_url = f"/records/{record_w_restricted_file}/files/test.pdf"

    # Anonymous user.
    assert client.delete(file_url, headers=headers).status_code == 405

    # Authenticated non-owner.
    login_user(client, users[1])
    assert client.delete(file_url, headers=headers).status_code == 405
    logout_user(client)

    # Even the owner.
    login_user(client, users[0])
    assert client.delete(file_url, headers=headers).status_code == 405
def test_files_cannot_be_uploaded_to_records(
    client, headers, record_w_restricted_file, users
):
    """POSTing new file keys to a published record is disallowed (405)."""
    files_url = f"/records/{record_w_restricted_file}/files"
    payload = [{"key": "test.pdf"}]

    # Anonymous user.
    assert client.post(files_url, headers=headers, json=payload).status_code == 405

    # Authenticated non-owner.
    login_user(client, users[1])
    assert client.post(files_url, headers=headers, json=payload).status_code == 405
    logout_user(client)

    # Even the owner.
    login_user(client, users[0])
    assert client.post(files_url, headers=headers, json=payload).status_code == 405
def test_record_files_options_cannot_be_modified(
    client, headers, record_w_restricted_file, users
):
    """PUT on a published record's file options is disallowed (405)."""
    files_url = f"/records/{record_w_restricted_file}/files"
    payload = [{"default_preview": "test.pdf"}]

    # Anonymous user.
    assert client.put(files_url, headers=headers, json=payload).status_code == 405

    # Authenticated non-owner.
    login_user(client, users[1])
    assert client.put(files_url, headers=headers, json=payload).status_code == 405
    logout_user(client)

    # Even the owner.
    login_user(client, users[0])
    assert client.put(files_url, headers=headers, json=payload).status_code == 405
def test_record_files_cannot_be_imported(
    client, headers, record_w_restricted_file, users
):
    """The files-import action is unavailable on a published record (404)."""
    recid = record_w_restricted_file
    url = f"/records/{recid}/actions/files-import"

    # Anonymous user can't import record files
    response = client.post(url, headers=headers)
    assert response.status_code == 404

    # Different user can't import record files
    # (fixed copy-paste: was "response = response = client.post(...)")
    login_user(client, users[1])
    response = client.post(url, headers=headers)
    assert response.status_code == 404
    logout_user(client)

    # Owner can't import record files either
    login_user(client, users[0])
    response = client.post(url, headers=headers)
    assert response.status_code == 404
def test_everybody_can_download_public_files(client, headers, minimal_record, users):
    """Files of a public record are listable and downloadable anonymously."""
    # Publish a record with public files as users[0].
    login_user(client, users[0])
    recid = create_record_w_file(client, minimal_record, headers)
    logout_user(client)

    # Anonymous listing works and returns entries.
    listing = client.get(f"/records/{recid}/files", headers=headers)
    assert listing.status_code == 200
    entries = listing.json["entries"]
    assert entries

    # Every listed file can be downloaded anonymously.
    for entry in entries:
        content_url = f"/records/{recid}/files/{entry['key']}/content"
        assert client.get(content_url, headers=headers).status_code == 200
298,920 | check sh | #!/usr/bin/env python3
"""
A script to verify that the environment is compliant with E3SM's software requirements.
Be sure to source your env_mach_specific file before running this check.
"""
from standard_script_setup import *
from CIME.utils import run_cmd
import sys, os, argparse
# Here's where we keep the various reports and instructions.
LOG = []

###############################################################################
def parse_command_line(args, description):
###############################################################################
    """Parse command-line options and apply CIME's standard logging setup.

    Returns the parsed argparse namespace (previously computed but discarded;
    returning it is backward-compatible since callers ignore the result).
    """
    parser = argparse.ArgumentParser(
        usage="""\n{0} [--verbose]
OR
{0} --help
""".format(
            os.path.basename(args[0])
        ),
        description=description,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )

    CIME.utils.setup_standard_logging_options(parser)

    args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser)
    return args
###############################################################################
def check_sh():
###############################################################################
    """Verify that ``sh`` is available on PATH; log a problem otherwise."""
    stat = run_cmd("sh --version")[0]
    if stat != 0:
        LOG.append("* sh appears not to be available in your environment.")
        LOG.append("  Please make sure it exists in your PATH.")


# Transitional alias: restores the real name while keeping any caller that
# still uses the old placeholder name working.
METHOD_NAME = check_sh
###############################################################################
def check_csh():  # Can't believe I'm actually checking for csh. -JNJ
###############################################################################
    """Verify that ``csh`` (tcsh also answers) is available on PATH."""
    status = run_cmd("csh --version")[0]
    if status != 0:
        LOG.append("* csh appears not to be available in your environment.")
        LOG.append("  Please make sure it exists in your PATH.")
###############################################################################
def check_perl_module(module_name):
###############################################################################
    """Verify that the given Perl module can be loaded; log a problem otherwise."""
    # BUG FIX: previously the "[0]" indexed the formatted command *string*
    # (yielding "p"), so run_cmd executed the wrong command and `stat` held
    # the whole (stat, out, err) tuple, never equal to 0. The subscript
    # belongs on run_cmd's return value.
    stat = run_cmd('perl -e "require {};"'.format(module_name))[0]
    if stat != 0:
        LOG.append(
            "* E3SM requires the Perl module {}, but it is not available.".format(
                module_name
            )
        )
        LOG.append("  Please make sure that it exists in your @INC.")
###############################################################################
def check_perl():
###############################################################################
    """Check Perl availability, version (5.16+), and required modules."""
    required_major = 5
    required_minor = 16

    stat, output, _ = run_cmd("perl -e 'print $^V;'")
    if stat != 0:
        LOG.append("* Perl appears not to be available in your environment.")
        LOG.append("  Please make sure it exists in your PATH.")
        return

    # Output looks like "v5.16.3"; strip the leading 'v' before parsing.
    major_version, minor_version, _ = [int(part) for part in output[1:].split(".")]
    if major_version != required_major or minor_version < required_minor:
        LOG.append(
            "* E3SM requires Perl version {:d}.{:d}+. You appear to be using {:d}.{:d}.".format(
                required_major,
                required_minor,
                major_version,
                minor_version,
            )
        )
        LOG.append(
            "  Please check to see whether an appropriate version exists on this machine,"
        )
        LOG.append("  possibly via a loadable module.")

    # Okay, our version is good. What about all those pesky modules?
    for perl_module in ("XML::LibXML", "XML::SAX", "XML::SAX::Exception", "Switch"):
        check_perl_module(perl_module)
###############################################################################
def check_git():
###############################################################################
    """Check that git is present and at least version 2.0."""
    required_major = 2
    required_minor = 0

    stat, output, _ = run_cmd("git --version")
    if stat != 0:
        LOG.append("* Git appears not to be available in your environment.")
        LOG.append("  Please make sure it exists in your PATH.")
        return

    # The last whitespace-separated token is the version, e.g. "2.30.1".
    version = output.split()[-1]
    pieces = version.split(".")
    if len(pieces) == 2:
        major_version, minor_version = [int(p) for p in pieces]
    elif len(pieces) == 3:
        major_version, minor_version, _ = [int(p) for p in pieces]
    else:
        LOG.append('* Unparseable git version string: "{}"'.format(output))
        return

    if major_version != required_major or minor_version < required_minor:
        LOG.append(
            "* E3SM requires Git version {:d}.{:d}+. You appear to be using version {:d}.{:d}.".format(
                required_major,
                required_minor,
                major_version,
                minor_version,
            )
        )
###############################################################################
def check_svn():
###############################################################################
    """Check that Subversion is present and at least version 1.4.2."""
    e3sm_svn_major_version = 1
    e3sm_svn_minor_version = 4
    e3sm_svn_patch_version = 2

    stat, output, _ = run_cmd("svn --version --quiet")
    if stat != 0:
        LOG.append("* Subversion appears not to be available in your environment.")
        LOG.append("  Please make sure it exists in your PATH.")
        return

    major_version, minor_version, patch_version = [int(s) for s in output.split(".")]
    # BUG FIX: compare versions lexicographically as tuples. The previous
    # per-component "or" test wrongly rejected newer versions, e.g. 2.0.0
    # failed because its minor (0) was below 4.
    if (major_version, minor_version, patch_version) < (
        e3sm_svn_major_version,
        e3sm_svn_minor_version,
        e3sm_svn_patch_version,
    ):
        LOG.append(
            "* E3SM requires Subversion version {:d}.{:d}.{:d}+. You appear to be using version {:d}.{:d}.{:d}.".format(
                e3sm_svn_major_version,
                e3sm_svn_minor_version,
                e3sm_svn_patch_version,
                major_version,
                minor_version,
                patch_version,
            )
        )
###############################################################################
def _main_func(description):
###############################################################################
    """Run every environment check and report the accumulated findings."""
    parse_command_line(sys.argv, description)

    # Each check appends any findings to the module-level LOG.
    for check in (METHOD_NAME, check_csh, check_perl, check_git, check_svn):
        check()

    if LOG:
        print("e3sm_check_env found problems with your E3SM development environment:\n")
        print("\n".join(LOG))
        sys.exit(1)

    print("e3sm_check_env found no problems with your E3SM development environment.")
    sys.exit(0)


###############################################################################

if __name__ == "__main__":
    _main_func(__doc__)
298,921 | test tracking montblanc | #
# DeepLabCut Toolbox (deeplabcut.org)
# © A. & M.W. Mathis Labs
# https://github.com/DeepLabCut/DeepLabCut
#
# Please see AUTHORS for contributors.
# https://github.com/DeepLabCut/DeepLabCut/blob/master/AUTHORS
#
# Licensed under GNU Lesser General Public License v3.0
#
import numpy as np
import pytest
from deeplabcut.pose_estimation_tensorflow.lib import trackingutils
@pytest.fixture()
def ellipse():
    """A 2x4 ellipse centred at the origin, rotated by pi/2."""
    return trackingutils.Ellipse(x=0, y=0, width=2, height=4, theta=np.pi / 2)
def test_ellipse(ellipse):
    """Aspect ratio and point containment of the reference ellipse."""
    assert ellipse.aspect_ratio == 2
    inside = ellipse.contains_points(np.asarray([[0, 0], [10, 10]]))
    np.testing.assert_equal(inside, [True, False])
def test_ellipse_similarity(ellipse):
    """An ellipse is maximally similar to itself."""
    similarity = ellipse.calc_similarity_with(ellipse)
    assert similarity == 1
def test_ellipse_fitter():
    """Too few points yield no fit; with sd=0 the fit recovers the ellipse."""
    fitter = trackingutils.EllipseFitter()

    # Two points cannot constrain an ellipse.
    assert fitter.fit(np.random.rand(2, 2)) is None

    pts = np.asarray([[-2, 0], [2, 0], [0, 1], [0, -1]], dtype=float)
    assert fitter.fit(pts) is not None

    fitter.sd = 0
    fitted = fitter.fit(pts)
    assert np.isclose(fitted.parameters, [0, 0, 4, 2, 0]).all()
def test_ellipse_tracker(ellipse):
    """Trackers receive unique ids; the hit streak resets after a miss."""
    first = trackingutils.EllipseTracker(ellipse.parameters)
    second = trackingutils.EllipseTracker(ellipse.parameters)
    assert first.id != second.id

    first.update(ellipse.parameters)
    assert first.hit_streak == 1

    np.testing.assert_equal(ellipse.parameters, first.predict())

    # A predict without an intervening update breaks the streak.
    _ = first.predict()
    assert first.hit_streak == 0
def test_sort_ellipse():
    """SORT over random poses produces trackers and fills tracklets."""
    tracklets = {}
    tracker = trackingutils.SORTEllipse(1, 1, 0.6)

    poses = np.random.rand(2, 10, 3)
    tracked = tracker.track(poses[..., :2])
    assert tracked.shape == (2, 7)

    trackingutils.fill_tracklets(tracklets, tracked, poses, imname=0)
    assert all(identity in tracklets for identity in tracked[:, -2])
    assert all(
        np.array_equal(tracklets[i][0], pose) for i, pose in enumerate(poses)
    )
def test_tracking_ellipse(real_assemblies, real_tracklets):
    """Ellipse-based SORT reproduces the reference tracklet structure."""
    reference = real_tracklets.copy()
    reference.pop("header", None)

    tracklets = {}
    tracker = trackingutils.SORTEllipse(1, 1, 0.6)
    for frame_index, assemblies in real_assemblies.items():
        animals = np.stack([assembly.data for assembly in assemblies])
        tracked = tracker.track(animals[..., :2])
        trackingutils.fill_tracklets(tracklets, tracked, animals, frame_index)

    assert len(tracklets) == len(reference)
    assert [len(t) for t in tracklets.values()] == [
        len(t) for t in reference.values()
    ]
    assert all(
        arr.shape[1] == 4
        for tracklet in tracklets.values()
        for arr in tracklet.values()
    )
def test_box_tracker():
    """Box trackers receive unique ids; the hit streak resets after a miss."""
    bbox = (0, 0, 100, 100)
    first = trackingutils.BoxTracker(bbox)
    second = trackingutils.BoxTracker(bbox)
    assert first.id != second.id

    first.update(bbox)
    assert first.hit_streak == 1

    np.testing.assert_equal(bbox, first.predict())

    # A predict without an intervening update breaks the streak.
    _ = first.predict()
    assert first.hit_streak == 0
def test_tracking_box(real_assemblies, real_tracklets):
    """Box-based SORT reproduces the reference tracklet structure."""
    reference = real_tracklets.copy()
    reference.pop("header", None)

    tracklets = {}
    tracker = trackingutils.SORTBox(1, 1, 0.1)
    for frame_index, assemblies in real_assemblies.items():
        animals = np.stack([assembly.data for assembly in assemblies])
        boxes = trackingutils.calc_bboxes_from_keypoints(animals)
        tracked = tracker.track(boxes)
        trackingutils.fill_tracklets(tracklets, tracked, animals, frame_index)

    assert len(tracklets) == len(reference)
    assert [len(t) for t in tracklets.values()] == [
        len(t) for t in reference.values()
    ]
    assert all(
        arr.shape[1] == 4
        for tracklet in tracklets.values()
        for arr in tracklet.values()
    )
def test_tracking_montblanc(
    real_assemblies_montblanc,
    real_tracklets_montblanc,
):
    """Ellipse SORT on the Mont Blanc fixtures matches the reference tracklets.

    Renamed from the ``METHOD_NAME`` placeholder so pytest collects it.
    """
    tracklets_ref = real_tracklets_montblanc.copy()
    _ = tracklets_ref.pop("header", None)
    tracklets = dict()
    # The "single" animal's assemblies bypass tracking entirely.
    tracklets["single"] = real_assemblies_montblanc[1]
    mot_tracker = trackingutils.SORTEllipse(1, 1, 0.6)
    for ind, assemblies in real_assemblies_montblanc[0].items():
        animals = np.stack([ass.data for ass in assemblies])
        trackers = mot_tracker.track(animals[..., :2])
        trackingutils.fill_tracklets(tracklets, trackers, animals, ind)
    assert len(tracklets) == len(tracklets_ref)
    assert [len(tracklet) for tracklet in tracklets.values()] == [
        len(tracklet) for tracklet in tracklets_ref.values()
    ]
    for k, assemblies in tracklets.items():
        ref = tracklets_ref[k]
        for ind, data in assemblies.items():
            # Reference keys are "frameNNN" strings except for "single".
            frame = f"frame{str(ind).zfill(3)}" if k != "single" else ind
            np.testing.assert_equal(data, ref[frame])
def test_calc_bboxes_from_keypoints():
    """Bounding boxes derived from keypoints honour slack and offset."""
    # A single keypoint expands into a box by the slack/offset arguments.
    single = np.asarray([[[0, 0, 1]]])
    np.testing.assert_equal(
        trackingutils.calc_bboxes_from_keypoints(single, 10), [[-10, -10, 10, 10, 1]]
    )
    np.testing.assert_equal(
        trackingutils.calc_bboxes_from_keypoints(single, 20, 10),
        [[-10, -20, 30, 20, 1]],
    )

    width = 200
    height = 2 * width
    xyp = np.zeros((1, 2, 3))
    xyp[:, 1, :2] = width, height
    xyp[:, 1, 2] = 1

    # The confidence column is mandatory.
    with pytest.raises(ValueError):
        trackingutils.calc_bboxes_from_keypoints(xyp[..., :2])

    np.testing.assert_equal(
        trackingutils.calc_bboxes_from_keypoints(xyp), [[0, 0, width, height, 0.5]]
    )

    slack = 20
    np.testing.assert_equal(
        trackingutils.calc_bboxes_from_keypoints(xyp, slack=slack),
        [[-slack, -slack, width + slack, height + slack, 0.5]],
    )

    offset = 50
    np.testing.assert_equal(
        trackingutils.calc_bboxes_from_keypoints(xyp, offset=offset),
        [[offset, 0, width + offset, height, 0.5]],
    )
298,922 | build schema on 200 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "network perimeter link show",
)
class Show(AAZCommand):
    """Get the specified NSP link resource.

    :example: Get NSP link
        az network perimeter link show --name link1 --perimeter-name nsp1 --resource-group rg1
    """

    _aaz_info = {
        "version": "2021-02-01-preview",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.network/networksecurityperimeters/{}/links/{}", "2021-02-01-preview"],
        ]
    }

    def _handler(self, command_args):
        super()._handler(command_args)
        self._execute_operations()
        return self._output()

    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""
        _args_schema = cls._args_schema
        _args_schema.link_name = AAZStrArg(
            options=["-n", "--name", "--link-name"],
            help="The name of the NSP link.",
            required=True,
            id_part="child_name_1",
        )
        _args_schema.perimeter_name = AAZStrArg(
            options=["--perimeter-name"],
            help="The name of the network security perimeter.",
            required=True,
            id_part="name",
        )
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        return cls._args_schema

    def _execute_operations(self):
        self.pre_operations()
        self.NspLinksGet(ctx=self.ctx)()
        self.post_operations()

    @register_callback
    def pre_operations(self):
        pass

    @register_callback
    def post_operations(self):
        pass

    def _output(self, *args, **kwargs):
        result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
        return result

    class NspLinksGet(AAZHttpOperation):
        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            if session.http_response.status_code in [200]:
                return self.on_200(session)
            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityPerimeters/{networkSecurityPerimeterName}/links/{linkName}",
                **self.url_parameters
            )

        @property
        def method(self):
            return "GET"

        @property
        def error_format(self):
            return "ODataV4Format"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "linkName", self.ctx.args.link_name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "networkSecurityPerimeterName", self.ctx.args.perimeter_name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2021-02-01-preview",
                    required=True,
                ),
            }
            return parameters

        @property
        def header_parameters(self):
            parameters = {
                **self.serialize_header_param(
                    "Accept", "application/json",
                ),
            }
            return parameters

        def on_200(self, session):
            data = self.deserialize_http_content(session)
            self.ctx.set_var(
                "instance",
                data,
                schema_builder=self._build_schema_on_200
            )

        _schema_on_200 = None

        @classmethod
        def _build_schema_on_200(cls):
            # Renamed from the METHOD_NAME placeholder; aaz-dev-tools emits
            # this lazily-built response schema as _build_schema_on_200.
            if cls._schema_on_200 is not None:
                return cls._schema_on_200

            cls._schema_on_200 = AAZObjectType()

            _schema_on_200 = cls._schema_on_200
            _schema_on_200.etag = AAZStrType(
                flags={"read_only": True},
            )
            _schema_on_200.id = AAZStrType(
                flags={"read_only": True},
            )
            _schema_on_200.name = AAZStrType(
                flags={"read_only": True},
            )
            _schema_on_200.properties = AAZObjectType()
            _schema_on_200.type = AAZStrType(
                flags={"read_only": True},
            )

            properties = cls._schema_on_200.properties
            properties.auto_approved_remote_perimeter_resource_id = AAZStrType(
                serialized_name="autoApprovedRemotePerimeterResourceId",
            )
            properties.description = AAZStrType()
            properties.local_inbound_profiles = AAZListType(
                serialized_name="localInboundProfiles",
            )
            properties.local_outbound_profiles = AAZListType(
                serialized_name="localOutboundProfiles",
                flags={"read_only": True},
            )
            properties.provisioning_state = AAZStrType(
                serialized_name="provisioningState",
                flags={"read_only": True},
            )
            properties.remote_inbound_profiles = AAZListType(
                serialized_name="remoteInboundProfiles",
            )
            properties.remote_outbound_profiles = AAZListType(
                serialized_name="remoteOutboundProfiles",
                flags={"read_only": True},
            )
            properties.remote_perimeter_guid = AAZStrType(
                serialized_name="remotePerimeterGuid",
                flags={"read_only": True},
            )
            properties.remote_perimeter_location = AAZStrType(
                serialized_name="remotePerimeterLocation",
                flags={"read_only": True},
            )
            properties.status = AAZStrType(
                flags={"read_only": True},
            )

            local_inbound_profiles = cls._schema_on_200.properties.local_inbound_profiles
            local_inbound_profiles.Element = AAZStrType()

            local_outbound_profiles = cls._schema_on_200.properties.local_outbound_profiles
            local_outbound_profiles.Element = AAZStrType()

            remote_inbound_profiles = cls._schema_on_200.properties.remote_inbound_profiles
            remote_inbound_profiles.Element = AAZStrType()

            remote_outbound_profiles = cls._schema_on_200.properties.remote_outbound_profiles
            remote_outbound_profiles.Element = AAZStrType()

            return cls._schema_on_200
class _ShowHelper:
"""Helper class for Show"""
__all__ = ["Show"] |
298,923 | create attention images summary | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Attention-based sequence-to-sequence model with dynamic RNN support."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from . import model
from . import model_helper
__all__ = ["AttentionModel"]
class AttentionModel(model.Model):
    """Sequence-to-sequence dynamic model with attention.

    This class implements a multi-layer recurrent neural network as encoder,
    and an attention-based decoder. This is the same as the model described in
    (Luong et al., EMNLP'2015) paper: https://arxiv.org/pdf/1508.04025v5.pdf.
    This class also allows to use GRU cells in addition to LSTM cells with
    support for dropout.
    """

    def __init__(self,
                 hparams,
                 mode,
                 iterator,
                 source_vocab_table,
                 target_vocab_table,
                 reverse_target_vocab_table=None,
                 scope=None,
                 extra_args=None):
        # Attention is active only when both an architecture and a mechanism
        # type are configured in hparams.
        self.has_attention = hparams.attention_architecture and hparams.attention
        # Set attention_mechanism_fn
        if self.has_attention:
            # Callers may inject a custom mechanism factory through extra_args;
            # otherwise fall back to the module-level create_attention_mechanism.
            if extra_args and extra_args.attention_mechanism_fn:
                self.attention_mechanism_fn = extra_args.attention_mechanism_fn
            else:
                self.attention_mechanism_fn = create_attention_mechanism
        super(AttentionModel, self).__init__(
            hparams=hparams,
            mode=mode,
            iterator=iterator,
            source_vocab_table=source_vocab_table,
            target_vocab_table=target_vocab_table,
            reverse_target_vocab_table=reverse_target_vocab_table,
            scope=scope,
            extra_args=extra_args)

    def _prepare_beam_search_decoder_inputs(
            self, beam_width, memory, source_sequence_length, encoder_state):
        """Tile memory, lengths and encoder state `beam_width` times so every
        beam hypothesis sees its own copy; returns the enlarged batch size."""
        memory = tf.contrib.seq2seq.tile_batch(
            memory, multiplier=beam_width)
        source_sequence_length = tf.contrib.seq2seq.tile_batch(
            source_sequence_length, multiplier=beam_width)
        encoder_state = tf.contrib.seq2seq.tile_batch(
            encoder_state, multiplier=beam_width)
        # Effective batch grows by the beam width.
        batch_size = self.batch_size * beam_width
        return memory, source_sequence_length, encoder_state, batch_size

    def _build_decoder_cell(self, hparams, encoder_outputs, encoder_state,
                            source_sequence_length):
        """Build a RNN cell with attention mechanism that can be used by decoder."""
        # No Attention
        if not self.has_attention:
            return super(AttentionModel, self)._build_decoder_cell(
                hparams, encoder_outputs, encoder_state, source_sequence_length)
        elif hparams.attention_architecture != "standard":
            raise ValueError(
                "Unknown attention architecture %s" % hparams.attention_architecture)
        num_units = hparams.num_units
        num_layers = self.num_decoder_layers
        num_residual_layers = self.num_decoder_residual_layers
        infer_mode = hparams.infer_mode
        dtype = tf.float32
        # Ensure memory is batch-major
        if self.time_major:
            memory = tf.transpose(encoder_outputs, [1, 0, 2])
        else:
            memory = encoder_outputs
        if (self.mode == tf.contrib.learn.ModeKeys.INFER and
                infer_mode == "beam_search"):
            memory, source_sequence_length, encoder_state, batch_size = (
                self._prepare_beam_search_decoder_inputs(
                    hparams.beam_width, memory, source_sequence_length,
                    encoder_state))
        else:
            batch_size = self.batch_size
        # Attention
        attention_mechanism = self.attention_mechanism_fn(
            hparams.attention, num_units, memory, source_sequence_length, self.mode)
        cell = model_helper.create_rnn_cell(
            unit_type=hparams.unit_type,
            num_units=num_units,
            num_layers=num_layers,
            num_residual_layers=num_residual_layers,
            forget_bias=hparams.forget_bias,
            dropout=hparams.dropout,
            num_gpus=self.num_gpus,
            mode=self.mode,
            single_cell_fn=self.single_cell_fn)
        # Only generate alignment in greedy INFER mode.
        alignment_history = (self.mode == tf.contrib.learn.ModeKeys.INFER and
                             infer_mode != "beam_search")
        cell = tf.contrib.seq2seq.AttentionWrapper(
            cell,
            attention_mechanism,
            attention_layer_size=num_units,
            alignment_history=alignment_history,
            output_attention=hparams.output_attention,
            name="attention")
        # TODO(thangluong): do we need num_layers, num_gpus?
        cell = tf.contrib.rnn.DeviceWrapper(cell,
                                            model_helper.get_device_str(
                                                num_layers - 1, self.num_gpus))
        if hparams.pass_hidden_state:
            # Seed the decoder state with the encoder's final state.
            decoder_initial_state = cell.zero_state(batch_size, dtype).clone(
                cell_state=encoder_state)
        else:
            decoder_initial_state = cell.zero_state(batch_size, dtype)
        return cell, decoder_initial_state

    def _get_infer_summary(self, hparams):
        """Return an attention-image summary op, or a no-op when no alignment
        history is available (no attention, or beam search)."""
        if not self.has_attention or hparams.infer_mode == "beam_search":
            return tf.no_op()
        return METHOD_NAME(self.final_context_state)
def create_attention_mechanism(attention_option, num_units, memory,
                               source_sequence_length, mode):
    """Create attention mechanism based on the attention_option.

    Args:
        attention_option: one of "luong", "scaled_luong", "bahdanau",
            "normed_bahdanau".
        num_units: attention depth.
        memory: batch-major encoder outputs attended over.
        source_sequence_length: lengths used to mask the memory.
        mode: unused; kept for interface compatibility.

    Returns:
        A ``tf.contrib.seq2seq`` attention mechanism instance.

    Raises:
        ValueError: if ``attention_option`` is not recognized.
    """
    del mode  # unused
    if attention_option == "luong":
        return tf.contrib.seq2seq.LuongAttention(
            num_units, memory, memory_sequence_length=source_sequence_length)
    if attention_option == "scaled_luong":
        return tf.contrib.seq2seq.LuongAttention(
            num_units,
            memory,
            memory_sequence_length=source_sequence_length,
            scale=True)
    if attention_option == "bahdanau":
        return tf.contrib.seq2seq.BahdanauAttention(
            num_units, memory, memory_sequence_length=source_sequence_length)
    if attention_option == "normed_bahdanau":
        return tf.contrib.seq2seq.BahdanauAttention(
            num_units,
            memory,
            memory_sequence_length=source_sequence_length,
            normalize=True)
    raise ValueError("Unknown attention option %s" % attention_option)
def METHOD_NAME(final_context_state):
    """create attention image and attention summary."""
    # Stack the per-decode-step alignments; given the transpose below the
    # stacked axes are presumably (tgt_len, batch, src_len) — TODO confirm.
    stacked = final_context_state.alignment_history.stack()
    # Reshape to (batch, src_seq_len, tgt_seq_len, 1) and scale to [0, 255]
    # so the alignments render as a grayscale image.
    image = tf.expand_dims(tf.transpose(stacked, [1, 2, 0]), -1) * 255
    return tf.summary.image("attention_images", image)
298,924 | test fingerprint | # Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from textwrap import dedent
import pytest
from pants.testutil.pants_integration_test import run_pants, setup_tmpdir
def test_system_binary_and_adhoc_tool() -> None:
    """An adhoc_tool driving a system_binary `cat` should log the file contents."""
    project = {
        "src/test_file.txt": dedent(
            """\
            I am a duck.
            """
        ),
        "src/BUILD": dedent(
            """\
            files(name="files", sources=["*.txt",])
            system_binary(
                name="cat",
                binary_name="cat",
            )
            adhoc_tool(
                name="adhoc",
                runnable=":cat",
                execution_dependencies=[":files",],
                args=["test_file.txt",],
                log_output=True,
                stdout="stdout",
            )
            """
        ),
    }
    with setup_tmpdir(project) as tmpdir:
        outcome = run_pants(
            [
                "--backend-packages=['pants.backend.experimental.adhoc',]",
                f"--source-root-patterns=['{tmpdir}/src']",
                "export-codegen",
                f"{tmpdir}/src:adhoc",
            ]
        )
        assert "[INFO] I am a duck." in outcome.stderr.strip()
@pytest.mark.parametrize(
    ("fingerprint,passes"),
    (
        (r"Binary Name v6\.32\.1", True),
        (r"(.*)v6\.(.*)", True),
        (r"Binary Name v6\.99999\.1", False),
    ),
)
def METHOD_NAME(fingerprint: str, passes: bool) -> None:
    """A system_binary resolves only when its fingerprint regex matches."""
    build_content = dedent(
        f"""\
        system_binary(
            name="bash",
            binary_name="bash",
            fingerprint=r"{fingerprint}",
            fingerprint_args=("-c", "echo Binary Name v6.32.1",),
        )
        adhoc_tool(
            name="adhoc",
            runnable=":bash",
            args=["-c","echo I am a duck!"],
            log_output=True,
            stdout="stdout",
        )
        """
    )
    with setup_tmpdir({"src/BUILD": build_content}) as tmpdir:
        outcome = run_pants(
            [
                "--backend-packages=['pants.backend.experimental.adhoc',]",
                f"--source-root-patterns=['{tmpdir}/src']",
                "export-codegen",
                f"{tmpdir}/src:adhoc",
            ]
        )
        if passes:
            assert outcome.exit_code == 0
            assert "[INFO] I am a duck!" in outcome.stderr.strip()
        else:
            assert outcome.exit_code != 0
            assert "Could not find a binary with name `bash`" in outcome.stderr.strip()
def test_runnable_dependencies() -> None:
    """Binaries listed in runnable_dependencies are on PATH for the tool."""
    build_content = dedent(
        """\
        system_binary(
            name="bash",
            binary_name="bash",
        )
        system_binary(
            name="awk",
            binary_name="awk",
            fingerprint_args=["--version"],
            fingerprint=".*",
        )
        adhoc_tool(
            name="adhoc",
            runnable=":bash",
            runnable_dependencies=[":awk",],
            args=["-c", "awk 'BEGIN {{ print \\"I am a duck.\\" }}'"],
            log_output=True,
            stdout="stdout",
        )
        """
    )
    with setup_tmpdir({"src/BUILD": build_content}) as tmpdir:
        outcome = run_pants(
            [
                "--backend-packages=['pants.backend.experimental.adhoc',]",
                f"--source-root-patterns=['{tmpdir}/src']",
                "export-codegen",
                f"{tmpdir}/src:adhoc",
            ]
        )
        assert "[INFO] I am a duck." in outcome.stderr.strip()
def test_external_env_vars() -> None:
    """extra_env_vars makes ambient environment variables visible to the tool."""
    build_content = dedent(
        """\
        system_binary(
            name="bash",
            binary_name="bash",
        )
        adhoc_tool(
            name="adhoc",
            runnable=":bash",
            args=["-c", "echo $ENVVAR"],
            log_output=True,
            stdout="stdout",
            extra_env_vars=["ENVVAR"],
        )
        """
    )
    with setup_tmpdir({"src/BUILD": build_content}) as tmpdir:
        outcome = run_pants(
            [
                "--backend-packages=['pants.backend.experimental.adhoc',]",
                f"--source-root-patterns=['{tmpdir}/src']",
                "export-codegen",
                f"{tmpdir}/src:adhoc",
            ],
            extra_env={"ENVVAR": "clang"},
        )
        assert "[INFO] clang" in outcome.stderr.strip()
298,925 | row | ######################################################################################################################
# Copyright (C) 2017-2022 Spine project consortium
# This file is part of Spine Toolbox.
# Spine Toolbox is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option)
# any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General
# Public License for more details. You should have received a copy of the GNU Lesser General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
######################################################################################################################
"""
Project Tree items.
"""
import logging
import bisect
from PySide6.QtCore import Qt
from spinetoolbox.metaobject import MetaObject
class BaseProjectTreeItem(MetaObject):
    """Base class for all project tree items."""

    def __init__(self, name, description):
        """
        Args:
            name (str): Object name
            description (str): Object description
        """
        super().__init__(name, description)
        self._parent = None  # Parent BaseProjectTreeItem. Set when add_child is called
        self._children = list()  # Child BaseProjectTreeItems. Appended when new items are inserted into model.

    def flags(self):  # pylint: disable=no-self-use
        """Returns the item flags."""
        return Qt.NoItemFlags

    def parent(self):
        """Returns parent project tree item."""
        return self._parent

    def child_count(self):
        """Returns the number of child project tree items."""
        return len(self._children)

    def children(self):
        """Returns the children of this project tree item."""
        return self._children

    def child(self, METHOD_NAME):
        """Returns child BaseProjectTreeItem on given row.

        Args:
            METHOD_NAME (int): Row of child to return

        Returns:
            BaseProjectTreeItem: item on given row or None if it does not exist
        """
        try:
            item = self._children[METHOD_NAME]
        except IndexError:
            logging.error("[%s] has no child on row %s", self.name, METHOD_NAME)
            return None
        return item

    def METHOD_NAME(self):
        """Returns the row on which this item is located."""
        if self._parent is not None:
            return self._parent.children().index(self)
        return 0

    def add_child(self, child_item):
        """Base method that shall be overridden in subclasses."""
        raise NotImplementedError()

    def remove_child(self, METHOD_NAME):
        """Remove the child of this BaseProjectTreeItem from given row. Do not call this method directly.
        This method is called by ProjectItemTreeModel when items are removed.

        Args:
            METHOD_NAME (int): Row of child to remove

        Returns:
            bool: True if operation succeeded, False otherwise
        """
        # BUG FIX: the bound check previously used `> len(self._children)`,
        # which let row == len(self._children) through and made list.pop
        # raise IndexError instead of returning False.
        if METHOD_NAME < 0 or METHOD_NAME >= len(self._children):
            return False
        child = self._children.pop(METHOD_NAME)
        child._parent = None
        return True

    def custom_context_menu(self, toolbox):
        """Returns the context menu for this item. Implement in subclasses as needed.

        Args:
            toolbox (QWidget): The widget that is controlling the menu

        Returns:
            QMenu: context menu
        """
        raise NotImplementedError()
class RootProjectTreeItem(BaseProjectTreeItem):
    """Class for the root project tree item."""

    def __init__(self):
        super().__init__("root", "The Root Project Tree Item.")

    def add_child(self, child_item):
        """Appends a category item as the last child of the root item.

        Args:
            child_item (CategoryProjectTreeItem): Item to add

        Returns:
            bool: True for success, False otherwise
        """
        if not isinstance(child_item, CategoryProjectTreeItem):
            logging.error("You can only add a category item as a child of the root item")
            return False
        self._children.append(child_item)
        child_item._parent = self
        return True

    def custom_context_menu(self, toolbox):
        """See base class."""
        raise NotImplementedError()
class CategoryProjectTreeItem(BaseProjectTreeItem):
    """Class for category project tree items."""

    def flags(self):
        """Returns the item flags: enabled, but not selectable or editable."""
        return Qt.ItemIsEnabled

    def add_child(self, child_item):
        """Inserts a leaf item, keeping children sorted case-insensitively by name.

        Args:
            child_item (LeafProjectTreeItem): Item to add

        Returns:
            bool: True for success, False otherwise
        """
        if not isinstance(child_item, LeafProjectTreeItem):
            logging.error("You can only add a leaf item as a child of a category item")
            return False
        sort_key = lambda item: item.name.lower()
        existing_keys = [sort_key(existing) for existing in self._children]
        insert_at = bisect.bisect_left(existing_keys, sort_key(child_item))
        self._children.insert(insert_at, child_item)
        child_item._parent = self
        return True

    def custom_context_menu(self, toolbox):
        """Returns the context menu for this item.

        Args:
            toolbox (ToolboxUI): Toolbox main window

        Returns:
            QMenu: context menu
        """
        return toolbox.item_category_context_menu()
class LeafProjectTreeItem(BaseProjectTreeItem):
    """Class for leaf items in the project item tree."""

    def __init__(self, project_item):
        """
        Args:
            project_item (ProjectItem): the real project item this item represents
        """
        super().__init__(project_item.name, project_item.description)
        self._project_item = project_item

    @property
    def project_item(self):
        """the project item linked to this leaf"""
        return self._project_item

    def flags(self):
        """Returns the item flags: enabled, selectable and editable."""
        return Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsEditable

    def add_child(self, child_item):
        """Leaf items never take children."""
        raise NotImplementedError()

    def custom_context_menu(self, toolbox):
        """Returns the context menu for this item.

        Args:
            toolbox (ToolboxUI): Toolbox main window

        Returns:
            QMenu: context menu
        """
        return toolbox.project_item_context_menu(self._project_item.actions())
298,926 | resource apply dense | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adagrad optimizer for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.optimizer_v2 import optimizer_v2
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import training_ops
class AdagradOptimizer(optimizer_v2.OptimizerV2):
    """Optimizer that implements the Adagrad algorithm.

    See this [paper](http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf)
    or this
    [intro](https://ppasupat.github.io/a9online/uploads/proximal_notes.pdf).
    """

    def __init__(self, learning_rate, initial_accumulator_value=0.1,
                 use_locking=False, name="Adagrad"):
        """Construct a new Adagrad optimizer.

        The learning_rate arg below is a hyperparameter, where a hyperparameter is
        defined as a scalar Tensor, a regular Python value or a callable (which
        will be evaluated when `apply_gradients` is called) returning a scalar
        Tensor or a Python value.

        Args:
            learning_rate: A float hyperparameter. The learning rate.
            initial_accumulator_value: A floating point value.
                Starting value for the accumulators, must be positive.
            use_locking: If `True` use locks for update operations.
            name: Optional name prefix for the operations created when applying
                gradients. Defaults to "Adagrad".

        Raises:
            ValueError: If the `initial_accumulator_value` is invalid.
        """
        if initial_accumulator_value <= 0.0:
            raise ValueError("initial_accumulator_value must be positive: %s" %
                             initial_accumulator_value)
        super(AdagradOptimizer, self).__init__(use_locking, name)
        self._set_hyper("learning_rate", learning_rate)
        self._initial_accumulator_value = initial_accumulator_value

    def _create_vars(self, var_list, state):
        # Create one "accumulator" slot per variable, filled with
        # initial_accumulator_value.
        for v in var_list:
            dtype = v.dtype.base_dtype
            if v.get_shape().is_fully_defined():
                init = init_ops.constant_initializer(
                    self._initial_accumulator_value, dtype=dtype)
            else:
                # NOTE: v and dtype are bound as defaults to avoid the
                # late-binding-closure pitfall in this loop.
                def init(v=v, dtype=dtype):
                    # Use a Tensor instead of initializer if variable does not have
                    # static shape.
                    init_constant = gen_array_ops.fill(
                        array_ops.shape(v), self._initial_accumulator_value)
                    return math_ops.cast(init_constant, dtype)
            state.create_slot_with_initializer(v, init, v.get_shape(), dtype,
                                               "accumulator")

    def _apply_dense(self, grad, var, state):
        # Dense update via the fused apply_adagrad training op.
        acc = state.get_slot(var, "accumulator")
        return training_ops.apply_adagrad(
            var,
            acc,
            state.get_hyper("learning_rate", var.dtype.base_dtype),
            grad,
            use_locking=self._use_locking)

    def METHOD_NAME(self, grad, var, state):
        # Same as _apply_dense, but for resource variables (passes handles).
        acc = state.get_slot(var, "accumulator")
        return training_ops.resource_apply_adagrad(
            var.handle,
            acc.handle,
            state.get_hyper("learning_rate", var.dtype.base_dtype),
            grad,
            use_locking=self._use_locking)

    def _apply_sparse(self, grad, var, state):
        # Sparse update: only the rows named by grad.indices are touched.
        acc = state.get_slot(var, "accumulator")
        return training_ops.sparse_apply_adagrad(
            var,
            acc,
            state.get_hyper("learning_rate", var.dtype.base_dtype),
            grad.values,
            grad.indices,
            use_locking=self._use_locking)

    def _resource_apply_sparse(self, grad, var, indices, state):
        # Sparse update for resource variables.
        acc = state.get_slot(var, "accumulator")
        return training_ops.resource_sparse_apply_adagrad(
            var.handle,
            acc.handle,
            state.get_hyper("learning_rate", var.dtype.base_dtype),
            grad,
            indices,
            use_locking=self._use_locking)
298,927 | finish | # SPDX-License-Identifier: MIT
# Copyright (c) 2020 The Pybricks Authors
"""
:class:`RFCOMMServer` can be used to communicate with other Bluetooth RFCOMM
devices that don't support the EV3 mailbox protocol.
It is based on the standard library ``socketserver`` module and attempts to
remain a strict subset of that implementation when it comes to low-level
implementation details.
"""
from _thread import start_new_thread
from uctypes import addressof, sizeof, struct, ARRAY, UINT8, UINT16
from usocket import socket, SOCK_STREAM
from bluetooth_c import resolve
# stuff from bluetooth/bluetooth.h
AF_BLUETOOTH = 31  # socket address family for Bluetooth
BTPROTO_RFCOMM = 3  # RFCOMM protocol number
BDADDR_ANY = "00:00:00:00:00:00"  # wildcard address: bind to any local adapter
# C typedefs mirrored with uctypes so a raw sockaddr_rc can be built in-place
# over a bytearray.
sa_family_t = UINT16
bd_addr_t = {"b": (ARRAY | 0, UINT8 | 6)}  # 6 address bytes, low byte first
sockaddr_rc = {
    "rc_family": sa_family_t | 0,
    "rc_bdaddr": (2, bd_addr_t),
    "rc_channel": UINT8 | 8,
}
def str2ba(string, ba):
    """Convert string to Bluetooth address"""
    # "AA:BB:.." is big-endian text; the struct stores bytes low-first.
    octets = string.split(":")
    for index, octet in enumerate(octets):
        ba.b[5 - index] = int(octet, 16)
def ba2str(ba):
    """Convert Bluetooth address to string"""
    # Bytes are stored low-first; render them high-first as "AA:BB:..".
    octets = ["{:02X}".format(b) for b in ba.b]
    return ":".join(reversed(octets)).upper()
class RFCOMMServer:
    """Object that simplifies setting up an RFCOMM socket server.

    This is based on the ``socketserver.SocketServer`` class in the Python
    standard library.
    """

    # Backlog passed to listen(): at most one pending connection.
    request_queue_size = 1

    def __init__(self, server_address, RequestHandlerClass):
        """
        Args:
            server_address: ``(bdaddr_string, channel)`` tuple to bind to.
            RequestHandlerClass: class instantiated for each connection.
        """
        self.server_address = server_address
        self.RequestHandlerClass = RequestHandlerClass
        self.socket = socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM)
        try:
            # Build a raw C sockaddr_rc over a bytearray and bind to it.
            addr_data = bytearray(sizeof(sockaddr_rc))
            addr = struct(addressof(addr_data), sockaddr_rc)
            addr.rc_family = AF_BLUETOOTH
            str2ba(server_address[0], addr.rc_bdaddr)
            addr.rc_channel = server_address[1]
            self.socket.bind(addr_data)
            # self.server_address = self.socket.getsockname()
            self.socket.listen(self.request_queue_size)
        except:
            # Bare except is deliberate: always release the socket, then re-raise.
            self.server_close()
            raise

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.server_close()

    def handle_request(self):
        """Accept one connection and dispatch it to the handler class."""
        try:
            request, addr_data = self.socket.accept()
        except OSError:
            # Accept failed (e.g. the socket was closed); skip this request.
            return
        try:
            # accept() returns the peer address as raw sockaddr_rc bytes;
            # decode it into a (bdaddr_string, channel) tuple.
            addr = struct(addressof(addr_data), sockaddr_rc)
            client_address = (ba2str(addr.rc_bdaddr), addr.rc_channel)
            self.process_request(request, client_address)
        except:
            request.close()
            raise

    def process_request(self, request, client_address):
        # Serial implementation; ThreadingMixIn overrides this.
        self.finish_request(request, client_address)
        request.close()

    def finish_request(self, request, client_address):
        # The handler's __init__ services the whole request.
        self.RequestHandlerClass(request, client_address, self)

    def server_close(self):
        """Close the listening socket."""
        self.socket.close()
class ThreadingMixIn:
    """Mix-in that overrides ``process_request`` to service each request in a
    freshly started thread (mirrors ``socketserver.ThreadingMixIn``)."""

    def process_request_thread(self, request, client_address):
        # Thread body: always close the request socket, even if the handler raises.
        try:
            self.finish_request(request, client_address)
        finally:
            request.close()

    def process_request(self, request, client_address):
        # Hand off to a worker thread and return to the accept loop immediately.
        start_new_thread(self.process_request_thread, (request, client_address))
# MRO note: ThreadingMixIn must come first so its process_request overrides
# the serial implementation inherited from RFCOMMServer.
class ThreadingRFCOMMServer(ThreadingMixIn, RFCOMMServer):
    """Version of :class:`RFCOMMServer` that handles connections in a new
    thread.
    """

    pass
class StreamRequestHandler:
    """Class that handles incoming requests.

    This is based on ``socketserver.StreamRequestHandler`` from the Python
    standard library: construction runs the whole request lifecycle
    (setup -> handle -> finish).
    """

    def __init__(self, request, client_address, server):
        self.request = request
        self.client_address = client_address
        self.server = server
        self.setup()
        try:
            self.handle()
        finally:
            self.METHOD_NAME()

    def setup(self):
        """Expose the raw socket as both the read and the write stream."""
        self.rfile = self.request
        self.wfile = self.request

    def handle(self):
        """Override in subclasses to service the connection."""
        pass

    def METHOD_NAME(self):
        """Override in subclasses to clean up after handle()."""
        pass
class RFCOMMClient:
    """Client counterpart of :class:`RFCOMMServer`: connects to a remote
    RFCOMM service and runs a request handler over the connection."""

    def __init__(self, client_address, RequestHandlerClass):
        self.client_address = client_address
        self.RequestHandlerClass = RequestHandlerClass
        self.socket = socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM)

    def handle_request(self):
        """Connect to the remote device and process one request."""
        raw_addr = bytearray(sizeof(sockaddr_rc))
        rc_addr = struct(addressof(raw_addr), sockaddr_rc)
        rc_addr.rc_family = AF_BLUETOOTH
        str2ba(self.client_address[0], rc_addr.rc_bdaddr)
        rc_addr.rc_channel = self.client_address[1]
        self.socket.connect(raw_addr)
        try:
            self.process_request(self.socket, self.client_address)
        except:
            self.socket.close()
            raise

    def process_request(self, request, client_address):
        """Run the handler, then close the socket."""
        self.finish_request(request, client_address)
        request.close()

    def finish_request(self, request, client_address):
        """Instantiate the user-supplied handler; it services the request."""
        self.RequestHandlerClass(request, client_address, self)

    def client_close(self):
        """Close the client socket."""
        self.socket.close()
# MRO note: ThreadingMixIn comes first so each request is processed in its
# own thread rather than serially.
class ThreadingRFCOMMClient(ThreadingMixIn, RFCOMMClient):
    """Version of :class:`RFCOMMClient` that handles connections in a new
    thread."""

    pass
298,928 | infer forward | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import math
import net
class DygraphModel():
    """Dygraph driver for TagSpace: builds the network, feeds, hinge loss,
    optimizer and the train/infer forward passes."""

    # define model
    def create_model(self, config):
        """Instantiate net.TagspaceLayer from the hyper-parameters in *config*."""
        hp = "hyper_parameters."
        return net.TagspaceLayer(
            config.get(hp + "vocab_text_size"),
            config.get(hp + "vocab_tag_size"),
            config.get(hp + "emb_dim"),
            config.get(hp + "hid_dim"),
            config.get(hp + "win_size"),
            config.get(hp + "margin"),
            config.get(hp + "neg_size"),
            config.get(hp + "text_len"))

    # define feeds which convert numpy of batch data to paddle.tensor
    def create_feeds(self, batch_data, text_len, neg_size):
        """Return [text, pos_tag, neg_tag] as int64 tensors of fixed width."""

        def to_int64_tensor(column, width):
            return paddle.to_tensor(
                column.numpy().astype('int64').reshape(-1, width))

        return [
            to_int64_tensor(batch_data[0], text_len),
            to_int64_tensor(batch_data[1], 1),
            to_int64_tensor(batch_data[2], neg_size),
        ]

    # define loss function by predicts and label
    def create_loss(self, batch_size, margin, cos_pos, cos_neg):
        """Margin ranking loss: mean(max(0, margin - cos_pos + cos_neg))."""
        margin_t = paddle.full(
            shape=[batch_size, 1], fill_value=margin, dtype='float32')
        zero_t = paddle.full(
            shape=[batch_size, 1], fill_value=0.0, dtype='float32')
        hinge = paddle.add(paddle.subtract(margin_t, cos_pos), cos_neg)
        return paddle.mean(paddle.maximum(zero_t, hinge))

    # define optimizer
    def create_optimizer(self, dy_model, config):
        """Adagrad over the model parameters; lr from config, default 1e-3."""
        learning_rate = config.get(
            "hyper_parameters.optimizer.learning_rate", 0.001)
        return paddle.optimizer.Adagrad(
            learning_rate=learning_rate, parameters=dy_model.parameters())

    # define metrics such as auc/acc
    # multi-task need to define multi metric
    def get_acc(self, x, y, batch_size):
        """Scalar fraction of rows where x < y elementwise."""
        hits = paddle.cast(paddle.less_than(x, y), dtype='float32')
        denominator = paddle.full(
            dtype='float32', shape=[batch_size, 1], fill_value=1.0)
        return paddle.divide(paddle.sum(hits), paddle.sum(denominator))

    def create_metrics(self):
        """No streaming metrics are tracked; both lists are empty."""
        return [], []

    # construct train forward phase
    def train_forward(self, dy_model, metrics_list, batch_data, config):
        """One training step: forward pass, hinge loss and batch accuracy."""
        neg_size = config.get("hyper_parameters.neg_size")
        text_len = config.get("hyper_parameters.text_len")
        margin = config.get("hyper_parameters.margin")
        batch_size = config.get("runner.train_batch_size", 128)
        cos_pos, cos_neg = dy_model.forward(
            self.create_feeds(batch_data, text_len, neg_size))
        loss = self.create_loss(batch_size, margin, cos_pos, cos_neg)
        # update metrics
        acc = self.get_acc(cos_neg, cos_pos, batch_size)
        return loss, metrics_list, {"loss": loss, "ACC": acc}

    def METHOD_NAME(self, dy_model, metrics_list, batch_data, config):
        """One inference step: forward pass and batch accuracy only."""
        neg_size = config.get("hyper_parameters.neg_size")
        text_len = config.get("hyper_parameters.text_len")
        batch_size = config.get("runner.infer_batch_size", 128)
        cos_pos, cos_neg = dy_model.forward(
            self.create_feeds(batch_data, text_len, neg_size))
        # update metrics
        acc = self.get_acc(cos_neg, cos_pos, batch_size)
        return metrics_list, {"ACC": acc}
298,929 | patch tornado | """From https://github.com/erdewit/nest_asyncio"""
import asyncio
import asyncio.events as events
import os
import sys
import threading
from contextlib import contextmanager, suppress
from heapq import heappop
def apply(loop=None):
    """Patch asyncio to make its event loop reentrant.

    Args:
        loop: event loop to patch; defaults to the current event loop.
    """
    # Order matters: module/task/tornado patches are global and idempotent,
    # then the concrete loop instance's class is patched.
    _patch_asyncio()
    _patch_task()
    METHOD_NAME()
    loop = loop or asyncio.get_event_loop()
    _patch_loop(loop)
def _patch_asyncio():
    """
    Patch asyncio module to use pure Python tasks and futures,
    use module level _current_tasks, all_tasks and patch run method.
    """

    def run(main, *, debug=False):
        # Reentrant replacement for asyncio.run: reuses the current event
        # loop if one exists instead of insisting on a fresh one.
        try:
            loop = asyncio.get_event_loop()
        except RuntimeError:
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
        _patch_loop(loop)
        loop.set_debug(debug)
        task = asyncio.ensure_future(main)
        try:
            return loop.run_until_complete(task)
        finally:
            if not task.done():
                task.cancel()
                with suppress(asyncio.CancelledError):
                    loop.run_until_complete(task)

    def _get_event_loop(stacklevel=3):
        # Return the running loop if any, else fall back to the policy's loop.
        loop = events._get_running_loop()
        if loop is None:
            loop = events.get_event_loop_policy().get_event_loop()
        return loop

    # Idempotence guard: only patch the asyncio module once per process.
    if hasattr(asyncio, "_nest_patched"):
        return
    if sys.version_info >= (3, 6, 0):
        # Force the pure-Python Task/Future implementations; the C-accelerated
        # versions cannot be stepped reentrantly.
        asyncio.Task = asyncio.tasks._CTask = asyncio.tasks.Task = asyncio.tasks._PyTask
        asyncio.Future = (
            asyncio.futures._CFuture
        ) = asyncio.futures.Future = asyncio.futures._PyFuture
    if sys.version_info < (3, 7, 0):
        asyncio.tasks._current_tasks = asyncio.tasks.Task._current_tasks
        asyncio.all_tasks = asyncio.tasks.Task.all_tasks
    if sys.version_info >= (3, 9, 0):
        events._get_event_loop = (
            events.get_event_loop
        ) = asyncio.get_event_loop = _get_event_loop
    _get_event_loop  # no-op reference; kept from upstream
    asyncio.run = run
    asyncio._nest_patched = True
def _patch_loop(loop):
    """Patch loop to make it reentrant."""

    def run_forever(self):
        with manage_run(self), manage_asyncgens(self):
            while True:
                self._run_once()
                if self._stopping:
                    break
            self._stopping = False

    def run_until_complete(self, future):
        # Unlike the stock implementation, this may be called while the loop
        # is already running: it simply pumps _run_once until done.
        with manage_run(self):
            f = asyncio.ensure_future(future, loop=self)
            if f is not future:
                f._log_destroy_pending = False
            while not f.done():
                self._run_once()
                if self._stopping:
                    break
            if not f.done():
                raise RuntimeError("Event loop stopped before Future completed.")
            return f.result()

    def _run_once(self):
        """
        Simplified re-implementation of asyncio's _run_once that
        runs handles as they become ready.
        """
        ready = self._ready
        scheduled = self._scheduled
        # Drop cancelled timers from the head of the heap.
        while scheduled and scheduled[0]._cancelled:
            heappop(scheduled)
        # Poll immediately when work is pending; otherwise sleep until the
        # next timer (capped at 86400s), or indefinitely with no timers.
        timeout = (
            0
            if ready or self._stopping
            else min(max(scheduled[0]._when - self.time(), 0), 86400)
            if scheduled
            else None
        )
        event_list = self._selector.select(timeout)
        self._process_events(event_list)
        # Move timers that have come due onto the ready queue.
        end_time = self.time() + self._clock_resolution
        while scheduled and scheduled[0]._when < end_time:
            handle = heappop(scheduled)
            ready.append(handle)
        # Run at most len(ready) callbacks; new ones queued while running
        # wait for the next pass.
        for _ in range(len(ready)):
            if not ready:
                break
            handle = ready.popleft()
            if not handle._cancelled:
                handle._run()
        handle = None  # drop the reference to the last handle

    @contextmanager
    def manage_run(self):
        """Set up the loop for running."""
        self._check_closed()
        # Save/restore so nested runs don't clobber the outer run's state.
        old_thread_id = self._thread_id
        old_running_loop = events._get_running_loop()
        try:
            self._thread_id = threading.get_ident()
            events._set_running_loop(self)
            self._num_runs_pending += 1
            if self._is_proactorloop:
                if self._self_reading_future is None:
                    self.call_soon(self._loop_self_reading)
            yield
        finally:
            self._thread_id = old_thread_id
            events._set_running_loop(old_running_loop)
            self._num_runs_pending -= 1
            if self._is_proactorloop:
                # Only tear down the self-reading future once the outermost
                # nested run has finished.
                if (
                    self._num_runs_pending == 0
                    and self._self_reading_future is not None
                ):
                    ov = self._self_reading_future._ov
                    self._self_reading_future.cancel()
                    if ov is not None:
                        self._proactor._unregister(ov)
                    self._self_reading_future = None

    @contextmanager
    def manage_asyncgens(self):
        if not hasattr(sys, "get_asyncgen_hooks"):
            # Python version is too old.
            return
        old_agen_hooks = sys.get_asyncgen_hooks()
        try:
            self._set_coroutine_origin_tracking(self._debug)
            if self._asyncgens is not None:
                sys.set_asyncgen_hooks(
                    firstiter=self._asyncgen_firstiter_hook,
                    finalizer=self._asyncgen_finalizer_hook,
                )
            yield
        finally:
            self._set_coroutine_origin_tracking(False)
            if self._asyncgens is not None:
                sys.set_asyncgen_hooks(*old_agen_hooks)

    def _check_running(self):
        """Do not throw exception if loop is already running."""
        pass

    # Idempotence guard: patch each loop class only once.
    if hasattr(loop, "_nest_patched"):
        return
    if not isinstance(loop, asyncio.BaseEventLoop):
        raise ValueError("Can't patch loop of type %s" % type(loop))
    cls = loop.__class__
    cls.run_forever = run_forever
    cls.run_until_complete = run_until_complete
    cls._run_once = _run_once
    cls._check_running = _check_running
    cls._check_runnung = _check_running  # typo in Python 3.7 source
    cls._num_runs_pending = 0
    cls._is_proactorloop = os.name == "nt" and issubclass(
        cls, asyncio.ProactorEventLoop
    )
    if sys.version_info < (3, 7, 0):
        cls._set_coroutine_origin_tracking = cls._set_coroutine_wrapper
    cls._nest_patched = True
def _patch_task():
    """Patch the Task's step and enter/leave methods to make it reentrant."""

    def step(task, exc=None):
        # Save and restore the loop's "current task" around the original
        # step so a nested run does not clobber the outer run's task.
        curr_task = curr_tasks.get(task._loop)
        try:
            step_orig(task, exc)
        finally:
            if curr_task is None:
                curr_tasks.pop(task._loop, None)
            else:
                curr_tasks[task._loop] = curr_task

    Task = asyncio.Task
    # Idempotence guard: only patch once per process.
    if hasattr(Task, "_nest_patched"):
        return
    if sys.version_info >= (3, 7, 0):

        def enter_task(loop, task):
            curr_tasks[loop] = task

        def leave_task(loop, task):
            # Unconditional pop (no sanity check) tolerates nested runs
            # leaving in a different order than they entered.
            curr_tasks.pop(loop, None)

        asyncio.tasks._enter_task = enter_task
        asyncio.tasks._leave_task = leave_task
        curr_tasks = asyncio.tasks._current_tasks
        step_orig = Task._Task__step
        Task._Task__step = step
    else:
        curr_tasks = Task._current_tasks
        step_orig = Task._step
        Task._step = step
    Task._nest_patched = True
def METHOD_NAME():
    """
    If tornado is imported before nest_asyncio, make tornado aware of
    the pure-Python asyncio Future.
    """
    if "tornado" not in sys.modules:
        return
    import tornado.concurrent as tc

    tc.Future = asyncio.Future
    if asyncio.Future not in tc.FUTURES:
        tc.FUTURES += (asyncio.Future,)
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetConfigurationAssignmentsForSubscriptionResult',
'AwaitableGetConfigurationAssignmentsForSubscriptionResult',
'get_configuration_assignments_for_subscription',
'get_configuration_assignments_for_subscription_output',
]
@pulumi.output_type
class GetConfigurationAssignmentsForSubscriptionResult:
    """
    Configuration Assignment
    """
    # Generated output type: __init__ validates the raw invoke result and
    # stores each field under its wire name via pulumi.set; the properties
    # below read them back with pulumi.get.
    def __init__(__self__, filter=None, id=None, location=None, METHOD_NAME=None, name=None, resource_id=None, system_data=None, type=None):
        if filter and not isinstance(filter, dict):
            raise TypeError("Expected argument 'filter' to be a dict")
        pulumi.set(__self__, "filter", filter)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'maintenance_configuration_id' to be a str")
        pulumi.set(__self__, "maintenance_configuration_id", METHOD_NAME)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if resource_id and not isinstance(resource_id, str):
            raise TypeError("Expected argument 'resource_id' to be a str")
        pulumi.set(__self__, "resource_id", resource_id)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def filter(self) -> Optional['outputs.ConfigurationAssignmentFilterPropertiesResponse']:
        """
        Properties of the configuration assignment
        """
        return pulumi.get(self, "filter")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified identifier of the resource
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Location of the resource
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter(name="maintenanceConfigurationId")
    def METHOD_NAME(self) -> Optional[str]:
        """
        The maintenance configuration Id
        """
        return pulumi.get(self, "maintenance_configuration_id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> Optional[str]:
        """
        The unique resourceId
        """
        return pulumi.get(self, "resource_id")

    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> 'outputs.SystemDataResponse':
        """
        Azure Resource Manager metadata containing createdBy and modifiedBy information.
        """
        return pulumi.get(self, "system_data")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Type of the resource
        """
        return pulumi.get(self, "type")
class AwaitableGetConfigurationAssignmentsForSubscriptionResult(GetConfigurationAssignmentsForSubscriptionResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Make the already-resolved result awaitable: the dead `yield` only
        # marks this as a generator function; awaiting returns a plain copy.
        if False:
            yield self
        return GetConfigurationAssignmentsForSubscriptionResult(
            filter=self.filter,
            id=self.id,
            location=self.location,
            METHOD_NAME=self.METHOD_NAME,
            name=self.name,
            resource_id=self.resource_id,
            system_data=self.system_data,
            type=self.type)
def get_configuration_assignments_for_subscription(configuration_assignment_name: Optional[str] = None,
                                                   opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetConfigurationAssignmentsForSubscriptionResult:
    """
    Get configuration assignment for resource..


    :param str configuration_assignment_name: Configuration assignment name
    """
    __args__ = dict()
    __args__['configurationAssignmentName'] = configuration_assignment_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # Synchronous provider invoke; the result dict is re-shaped into the typed
    # awaitable wrapper below.
    __ret__ = pulumi.runtime.invoke('azure-native:maintenance/v20230401:getConfigurationAssignmentsForSubscription', __args__, opts=opts, typ=GetConfigurationAssignmentsForSubscriptionResult).value
    return AwaitableGetConfigurationAssignmentsForSubscriptionResult(
        filter=pulumi.get(__ret__, 'filter'),
        id=pulumi.get(__ret__, 'id'),
        location=pulumi.get(__ret__, 'location'),
        METHOD_NAME=pulumi.get(__ret__, 'maintenance_configuration_id'),
        name=pulumi.get(__ret__, 'name'),
        resource_id=pulumi.get(__ret__, 'resource_id'),
        system_data=pulumi.get(__ret__, 'system_data'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_configuration_assignments_for_subscription)
def get_configuration_assignments_for_subscription_output(configuration_assignment_name: Optional[pulumi.Input[str]] = None,
                                                          opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetConfigurationAssignmentsForSubscriptionResult]:
    """
    Get configuration assignment for resource..


    :param str configuration_assignment_name: Configuration assignment name
    """
    # Body intentionally empty: lift_output_func wraps the plain invoke above
    # into an Output-accepting variant.
    ...
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
from paddlex.ppdet.core.workspace import register
from ..layers import AnchorGeneratorSSD
from ..cls_utils import _get_class_default_kwargs
@register
class FaceHead(nn.Layer):
    """
    Head block for Face detection network

    Args:
        num_classes (int): Number of output classes.
        in_channels (int): Number of input channels.
        anchor_generator(object): instance of anchor generator method.
        kernel_size (int): kernel size of Conv2D in FaceHead.
        padding (int): padding of Conv2D in FaceHead.
        conv_decay (float): weight decay for conv layer weights.
        loss (object): loss of face detection model.
    """
    __shared__ = ['num_classes']
    __inject__ = ['anchor_generator', 'loss']

    def __init__(
            self,
            num_classes=80,
            in_channels=[96, 96],  # NOTE(review): mutable default; read-only here but fragile
            anchor_generator=_get_class_default_kwargs(AnchorGeneratorSSD),
            kernel_size=3,
            padding=1,
            conv_decay=0.,
            loss='SSDLoss'):
        super(FaceHead, self).__init__()
        # add background class
        self.num_classes = num_classes + 1
        self.in_channels = in_channels
        self.anchor_generator = anchor_generator
        self.loss = loss

        # Config-dict injection: build the anchor generator from its kwargs.
        if isinstance(anchor_generator, dict):
            self.anchor_generator = AnchorGeneratorSSD(**anchor_generator)

        self.num_priors = self.anchor_generator.num_priors
        self.box_convs = []
        self.score_convs = []
        # One box-regression conv (4 coords per prior) and one classification
        # conv (num_classes per prior) per feature level.
        for i, num_prior in enumerate(self.num_priors):
            box_conv_name = "boxes{}".format(i)
            box_conv = self.add_sublayer(
                box_conv_name,
                nn.Conv2D(
                    in_channels=self.in_channels[i],
                    out_channels=num_prior * 4,
                    kernel_size=kernel_size,
                    padding=padding))
            self.box_convs.append(box_conv)

            score_conv_name = "scores{}".format(i)
            score_conv = self.add_sublayer(
                score_conv_name,
                nn.Conv2D(
                    in_channels=self.in_channels[i],
                    out_channels=num_prior * self.num_classes,
                    kernel_size=kernel_size,
                    padding=padding))
            self.score_convs.append(score_conv)

    @classmethod
    def from_config(cls, cfg, input_shape):
        # Derive per-level input channels from the preceding module's shapes.
        return {'in_channels': [i.channels for i in input_shape], }

    def forward(self, feats, image, gt_bbox=None, gt_class=None):
        """Predict per-level boxes/scores; return the loss when training."""
        box_preds = []
        cls_scores = []
        prior_boxes = []
        for feat, box_conv, score_conv in zip(feats, self.box_convs,
                                              self.score_convs):
            box_pred = box_conv(feat)
            # NCHW -> NHWC, then flatten spatial/prior dims; 0 in a Paddle
            # reshape keeps the corresponding (batch) dimension.
            box_pred = paddle.transpose(box_pred, [0, 2, 3, 1])
            box_pred = paddle.reshape(box_pred, [0, -1, 4])
            box_preds.append(box_pred)

            cls_score = score_conv(feat)
            cls_score = paddle.transpose(cls_score, [0, 2, 3, 1])
            cls_score = paddle.reshape(cls_score, [0, -1, self.num_classes])
            cls_scores.append(cls_score)

        prior_boxes = self.anchor_generator(feats, image)

        if self.training:
            return self.METHOD_NAME(box_preds, cls_scores, gt_bbox, gt_class,
                                    prior_boxes)
        else:
            return (box_preds, cls_scores), prior_boxes

    def METHOD_NAME(self, boxes, scores, gt_bbox, gt_class, prior_boxes):
        """Delegate to the injected SSD-style loss object."""
        return self.loss(boxes, scores, gt_bbox, gt_class, prior_boxes)
# -*- coding: utf-8 -*-
import pytest
import env # noqa: F401
from pybind11_tests import ConstructorStats
from pybind11_tests import call_policies as m
@pytest.mark.xfail("env.PYPY", reason="sometimes comes out 1 off on PyPy", strict=False)
def test_keep_alive_argument(capture):
    """keep_alive on an argument ties the child's lifetime to the parent."""
    n_inst = ConstructorStats.detail_reg_inst()
    with capture:
        p = m.Parent()
    assert capture == "Allocating parent."
    with capture:
        # Without keep_alive the temporary Child is released immediately.
        p.addChild(m.Child())
        assert ConstructorStats.detail_reg_inst() == n_inst + 1
    assert (
        capture
        == """
        Allocating child.
        Releasing child.
    """
    )
    with capture:
        del p
        assert ConstructorStats.detail_reg_inst() == n_inst
    assert capture == "Releasing parent."
    with capture:
        p = m.Parent()
    assert capture == "Allocating parent."
    with capture:
        # With keep_alive the Child survives until the parent dies.
        p.addChildKeepAlive(m.Child())
        assert ConstructorStats.detail_reg_inst() == n_inst + 2
    assert capture == "Allocating child."
    with capture:
        del p
        assert ConstructorStats.detail_reg_inst() == n_inst
    assert (
        capture
        == """
        Releasing parent.
        Releasing child.
    """
    )
    p = m.Parent()
    c = m.Child()
    assert ConstructorStats.detail_reg_inst() == n_inst + 2
    m.free_function(p, c)
    del c
    # c is kept alive by p via the free function's keep_alive policy.
    assert ConstructorStats.detail_reg_inst() == n_inst + 2
    del p
    assert ConstructorStats.detail_reg_inst() == n_inst
    with pytest.raises(RuntimeError) as excinfo:
        m.invalid_arg_index()
    assert str(excinfo.value) == "Could not activate keep_alive!"
def test_keep_alive_return_value(capture):
    """keep_alive on a return value ties the returned child to the parent."""
    n_inst = ConstructorStats.detail_reg_inst()
    with capture:
        p = m.Parent()
    assert capture == "Allocating parent."
    with capture:
        # Without keep_alive the returned Child is released at once.
        p.returnChild()
        assert ConstructorStats.detail_reg_inst() == n_inst + 1
    assert (
        capture
        == """
        Allocating child.
        Releasing child.
    """
    )
    with capture:
        del p
        assert ConstructorStats.detail_reg_inst() == n_inst
    assert capture == "Releasing parent."
    with capture:
        p = m.Parent()
    assert capture == "Allocating parent."
    with capture:
        p.returnChildKeepAlive()
        assert ConstructorStats.detail_reg_inst() == n_inst + 2
    assert capture == "Allocating child."
    with capture:
        del p
        assert ConstructorStats.detail_reg_inst() == n_inst
    assert (
        capture
        == """
        Releasing parent.
        Releasing child.
    """
    )
    p = m.Parent()
    assert ConstructorStats.detail_reg_inst() == n_inst + 1
    with capture:
        # keep_alive also works on static functions taking the parent.
        m.Parent.staticFunction(p)
        assert ConstructorStats.detail_reg_inst() == n_inst + 2
    assert capture == "Allocating child."
    with capture:
        del p
        assert ConstructorStats.detail_reg_inst() == n_inst
    assert (
        capture
        == """
        Releasing parent.
        Releasing child.
    """
    )
# https://foss.heptapod.net/pypy/pypy/-/issues/2447
@pytest.mark.xfail("env.PYPY", reason="_PyObject_GetDictPtr is unimplemented")
def test_alive_gc(capture):
    """keep_alive patients are released when a GC'd cycle collects the parent."""
    n_inst = ConstructorStats.detail_reg_inst()
    p = m.ParentGC()
    p.addChildKeepAlive(m.Child())
    assert ConstructorStats.detail_reg_inst() == n_inst + 2
    lst = [p]
    lst.append(lst)  # creates a circular reference
    with capture:
        del p, lst
        assert ConstructorStats.detail_reg_inst() == n_inst
    assert (
        capture
        == """
        Releasing parent.
        Releasing child.
    """
    )
def test_alive_gc_derived(capture):
    """Same as test_alive_gc but with a Python subclass of the bound Parent."""
    class Derived(m.Parent):
        pass
    n_inst = ConstructorStats.detail_reg_inst()
    p = Derived()
    p.addChildKeepAlive(m.Child())
    assert ConstructorStats.detail_reg_inst() == n_inst + 2
    lst = [p]
    lst.append(lst)  # creates a circular reference
    with capture:
        del p, lst
        assert ConstructorStats.detail_reg_inst() == n_inst
    assert (
        capture
        == """
        Releasing parent.
        Releasing child.
    """
    )
def test_alive_gc_multi_derived(capture):
    """keep_alive works across a multiple-inheritance Python subclass."""
    class Derived(m.Parent, m.Child):
        def __init__(self):
            m.Parent.__init__(self)
            m.Child.__init__(self)
    n_inst = ConstructorStats.detail_reg_inst()
    p = Derived()
    p.addChildKeepAlive(m.Child())
    # +3 rather than +2 because Derived corresponds to two registered instances
    assert ConstructorStats.detail_reg_inst() == n_inst + 3
    lst = [p]
    lst.append(lst)  # creates a circular reference
    with capture:
        del p, lst
        assert ConstructorStats.detail_reg_inst() == n_inst
    assert (
        capture
        == """
        Releasing parent.
        Releasing child.
        Releasing child.
    """
    )
def test_return_none(capture):
    """keep_alive policies are a no-op when the nurse/patient is None."""
    n_inst = ConstructorStats.detail_reg_inst()
    with capture:
        p = m.Parent()
    assert capture == "Allocating parent."
    with capture:
        p.returnNullChildKeepAliveChild()
        assert ConstructorStats.detail_reg_inst() == n_inst + 1
    assert capture == ""
    with capture:
        del p
        assert ConstructorStats.detail_reg_inst() == n_inst
    assert capture == "Releasing parent."
    with capture:
        p = m.Parent()
    assert capture == "Allocating parent."
    with capture:
        p.returnNullChildKeepAliveParent()
        assert ConstructorStats.detail_reg_inst() == n_inst + 1
    assert capture == ""
    with capture:
        del p
        assert ConstructorStats.detail_reg_inst() == n_inst
    assert capture == "Releasing parent."
def METHOD_NAME(capture):
    """keep_alive on a constructor argument keeps it alive with the instance."""
    n_inst = ConstructorStats.detail_reg_inst()
    with capture:
        # The Child passed to Parent's constructor must not be released here.
        p = m.Parent(m.Child())
        assert ConstructorStats.detail_reg_inst() == n_inst + 2
    assert (
        capture
        == """
        Allocating child.
        Allocating parent.
    """
    )
    with capture:
        del p
        assert ConstructorStats.detail_reg_inst() == n_inst
    assert (
        capture
        == """
        Releasing parent.
        Releasing child.
    """
    )
def test_call_guard():
    """call_guard policies wrap calls; guards apply in declaration order."""
    assert m.unguarded_call() == "unguarded"
    assert m.guarded_call() == "guarded"
    assert m.multiple_guards_correct_order() == "guarded & guarded"
    assert m.multiple_guards_wrong_order() == "unguarded & guarded"
    if hasattr(m, "with_gil"):
        assert m.with_gil() == "GIL held"
        assert m.without_gil() == "GIL released"
"""Kernel connection helpers."""
import json
import struct
from jupyter_client.session import Session
from tornado.websocket import WebSocketHandler
from traitlets import Float, Instance, Unicode, default
from traitlets.config import LoggingConfigurable
try:
from jupyter_client.jsonutil import json_default
except ImportError:
from jupyter_client.jsonutil import date_default as json_default
from jupyter_client.jsonutil import extract_dates
from jupyter_server.transutils import _i18n
from .abc import KernelWebsocketConnectionABC
def METHOD_NAME(msg):
    """Serialize *msg* (including its binary buffers) into one bytes blob.

    Layout: a 4-byte part count, one 4-byte offset per part (offsets are
    measured from the start of the blob, header included), then the
    JSON-encoded message followed by each binary buffer.

    Returns
    -------
    The message serialized to bytes.
    """
    # Work on copies so neither the message dict nor its buffer list is mutated.
    remainder = msg.copy()
    raw_buffers = list(remainder.pop("buffers"))
    encoded = json.dumps(remainder, default=json_default).encode("utf8")
    parts = [encoded] + raw_buffers
    nbufs = len(parts)
    offsets = [4 * (nbufs + 1)]
    for part in parts[:-1]:
        offsets.append(offsets[-1] + len(part))
    header = struct.pack("!" + "I" * (nbufs + 1), nbufs, *offsets)
    return b"".join([header] + parts)
def deserialize_binary_message(bmsg):
    """Deserialize a message from a binary blob (inverse of serialization).

    Layout: a 4-byte part count, one 4-byte offset per part, then the
    JSON-encoded message followed by its binary buffers.

    Returns
    -------
    message dictionary
    """
    nbufs = struct.unpack("!i", bmsg[:4])[0]
    bounds = list(struct.unpack("!" + "I" * nbufs, bmsg[4 : 4 * (nbufs + 1)]))
    bounds.append(None)  # final slice runs to the end of the blob
    parts = [bmsg[lo:hi] for lo, hi in zip(bounds, bounds[1:])]
    msg = json.loads(parts[0].decode("utf8"))
    msg["header"] = extract_dates(msg["header"])
    msg["parent_header"] = extract_dates(msg["parent_header"])
    msg["buffers"] = parts[1:]
    return msg
def serialize_msg_to_ws_v1(msg_or_list, channel, pack=None):
    """Serialize a message using the v1 (binary websocket) protocol."""
    if pack:
        parts = [
            pack(msg_or_list["header"]),
            pack(msg_or_list["parent_header"]),
            pack(msg_or_list["metadata"]),
            pack(msg_or_list["content"]),
        ]
    else:
        parts = msg_or_list
    channel_bytes = channel.encode("utf-8")
    # First offset = size of the fixed header: count word + one word per offset.
    offsets = [8 * (1 + 1 + len(parts) + 1)]
    offsets.append(offsets[-1] + len(channel_bytes))
    for part in parts:
        offsets.append(offsets[-1] + len(part))
    pieces = [len(offsets).to_bytes(8, byteorder="little")]
    pieces.extend(off.to_bytes(8, byteorder="little") for off in offsets)
    pieces.append(channel_bytes)
    pieces.extend(parts)
    return b"".join(pieces)
def deserialize_msg_from_ws_v1(ws_msg):
    """Deserialize a v1-protocol frame into ``(channel, msg_list)``."""
    count = int.from_bytes(ws_msg[:8], "little")
    bounds = [
        int.from_bytes(ws_msg[8 * (i + 1) : 8 * (i + 2)], "little")
        for i in range(count)
    ]
    channel = ws_msg[bounds[0] : bounds[1]].decode("utf-8")
    msg_list = [ws_msg[bounds[i] : bounds[i + 1]] for i in range(1, count - 1)]
    return channel, msg_list
class BaseKernelWebsocketConnection(LoggingConfigurable):
    """A configurable base class for connecting Kernel WebSockets to ZMQ sockets.

    Subclasses implement connect/disconnect and the two message-forwarding
    hooks; this base supplies the traitlets configuration and convenience
    accessors into the owning kernel manager hierarchy (``self.parent``).
    """

    kernel_ws_protocol = Unicode(
        None,
        allow_none=True,
        config=True,
        help=_i18n(
            "Preferred kernel message protocol over websocket to use (default: None). "
            "If an empty string is passed, select the legacy protocol. If None, "
            "the selected protocol will depend on what the front-end supports "
            "(usually the most recent protocol supported by the back-end and the "
            "front-end)."
        ),
    )

    @property
    def kernel_manager(self):
        """The kernel manager."""
        return self.parent

    @property
    def multi_kernel_manager(self):
        """The multi kernel manager."""
        return self.kernel_manager.parent

    @property
    def kernel_id(self):
        """The kernel id."""
        return self.kernel_manager.kernel_id

    @property
    def session_id(self):
        """The session id."""
        return self.session.session

    # Seconds to wait for kernel_info; defaults to the multi kernel manager's value.
    kernel_info_timeout = Float()

    @default("kernel_info_timeout")
    def _default_kernel_info_timeout(self):
        return self.multi_kernel_manager.kernel_info_timeout

    session = Instance(klass=Session, config=True)

    @default("session")
    def _default_session(self):
        return Session(config=self.config)

    websocket_handler = Instance(WebSocketHandler)

    async def connect(self):
        """Handle a connect."""
        raise NotImplementedError()

    async def disconnect(self):
        """Handle a disconnect."""
        raise NotImplementedError()

    def handle_incoming_message(self, incoming_msg: str) -> None:
        """Handle an incoming message."""
        raise NotImplementedError()

    def handle_outgoing_message(self, stream: str, outgoing_msg: list) -> None:
        """Handle an outgoing message."""
        raise NotImplementedError()
KernelWebsocketConnectionABC.register(BaseKernelWebsocketConnection) |
# @file NmakeSubdirs.py
# This script support parallel build for nmake in windows environment.
# It supports Python2.x and Python3.x both.
#
# Copyright (c) 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
#
# Import Modules
#
from __future__ import print_function
import argparse
import threading
import time
import os
import subprocess
import multiprocessing
import copy
import sys
__prog__ = 'NmakeSubdirs'
__version__ = '%s Version %s' % (__prog__, '0.10 ')
__copyright__ = 'Copyright (c) 2018, Intel Corporation. All rights reserved.'
__description__ = 'Replace for NmakeSubdirs.bat in windows ,support parallel build for nmake.\n'
cpu_count = multiprocessing.cpu_count()
output_lock = threading.Lock()
def RunCommand(WorkDir=None, *Args, **kwargs):
    """Run *Args* as a subprocess in *WorkDir* and echo its combined output.

    Raises RuntimeError (including the captured output) when the command
    exits non-zero; returns ``(returncode, stdout)`` on success.
    """
    if WorkDir is None:
        WorkDir = os.curdir
    if "stderr" not in kwargs:
        kwargs["stderr"] = subprocess.STDOUT
    if "stdout" not in kwargs:
        kwargs["stdout"] = subprocess.PIPE
    p = subprocess.Popen(Args, cwd=WorkDir, stderr=kwargs["stderr"], stdout=kwargs["stdout"])
    stdout, stderr = p.communicate()
    message = ""
    if stdout is not None:
        message = stdout.decode(encoding='utf-8', errors='ignore') #for compatibility in python 2 and 3
    if p.returncode != 0:
        raise RuntimeError("Error while execute command \'{0}\' in directory {1}\n{2}".format(" ".join(Args), WorkDir, message))
    # Serialize console output across worker threads; 'with' guarantees the
    # lock is released even if print() raises (the bare acquire/release pair
    # could deadlock every other worker on an exception).
    with output_lock:
        print("execute command \"{0}\" in directory {1}".format(" ".join(Args), WorkDir))
        print(message)
    return p.returncode, stdout
class TaskUnit(object):
    """A deferred call: a callable plus the arguments to invoke it with."""

    def __init__(self, func, args, kwargs):
        self.func = func
        self.args = args
        self.kwargs = kwargs

    def __eq__(self, other):
        # Identity semantics: two units compare equal only if they are
        # the very same object.
        return id(self).__eq__(id(other))

    def run(self):
        """Invoke the stored callable and return its result."""
        return self.func(*self.args, **self.kwargs)

    def __str__(self):
        rendered = list(self.args)
        rendered.extend("{0}={1}".format(k, v) for k, v in self.kwargs.items())
        return "{0}({1})".format(self.func.__name__, ",".join(rendered))
class ThreadControl(object):
    """Fixed-size thread pool draining a queue of TaskUnits.

    The first task failure records ``error``/``errorMsg`` and makes the
    remaining workers stop picking up new work.
    """

    def __init__(self, maxthread):
        self._processNum = maxthread          # number of worker threads
        self.pending = []                     # tasks not yet started
        self.running = []                     # live worker threads
        self.pendingLock = threading.Lock()
        self.runningLock = threading.Lock()
        self.error = False
        self.errorLock = threading.Lock()
        self.errorMsg = "errorMsg"

    def addTask(self, func, *args, **kwargs):
        """Queue ``func(*args, **kwargs)`` for execution."""
        self.pending.append(TaskUnit(func, args, kwargs))

    def waitComplete(self):
        """Block until the scheduler thread (and all workers) have finished."""
        self._schedule.join()

    def startSchedule(self):
        """Start the scheduler in its own thread and return immediately."""
        self._schedule = threading.Thread(target=self.Schedule)
        self._schedule.start()

    def Schedule(self):
        """Spawn the workers, then wait until every one has drained out."""
        for i in range(self._processNum):
            task = threading.Thread(target=self.METHOD_NAME)
            task.daemon = False
            self.running.append(task)
        # 'with' keeps the lock exception-safe (the manual pair could leak it).
        with self.runningLock:
            for thread in self.running:
                thread.start()
        while len(self.running) > 0:
            time.sleep(0.1)
        if self.error:
            print("subprocess not exit successfully")
            print(self.errorMsg)

    def METHOD_NAME(self):
        """Worker loop: pop and run pending tasks until empty or a task fails."""
        while True:
            if self.error:
                break
            with self.pendingLock:
                if len(self.pending) == 0:
                    break
                task = self.pending.pop(0)
            try:
                task.run()
            except RuntimeError as e:
                if self.error:
                    break
                with self.errorLock:
                    self.error = True
                    self.errorMsg = str(e)
                    time.sleep(0.1)
                break
        # De-register this worker; current_thread() replaces the
        # camelCase currentThread(), deprecated since Python 3.10.
        with self.runningLock:
            self.running.remove(threading.current_thread())
def Run():
    """Drive nmake over args.subdirs: serial for one dir, threaded otherwise."""
    curdir = os.path.abspath(os.curdir)
    # A single subdir gains nothing from threading; force serial mode.
    if len(args.subdirs) == 1:
        args.jobs = 1
    if args.jobs == 1:
        try:
            for dir in args.subdirs:
                RunCommand(os.path.join(curdir, dir), "nmake", args.target, stdout=sys.stdout, stderr=subprocess.STDOUT)
        except RuntimeError:
            exit(1)
    else:
        controller = ThreadControl(args.jobs)
        for dir in args.subdirs:
            controller.addTask(RunCommand, os.path.join(curdir, dir), "nmake", args.target)
        controller.startSchedule()
        controller.waitComplete()
        if controller.error:
            exit(1)
if __name__ == "__main__":
    # Command line: NmakeSubdirs <target> <subdir>... [--jobs N]
    parser = argparse.ArgumentParser(prog=__prog__, description=__description__ + __copyright__, conflict_handler='resolve')
    parser.add_argument("target", help="the target for nmake")
    parser.add_argument("subdirs", nargs="+", help="the relative dir path of makefile")
    parser.add_argument("--jobs", type=int, dest="jobs", default=cpu_count, help="thread number")
    parser.add_argument('--version', action='version', version=__version__)
    args = parser.parse_args()
    Run()
|
"""Routines to help recognizing sound files.
Function whathdr() recognizes various types of sound file headers.
It understands almost all headers that SOX can decode.
The return tuple contains the following items, in this order:
- file type (as SOX understands it)
- sampling rate (0 if unknown or hard to decode)
- number of channels (0 if unknown or hard to decode)
- number of frames in the file (-1 if unknown or hard to decode)
- number of bits/sample, or 'U' for U-LAW, or 'A' for A-LAW
If the file doesn't have a recognizable type, it returns None.
If the file can't be opened, OSError is raised.
To compute the total time, divide the number of frames by the
sampling rate (a frame contains a sample for each channel).
Function what() calls whathdr(). (It used to also use some
heuristics for raw data, but this doesn't work very well.)
Finally, the function test() is a simple main program that calls
what() for all files mentioned on the argument list. For directory
arguments it calls what() for all files in that directory. Default
argument is "." (testing all files in the current directory). The
option -r tells it to recurse down directories found inside
explicitly given directories.
"""
# The file structure is top-down except that the test program and its
# subroutine come last.
__all__ = ['what', 'whathdr']
def what(filename):
    """Guess the type of a sound file."""
    return whathdr(filename)
def whathdr(filename):
    """Recognize sound headers; return a header tuple or None."""
    with open(filename, 'rb') as f:
        head = f.read(512)
        for probe in tests:
            info = probe(head, f)
            if info:
                return info
    return None
#-----------------------------------#
# Subroutines per sound header type #
#-----------------------------------#
tests = []
def test_aifc(h, f):
    """AIFF / AIFF-C: a 'FORM' container whose form type is 'AIFF' or 'AIFC'."""
    # NOTE(review): the aifc module was removed from the stdlib in Python 3.13
    # — confirm the supported Python range before relying on this probe.
    import aifc
    if not h.startswith(b'FORM'):
        return None
    if h[8:12] == b'AIFC':
        fmt = 'aifc'
    elif h[8:12] == b'AIFF':
        fmt = 'aiff'
    else:
        return None
    f.seek(0)
    try:
        a = aifc.open(f, 'r')
    except (EOFError, aifc.Error):
        return None
    return (fmt, a.getframerate(), a.getnchannels(),
            a.getnframes(), 8 * a.getsampwidth())
tests.append(test_aifc)
def test_au(h, f):
    """Sun/NeXT audio: '.snd' (big-endian) or the little-endian variants."""
    if h.startswith(b'.snd'):
        func = get_long_be
    elif h[:4] in (b'\0ds.', b'dns.'):
        func = get_long_le
    else:
        return None
    filetype = 'au'
    hdr_size = func(h[4:8])
    data_size = func(h[8:12])
    encoding = func(h[12:16])
    rate = func(h[16:20])
    nchannels = func(h[20:24])
    sample_size = 1 # default
    if encoding == 1:
        sample_bits = 'U'   # U-LAW
    elif encoding == 2:
        sample_bits = 8
    elif encoding == 3:
        sample_bits = 16
        sample_size = 2
    else:
        sample_bits = '?'
    frame_size = sample_size * nchannels
    if frame_size:
        # NOTE(review): true division yields a float frame count on Python 3;
        # presumably // was intended — confirm callers before changing.
        nframe = data_size / frame_size
    else:
        nframe = -1
    return filetype, rate, nchannels, nframe, sample_bits
tests.append(test_au)
def test_hcom(h, f):
    """Macintosh HCOM: 'FSSD' at offset 65 and 'HCOM' at offset 128."""
    if h[65:69] != b'FSSD' or h[128:132] != b'HCOM':
        return None
    divisor = get_long_be(h[144:148])
    rate = 22050 / divisor if divisor else 0
    return 'hcom', rate, 1, -1, 8
tests.append(test_hcom)
def METHOD_NAME(h, f):
    """Creative Voice File (.voc); sample rate from the first sound block."""
    if not h.startswith(b'Creative Voice File\032'):
        return None
    sbseek = get_short_le(h[20:22])
    rate = 0
    if 0 <= sbseek < 500 and h[sbseek] == 1:
        ratecode = 256 - h[sbseek + 4]
        if ratecode:
            rate = int(1000000.0 / ratecode)
    return 'voc', rate, 1, -1, 8
tests.append(METHOD_NAME)
def test_wav(h, f):
import wave
# 'RIFF' <len> 'WAVE' 'fmt ' <len>
if not h.startswith(b'RIFF') or h[8:12] != b'WAVE' or h[12:16] != b'fmt ':
return None
f.seek(0)
try:
w = wave.openfp(f, 'r')
except (EOFError, wave.Error):
return None
return ('wav', w.getframerate(), w.getnchannels(),
w.getnframes(), 8*w.getsampwidth())
tests.append(test_wav)
def test_8svx(h, f):
if not h.startswith(b'FORM') or h[8:12] != b'8SVX':
return None
# Should decode it to get #channels -- assume always 1
return '8svx', 0, 1, 0, 8
tests.append(test_8svx)
def test_sndt(h, f):
    """SNDT format: 'SOUND' magic with sample count and rate in the header."""
    if not h.startswith(b'SOUND'):
        return None
    nsamples = get_long_le(h[8:12])
    rate = get_short_le(h[20:22])
    return 'sndt', rate, 1, nsamples, 8
tests.append(test_sndt)
def test_sndr(h, f):
    """Raw sndr: two zero bytes then a plausible (4000..25000) sample rate."""
    if not h.startswith(b'\0\0'):
        return None
    rate = get_short_le(h[2:4])
    if 4000 <= rate <= 25000:
        return 'sndr', rate, 1, -1, 8
    return None
tests.append(test_sndr)
#-------------------------------------------#
# Subroutines to extract numbers from bytes #
#-------------------------------------------#
def get_long_be(b):
    """Decode 4 bytes as a big-endian unsigned 32-bit integer."""
    value = 0
    for byte in (b[0], b[1], b[2], b[3]):
        value = (value << 8) | byte
    return value
def get_long_le(b):
    """Decode 4 bytes as a little-endian unsigned 32-bit integer."""
    value = 0
    for byte in (b[3], b[2], b[1], b[0]):
        value = (value << 8) | byte
    return value
def get_short_be(b):
    """Decode 2 bytes as a big-endian unsigned 16-bit integer."""
    return b[1] + (b[0] << 8)
def get_short_le(b):
    """Decode 2 bytes as a little-endian unsigned 16-bit integer."""
    return b[0] + (b[1] << 8)
#--------------------#
# Small test program #
#--------------------#
def test():
    """Command-line driver: report what() for every path in sys.argv (or '.')."""
    import sys
    recursive = 0
    # A leading -r enables recursion into subdirectories.
    if sys.argv[1:] and sys.argv[1] == '-r':
        del sys.argv[1:2]
        recursive = 1
    try:
        if sys.argv[1:]:
            testall(sys.argv[1:], recursive, 1)
        else:
            testall(['.'], recursive, 1)
    except KeyboardInterrupt:
        sys.stderr.write('\n[Interrupted]\n')
        sys.exit(1)
def testall(list, recursive, toplevel):
    """Print what() for each path in *list*, descending into directories.

    Directories are entered when *recursive* is set or at the top level.
    """
    import sys
    import os
    for filename in list:
        if os.path.isdir(filename):
            print(filename + '/:', end=' ')
            if recursive or toplevel:
                print('recursing down:')
                import glob
                names = glob.glob(os.path.join(filename, '*'))
                testall(names, recursive, 0)
            else:
                print('*** directory (use -r) ***')
        else:
            print(filename + ':', end=' ')
            sys.stdout.flush()
            try:
                print(what(filename))
            except OSError:
                print('*** not found ***')
if __name__ == '__main__':
test() |
# (C) Datadog, Inc. 2010-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import logging
from datadog_checks.cisco_aci.helpers import (
check_metric_can_be_zero,
get_app_from_dn,
get_attributes,
get_bd_from_dn,
get_cep_from_dn,
get_epg_from_dn,
get_event_tags_from_dn,
get_hostname_from_dn,
get_ip_from_dn,
get_node_from_dn,
get_pod_from_dn,
parse_capacity_tags,
)
log = logging.getLogger('test_cisco_aci')
def test_get_pod_from_dn():
    """get_pod_from_dn extracts the first numeric 'pod-<N>' id from a DN."""
    assert get_pod_from_dn(None) is None
    assert get_pod_from_dn("") is None
    assert get_pod_from_dn("pod-") is None
    assert get_pod_from_dn("pod-aa") is None
    assert get_pod_from_dn("pod-1") == "1"
    assert get_pod_from_dn("pod-100") == "100"
    assert get_pod_from_dn("aapod-1") == "1"
    assert get_pod_from_dn("aapod-1aaa") == "1"
    assert get_pod_from_dn("pod-1pod-2") == "1"
def test_get_bd_from_dn():
    """get_bd_from_dn extracts the slash-delimited 'BD-<name>' segment."""
    assert get_bd_from_dn(None) is None
    assert get_bd_from_dn("") is None
    assert get_bd_from_dn("BD-") is None
    assert get_bd_from_dn("BD-1a!") is None
    assert get_bd_from_dn("aaBD-1a!") is None
    assert get_bd_from_dn("/BD-1a!/") == "1a!"
    assert get_bd_from_dn("aa/BD-1a!/aa") == "1a!"
    assert get_bd_from_dn("aa/BD-/aa") is None
    assert get_bd_from_dn("/BD-1a!/BD-1b!/") == "1a!"
def test_get_app_from_dn():
    """get_app_from_dn extracts the slash-delimited 'ap-<name>' segment."""
    assert get_app_from_dn(None) is None
    assert get_app_from_dn("") is None
    assert get_app_from_dn("ap-") is None
    assert get_app_from_dn("ap-1a!") is None
    assert get_app_from_dn("aaap-1a!") is None
    assert get_app_from_dn("/ap-1a!/") == "1a!"
    assert get_app_from_dn("aa/ap-1a!/aa") == "1a!"
    assert get_app_from_dn("aa/ap-/aa") is None
    assert get_app_from_dn("/ap-1a!/ap-1b!/") == "1a!"
def test_get_cep_from_dn():
    """get_cep_from_dn extracts the slash-delimited 'cep-<name>' segment."""
    assert get_cep_from_dn(None) is None
    assert get_cep_from_dn("") is None
    assert get_cep_from_dn("cep-") is None
    assert get_cep_from_dn("cep-1a!") is None
    assert get_cep_from_dn("aacep-1a!") is None
    assert get_cep_from_dn("/cep-1a!/") == "1a!"
    assert get_cep_from_dn("aa/cep-1a!/aa") == "1a!"
    assert get_cep_from_dn("aa/cep-/aa") is None
    assert get_cep_from_dn("/cep-1a!/cep-1b!/") == "1a!"
def test_get_epg_from_dn():
    """get_epg_from_dn extracts the slash-delimited 'epg-<name>' segment."""
    assert get_epg_from_dn(None) is None
    assert get_epg_from_dn("") is None
    assert get_epg_from_dn("epg-") is None
    assert get_epg_from_dn("epg-1a!") is None
    assert get_epg_from_dn("aaepg-1a!") is None
    assert get_epg_from_dn("/epg-1a!/") == "1a!"
    assert get_epg_from_dn("aa/epg-1a!/aa") == "1a!"
    assert get_epg_from_dn("aa/epg-/aa") is None
    assert get_epg_from_dn("/epg-1a!/epg-1b!/") == "1a!"
def test_get_ip_from_dn():
    """get_ip_from_dn extracts the slash-delimited 'ip-<value>' segment."""
    assert get_ip_from_dn(None) is None
    assert get_ip_from_dn("") is None
    assert get_ip_from_dn("ip-") is None
    assert get_ip_from_dn("ip-1a!") is None
    assert get_ip_from_dn("aaip-1a!") is None
    assert get_ip_from_dn("/ip-1a!/") == "1a!"
    assert get_ip_from_dn("aa/ip-1a!/aa") == "1a!"
    assert get_ip_from_dn("aa/ip-/aa") is None
    assert get_ip_from_dn("/ip-1a!/ip-1b!/") == "1a!"
def METHOD_NAME():
    """get_node_from_dn extracts the first numeric 'node-<N>' id from a DN."""
    assert get_node_from_dn(None) is None
    assert get_node_from_dn("") is None
    assert get_node_from_dn("node-") is None
    assert get_node_from_dn("node-aa") is None
    assert get_node_from_dn("node-1") == "1"
    assert get_node_from_dn("node-100") == "100"
    assert get_node_from_dn("aanode-1") == "1"
    assert get_node_from_dn("aanode-1aaa") == "1"
    assert get_node_from_dn("node-1node-2") == "1"
def test_parse_capacity_tags():
    """parse_capacity_tags builds pod/node capacity tags from a DN.

    Bug fix: the original compared with ``all(a == b for a, b in zip(res, expected))``.
    ``zip`` stops at the shorter sequence, so a result with missing or extra
    tags (or an empty result) silently passed.  Compare the full lists with
    ``==`` instead, which also checks length and order.
    """
    assert parse_capacity_tags(None) == []
    assert parse_capacity_tags("") == []
    assert parse_capacity_tags("aa/pod-1/node-2/aa") == ['fabric_pod_id:1', 'node_id:2']
    # A component with no value contributes no tag.
    assert parse_capacity_tags("aa/pod-/node-2/aa") == ['node_id:2']
    assert parse_capacity_tags("aa/pod-1/node-/aa") == ['fabric_pod_id:1']
def test_get_event_tags_from_dn():
    """get_event_tags_from_dn derives all event tags from a compound DN.

    Bug fix: ``all(... zip(res, expected))`` truncates to the shorter list, so
    missing or extra tags were never detected.  Assert exact list equality.
    """
    assert get_event_tags_from_dn(None) == []
    assert get_event_tags_from_dn("") == []
    res = get_event_tags_from_dn("aa/ap-AA/epg-BB/pod-1/node-2/ip-CC/cep-DD/BD-EE/aa")
    assert res == ['node:2', 'app:AA', 'bd:EE', 'mac:DD', 'ip:CC', 'epg:BB', 'pod:1']
def test_get_hostname_from_dn():
    """get_hostname_from_dn needs both a pod id and a node id to build a hostname."""
    # Any missing piece means no hostname can be synthesized.
    for dn in (None, "", "aa/pod-/node-/aa", "/pod-1/node-", "/pod-/node-2/"):
        assert get_hostname_from_dn(dn) is None
    assert get_hostname_from_dn("/pod-1/node-2/") == "pod-1-node-2"
def test_get_attributes():
    """get_attributes unwraps a dict-valued 'attributes' payload, possibly nested one level."""
    # Non-dict inputs and the empty dict come back as an empty dict.
    assert get_attributes(None) == {}
    assert get_attributes("") == {}
    assert get_attributes("attr") == {}
    assert get_attributes({}) == {}
    # Payloads without a dict-valued 'attributes' key pass through unchanged.
    pass_through = [
        {"attr": "val"},
        {"attributes": ""},
        {"attributes": "attr"},
        {"obj1": {"b": ""}},
        {"obj1": {"attributes": ""}},
        {"obj1": {"attributes": "attr"}},
    ]
    for payload in pass_through:
        assert get_attributes(payload) == payload
    # A dict-valued 'attributes' key is unwrapped, also when nested under a single key.
    assert get_attributes({"attributes": {}}) == {}
    assert get_attributes({"attributes": {"attr": "val"}}) == {"attr": "val"}
    assert get_attributes({"obj1": {"attributes": {}}}) == {}
    assert get_attributes({"obj1": {"attributes": {"attr": "val"}}}) == {"attr": "val"}
def test_check_metric_can_be_zero():
    """Metrics ending in 'last' may always be zero; others need a nonzero value
    or the presence of a 'cnt' attribute in the attribute dict."""
    # '*last' metrics (case-insensitive suffix) can be zero regardless of value.
    for value in (None, 1, 0):
        assert check_metric_can_be_zero("metric_name_last", value, None) is True
        assert check_metric_can_be_zero("metric_name_Last", value, None) is True
    # No value and no 'cnt' attribute: reject.
    assert check_metric_can_be_zero("metric_name", None, {}) is False
    # Nonzero numeric values (or strings parsing as such) are accepted.
    assert check_metric_can_be_zero("metric_name", 1, None) is True
    assert check_metric_can_be_zero("metric_name", "1", None) is True
    for value in (1, 1.0, "1", "1.0"):
        assert check_metric_can_be_zero("metric_name", value, {}) is True
    # Zero-ish or non-numeric values are rejected without a 'cnt' attribute.
    assert check_metric_can_be_zero("metric_name", 0, None) is False
    for value in (0, 0.0, "0", "0.0", "aaa"):
        assert check_metric_can_be_zero("metric_name", value, {}) is False
    # Any 'cnt' attribute -- whatever its value -- makes the metric acceptable.
    for cnt in (0, 0.0, "0", "0.0", "aaa", 1, 1.0, "1", "1.0"):
        assert check_metric_can_be_zero("metric_name", 1, {"cnt": cnt}) is True
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on July 2, 2019
@author: talbpw
"""
#External Modules---------------------------------------------------------------
import numpy as np
#External Modules End-----------------------------------------------------------
#Internal Modules---------------------------------------------------------------
from .PostProcessorInterface import PostProcessorInterface
from ...utils import utils
from ...utils import InputData, InputTypes
#Internal Modules End-----------------------------------------------------------
class RealizationAverager(PostProcessorInterface):
  """
    Does the average of multiple realizations along the RAVEN_sample_ID dimension
    ONLY, leaving the other dimensions as they are.
  """
  @classmethod
  def getInputSpecification(cls):
    """
      Method to get a reference to a class that specifies the input data for
      class cls.
      @ In, cls, the class for which we are retrieving the specification
      @ Out, inSpec, InputData.ParameterInput, class to use for
        specifying input of cls.
    """
    inSpec= super(RealizationAverager, cls).getInputSpecification()
    # <target> lists the variable names the averaging is applied to
    inSpec.addSub(InputData.parameterInputFactory('target',
        contentType=InputTypes.StringListType))
    return inSpec

  def __init__(self):
    """
      Constructor
      @ In, None
      @ Out, None
    """
    super().__init__()
    self.dynamic = True # from base class, indicates time-dependence is handled internally
    self.targets = None # string, variables to apply postprocessor to
    self.validDataType = ['DataSet'] # The list of accepted types of DataObject
    self.outputMultipleRealizations = True # True indicate multiple realizations are returned

  def _handleInput(self, paramInput):
    """
      Function to handle the parsed paramInput for this class.
      @ In, paramInput, ParameterInput, the already-parsed input.
      @ Out, None
    """
    super()._handleInput(paramInput)
    # pick up the <target> node; other nodes are handled by the base class
    for child in paramInput.subparts:
      tag = child.getName()
      if tag == 'target':
        self.targets = child.value

  def METHOD_NAME(self, currentInp):
    """
      Method to convert an input object into the internal format that is
      understandable by this pp.
      In this case, we only want data objects!
      @ In, currentInp, list, an object that needs to be converted
      @ Out, currentInp, DataObject.DataSet, input data
    """
    # exactly one input DataObject is supported, and it must be a DataSet
    if len(currentInp) > 1:
      self.raiseAnError(IOError, 'Expected 1 input DataObject, but received {} inputs!'.format(len(currentInp)))
    currentInp = currentInp[0]
    if currentInp.type not in ['DataSet']:
      self.raiseAnError(IOError, 'RealizationAverager postprocessor "{}" requires a DataSet input! Got "{}".'
                                 .format(self.name, currentInp.type))
    return currentInp

  def run(self, inputs):
    """
      This method executes the postprocessor action.
      @ In, inputs, list(object), objects containing the data to process.
      @ Out, averaged, xarray.Dataset, targets averaged over the sample dimension
        (returned with a singleton RAVEN_sample_ID dimension so it still looks
        like one realization to downstream consumers)
    """
    # all requested targets must be present in the input data object
    if not set(self.targets) <= set(inputs[0].getVars()):
      self.raiseAnError(KeyError, 'The requested targets were not all found in the input data! ' +
                                  'Unused: {}. '.format(set(inputs[0].getVars()) - set(self.targets)) +
                                  'Missing: {}.'.format(set(self.targets) - set(inputs[0].getVars())))
    dataSet = inputs[0].asDataset()[self.targets] # we checked for singularity earlier, so this should be the only one
    # collapse the sample dimension, then restore it as a single realization (ID 0)
    averaged = dataSet.mean(dim='RAVEN_sample_ID')
    averaged = averaged.expand_dims('RAVEN_sample_ID')
    averaged['RAVEN_sample_ID'] = [0]
    return averaged

  def collectOutput(self, finishedJob, output):
    """
      Function to place all of the computed data into the output object
      @ In, finishedJob, JobHandler External or Internal instance, A JobHandler object that is in charge of running this post-processor
      @ In, output, dataObjects, The object where we want to place our computed results
      @ Out, None
    """
    super().collectOutput(finishedJob, output)
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('_extensions'))
# -- Project information -----------------------------------------------------
from typing import List
version = os.getenv('VERSION','Live')
project = f'OpenQASM {version} Specification'
copyright = '2017-2023, Andrew W. Cross, Lev S. Bishop, John A. Smolin, Jay M. Gambetta'
author = 'Andrew W. Cross, Lev S. Bishop, John A. Smolin, Jay M. Gambetta'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.mathjax',
'sphinx.ext.githubpages',
'sphinxcontrib.bibtex',
'reno.sphinxext',
'multifigure'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns: List[str] = [
"openqasm/docs",
]
# Sets the default code-highlighting language. `.. code-block::` directives
# that are not OQ3 should specify the language manually. The value is
# interpreted as a Pygments lexer alias; this needs the dependency
# `openqasm_pygments`.
highlight_language = "qasm3"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
version_list_var = os.getenv('VERSION_LIST')
extra_nav_links = {'Live Version': '/index.html'} # default link to Live version
if version_list_var is not None:
version_list = version_list_var.split(',')
for ver in version_list:
extra_nav_links[f'Version {ver}'] = f'/versions/{ver}/index.html'
print(extra_nav_links)
# Theme specific options
html_theme_options = {
'extra_nav_links': extra_nav_links
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# The URL which points to the root of the HTML documentation. It is used to
# indicate the location of document like canonical_url.
html_baseurl = os.getenv('HTML_BASEURL', '')
# Add css styles for colored text
html_css_files = ['colors.css']
# If True, figures, tables and code-blocks are automatically numbered
# if they have a caption.
numfig = True
# Necessary setting for sphinxcontrib-bibtex >= 2.0.0
bibtex_bibfiles = ['bibliography.bib']
# This is the list of local variables to export into sphinx by using the
# rst_epilogue below. Using this mechanism we can export the local 'version'
# variable, which can be defined by an environment variable, into the sphinx
# build system for changing the text to specify which specific version of the
# specification is being built
variables_to_export = [
"version",
]
frozen_locals = dict(locals())
rst_epilog = '\n'.join(map(lambda x: f".. |{x}| replace:: {frozen_locals[x]}", variables_to_export))
del frozen_locals
# Monkey-patch docutils 0.19.0 with a fix to `Node.previous_sibling` that is the
# root cause of incorrect HTML output for bibliograhy files (see gh-455).
# docutils is pinned in `constraints.txt` to a version that is known to work
# with this patch. If docutils releases a new version, this monkeypatching and
# the constraint may be able to be dropped.
import docutils.nodes
# This method is taken from docutils revision r9126, which is to a file
# explicitly placed in the public domain; there is no licence clause.
def METHOD_NAME(self):
    """Return the node that directly precedes *self* in its parent's children.

    Returns ``None`` when the node has no parent or is its parent's first
    child.  (Backport of the docutils r9126 `previous_sibling` fix.)
    """
    parent = self.parent
    if parent:
        position = parent.index(self)
        if position > 0:
            return parent[position - 1]
    return None
docutils.nodes.Node.METHOD_NAME = METHOD_NAME |
# This test checks that the OWSLib service interfaces are the same across all service types.
# Author: Dominic Lowe, 17th September 2009
# Part of OWSLib package.
from tests.utils import service_ok
import pytest
from tests.utils import resource_file
import owslib
from owslib.csw import CatalogueServiceWeb
from owslib.wms import WebMapService
from owslib.wcs import WebCoverageService
from owslib.wfs import WebFeatureService
from owslib.util import OrderedDict
# TODO, we should run all these from local XML documents (as per the WMS and WFS services)
# CSW_SERVICE_URL = 'http://data.nodc.noaa.gov/geoportal/csw'
CSW_SERVICE_URL = 'https://demo.pycsw.org/cite/csw'
WCS_SERVICE_URL = 'http://thredds.ucar.edu/thredds/wcs/grib/NCEP/NAM/CONUS_80km/best'
@pytest.mark.online
@pytest.mark.skipif(not service_ok(CSW_SERVICE_URL),
                    reason='service is unreachable')
def test_ows_interfaces_csw():
    """Check a live CSW service instance conforms to the common OWSLib interface.

    Bug fix: the ``isinstance(...)`` calls were bare expressions whose results
    were discarded, so the type checks never actually tested anything.  They
    are now asserted.
    """
    service = CatalogueServiceWeb(CSW_SERVICE_URL)
    service.alias = 'CSW'
    assert isinstance(service, owslib.catalogue.csw2.CatalogueServiceWeb)
    # URL attribute
    assert service.url == CSW_SERVICE_URL
    # version attribute
    assert service.version == '2.0.2'
    # Identification object
    assert hasattr(service, 'identification')
    # Check all ServiceIdentification attributes
    assert service.identification.type == 'CSW'
    for attribute in ['type', 'version', 'title', 'abstract', 'keywords', 'accessconstraints', 'fees']:
        assert hasattr(service.identification, attribute)
    # Check all ServiceProvider attributes
    for attribute in ['name', 'url', 'contact']:
        assert hasattr(service.provider, attribute)
    # Check all operations implement IOperationMetadata
    for op in service.operations:
        for attribute in ['name', 'formatOptions', 'methods']:
            assert hasattr(op, attribute)
    # CSW does not expose contents like the other services, so install a dummy
    # mapping just to exercise the dict-interface check.
    service.contents = {'dummy': '1'}
    assert isinstance(service.contents, dict)
    # CSW items do not implement IContentMetadata, so no per-item checks here.
def METHOD_NAME():
    """Check a WMS 1.1.1 instance (from canned XML) conforms to the OWSLib interface.

    Bug fix: bare ``isinstance(...)`` expressions discarded their results; the
    type checks are now asserted.
    """
    wmsxml = open(resource_file('wms_JPLCapabilities.xml'), 'rb').read()
    service = WebMapService('url', version='1.1.1', xml=wmsxml)
    service.alias = 'WMS'
    assert isinstance(service, owslib.map.wms111.WebMapService_1_1_1)
    # URL attribute
    assert service.url == 'url'
    # version attribute
    assert service.version == '1.1.1'
    # Identification object
    assert hasattr(service, 'identification')
    # Check all ServiceIdentification attributes
    assert service.identification.type == 'OGC:WMS'
    for attribute in ['type', 'version', 'title', 'abstract', 'keywords', 'accessconstraints', 'fees']:
        assert hasattr(service.identification, attribute)
    # Check all ServiceProvider attributes
    for attribute in ['name', 'url', 'contact']:
        assert hasattr(service.provider, attribute)
    # Check all operations implement IOperationMetadata
    for op in service.operations:
        for attribute in ['name', 'formatOptions', 'methods']:
            assert hasattr(op, attribute)
    # Check all contents implement IContentMetadata as a dictionary
    assert isinstance(service.contents, OrderedDict)
    # Check an arbitrary layer from the contents conforms to IContentMetadata.
    content = service.contents[list(service.contents.keys())[0]]
    for attribute in ['id', 'title', 'boundingBox', 'boundingBoxWGS84', 'crsOptions', 'styles', 'timepositions']:
        assert hasattr(content, attribute)
@pytest.mark.online
def test_ows_interfaces_wcs():
    """Check a live WCS 1.0.0 instance conforms to the common OWSLib interface.

    Bug fix: bare ``isinstance(...)`` expressions discarded their results; the
    type checks are now asserted.
    """
    service = WebCoverageService(WCS_SERVICE_URL, version='1.0.0')
    service.alias = 'WCS'
    assert isinstance(service, owslib.coverage.wcs100.WebCoverageService_1_0_0)
    # URL attribute
    assert service.url == WCS_SERVICE_URL
    # version attribute
    assert service.version == '1.0.0'
    # Identification object
    assert hasattr(service, 'identification')
    # Check all ServiceIdentification attributes
    assert service.identification.type == 'OGC:WCS'
    for attribute in ['type', 'version', 'title', 'abstract', 'keywords', 'fees']:
        assert hasattr(service.identification, attribute)
    # Check all ServiceProvider attributes
    for attribute in ['name', 'url', 'contact']:
        assert hasattr(service.provider, attribute)
    # Check all operations implement IOperationMetadata
    for op in service.operations:
        for attribute in ['name', 'methods']:
            assert hasattr(op, attribute)
    # Check all contents implement IContentMetadata as a dictionary
    assert isinstance(service.contents, dict)
    # Check an arbitrary coverage from the contents conforms to IContentMetadata.
    content = service.contents[list(service.contents.keys())[0]]
    for attribute in ['id', 'title', 'boundingBox', 'boundingBoxWGS84', 'crsOptions', 'styles', 'timepositions']:
        assert hasattr(content, attribute)
def test_ows_interfaces_wfs():
    """Check a WFS 1.0.0 instance (from canned XML) conforms to the OWSLib interface.

    Bug fixes: the alias was set to 'CSW' (copy-paste from the CSW test) and
    is now 'WFS'; bare ``isinstance(...)`` expressions discarded their results
    and are now asserted.
    """
    wfsxml = open(resource_file('mapserver-wfs-cap.xml'), 'rb').read()
    service = WebFeatureService('url', version='1.0', xml=wfsxml)
    service.alias = 'WFS'
    assert isinstance(service, owslib.feature.wfs100.WebFeatureService_1_0_0)
    # URL attribute
    assert service.url == 'url'
    # version attribute
    assert service.version == '1.0'
    # Identification object
    assert hasattr(service, 'identification')
    # Check all ServiceIdentification attributes
    assert service.identification.type == 'MapServer WFS'
    for attribute in ['type', 'version', 'title', 'abstract', 'keywords', 'accessconstraints', 'fees']:
        assert hasattr(service.identification, attribute)
    # Check all ServiceProvider attributes
    for attribute in ['name', 'url']:
        assert hasattr(service.provider, attribute)
    # Check all operations implement IOperationMetadata
    for op in service.operations:
        for attribute in ['name', 'formatOptions', 'methods']:
            assert hasattr(op, attribute)
    # Check all contents implement IContentMetadata as a dictionary
    assert isinstance(service.contents, dict)
    # Check an arbitrary feature type from the contents conforms to IContentMetadata.
    content = service.contents[list(service.contents.keys())[0]]
    for attribute in ['id', 'title', 'boundingBox', 'boundingBoxWGS84', 'crsOptions', 'styles', 'timepositions']:
        assert hasattr(content, attribute)
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetCaCertificateResult',
'AwaitableGetCaCertificateResult',
'get_ca_certificate',
'get_ca_certificate_output',
]
@pulumi.output_type
class GetCaCertificateResult:
    """
    The CA Certificate resource.

    Auto-generated Pulumi output type; do not edit by hand.  Values are stored
    via ``pulumi.set`` and surfaced through the read-only properties below.
    """
    def __init__(__self__, description=None, encoded_certificate=None, expiry_time_in_utc=None, id=None, METHOD_NAME=None, name=None, provisioning_state=None, system_data=None, type=None):
        # Generated constructor: type-check each provided argument, then stash
        # it on the instance in the form @pulumi.output_type expects.
        if description and not isinstance(description, str):
            raise TypeError("Expected argument 'description' to be a str")
        pulumi.set(__self__, "description", description)
        if encoded_certificate and not isinstance(encoded_certificate, str):
            raise TypeError("Expected argument 'encoded_certificate' to be a str")
        pulumi.set(__self__, "encoded_certificate", encoded_certificate)
        if expiry_time_in_utc and not isinstance(expiry_time_in_utc, str):
            raise TypeError("Expected argument 'expiry_time_in_utc' to be a str")
        pulumi.set(__self__, "expiry_time_in_utc", expiry_time_in_utc)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'issue_time_in_utc' to be a str")
        pulumi.set(__self__, "issue_time_in_utc", METHOD_NAME)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        """
        Description for the CA Certificate resource.
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter(name="encodedCertificate")
    def encoded_certificate(self) -> Optional[str]:
        """
        Base64 encoded PEM (Privacy Enhanced Mail) format certificate data.
        """
        return pulumi.get(self, "encoded_certificate")

    @property
    @pulumi.getter(name="expiryTimeInUtc")
    def expiry_time_in_utc(self) -> str:
        """
        Certificate expiry time in UTC. This is a read-only field.
        """
        return pulumi.get(self, "expiry_time_in_utc")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified identifier of the resource.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="issueTimeInUtc")
    def METHOD_NAME(self) -> str:
        """
        Certificate issue time in UTC. This is a read-only field.
        """
        return pulumi.get(self, "issue_time_in_utc")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Name of the resource.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        Provisioning state of the CA Certificate resource.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> 'outputs.SystemDataResponse':
        """
        The system metadata relating to the CaCertificate resource.
        """
        return pulumi.get(self, "system_data")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Type of the resource.
        """
        return pulumi.get(self, "type")
class AwaitableGetCaCertificateResult(GetCaCertificateResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Generated awaitable wrapper: the values are already resolved, so the
        # (unreachable) yield only exists to make this method a generator, and
        # awaiting returns a plain GetCaCertificateResult immediately.
        if False:
            yield self
        return GetCaCertificateResult(
            description=self.description,
            encoded_certificate=self.encoded_certificate,
            expiry_time_in_utc=self.expiry_time_in_utc,
            id=self.id,
            METHOD_NAME=self.METHOD_NAME,
            name=self.name,
            provisioning_state=self.provisioning_state,
            system_data=self.system_data,
            type=self.type)
def get_ca_certificate(ca_certificate_name: Optional[str] = None,
                       namespace_name: Optional[str] = None,
                       resource_group_name: Optional[str] = None,
                       opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetCaCertificateResult:
    """
    Get properties of a CA certificate.
    Azure REST API version: 2023-06-01-preview.


    :param str ca_certificate_name: Name of the CA certificate.
    :param str namespace_name: Name of the namespace.
    :param str resource_group_name: The name of the resource group within the user's subscription.
    :param pulumi.InvokeOptions opts: Options controlling how the invoke is executed.
    :return: An awaitable wrapper around the resolved CA certificate properties.
    """
    # Build the engine invoke arguments (camelCase keys, as the provider expects).
    __args__ = dict()
    __args__['caCertificateName'] = ca_certificate_name
    __args__['namespaceName'] = namespace_name
    __args__['resourceGroupName'] = resource_group_name
    # Merge caller-supplied options with provider defaults before invoking.
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:eventgrid:getCaCertificate', __args__, opts=opts, typ=GetCaCertificateResult).value

    return AwaitableGetCaCertificateResult(
        description=pulumi.get(__ret__, 'description'),
        encoded_certificate=pulumi.get(__ret__, 'encoded_certificate'),
        expiry_time_in_utc=pulumi.get(__ret__, 'expiry_time_in_utc'),
        id=pulumi.get(__ret__, 'id'),
        METHOD_NAME=pulumi.get(__ret__, 'issue_time_in_utc'),
        name=pulumi.get(__ret__, 'name'),
        provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
        system_data=pulumi.get(__ret__, 'system_data'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_ca_certificate)
def get_ca_certificate_output(ca_certificate_name: Optional[pulumi.Input[str]] = None,
                              namespace_name: Optional[pulumi.Input[str]] = None,
                              resource_group_name: Optional[pulumi.Input[str]] = None,
                              opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetCaCertificateResult]:
    """
    Get properties of a CA certificate.
    Azure REST API version: 2023-06-01-preview.


    :param str ca_certificate_name: Name of the CA certificate.
    :param str namespace_name: Name of the namespace.
    :param str resource_group_name: The name of the resource group within the user's subscription.
    """
    # Body intentionally empty: lift_output_func wraps get_ca_certificate to
    # accept pulumi.Input values and return a pulumi.Output.
    ...
#!/usr/bin/env python
#
# Copyright (C) 2017 Accton Technology Corporation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ------------------------------------------------------------------
# HISTORY:
# mm/dd/yyyy (A.D.)
# 5/27/2019: Brandon_Chuang create
# ------------------------------------------------------------------
try:
import time
import logging
from collections import namedtuple
except ImportError as e:
raise ImportError('%s - required module not found' % str(e))
class FanUtil(object):
    """Platform-specific FanUtil class.

    Reads and writes fan state (fault, direction, duty cycle) through the
    sysfs nodes exposed by the fan controller on i2c bus 3, address 0x63.
    """

    FAN_NUM_ON_MAIN_BROAD = 5
    FAN_NUM_1_IDX = 1
    FAN_NUM_2_IDX = 2
    FAN_NUM_3_IDX = 3
    FAN_NUM_4_IDX = 4
    FAN_NUM_5_IDX = 5

    FAN_NODE_NUM_OF_MAP = 2
    FAN_NODE_FAULT_IDX_OF_MAP = 1
    FAN_NODE_DIR_IDX_OF_MAP = 2

    BASE_VAL_PATH = '/sys/bus/i2c/devices/3-0063/{0}'
    FAN_DUTY_PATH = '/sys/bus/i2c/devices/3-0063/fan_duty_cycle_percentage'

    # Dictionary where
    #   key1 = fan id index (integer) starting from 1
    #   key2 = fan node index (integer) starting from 1
    #   value = path to fan device file (string)
    # Populated once in __init__ from _fan_to_device_node_mapping.
    _fan_to_device_path_mapping = {}

    _fan_to_device_node_mapping = {
        (FAN_NUM_1_IDX, FAN_NODE_FAULT_IDX_OF_MAP): 'fan1_fault',
        (FAN_NUM_1_IDX, FAN_NODE_DIR_IDX_OF_MAP): 'fan1_direction',

        (FAN_NUM_2_IDX, FAN_NODE_FAULT_IDX_OF_MAP): 'fan2_fault',
        (FAN_NUM_2_IDX, FAN_NODE_DIR_IDX_OF_MAP): 'fan2_direction',

        (FAN_NUM_3_IDX, FAN_NODE_FAULT_IDX_OF_MAP): 'fan3_fault',
        (FAN_NUM_3_IDX, FAN_NODE_DIR_IDX_OF_MAP): 'fan3_direction',

        (FAN_NUM_4_IDX, FAN_NODE_FAULT_IDX_OF_MAP): 'fan4_fault',
        (FAN_NUM_4_IDX, FAN_NODE_DIR_IDX_OF_MAP): 'fan4_direction',

        (FAN_NUM_5_IDX, FAN_NODE_FAULT_IDX_OF_MAP): 'fan5_fault',
        (FAN_NUM_5_IDX, FAN_NODE_DIR_IDX_OF_MAP): 'fan5_direction',
    }

    def __init__(self):
        # Pre-compute the sysfs path for every (fan, node) pair; no I/O here.
        fan_path = self.BASE_VAL_PATH
        for fan_num in range(self.FAN_NUM_1_IDX, self.FAN_NUM_ON_MAIN_BROAD+1):
            for node_num in range(self.FAN_NODE_FAULT_IDX_OF_MAP, self.FAN_NODE_NUM_OF_MAP+1):
                self._fan_to_device_path_mapping[(fan_num, node_num)] = fan_path.format(
                    self._fan_to_device_node_mapping[(fan_num, node_num)])

    def _get_fan_to_device_node(self, fan_num, node_num):
        """Return the sysfs node name (e.g. 'fan1_fault') for a (fan, node) pair."""
        return self._fan_to_device_node_mapping[(fan_num, node_num)]

    def _get_fan_node_val(self, fan_num, node_num):
        """Read an integer value from a fan sysfs node.

        Returns the integer value, or None on a parameter/IO error.
        """
        if fan_num < self.FAN_NUM_1_IDX or fan_num > self.FAN_NUM_ON_MAIN_BROAD:
            logging.debug('GET. Parameter error. fan_num:%d', fan_num)
            return None

        if node_num < self.FAN_NODE_FAULT_IDX_OF_MAP or node_num > self.FAN_NODE_NUM_OF_MAP:
            logging.debug('GET. Parameter error. node_num:%d', node_num)
            return None

        device_path = self.get_fan_to_device_path(fan_num, node_num)
        try:
            val_file = open(device_path, 'r')
        except IOError as e:
            logging.error('GET. unable to open file: %s', str(e))
            return None

        content = val_file.readline().rstrip()
        if content == '':
            logging.debug('GET. content is NULL. device_path:%s', device_path)
            val_file.close()  # bug fix: the empty-content path leaked the file handle
            return None

        try:
            val_file.close()
        except:
            logging.debug('GET. unable to close file. device_path:%s', device_path)
            return None

        return int(content)

    def _set_fan_node_val(self, fan_num, node_num, val):
        """Write a value to a fan sysfs node.

        Returns True on success, None on a parameter/IO error.
        """
        if fan_num < self.FAN_NUM_1_IDX or fan_num > self.FAN_NUM_ON_MAIN_BROAD:
            logging.debug('SET. Parameter error. fan_num:%d', fan_num)
            return None

        if node_num < self.FAN_NODE_FAULT_IDX_OF_MAP or node_num > self.FAN_NODE_NUM_OF_MAP:
            logging.debug('SET. Parameter error. node_num:%d', node_num)
            return None

        # Bug fix: device_path was used in the empty-content log message before
        # it was assigned (a NameError instead of a log line); compute it first.
        # The 'GET.' prefixes on this setter's log messages were also copy-paste
        # errors and now read 'SET.'.
        device_path = self.get_fan_to_device_path(fan_num, node_num)

        content = str(val)
        if content == '':
            logging.debug('SET. content is NULL. device_path:%s', device_path)
            return None

        try:
            val_file = open(device_path, 'w')
        except IOError as e:
            logging.error('SET. unable to open file: %s', str(e))
            return None

        val_file.write(content)

        try:
            val_file.close()
        except:
            logging.debug('SET. unable to close file. device_path:%s', device_path)
            return None

        return True

    def get_num_fans(self):
        """Number of fans on the main board."""
        return self.FAN_NUM_ON_MAIN_BROAD

    def get_idx_fan_start(self):
        """First (1-based) fan index."""
        return self.FAN_NUM_1_IDX

    def get_num_nodes(self):
        """Number of sysfs nodes tracked per fan (fault, direction)."""
        return self.FAN_NODE_NUM_OF_MAP

    def get_idx_node_start(self):
        """First (1-based) node index."""
        return self.FAN_NODE_FAULT_IDX_OF_MAP

    def get_size_node_map(self):
        return len(self._fan_to_device_node_mapping)

    def get_size_path_map(self):
        return len(self._fan_to_device_path_mapping)

    def get_fan_to_device_path(self, fan_num, node_num):
        """Full sysfs path for a (fan, node) pair."""
        return self._fan_to_device_path_mapping[(fan_num, node_num)]

    def get_fan_fault(self, fan_num):
        """Fault flag for a fan (nonzero means faulted), or None on error."""
        return self._get_fan_node_val(fan_num, self.FAN_NODE_FAULT_IDX_OF_MAP)

    def get_fan_dir(self, fan_num):
        """Airflow direction value for a fan, or None on error."""
        return self._get_fan_node_val(fan_num, self.FAN_NODE_DIR_IDX_OF_MAP)

    def get_fan_duty_cycle(self):
        """Read the shared fan duty cycle percentage; False on open failure."""
        try:
            val_file = open(self.FAN_DUTY_PATH)
        except IOError as e:
            print("Error: unable to open file: %s" % str(e))
            return False

        content = val_file.readline().rstrip()
        val_file.close()

        return int(content)

    def set_fan_duty_cycle(self, val):
        """Write the shared fan duty cycle percentage; False on open failure."""
        try:
            fan_file = open(self.FAN_DUTY_PATH, 'r+')
        except IOError as e:
            print("Error: unable to open file: %s" % str(e))
            return False
        fan_file.write(str(val))
        fan_file.close()
        return True

    def METHOD_NAME(self, fan_num):
        # NOTE(review): FANR_NODE_SPEED_IDX_OF_MAP is not defined anywhere on
        # this class, so calling this raises AttributeError.  Presumably a
        # leftover for platforms with rear-fan tachometers -- confirm intended
        # index before wiring this up.
        return self._get_fan_node_val(fan_num, self.FANR_NODE_SPEED_IDX_OF_MAP)

    def get_fan_status(self, fan_num):
        """True if the fan reports no fault, False if faulted, None on bad index."""
        if fan_num < self.FAN_NUM_1_IDX or fan_num > self.FAN_NUM_ON_MAIN_BROAD:
            logging.debug('GET. Parameter error. fan_num, %d', fan_num)
            return None

        if self.get_fan_fault(fan_num) is not None and self.get_fan_fault(fan_num) > 0:
            logging.debug('GET. FAN fault. fan_num, %d', fan_num)
            return False

        return True
298,942 | test asgi headers | import json
import pytest
import httpx
from httpx import ASGITransport
async def hello_world(scope, receive, send):
    """Minimal ASGI app: respond 200 with a fixed plain-text body."""
    body = b"Hello, World!"
    await send(
        {
            "type": "http.response.start",
            "status": 200,
            "headers": [
                (b"content-type", "text/plain"),
                (b"content-length", str(len(body))),
            ],
        }
    )
    await send({"type": "http.response.body", "body": body})
async def echo_path(scope, receive, send):
    """ASGI app that echoes the decoded request path as JSON."""
    body = json.dumps({"path": scope["path"]}).encode("utf-8")
    await send(
        {
            "type": "http.response.start",
            "status": 200,
            "headers": [
                (b"content-type", "text/plain"),
                (b"content-length", str(len(body))),
            ],
        }
    )
    await send({"type": "http.response.body", "body": body})
async def echo_raw_path(scope, receive, send):
    """ASGI app that echoes the raw (undecoded) request path as JSON."""
    body = json.dumps({"raw_path": scope["raw_path"].decode("ascii")}).encode("utf-8")
    await send(
        {
            "type": "http.response.start",
            "status": 200,
            "headers": [
                (b"content-type", "text/plain"),
                (b"content-length", str(len(body))),
            ],
        }
    )
    await send({"type": "http.response.body", "body": body})
async def echo_body(scope, receive, send):
    """ASGI app that streams the request body straight back, chunk by chunk."""
    await send(
        {
            "type": "http.response.start",
            "status": 200,
            "headers": [(b"content-type", "text/plain")],
        }
    )
    while True:
        message = await receive()
        chunk = message.get("body", b"")
        still_more = message.get("more_body", False)
        await send(
            {"type": "http.response.body", "body": chunk, "more_body": still_more}
        )
        if not still_more:
            break
async def echo_headers(scope, receive, send):
    """ASGI app that echoes the request headers (decoded) as JSON."""
    decoded = [[k.decode(), v.decode()] for k, v in scope["headers"]]
    body = json.dumps({"headers": decoded}).encode("utf-8")
    await send(
        {
            "type": "http.response.start",
            "status": 200,
            "headers": [
                (b"content-type", "text/plain"),
                (b"content-length", str(len(body))),
            ],
        }
    )
    await send({"type": "http.response.body", "body": body})
async def raise_exc(scope, receive, send):
    """ASGI app that fails immediately, before any response is started."""
    raise RuntimeError()
async def raise_exc_after_response(scope, receive, send):
    """ASGI app that sends a full response, then raises."""
    body = b"Hello, World!"
    await send(
        {
            "type": "http.response.start",
            "status": 200,
            "headers": [
                (b"content-type", "text/plain"),
                (b"content-length", str(len(body))),
            ],
        }
    )
    await send({"type": "http.response.body", "body": body})
    raise RuntimeError()
@pytest.mark.anyio
async def test_asgi_transport():
    """Drive the hello_world app through ASGITransport directly."""
    async with httpx.ASGITransport(app=hello_world) as transport:
        req = httpx.Request("GET", "http://www.example.com/")
        resp = await transport.handle_async_request(req)
        await resp.aread()
    assert resp.status_code == 200
    assert resp.content == b"Hello, World!"
@pytest.mark.anyio
async def test_asgi_transport_no_body():
    """A body-less GET echoed back should yield empty content."""
    async with httpx.ASGITransport(app=echo_body) as transport:
        req = httpx.Request("GET", "http://www.example.com/")
        resp = await transport.handle_async_request(req)
        await resp.aread()
    assert resp.status_code == 200
    assert resp.content == b""
@pytest.mark.anyio
async def test_asgi():
    """End-to-end GET against the hello_world app via AsyncClient."""
    async with httpx.AsyncClient(app=hello_world) as client:
        resp = await client.get("http://www.example.org/")
    assert resp.status_code == 200
    assert resp.text == "Hello, World!"
@pytest.mark.anyio
async def test_asgi_urlencoded_path():
    """The decoded `path` in the scope keeps the literal '@'."""
    target = httpx.URL("http://www.example.org/").copy_with(path="/user@example.org")
    async with httpx.AsyncClient(app=echo_path) as client:
        resp = await client.get(target)
    assert resp.status_code == 200
    assert resp.json() == {"path": "/user@example.org"}
@pytest.mark.anyio
async def test_asgi_raw_path():
    """The `raw_path` in the scope matches the request path bytes."""
    target = httpx.URL("http://www.example.org/").copy_with(path="/user@example.org")
    async with httpx.AsyncClient(app=echo_raw_path) as client:
        resp = await client.get(target)
    assert resp.status_code == 200
    assert resp.json() == {"raw_path": "/user@example.org"}
@pytest.mark.anyio
async def test_asgi_upload():
    """POSTed content is streamed back verbatim by echo_body."""
    async with httpx.AsyncClient(app=echo_body) as client:
        resp = await client.post("http://www.example.org/", content=b"example")
    assert resp.status_code == 200
    assert resp.text == "example"
@pytest.mark.anyio
async def METHOD_NAME():
    """Default httpx request headers are forwarded to the ASGI app."""
    async with httpx.AsyncClient(app=echo_headers) as client:
        resp = await client.get("http://www.example.org/")
    assert resp.status_code == 200
    assert resp.json() == {
        "headers": [
            ["host", "www.example.org"],
            ["accept", "*/*"],
            ["accept-encoding", "gzip, deflate, br"],
            ["connection", "keep-alive"],
            ["user-agent", f"python-httpx/{httpx.__version__}"],
        ]
    }
@pytest.mark.anyio
async def test_asgi_exc():
    """An app exception before the response propagates to the caller."""
    async with httpx.AsyncClient(app=raise_exc) as client:
        with pytest.raises(RuntimeError):
            await client.get("http://www.example.org/")
@pytest.mark.anyio
async def test_asgi_exc_after_response():
    """An app exception after a complete response still propagates."""
    async with httpx.AsyncClient(app=raise_exc_after_response) as client:
        with pytest.raises(RuntimeError):
            await client.get("http://www.example.org/")
@pytest.mark.anyio
async def test_asgi_disconnect_after_response_complete():
    """After the response is complete, a further receive() must yield
    an http.disconnect message (per the ASGI spec)."""
    disconnect = False

    async def read_body(scope, receive, send):
        nonlocal disconnect
        await send(
            {
                "type": "http.response.start",
                "status": 200,
                "headers": [(b"content-type", "text/plain")],
            }
        )
        # Drain the request body.
        while True:
            message = await receive()
            if not message.get("more_body", False):
                break
        await send({"type": "http.response.body", "body": b"", "more_body": False})
        # The ASGI spec says of the Disconnect message:
        # "Sent to the application when a HTTP connection is closed or if
        # receive is called after a response has been sent." So calling
        # receive() again should produce the disconnect message.
        message = await receive()
        disconnect = message.get("type") == "http.disconnect"

    async with httpx.AsyncClient(app=read_body) as client:
        resp = await client.post("http://www.example.org/", content=b"example")
    assert resp.status_code == 200
    assert disconnect
@pytest.mark.anyio
async def test_asgi_exc_no_raise():
    """With raise_app_exceptions=False the transport maps failures to 500."""
    transport = ASGITransport(app=raise_exc, raise_app_exceptions=False)
    async with httpx.AsyncClient(transport=transport) as client:
        resp = await client.get("http://www.example.org/")
    assert resp.status_code == 500
298,943 | test write | #
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
import json
import random
import sqlite3
import string
import tempfile
from datetime import datetime
from typing import Dict
from unittest.mock import MagicMock
import pytest
from airbyte_cdk.models import (
AirbyteMessage,
AirbyteRecordMessage,
AirbyteStream,
ConfiguredAirbyteCatalog,
ConfiguredAirbyteStream,
DestinationSyncMode,
Status,
SyncMode,
Type,
)
from destination_sqlite import DestinationSqlite
@pytest.fixture(autouse=True)
def disable_destination_modification(monkeypatch, request):
    """Bypass destination-path munging unless the test opts out with
    @pytest.mark.disable_autouse."""
    if "disable_autouse" in request.keywords:
        return
    monkeypatch.setattr(DestinationSqlite, "_get_destination_path", lambda _, x: x)
@pytest.fixture(scope="module")
def local_file_config() -> Dict[str, str]:
    """Yield a config pointing at a temporary sqlite file, closed on teardown."""
    tmp = tempfile.NamedTemporaryFile()
    try:
        yield {"destination_path": tmp.name}
    finally:
        tmp.close()
@pytest.fixture(scope="module")
def test_table_name() -> str:
    """Random per-session table name to avoid collisions between runs."""
    suffix = "".join(random.choice(string.ascii_lowercase) for _ in range(10))
    return f"airbyte_integration_{suffix}"
@pytest.fixture
def table_schema() -> str:
    """JSON schema with a single nullable string column."""
    return {"type": "object", "properties": {"column1": {"type": ["null", "string"]}}}
@pytest.fixture
def configured_catalogue(test_table_name: str, table_schema: str) -> ConfiguredAirbyteCatalog:
    """Catalog with one incremental/append stream over the test table."""
    stream = AirbyteStream(
        name=test_table_name,
        json_schema=table_schema,
        supported_sync_modes=[SyncMode.incremental],
    )
    return ConfiguredAirbyteCatalog(
        streams=[
            ConfiguredAirbyteStream(
                stream=stream,
                sync_mode=SyncMode.incremental,
                destination_sync_mode=DestinationSyncMode.append,
            )
        ]
    )
@pytest.fixture
def invalid_config() -> Dict[str, str]:
    """Path at the filesystem root -- expected to fail the connection check."""
    return {"destination_path": "/sqlite.db"}
@pytest.fixture
def airbyte_message1(test_table_name: str):
    """First sample RECORD message for the test stream."""
    now_ms = int(datetime.now().timestamp()) * 1000
    return AirbyteMessage(
        type=Type.RECORD,
        record=AirbyteRecordMessage(
            stream=test_table_name,
            data={"key1": "value1", "key2": 3},
            emitted_at=now_ms,
        ),
    )
@pytest.fixture
def airbyte_message2(test_table_name: str):
    """Second sample RECORD message for the test stream."""
    now_ms = int(datetime.now().timestamp()) * 1000
    return AirbyteMessage(
        type=Type.RECORD,
        record=AirbyteRecordMessage(
            stream=test_table_name,
            data={"key1": "value2", "key2": 2},
            emitted_at=now_ms,
        ),
    )
@pytest.mark.parametrize("config", ["invalid_config"])
@pytest.mark.disable_autouse
def test_check_fails(config, request):
    """check() must report FAILED for an unwritable destination path."""
    resolved = request.getfixturevalue(config)
    outcome = DestinationSqlite().check(logger=MagicMock(), config=resolved)
    assert outcome.status == Status.FAILED
@pytest.mark.parametrize("config", ["local_file_config"])
def test_check_succeeds(config, request):
    """check() must report SUCCEEDED for a writable temp-file path."""
    resolved = request.getfixturevalue(config)
    outcome = DestinationSqlite().check(logger=MagicMock(), config=resolved)
    assert outcome.status == Status.SUCCEEDED
@pytest.mark.parametrize("config", ["local_file_config"])
def METHOD_NAME(
    config: Dict[str, str],
    request,
    configured_catalogue: ConfiguredAirbyteCatalog,
    airbyte_message1: AirbyteMessage,
    airbyte_message2: AirbyteMessage,
    test_table_name: str,
):
    """Write two records and verify both land in the raw table."""
    resolved = request.getfixturevalue(config)
    destination = DestinationSqlite()
    emitted = list(
        destination.write(
            config=resolved,
            configured_catalog=configured_catalogue,
            input_messages=[airbyte_message1, airbyte_message2],
        )
    )
    # A pure append run yields no state messages back.
    assert emitted == []
    con = sqlite3.connect(resolved.get("destination_path"))
    with con:
        rows = con.execute(
            f"SELECT _airbyte_ab_id, _airbyte_emitted_at, _airbyte_data FROM _airbyte_raw_{test_table_name} ORDER BY _airbyte_data"
        ).fetchall()
    assert len(rows) == 2
    assert rows[0][2] == json.dumps(airbyte_message1.record.data)
    assert rows[1][2] == json.dumps(airbyte_message2.record.data)
298,944 | get input buffer declarations | # Buffers allocated on the heap
from bgenOutput import *
from bgenType import OutputOnlyMixIn
from bgenBuffer import FixedInputOutputBufferType
class HeapInputOutputBufferType(FixedInputOutputBufferType):
    """Input-output buffer allocated on the heap -- passed as (inbuffer, outbuffer, size).
    Instantiate without parameters.
    Call from Python with input buffer.
    """
    def __init__(self, datatype = 'char', sizetype = 'int', sizeformat = None):
        # Size "0": the buffer length comes from the Python-side input.
        FixedInputOutputBufferType.__init__(self, "0", datatype, sizetype, sizeformat)
    def getOutputBufferDeclarations(self, name, constmode=False, outmode=False):
        # Generates the C declaration for the heap-allocated output buffer.
        if constmode:
            # Call form works on both Python 2 and 3 (was the Python-2-only
            # "raise E, msg" statement form).
            raise RuntimeError("Cannot use const output buffer")
        if outmode:
            out = "*"
        else:
            out = ""
        return ["%s%s *%s__out__" % (self.datatype, out, name)]
    def getargsCheck(self, name):
        # Emit C code allocating the output buffer to the input's length.
        Output("if ((%s__out__ = malloc(%s__in_len__)) == NULL)", name, name)
        OutLbrace()
        Output('PyErr_NoMemory();')
        Output("goto %s__error__;", name)
        self.label_needed = 1
        OutRbrace()
        Output("%s__len__ = %s__in_len__;", name, name)
    def passOutput(self, name):
        # Argument expression: (inbuffer, outbuffer, size-by-value).
        return "%s__in__, %s__out__, (%s)%s__len__" % \
               (name, name, self.sizetype, name)
    def mkvalueArgs(self, name):
        return "%s__out__, (int)%s__len__" % (name, name)
    def cleanup(self, name):
        # Free the heap buffer before the base-class cleanup.
        Output("free(%s__out__);", name)
        FixedInputOutputBufferType.cleanup(self, name)
class VarHeapInputOutputBufferType(HeapInputOutputBufferType):
    """same as base class, but passed as (inbuffer, outbuffer, &size)"""
    def passOutput(self, name):
        # Size is passed by address so the callee can report the actual length.
        return "%s__in__, %s__out__, &%s__len__" % (name, name, name)
class HeapCombinedInputOutputBufferType(HeapInputOutputBufferType):
    """same as base class, but passed as (inoutbuffer, size)"""
    def passOutput(self, name):
        # Copies the input into the output buffer; memcpy returns the
        # destination pointer, which becomes the single buffer argument.
        # NOTE(review): despite the docstring's "(inoutbuffer, size)", this
        # expression supplies only the buffer -- confirm the size argument is
        # produced elsewhere in the generated call.
        return "(%s *)memcpy(%s__out__, %s__in__, %s__len__)" % \
            (self.datatype, name, name, name)
class VarHeapCombinedInputOutputBufferType(HeapInputOutputBufferType):
    """same as base class, but passed as (inoutbuffer, &size)"""
    def passOutput(self, name):
        # NOTE(review): the generated C passes "&len" as memcpy's *size*
        # argument, which is a pointer, not a length -- compare with the
        # non-Var combined class above, which passes "len" by value. This
        # looks like a latent bug in the template; confirm against the
        # intended generated call signature before changing it.
        return "(%s *)memcpy(%s__out__, %s__in__, &%s__len__)" % \
            (self.datatype, name, name, name)
class HeapOutputBufferType(OutputOnlyMixIn, HeapInputOutputBufferType):
    """Output buffer allocated on the heap -- passed as (buffer, size).
    Instantiate without parameters.
    Call from Python with buffer size.
    """
    def METHOD_NAME(self, name, constmode=False):
        # Output-only buffer: there is no input buffer to declare.
        return []
    def getargsFormat(self):
        # The Python caller supplies only the desired buffer size (an int).
        return "i"
    def getargsArgs(self, name):
        return "&%s__in_len__" % name
    def passOutput(self, name):
        # Argument expression: (buffer, size-by-value).
        return "%s__out__, %s__len__" % (name, name)
class VarHeapOutputBufferType(HeapOutputBufferType):
    """Output buffer allocated on the heap -- passed as (buffer, &size).
    Instantiate without parameters.
    Call from Python with buffer size.
    """
    def passOutput(self, name):
        # Size passed by address so the callee can report the written length.
        return "%s__out__, &%s__len__" % (name, name)
class VarVarHeapOutputBufferType(VarHeapOutputBufferType):
    """Output buffer allocated on the heap -- passed as (buffer, size, &size).
    Instantiate without parameters.
    Call from Python with buffer size.
    """
    def passOutput(self, name):
        # Capacity by value plus an out-parameter for the written length.
        return "%s__out__, %s__len__, &%s__len__" % (name, name, name)
class MallocHeapOutputBufferType(HeapOutputBufferType):
    """Output buffer allocated by the called function -- passed as (&buffer, &size).
    Instantiate without parameters.
    Call from Python without parameters.
    """
    def getargsCheck(self, name):
        # The callee allocates; initialize the pointer so cleanup is safe
        # even when the call never assigns it.
        Output("%s__out__ = NULL;", name)
    def getAuxDeclarations(self, name):
        return []
    def passOutput(self, name):
        # Both buffer pointer and size are out-parameters.
        return "&%s__out__, &%s__len__" % (name, name)
    def getargsFormat(self):
        # No Python-side arguments are consumed.
        return ""
    def getargsArgs(self, name):
        return None
    def mkvalueFormat(self):
        # "z#": a possibly-NULL string with explicit length.
        return "z#"
    def cleanup(self, name):
        # Free only if the callee actually allocated (guarded by the NULL init).
        Output("if( %s__out__ ) free(%s__out__);", name, name)
298,945 | get config | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
from dynaconf import Dynaconf
from iotdb.mlnode.constant import (MLNODE_CONF_DIRECTORY_NAME,
MLNODE_CONF_FILE_NAME,
MLNODE_MODEL_STORAGE_DIRECTORY_NAME)
from iotdb.mlnode.exception import BadNodeUrlError
from iotdb.mlnode.log import logger
from iotdb.mlnode.util import parse_endpoint_url
from iotdb.thrift.common.ttypes import TEndPoint
class MLNodeConfig(object):
    """Holds all MLNode runtime settings with accessor methods.

    Defaults target a local single-node deployment; values are overridden
    from the config file by MLNodeDescriptor.load_config_from_file().
    """
    def __init__(self):
        # Used for connection of DataNode/ConfigNode clients
        self.__mn_rpc_address: str = "127.0.0.1"
        self.__mn_rpc_port: int = 10810
        # Directory to save models
        self.__mn_model_storage_dir = MLNODE_MODEL_STORAGE_DIRECTORY_NAME
        # Cache number of model storage to avoid repeated loading
        self.__mn_model_storage_cache_size = 30
        # Target ConfigNode to be connected by MLNode
        self.__mn_target_config_node: TEndPoint = TEndPoint("127.0.0.1", 10710)
        # Target DataNode to be connected by MLNode
        self.__mn_target_data_node: TEndPoint = TEndPoint("127.0.0.1", 10780)
    def get_mn_rpc_address(self) -> str:
        """RPC address this MLNode listens on."""
        return self.__mn_rpc_address
    def set_mn_rpc_address(self, mn_rpc_address: str) -> None:
        self.__mn_rpc_address = mn_rpc_address
    def get_mn_rpc_port(self) -> int:
        """RPC port this MLNode listens on."""
        return self.__mn_rpc_port
    def set_mn_rpc_port(self, mn_rpc_port: int) -> None:
        self.__mn_rpc_port = mn_rpc_port
    def get_mn_model_storage_dir(self) -> str:
        """Directory where trained models are persisted."""
        return self.__mn_model_storage_dir
    def set_mn_model_storage_dir(self, mn_model_storage_dir: str) -> None:
        self.__mn_model_storage_dir = mn_model_storage_dir
    def get_mn_model_storage_cache_size(self) -> int:
        """Number of models kept in memory to avoid repeated loading."""
        return self.__mn_model_storage_cache_size
    def set_mn_model_storage_cache_size(self, mn_model_storage_cache_size: int) -> None:
        self.__mn_model_storage_cache_size = mn_model_storage_cache_size
    def get_mn_target_config_node(self) -> TEndPoint:
        """Endpoint of the ConfigNode this MLNode connects to."""
        return self.__mn_target_config_node
    def set_mn_target_config_node(self, mn_target_config_node: str) -> None:
        # Accepts a "host:port" URL string; parse_endpoint_url may raise
        # BadNodeUrlError on malformed input.
        self.__mn_target_config_node = parse_endpoint_url(mn_target_config_node)
    def get_mn_target_data_node(self) -> TEndPoint:
        """Endpoint of the DataNode this MLNode connects to."""
        return self.__mn_target_data_node
    def set_mn_target_data_node(self, mn_target_data_node: str) -> None:
        # Same URL-string parsing as the ConfigNode setter.
        self.__mn_target_data_node = parse_endpoint_url(mn_target_data_node)
class MLNodeDescriptor(object):
    """Owns the MLNodeConfig instance and populates it from the conf file."""
    def __init__(self):
        self.__config = MLNodeConfig()
    def load_config_from_file(self) -> None:
        """Overlay settings from the MLNode conf file onto the defaults.

        Missing file is not an error: defaults are kept and a message logged.
        """
        conf_file = os.path.join(os.getcwd(), MLNODE_CONF_DIRECTORY_NAME, MLNODE_CONF_FILE_NAME)
        if not os.path.exists(conf_file):
            logger.info("Cannot find MLNode config file '{}', use default configuration.".format(conf_file))
            return
        logger.info("Start to read MLNode config file '{}'...".format(conf_file))
        # noinspection PyBroadException
        try:
            file_configs = Dynaconf(
                envvar_prefix="DYNACONF",
                settings_files=[conf_file],
            )
            # NOTE(review): attribute access on a Dynaconf object for a key
            # absent from the file presumably raises AttributeError rather
            # than returning None; that would jump to the broad except below
            # and silently skip all remaining assignments -- confirm every
            # key is mandatory in the conf file, or switch to .get().
            if file_configs.mn_rpc_address is not None:
                self.__config.set_mn_rpc_address(file_configs.mn_rpc_address)
            if file_configs.mn_rpc_port is not None:
                self.__config.set_mn_rpc_port(file_configs.mn_rpc_port)
            if file_configs.mn_model_storage_dir is not None:
                self.__config.set_mn_model_storage_dir(file_configs.mn_model_storage_dir)
            if file_configs.mn_model_storage_cache_size is not None:
                self.__config.set_mn_model_storage_cache_size(file_configs.mn_model_storage_cache_size)
            if file_configs.mn_target_config_node is not None:
                self.__config.set_mn_target_config_node(file_configs.mn_target_config_node)
            if file_configs.mn_target_data_node is not None:
                self.__config.set_mn_target_data_node(file_configs.mn_target_data_node)
        except BadNodeUrlError:
            # Raised by the endpoint setters on malformed "host:port" strings.
            logger.warn("Cannot load MLNode conf file, use default configuration.")
        except Exception as e:
            # Any other parse failure also falls back to defaults.
            logger.warn("Cannot load MLNode conf file, use default configuration. {}".format(e))
    def METHOD_NAME(self) -> MLNodeConfig:
        """Return the (possibly file-overridden) configuration object."""
        return self.__config
# Module-level singleton: import `descriptor` to share one configuration
# instance across the MLNode process.
descriptor = MLNodeDescriptor()
298,946 | test da to dict | import pytest
from pathlib import Path
import xarray as xr
import numpy as np
import pandas as pd
from pysd.tools.ncfiles import NCFile
@pytest.fixture(scope="session")
def sample_dataset():
    """Build a small in-memory weather Dataset (adapted xarray example)."""
    np.random.seed(0)
    # Draw order matters for reproducibility: temperature, precipitation,
    # altitude -- same sequence as the seeded RNG expects.
    temperature = 15 + 8 * np.random.randn(3, 2, 2)
    precipitation = 10 * np.random.rand(3, 2, 2)
    altitude = 1000 * np.random.rand(2, 2)
    time = pd.date_range("2014-09-06", periods=3)
    data_vars = {
        "temperature": (["time", "x", "y"], temperature),
        "precipitation": (["time", "x", "y"], precipitation),
        "altitude": (["x", "y"], altitude),
        "non_dimensional_var": ([], np.array(5)),
        "time": (["time"], time),
    }
    coords = {
        "lon": (["x", "y"], [[-99.83, -99.32], [-99.79, -99.23]]),
        "lat": (["x", "y"], [[42.25, 42.21], [42.63, 42.59]]),
    }
    return xr.Dataset(
        data_vars=data_vars,
        coords=coords,
        attrs={"description": "Weather related data."},
    )
@pytest.fixture(scope="session")
def nc_file(sample_dataset, tmp_path_factory):
    """Write the sample dataset to a temporary NetCDF4 file; return its path."""
    out_path = tmp_path_factory.mktemp("data") / "dataset.nc"
    sample_dataset.to_netcdf(
        out_path, mode="w", format="NETCDF4", engine="netcdf4")
    return out_path
class TestNCFile():
    """Unit tests for pysd.tools.ncfiles.NCFile validation, serialization
    and export helpers."""
    # Path validation: type, existence and extension checks.
    @pytest.mark.parametrize(
        "file_path,raises,expect",
        [(1, True, TypeError),
         ("unexisting_file.nc", True, FileNotFoundError),
         (Path(__file__), True, ValueError),
         ("nc_file", False, "nc_file")
         ],
        ids=["Wrong type", "Not found", "Wrong extension", "Correct path"]
    )
    def test__validate_nc_path_errors(
            self, request, file_path, raises, expect):
        """A valid path is returned unchanged; invalid inputs raise."""
        if not raises:
            nc_file = request.getfixturevalue(file_path)
            assert NCFile._validate_nc_path(nc_file) == nc_file
        else:
            with pytest.raises(expect):
                NCFile._validate_nc_path(file_path)
    @pytest.mark.parametrize(
        "subset,raises,expected",
        [
            ([], False, [
                "temperature",
                "precipitation",
                "altitude",
                "non_dimensional_var",
                "time"
                ]
             ),
            (["temperature"], False, ["temperature", "time"]),
            (["non_existing"], True, ValueError),
            ("I am not a list", True, TypeError)
        ],
        ids=["empty", "one var", "non existing single var", "wrong type"]
    )
    @pytest.mark.filterwarnings("ignore")
    def test__validate_ds_subset(
            self, sample_dataset, subset, raises, expected):
        """Empty subset means all variables; "time" is always included."""
        if raises:
            with pytest.raises(expected):
                NCFile._validate_ds_subset(sample_dataset, subset)
        else:
            assert NCFile._validate_ds_subset(
                sample_dataset, subset) == expected
    # Expected values below come from the seeded RNG in sample_dataset.
    @pytest.mark.parametrize(
        "data_var,dims,coords,expected",
        [
            ("temperature", ["x", "y"], (0,0), ("temperature[0,0]", np.array(
                [29.11241876774131 ,29.94046392119974, 14.174249185651536]))
             ),
            ("altitude", ["x", "y"], (1,1), ("altitude[1,1]", np.array(
                944.66891705))
             ),
        ],
        ids = ["time-dependent data var", # returns vector
               "constant data var"] # returns scalar
    )
    def test__index_da_by_coord_labels(
            self, sample_dataset, data_var, dims, coords, expected):
        """Indexing yields a bracketed name and the data at those coords."""
        da = sample_dataset.data_vars[data_var]
        result = NCFile._index_da_by_coord_labels(
            da, dims, coords)
        assert result[0] == expected[0]
        assert np.allclose(result[1], expected[1])
    @pytest.mark.parametrize(
        "data_var,expected_keys",
        [
            ("temperature",
             [
                 'temperature[0,0]',
                 'temperature[0,1]',
                 'temperature[1,0]',
                 'temperature[1,1]'
             ]),
            ("altitude",
             [
                 'altitude[0,0]',
                 'altitude[0,1]',
                 'altitude[1,0]',
                 'altitude[1,1]'
             ]),
        ],
        ids = ["time-dependent data var", "constant data var"]
    )
    def METHOD_NAME(self, sample_dataset, data_var, expected_keys):
        """Serial and dask-delayed dict conversions agree key-for-key."""
        idx = "time"
        da = sample_dataset.data_vars[data_var]
        serial = NCFile.da_to_dict(da, index_dim=idx)
        assert all(map(lambda x: x in serial.keys(), expected_keys))
        delayed = NCFile.da_to_dict_delayed(da, index_dim=idx)
        assert all(
            map(lambda key: np.allclose(serial[key], delayed[key]),
                expected_keys
                )
            )
    @pytest.mark.parametrize(
        "d,raises,expected",
        [
            ({"time": [1, 2, 3], "col1": 4, "col2": [2, 3, 4]}, False,
             [[1, 2, 3], [4]*3]
             ),
            ({"col1": 4, "col2": [2, 3, 4]}, True, "irrelevant")
        ],
        ids=["ok", "missing time"])
    def test_dict_to_df(self, d, raises, expected):
        """'time' becomes the index; scalar columns broadcast to its length."""
        if raises:
            with pytest.raises(KeyError):
                NCFile.dict_to_df(d)
        else:
            df = NCFile.dict_to_df(d)
            assert all(df.index == expected[0])
            assert all(df["col1"] == expected[1])
    def test_df_to_text_file_errors(self, nc_file):
        """Bad output suffix or non-bool time_in_row raise early."""
        with pytest.raises(TypeError):
            NCFile.df_to_text_file(None, str(nc_file), time_in_row=False)
        csv_path = nc_file.parent / (nc_file.name + '.csv')
        with pytest.raises(ValueError):
            NCFile.df_to_text_file(None, csv_path, time_in_row="False")
    # Full export matrix: csv/tab output, transposed or not, serial/parallel.
    @pytest.mark.parametrize(
        "outfmt,time_in_row,parallel", [
            (".csv", True, True),
            (".csv", True, False),
            (".csv", False, True),
            (".csv", False, False),
            (".tab", True, True),
            (".tab", True, False),
            (".tab", False, True),
            (".tab", False, False)
        ])
    def test_to_text_file(
            self, shared_tmpdir, nc_file, outfmt, time_in_row, parallel):
        """Export succeeds and produces a file for every option combination."""
        obj = NCFile(nc_file, parallel=parallel)
        assert obj.ncfile == nc_file
        assert isinstance(obj.ds, xr.Dataset)
        outfile = shared_tmpdir / f"data_from_nc{outfmt}"
        obj.to_text_file(outfile, time_in_row=time_in_row)
        assert outfile.is_file()
    def test_ds_to_df(self, sample_dataset):
        """Subsetting to one variable yields its 4 flattened columns."""
        df = NCFile.ds_to_df(sample_dataset, subset=["temperature"])
        assert all(map(lambda x: x.startswith("temperature"),df.columns))
        assert df.shape == (3, 4)
298,947 | schedule injective from existing | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""x86 declaration and schedules."""
from tvm import te
from tvm.topi import tag
from tvm.tir import IntImm
from tvm.topi.generic.injective import (
METHOD_NAME as schedule_injective_for_concat,
)
from ..utils import is_empty_shape
def METHOD_NAME(sch, out):
    """Schedule an injective op inside an existing schedule.

    Parameters
    ----------
    sch: Schedule
        The schedule to update.
    out: Tensor
        The tensor representing the injective op.

    Returns
    -------
    sch: Schedule
        The updated schedule.
    """
    axes = sch[out].op.axis
    ndim = len(axes)
    # Parallelize over the outermost axes; fuse a few first when the rank
    # is high enough to give the parallel loop sufficient extent.
    if ndim >= 5:
        sch[out].parallel(sch[out].fuse(axes[0], axes[1], axes[2]))
    elif ndim >= 3:
        sch[out].parallel(sch[out].fuse(axes[0], axes[1]))
    elif ndim >= 1:
        sch[out].parallel(axes[0])
    # Vectorize the innermost loop; split by 16 first to get a const extent.
    if ndim >= 1:
        outer, inner = sch[out].split(axes[-1], factor=16)
        sch[out].vectorize(inner)
        # For a 1-D loop the split above breaks the parallel axis, so the
        # outer loop must be re-marked parallel.
        if ndim == 1:
            sch[out].parallel(outer)
    return sch
def schedule_injective(outs):
    """X86 schedule for injective op.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of injective in the format
        of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    s = te.create_schedule([x.op for x in outs])
    te.schedule.AutoInlineInjective(s)
    # Empty tensors have nothing to schedule.
    for out in outs:
        if not is_empty_shape(out.shape):
            METHOD_NAME(s, out)
    return s
def schedule_concatenate(outs):
    """X86 schedule for concatenate op.
    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of injective in the format
        of an array of tensors.
    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    def vectorize(sch, tensor, vectorize_limit):
        """Internal vectorization function for concatenate."""
        # NOTE(review): this helper mixes the closure variable `s` with the
        # parameter `sch`; it only works because the single call site passes
        # `s` as `sch`. Confirm before reusing it elsewhere.
        inner_axis = s[tensor].op.axis[len(s[tensor].op.axis) - 1]
        # Check that the tensor shape is static. Otherwise skip vectorization.
        if isinstance(tensor.shape[len(tensor.shape) - 1], IntImm):
            inner_length = tensor.shape[len(tensor.shape) - 1].value
            if inner_length <= vectorize_limit:
                sch[tensor].vectorize(inner_axis)
            else:
                # Split by the largest divisor <= the limit so the
                # vectorized extent divides the axis evenly.
                split_factor = 1
                for i in range(vectorize_limit, 1, -1):
                    if inner_length % i == 0:
                        split_factor = i
                        break
                if split_factor > 1:
                    _, inner_i = sch[tensor].split(inner_axis, split_factor)
                    sch[tensor].vectorize(inner_i)
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    x = outs[0]
    s = te.create_schedule([x.op for x in outs])
    te.schedule.AutoInlineInjective(s)
    # Fuse outer axes for parallelism; vectorization is only applied on the
    # high-rank (>= 5 axes) path.
    if len(s[x].op.axis) >= 5:
        fused = s[x].fuse(s[x].op.axis[0], s[x].op.axis[1], s[x].op.axis[2])
        vectorize(s, x, 64)
        s[x].parallel(fused)
    elif len(s[x].op.axis) >= 3:
        fused = s[x].fuse(s[x].op.axis[0], s[x].op.axis[1])
        s[x].parallel(fused)
    else:
        s[x].parallel(s[x].op.axis[0])
    return s
def schedule_concatenate_cpu(outs):
    """X86 schedule for concatenate op.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description in the format
        of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    if isinstance(outs, te.tensor.Tensor):
        outs = [outs]
    s = te.create_schedule([x.op for x in outs])
    visited = []

    def traverse(op):
        # Schedule every injective op found on the way down.
        if tag.is_injective(op.tag):
            schedule_injective_for_concat(s, op.output(0))
        for tensor in op.input_tensors:
            # Recurse only into non-placeholder, not-yet-visited producers.
            if tensor.op.input_tensors and tensor.op not in visited:
                traverse(tensor.op)
        visited.append(op)

    for out in outs:
        traverse(out.op)
    return s
# Elementwise and broadcast ops reuse the generic injective schedule.
schedule_elemwise = schedule_injective
schedule_broadcast = schedule_injective
298,948 | get events | from collections import defaultdict
from datetime import datetime
from typing import Dict, List, Any, Optional
from llama_index.callbacks.base_handler import BaseCallbackHandler
from llama_index.callbacks.schema import (
CBEvent,
CBEventType,
EventStats,
TIMESTAMP_FORMAT,
BASE_TRACE_EVENT,
)
class LlamaDebugHandler(BaseCallbackHandler):
"""Callback handler that keeps track of debug info.
NOTE: this is a beta feature. The usage within our codebase, and the interface
may change.
This handler simply keeps track of event starts/ends, separated by event types.
You can use this callback handler to keep track of and debug events.
Args:
event_starts_to_ignore (Optional[List[CBEventType]]): list of event types to
ignore when tracking event starts.
event_ends_to_ignore (Optional[List[CBEventType]]): list of event types to
ignore when tracking event ends.
"""
def __init__(
self,
event_starts_to_ignore: Optional[List[CBEventType]] = None,
event_ends_to_ignore: Optional[List[CBEventType]] = None,
print_trace_on_end: bool = True,
) -> None:
"""Initialize the llama debug handler."""
self._event_pairs_by_type: Dict[CBEventType, List[CBEvent]] = defaultdict(list)
self._event_pairs_by_id: Dict[str, List[CBEvent]] = defaultdict(list)
self._sequential_events: List[CBEvent] = []
self._cur_trace_id: Optional[str] = None
self._trace_map: Dict[str, List[str]] = defaultdict(list)
self.print_trace_on_end = print_trace_on_end
event_starts_to_ignore = (
event_starts_to_ignore if event_starts_to_ignore else []
)
event_ends_to_ignore = event_ends_to_ignore if event_ends_to_ignore else []
super().__init__(
event_starts_to_ignore=event_starts_to_ignore,
event_ends_to_ignore=event_ends_to_ignore,
)
def on_event_start(
self,
event_type: CBEventType,
payload: Optional[Dict[str, Any]] = None,
event_id: str = "",
**kwargs: Any,
) -> str:
"""Store event start data by event type.
Args:
event_type (CBEventType): event type to store.
payload (Optional[Dict[str, Any]]): payload to store.
event_id (str): event id to store.
"""
event = CBEvent(event_type, payload=payload, id_=event_id)
self._event_pairs_by_type[event.event_type].append(event)
self._event_pairs_by_id[event.id_].append(event)
self._sequential_events.append(event)
return event.id_
    def on_event_end(
        self,
        event_type: CBEventType,
        payload: Optional[Dict[str, Any]] = None,
        event_id: str = "",
        **kwargs: Any,
    ) -> None:
        """Store event end data by event type.

        Args:
            event_type (CBEventType): event type to store.
            payload (Optional[Dict[str, Any]]): payload to store.
            event_id (str): event id to store.
        """
        event = CBEvent(event_type, payload=payload, id_=event_id)
        self._event_pairs_by_type[event.event_type].append(event)
        self._event_pairs_by_id[event.id_].append(event)
        self._sequential_events.append(event)
        # NOTE(review): this clears the trace map on *every* event end, not
        # just at trace boundaries (end_trace also rebuilds it). Confirm the
        # reset here is intentional before relying on _trace_map mid-trace.
        self._trace_map = defaultdict(list)
def METHOD_NAME(self, event_type: Optional[CBEventType] = None) -> List[CBEvent]:
"""Get all events for a specific event type."""
if event_type is not None:
return self._event_pairs_by_type[event_type]
return self._sequential_events
def _get_event_pairs(self, events: List[CBEvent]) -> List[List[CBEvent]]:
"""Helper function to pair events according to their ID."""
event_pairs: Dict[str, List[CBEvent]] = defaultdict(list)
for event in events:
event_pairs[event.id_].append(event)
sorted_events = sorted(
event_pairs.values(),
key=lambda x: datetime.strptime(x[0].time, TIMESTAMP_FORMAT),
)
return sorted_events
def _get_time_stats_from_event_pairs(
self, event_pairs: List[List[CBEvent]]
) -> EventStats:
"""Calculate time-based stats for a set of event pairs"""
total_secs = 0.0
for event_pair in event_pairs:
start_time = datetime.strptime(event_pair[0].time, TIMESTAMP_FORMAT)
end_time = datetime.strptime(event_pair[-1].time, TIMESTAMP_FORMAT)
total_secs += (end_time - start_time).total_seconds()
stats = EventStats(
total_secs=total_secs,
average_secs=total_secs / len(event_pairs),
total_count=len(event_pairs),
)
return stats
def get_event_pairs(
self, event_type: Optional[CBEventType] = None
) -> List[List[CBEvent]]:
"""Pair events by ID, either all events or a sepcific type."""
if event_type is not None:
return self._get_event_pairs(self._event_pairs_by_type[event_type])
return self._get_event_pairs(self._sequential_events)
def get_llm_inputs_outputs(self) -> List[List[CBEvent]]:
    """Get the exact LLM inputs and outputs."""
    llm_events = self._event_pairs_by_type[CBEventType.LLM]
    return self._get_event_pairs(llm_events)
def get_event_time_info(
    self, event_type: Optional[CBEventType] = None
) -> EventStats:
    """Compute timing statistics over paired events of the given type."""
    pairs = self.get_event_pairs(event_type)
    return self._get_time_stats_from_event_pairs(pairs)
def flush_event_logs(self) -> None:
    """Clear all events from memory."""
    # Rebind fresh containers; previously handed-out references keep old data.
    self._sequential_events = []
    self._event_pairs_by_id = defaultdict(list)
    self._event_pairs_by_type = defaultdict(list)
def start_trace(self, trace_id: Optional[str] = None) -> None:
    """Launch a trace."""
    self._cur_trace_id = trace_id
    self._trace_map = defaultdict(list)
def end_trace(
    self,
    trace_id: Optional[str] = None,
    trace_map: Optional[Dict[str, List[str]]] = None,
) -> None:
    """Shutdown the current trace."""
    # A missing/empty trace_map falls back to an empty mapping.
    if trace_map:
        self._trace_map = trace_map
    else:
        self._trace_map = defaultdict(list)
    if self.print_trace_on_end:
        self.print_trace_map()
def _print_trace_map(self, cur_event_id: str, level: int = 0) -> None:
    """Recursively print trace map to terminal for debugging."""
    pair = self._event_pairs_by_id[cur_event_id]
    if pair:
        stats = self._get_time_stats_from_event_pairs([pair])
        indent = "  " * level
        print(
            f"{indent}|_{pair[0].event_type} -> ",
            f"{stats.total_secs} seconds",
            flush=True,
        )
    # Depth-first descent through this event's children.
    for child_id in self._trace_map[cur_event_id]:
        self._print_trace_map(child_id, level=level + 1)
def print_trace_map(self) -> None:
    """Print simple trace map to terminal for debugging of the most recent trace."""
    banner = "*" * 10
    print(banner, flush=True)
    print(f"Trace: {self._cur_trace_id}", flush=True)
    self._print_trace_map(BASE_TRACE_EVENT, level=1)
    print(banner, flush=True)
@property
def event_pairs_by_type(self) -> Dict[CBEventType, List[CBEvent]]:
    """Recorded events grouped by their CBEventType."""
    return self._event_pairs_by_type
@property
def events_pairs_by_id(self) -> Dict[str, List[CBEvent]]:
    """Recorded events grouped by event id.

    NOTE(review): the name reads like a typo for ``event_pairs_by_id`` but
    is public interface, so it is kept as-is.
    """
    return self._event_pairs_by_id
@property
def sequential_events(self) -> List[CBEvent]:
    """All recorded events in arrival order."""
    # Fix: removed a stray trailing "|" token fused onto the return line.
    return self._sequential_events
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import paddle
from net import DSIN_layer
class StaticModel():
    """Static-graph wrapper around the DSIN network.

    Declares the input feeds, builds the forward pass with its BCE loss and
    AUC metric, and creates the (optionally fleet-distributed) optimizer.
    """

    def __init__(self, config):
        self.cost = None
        # Bug fix: net() stores the loss in self._cost and create_optimizer()
        # reads it, but the original __init__ only initialized self.cost, so
        # calling create_optimizer() before net() raised AttributeError.
        self._cost = None
        self.config = config
        self._init_hyper_parameters()

    def _init_hyper_parameters(self):
        """Read vocabulary sizes, embedding size and learning rate from config."""
        self.user_size = self.config.get("hyper_parameters.user_size")
        self.cms_segid_size = self.config.get(
            "hyper_parameters.cms_segid_size")
        self.cms_group_size = self.config.get(
            "hyper_parameters.cms_group_size")
        self.final_gender_size = self.config.get(
            "hyper_parameters.final_gender_size")
        self.age_level_size = self.config.get(
            "hyper_parameters.age_level_size")
        self.pvalue_level_size = self.config.get(
            "hyper_parameters.pvalue_level_size")
        self.shopping_level_size = self.config.get(
            "hyper_parameters.shopping_level_size")
        self.occupation_size = self.config.get(
            "hyper_parameters.occupation_size")
        self.new_user_class_level_size = self.config.get(
            "hyper_parameters.new_user_class_level_size")
        self.adgroup_size = self.config.get("hyper_parameters.adgroup_size")
        self.cate_size = self.config.get("hyper_parameters.cate_size")
        self.campaign_size = self.config.get("hyper_parameters.campaign_size")
        self.customer_size = self.config.get("hyper_parameters.customer_size")
        self.brand_size = self.config.get("hyper_parameters.brand_size")
        self.pid_size = self.config.get("hyper_parameters.pid_size")
        self.feat_embed_size = self.config.get(
            "hyper_parameters.feat_embed_size")
        self.learning_rate = self.config.get(
            "hyper_parameters.optimizer.learning_rate", 0.008)

    def METHOD_NAME(self, pred, label):
        """Binary cross-entropy between the predicted probability and label."""
        return paddle.nn.BCELoss()(pred, label)

    def create_feeds(self, is_infer=False):
        """Declare the static-graph input tensors and return them as a list."""
        sparse_input = paddle.static.data(
            name="sparse_tensor", shape=[None, 15], dtype="int64")
        dense_input = paddle.static.data(
            name="dense_tensor", shape=[None, ], dtype="float32")
        sess_input = paddle.static.data(
            name="sess_tensor", shape=[None, 10, 10], dtype="int64")
        sess_length_input = paddle.static.data(
            name="sess_length_tensor", shape=[None, ], dtype="int64")
        label = paddle.static.data(name="label", shape=[None, ], dtype="int64")
        feed_list = [
            sparse_input, dense_input, sess_input, sess_length_input, label
        ]
        return feed_list

    def net(self, input, is_infer=False):
        """Build the DSIN forward pass, the BCE loss and the AUC metric.

        Args:
            input: the feed list from create_feeds(); the last entry is the label.
            is_infer: when True, only the AUC fetch dict is returned.
        """
        inputs, label = (input[0], input[1], input[2], input[3]), input[-1]
        label = label.reshape([-1, 1])
        DSIN_model = DSIN_layer(
            self.user_size,
            self.adgroup_size,
            self.pid_size,
            self.cms_segid_size,
            self.cms_group_size,
            self.final_gender_size,
            self.age_level_size,
            self.pvalue_level_size,
            self.shopping_level_size,
            self.occupation_size,
            self.new_user_class_level_size,
            self.campaign_size,
            self.customer_size,
            self.cate_size,
            self.brand_size,
            sparse_embed_size=self.feat_embed_size,
            l2_reg_embedding=1e-6)
        pred = DSIN_model.forward(inputs)
        loss = self.METHOD_NAME(pred, paddle.cast(label, "float32"))
        # AUC needs a 2-column probability tensor: [P(neg), P(pos)].
        predict_2d = paddle.concat(x=[1 - pred, pred], axis=1)
        auc, batch_auc, _ = paddle.static.auc(input=predict_2d,
                                              label=label,
                                              num_thresholds=2**12,
                                              slide_steps=20)
        auc = paddle.cast(auc, "float32")
        if is_infer:
            return {"auc": auc}
        self._cost = loss
        return {'auc': auc, 'cost': loss}

    def create_optimizer(self, strategy=None):
        """Create the Adam optimizer; wrap it for fleet when distributed."""
        optimizer = paddle.optimizer.Adam(
            learning_rate=self.learning_rate, lazy_mode=False)
        if strategy is not None:
            import paddle.distributed.fleet as fleet
            optimizer = fleet.distributed_optimizer(optimizer, strategy)
        optimizer.minimize(self._cost)

    def infer_net(self, input):
        """Run the network in inference mode (returns only the AUC fetch)."""
        return self.net(input, is_infer=True)
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_by_location_request(location: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
    # AutoRest-generated request builder for GET .../locations/{location}/usages.
    # Kept byte-identical apart from comments: edits are lost on regeneration.
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # Caller-supplied api-version (kwarg or query param) wins over the default.
    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-01-01"))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = kwargs.pop(
        "template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.Storage/locations/{location}/usages"
    )  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
        "location": _SERIALIZER.url("location", location, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class UsagesOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.storage.v2021_01_01.StorageManagementClient`'s
        :attr:`usages` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs):
        # Positional args are injected by the generated client in this fixed
        # order: client, config, serializer, deserializer, api_version.
        # Keyword fallbacks allow explicit construction.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
        self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version")

    @distributed_trace
    def list_by_location(self, location: str, **kwargs: Any) -> Iterable["_models.Usage"]:
        """Gets the current usage count and the limit for the resources of the location under the
        subscription.

        :param location: The location of the Azure Storage resource. Required.
        :type location: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either Usage or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.storage.v2021_01_01.models.Usage]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2021-01-01"))
        cls: ClsType[_models.UsageListResult] = kwargs.pop("cls", None)

        # Map HTTP error codes onto the corresponding azure-core exceptions.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page: build the templated request; later pages: follow
            # next_link but re-apply the client's api-version.
            if not next_link:
                request = build_list_by_location_request(
                    location=location,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.list_by_location.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Deserialize one page; returns (continuation token, items).
            deserialized = self._deserialize("UsageListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return None, iter(list_of_elem)

        def METHOD_NAME(next_link=None):
            # Fetch one page and raise a mapped error on non-200 responses.
            request = prepare_request(next_link)

            _stream = False
            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(METHOD_NAME, extract_data)

    list_by_location.metadata = {
        "url": "/subscriptions/{subscriptionId}/providers/Microsoft.Storage/locations/{location}/usages"
    }
import logging
import os
import time
from datetime import datetime
from shlex import quote, split
from threading import RLock
import kubernetes
from kubernetes.config.config_exception import ConfigException
import docker
from mlflow.entities import RunStatus
from mlflow.exceptions import ExecutionException
from mlflow.projects.submitted_run import SubmittedRun
_logger = logging.getLogger(__name__)
_DOCKER_API_TIMEOUT = 300
def push_image_to_registry(image_tag):
    """Push ``image_tag`` to its registry and return the image digest id."""
    client = docker.from_env(timeout=_DOCKER_API_TIMEOUT)
    _logger.info("=== Pushing docker image %s ===", image_tag)
    # push(..., decode=True) streams status dicts; abort on the first error.
    for status_line in client.images.push(repository=image_tag, stream=True, decode=True):
        if status_line.get("error"):
            raise ExecutionException(
                "Error while pushing to docker registry: {error}".format(error=status_line["error"])
            )
    return client.images.get_registry_data(image_tag).id
def _get_kubernetes_job_definition(
    project_name, image_tag, image_digest, command, env_vars, job_template
):
    """Fill *job_template* with the job name, pinned container image, command
    and environment for an MLflow project run.

    Args:
        project_name: used for the container name and the unique job name.
        image_tag / image_digest: combined into ``tag@digest`` to pin the image.
        command: container command as a list of strings.
        env_vars: environment variables for the container (not mutated).
        job_template: parsed Kubernetes Job spec; mutated in place and returned.

    Returns:
        The updated job_template.
    """
    container_image = image_tag + "@" + image_digest
    timestamp = datetime.now().strftime("%Y-%m-%d-%H-%M-%S-%f")
    job_name = f"{project_name}-{timestamp}"
    _logger.info("=== Creating Job %s ===", job_name)
    # Bug fix: copy before applying the KUBE_MLFLOW_TRACKING_URI override so
    # the caller's env_vars dict is no longer mutated as a side effect.
    env = dict(env_vars)
    if os.environ.get("KUBE_MLFLOW_TRACKING_URI") is not None:
        env["MLFLOW_TRACKING_URI"] = os.environ["KUBE_MLFLOW_TRACKING_URI"]
    environment_variables = [{"name": k, "value": v} for k, v in env.items()]

    container = job_template["spec"]["template"]["spec"]["containers"][0]
    job_template["metadata"]["name"] = job_name
    container["name"] = project_name
    container["image"] = container_image
    container["command"] = command
    # Idiom fix: setdefault replaces the `"env" not in ....keys()` check.
    container.setdefault("env", [])
    container["env"] += environment_variables
    return job_template
def _get_run_command(entrypoint_command):
formatted_command = []
for cmd in entrypoint_command:
formatted_command.extend([quote(s) for s in split(cmd)])
return formatted_command
def _load_kube_context(context=None):
    """Load a kubeconfig context, falling back to in-cluster configuration.

    Args:
        context: kubeconfig context name; None selects the default context.
    """
    try:
        # trying to load either the context passed as arg or, if None,
        # the one provided as env var `KUBECONFIG` or in `~/.kube/config`
        kubernetes.config.load_kube_config(context=context)
    except (OSError, ConfigException) as e:
        # No usable kubeconfig: assume we are running inside the cluster.
        _logger.debug('Error loading kube context "%s": %s', context, e)
        _logger.info("No valid kube config found, using in-cluster configuration")
        kubernetes.config.load_incluster_config()
def run_kubernetes_job(
    project_name,
    active_run,
    image_tag,
    image_digest,
    command,
    env_vars,
    kube_context=None,
    job_template=None,
):
    """Materialize the job spec, submit it to Kubernetes, and wrap the run."""
    job_spec = _get_kubernetes_job_definition(
        project_name, image_tag, image_digest, _get_run_command(command), env_vars, job_template
    )
    job_name = job_spec["metadata"]["name"]
    job_namespace = job_spec["metadata"]["namespace"]
    _load_kube_context(context=kube_context)
    batch_api = kubernetes.client.BatchV1Api()
    batch_api.create_namespaced_job(namespace=job_namespace, body=job_spec, pretty=True)
    return KubernetesSubmittedRun(active_run.info.METHOD_NAME, job_name, job_namespace)
class KubernetesSubmittedRun(SubmittedRun):
    """
    Instance of SubmittedRun corresponding to a Kubernetes Job run launched to run an MLflow
    project.

    :param mlflow_run_id: ID of the MLflow project run.
    :param job_name: Kubernetes job name.
    :param job_namespace: Kubernetes job namespace.
    """

    # How often to poll run status when waiting on a run
    POLL_STATUS_INTERVAL = 5

    def __init__(self, mlflow_run_id, job_name, job_namespace):
        super().__init__()
        self._mlflow_run_id = mlflow_run_id
        self._job_name = job_name
        self._job_namespace = job_namespace
        # _status transitions are guarded by _status_lock so a cancel() racing
        # with _update_status() cannot regress a terminal state.
        self._status = RunStatus.SCHEDULED
        self._status_lock = RLock()
        self._kube_api = kubernetes.client.BatchV1Api()

    @property
    def METHOD_NAME(self):
        """ID of the MLflow run backing this Kubernetes job."""
        return self._mlflow_run_id

    def wait(self):
        """Poll until the job reaches a terminal state; True iff FINISHED."""
        while not RunStatus.is_terminated(self._update_status()):
            time.sleep(self.POLL_STATUS_INTERVAL)

        return self._status == RunStatus.FINISHED

    def _update_status(self):
        """Read the job status from the Kubernetes API and fold it into
        the cached RunStatus (terminal states are sticky)."""
        api_response = self._kube_api.read_namespaced_job_status(
            name=self._job_name, namespace=self._job_namespace, pretty=True
        )
        status = api_response.status
        with self._status_lock:
            if RunStatus.is_terminated(self._status):
                # Never downgrade a terminal state.
                return self._status
            if self._status == RunStatus.SCHEDULED:
                if api_response.status.start_time is None:
                    _logger.info("Waiting for Job to start")
                else:
                    _logger.info("Job started.")
                    self._status = RunStatus.RUNNING
            if status.conditions is not None:
                for condition in status.conditions:
                    if condition.status == "True":
                        _logger.info(condition.message)
                        if condition.type == "Failed":
                            self._status = RunStatus.FAILED
                        elif condition.type == "Complete":
                            self._status = RunStatus.FINISHED
        return self._status

    def get_status(self):
        """Return the cached status, refreshing it if not yet terminal."""
        status = self._status
        return status if RunStatus.is_terminated(status) else self._update_status()

    def cancel(self):
        """Delete the Kubernetes job unless the run already terminated."""
        with self._status_lock:
            if not RunStatus.is_terminated(self._status):
                _logger.info("Cancelling job.")
                self._kube_api.delete_namespaced_job(
                    name=self._job_name,
                    namespace=self._job_namespace,
                    body=kubernetes.client.V1DeleteOptions(),
                    pretty=True,
                )
                self._status = RunStatus.KILLED
                _logger.info("Job cancelled.")
            else:
                _logger.info("Attempting to cancel a job that is already terminated.")
import copy
import json
def checkForEmpty(response):
    """Return parsed research entries, or [] when the service answered 404."""
    if response.status_code == 404:
        return []
    return parseAllResearch(json.loads(response.text))
def parsePropBack(prop):
    """Convert an internal property dict back to the port-property list form."""
    known_types = ("fileStorage", "metadata")
    data = [
        {"portType": port_type, "value": True}
        for port_type in known_types
        if port_type in prop["type"]
    ]
    custom = [
        {"key": key, "value": value}
        for key, value in prop.get("customProperties", {}).items()
    ]
    if custom:
        data.append({"portType": "customProperties", "value": custom})
    return data
def parsePortBack(port):
    """Convert a single internal port entry back to the transport format."""
    converted_props = parsePropBack(port["properties"])
    return {"port": port["port"], "properties": converted_props}
def parseResearchBack(response):
    """Return a deep copy of *response* with both port lists converted back."""
    result = copy.deepcopy(response)
    result["portIn"] = [parsePortBack(port) for port in response["portIn"]]
    result["portOut"] = [parsePortBack(port) for port in response["portOut"]]
    return result
def parseCustomProp(customProp):
    """Fold a list of {key, value} records into a plain dict."""
    result = {}
    for entry in customProp:
        result[entry["key"]] = entry["value"]
    return result
def parseProp(prop):
    """Split port properties into a type list and optional customProperties."""
    parsed = {"type": []}
    for entry in prop:
        if entry["portType"] == "customProperties":
            parsed["customProperties"] = parseCustomProp(entry["value"])
        else:
            parsed["type"].append(entry["portType"])
    return parsed
def parsePort(port):
    """Convert one transport-format port entry to the internal format."""
    return {"port": port["port"], "properties": parseProp(port["properties"])}
def parseResearch(response):
    """Return a deep copy of *response* with port lists parsed internally."""
    parsed = copy.deepcopy(response)
    parsed["portIn"] = [parsePort(port) for port in response["portIn"]]
    parsed["portOut"] = [parsePort(port) for port in response["portOut"]]
    return parsed
def parseAllResearch(response):
    """Parse every research entry in *response* to the internal format."""
    return [parseResearch(entry) for entry in response]
def parseAllResearchBack(response):
    """Convert every research entry in *response* back to transport format."""
    return [parseResearchBack(entry) for entry in response]
def listContainsService(arr: list, service: dict) -> bool:
    """Return True if *arr* has an entry with the same servicename as *service*.

    A match is detected on either the top-level ``servicename`` key or the
    nested ``informations.servicename`` key; entries missing those keys are
    skipped (the original silently swallowed *all* exceptions with bare
    ``except Exception: pass`` — now only missing/mistyped keys are tolerated).
    """
    missing = object()  # sentinel: distinguishes "key absent" from value None

    def _name(entry, nested):
        try:
            if nested:
                return entry["informations"]["servicename"]
            return entry["servicename"]
        except (KeyError, TypeError):
            return missing

    for el in arr:
        for nested in (False, True):
            mine = _name(el, nested)
            theirs = _name(service, nested)
            if mine is not missing and theirs is not missing and mine == theirs:
                return True
    return False
def removeDuplicates(response: list) -> list:
    """Removes duplicate entries of equal servicenames.

    Args:
        response (list): list of service entries

    Returns:
        Same as response, but duplicates removed.
    """
    unique = []
    for entry in response:
        if not listContainsService(unique, entry):
            unique.append(entry)
    return unique
def applyFilters(response: list, helperSession=None) -> list:
    """Filter the servicelist by the session's OAuth filter settings.

    Args:
        response (list): service entries (see openapi spec for properties).
        helperSession: session object override for unit tests; when None the
            flask request session is used.

    Returns:
        The filtered list. It is also cached in ``session["servicelist"]``.
    """
    if helperSession is not None:
        session = helperSession
    else:
        from flask import session

    filters = session["oauth"]
    if "filters" in filters:
        filters = filters["filters"]

    # "only" is a whitelist of servicenames; an empty/missing list keeps all.
    if "only" in filters and len(filters["only"]) > 0:
        kept = [svc for svc in response if svc["informations"]["servicename"] in filters["only"]]
    else:
        kept = response

    # "except" is a blacklist applied after the whitelist.
    if "except" in filters and len(filters["except"]) > 0:
        kept = [svc for svc in kept if svc["informations"]["servicename"] not in filters["except"]]

    session["servicelist"] = kept
    return kept
def METHOD_NAME(servicename: str, helperSession=None) -> bool:
    """Checks if servicename was in latest servicelist response for the current session.

    Args:
        servicename: The servicename (or a dict carrying a ``servicename``
            key) to look up in the latest servicelist.
        helperSession: Use this session for unittests. Defaults to None.

    Returns:
        bool: True, if servicename was in latest servicelist response.
    """
    if helperSession is not None:
        session = helperSession
    else:
        from flask import session

    if isinstance(servicename, dict):
        servicename = servicename["servicename"]

    # Bug fix: test for the key BEFORE indexing. The original accessed
    # session["servicelist"] first, so its `"servicelist" in session` guard
    # could never run and a missing key raised KeyError instead of False.
    if "servicelist" not in session:
        return False

    return any(
        servicename == service["informations"]["servicename"]
        for service in session["servicelist"]
    )
import time
from datetime import timedelta
from rest_framework import serializers
from apps.base.models import UserNotificationPolicy
from apps.base.models.user_notification_policy import NotificationChannelAPIOptions
from apps.user_management.models import User
from common.api_helpers.custom_fields import OrganizationFilteredPrimaryKeyRelatedField
from common.api_helpers.exceptions import Forbidden
from common.api_helpers.mixins import EagerLoadingMixin
# This serializer should not be user directly
# This serializer should not be used directly
class UserNotificationPolicyBaseSerializer(EagerLoadingMixin, serializers.ModelSerializer):
    """Shared fields and (de)serialization logic for notification policies."""

    id = serializers.CharField(read_only=True, source="public_primary_key")
    notify_by = serializers.ChoiceField(
        read_only=False,
        required=False,
        default=UserNotificationPolicy.NotificationChannel.SLACK,
        choices=NotificationChannelAPIOptions.AVAILABLE_FOR_USE,
    )
    step = serializers.ChoiceField(
        read_only=False,
        required=False,
        default=UserNotificationPolicy.Step.NOTIFY,
        choices=UserNotificationPolicy.Step.choices,
    )

    SELECT_RELATED = [
        "user",
    ]

    class Meta:
        model = UserNotificationPolicy
        fields = ["id", "step", "notify_by", "wait_delay", "important", "user"]
        # Field "order" is not consumed by the plugin frontend, but is used by the mobile app
        # TODO: remove this field when the mobile app is updated
        fields += ["order"]
        read_only_fields = ["order"]

    def to_internal_value(self, data):
        """Normalize ``wait_delay`` (accepts H:M:S or raw seconds) and apply
        the ``notify_by`` default before standard validation."""
        if data.get("wait_delay", None):
            try:
                time.strptime(data["wait_delay"], "%H:%M:%S")
            except ValueError:
                # Not H:M:S — try to interpret the value as a float number
                # of seconds and re-render it as H:M:S via timedelta.
                try:
                    data["wait_delay"] = str(timedelta(seconds=float(data["wait_delay"])))
                except ValueError:
                    raise serializers.ValidationError("Invalid wait delay format")
        data = self._notify_by_to_internal_value(data)
        return super().to_internal_value(data)

    def to_representation(self, instance):
        result = super().to_representation(instance)
        result = self.METHOD_NAME(instance, result)
        return result

    # _notify_by_to_internal_value and _notify_by_to_representation are exists because of in EscalationPolicy model
    # notify_by field has default value NotificationChannel.SLACK and not nullable
    # We don't want any notify_by value in response if step != Step.NOTIFY
    def _notify_by_to_internal_value(self, data):
        if not data.get("notify_by", None):
            data["notify_by"] = UserNotificationPolicy.NotificationChannel.SLACK
        return data

    def METHOD_NAME(self, instance, result):
        # Hide notify_by in the output for non-NOTIFY steps (see note above).
        if instance.step != UserNotificationPolicy.Step.NOTIFY:
            result["notify_by"] = None
        return result
class UserNotificationPolicySerializer(UserNotificationPolicyBaseSerializer):
    """Create-capable serializer; enforces self-or-admin permissions."""

    user = OrganizationFilteredPrimaryKeyRelatedField(
        queryset=User.objects,
        required=False,
        allow_null=True,
        many=False,
        display_func=lambda instance: instance.username,
    )
    notify_by = serializers.ChoiceField(
        choices=NotificationChannelAPIOptions.AVAILABLE_FOR_USE,
        default=NotificationChannelAPIOptions.DEFAULT_NOTIFICATION_CHANNEL,
    )

    def create(self, validated_data):
        # Policies default to the requesting user when no user is supplied;
        # only the user themself or an admin may create policies for a user.
        user = validated_data.get("user") or self.context["request"].user
        organization = self.context["request"].auth.organization

        self_or_admin = user.self_or_admin(user_to_check=self.context["request"].user, organization=organization)
        if not self_or_admin:
            raise Forbidden()

        instance = UserNotificationPolicy.objects.create(**validated_data)

        return instance
class UserNotificationPolicyUpdateSerializer(UserNotificationPolicyBaseSerializer):
    """Update-only serializer; ``user`` and ``important`` are immutable here."""

    user = OrganizationFilteredPrimaryKeyRelatedField(
        many=False,
        read_only=True,
        display_func=lambda instance: instance.username,
    )

    class Meta(UserNotificationPolicyBaseSerializer.Meta):
        read_only_fields = UserNotificationPolicyBaseSerializer.Meta.read_only_fields + ["user", "important"]

    def update(self, instance, validated_data):
        # Only the policy owner or an org admin may modify a policy.
        self_or_admin = instance.user.self_or_admin(
            user_to_check=self.context["request"].user, organization=self.context["request"].user.organization
        )
        if not self_or_admin:
            raise Forbidden()
        # A WAIT step without an explicit delay gets the 5-minute default.
        if validated_data.get("step") == UserNotificationPolicy.Step.WAIT and not validated_data.get("wait_delay"):
            validated_data["wait_delay"] = UserNotificationPolicy.FIVE_MINUTES
        return super().update(instance, validated_data)
#!/usr/bin/env python3
import torch
from captum.attr import ClassSummarizer, CommonStats
from tests.helpers.basic import BaseTest
class Test(BaseTest):
    """Unit tests for ClassSummarizer: labeled, unlabeled, and single-label
    update paths."""

    def class_test(self, data, classes, x_sizes):
        """Feed (inputs, labels) batches to a ClassSummarizer and verify both
        the overall summary and the per-class summaries expose stats tensors
        of the expected sizes for exactly the expected classes."""
        summarizer = ClassSummarizer(stats=CommonStats())
        for x, y in data:
            summarizer.update(x, y)

        summ = summarizer.summary
        self.assertIsNotNone(summ)
        self.assertIsInstance(summ, list)

        for s, size in zip(summ, x_sizes):
            self.assertIsInstance(s, dict)
            for key in s:
                self.assertEqual(s[key].size(), size)

        self.assertIsNotNone(summarizer.class_summaries)

        # Track which classes were seen so we can assert exact coverage.
        all_classes = torch.zeros(len(classes))
        class_summaries = summarizer.class_summaries
        all_keys = set(class_summaries.keys())
        for i, clazz in enumerate(classes):
            self.assertTrue(clazz in class_summaries)
            all_keys.remove(clazz)
            all_classes[i] = 1

            summ = class_summaries[clazz]
            self.assertIsNotNone(summ)
            self.assertIsInstance(summ, list)

            for s, size in zip(summ, x_sizes):
                self.assertIsInstance(s, dict)
                for key in s:
                    self.assertEqual(s[key].size(), size)

        # No extra classes, and every expected class was present.
        self.assertEqual(len(all_keys), 0)
        self.assertEqual(all_classes.sum(), len(classes))

    def test_classes(self):
        """Exercise class_test over several batch sizes, input-shape tuples
        and label types (ints and strings)."""
        sizes_to_test = [
            # ((1,),),
            ((3, 2, 10, 3), (1,)),
            # ((20,),),
        ]
        list_of_classes = [
            list(range(100)),
            ["%d" % i for i in range(100)],
            list(range(300, 400)),
        ]
        for batch_size in [None, 1, 4]:
            for sizes, classes in zip(sizes_to_test, list_of_classes):

                def create_batch_labels(batch_idx):
                    # Slice the label list to match the batch; None means
                    # a single scalar label per update.
                    if batch_size is None:
                        # batch_size = 1
                        return classes[batch_idx]

                    return classes[
                        batch_idx * batch_size : (batch_idx + 1) * batch_size
                    ]

                bs = 1 if batch_size is None else batch_size
                num_batches = len(classes) // bs
                sizes_plus_batch = tuple((bs,) + si for si in sizes)

                data = [
                    (
                        tuple(torch.randn(si) for si in sizes_plus_batch),
                        create_batch_labels(batch_idx),
                    )
                    for batch_idx in range(num_batches)
                ]

                with self.subTest(
                    batch_size=batch_size, sizes=sizes_plus_batch, classes=classes
                ):
                    self.class_test(data, classes, sizes)

    def METHOD_NAME(self) -> None:
        """Updates without labels produce a plain summary dict and an empty
        class_summaries mapping."""
        size = (30, 20)
        summarizer = ClassSummarizer(stats=CommonStats())

        for _ in range(10):
            x = torch.randn(size)
            summarizer.update(x)

        summ = summarizer.summary
        self.assertIsNotNone(summ)
        self.assertIsInstance(summ, dict)

        for key in summ:
            self.assertTrue(summ[key].size() == size)

        self.assertIsNotNone(summarizer.class_summaries)
        self.assertIsInstance(summarizer.class_summaries, dict)
        self.assertEqual(len(summarizer.class_summaries), 0)

    def test_single_label(self) -> None:
        """With a single label, the overall summary must equal that label's
        class summary, for both int and str labels."""
        size = (4, 3, 2, 1)
        data = torch.randn((100,) + size)
        single_labels = [1, "apple"]

        for label in single_labels:
            summarizer = ClassSummarizer(stats=CommonStats())
            summarizer.update(data, label)

            summ1 = summarizer.summary
            summ2 = summarizer.class_summaries
            self.assertIsNotNone(summ1)
            self.assertIsNotNone(summ2)
            self.assertIsInstance(summ1, list)
            self.assertTrue(len(summ1) == 1)
            self.assertIsInstance(summ2, dict)
            self.assertTrue(label in summ2)
            self.assertTrue(len(summ1) == len(summ2[label]))

            for key in summ1[0].keys():
                self.assertTrue((summ1[0][key] == summ2[label][0][key]).all())
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetWorkloadNetworkPortMirroringResult',
'AwaitableGetWorkloadNetworkPortMirroringResult',
'get_workload_network_port_mirroring',
'get_workload_network_port_mirroring_output',
]
@pulumi.output_type
class GetWorkloadNetworkPortMirroringResult:
    """
    NSX Port Mirroring
    """
    def __init__(__self__, destination=None, direction=None, display_name=None, id=None, name=None, METHOD_NAME=None, revision=None, source=None, status=None, type=None):
        # Generated code: each argument is type-checked and stored via
        # pulumi.set so the @pulumi.getter properties can retrieve it.
        if destination and not isinstance(destination, str):
            raise TypeError("Expected argument 'destination' to be a str")
        pulumi.set(__self__, "destination", destination)
        if direction and not isinstance(direction, str):
            raise TypeError("Expected argument 'direction' to be a str")
        pulumi.set(__self__, "direction", direction)
        if display_name and not isinstance(display_name, str):
            raise TypeError("Expected argument 'display_name' to be a str")
        pulumi.set(__self__, "display_name", display_name)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", METHOD_NAME)
        if revision and not isinstance(revision, float):
            raise TypeError("Expected argument 'revision' to be a float")
        pulumi.set(__self__, "revision", revision)
        if source and not isinstance(source, str):
            raise TypeError("Expected argument 'source' to be a str")
        pulumi.set(__self__, "source", source)
        if status and not isinstance(status, str):
            raise TypeError("Expected argument 'status' to be a str")
        pulumi.set(__self__, "status", status)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def destination(self) -> Optional[str]:
        """
        Destination VM Group.
        """
        return pulumi.get(self, "destination")

    @property
    @pulumi.getter
    def direction(self) -> Optional[str]:
        """
        Direction of port mirroring profile.
        """
        return pulumi.get(self, "direction")

    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> Optional[str]:
        """
        Display name of the port mirroring profile.
        """
        return pulumi.get(self, "display_name")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def METHOD_NAME(self) -> str:
        """
        The provisioning state
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def revision(self) -> Optional[float]:
        """
        NSX revision number.
        """
        return pulumi.get(self, "revision")

    @property
    @pulumi.getter
    def source(self) -> Optional[str]:
        """
        Source VM Group.
        """
        return pulumi.get(self, "source")

    @property
    @pulumi.getter
    def status(self) -> str:
        """
        Port Mirroring Status.
        """
        return pulumi.get(self, "status")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
class AwaitableGetWorkloadNetworkPortMirroringResult(GetWorkloadNetworkPortMirroringResult):
    """Awaitable wrapper so the result can be used with ``await``."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable yield turns __await__ into a generator without
        # ever suspending; awaiting immediately returns a plain result copy.
        if False:
            yield self
        return GetWorkloadNetworkPortMirroringResult(
            destination=self.destination,
            direction=self.direction,
            display_name=self.display_name,
            id=self.id,
            name=self.name,
            METHOD_NAME=self.METHOD_NAME,
            revision=self.revision,
            source=self.source,
            status=self.status,
            type=self.type)
def get_workload_network_port_mirroring(port_mirroring_id: Optional[str] = None,
                                        private_cloud_name: Optional[str] = None,
                                        resource_group_name: Optional[str] = None,
                                        opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWorkloadNetworkPortMirroringResult:
    """
    NSX Port Mirroring
    Azure REST API version: 2022-05-01.

    :param str port_mirroring_id: NSX Port Mirroring identifier. Generally the same as the Port Mirroring display name
    :param str private_cloud_name: Name of the private cloud
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    __args__ = {
        'portMirroringId': port_mirroring_id,
        'privateCloudName': private_cloud_name,
        'resourceGroupName': resource_group_name,
    }
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # Synchronous invoke against the azure-native provider; .value is the raw result bag.
    __ret__ = pulumi.runtime.invoke('azure-native:avs:getWorkloadNetworkPortMirroring', __args__, opts=opts, typ=GetWorkloadNetworkPortMirroringResult).value
    plain_fields = ('destination', 'direction', 'display_name', 'id', 'name',
                    'revision', 'source', 'status', 'type')
    kwargs = {field: pulumi.get(__ret__, field) for field in plain_fields}
    # provisioning_state maps onto the METHOD_NAME constructor argument.
    kwargs['METHOD_NAME'] = pulumi.get(__ret__, 'provisioning_state')
    return AwaitableGetWorkloadNetworkPortMirroringResult(**kwargs)
@_utilities.lift_output_func(get_workload_network_port_mirroring)
def get_workload_network_port_mirroring_output(port_mirroring_id: Optional[pulumi.Input[str]] = None,
                                               private_cloud_name: Optional[pulumi.Input[str]] = None,
                                               resource_group_name: Optional[pulumi.Input[str]] = None,
                                               opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetWorkloadNetworkPortMirroringResult]:
    """
    NSX Port Mirroring
    Azure REST API version: 2022-05-01.

    :param str port_mirroring_id: NSX Port Mirroring identifier. Generally the same as the Port Mirroring display name
    :param str private_cloud_name: Name of the private cloud
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    # Body intentionally empty: lift_output_func unwraps Output-typed arguments,
    # delegates to get_workload_network_port_mirroring, and re-wraps the result.
    # (Fixed: a stray ' |' delimiter had corrupted this line.)
    ...
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VirtualApplianceSkusOperations:
    """VirtualApplianceSkusOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2020_05_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def list(
        self,
        **kwargs
    ) -> AsyncIterable["_models.NetworkVirtualApplianceSkuListResult"]:
        """List all SKUs available for a virtual appliance.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either NetworkVirtualApplianceSkuListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_05_01.models.NetworkVirtualApplianceSkuListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.NetworkVirtualApplianceSkuListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-05-01"
        accept = "application/json"

        def METHOD_NAME(next_link=None):
            # Build the GET request: first page uses the templated metadata URL;
            # continuation pages reuse next_link verbatim (it already carries the query string).
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and return (continuation token or None, items).
            deserialized = self._deserialize('NetworkVirtualApplianceSkuListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch one page through the pipeline; only HTTP 200 is accepted.
            request = METHOD_NAME(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkVirtualApplianceSkus'}  # type: ignore

    async def get(
        self,
        sku_name: str,
        **kwargs
    ) -> "_models.NetworkVirtualApplianceSku":
        """Retrieves a single available sku for network virtual appliance.

        :param sku_name: Name of the Sku.
        :type sku_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: NetworkVirtualApplianceSku, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2020_05_01.models.NetworkVirtualApplianceSku
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.NetworkVirtualApplianceSku"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-05-01"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'skuName': self._serialize.url("sku_name", sku_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('NetworkVirtualApplianceSku', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkVirtualApplianceSkus/{skuName}'}  # type: ignore
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from omegaconf import DictConfig
from nemo.collections.asr.models import EncDecCTCModel
from nemo.collections.asr.modules import SpectrogramAugmentation
from nemo.core.classes.common import Serialization
def get_class_path(cls):
    """Return the fully-qualified import path of *cls* (``module.ClassName``)."""
    return ".".join((cls.__module__, cls.__name__))
class MockSerializationImpl(Serialization):
    """Minimal Serialization subclass used to test config round-tripping."""
    def __init__(self, cfg: DictConfig):
        self.cfg = cfg
        # Records which concrete class was actually instantiated, so tests
        # can check base-vs-subclass dispatch of from_config_dict.
        self.value = self.__class__.__name__
class MockSerializationImplV2(MockSerializationImpl):
    """Subclass used to verify that the config 'target' path wins over the calling class."""
    pass
class TestSerialization:
    """Unit tests for Serialization.from_config_dict / to_config_dict."""

    @pytest.mark.unit
    def test_from_config_dict_with_cls(self):
        """Here we test that instantiation works for configs with cls class path in them.
        Note that just Serialization.from_config_dict can be used to create an object"""
        config = DictConfig(
            {
                'cls': 'nemo.collections.asr.modules.SpectrogramAugmentation',
                'params': {'rect_freq': 50, 'rect_masks': 5, 'rect_time': 120,},
            }
        )
        obj = Serialization.from_config_dict(config=config)
        assert isinstance(obj, SpectrogramAugmentation)

    @pytest.mark.unit
    def test_from_config_dict_without_cls(self):
        """Here we test that instantiation works for configs without cls class path in them.
        IMPORTANT: in this case, correct class type should call from_config_dict. This should work for Models."""
        preprocessor = {'cls': 'nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor', 'params': dict({})}
        encoder = {
            'cls': 'nemo.collections.asr.modules.ConvASREncoder',
            'params': {
                'feat_in': 64,
                'activation': 'relu',
                'conv_mask': True,
                # Single minimal Jasper block keeps model construction fast.
                'jasper': [
                    {
                        'filters': 1024,
                        'repeat': 1,
                        'kernel': [1],
                        'stride': [1],
                        'dilation': [1],
                        'dropout': 0.0,
                        'residual': False,
                        'separable': True,
                        'se': True,
                        'se_context_size': -1,
                    }
                ],
            },
        }
        decoder = {
            'cls': 'nemo.collections.asr.modules.ConvASRDecoder',
            'params': {
                'feat_in': 1024,
                'num_classes': 28,
                # 26 letters + space + apostrophe = 28 classes.
                'vocabulary': [
                    ' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
                    'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', "'",
                ],
            },
        }
        modelConfig = DictConfig(
            {'preprocessor': DictConfig(preprocessor), 'encoder': DictConfig(encoder), 'decoder': DictConfig(decoder)}
        )
        obj = EncDecCTCModel.from_config_dict(config=modelConfig)
        assert isinstance(obj, EncDecCTCModel)

    @pytest.mark.unit
    def test_config_updated(self):
        """Legacy 'cls'/'params' configs are rewritten to Hydra's '_target_' form on export."""
        config = DictConfig(
            {
                'cls': 'nemo.collections.asr.modules.SpectrogramAugmentation',
                'params': {'rect_freq': 50, 'rect_masks': 5, 'rect_time': 120,},
            }
        )
        obj = Serialization.from_config_dict(config=config)
        new_config = obj.to_config_dict()
        assert config != new_config
        assert 'params' not in new_config
        assert 'cls' not in new_config
        assert '_target_' in new_config

    @pytest.mark.unit
    def test_base_class_instantiation(self):
        # Target class is V2 impl, calling class is Serialization (base class)
        config = DictConfig({'target': get_class_path(MockSerializationImplV2)})
        obj = Serialization.from_config_dict(config=config)
        new_config = obj.to_config_dict()
        assert config == new_config
        assert isinstance(obj, MockSerializationImplV2)
        assert obj.value == "MockSerializationImplV2"

    @pytest.mark.unit
    def METHOD_NAME(self):
        # Target class is V1 impl, calling class is V1 (same class)
        config = DictConfig({'target': get_class_path(MockSerializationImpl)})
        obj = MockSerializationImpl.from_config_dict(config=config)  # Serialization is base class
        new_config = obj.to_config_dict()
        assert config == new_config
        assert isinstance(obj, MockSerializationImpl)
        assert obj.value == "MockSerializationImpl"

    @pytest.mark.unit
    def test_sub_class_instantiation(self):
        # Target class is V1 impl, calling class is V2 (sub class)
        config = DictConfig({'target': get_class_path(MockSerializationImpl)})
        obj = MockSerializationImplV2.from_config_dict(config=config)  # Serialization is base class
        new_config = obj.to_config_dict()
        assert config == new_config
        assert isinstance(obj, MockSerializationImplV2)
        assert obj.value == "MockSerializationImplV2"
from __future__ import annotations
from datetime import datetime
from unittest.mock import Mock, patch
import pytest
from kombu.asynchronous.timer import Entry, Timer, to_timestamp
class test_to_timestamp:
    """Tests for kombu.asynchronous.timer.to_timestamp."""

    def test_timestamp(self):
        # Numeric input passes straight through unchanged.
        assert to_timestamp(3.13) == 3.13

    def test_datetime(self):
        # datetime.utcnow() is deprecated since Python 3.12; datetime.now()
        # exercises the same datetime branch and yields a truthy timestamp.
        assert to_timestamp(datetime.now())
class test_Entry:
    """Tests for kombu.asynchronous.timer.Entry."""

    def METHOD_NAME(self):
        # Calling the entry invokes fun(*args, **kwargs).
        fun = Mock(name='fun')
        tref = Entry(fun, (4, 4), {'moo': 'baz'})
        tref()
        fun.assert_called_with(4, 4, moo='baz')

    def test_cancel(self):
        tref = Entry(lambda x: x, (1,), {})
        assert not tref.canceled
        assert not tref.cancelled
        tref.cancel()
        # Both spellings are exposed as aliases.
        assert tref.canceled
        assert tref.cancelled

    def test_repr(self):
        # BUG FIX: original read Entry(lambda x: x(1,), {}) -- the comma was
        # misplaced inside the lambda, passing the dict as *args* and nothing
        # as kwargs. Constructor arguments now match test_cancel.
        tref = Entry(lambda x: x, (1,), {})
        assert repr(tref)

    def test_hash(self):
        assert hash(Entry(lambda: None))

    def test_ordering(self):
        # we don't care about results, just that it's possible
        Entry(lambda x: 1) < Entry(lambda x: 2)
        Entry(lambda x: 1) > Entry(lambda x: 2)
        Entry(lambda x: 1) >= Entry(lambda x: 2)
        Entry(lambda x: 1) <= Entry(lambda x: 2)

    def test_eq(self):
        x = Entry(lambda x: 1)
        y = Entry(lambda x: 1)
        # Entries compare by identity, not by equal constructor arguments.
        assert x == x
        assert x != y
class test_Timer:
    """Tests for kombu.asynchronous.timer.Timer."""

    def test_enter_exit(self):
        # Timer is a context manager; leaving the block must stop it.
        x = Timer()
        x.stop = Mock(name='timer.stop')
        with x:
            pass
        x.stop.assert_called_with()

    def test_supports_Timer_interface(self):
        x = Timer()
        x.stop()
        tref = Mock()
        x.cancel(tref)
        tref.cancel.assert_called_with()
        assert x.schedule is x

    def test_handle_error(self):
        # NOTE: removed a redundant local "from datetime import datetime";
        # the module already imports datetime at the top of the file.
        on_error = Mock(name='on_error')
        s = Timer(on_error=on_error)
        # Force to_timestamp to blow up so enter_at takes its error path.
        with patch('kombu.asynchronous.timer.to_timestamp') as tot:
            tot.side_effect = OverflowError()
            s.enter_at(Entry(lambda: None, (), {}),
                       eta=datetime.now())
            s.enter_at(Entry(lambda: None, (), {}), eta=None)
            # Without an on_error handler the exception must propagate.
            s.on_error = None
            with pytest.raises(OverflowError):
                s.enter_at(Entry(lambda: None, (), {}),
                           eta=datetime.now())
        on_error.assert_called_once()
        exc = on_error.call_args[0][0]
        assert isinstance(exc, OverflowError)

    def test_call_repeatedly(self):
        t = Timer()
        try:
            t.schedule.enter_after = Mock()

            myfun = Mock()
            myfun.__name__ = 'myfun'
            t.call_repeatedly(0.03, myfun)

            assert t.schedule.enter_after.call_count == 1
            args1, _ = t.schedule.enter_after.call_args_list[0]
            sec1, tref1, _ = args1
            assert sec1 == 0.03

            # Running the entry reschedules itself with the same interval.
            tref1()
            assert t.schedule.enter_after.call_count == 2
            args2, _ = t.schedule.enter_after.call_args_list[1]
            sec2, tref2, _ = args2
            assert sec2 == 0.03

            # A cancelled entry must not reschedule.
            tref2.canceled = True
            tref2()
            assert t.schedule.enter_after.call_count == 2
        finally:
            t.stop()

    @patch('kombu.asynchronous.timer.logger')
    def test_apply_entry_error_handled(self, logger):
        # With no on_error handler, apply_entry logs the failure.
        t = Timer()
        t.schedule.on_error = None

        fun = Mock()
        fun.side_effect = ValueError()
        t.schedule.apply_entry(fun)
        logger.error.assert_called()

    def test_apply_entry_error_not_handled(self, stdouts):
        # With an on_error handler installed, nothing is written to stderr.
        t = Timer()
        t.schedule.on_error = Mock()

        fun = Mock()
        fun.side_effect = ValueError()
        t.schedule.apply_entry(fun)
        fun.assert_called_with()
        assert not stdouts.stderr.getvalue()

    def test_enter_after(self):
        t = Timer()
        t._enter = Mock()
        fun = Mock(name='fun')
        time = Mock(name='time')
        time.return_value = 10
        t.enter_after(10, fun, time=time)
        time.assert_called_with()
        # eta = now (10) + delay (10).
        t._enter.assert_called_with(20, 0, fun)

    def test_cancel(self):
        t = Timer()
        tref = Mock()
        t.cancel(tref)
        tref.cancel.assert_called_with()
""" !Changing this line will break Test_findfile.test_found!
Non-gui unit tests for grep.GrepDialog methods.
dummy_command calls grep_it calls findfiles.
An exception raised in one method will fail callers.
Otherwise, tests are mostly independent.
Currently only test grep_it, coverage 51%.
"""
from idlelib import grep
import unittest
from test.support import captured_stdout
from idlelib.idle_test.mock_tk import Var
import os
import re
class Dummy_searchengine:
    """Minimal stand-in for searchengine.SearchEngine.

    GrepDialog.__init__ calls parent SearchDialogBase, which attaches the
    passed-in engine as attribute 'engine'; of its many attributes, the
    tested code paths only need getpat(), returning the pattern string.
    """
    def getpat(self):
        return self._pat

searchengine = Dummy_searchengine()
class Dummy_grep:
    """Stand-in for GrepDialog carrying only what grep_it needs."""
    # Methods tested
    #default_command = GrepDialog.default_command
    grep_it = grep.GrepDialog.grep_it
    # Other stuff needed
    recvar = Var(False)        # "recurse subdirectories" checkbox state
    engine = searchengine      # supplies getpat() for the report header
    def close(self):  # gui method
        pass

_grep = Dummy_grep()
class FindfilesTest(unittest.TestCase):
    """Tests for the grep.findfiles generator."""

    @classmethod
    def setUpClass(cls):
        # Use this test file's own directory as a known fixture tree.
        cls.realpath = os.path.realpath(__file__)
        cls.path = os.path.dirname(cls.realpath)

    @classmethod
    def tearDownClass(cls):
        del cls.realpath, cls.path

    def test_invaliddir(self):
        # A nonexistent directory yields no files and prints a warning.
        with captured_stdout() as s:
            filelist = list(grep.findfiles('invaliddir', '*.*', False))
        self.assertEqual(filelist, [])
        self.assertIn('invalid', s.getvalue())

    def test_curdir(self):
        # Test os.curdir.
        ff = grep.findfiles
        save_cwd = os.getcwd()
        os.chdir(self.path)
        filename = 'test_grep.py'
        filelist = list(ff(os.curdir, filename, False))
        self.assertIn(os.path.join(os.curdir, filename), filelist)
        os.chdir(save_cwd)

    def test_base(self):
        ff = grep.findfiles
        readme = os.path.join(self.path, 'README.txt')

        # Check for Python files in path where this file lives.
        filelist = list(ff(self.path, '*.py', False))
        # This directory has many Python files.
        self.assertGreater(len(filelist), 10)
        self.assertIn(self.realpath, filelist)
        self.assertNotIn(readme, filelist)

        # Look for .txt files in path where this file lives.
        filelist = list(ff(self.path, '*.txt', False))
        self.assertNotEqual(len(filelist), 0)
        self.assertNotIn(self.realpath, filelist)
        self.assertIn(readme, filelist)

        # Look for non-matching pattern.
        filelist = list(ff(self.path, 'grep.*', False))
        self.assertEqual(len(filelist), 0)
        self.assertNotIn(self.realpath, filelist)

    def test_recurse(self):
        ff = grep.findfiles
        parent = os.path.dirname(self.path)
        grepfile = os.path.join(parent, 'grep.py')
        pat = '*.py'

        # Get Python files only in parent directory.
        filelist = list(ff(parent, pat, False))
        parent_size = len(filelist)
        # Lots of Python files in idlelib.
        self.assertGreater(parent_size, 20)
        self.assertIn(grepfile, filelist)
        # Without subdirectories, this file isn't returned.
        self.assertNotIn(self.realpath, filelist)

        # Include subdirectories.
        filelist = list(ff(parent, pat, True))
        # More files found now.
        self.assertGreater(len(filelist), parent_size)
        self.assertIn(grepfile, filelist)
        # This file exists in list now.
        self.assertIn(self.realpath, filelist)

        # Check another level up the tree.
        parent = os.path.dirname(parent)
        filelist = list(ff(parent, '*.py', True))
        self.assertIn(self.realpath, filelist)
class Grep_itTest(unittest.TestCase):
    # Test captured reports with 0 and some hits.
    # Should test file names, but Windows reports have mixed / and \ separators
    # from incomplete replacement, so 'later'.

    def METHOD_NAME(self, pat):
        """Run grep_it with *pat* over this file; return the captured report lines."""
        _grep.engine._pat = pat
        with captured_stdout() as s:
            _grep.grep_it(re.compile(pat), __file__)
        lines = s.getvalue().split('\n')
        lines.pop()  # remove bogus '' after last \n
        return lines

    def test_unfound(self):
        pat = 'xyz*'*7
        lines = self.METHOD_NAME(pat)
        # Header line plus the "No hits." message.
        self.assertEqual(len(lines), 2)
        self.assertIn(pat, lines[0])
        self.assertEqual(lines[1], 'No hits.')

    def test_found(self):
        # The pattern is the first line of this file's module docstring.
        pat = '""" !Changing this line will break Test_findfile.test_found!'
        lines = self.METHOD_NAME(pat)
        self.assertEqual(len(lines), 5)
        self.assertIn(pat, lines[0])
        self.assertIn('py: 1:', lines[1])  # line number 1
        self.assertIn('2', lines[3])  # hits found 2
        self.assertTrue(lines[4].startswith('(Hint:'))
class Default_commandTest(unittest.TestCase):
    # To write this, move outwin import to top of GrepDialog
    # so it can be replaced by captured_stdout in class setup/teardown.
    pass

if __name__ == '__main__':
    unittest.main(verbosity=2)
import ctypes
from ansys.dpf.gate import utils
from ansys.dpf.gate import errors
from ansys.dpf.gate.generated import capi
from ansys.dpf.gate.generated import unit_abstract_api
from ansys.dpf.gate.generated.data_processing_capi import DataProcessingCAPI
#-------------------------------------------------------------------------------
# Unit
#-------------------------------------------------------------------------------
class UnitCAPI(unit_abstract_api.UnitAbstractAPI):
    """ctypes bindings for the DPF Unit C API.

    Every wrapper follows the same pattern: call into capi.dll, collect an
    (errorSize, sError) out-parameter pair, and raise DPFServerException if
    the server reported an error.
    """

    @staticmethod
    def init_unit_environment(object):
        # get core api
        DataProcessingCAPI.init_data_processing_environment(object)
        object._deleter_func = (DataProcessingCAPI.data_processing_delete_shared_object, lambda obj: obj)

    @staticmethod
    def unit_get_homogeneity(pre_allocated_char_64, symbol):
        # pre_allocated_char_64: caller-provided 64-char buffer the C side fills in.
        errorSize = ctypes.c_int(0)
        sError = ctypes.c_wchar_p()
        res = capi.dll.Unit_GetHomogeneity(utils.to_char_ptr(pre_allocated_char_64), utils.to_char_ptr(symbol), ctypes.byref(utils.to_int32(errorSize)), ctypes.byref(sError))
        if errorSize.value != 0:
            raise errors.DPFServerException(sError.value)
        return res

    @staticmethod
    def unit_get_conversion_factor(from_, to):
        errorSize = ctypes.c_int(0)
        sError = ctypes.c_wchar_p()
        res = capi.dll.Unit_GetConversionFactor(utils.to_char_ptr(from_), utils.to_char_ptr(to), ctypes.byref(utils.to_int32(errorSize)), ctypes.byref(sError))
        if errorSize.value != 0:
            raise errors.DPFServerException(sError.value)
        return res

    @staticmethod
    def unit_get_conversion_shift(from_, to):
        errorSize = ctypes.c_int(0)
        sError = ctypes.c_wchar_p()
        res = capi.dll.Unit_GetConversionShift(utils.to_char_ptr(from_), utils.to_char_ptr(to), ctypes.byref(utils.to_int32(errorSize)), ctypes.byref(sError))
        if errorSize.value != 0:
            raise errors.DPFServerException(sError.value)
        return res

    @staticmethod
    def unit_are_homogeneous(from_, to):
        errorSize = ctypes.c_int(0)
        sError = ctypes.c_wchar_p()
        res = capi.dll.Unit_AreHomogeneous(utils.to_char_ptr(from_), utils.to_char_ptr(to), ctypes.byref(utils.to_int32(errorSize)), ctypes.byref(sError))
        if errorSize.value != 0:
            raise errors.DPFServerException(sError.value)
        return res

    @staticmethod
    def unit_get_symbol(pre_allocated_char_64, homogeneity, unit_system_id):
        errorSize = ctypes.c_int(0)
        sError = ctypes.c_wchar_p()
        res = capi.dll.Unit_getSymbol(utils.to_char_ptr(pre_allocated_char_64), utils.to_char_ptr(homogeneity), utils.to_int32(unit_system_id), ctypes.byref(utils.to_int32(errorSize)), ctypes.byref(sError))
        if errorSize.value != 0:
            raise errors.DPFServerException(sError.value)
        return res

    # The *_for_object variants below route the call through a specific client
    # object (api_to_use) instead of the process-global API.

    @staticmethod
    def METHOD_NAME(api_to_use, pre_allocated_char_64, symbol):
        errorSize = ctypes.c_int(0)
        sError = ctypes.c_wchar_p()
        res = capi.dll.Unit_GetHomogeneity_for_object(api_to_use._internal_obj if api_to_use is not None else None, utils.to_char_ptr(pre_allocated_char_64), utils.to_char_ptr(symbol), ctypes.byref(utils.to_int32(errorSize)), ctypes.byref(sError))
        if errorSize.value != 0:
            raise errors.DPFServerException(sError.value)
        return res

    @staticmethod
    def unit_get_conversion_factor_for_object(api_to_use, from_, to):
        errorSize = ctypes.c_int(0)
        sError = ctypes.c_wchar_p()
        res = capi.dll.Unit_GetConversionFactor_for_object(api_to_use._internal_obj if api_to_use is not None else None, utils.to_char_ptr(from_), utils.to_char_ptr(to), ctypes.byref(utils.to_int32(errorSize)), ctypes.byref(sError))
        if errorSize.value != 0:
            raise errors.DPFServerException(sError.value)
        return res

    @staticmethod
    def unit_get_conversion_shift_for_object(api_to_use, from_, to):
        errorSize = ctypes.c_int(0)
        sError = ctypes.c_wchar_p()
        res = capi.dll.Unit_GetConversionShift_for_object(api_to_use._internal_obj if api_to_use is not None else None, utils.to_char_ptr(from_), utils.to_char_ptr(to), ctypes.byref(utils.to_int32(errorSize)), ctypes.byref(sError))
        if errorSize.value != 0:
            raise errors.DPFServerException(sError.value)
        return res

    @staticmethod
    def unit_are_homogeneous_for_object(api_to_use, from_, to):
        errorSize = ctypes.c_int(0)
        sError = ctypes.c_wchar_p()
        res = capi.dll.Unit_AreHomogeneous_for_object(api_to_use._internal_obj if api_to_use is not None else None, utils.to_char_ptr(from_), utils.to_char_ptr(to), ctypes.byref(utils.to_int32(errorSize)), ctypes.byref(sError))
        if errorSize.value != 0:
            raise errors.DPFServerException(sError.value)
        return res

    @staticmethod
    def unit_get_symbol_for_object(api_to_use, pre_allocated_char_64, homogeneity, unit_system_id):
        errorSize = ctypes.c_int(0)
        sError = ctypes.c_wchar_p()
        res = capi.dll.Unit_getSymbol_for_object(api_to_use._internal_obj if api_to_use is not None else None, utils.to_char_ptr(pre_allocated_char_64), utils.to_char_ptr(homogeneity), utils.to_int32(unit_system_id), ctypes.byref(utils.to_int32(errorSize)), ctypes.byref(sError))
        if errorSize.value != 0:
            raise errors.DPFServerException(sError.value)
        return res
|
"""Implements (a subset of) Sun XDR -- eXternal Data Representation.
See: RFC 1014
"""
import struct
from io import BytesIO
from functools import wraps
import warnings
warnings._deprecated(__name__, remove=(3, 13))
__all__ = ["Error", "Packer", "Unpacker", "ConversionError"]
# exceptions
class Error(Exception):
    """Base exception for this module.

    Usage:
        except xdrlib.Error as var:
            # var has the Error instance for the exception

    Public ivars:
        msg -- contains the message
    """

    def __init__(self, msg):
        # Deliberately does not forward msg to Exception.__init__,
        # so e.args stays empty (historical xdrlib behavior).
        self.msg = msg

    def __str__(self):
        return str(self.msg)

    def __repr__(self):
        return repr(self.msg)
class ConversionError(Error):
    """Raised when a value cannot be packed/unpacked (wraps struct.error)."""
    pass
def raise_conversion_error(function):
    """Decorator: re-raise any struct.error from *function* as ConversionError.

    The original struct message is preserved; the chained context is
    suppressed ('from None') so callers see a single clean exception.
    """
    @wraps(function)
    def wrapper(self, value):
        try:
            return function(self, value)
        except struct.error as exc:
            raise ConversionError(exc.args[0]) from None
    return wrapper
class Packer:
    """Pack various data representations into a buffer."""

    def __init__(self):
        self.reset()

    def reset(self):
        # Fresh in-memory buffer; every pack_* method appends to it.
        self.__buf = BytesIO()

    def get_buffer(self):
        return self.__buf.getvalue()
    # backwards compatibility
    get_buf = get_buffer

    @raise_conversion_error
    def METHOD_NAME(self, x):
        # 32-bit unsigned int, big-endian (XDR is big-endian throughout).
        self.__buf.write(struct.pack('>L', x))

    @raise_conversion_error
    def pack_int(self, x):
        # 32-bit signed int.
        self.__buf.write(struct.pack('>l', x))

    pack_enum = pack_int

    def pack_bool(self, x):
        # Booleans are a full 4-byte word: 1 for true, 0 for false.
        if x: self.__buf.write(b'\0\0\0\1')
        else: self.__buf.write(b'\0\0\0\0')

    def pack_uhyper(self, x):
        # 64-bit value packed as two 32-bit halves, high word first.
        try:
            self.METHOD_NAME(x>>32 & 0xffffffff)
        except (TypeError, struct.error) as e:
            raise ConversionError(e.args[0]) from None
        try:
            self.METHOD_NAME(x & 0xffffffff)
        except (TypeError, struct.error) as e:
            raise ConversionError(e.args[0]) from None

    pack_hyper = pack_uhyper

    @raise_conversion_error
    def pack_float(self, x):
        self.__buf.write(struct.pack('>f', x))

    @raise_conversion_error
    def pack_double(self, x):
        self.__buf.write(struct.pack('>d', x))

    def pack_fstring(self, n, s):
        # Fixed-length string: truncate to n, zero-pad up to a 4-byte boundary.
        if n < 0:
            raise ValueError('fstring size must be nonnegative')
        data = s[:n]
        n = ((n+3)//4)*4
        data = data + (n - len(data)) * b'\0'
        self.__buf.write(data)

    pack_fopaque = pack_fstring

    def pack_string(self, s):
        # Variable-length string: 32-bit length word, then padded payload.
        n = len(s)
        self.METHOD_NAME(n)
        self.pack_fstring(n, s)

    pack_opaque = pack_string
    pack_bytes = pack_string

    def pack_list(self, list, pack_item):
        # Each item is preceded by a 1 marker; a trailing 0 terminates the list.
        for item in list:
            self.METHOD_NAME(1)
            pack_item(item)
        self.METHOD_NAME(0)

    def pack_farray(self, n, list, pack_item):
        # Fixed-size array: no length prefix, declared size must match.
        if len(list) != n:
            raise ValueError('wrong array size')
        for item in list:
            pack_item(item)

    def pack_array(self, list, pack_item):
        # Counted array: 32-bit length word followed by the items.
        n = len(list)
        self.METHOD_NAME(n)
        self.pack_farray(n, list, pack_item)
class Unpacker:
    """Unpacks various data representations from the given buffer."""

    def __init__(self, data):
        self.reset(data)

    def reset(self, data):
        self.__buf = data
        self.__pos = 0   # current read offset into the buffer

    def get_position(self):
        return self.__pos

    def set_position(self, position):
        self.__pos = position

    def get_buffer(self):
        return self.__buf

    def done(self):
        # Raise if trailing bytes remain after all expected fields were read.
        if self.__pos < len(self.__buf):
            raise Error('unextracted data remains')

    def unpack_uint(self):
        # 32-bit unsigned int, big-endian.
        i = self.__pos
        self.__pos = j = i+4
        data = self.__buf[i:j]
        if len(data) < 4:
            raise EOFError
        return struct.unpack('>L', data)[0]

    def unpack_int(self):
        i = self.__pos
        self.__pos = j = i+4
        data = self.__buf[i:j]
        if len(data) < 4:
            raise EOFError
        return struct.unpack('>l', data)[0]

    unpack_enum = unpack_int

    def unpack_bool(self):
        return bool(self.unpack_int())

    def unpack_uhyper(self):
        # 64-bit value stored as two 32-bit halves, high word first.
        hi = self.unpack_uint()
        lo = self.unpack_uint()
        return int(hi)<<32 | lo

    def unpack_hyper(self):
        # Reinterpret the unsigned 64-bit value as two's-complement signed.
        x = self.unpack_uhyper()
        if x >= 0x8000000000000000:
            x = x - 0x10000000000000000
        return x

    def unpack_float(self):
        i = self.__pos
        self.__pos = j = i+4
        data = self.__buf[i:j]
        if len(data) < 4:
            raise EOFError
        return struct.unpack('>f', data)[0]

    def unpack_double(self):
        i = self.__pos
        self.__pos = j = i+8
        data = self.__buf[i:j]
        if len(data) < 8:
            raise EOFError
        return struct.unpack('>d', data)[0]

    def unpack_fstring(self, n):
        # Fixed-length string: consume n bytes rounded up to a 4-byte boundary,
        # return only the first n.
        if n < 0:
            raise ValueError('fstring size must be nonnegative')
        i = self.__pos
        j = i + (n+3)//4*4
        if j > len(self.__buf):
            raise EOFError
        self.__pos = j
        return self.__buf[i:i+n]

    unpack_fopaque = unpack_fstring

    def unpack_string(self):
        # Variable-length string: 32-bit length word, then padded payload.
        n = self.unpack_uint()
        return self.unpack_fstring(n)

    unpack_opaque = unpack_string
    unpack_bytes = unpack_string

    def unpack_list(self, unpack_item):
        # Items are each preceded by a 1 marker; a 0 marker ends the list.
        list = []
        while 1:
            x = self.unpack_uint()
            if x == 0: break
            if x != 1:
                raise ConversionError('0 or 1 expected, got %r' % (x,))
            item = unpack_item()
            list.append(item)
        return list

    def unpack_farray(self, n, unpack_item):
        # Fixed-size array: exactly n items, no length prefix.
        list = []
        for i in range(n):
            list.append(unpack_item())
        return list

    def unpack_array(self, unpack_item):
        # Counted array: 32-bit length word followed by the items.
        n = self.unpack_uint()
        return self.unpack_farray(n, unpack_item)
import pytest
import shlex
import time
@pytest.mark.skipif("'Dot3' not in config.lldpd.features", reason="Dot3 not supported")
class TestLldpDot3(object):
def test_aggregate(self, lldpd1, lldpd, lldpcli, namespaces, links):
    """The neighbor must advertise link aggregation with the bond's ifindex."""
    links(namespaces(3), namespaces(2))  # Another link to setup a bond
    with namespaces(2):
        idx = links.bond("bond42", "eth1", "eth3")
        lldpd()
    with namespaces(1):
        out = lldpcli("-f", "keyvalue", "show", "neighbors", "details")
        assert out["lldp.eth0.port.descr"] == "eth1"
        assert out["lldp.eth0.port.aggregation"] == str(idx)
# TODO: unfortunately, with veth, it's not possible to get an
# interface with autoneg.

@pytest.mark.parametrize(
    "command, expected",
    [
        (
            "pse supported enabled paircontrol powerpairs spare class class-3",
            {
                "supported": "yes",
                "enabled": "yes",
                "paircontrol": "yes",
                "device-type": "PSE",
                "pairs": "spare",
                "class": "class 3",
            },
        ),
        (
            "pd supported enabled powerpairs spare class class-3 type 1 source "
            "pse priority low requested 10000 allocated 15000",
            {
                "supported": "yes",
                "enabled": "yes",
                "paircontrol": "no",
                "device-type": "PD",
                "pairs": "spare",
                "class": "class 3",
                "power-type": "1",
                "source": "Primary power source",
                "priority": "low",
                "requested": "10000",
                "allocated": "15000",
            },
        ),
    ],
)
def METHOD_NAME(self, lldpd1, lldpd, lldpcli, namespaces, command, expected):
    """Configure dot3 power on one peer; check the TLV seen by the other."""
    with namespaces(2):
        lldpd()
        result = lldpcli(*shlex.split("configure dot3 power {}".format(command)))
        assert result.returncode == 0
        time.sleep(3)  # give lldpd time to send the updated TLV
    with namespaces(1):
        pfx = "lldp.eth0.port.power."
        out = lldpcli("-f", "keyvalue", "show", "neighbors", "details")
        # Keep only the power.* keys, with the prefix stripped.
        out = {k[len(pfx) :]: v for k, v in out.items() if k.startswith(pfx)}
        assert out == expected
def test_autoneg_power(self, links, lldpd, lldpcli, namespaces):
links(namespaces(1), namespaces(2))
with namespaces(1):
lldpd()
with namespaces(2):
lldpd()
result = lldpcli(
*shlex.split(
"configure dot3 power pd "
"supported enabled paircontrol "
"powerpairs spare "
"class class-3 "
"type 1 source both priority low "
"requested 20000 allocated 5000"
)
)
assert result.returncode == 0
time.sleep(2)
with namespaces(1):
# Did we receive the request?
out = lldpcli("-f", "keyvalue", "show", "neighbors", "details")
assert out["lldp.eth0.port.power.requested"] == "20000"
assert out["lldp.eth0.port.power.allocated"] == "5000"
# Send an answer we agree to give almost that (this part
# cannot be automated, lldpd cannot take this decision).
result = lldpcli(
*shlex.split(
"configure dot3 power pse "
"supported enabled paircontrol powerpairs "
"spare class class-3 "
"type 1 source primary priority high "
"requested 20000 allocated 19000"
)
)
assert result.returncode == 0
time.sleep(2)
with namespaces(2):
# Did we receive that?
out = lldpcli("-f", "keyvalue", "show", "neighbors", "details")
assert out["lldp.eth1.port.power.requested"] == "20000"
assert out["lldp.eth1.port.power.allocated"] == "19000"
with namespaces(1):
# Did we get an echo back? This part is handled
# automatically by lldpd: we confirm we received the
# answer "immediately".
out = lldpcli("-f", "keyvalue", "show", "neighbors", "details")
assert out["lldp.eth0.port.power.requested"] == "20000"
assert out["lldp.eth0.port.power.allocated"] == "19000" |
298,963 | map hash encrypt | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_crypto.psi import Curve25519
class EllipticCurve(object):
    """
    Symmetric encryption key

    Wraps a Curve25519 instance. If `curve_key` is given the existing private
    key is restored, otherwise a fresh key is generated.
    """

    def __init__(self, curve_name, curve_key=None):
        self.curve = self.__get_curve_instance(curve_name, curve_key)

    @staticmethod
    def __get_curve_instance(curve_name, curve_key):
        # NOTE(review): only Curve25519 is constructed here; `curve_name`
        # appears to be accepted for interface compatibility only — confirm.
        if curve_key is None:
            return Curve25519()
        return Curve25519(curve_key)

    def get_curve_key(self):
        """Return the curve's private key."""
        return self.curve.get_private_key()

    def encrypt(self, plaintext):
        """
        Encryption method
        :param plaintext:
        :return:
        """
        return self.curve.encrypt(plaintext)

    def sign(self, ciphertext):
        """Diffie-Hellman step applied to an already-encrypted point."""
        return self.curve.diffie_hellman(ciphertext)

    def _map_table(self, plaintable, mode, transform, error_message):
        """
        Shared dispatcher for the map_* helpers (the three public methods
        previously duplicated this six-way branch verbatim).

        Applies `transform` to each key and reshapes every (k, v) record:
            (k, enc_k)        for mode == 0
            (enc_k, -1)       for mode == 1
            (enc_k, v)        for mode == 2
            (k, (enc_k, v))   for mode == 3
            (enc_k, k)        for mode == 4
            (enc_k, (k, v))   for mode == 5

        :param plaintable: Table
        :param mode: int
        :param transform: callable applied to each key
        :param error_message: ValueError text for an unsupported mode
        :return: Table
        """
        if mode == 0:
            return plaintable.map(lambda k, v: (k, transform(k)))
        elif mode == 1:
            return plaintable.map(lambda k, v: (transform(k), -1))
        elif mode == 2:
            return plaintable.map(lambda k, v: (transform(k), v))
        elif mode == 3:
            return plaintable.map(lambda k, v: (k, (transform(k), v)))
        elif mode == 4:
            return plaintable.map(lambda k, v: (transform(k), k))
        elif mode == 5:
            return plaintable.map(lambda k, v: (transform(k), (k, v)))
        raise ValueError(error_message)

    def METHOD_NAME(self, plaintable, mode, hash_operator, salt):
        """
        adapted from CryptorExecutor
        Hash each key (with `salt`) then encrypt it; records are reshaped per
        mode — see _map_table for the layout table.
        :param plaintable: Table
        :param mode: int
        :return: Table
        """
        return self._map_table(
            plaintable,
            mode,
            lambda k: self.curve.encrypt(hash_operator.compute(k, suffix_salt=salt)),
            "Unsupported mode for elliptic curve map encryption",
        )

    def map_encrypt(self, plaintable, mode):
        """
        adapted from CryptorExecutor
        Encrypt each key directly; records are reshaped per mode — see
        _map_table for the layout table.
        :param plaintable: Table
        :param mode: int
        :return: Table
        """
        return self._map_table(
            plaintable,
            mode,
            self.curve.encrypt,
            "Unsupported mode for elliptic curve map encryption",
        )

    def map_sign(self, plaintable, mode):
        """
        adapted from CryptorExecutor
        Apply the Diffie-Hellman step to each key; records are reshaped per
        mode — see _map_table for the layout table.
        :param plaintable: Table
        :param mode: int
        :return: Table
        """
        return self._map_table(
            plaintable,
            mode,
            self.curve.diffie_hellman,
            "Unsupported mode for elliptic curve map sign",
        )
298,964 | require confirm unknown token | from typing import TYPE_CHECKING
from trezor import ui
from trezor.enums import ButtonRequestType
from trezor.strings import format_plural
from trezor.ui.layouts import (
confirm_blob,
confirm_ethereum_tx,
confirm_text,
should_show_more,
)
from .helpers import address_from_bytes, decode_typed_data
if TYPE_CHECKING:
from typing import Awaitable, Iterable
from trezor.messages import (
EthereumFieldType,
EthereumNetworkInfo,
EthereumStructMember,
EthereumTokenInfo,
)
async def require_confirm_tx(
    to_bytes: bytes,
    value: int,
    gas_price: int,
    gas_limit: int,
    network: EthereumNetworkInfo,
    token: EthereumTokenInfo | None,
) -> None:
    """Show the confirmation dialog for a legacy (pre-EIP-1559) transaction."""
    # Empty recipient bytes means contract creation.
    recipient = address_from_bytes(to_bytes, network) if to_bytes else "new contract?"
    fee_items = (
        ("Gas limit:", f"{gas_limit} units"),
        ("Gas price:", format_ethereum_amount(gas_price, None, network)),
    )
    await confirm_ethereum_tx(
        recipient,
        format_ethereum_amount(value, token, network),
        format_ethereum_amount(gas_price * gas_limit, None, network),
        fee_items,
    )
async def require_confirm_tx_eip1559(
    to_bytes: bytes,
    value: int,
    max_gas_fee: int,
    max_priority_fee: int,
    gas_limit: int,
    network: EthereumNetworkInfo,
    token: EthereumTokenInfo | None,
) -> None:
    """Show the confirmation dialog for an EIP-1559 transaction."""
    # Empty recipient bytes means contract creation.
    recipient = address_from_bytes(to_bytes, network) if to_bytes else "new contract?"
    fee_items = (
        ("Gas limit:", f"{gas_limit} units"),
        ("Max gas price:", format_ethereum_amount(max_gas_fee, None, network)),
        ("Priority fee:", format_ethereum_amount(max_priority_fee, None, network)),
    )
    await confirm_ethereum_tx(
        recipient,
        format_ethereum_amount(value, token, network),
        format_ethereum_amount(max_gas_fee * gas_limit, None, network),
        fee_items,
    )
def METHOD_NAME(address_bytes: bytes) -> Awaitable[None]:
    """Ask the user to confirm the contract address of an unrecognized token."""
    from ubinascii import hexlify

    from trezor.ui.layouts import confirm_address

    return confirm_address(
        "Unknown token",
        "0x" + hexlify(address_bytes).decode(),
        "Contract:",
        "unknown_token",
        br_code=ButtonRequestType.SignTx,
    )
def require_confirm_address(address_bytes: bytes) -> Awaitable[None]:
    """Ask the user to confirm the address being used for signing."""
    from ubinascii import hexlify

    from trezor.ui.layouts import confirm_address

    return confirm_address(
        "Signing address",
        "0x" + hexlify(address_bytes).decode(),
        br_code=ButtonRequestType.SignTx,
    )
def require_confirm_data(data: bytes, data_total: int) -> Awaitable[None]:
    """Show the raw transaction data payload (paginated) for confirmation."""
    size_note = f"Size: {data_total} bytes"
    return confirm_blob(
        "confirm_data",
        "Confirm data",
        data,
        size_note,
        br_code=ButtonRequestType.SignTx,
        ask_pagination=True,
    )
async def confirm_typed_data_final() -> None:
    """Final hold-to-confirm step before signing EIP-712 typed data."""
    from trezor.ui.layouts import confirm_action

    await confirm_action(
        "confirm_typed_data_final",
        "Confirm typed data",
        "Really sign EIP-712 typed data?",
        verb="Hold to confirm",
        hold=True,
    )
def confirm_empty_typed_message() -> Awaitable[None]:
    """Show a placeholder confirmation when the typed message has no fields."""
    return confirm_text(
        "confirm_empty_typed_message",
        "Confirm message",
        "",
        "No message field",
    )
async def should_show_domain(name: bytes, version: bytes) -> bool:
    """Ask whether the full EIP-712 domain should be displayed."""
    para = (
        (ui.NORMAL, "Name and version"),
        (ui.DEMIBOLD, decode_typed_data(name, "string")),
        (ui.DEMIBOLD, decode_typed_data(version, "string")),
    )
    return await should_show_more(
        "Confirm domain",
        para,
        "Show full domain",
        "should_show_domain",
    )
async def should_show_struct(
    description: str,
    data_members: list[EthereumStructMember],
    title: str = "Confirm struct",
    button_text: str = "Show full struct",
) -> bool:
    """Ask whether a struct's full member list should be displayed."""
    member_names = ", ".join(field.name for field in data_members)
    member_count = format_plural(
        "Contains {count} {plural}", len(data_members), "key"
    )
    para = (
        (ui.DEMIBOLD, description),
        (ui.NORMAL, member_count),
        (ui.NORMAL, member_names),
    )
    return await should_show_more(
        title,
        para,
        button_text,
        "should_show_struct",
    )
async def should_show_array(
    parent_objects: Iterable[str],
    data_type: str,
    size: int,
) -> bool:
    """Ask whether an array's full contents should be displayed."""
    summary = format_plural("Array of {count} {plural}", size, data_type)
    return await should_show_more(
        limit_str(".".join(parent_objects)),
        ((ui.NORMAL, summary),),
        "Show full array",
        "should_show_array",
    )
async def confirm_typed_value(
    name: str,
    value: bytes,
    parent_objects: list[str],
    field: EthereumFieldType,
    array_index: int | None = None,
) -> None:
    """Confirm a single EIP-712 value; address/bytes values get paginated display."""
    from trezor.enums import EthereumDataType

    from .helpers import get_type_name

    type_name = get_type_name(field)
    if array_index is None:
        title = limit_str(".".join(parent_objects))
        description = f"{name} ({type_name})"
    else:
        # For array elements the element name joins the title and the index
        # is shown in the description instead.
        title = limit_str(".".join(parent_objects + [name]))
        description = f"[{array_index}] ({type_name})"

    data = decode_typed_data(value, type_name)

    if field.data_type in (EthereumDataType.ADDRESS, EthereumDataType.BYTES):
        await confirm_blob(
            "confirm_typed_value",
            title,
            data,
            description,
            ask_pagination=True,
        )
    else:
        await confirm_text(
            "confirm_typed_value",
            title,
            data,
            description,
        )
def format_ethereum_amount(
    value: int,
    token: EthereumTokenInfo | None,
    network: EthereumNetworkInfo,
) -> str:
    """Format an amount with the token (or network) symbol and decimals."""
    from trezor.strings import format_amount

    if token:
        decimals, suffix = token.decimals, token.symbol
    else:
        decimals, suffix = 18, network.symbol

    # Don't want to display wei values for tokens with small decimal numbers
    if decimals > 9 and value < 10 ** (decimals - 9):
        decimals = 0
        suffix = "Wei " + suffix

    return f"{format_amount(value, decimals)} {suffix}"
def limit_str(s: str, limit: int = 16) -> str:
    """Shortens string to show the last <limit> characters."""
    if len(s) > limit + 2:
        # ".." prefix plus the trailing <limit> characters.
        return ".." + s[-limit:]
    return s
298,965 | setup | """
Acceleration and Friction
Demonstrate how to implement simple acceleration and friction without a
physics engine.
Artwork from https://kenney.nl
If Python and Arcade are installed, this example can be run from the
command line with:
python -m arcade.examples.sprite_move_keyboard_accel
"""
from __future__ import annotations
import arcade
# Window / sprite configuration.
SPRITE_SCALING = 0.5
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
SCREEN_TITLE = "Better Move Sprite with Keyboard Example"
# Important constants for this example
# Speed limit (pixels per frame, per axis)
MAX_SPEED = 3.0
# How fast we accelerate (velocity added per frame while a key is held)
ACCELERATION_RATE = 0.1
# How fast to slow down after we let off the key
FRICTION = 0.02
class Player(arcade.Sprite):
    """Player sprite that moves by its own velocity, clamped to the window."""

    def update(self):
        # Advance by the current velocity.
        self.center_x += self.change_x
        self.center_y += self.change_y

        # Clamp horizontally, zeroing x speed on contact with an edge.
        if self.left < 0:
            self.left = 0
            self.change_x = 0
        elif self.right > SCREEN_WIDTH - 1:
            self.right = SCREEN_WIDTH - 1
            self.change_x = 0

        # Clamp vertically, zeroing y speed on contact with an edge.
        if self.bottom < 0:
            self.bottom = 0
            self.change_y = 0
        elif self.top > SCREEN_HEIGHT - 1:
            self.top = SCREEN_HEIGHT - 1
            self.change_y = 0
class MyGame(arcade.Window):
    """
    Main application class.
    """

    def __init__(self, width, height, title):
        """
        Initializer
        """
        # Call the parent class initializer
        super().__init__(width, height, title)

        # Variable that will hold the player sprite list
        self.player_list = None

        # Create a place to store the player sprite
        # so it can be accessed directly.
        self.player_sprite = None

        # Create places to store the speed display Text objects
        self.x_speed_display = None
        self.y_speed_display = None

        # Track the current state of what key is pressed
        self.left_pressed = False
        self.right_pressed = False
        self.up_pressed = False
        self.down_pressed = False

        # Set the background color.
        # BUG FIX: the original assigned to `arcade.background_color`, which
        # only sets an attribute on the arcade module and never changes the
        # window; the Window property is `self.background_color`.
        self.background_color = arcade.color.AMAZON

    def METHOD_NAME(self):
        """ Set up the game and initialize the variables. """

        # Create a sprite list
        self.player_list = arcade.SpriteList()

        # Set up the player
        self.player_sprite = Player(":resources:images/animated_characters/female_person/femalePerson_idle.png",
                                    scale=SPRITE_SCALING)
        self.player_sprite.position = self.width / 2, self.height / 2
        self.player_list.append(self.player_sprite)

        # Create the speed display objects with initial text
        self.x_speed_display = arcade.Text(
            f"X Speed: {self.player_sprite.change_x:6.3f}",
            10, 50, arcade.color.BLACK)

        self.y_speed_display = arcade.Text(
            f"Y Speed: {self.player_sprite.change_y:6.3f}",
            10, 70, arcade.color.BLACK)

    def on_draw(self):
        """
        Render the screen.
        """

        # This command has to happen before we start drawing
        self.clear()

        # Draw all the sprites.
        self.player_list.draw()

        # Draw the speed indicators
        self.x_speed_display.draw()
        self.y_speed_display.draw()

    def on_update(self, delta_time):
        """ Movement and game logic """

        # Add some friction: bleed speed toward zero, snapping to zero when
        # the remaining speed is within one friction step.
        if self.player_sprite.change_x > FRICTION:
            self.player_sprite.change_x -= FRICTION
        elif self.player_sprite.change_x < -FRICTION:
            self.player_sprite.change_x += FRICTION
        else:
            self.player_sprite.change_x = 0

        if self.player_sprite.change_y > FRICTION:
            self.player_sprite.change_y -= FRICTION
        elif self.player_sprite.change_y < -FRICTION:
            self.player_sprite.change_y += FRICTION
        else:
            self.player_sprite.change_y = 0

        # Apply acceleration based on the keys pressed (opposing keys cancel)
        if self.up_pressed and not self.down_pressed:
            self.player_sprite.change_y += ACCELERATION_RATE
        elif self.down_pressed and not self.up_pressed:
            self.player_sprite.change_y += -ACCELERATION_RATE
        if self.left_pressed and not self.right_pressed:
            self.player_sprite.change_x += -ACCELERATION_RATE
        elif self.right_pressed and not self.left_pressed:
            self.player_sprite.change_x += ACCELERATION_RATE

        # Enforce the speed limit on both axes
        if self.player_sprite.change_x > MAX_SPEED:
            self.player_sprite.change_x = MAX_SPEED
        elif self.player_sprite.change_x < -MAX_SPEED:
            self.player_sprite.change_x = -MAX_SPEED
        if self.player_sprite.change_y > MAX_SPEED:
            self.player_sprite.change_y = MAX_SPEED
        elif self.player_sprite.change_y < -MAX_SPEED:
            self.player_sprite.change_y = -MAX_SPEED

        # Call update to move the sprite
        # IMPORTANT: If using a physics engine, you need to call update
        # on it instead of the sprite list!
        self.player_list.update()

        # Update the speed displays based on the final speed
        self.x_speed_display.text = f"X Speed: {self.player_sprite.change_x:6.3f}"
        self.y_speed_display.text = f"Y Speed: {self.player_sprite.change_y:6.3f}"

    def on_key_press(self, key, modifiers):
        """Called whenever a key is pressed. """

        if key == arcade.key.UP:
            self.up_pressed = True
        elif key == arcade.key.DOWN:
            self.down_pressed = True
        elif key == arcade.key.LEFT:
            self.left_pressed = True
        elif key == arcade.key.RIGHT:
            self.right_pressed = True

    def on_key_release(self, key, modifiers):
        """Called when the user releases a key. """

        if key == arcade.key.UP:
            self.up_pressed = False
        elif key == arcade.key.DOWN:
            self.down_pressed = False
        elif key == arcade.key.LEFT:
            self.left_pressed = False
        elif key == arcade.key.RIGHT:
            self.right_pressed = False
def main():
    """ Main function """
    # Create the window, build the sprites, and enter the arcade event loop
    # (blocks until the window is closed).
    window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
    window.METHOD_NAME()
    arcade.run()


if __name__ == "__main__":
    main()
298,966 | sql | from cardpay_reward_programs.rule import Rule
from cardpay_reward_programs.utils import format_amount, get_table_dataset
class Staking(Rule):
    """
    This rule rewards users with CARD.cpxd held in a depot in a monthly basis
    """

    def __init__(
        self,
        core_parameters,
        user_defined_parameters,
    ):
        super(Staking, self).__init__(core_parameters, user_defined_parameters)

    def set_user_defined_parameters(self, token, interest_rate_monthly):
        """Record the reward token address and the monthly interest rate."""
        self.token = token
        self.interest_rate_monthly = interest_rate_monthly

    def register_tables(self):
        """Register the safe_owner and token_holder subgraph datasets with DuckDB."""
        safe_owner = get_table_dataset(
            self.subgraph_config_locations["safe_owner"], "safe_owner"
        )
        self.connection.register("safe_owner", safe_owner)
        token_holder = get_table_dataset(
            self.subgraph_config_locations["token_holder"], "token_holder"
        )
        self.connection.register("token_holder", token_holder)

    def METHOD_NAME(self):
        """Return the parameterized reward query ($1..$5, see run())."""
        return """
        -- Select only the safes we are rewarding, and their owner at the end of the cycle
        with filtered_safes AS (
            SELECT safe, max(owner) as owner
            FROM safe_owner a
            WHERE _block_number = (
                SELECT MAX(_block_number)
                FROM safe_owner b
                WHERE a.safe = b.safe
                AND _block_number::integer < $2::integer
            )
            AND type = 'depot'
            GROUP BY safe
        ),

        -- Get the balance history of the safes we are interested in, filtered to the token,
        -- joining to get the owner
        filtered_balances AS (
            SELECT tht.safe, sot.owner, tht.balance_downscale_e9_uint64::int64 AS balance_int64, tht._block_number
            FROM token_holder AS tht
            LEFT JOIN filtered_safes AS sot ON (tht.safe = sot.safe)
            WHERE token = $3::text
            AND sot.safe IS NOT NULL
        ),

        -- Get the deposits & withdrawals by comparing balances to the previous balance
        balance_changes AS (
            SELECT
                safe,
                owner,
                -- Get the change by subtracting the balance before
                balance_int64 - LAG(balance_int64, 1 ,0) OVER (PARTITION BY safe ORDER BY _block_number asc) AS change,
                $5::float+1 AS interest_rate,
                -- This is the proportion of the 'month' remaining
                ($2::integer - _block_number::integer) / $4::float AS compounding_periods,
            FROM filtered_balances
            WHERE _block_number::integer < $2::integer
            QUALIFY _block_number::integer >= $1::integer
        ),

        original_balances AS (
            SELECT
                safe,
                owner,
                -- There is only one value here after the group but logically it is the "last" balance
                -- it is called "change" to match the balance_changes CTE so we can union them together
                LAST(balance_int64) AS change,
                $5::float+1 AS interest_rate,
                1 AS compounding_periods,
            FROM filtered_balances a
            WHERE _block_number::integer < $1::integer
            AND _block_number = (
                SELECT MAX(_block_number)
                FROM filtered_balances b
                WHERE a.safe = b.safe
                AND _block_number::integer < $1::integer
            )
            GROUP BY safe, owner
        ),

        -- Combine the balances at the start of the period and
        all_data AS (SELECT * FROM original_balances UNION ALL SELECT * FROM balance_changes)

        -- Aggregate the changes, each is treated as a compounding interest calculation
        -- rate^(periods) gives you the total after growth, so we need to take 1 away to get just the growth
        -- e.g. 5% APR for two years would be 1.05^2 - 1
        -- and 5% APR for half a year would be 1.05^(1/2) - 1
        SELECT owner AS payee, sum(change * ((interest_rate**compounding_periods) - 1)) AS rewards
        FROM all_data
        GROUP BY payee
        """

    def run(self, payment_cycle: int, reward_program_id: str):
        """Compute one payment cycle's staking rewards.

        Returns a DataFrame with one row per payee, including the token
        amount (scaled to e9) and a per-row explanation blob.
        """
        self.register_tables()
        start_block, end_block = (
            payment_cycle - self.payment_cycle_length,
            payment_cycle,
        )
        # Renamed from `vars` to avoid shadowing the builtin.
        query_params = [
            start_block,  # $1 -> int
            end_block,  # $2 -> int
            self.token,  # $3 -> str
            self.payment_cycle_length,  # $4 -> int
            self.interest_rate_monthly,  # $5 -> float
        ]
        df = self.connection.execute(self.METHOD_NAME(), query_params).fetch_df()
        df["rewardProgramID"] = reward_program_id
        df["paymentCycle"] = payment_cycle
        df["validFrom"] = payment_cycle
        df["validTo"] = payment_cycle + self.duration
        df["token"] = self.token
        # Upscale from the e9-downscaled balances back to token base units.
        df["amount"] = df["rewards"] * 1_000_000_000
        df["explanationData"] = df.apply(
            lambda row: self.get_explanation_data(
                {
                    "rewardProgramID": row.rewardProgramID,
                    "payee": row.payee,
                    "paymentCycle": row.paymentCycle,
                    "validFrom": row.validFrom,
                    "validTo": row.validTo,
                    "amount": row.amount,
                    "token": row.token,
                }
            ),
            axis=1,
        )
        # BUG FIX: DataFrame.drop is not in-place; the original discarded the
        # result, so the intermediate "rewards" column leaked into the output.
        df = df.drop(columns=["rewards"])
        return df

    def get_explanation_data(self, payment):
        """Build the human-readable explanation payload for one payment row."""
        return {
            "amount": format_amount(payment["amount"]),
            "token": self.token,
            "rollover_amount": payment.get("rollover_amount"),
            "interest_rate": self.interest_rate_monthly,
            "from_block": payment["paymentCycle"] - self.payment_cycle_length,
            "to_block": payment["paymentCycle"],
        }
298,967 | render metrics onscreen | from __future__ import annotations
import abc
import dataclasses
import shutil
import typing as t
import click
import globus_sdk
from .context import should_show_server_timing
# click color names used by the bar-chart renderer below.
_BORDER_COLOR = "blue"
_FILL_COLOR = "yellow"
class ServerTimingParseError(ValueError):
    """Raised when a Server-Timing metric item cannot be parsed."""

    pass
@dataclasses.dataclass
class Metric:
    """One parsed Server-Timing metric item."""

    name: str
    # although surprising, the spec allows for metrics with no duration value
    # the canonical example is 'miss' (undecorated) to indicate a cache miss
    duration: t.Optional[float] = None
    description: t.Optional[str] = None
def maybe_show_server_timing(res: globus_sdk.GlobusHTTPResponse) -> None:
    """Render Server-Timing metrics from a response if display is enabled."""
    if not should_show_server_timing():
        return
    header_value = res.headers.get("Server-Timing")
    if not header_value:
        return
    # for now, always use the default parser and ignore malformed metric items
    # in the future, this could be extended to try different parsers in series
    METHOD_NAME(DEFAULT_PARSER.parse_metric_header(header_value, skip_errors=True))
def METHOD_NAME(metrics: list[Metric]) -> None:
    """Render metrics with durations as an ASCII bar chart on stderr.

    BUG FIX: the original crashed with IndexError when no metric had a
    duration (`items[-1]` on an empty list) and with ZeroDivisionError when
    the largest duration was 0; both cases are now handled.
    """
    click.echo("Server Timing Info", err=True)

    term_width = shutil.get_terminal_size((80, 20)).columns
    use_width = term_width - 4

    items = sorted(
        (
            (f"{m.description or m.name}={m.duration}", m.duration)
            for m in metrics
            if m.duration is not None
        ),
        key=lambda x: x[1],
    )

    hborder = click.style(f"+{'-' * (use_width + 2)}+", fg=_BORDER_COLOR)
    vborder = click.style("|", fg=_BORDER_COLOR)
    click.echo(hborder, err=True)
    if items:
        # Bars are scaled against the largest duration; guard against a
        # zero maximum so the division below is always defined.
        factor = items[-1][1] or 1
        desc_width = max(len(x[0]) for x in items) + 1
        for desc, size in items:
            desc = desc.ljust(desc_width, ".")
            bar_width = max(int((use_width - desc_width) * size / factor), 1)
            bar = "#" * bar_width
            msg = desc + click.style(bar, fg=_FILL_COLOR)
            # ANSI style codes consume string length without screen width;
            # pad to the visible width instead.
            style_char_length = len(msg) - len(click.unstyle(msg))
            msg = msg.ljust(use_width + style_char_length, " ")
            click.echo(f"{vborder} {msg} {vborder}", err=True)
    click.echo(hborder, err=True)
class ServerTimingParser(abc.ABC):
    """Base class for Server-Timing header parsers."""

    # which version of the Server-Timing spec does this parser implement?
    spec_reference: t.ClassVar[str]

    @abc.abstractmethod
    def parse_single_metric(self, metric_str: str) -> Metric:
        ...

    def parse_metric_header(
        self, header_str: str, skip_errors: bool = True
    ) -> list[Metric]:
        """Parse a comma-separated header; malformed items are skipped
        unless skip_errors is False, in which case the parse error is raised."""
        parsed: list[Metric] = []
        for raw_item in header_str.split(","):
            try:
                parsed.append(self.parse_single_metric(raw_item))
            except ServerTimingParseError:
                if not skip_errors:
                    raise
        return parsed
class Draft2017Parser(ServerTimingParser):
    """
    Parsing per the Server-Timing draft from 2017 and earlier
    The spec has changed since this draft.
    For example
        'a=1; "alpha", b=2, c, d; "delta"'
    will parse as
    Metrics:
      - name: a
        description: alpha
        duration: 1.0
      - name: b
        duration: 2.0
      - name: c
      - name: d
        description: delta
    """

    spec_reference = "https://www.w3.org/TR/2017/WD-server-timing-20171018/"

    def parse_single_metric(self, metric_str: str) -> Metric:
        # Split "name=value; description" into the base part and extras.
        pieces = [p.strip() for p in metric_str.split(";")]
        base, extras = pieces[0], pieces[1:]
        if len(extras) > 1:
            raise ServerTimingParseError(
                "Too many semicolons in timing item, cannot parse"
            )
        metric = _parse_simple_metric_part(base)
        if extras:
            # The description is quoted in the header; strip the quotes.
            metric.description = extras[0].strip('"')
        return metric
def _parse_simple_metric_part(metric: str) -> Metric:
    """Parse one 'name' or 'name=duration' token into a Metric."""
    if not metric:
        raise ServerTimingParseError("encountered empty metric")
    if "=" not in metric:
        # Undecorated metric (e.g. a bare cache 'miss' marker): no duration.
        return Metric(name=metric)
    name, _, raw_duration = metric.partition("=")
    try:
        duration = float(raw_duration)
    except ValueError as e:
        raise ServerTimingParseError("Metric value did not parse as float") from e
    return Metric(name=name.strip(), duration=duration)
# Module-wide parser instance used by maybe_show_server_timing().
DEFAULT_PARSER = Draft2017Parser()
298,968 | generatenodediff task | """
All task functions decorated with `app.task` transform the function to an instance of
`contentcuration.utils.celery.tasks.CeleryTask`. See the methods of that class for enqueuing and fetching results of
the tasks.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
import time
from celery.utils.log import get_task_logger
from django.conf import settings
from django.core.mail import EmailMessage
from django.template.loader import render_to_string
from django.utils.translation import override
from contentcuration.celery import app
from contentcuration.models import Change
from contentcuration.models import ContentNode
from contentcuration.models import User
from contentcuration.utils.csv_writer import write_user_csv
from contentcuration.utils.nodes import calculate_resource_size
from contentcuration.utils.nodes import generate_diff
from contentcuration.viewsets.user import AdminUserFilter
logger = get_task_logger(__name__)
@app.task(bind=True, name="apply_user_changes")
def apply_user_changes_task(self, user_id):
    """
    :type self: contentcuration.utils.celery.tasks.CeleryTask
    :param user_id: The user ID for which to process changes
    """
    from contentcuration.viewsets.sync.base import apply_changes

    # User-scoped changes are those not attached to any channel.
    pending = Change.objects.filter(
        applied=False, errored=False, user_id=user_id, channel__isnull=True
    )
    apply_changes(pending)
    # New changes may have arrived while we were applying; requeue so a
    # fresh task run drains them.
    if pending.exists():
        self.requeue()
@app.task(bind=True, name="apply_channel_changes")
def apply_channel_changes_task(self, channel_id):
    """
    :type self: contentcuration.utils.celery.tasks.CeleryTask
    :param channel_id: The channel ID for which to process changes
    """
    from contentcuration.viewsets.sync.base import apply_changes

    pending = Change.objects.filter(applied=False, errored=False, channel_id=channel_id)
    apply_changes(pending)
    # New changes may have arrived while we were applying; requeue so a
    # fresh task run drains them.
    if pending.exists():
        self.requeue()
class CustomEmailMessage(EmailMessage):
    """
    jayoshih: There's an issue with the django postmark backend where
    _build_message attempts to attach files as base64. However,
    the django EmailMessage attach method makes all content with a text/*
    mimetype to be encoded as a string, causing `base64.b64encode(content)`
    to fail. This is a workaround to ensure that content is still encoded as
    bytes when it comes to encoding the attachment as base64
    """

    def attach(self, filename=None, content=None, mimetype=None):
        # All three pieces are mandatory; reject the first missing one
        # (AssertionError retained for compatibility with existing callers).
        for required in (filename, content, mimetype):
            if required is None:
                raise AssertionError
        self.attachments.append((filename, content, mimetype))
@app.task(name="generateusercsv_task")
def generateusercsv_task(user_id, language=settings.LANGUAGE_CODE):
    """Export a user's account data to CSV and email it to them as an attachment."""
    # Render subject/body templates in the requested language.
    with override(language):
        user = User.objects.get(pk=user_id)
        csv_path = write_user_csv(user)
        subject = render_to_string("export/user_csv_email_subject.txt", {})
        message = render_to_string(
            "export/user_csv_email.txt",
            {
                "legal_email": settings.POLICY_EMAIL,
                "user": user,
                "edit_channels": user.editable_channels.values("name", "id"),
                "view_channels": user.view_only_channels.values("name", "id"),
            },
        )

        # CustomEmailMessage keeps attachment content as bytes (see class note).
        email = CustomEmailMessage(subject, message, settings.DEFAULT_FROM_EMAIL, [user.email])
        email.encoding = 'utf-8'
        email.attach_file(csv_path, mimetype="text/csv")

        email.send()
@app.task(name="deletetree_task")
def deletetree_task(tree_id):
    # Bulk-delete every content node belonging to the given tree.
    ContentNode.objects.filter(tree_id=tree_id).delete()
@app.task(name="getnodedetails_task")
def getnodedetails_task(node_id):
    # Compute the (potentially expensive) details payload for one node.
    node = ContentNode.objects.get(pk=node_id)
    return node.get_details()
@app.task(name="generatenodediff_task")
def METHOD_NAME(updated_id, original_id):
    # Delegate to utils.nodes.generate_diff to compare the two node trees.
    return generate_diff(updated_id, original_id)
@app.task(name="calculate_user_storage_task")
def calculate_user_storage_task(user_id):
    """Recalculate and persist the storage space used by a user."""
    try:
        user = User.objects.get(pk=user_id)
        user.set_space_used()
    except User.DoesNotExist:
        # Use the module-level task logger (consistent with the rest of this
        # module) and lazy %-style arguments instead of the root `logging`
        # module with eager .format().
        logger.error(
            "Tried to calculate user storage for user with id %s but they do not exist",
            user_id,
        )
@app.task(name="calculate_resource_size_task")
def calculate_resource_size_task(node_id, channel_id):
    # Force a fresh recalculation of the node's resource size; returns the size.
    # NOTE(review): channel_id is unused here — presumably kept for
    # task-signature compatibility; confirm against callers.
    node = ContentNode.objects.get(pk=node_id)
    size, _ = calculate_resource_size(node=node, force=True)
    return size
@app.task(name="sendcustomemails_task")
def sendcustomemails_task(subject, message, query):
    """Send a templated email to every user matched by the admin filter `query`."""
    subject = render_to_string('registration/custom_email_subject.txt', {'subject': subject})
    recipients = AdminUserFilter(data=query).qs.distinct()

    for recipient in recipients:
        # The message template may interpolate the send time and any user field.
        text = message.format(current_date=time.strftime("%A, %B %d"), current_time=time.strftime("%H:%M %Z"), **recipient.__dict__)
        text = render_to_string('registration/custom_email.txt', {'message': text})
        recipient.email_user(subject, text, settings.DEFAULT_FROM_EMAIL, )
298,969 | dump nan file | from __future__ import print_function
import sys
import logging
def eprint(*args, **kwargs):
    """print() wrapper that writes to stderr instead of stdout."""
    print(*args, file=sys.stderr, **kwargs)
# Root-logger setup: stream everything at ERROR and above to the console.
console = logging.StreamHandler()
logger = logging.getLogger()
logger.addHandler(console)
logger.setLevel(logging.ERROR)
# Global two-pass assembler state.
labels = {}      # symbol table: identifier -> 16-bit value
program = []     # assembled output words as hex strings (filled in pass 2)
imm_expr = []    # immediate-value expressions collected in pass 1
imm_dict = {}    # pass 2: immediate value -> address of its storage slot
imm_values = []
nr = 0           # current source line number, used in diagnostics
def _get_value_imm(str):
    """Resolve an operand that may be an immediate value ('#'-prefixed).

    Two-pass behavior: in pass 1 the immediate expression is only collected
    into imm_expr (returns 0); in pass 2 it is resolved through imm_dict to
    the address of its storage slot. Non-immediate operands defer to
    _get_value().
    """
    if str[0] == '#':
        if phase == 1:
            if not str[1:] in imm_expr: # phase 1
                imm_expr.append(str[1:])
            return 0
        else: # phase == 2
            return imm_dict[_get_value(str[1:])] # phase 2
    # not an immediate value
    return _get_value(str)
def _get_value(str):
if str[0] == '#':
raise ValueError('Not allowed to use immediate value. Line %d' % nr)
if str[0] == '$':
val = int(str[1:], 16)
elif str[0:2] == '0x':
val = int(str[2:], 16)
else:
try:
val = int(str)
except ValueError:
try:
val = labels[str]
except KeyError:
if phase == 2:
raise NameError('Unknown indentifier ' + str)
val = 0
return val & 0xFFFF
def _output_direct(data):
    """Emit one 16-bit word: always advances PC, appends output in pass 2 only."""
    global pc
    pc += 1
    if phase == 2:
        program.append("%04X" % data)
def add_label(line):
    """Define a label: 'name = expr' assigns a value, a bare name takes the current PC.

    Duplicate definitions are only rejected in pass 1 (pass 2 re-walks the source).
    """
    logger.debug("add label '%s'. Pass %d. PC = %d" % (line, phase, pc))
    parts = [piece.strip() for piece in line.split('=')]
    if (phase == 1) and (parts[0] in labels):
        raise NameError("Label '%s' already exists." % parts[0])
    if len(parts) > 1:
        labels[parts[0]] = _get_value(parts[1])
    else:
        labels[parts[0]] = pc
##########################################################################
##
## PARSE rules for each opcode
##
##########################################################################
def _addr(params, mnem, code):
    """Encode an opcode with a 10-bit direct-address operand (no immediates)."""
    addr = _get_value(params)
    if addr > 0x3FF:  # operand field is 10 bits
        eprint ("Error, address too large: %03x: %s $%03x" % (pc, mnem, addr))
        sys.exit(1)
    code |= addr
    logger.info("PC: %03x: %04x | %s $%03x" % (pc, code, mnem, addr))
    _output_direct(code)
    return code
def _addr_imm(params, mnem, code):
    """Like _addr(), but the operand may be an immediate ('#expr') value."""
    addr = _get_value_imm(params)
    if addr > 0x3FF:  # operand field is 10 bits
        eprint ("Error, address too large: %03x: %s $%03x" % (pc, mnem, addr))
        sys.exit(1)
    code |= addr
    logger.info("PC: %03x: %04x | %s $%03x" % (pc, code, mnem, addr))
    _output_direct(code)
    return code
def _addr_rel(params, mnem, code):
    """Encode a relative-addressing opcode: 'BASE,OFFSET'.

    BASE must be one of the eight pointer locations $3F8-$3FF (only its
    low 3 bits are encoded); OFFSET is an 8-bit value stored in bits 3-10.
    The base-pointer range check only runs in pass 2, when labels are known.
    """
    parsed = params.split(',')
    if len(parsed) < 2:
        eprint ("Line %d: Syntax error in relative addressing mode: %03x: %s %s" % (nr, pc, mnem, params))
        sys.exit(1)
    addr = _get_value(parsed[0])
    if phase == 2:
        if (addr < 0x3F8) or (addr > 0x3FF):
            eprint ("Line %d: Relative addressing base pointers shall be at $3F8-$3FF: %03x: %s $%03x" % (nr, pc, mnem, addr))
            sys.exit(1)
    offset = _get_value(parsed[1])
    if offset > 0xFF:
        eprint ("Line %d: Error, offset too large: %03x: %s $%03x,$%02x" % (nr, pc, mnem, addr, offset))
        sys.exit(1)
    code |= (addr & 0x07)      # low 3 bits select the base pointer
    code |= (offset << 3)      # 8-bit offset in bits 3..10
    logger.info("PC: %03x: %04x | %s $%03x,$%02x" % (pc, code, mnem, addr, offset))
    _output_direct(code)
    return code
def _data(params, mnem, code):
    """Emit a literal 16-bit data word (the '.dw' directive)."""
    data = _get_value(params)
    logger.info("PC: %03x: %s $%04x" % (pc, mnem, data))
    _output_direct(data)
    return data
def _block(params, mnem, code):
    """Reserve *params* zero-filled words (the '.blk' directive)."""
    length = _get_value(params)
    logger.info("PC: %03x: %s $%04x" % (pc, mnem, length))
    for i in range(length):
        _output_direct(0)
    return 0
def _addr_io(params, mnem, code):
    """Encode an I/O opcode with an 8-bit port address operand."""
    addr = _get_value(params)
    if addr > 0xFF:  # I/O address field is only 8 bits
        eprint ("Error, address too large: %03x: %s $%03x" % (pc, mnem, addr))
        sys.exit(1)
    code |= addr
    logger.info("PC: %03x: %04x | %s $%03x" % (pc, code, mnem, addr))
    _output_direct(code)
    return code
def _no_addr(params, mnem, code):
    """Encode an opcode that takes no operand (e.g. RET)."""
    logger.info("PC: %03x: %04x | %s" % (pc, code, mnem))
    _output_direct(code)
    return code
def unknown_mnem(params):
    """Report an unknown mnemonic to stderr.

    NOTE(review): appears unused in this file — parse_lines raises
    NameError directly on a KeyError instead; confirm before removing.
    """
    eprint ("Unknown mnemonic: '%s'" % params)
def dump_bram_init():
    """Print the program as INIT_xx => X"..." lines.

    The 2048-byte image holds each word little-endian (low byte first).
    Each INIT row covers 32 bytes (= 16 instructions), printed high byte
    first within the row, so output stops once the program is covered.
    Presumably consumed as FPGA BRAM initialization generics — the format
    matches VHDL generic-map syntax; confirm against the hardware project.
    """
    bram = [0]*2048
    for i in range(len(program)):
        inst = int(program[i], 16)
        bram[2*i+0] = inst & 0xFF
        bram[2*i+1] = (inst >> 8) & 0xFF
    for i in range(64):
        if (i*16) >= len(program):  # 16 instructions per 32-byte INIT row
            break
        hx = ''
        for j in range(31,-1,-1):
            hx = hx + "%02X" % bram[i*32+j]
        print (" INIT_%02X => X\"%s\"," % (i, hx))
def METHOD_NAME(filename):
    """Write the assembled program to *filename* as little-endian 16-bit words.

    Each entry in the global `program` list is a 4-hex-digit string; it is
    emitted low byte first, matching the byte order used by dump_bram_init().
    """
    # 'with' guarantees the file is closed even if a conversion fails
    # (the original leaked the handle on any exception).
    with open(filename, "wb") as f:
        for word in program:
            inst = int(word, 16)
            f.write(bytes((inst & 0xFF, (inst >> 8) & 0xFF)))
# Mnemonic table: name -> (operand parser/encoder, base opcode bits).
# The handler ORs the operand into the base value and emits the word.
mnemonics = {
    'LOAD' : ( _addr_imm, 0x0800 ),
    'STORE' : ( _addr, 0x8000 ),
    'LOADI' : ( _addr_rel, 0x8800 ),
    'STORI' : ( _addr_rel, 0x9000 ),
    'OR' : ( _addr_imm, 0x1800 ),
    'AND' : ( _addr_imm, 0x2800 ),
    'XOR' : ( _addr_imm, 0x3800 ),
    'ADD' : ( _addr_imm, 0x4800 ),
    'SUB' : ( _addr_imm, 0x5800 ),
    'CMP' : ( _addr_imm, 0x5000 ),
    'ADDC' : ( _addr_imm, 0x6800 ),
    'INP' : ( _addr_io, 0x7800 ),
    'OUTP' : ( _addr_io, 0xA000 ),
    'RET' : ( _no_addr, 0xB800 ),
    'BEQ' : ( _addr, 0xC000 ),
    'BNE' : ( _addr, 0xC800 ),
    'BMI' : ( _addr, 0xD000 ),
    'BPL' : ( _addr, 0xD800 ),
    'BRA' : ( _addr, 0XE000 ),
    'CALL' : ( _addr, 0xE800 ),
    'BCS' : ( _addr, 0xF000 ),
    'BCC' : ( _addr, 0xF800 ),
    # assembler directives share the table; their base value is unused
    '.dw' : ( _data, 0x0000 ),
    '.blk' : ( _block, 0x0000 )
}
def parse_lines(lines):
    """Assemble one pass over all source lines.

    Line format: ';' starts a comment; a line beginning in column 0 is a
    label definition; an indented line is 'MNEMONIC [OPERANDS]'. Behavior
    depends on the global `phase`: pass 1 only collects labels/immediates
    and advances the PC, pass 2 also emits code and echoes a listing.
    """
    global nr
    nr = 0
    for line in lines:
        nr = nr + 1
        line = line.rstrip()
        # strip trailing comment
        comm = line.split(';', 1)
        line = comm[0]
        if(line.strip() == ''):
            continue
        line_strip = line.strip()
        # non-indented line => label definition, not an instruction
        if (line[0] != ' ') and (line[0] != '\t'):
            add_label(line.rstrip())
            if (phase == 2):
                print (" ", line)
            continue
        #print "Line: '%s'" % line_strip
        line_split = line_strip.split(" ", 1)
        if len(line_split) == 1:
            line_split.append("")  # mnemonic with no operand
        mnem = line_split[0];
        try:
            (f, code) = mnemonics[mnem]
        except KeyError as e:
            raise NameError("Unknown Mnemonic %s in line %d" % (mnem, nr))
        try:
            code = f(line_split[1].strip(), mnem, code)
        except IndexError as e:
            raise ValueError("Value error in line %d" % (nr,))
        if (phase == 2):
            # pc already advanced by _output_direct, hence pc-1
            print ("%03X: %04X | " % (pc-1, code),line)
def resolve_immediates():
    """Assign storage addresses (after the program) to pass-1 immediates.

    Runs between the two passes. Expressions are evaluated first, so
    distinct expressions with equal values share a single storage word
    (imm_dict is keyed by the evaluated value). Advances the global PC
    by one word per unique value.
    """
    global pc
    for imm in imm_expr:
        imm_dict[_get_value(imm)] = 0;
    for imm in imm_dict:
        imm_dict[imm] = pc
        imm_values.append(imm)
        pc += 1
    #print imm_expr
    #print imm_dict
    #print imm_values
if __name__ == "__main__":
    # Defaults; optional positional overrides: input file, then output file.
    inputfile = 'nano_code.nan'
    outputfile = 'nano_code.b'
    if len(sys.argv) > 1:
        inputfile = sys.argv[1]
    if len(sys.argv) > 2:
        outputfile = sys.argv[2]
    # Read the whole source once; both passes iterate the same lines.
    # ('with' fixes the leaked file handle of the original.)
    with open(inputfile, 'r') as f:
        lines = f.readlines()
    pc = 0
    phase = 1
    logger.info("Pass 1...")
    parse_lines(lines)
    # print labels
    resolve_immediates()
    pc = 0
    phase = 2
    logger.info("Pass 2...")
    logger.setLevel(logging.WARN)
    parse_lines(lines)
    # Append the immediate-value pool after the program body.
    for imm in imm_values:
        logger.info("PC: %03x: .dw $%04x" % (pc, imm))
        print("%03X: %04X | IMM #%d" % (pc, imm, imm))
        _output_direct(imm)
    dump_bram_init()
    METHOD_NAME(outputfile)
298,970 | test knative counters | import logging
import os
import sys
import pytest
import tests.integration.manifests as integration_manifests
from ambassador import IR, Config
from ambassador.fetch import ResourceFetcher
from ambassador.utils import NullSecretHandler, parse_bool
from kat.harness import is_knative_compatible
from tests.integration.utils import create_qotm_mapping, get_code_with_retry, install_ambassador
from tests.kubeutils import apply_kube_artifacts
from tests.manifests import qotm_manifests
from tests.runutils import run_and_assert
logger = logging.getLogger("ambassador")
# knative_service_example gets applied to the cluster with `kubectl --namespace=knative-testing
# apply`; we therefore DO NOT explicitly set the 'namespace:' because --namespace will imply it, and
# explicitly setting anything only adds room for something else to go wrong.
knative_service_example = """
---
apiVersion: serving.knative.dev/v1alpha1
kind: Service
metadata:
name: helloworld-go
spec:
template:
spec:
containers:
- image: gcr.io/knative-samples/helloworld-go
env:
- name: TARGET
value: "Ambassador is Awesome"
"""
# knative_ingress_example is not applied to the cluster, but is instead fed directly to the
# ResourceFetcher; so we MUST explicitly set the namespace, because we can't rely on kubectl and/or
# the apiserver to auto-populate it for us.
knative_ingress_example = """
apiVersion: networking.internal.knative.dev/v1alpha1
kind: Ingress
metadata:
name: helloworld-go
namespace: default
spec:
rules:
- hosts:
- helloworld-go.default.svc.cluster.local
http:
paths:
- retries:
attempts: 3
perTryTimeout: 10m0s
splits:
- percent: 100
serviceName: helloworld-go-qf94m
servicePort: 80
timeout: 10m0s
visibility: ClusterLocal
visibility: ExternalIP
"""
class KnativeTesting:
    """End-to-end Knative + Ambassador integration flow.

    Installs Knative serving, points it at Ambassador's ingress class,
    deploys Ambassador and the QOTM backend, then verifies routing both
    for the plain mapping and for a Knative Service via its Host header.
    """

    @staticmethod
    def _kubectl_wait_ready(selector, namespace):
        """Block until pods matching `-l <selector>` in *namespace* are Ready (90s timeout).

        Extracted from six copy-pasted kubectl invocations in the original.
        """
        run_and_assert(
            [
                "tools/bin/kubectl",
                "wait",
                "--timeout=90s",
                "--for=condition=Ready",
                "pod",
                "-l",
                selector,
                "-n",
                namespace,
            ]
        )

    def test_knative(self):
        namespace = "knative-testing"

        # Install Knative CRDs and the serving control plane.
        apply_kube_artifacts(
            namespace=None, artifacts=integration_manifests.load("knative_serving_crds")
        )
        apply_kube_artifacts(
            namespace="knative-serving",
            artifacts=integration_manifests.load("knative_serving_0.18.0"),
        )
        # Point Knative at Ambassador's ingress class.
        run_and_assert(
            [
                "tools/bin/kubectl",
                "patch",
                "configmap/config-network",
                "--type",
                "merge",
                "--patch",
                r'{"data": {"ingress.class": "ambassador.ingress.networking.knative.dev"}}',
                "-n",
                "knative-serving",
            ]
        )

        # Wait for the Knative serving control-plane pods to become ready.
        for app in ("activator", "controller", "webhook", "autoscaler"):
            self._kubectl_wait_ready("app=%s" % app, "knative-serving")

        # Install Ambassador with Knative support enabled.
        install_ambassador(
            namespace=namespace, envs=[{"name": "AMBASSADOR_KNATIVE_SUPPORT", "value": "true"}]
        )

        # Install QOTM and its mapping.
        apply_kube_artifacts(namespace=namespace, artifacts=qotm_manifests)
        create_qotm_mapping(namespace=namespace)

        # Now let's wait for ambassador and QOTM pods to become ready.
        self._kubectl_wait_ready("service=ambassador", namespace)
        self._kubectl_wait_ready("service=qotm", namespace)

        # Create kservice
        apply_kube_artifacts(namespace=namespace, artifacts=knative_service_example)

        # Assume we can reach Ambassador through telepresence
        qotm_host = "ambassador." + namespace

        # Assert 200 OK at /qotm/ endpoint
        qotm_url = f"http://{qotm_host}/qotm/"
        code = get_code_with_retry(qotm_url)
        assert code == 200, f"Expected 200 OK, got {code}"
        print(f"{qotm_url} is ready")

        # Assert 200 OK at / with Knative Host header and 404 with other/no header
        kservice_url = f"http://{qotm_host}/"
        code = get_code_with_retry(kservice_url)
        assert code == 404, f"Expected 404, got {code}"
        print(f"{kservice_url} returns 404 with no host")

        code = get_code_with_retry(kservice_url, headers={"Host": "random.host.whatever"})
        assert code == 404, f"Expected 404, got {code}"
        print(f"{kservice_url} returns 404 with a random host")

        # Wait for kservice
        run_and_assert(
            [
                "tools/bin/kubectl",
                "wait",
                "--timeout=90s",
                "--for=condition=Ready",
                "ksvc",
                "helloworld-go",
                "-n",
                namespace,
            ]
        )

        # kservice pod takes some time to spin up, so let's try a few times
        code = 0  # was '000' — same value, clearer literal
        host = f"helloworld-go.{namespace}.example.com"
        for _ in range(5):
            code = get_code_with_retry(kservice_url, headers={"Host": host})
            if code == 200:
                break
        assert code == 200, f"Expected 200, got {code}"
        print(f"{kservice_url} returns 200 OK with host helloworld-go.{namespace}.example.com")
def METHOD_NAME():
    """Feed the sample Knative Ingress through the fetcher/IR pipeline and
    check the feature counters: one knative_ingress, zero cluster_ingress."""
    aconf = Config()
    fetcher = ResourceFetcher(logger, aconf)
    fetcher.parse_yaml(knative_ingress_example, k8s=True)
    aconf.load_all(fetcher.sorted())
    # Null secret handler: no real secrets are needed to build the IR here.
    secret_handler = NullSecretHandler(logger, None, None, "0")
    ir = IR(aconf, secret_handler=secret_handler)
    feats = ir.features()
    assert feats["knative_ingress_count"] == 1, f"Expected a Knative ingress, did not find one"
    assert (
        feats["cluster_ingress_count"] == 0
    ), f"Expected no Knative cluster ingresses, found at least one"
@pytest.mark.flaky(reruns=1, reruns_delay=10)
def test_knative():
    """Pytest entry point; xfails unless explicitly enabled and Knative-compatible."""
    if not parse_bool(os.environ.get("AMBASSADOR_PYTEST_KNATIVE_TEST", "false")):
        pytest.xfail("AMBASSADOR_PYTEST_KNATIVE_TEST is not set, xfailing...")
    if is_knative_compatible():
        knative_test = KnativeTesting()
        knative_test.test_knative()
    else:
        pytest.xfail("Knative is not supported")
if __name__ == "__main__":
    # Run this module's tests directly (removes trailing dataset residue
    # that broke the original line).
    pytest.main(sys.argv)
298,971 | get queryset | # (c) 2012-2019, Ansible by Red Hat
#
# This file is part of Ansible Galaxy
#
# Ansible Galaxy is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by
# the Apache Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Ansible Galaxy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License
import collections
from django.db.models import fields
from django.db.models import F, Value
from galaxy.api import base
from galaxy.api.internal.serializers.imports import (
TYPE_REPOSITORY, TYPE_COLLECTION, SERIALIZER_BY_TYPE
)
from galaxy.main import models
__all__ = (
'NamespaceImportsList',
)
class NamespaceImportsList(base.ListAPIView):
    """List collection and repository imports for a namespace.

    Rows from the two underlying models are reduced to (pk, type,
    started_at), merged (UNION ALL when no type filter is given), ordered
    newest-first, paginated, and only then bulk-loaded and serialized.
    """

    # Base querysets used to bulk-load full objects once the paginated
    # (pk, type) pairs are known.
    QS_BY_TYPE = {
        TYPE_COLLECTION: (
            models.CollectionImport.objects
            .select_related('pulp_task', 'namespace')
            .all()
        ),
        TYPE_REPOSITORY: (
            models.ImportTask.objects
            .select_related('repository__provider_namespace__namespace')
            .all()
        ),
    }

    def list(self, request, *args, **kwargs):
        """Paginate the merged queryset, attach objects, return serialized page."""
        items = self.paginate_queryset(self.METHOD_NAME())
        self.load_objects(items)
        result = self.serialize_objects(items)
        return self.get_paginated_response(result)

    def METHOD_NAME(self):
        """Return the (pk, type, started_at) queryset for the requested type.

        With no (or an unrecognized) ``type`` query param, both types are
        combined with UNION ALL.
        """
        task_type = self.request.query_params.get('type')
        if task_type == TYPE_COLLECTION:
            qs = self.get_collection_queryset()
        elif task_type == TYPE_REPOSITORY:
            qs = self.get_repsository_queryset()
        else:
            qs = self.get_collection_queryset()
            qs = qs.union(self.get_repsository_queryset(), all=True)
        return qs.order_by('-started_at')

    def get_collection_queryset(self):
        """Collection imports for the namespace as (pk, type, started_at) rows."""
        namespace_id = self.kwargs['namespace_id']
        qs = (
            models.CollectionImport.objects
            .filter(namespace_id=namespace_id)
            .annotate(type=Value(TYPE_COLLECTION,
                                 output_field=fields.CharField()),
                      started_at=F('pulp_task__started_at'))
            .values('pk', 'type', 'started_at')
        )
        name = self.request.query_params.get('name')
        if name:
            qs = qs.filter(name__icontains=name)
        return qs

    def get_repsository_queryset(self):
        """Repository imports for the namespace as (pk, type, started_at) rows.

        NOTE: the method name misspells 'repository'; kept for backward
        compatibility — see get_repository_queryset() below.
        """
        namespace_id = self.kwargs['namespace_id']
        qs = (
            models.ImportTask.objects
            .filter(
                repository__provider_namespace__namespace_id=namespace_id)
            .annotate(type=Value(TYPE_REPOSITORY,
                                 output_field=fields.CharField()),
                      started_at=F('started'))
            .values('pk', 'type', 'started_at')
            # .order_by() clears any default ordering before the union —
            # presumably required for the combined queryset; confirm.
            .order_by()
        )
        name = self.request.query_params.get('name')
        if name:
            qs = qs.filter(repository__name__icontains=name)
        return qs

    def get_repository_queryset(self):
        """Correctly spelled alias for the historical get_repsository_queryset()."""
        return self.get_repsository_queryset()

    def load_objects(self, records):
        """Attach the full model instance to each (pk, type) record, one query per type."""
        to_load = collections.defaultdict(list)
        for record in records:
            to_load[record['type']].append(record['pk'])
        loaded = {}
        for tp, ids in to_load.items():
            loaded[tp] = {
                obj.pk: obj for obj in self.QS_BY_TYPE[tp].filter(pk__in=ids)}
        for record in records:
            pk = record['pk']
            tp = record['type']
            record['object'] = loaded[tp][pk]

    def serialize_objects(self, records):
        """Yield serialized data for each record using its type's serializer."""
        for record in records:
            tp = record['type']
            obj = record['object']
            serializer_class = SERIALIZER_BY_TYPE[tp]
            serializer = serializer_class(
                obj, context={'request': self.request})
            yield serializer.data
298,972 | content item identifiers | """
@generated by mypy-protobuf. Do not edit manually!
isort:skip_file
"""
import builtins
import collections.abc
import google.protobuf.descriptor
import google.protobuf.internal.containers
import google.protobuf.internal.extension_dict
import google.protobuf.message
import pyatv.protocols.mrp.protobuf.PlaybackQueueContext_pb2
import pyatv.protocols.mrp.protobuf.PlayerPath_pb2
import pyatv.protocols.mrp.protobuf.ProtocolMessage_pb2
import sys
if sys.version_info >= (3, 8):
import typing as typing_extensions
else:
import typing_extensions
DESCRIPTOR: google.protobuf.descriptor.FileDescriptor
@typing_extensions.final
class PlaybackQueueRequestMessage(google.protobuf.message.Message):
    """Generated type stub for the MRP PlaybackQueueRequestMessage protobuf.

    Do not hand-edit field definitions — regenerate from the .proto
    (see the module header). METHOD_NAME is the repeated
    contentItemIdentifiers field, as the FIELD_NUMBER constant and
    ClearField literals below show.
    """
    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    # Generated field-number constants (one per proto field).
    LOCATION_FIELD_NUMBER: builtins.int
    LENGTH_FIELD_NUMBER: builtins.int
    INCLUDEMETADATA_FIELD_NUMBER: builtins.int
    ARTWORKWIDTH_FIELD_NUMBER: builtins.int
    ARTWORKHEIGHT_FIELD_NUMBER: builtins.int
    INCLUDELYRICS_FIELD_NUMBER: builtins.int
    INCLUDESECTIONS_FIELD_NUMBER: builtins.int
    INCLUDEINFO_FIELD_NUMBER: builtins.int
    INCLUDELANGUAGEOPTIONS_FIELD_NUMBER: builtins.int
    CONTEXT_FIELD_NUMBER: builtins.int
    REQUESTID_FIELD_NUMBER: builtins.int
    CONTENTITEMIDENTIFIERS_FIELD_NUMBER: builtins.int
    RETURNCONTENTITEMASSETSINUSERCOMPLETION_FIELD_NUMBER: builtins.int
    PLAYERPATH_FIELD_NUMBER: builtins.int
    CACHINGPOLICY_FIELD_NUMBER: builtins.int
    LABEL_FIELD_NUMBER: builtins.int
    ISLEGACYNOWPLAYINGINFOREQUEST_FIELD_NUMBER: builtins.int

    # Scalar fields.
    location: builtins.int
    length: builtins.int
    includeMetadata: builtins.bool
    artworkWidth: builtins.float
    artworkHeight: builtins.float
    includeLyrics: builtins.bool
    includeSections: builtins.bool
    includeInfo: builtins.bool
    includeLanguageOptions: builtins.bool
    # Message and repeated fields are exposed as read-only properties.
    @property
    def context(self) -> pyatv.protocols.mrp.protobuf.PlaybackQueueContext_pb2.PlaybackQueueContext: ...
    requestID: builtins.str
    @property
    def METHOD_NAME(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.str]: ...
    returnContentItemAssetsInUserCompletion: builtins.bool
    @property
    def playerPath(self) -> pyatv.protocols.mrp.protobuf.PlayerPath_pb2.PlayerPath: ...
    cachingPolicy: builtins.int
    label: builtins.str
    isLegacyNowPlayingInfoRequest: builtins.bool
    def __init__(
        self,
        *,
        location: builtins.int | None = ...,
        length: builtins.int | None = ...,
        includeMetadata: builtins.bool | None = ...,
        artworkWidth: builtins.float | None = ...,
        artworkHeight: builtins.float | None = ...,
        includeLyrics: builtins.bool | None = ...,
        includeSections: builtins.bool | None = ...,
        includeInfo: builtins.bool | None = ...,
        includeLanguageOptions: builtins.bool | None = ...,
        context: pyatv.protocols.mrp.protobuf.PlaybackQueueContext_pb2.PlaybackQueueContext | None = ...,
        requestID: builtins.str | None = ...,
        METHOD_NAME: collections.abc.Iterable[builtins.str] | None = ...,
        returnContentItemAssetsInUserCompletion: builtins.bool | None = ...,
        playerPath: pyatv.protocols.mrp.protobuf.PlayerPath_pb2.PlayerPath | None = ...,
        cachingPolicy: builtins.int | None = ...,
        label: builtins.str | None = ...,
        isLegacyNowPlayingInfoRequest: builtins.bool | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["artworkHeight", b"artworkHeight", "artworkWidth", b"artworkWidth", "cachingPolicy", b"cachingPolicy", "context", b"context", "includeInfo", b"includeInfo", "includeLanguageOptions", b"includeLanguageOptions", "includeLyrics", b"includeLyrics", "includeMetadata", b"includeMetadata", "includeSections", b"includeSections", "isLegacyNowPlayingInfoRequest", b"isLegacyNowPlayingInfoRequest", "label", b"label", "length", b"length", "location", b"location", "playerPath", b"playerPath", "requestID", b"requestID", "returnContentItemAssetsInUserCompletion", b"returnContentItemAssetsInUserCompletion"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["artworkHeight", b"artworkHeight", "artworkWidth", b"artworkWidth", "cachingPolicy", b"cachingPolicy", "contentItemIdentifiers", b"contentItemIdentifiers", "context", b"context", "includeInfo", b"includeInfo", "includeLanguageOptions", b"includeLanguageOptions", "includeLyrics", b"includeLyrics", "includeMetadata", b"includeMetadata", "includeSections", b"includeSections", "isLegacyNowPlayingInfoRequest", b"isLegacyNowPlayingInfoRequest", "label", b"label", "length", b"length", "location", b"location", "playerPath", b"playerPath", "requestID", b"requestID", "returnContentItemAssetsInUserCompletion", b"returnContentItemAssetsInUserCompletion"]) -> None: ...
PLAYBACKQUEUEREQUESTMESSAGE_FIELD_NUMBER: builtins.int
playbackQueueRequestMessage: google.protobuf.internal.extension_dict._ExtensionFieldDescriptor[pyatv.protocols.mrp.protobuf.ProtocolMessage_pb2.ProtocolMessage, global___PlaybackQueueRequestMessage] |
298,973 | info | #! /usr/bin/python
"""ServiceGen
Generate ohnet service providers from UPnP Service XML, This uses
a python 'template' file (passed as parameter) to define the output
file format
"""
import importlib
import optparse
import os
import sys
import xmltodict
class FatalError(Exception):
    """Unrecoverable error; its string form is tagged 'FATAL ERROR: <msg>'."""

    def __init__(self, aMsg):
        self.msg = aMsg

    def __str__(self):
        return 'FATAL ERROR: {0}'.format(self.msg)
class Logger:
    """Minimal console logger: plain info, 'ERROR:'-prefixed errors, fatal raise."""

    def __init__(self):
        pass

    @staticmethod
    def METHOD_NAME(aMsg):
        """Print an informational message verbatim."""
        print(aMsg)

    @staticmethod
    def Error(aMsg):
        """Print a message with an 'ERROR: ' prefix."""
        print('ERROR: {0}'.format(aMsg))

    @staticmethod
    def Fatal(aMsg):
        """Abort via FatalError; traceback suppressed so prior log output stays visible."""
        sys.tracebacklimit = 0
        raise FatalError(aMsg)
class ServiceGen:
    """Generate an ohnet service provider from UPnP service XML.

    Construction runs the whole pipeline: parse and validate CLI options,
    read the service XML, hand the parsed description to the selected
    template module, and write the generated file to the output directory.
    """

    def __init__(self):
        self.log = Logger()
        self.opts = None
        # User input
        self._ParseOpts()
        self._CheckOpts()
        self._LogInfo()
        # Generate output from XML+template
        with open(self.opts.xml, 'r') as f:
            xml = f.read()
        desc = xmltodict.parse(xml)
        # BUG FIX: the original used self.opts.template.strip('.py'), which
        # strips any leading/trailing '.', 'p' or 'y' characters and corrupts
        # names like 'copy.py' -> 'co'. Remove only a trailing '.py' suffix.
        template_name = self.opts.template
        if template_name.endswith('.py'):
            template_name = template_name[:-3]
        template = importlib.import_module('Templates.' + template_name)
        filename, lines = template.Generate(desc['scpd'], self.opts.domain, self.opts.type, int(self.opts.version))  # NOQA
        filepath = os.path.join(self.opts.output, filename)
        with open(filepath, 'wt') as f:
            for line in lines:
                f.write(line)
                f.write('\n')
        self.log.METHOD_NAME('....Completed')

    #
    # Handle command line options
    #
    def _ParseOpts(self):
        """Parse the command-line options"""
        parser = optparse.OptionParser()
        parser.add_option('-t', '--template-file', dest='template', default='', help='Template file which defines output format')
        parser.add_option('-o', '--output-path', dest='output', default='', help='Path for output file generated by this utility')
        parser.add_option('-x', '--service-xml-file', dest='xml', default='', help='Service XML file')
        parser.add_option('-d', '--service-domain', dest='domain', default='', help='Domain of service (eg. av.openhome.org)')
        parser.add_option('-y', '--service-type', dest='type', default='', help='Type of service (eg. Playlist)')
        # '--service-versionr' (sic) kept: renaming it would break existing scripts
        parser.add_option('-v', '--service-versionr', dest='version', default='', help='Version of service (eg. 1)')
        (self.opts, args) = parser.parse_args()

    def _CheckOpts(self):
        """Check validity of options / existence of required files"""
        optsOk = True
        if not os.path.exists(os.path.join(os.path.dirname(__file__), 'Templates', self.opts.template)):
            self.log.Error('Invalid template file:- ' + self.opts.template)
            optsOk = False
        if not os.path.isdir(self.opts.output):
            try:
                os.makedirs(self.opts.output)
            except OSError:
                # narrowed from a bare 'except:'; only filesystem errors expected
                self.log.Fatal('Unable to create output path:- ' + self.opts.output)
        if not os.path.exists(self.opts.xml):
            self.log.Error('Invalid Service XML file:- ' + self.opts.xml)
            optsOk = False
        if self.opts.domain == '':
            self.log.Error('Service domain MUST be defined (-d parameter)')
            optsOk = False
        if self.opts.type == '':
            self.log.Error('Service type MUST be defined (-y parameter)')
            optsOk = False
        if self.opts.version == '':
            self.log.Error('Service version MUST be defined (-v parameter)')
            optsOk = False
        else:
            try:
                int(self.opts.version)
            except ValueError:
                # narrowed from a bare 'except:'; int(str) only raises ValueError
                self.log.Error('Service version MUST be an integer')
                optsOk = False
        if not optsOk:
            self.log.Fatal('Command line option check FAILED')

    def _LogInfo(self):
        """Announce which XML file and template are being processed."""
        self.log.METHOD_NAME('Processing {0} using {1}'.format(self.opts.xml, self.opts.template))
if __name__ == '__main__':
    # Script entry point (trailing dataset residue removed from the original line).
    s = ServiceGen()
298,974 | on 204 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
# Generated by aaz-dev-tools — structure (command class + nested HTTP
# operation) follows the generator's conventions; regenerate rather than
# hand-edit where possible.
@register_command(
    "mesh app delete",
    is_preview=True,
    confirmation="Are you sure you want to perform this operation?",
)
class Delete(AAZCommand):
    """Delete a Service Fabric Mesh application.
    """

    # Target ARM resource and API version used by the operation below.
    _aaz_info = {
        "version": "2018-09-01-preview",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.servicefabricmesh/applications/{}", "2018-09-01-preview"],
        ]
    }

    def _handler(self, command_args):
        # Standard AAZ flow: parent parses args, then we run our operations.
        super()._handler(command_args)
        self._execute_operations()
        return None

    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        # Schema is built once and cached on the class.
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""
        _args_schema = cls._args_schema
        _args_schema.name = AAZStrArg(
            options=["-n", "--name"],
            help="The name of the application.",
            required=True,
            id_part="name",
        )
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        return cls._args_schema

    def _execute_operations(self):
        self.pre_operations()
        self.ApplicationDelete(ctx=self.ctx)()
        self.post_operations()

    @register_callback
    def pre_operations(self):
        pass

    @register_callback
    def post_operations(self):
        pass

    class ApplicationDelete(AAZHttpOperation):
        # Issues the DELETE request; 200/202/204 are all treated as success
        # (METHOD_NAME below is the generated on_204 handler).
        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            if session.http_response.status_code in [200]:
                return self.on_200(session)
            if session.http_response.status_code in [202]:
                return self.on_202(session)
            if session.http_response.status_code in [204]:
                return self.METHOD_NAME(session)
            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabricMesh/applications/{applicationResourceName}",
                **self.url_parameters
            )

        @property
        def method(self):
            return "DELETE"

        @property
        def error_format(self):
            return "ODataV4Format"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "applicationResourceName", self.ctx.args.name,
                    skip_quote=True,
                    required=True,
                ),
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2018-09-01-preview",
                    required=True,
                ),
            }
            return parameters

        # Success handlers: delete returns no body, so nothing to parse.
        def on_200(self, session):
            pass

        def on_202(self, session):
            pass

        def METHOD_NAME(self, session):
            pass
class _DeleteHelper:
    """Helper class for Delete"""


# Trailing dataset residue removed from the original __all__ line.
__all__ = ["Delete"]
298,975 | delete user | from typing import Any, Dict, Generator
import pydash
import pytest
import requests
from faker import Faker
from requests import Response
from sqlalchemy.orm import Session
from starlette.status import HTTP_204_NO_CONTENT
from fides.api.cryptography import cryptographic_util
from fides.api.db import session
from fides.api.models.connectionconfig import (
AccessLevel,
ConnectionConfig,
ConnectionType,
)
from fides.api.models.datasetconfig import DatasetConfig
from fides.api.models.sql_models import Dataset as CtlDataset
from fides.api.util.saas_util import (
load_config_with_replacement,
load_dataset_with_replacement,
)
from tests.ops.test_helpers.saas_test_utils import poll_for_existence
from tests.ops.test_helpers.vault_client import get_secrets
secrets = get_secrets("domo")
faker = Faker()
@pytest.fixture(scope="session")
def domo_secrets(saas_config):
    """Domo connector secrets from saas_config, falling back to Vault values."""
    return {
        "domain": pydash.get(saas_config, "domo.domain") or secrets["domain"],
        "client_id": pydash.get(saas_config, "domo.client_id") or secrets["client_id"],
        "client_secret": pydash.get(saas_config, "domo.client_secret")
        or secrets["client_secret"],
    }
@pytest.fixture(scope="session")
def domo_identity_email(saas_config):
    """Known identity email for access tests (saas_config or Vault fallback)."""
    return pydash.get(saas_config, "domo.identity_email") or secrets["identity_email"]
@pytest.fixture(scope="session")
def domo_erasure_identity_email():
    """Random throwaway email so each erasure-test session targets a fresh user."""
    return f"{cryptographic_util.generate_secure_random_string(13)}@email.com"
@pytest.fixture(scope="session")
def domo_token(domo_secrets) -> str:
    """OAuth access token obtained via the client-credentials grant."""
    body = {"grant_type": "client_credentials"}
    url = f"https://{domo_secrets['domain']}/oauth/token"
    response = requests.post(
        url, body, auth=(domo_secrets["client_id"], domo_secrets["client_secret"])
    )
    return response.json()["access_token"]
@pytest.fixture
def domo_config() -> Dict[str, Any]:
    """Domo SaaS config with the instance key substituted in."""
    return load_config_with_replacement(
        "data/saas/config/domo_config.yml",
        "<instance_fides_key>",
        "domo_instance",
    )
@pytest.fixture
def domo_dataset() -> Dict[str, Any]:
    """First (only) Domo dataset from the YAML, with the instance key substituted."""
    return load_dataset_with_replacement(
        "data/saas/dataset/domo_dataset.yml",
        "<instance_fides_key>",
        "domo_instance",
    )[0]
@pytest.fixture(scope="function")
def domo_connection_config(
    db: session,
    domo_config,
    domo_secrets,
) -> Generator:
    """Persist a write-access Domo ConnectionConfig; deleted on teardown."""
    fides_key = domo_config["fides_key"]
    connection_config = ConnectionConfig.create(
        db=db,
        data={
            "key": fides_key,
            "name": fides_key,
            "connection_type": ConnectionType.saas,
            "access": AccessLevel.write,
            "secrets": domo_secrets,
            "saas_config": domo_config,
        },
    )
    yield connection_config
    connection_config.delete(db)
@pytest.fixture
def domo_dataset_config(
    db: Session,
    domo_connection_config: ConnectionConfig,
    domo_dataset: Dict[str, Any],
) -> Generator:
    """Create the CTL dataset + DatasetConfig for the connection; both cleaned up.

    Also renames the connection config to the dataset's fides_key so the
    two stay linked by key.
    """
    fides_key = domo_dataset["fides_key"]
    domo_connection_config.name = fides_key
    domo_connection_config.key = fides_key
    domo_connection_config.save(db=db)
    ctl_dataset = CtlDataset.create_from_dataset_dict(db, domo_dataset)
    dataset = DatasetConfig.create(
        db=db,
        data={
            "connection_config_id": domo_connection_config.id,
            "fides_key": fides_key,
            "ctl_dataset_id": ctl_dataset.id,
        },
    )
    yield dataset
    dataset.delete(db=db)
    ctl_dataset.delete(db=db)
class DomoTestClient:
    """Thin wrapper over the Domo v1 REST API for test setup/teardown.

    METHOD_NAME deletes a user (named generically by the dataset tooling).
    """

    def __init__(self, domo_token, domo_connection_config: ConnectionConfig):
        self.domo_secrets = domo_connection_config.secrets
        self.headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {domo_token}",
        }
        self.base_url = f"https://{self.domo_secrets['domain']}/v1"

    def create_user(self, email_address: str) -> Response:
        # create a new user in Domo
        body = {
            "email": email_address,
            "alternateEmail": email_address,
            "name": f"test_connector_ethyca",
            "phone": faker.phone_number(),
            "title": "Software Engineer",
            "role": "Participant",  # (available roles are: 'Admin', 'Privileged', 'Participant')
        }
        # sendInvite=false keeps the test user from receiving real email
        url = f"{self.base_url}/users?sendInvite=false"
        user_response: Response = requests.post(
            url=url, json=body, headers=self.headers
        )
        return user_response

    def get_user(self, user_id: str) -> Response:
        # get user created for erasure purposes
        url = f"{self.base_url}/users/{user_id}"
        user_response: Response = requests.get(url=url, headers=self.headers)
        return user_response

    def METHOD_NAME(self, user_id) -> Response:
        # delete user created for erasure purposes
        url = f"{self.base_url}/users/{user_id}"
        user_response: Response = requests.delete(url=url, headers=self.headers)
        return user_response
@pytest.fixture(scope="function")
def domo_test_client(
    domo_connection_config: ConnectionConfig, domo_token
) -> Generator:
    """Yield a DomoTestClient bound to the connection's secrets.

    (Annotation corrected: the original annotated the connection-config
    parameter as DomoTestClient.)
    """
    test_client = DomoTestClient(
        domo_token, domo_connection_config=domo_connection_config
    )
    yield test_client
def _user_exists(user_id: str, domo_test_client: DomoTestClient) -> Any:
    """check if the user exists in the domo

    Returns the user payload when found, otherwise None implicitly —
    the shape poll_for_existence expects.
    """
    user_response = domo_test_client.get_user(user_id)
    user = user_response.json()
    # it return status 200 if user exists with given id otherwise 400
    if user_response.ok and user:
        return user
@pytest.fixture(scope="function")
def domo_create_erasure_data(
    domo_test_client: DomoTestClient,
    domo_erasure_identity_email: str,
) -> Generator:
    """Create a throwaway Domo user for erasure tests; delete it on teardown.

    Yields the new user's id after polling until Domo reports the user
    exists. (Trailing dataset residue removed from the final assert line.)
    """
    # 1) create a new user
    user_response = domo_test_client.create_user(domo_erasure_identity_email)
    user = user_response.json()
    user_id = user["id"]
    error_message = f"user with user id [{user_id}] could not be added to Domo"
    poll_for_existence(
        _user_exists,
        (user_id, domo_test_client),
        error_message=error_message,
    )
    yield user_id
    # delete the user
    user_response = domo_test_client.METHOD_NAME(user_id)
    # Returns a 204 response code when successful or error based on whether the user ID being valid.
    assert user_response.status_code == HTTP_204_NO_CONTENT
298,976 | client | # This test requires InfiniBand, to run:
# UCXPY_IFNAME=ib0 UCX_NET_DEVICES=mlx5_0:1 \
# UCX_TLS=rc,tcp,cuda_copy \
# py.test --cache-clear tests/debug-tests/test_endpoint_error_callback.py
import asyncio
import multiprocessing
import os
import random
import signal
import sys
import cloudpickle
import pytest
from utils import get_cuda_devices, get_num_gpus, recv, send
from distributed.comm.utils import to_frames
from distributed.protocol import to_serialize
import ucp
from ucp.utils import get_event_loop
cupy = pytest.importorskip("cupy")
async def get_ep(name, port, endpoint_error_handling):
    """Create a UCX endpoint to the local address on *port*.

    *name* is not used here; callers pass it for identification only.
    """
    addr = ucp.get_address()
    ep = await ucp.create_endpoint(
        addr, port, endpoint_error_handling=endpoint_error_handling
    )
    return ep
def METHOD_NAME(port, func, endpoint_error_handling):
    # wait for server to come up
    # receive cupy object
    # process suicides
    ucp.init()

    # must create context before importing
    # cudf/cupy/etc
    async def read():
        await asyncio.sleep(1)
        ep = await get_ep("client", port, endpoint_error_handling)
        msg = None
        import cupy

        cupy.cuda.set_allocator(None)
        frames, msg = await recv(ep)
        # Client process suicides to force an "Endpoint timeout"
        # on the server (SIGKILL skips any cleanup/close handshake).
        os.kill(os.getpid(), signal.SIGKILL)

    get_event_loop().run_until_complete(read())
def server(port, func, endpoint_error_handling):
    """Server process body.

    Creates a listener, sends a cupy object to the client, then waits for
    the client-side endpoint to fail. Exits 0 iff the expected endpoint
    failure ("Endpoint timeout") was observed, 1 otherwise.
    """
    # create context before importing cudf/cupy/etc.
    ucp.init()

    # Flag shared between the listener callback and this function body.
    global ep_failure_occurred
    ep_failure_occurred = False

    async def f(listener_port):
        # This coroutine runs when the client asks to connect.
        async def write(ep):
            global ep_failure_occurred
            import cupy

            cupy.cuda.set_allocator(None)
            print("CREATING CUDA OBJECT IN SERVER...")
            cuda_obj_generator = cloudpickle.loads(func)
            cuda_obj = cuda_obj_generator()
            msg = {"data": to_serialize(cuda_obj)}
            frames = await to_frames(msg, serializers=("cuda", "dask", "pickle"))
            # Send meta data
            try:
                await send(ep, frames)
            except Exception:
                # Avoids process hanging on "Endpoint timeout"
                pass
            # With endpoint error handling the failure surfaces as a
            # UCXError; without it we rely on a 3 s receive timeout.
            if endpoint_error_handling is True:
                try:
                    frames, msg = await recv(ep)
                except ucp.exceptions.UCXError:
                    ep_failure_occurred = True
            else:
                try:
                    frames, msg = await asyncio.wait_for(recv(ep), 3)
                except asyncio.TimeoutError:
                    ep_failure_occurred = True
            print("Shutting Down Server...")
            await ep.close()
            # `lf` is the listener bound below; by the time this callback
            # runs the name exists in the enclosing scope.
            lf.close()

        lf = ucp.create_listener(
            write, port=listener_port, endpoint_error_handling=endpoint_error_handling
        )
        try:
            while not lf.closed():
                await asyncio.sleep(0.1)
        except ucp.UCXCloseError:
            pass

    get_event_loop().run_until_complete(f(port))
    # Exit status encodes whether the expected failure was flagged:
    # 0 == failure observed (test expectation met), 1 == not observed.
    if ep_failure_occurred:
        sys.exit(0)
    else:
        sys.exit(1)
def cupy_obj():
    """Return a large (10**8 element) cupy array used as the transfer payload."""
    import cupy

    return cupy.arange(10**8)
@pytest.mark.skipif(
    get_num_gpus() <= 2, reason="Machine does not have more than two GPUs"
)
@pytest.mark.parametrize("endpoint_error_handling", [True, False])
def test_send_recv_cu(endpoint_error_handling):
    """Spawn a server and a client process; the client kills itself after
    receiving data, so the server must observe an endpoint failure."""
    base_env = os.environ
    env_client = base_env.copy()
    # Grab the first two devices and give the client the reverse order.
    # BUG FIX: the previous code reversed the joined *string*
    # (cvd[::-1]), which corrupts multi-digit device ids,
    # e.g. "0,10" -> "01,0". Reverse the device list instead.
    devices = get_cuda_devices()[:2]
    env_client["CUDA_VISIBLE_DEVICES"] = ",".join(map(str, reversed(devices)))
    port = random.randint(13000, 15500)
    # serialize function and send to the client and server
    # server will use the return value of the contents,
    # serialize the values, then send serialized values to client.
    # client will compare return values of the deserialized
    # data sent from the server
    func = cloudpickle.dumps(cupy_obj)
    ctx = multiprocessing.get_context("spawn")
    server_process = ctx.Process(
        name="server", target=server, args=[port, func, endpoint_error_handling]
    )
    client_process = ctx.Process(
        name="client", target=METHOD_NAME, args=[port, func, endpoint_error_handling]
    )
    server_process.start()
    # cudf will ping the driver for validity of device
    # this will influence device on which a cuda context is created.
    # work around is to update env with new CVD before spawning
    os.environ.update(env_client)
    client_process.start()
    server_process.join()
    client_process.join()
    print("server_process.exitcode:", server_process.exitcode)
    # Server exits 0 when it flagged the endpoint failure; the client is
    # SIGKILLed, hence exit code -9.
    assert server_process.exitcode == 0
    assert client_process.exitcode == -9
298,977 | retrieve | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2016-2018 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module tracks each node, tenants currently active shell sessions
# 'ConsoleSession' objects from consoleserver are used, but with the additional
# capacity for having a multiple of sessions per node active at a given time
import confluent.consoleserver as consoleserver
import confluent.exceptions as exc
import confluent.messages as msg
activesessions = {}
class _ShellHandler(consoleserver.ConsoleHandler):
    """Console handler variant for interactive shell sessions.

    Unlike the console flavour it neither logs nor buffers output: a
    shell session is private to its user rather than shared/replayable.
    """
    _plugin_path = '/nodes/{0}/_shell/session'
    _genwatchattribs = False
    _logtobuffer = False

    def check_collective(self, attrvalue):
        # Collective checks do not apply to shell sessions.
        return

    def log(self, *args, **kwargs):
        # suppress logging through providing a stub 'log' function
        return

    def feedbuffer(self, data):
        # Intentionally drop data instead of buffering it for replay.
        return
        #return super().feedbuffer(data)

    def get_recent(self):
        # Report connection state but never replay prior output.
        retdata, connstate = super(_ShellHandler, self).get_recent()
        return '', connstate

    def _got_disconnected(self):
        # Mark closed, notify recipients, then tear down every live session.
        self.connectstate = 'closed'
        self._send_rcpts({'connectstate': self.connectstate})
        for session in list(self.livesessions):
            session.destroy()
def get_sessions(tenant, node, user):
    """Get sessionids active for node

    Given a tenant, nodename, and user; provide an iterable of sessionids.
    Each permutation of tenant, nodename and user have a distinct set of shell
    sessions.

    :param tenant: The tenant identifier for the current scope
    :param node: The nodename of the current scope.
    :param user: The confluent user that will 'own' the session.
    :returns: dict of sessionid -> handler ({} when none are active)
    """
    return activesessions.get((tenant, node, user), {})
def get_session(tenant, node, user, sessionid):
    """Return the handler for one specific session, or None if absent."""
    return activesessions.get((tenant, node, user), {}).get(sessionid, None)
class ShellSession(consoleserver.ConsoleSession):
    """Create a new socket to converse with a node shell session

    This object provides a filehandle that can be read/written
    too in a normal fashion and the concurrency, logging, and
    event watching will all be handled seamlessly. It represents a remote
    CLI shell session.

    :param node: Name of the node for which this session will be created
    :param configmanager: A configuration manager object for current context
    :param username: Username for which this session object will operate
    :param datacallback: An asynchronous data handler, to be called when data
                         is available. Note that if passed, it makes
                         'get_next_output' non-functional
    :param skipreplay: If true, will skip the attempt to redraw the screen
    :param sessionid: An optional identifier to match a running session or
                      customize the name of a new session.
    """

    def __init__(self, node, configmanager, username, datacallback=None,
                 skipreplay=False, sessionid=None, width=80, height=24):
        self.sessionid = sessionid
        self.configmanager = configmanager
        self.node = node
        super(ShellSession, self).__init__(node, configmanager, username,
                                           datacallback, skipreplay,
                                           width=width, height=height)

    def connect_session(self):
        """Attach this session to a (possibly new) shell handler."""
        global activesessions
        tenant = self.configmanager.tenant
        sessionkey = (tenant, self.node, self.username)
        # BUG FIX: this membership test previously used a 2-tuple
        # (tenant, node) against the 3-tuple keys used everywhere else in
        # this module, so it never matched and the existing handler dict
        # for this tenant/node/user was clobbered on every connect.
        if sessionkey not in activesessions:
            activesessions[sessionkey] = {}
        if self.sessionid is None:
            # Auto-allocate the lowest free numeric session id.
            self.sessionid = 1
            while str(self.sessionid) in activesessions[sessionkey]:
                self.sessionid += 1
        self.sessionid = str(self.sessionid)
        if self.sessionid not in activesessions[sessionkey]:
            activesessions[sessionkey][self.sessionid] = _ShellHandler(
                self.node, self.configmanager, width=self.width, height=self.height)
        self.conshdl = activesessions[sessionkey][self.sessionid]

    def destroy(self):
        """Close and deregister this session's handler, then destroy self."""
        sessionkey = (self.configmanager.tenant, self.node, self.username)
        try:
            activesessions[sessionkey][self.sessionid].close()
            del activesessions[sessionkey][self.sessionid]
        except KeyError:
            # Already removed (e.g. by _got_disconnected); nothing to do.
            pass
        super(ShellSession, self).destroy()
def create(nodes, element, configmanager, inputdata):
    """Reject generic create: shell sessions need special client plumbing."""
    # For creating a resource, it really has to be handled
    # in httpapi/sockapi specially, like a console.
    raise exc.InvalidArgumentException('Special client code required')
def METHOD_NAME(nodes, element, configmanager, inputdata):
    """Yield a child collection entry for every active shell session
    belonging to the current (tenant, node, user) scope."""
    sessionkey = (configmanager.tenant, nodes[0], configmanager.current_user)
    for sessionid in activesessions.get(sessionkey, ()):
        yield msg.ChildCollection(sessionid)
298,978 | result str | from pathlib import Path
from torch.autograd.profiler import profile
from .prof_utils import BaseProfiler, _format_time, _format_memory, _format_bandwidth
from typing import List
def _get_size(dtype: str):
if dtype == "fp16":
return 2
elif dtype == "fp32":
return 4
else:
raise NotImplementedError
def _get_numel(my_list: List[int]) -> int:
from functools import reduce
from operator import mul
return reduce(mul, my_list)
def _reduce_location(locations: List[str]) -> str:
ret = []
for lo in locations:
ret.append(lo)
ret.append("\n")
ret = ret[:-1]
return ''.join(ret)
class PcieEvent(object):
    """Aggregated statistics for PCIe copies attributed to one location."""

    def __init__(self, count: int = 0, pcie_vol: int = 0, cuda_time: int = 0):
        # count: number of copy events
        # pcie_vol: estimated bytes transferred
        # cuda_time: total CUDA time spent in these copies
        self.count = count
        self.pcie_vol = pcie_vol
        self.cuda_time = cuda_time

    def add(self, rhs):
        """Accumulate another event's statistics into this one."""
        self.count += rhs.count
        self.pcie_vol += rhs.pcie_vol
        self.cuda_time += rhs.cuda_time
class PcieProfiler(BaseProfiler):
    """Pcie profiler. Records all data transmission between CPU and GPU.

    ``aten::copy_`` events are attributed to code locations (up to
    ``depth`` Python stack frames); ``Memcpy HtoD`` / ``Memcpy DtoH``
    events are totalled globally.

    TODO: Merge pcie profiler into communication profiler
    """

    def __init__(self, dtype: str = "fp32", depth: int = 1):
        super().__init__(profiler_name="Pcie", priority=10)
        # Number of stack frames used to identify a code location.
        self.depth = depth
        # Bytes per element, assuming copied tensors have this dtype.
        self.data_size = _get_size(dtype)
        self.h2d_count = 0  # host-to-device copies
        self.h2d_time = 0  # cumulative H2D CUDA time
        self.d2h_count = 0  # device-to-host copies
        self.d2h_time = 0  # cumulative D2H CUDA time
        self.ops_record = dict()  # code location -> PcieEvent
        self.profiler = None

    def reset(self):
        # Drop all accumulated statistics and the finished profiler.
        self.h2d_count = 0
        self.h2d_time = 0
        self.d2h_count = 0
        self.d2h_time = 0
        self.ops_record = dict()
        self.profiler = None

    def enable(self):
        # Start a torch autograd profiler: CUDA timing, input shapes
        # (for volume estimation) and stacks (for attribution).
        self.profiler = profile(enabled=True,
                                use_cuda=True,
                                use_cpu=True,
                                use_kineto=True,
                                record_shapes=True,
                                with_stack=True)
        self.profiler.__enter__()

    def disable(self):
        # Stop profiling and fold the captured events into our records.
        self.profiler.__exit__(None, None, None)
        if self.profiler.enabled:
            events = self.profiler.function_events
            for event in events:
                if event.name == "aten::copy_":
                    t_shape = event.input_shapes[0]
                    # Skip scalar copies, zero-time events, and events
                    # without a Python stack (cannot be attributed).
                    if len(t_shape) == 0 or event.cuda_time_total == 0 or len(event.stack) == 0:
                        continue
                    # Volume estimate: numel * bytes-per-element.
                    current_comm_event = PcieEvent(1, self.data_size * _get_numel(t_shape), event.cuda_time_total)
                    code_location = _reduce_location(event.stack[:self.depth])
                    if code_location in self.ops_record:
                        self.ops_record[code_location].add(current_comm_event)
                    else:
                        self.ops_record[code_location] = current_comm_event
                elif 'Memcpy HtoD' in event.name:
                    self.h2d_count += 1
                    self.h2d_time += event.cuda_time_total
                elif 'Memcpy DtoH' in event.name:
                    self.d2h_count += 1
                    self.d2h_time += event.cuda_time_total
        self.profiler = None

    def to_tensorboard(self, writer):
        # Publish the textual report as a TensorBoard text summary.
        writer.add_text(tag="Data Transmission", text_string=self.METHOD_NAME("\n\n"))

    def to_file(self, filename: Path):
        # Write the textual report to *filename*.
        with open(filename, "w") as f:
            f.write(self.METHOD_NAME())

    def show(self):
        # Print the textual report to stdout.
        print(self.METHOD_NAME())

    def METHOD_NAME(self, sep: str = "\n"):
        # Build the human-readable report, joining entries with *sep*.
        res = []

        def append(s: str = None):
            # Append *s* (when given) followed by the separator.
            if s is not None:
                res.append(s)
            res.append(sep)

        append("Pcie profiling result:")
        append("time of data transmission (CPU -> GPU): {}".format(_format_time(self.h2d_time)))
        append("number of transmission (CPU -> GPU): {}".format(self.h2d_count))
        append("time of data transmission (GPU -> CPU): {}".format(_format_time(self.d2h_time)))
        append("number of transmission (GPU -> CPU): {}".format(self.d2h_count))
        append("Possible data transmission events in PCIE:")
        separation = '-' * 62
        row_format = '{:^10}' + '{:^12}' + '{:^16}' + '{:^12}' * 2
        append(separation)
        append(row_format.format('Location', 'GPU time', 'Trans volume', 'Bandwidth', 'Num of calls'))
        append(separation)
        # Locations ordered by descending CUDA time.
        show_list = sorted(self.ops_record.items(), key=lambda kv: -kv[1].cuda_time)
        for location, event in show_list:
            append(location)
            append(
                row_format.format('', _format_time(event.cuda_time), _format_memory(event.pcie_vol),
                                  _format_bandwidth(event.pcie_vol, event.cuda_time), event.count))
            append()
        return ''.join(res)
298,979 | test organizer can edit answers | import json
import pytest
from django_scopes import scope
from pretalx.submission.models import Answer
@pytest.mark.django_db
@pytest.mark.parametrize("is_public", (True, False))
def test_answers_not_visible_by_default(client, answer, schedule, is_public):
    """Anonymous access lists answers only when the question is public."""
    with scope(event=answer.event):
        answer.question.is_public = is_public
        answer.question.save()
    response = client.get(answer.event.api_urls.answers, follow=True)
    payload = json.loads(response.content.decode())
    assert response.status_code == 200
    assert bool(payload["results"]) is is_public
@pytest.mark.django_db
def test_organizer_can_see_answer(orga_client, answer):
    """Organisers see the answer in the list endpoint."""
    response = orga_client.get(answer.event.api_urls.answers, follow=True)
    assert response.status_code == 200
    results = json.loads(response.content.decode())["results"]
    assert len(results) == 1
    assert results[0]["id"] == answer.id
@pytest.mark.django_db
@pytest.mark.parametrize("is_visible", (True, False))
def test_answers_not_visible_by_default_to_reviewers(review_client, answer, is_visible):
    """Reviewers list answers only when the question allows reviewer access."""
    with scope(event=answer.event):
        answer.question.is_visible_to_reviewers = is_visible
        answer.question.save()
    response = review_client.get(answer.question.event.api_urls.answers, follow=True)
    payload = json.loads(response.content.decode())
    assert response.status_code == 200
    assert bool(payload["results"]) is is_visible
@pytest.mark.django_db
def test_organizer_can_create_answer(event, orga_client, question, submission, speaker):
    """Organisers may POST a new answer; exactly one Answer is created."""
    with scope(event=event):
        count_before = Answer.objects.filter(question__event=event).count()
    post_data = {
        "question": question.id,
        "submission": submission.code,
        "person": speaker.code,
        "answer": "Tralalalala",
    }
    response = orga_client.post(event.api_urls.answers, post_data)
    assert response.status_code == 201, response.content.decode()
    with scope(event=event):
        answers = Answer.objects.filter(question__event=event)
        assert answers.count() == count_before + 1
        assert answers.first().answer == "Tralalalala"
@pytest.mark.django_db
def test_duplicate_answer_updates_existing_answer(
    event, orga_client, question, submission, speaker, answer
):
    """POSTing for an already-answered question updates instead of duplicating."""
    with scope(event=event):
        count_before = Answer.objects.filter(question__event=event).count()
    post_data = {
        "question": answer.question_id,
        "submission": answer.submission.code,
        "person": "",
        "answer": "Tralalalala",
    }
    response = orga_client.post(event.api_urls.answers, post_data)
    assert response.status_code == 201, response.content.decode()
    with scope(event=event):
        answers = Answer.objects.filter(question__event=event)
        assert answers.count() == count_before
        assert answers.first().answer == "Tralalalala"
@pytest.mark.django_db
def METHOD_NAME(event, orga_client, answer):
    """Organisers may PATCH an answer's text."""
    url = event.api_urls.answers + f"{answer.pk}/"
    response = orga_client.patch(
        url,
        {"answer": "ohno.png"},
        content_type="application/json",
    )
    assert response.status_code == 200, response.content.decode()
    with scope(event=event):
        answer.refresh_from_db()
        assert answer.answer == "ohno.png"
@pytest.mark.django_db
def test_reviewer_cannot_create_answer(
    event, review_client, question, submission, speaker
):
    """Reviewers are forbidden (403) from creating answers."""
    with scope(event=event):
        count_before = Answer.objects.filter(question__event=event).count()
    post_data = {
        "question": question.id,
        "submission": submission.code,
        "person": speaker.code,
        "answer": "Tralalalala",
    }
    response = review_client.post(event.api_urls.answers, post_data)
    assert response.status_code == 403, response.content.decode()
    with scope(event=event):
        assert Answer.objects.filter(question__event=event).count() == count_before
@pytest.mark.django_db
def test_reviewer_cannot_edit_answer(event, review_client, answer):
    """Reviewers are forbidden (403) from editing answers."""
    url = event.api_urls.answers + f"{answer.pk}/"
    response = review_client.patch(
        url,
        {"answer": "ohno.png"},
        content_type="application/json",
    )
    assert response.status_code == 403, response.content.decode()
    with scope(event=event):
        answer.refresh_from_db()
        assert answer.answer != "ohno.png"
298,980 | is topology secret | from controllers.common.csi_logger import get_stdout_logger
from controllers.servers.settings import SECRET_SUPPORTED_TOPOLOGIES_PARAMETER
from controllers.servers.utils import is_topology_match
import controllers.common.settings as common_settings
from controllers.servers.host_definer.globals import MANAGED_SECRETS
from controllers.servers.host_definer import settings
from controllers.servers.host_definer.utils import utils
import controllers.servers.host_definer.messages as messages
from controllers.servers.host_definer.k8s.api import K8SApi
from controllers.servers.host_definer.resource_manager.resource_info import ResourceInfoManager
logger = get_stdout_logger()
class SecretManager:
    """Decides which nodes and secrets the host definer should manage."""

    def __init__(self):
        self.k8s_api = K8SApi()
        self.resource_info_manager = ResourceInfoManager()

    def is_node_should_be_managed_on_secret(self, node_name, secret_name, secret_namespace):
        """Return True when *node_name* should be managed for this secret."""
        logger.info(messages.CHECK_NODE_SHOULD_BE_MANAGED_BY_SECRET.format(node_name, secret_name, secret_namespace))
        secret_data = self.get_secret_data(secret_name, secret_namespace)
        utils.validate_secret(secret_data)
        managed_secret_info, _ = self._get_managed_secret_by_name_and_namespace(secret_name, secret_namespace)
        should_manage = self.is_node_should_managed_on_secret_info(node_name, managed_secret_info)
        if should_manage:
            logger.info(messages.NODE_SHOULD_BE_MANAGED_ON_SECRET.format(node_name, secret_name, secret_namespace))
        else:
            logger.info(messages.NODE_SHOULD_NOT_BE_MANAGED_ON_SECRET.format(node_name, secret_name, secret_namespace))
        return should_manage

    def _get_managed_secret_by_name_and_namespace(self, secret_name, secret_namespace):
        # Look up the secret among the managed ones; returns (info, index).
        secret_info = self.resource_info_manager.generate_secret_info(secret_name, secret_namespace)
        return self.get_matching_managed_secret_info(secret_info)

    def is_node_should_managed_on_secret_info(self, node_name, secret_info):
        """A node is managed when the secret tracks no per-node system ids,
        or explicitly lists the node."""
        if not secret_info:
            return False
        nodes_with_system_id = secret_info.nodes_with_system_id
        if not nodes_with_system_id:
            return True
        return bool(nodes_with_system_id.get(node_name))

    def is_node_labels_in_system_ids_topologies(self, system_ids_topologies, node_labels):
        """True when some system's topologies match the node's labels."""
        return self.get_system_id_for_node_labels(system_ids_topologies, node_labels) != ''

    def get_system_id_for_node_labels(self, system_ids_topologies, node_labels):
        """Return the first system id whose topologies match, else ''."""
        node_topologies = self.get_topology_labels(node_labels)
        matches = (
            system_id
            for system_id, system_topologies in system_ids_topologies.items()
            if is_topology_match(system_topologies, node_topologies)
        )
        return next(matches, '')

    def METHOD_NAME(self, secret_data):
        """Return True when the (validated) secret carries a config section."""
        utils.validate_secret(secret_data)
        return bool(utils.get_secret_config(secret_data))

    def generate_secret_system_ids_topologies(self, secret_data):
        """Map each system id in the secret config to its supported
        topologies (None when the entry has no mapping interface)."""
        system_ids_topologies = {}
        for system_id, system_info in utils.get_secret_config(secret_data).items():
            try:
                system_ids_topologies[system_id] = system_info.get(SECRET_SUPPORTED_TOPOLOGIES_PARAMETER)
            except AttributeError:
                system_ids_topologies[system_id] = None
        return system_ids_topologies

    def is_secret(self, parameter_name):
        """True when the parameter name follows the CSI secret-name pattern."""
        return parameter_name.endswith(common_settings.SECRET_NAME_SUFFIX) and \
            parameter_name.startswith(common_settings.CSI_PARAMETER_PREFIX)

    def get_secret_name_and_namespace(self, storage_class_info, parameter_name):
        """Return (secret name, secret namespace) from storage class params."""
        name_suffix = common_settings.SECRET_NAME_SUFFIX
        namespace_suffix = name_suffix.replace(
            common_settings.NAME_FIELD, common_settings.NAMESPACE_FIELD)
        prefix = parameter_name.split(name_suffix)[0]
        secret_name = storage_class_info.parameters[parameter_name]
        secret_namespace = storage_class_info.parameters[prefix + namespace_suffix]
        return secret_name, secret_namespace

    def add_unique_secret_info_to_list(self, secret_info, secrets_info_list):
        """Append *secret_info* unless an entry with the same name and
        namespace is already present; return the list."""
        already_present = any(
            existing.name == secret_info.name and existing.namespace == secret_info.namespace
            for existing in secrets_info_list)
        if not already_present:
            secrets_info_list.append(secret_info)
        return secrets_info_list

    def is_secret_can_be_changed(self, secret_info, watch_event_type):
        """A managed secret may change unless the watch event is a delete."""
        return self._is_secret_managed(secret_info) and \
            not utils.is_watch_object_type_is_delete(watch_event_type)

    def _is_secret_managed(self, secret_info):
        _, index = self.get_matching_managed_secret_info(secret_info)
        return index != -1

    def get_matching_managed_secret_info(self, secret_info):
        """Return (managed info, index) for a match in MANAGED_SECRETS,
        or (the input info, -1) when unmanaged."""
        for index, managed in enumerate(MANAGED_SECRETS):
            if (managed.name, managed.namespace) == (secret_info.name, secret_info.namespace):
                return managed, index
        return secret_info, -1

    def get_array_connection_info(self, secret_name, secret_namespace, labels):
        """Return connection info derived from the secret, or {} if absent."""
        secret_data = self.get_secret_data(secret_name, secret_namespace)
        if not secret_data:
            return {}
        node_topologies = self.get_topology_labels(labels)
        return utils.get_array_connection_info_from_secret_data(secret_data, node_topologies)

    def get_secret_data(self, secret_name, secret_namespace):
        """Fetch and base64-decode the secret payload, or {} if missing."""
        logger.info(messages.READ_SECRET.format(secret_name, secret_namespace))
        raw_data = self.k8s_api.get_secret_data(secret_name, secret_namespace)
        return utils.change_decode_base64_secret_config(raw_data) if raw_data else {}

    def get_topology_labels(self, labels):
        """Filter *labels* down to the topology-related ones."""
        return {label: labels[label] for label in labels if utils.is_topology_label(label)}
298,981 | get formatted counter array | # (C) Datadog, Inc. 2022-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import ctypes
import pywintypes
import win32pdh
from .constants import PDH_CSTATUS_INVALID_DATA, PDH_CSTATUS_NEW_DATA, PDH_CSTATUS_VALID_DATA, PDH_MORE_DATA
# If the PERF_TYPE_COUNTER value was selected then select one of the
# following to indicate the type of counter
# typedef struct _PDH_FMT_COUNTERVALUE {
# DWORD CStatus;
# union {
# LONG longValue;
# double doubleValue;
# LONGLONG largeValue;
# LPCSTR AnsiStringValue;
# LPCWSTR WideStringValue;
# };
# } PDH_FMT_COUNTERVALUE, * PPDH_FMT_COUNTERVALUE;
# Mirrors the anonymous union inside PDH_FMT_COUNTERVALUE: exactly one
# member is meaningful, selected by the dwFormat flags passed to PDH.
# NOTE(review): relies on ctypes.wintypes being importable at this point;
# a bare `import ctypes` does not load the submodule — presumably pywin32
# pulls it in. TODO confirm.
class PDH_COUNTERVALUE(ctypes.Union):
    _fields_ = [
        ('longValue', ctypes.c_long),
        ('doubleValue', ctypes.c_double),
        ('largeValue', ctypes.c_longlong),
        ('AnsiStringValue', ctypes.wintypes.LPCSTR),
        ('WideStringValue', ctypes.wintypes.LPCWSTR),
    ]
# ctypes mirror of the Win32 PDH_FMT_COUNTERVALUE struct:
# a status DWORD followed by the value union above.
class PDH_FMT_COUNTERVALUE(ctypes.Structure):
    _fields_ = [
        ('CStatus', ctypes.wintypes.DWORD),
        ('value', PDH_COUNTERVALUE),
    ]
# typedef struct _PDH_FMT_COUNTERVALUE_ITEM_W {
# LPWSTR szName;
# PDH_FMT_COUNTERVALUE FmtValue;
# } PDH_FMT_COUNTERVALUE_ITEM_W, * PPDH_FMT_COUNTERVALUE_ITEM_W;
# ctypes mirror of PDH_FMT_COUNTERVALUE_ITEM_W: instance name pointer
# plus its formatted value; PDH returns a packed array of these.
class PDH_FMT_COUNTERVALUE_ITEM_W(ctypes.Structure):
    _fields_ = [
        ('szName', ctypes.wintypes.LPCWSTR),
        ('FmtValue', PDH_FMT_COUNTERVALUE),
    ]
# This function is a temporary work around of inability of win32pdh.GetFormattedCounterArray()
# retrieve non-unique instances since win32pdh.GetFormattedCounterArray() is using a simple
# dictionary of name to values. See for details
# https://github.com/mhammond/pywin32/blob/main/win32/src/win32pdhmodule.cpp#L677
# This function is 4-10x slower than CPython's native implementation - the reason is
# significantly less efficient facilities to parse compact layout of PDH Item Array.
# On the other hand a contribution to overall performance overhead from this or other PDH
# function calls are very small - around 1% or less.
#
def METHOD_NAME(counter_handle, format):
    """Read a formatted PDH counter instance array, keeping duplicates.

    Returns a dict mapping instance name -> value; when the same instance
    name occurs more than once the value is a list of the values, in the
    order PDH returned them.
    """
    # Define PdhGetFormattedCounterArrayW prototype
    # https://learn.microsoft.com/en-us/windows/win32/api/pdh/nf-pdh-pdhgetformattedcounterarrayw
    PdhGetFormattedCounterArrayW_fn = ctypes.windll.pdh.PdhGetFormattedCounterArrayW
    PdhGetFormattedCounterArrayW_fn.argtypes = [
        ctypes.wintypes.HANDLE,  # [in] PDH_HCOUNTER hCounter,
        ctypes.wintypes.DWORD,  # [in] DWORD dwFormat,
        ctypes.POINTER(ctypes.wintypes.DWORD),  # [in, out] LPDWORD lpdwBufferSize,
        ctypes.POINTER(ctypes.wintypes.DWORD),  # [out] LPDWORD lpdwItemCount,
        ctypes.wintypes.LPVOID,  # [out] PPDH_FMT_COUNTERVALUE_ITEM_W ItemBuffer
    ]
    # First call with a NULL buffer: PDH reports the required size in
    # buffer_size and returns PDH_MORE_DATA.
    buffer_size = ctypes.wintypes.DWORD(0)
    item_count = ctypes.wintypes.DWORD(0)
    handle = ctypes.wintypes.HANDLE(counter_handle)
    result = PdhGetFormattedCounterArrayW_fn(handle, format, ctypes.byref(buffer_size), ctypes.byref(item_count), None)
    if result != PDH_MORE_DATA:
        # To simulate real win32/win32pdh error/exception - no need to convert error to its real string for this
        # temporary function like it is done in
        # https://github.com/mhammond/pywin32/blob/main/win32/src/PyWinTypesmodule.cpp#L278
        raise pywintypes.error(result, 'PdhGetFormattedCounterArray', 'Failed to retrieve counters values.')
    # Then get items for real
    items_buffer = (ctypes.c_byte * buffer_size.value)()
    result = PdhGetFormattedCounterArrayW_fn(
        handle, format, ctypes.byref(buffer_size), ctypes.byref(item_count), items_buffer
    )
    if result == PDH_CSTATUS_INVALID_DATA:
        raise pywintypes.error(result, 'PdhGetFormattedCounterArray', 'The returned data is not valid.')
    if result != PDH_CSTATUS_VALID_DATA:
        raise pywintypes.error(result, 'PdhGetFormattedCounterArray', 'Failed to retrieve counters values.')
    # Instance values is a dictionary with instance name as a key and value could be a single
    # atomic value or list of them for non-unique instances
    instance_values = {}
    previous_instance_name = ""
    previous_instance_value = None
    instance_count = 0
    # Loop over all collected instances
    if result == 0 and item_count.value > 0:
        for idx in range(item_count.value):
            # Get offset in buffer for item at index idx
            offset = idx * ctypes.sizeof(PDH_FMT_COUNTERVALUE_ITEM_W)
            # Cast byte buffer to item
            item_ptr = ctypes.byref(items_buffer, offset)
            item = ctypes.cast(item_ptr, ctypes.POINTER(PDH_FMT_COUNTERVALUE_ITEM_W))
            # Typically errored instances are not reported, but Microsoft docs imply a stricter validation
            instance_status = item.contents.FmtValue.CStatus
            if instance_status != PDH_CSTATUS_VALID_DATA and instance_status != PDH_CSTATUS_NEW_DATA:
                continue
            # Get instance value pair
            if format & win32pdh.PDH_FMT_DOUBLE:
                # Check this format first since it is hardcoded format (see COUNTER_VALUE_FORMAT)
                instance_value = item.contents.FmtValue.value.doubleValue
            elif format & win32pdh.PDH_FMT_LONG:
                instance_value = item.contents.FmtValue.value.longValue
            elif format & win32pdh.PDH_FMT_LARGE:
                instance_value = item.contents.FmtValue.value.largeValue
            else:
                raise pywintypes.error(-1, 'GetFormattedCounterArray', 'Not supported value of format is specified.')
            # Get instance name
            instance_name = item.contents.szName
            # For performance and to support non-unique instance names, do not immediately store values
            # in instance_values but accumulate them over a few iterations. PDH reports equal-named
            # instances sequentially, so a name change means the previous run is complete.
            if len(previous_instance_name) == 0:
                # Very first iteration
                previous_instance_name = instance_name
                previous_instance_value = instance_value
                instance_count = 1
            elif instance_name == previous_instance_name:
                # Second or more instances with the same name
                if instance_count == 1:
                    previous_instance_value = [previous_instance_value, instance_value]
                else:
                    previous_instance_value.append(instance_value)
                instance_count += 1
            else:
                # A different instance name came up - flush previous value(s) to the dictionary
                instance_values[previous_instance_name] = previous_instance_value
                previous_instance_name = instance_name
                previous_instance_value = instance_value
                instance_count = 1
        # Flush last value(s) to the dictionary.
        # NOTE(review): if every item was skipped by its CStatus check this
        # stores {"" : None}; presumably that cannot happen after the
        # result-status validation above — TODO confirm.
        instance_values[previous_instance_name] = previous_instance_value
    return instance_values
298,982 | get internet exchange | from __future__ import annotations
from seedemu.core import AutonomousSystem, InternetExchange, AddressAssignmentConstraint, Node, Graphable, Emulator, Layer
from typing import Dict, List
BaseFileTemplates: Dict[str, str] = {}
BaseFileTemplates["interface_setup_script"] = """\
#!/bin/bash
cidr_to_net() {
ipcalc -n "$1" | sed -E -n 's/^Network: +([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\/[0-9]{1,2}) +.*/\\1/p'
}
ip -j addr | jq -cr '.[]' | while read -r iface; do {
ifname="`jq -cr '.ifname' <<< "$iface"`"
jq -cr '.addr_info[]' <<< "$iface" | while read -r iaddr; do {
addr="`jq -cr '"\(.local)/\(.prefixlen)"' <<< "$iaddr"`"
net="`cidr_to_net "$addr"`"
[ -z "$net" ] && continue
line="`grep "$net" < ifinfo.txt`"
new_ifname="`cut -d: -f1 <<< "$line"`"
latency="`cut -d: -f3 <<< "$line"`"
bw="`cut -d: -f4 <<< "$line"`"
[ "$bw" = 0 ] && bw=1000000000000
loss="`cut -d: -f5 <<< "$line"`"
[ ! -z "$new_ifname" ] && {
ip li set "$ifname" down
ip li set "$ifname" name "$new_ifname"
ip li set "$new_ifname" up
tc qdisc add dev "$new_ifname" root handle 1:0 tbf rate "${bw}bit" buffer 1000000 limit 1000
tc qdisc add dev "$new_ifname" parent 1:0 handle 10: netem delay "${latency}ms" loss "${loss}%"
}
}; done
}; done
"""
class Base(Layer, Graphable):
"""!
@brief The base layer.
"""
__ases: Dict[int, AutonomousSystem]
__ixes: Dict[int, InternetExchange]
__name_servers: List[str]
def __init__(self):
"""!
@brief Base layer constructor.
"""
super().__init__()
self.__ases = {}
self.__ixes = {}
self.__name_servers = []
def getName(self) -> str:
return "Base"
def configure(self, emulator: Emulator):
self._log('registering nodes...')
for asobj in self.__ases.values():
if len(asobj.getNameServers()) == 0:
asobj.setNameServers(self.__name_servers)
asobj.registerNodes(emulator)
self._log('setting up internet exchanges...')
for ix in self.__ixes.values(): ix.configure(emulator)
self._log('setting up autonomous systems...')
for asobj in self.__ases.values(): asobj.configure(emulator)
def render(self, emulator: Emulator) -> None:
for ((scope, type, name), obj) in emulator.getRegistry().getAll().items():
if not (type == 'rs' or type == 'rnode' or type == 'hnode'):
continue
node: Node = obj
ifinfo = ''
for iface in node.getInterfaces():
net = iface.getNet()
[l, b, d] = iface.getLinkProperties()
ifinfo += '{}:{}:{}:{}:{}\n'.format(net.getName(), net.getPrefix(), l, b, d)
node.setFile('/ifinfo.txt', ifinfo)
node.setFile('/interface_setup', BaseFileTemplates['interface_setup_script'])
node.insertStartCommand(0, '/interface_setup')
node.insertStartCommand(0, 'chmod +x /interface_setup')
def setNameServers(self, servers: List[str]) -> Base:
"""!
@brief set recursive name servers to use on all nodes. Can be override
by calling setNameServers at AS level or node level.
@param servers list of IP addresses of recursive name servers.
@returns self, for chaining API calls.
"""
self.__name_servers = servers
return self
def getNameServers(self) -> List[str]:
"""!
@brief get configured recursive name servers for all nodes.
@returns list of IP addresses of recursive name servers
"""
return self.__name_servers
def createAutonomousSystem(self, asn: int) -> AutonomousSystem:
"""!
@brief Create a new AutonomousSystem.
@param asn ASN of the new AS.
@returns created AS.
@throws AssertionError if asn exists.
"""
assert asn not in self.__ases, "as{} already exist.".format(asn)
self.__ases[asn] = AutonomousSystem(asn)
return self.__ases[asn]
def getAutonomousSystem(self, asn: int) -> AutonomousSystem:
"""!
@brief Create an existing AutonomousSystem.
@param asn ASN of the AS.
@returns AS.
@throws AssertionError if asn does not exist.
"""
assert asn in self.__ases, "as{} does not exist.".format(asn)
return self.__ases[asn]
def setAutonomousSystem(self, asObject: AutonomousSystem):
"""!
@brief Set AS to an existing AS object.
@param asObject AS object.
"""
asn = asObject.getAsn()
self.__ases[asn] = asObject
def createInternetExchange(self, asn: int, prefix: str = "auto", aac: AddressAssignmentConstraint = None) -> InternetExchange:
"""!
@brief Create a new InternetExchange.
@param asn ASN of the new IX.
@param prefix (optional) prefix of the IX peering LAN.
@param aac (optional) Address assigment constraint.
@returns created IX.
@throws AssertionError if IX exists.
"""
assert asn not in self.__ixes, "ix{} already exist.".format(asn)
self.__ixes[asn] = InternetExchange(asn, prefix, aac)
return self.__ixes[asn]
def METHOD_NAME(self, asn: int) -> InternetExchange:
"""!
@brief Get an existing InternetExchange.
@param asn ASN of the IX.
@returns InternetExchange.
@throws AssertionError if ix does not exist.
"""
assert asn in self.__ixes, "ix{} does not exist.".format(asn)
return self.__ixes[asn]
def setInternetExchange(self, ixObject: InternetExchange):
"""!
@brief Set IX to an existing IX object.
@param ixObject IX object.
"""
asn = ixObject.getId()
self.__ixes[asn] = ixObject
def getAsns(self) -> List[int]:
"""!
@brief Get list of ASNs.
@returns List of ASNs.
"""
return list(self.__ases.keys())
def getInternetExchangeIds(self) -> List[int]:
"""!
@brief Get list of IX IDs.
@returns List of IX IDs.
"""
return list(self.__ixes.keys())
def getNodesByName(self, name:str) -> List[Node]:
"""!
@brief Get list of Nodes by name.
@returns List of Nodes whose name is start with input_name.
"""
nodes = []
for _as in self.__ases.values():
for host_name in _as.getHosts():
if host_name.startswith(name):
nodes.append(_as.getHost(host_name))
return nodes
    def _doCreateGraphs(self, emulator: Emulator):
        # Aggregate each AS's layer-2 connection graph into one
        # emulator-wide "Layer 2 Connections" graph.
        graph = self._addGraph('Layer 2 Connections', False)
        for asobj in self.__ases.values():
            asobj.createGraphs(emulator)
            asgraph = asobj.getGraph('AS{}: Layer 2 Connections'.format(asobj.getAsn()))
            # merge the per-AS graph into the aggregate graph
            graph.copy(asgraph)
def print(self, indent: int) -> str:
out = ' ' * indent
out += 'BaseLayer:\n'
indent += 4
out += ' ' * indent
out += 'AutonomousSystems:\n'
for _as in self.__ases.values():
out += _as.print(indent + 4)
out += ' ' * indent
out += 'InternetExchanges:\n'
for _as in self.__ixes.values():
out += _as.print(indent + 4)
return ou |
298,983 | attach to tree | import json, os, requests
from copy import deepcopy
def get_node_struct(seq):
    """Build an auspice-style tree node for a newly attached query sequence."""
    node_attrs = {
        'clade_membership': {'value': seq['clade']},
        # flag the node as newly attached so it can be colored in auspice
        'new_node': {'value': 'Yes'},
        'QCStatus': {'value': seq['QCStatus']},
    }
    return {
        'branch_attrs': {'mutations': {}},
        'name': seq['seqName'] + "_clades",
        'node_attrs': node_attrs,
        'mutations': {},
    }
def get_root_seq():
    """Read the reference (root) sequence, stripping all line breaks/whitespace."""
    with open('src/assets/data/defaultRootSequence.txt', 'r') as fh:
        return ''.join(line.strip() for line in fh)
def mutations_on_tree(node, mutations):
    """Recursively annotate each node with its cumulative mutation map.

    `mutations` is the {0-based position: derived nucleotide} map inherited
    from the parent. The node's own branch mutations are applied on top of a
    copy, stored on the node under "mutations", and passed to its children.
    """
    acc = deepcopy(mutations)
    if 'branch_attrs' in node and 'nuc' in node['branch_attrs']['mutations']:
        for mut in node['branch_attrs']['mutations']['nuc']:
            # mutation strings look like "A123T": ancestral, 1-based pos, derived
            anc = mut[0]
            pos = int(mut[1:-1]) - 1
            der = mut[-1]
            if pos in acc and acc[pos] != anc:
                # ancestral state disagrees with the accumulated state
                print("inconsistent")
            acc[pos] = der
    node["mutations"] = acc
    for child in node.get('children', []):
        mutations_on_tree(child, acc)
def calculate_distance(node, seq):
    """Distance between a tree node's mutations and a query's mutations.

    Positions mutated in both to the same nucleotide contribute 0; positions
    mutated in both but to different nucleotides contribute 1; positions
    mutated in only one of the two contribute 1 each.
    """
    node_muts = node["mutations"]
    agree = 0     # same position, same derived nucleotide
    disagree = 0  # same position, different derived nucleotide
    for qmut in seq['mutations']:
        ref = node_muts.get(qmut['pos'])
        if ref is not None:
            if ref == qmut['queryNuc']:
                agree += 1
            else:
                disagree += 1
    return len(node_muts) + len(seq['mutations']) - 2 * agree - disagree
def get_differences(node, seq):
    """Return the mutations turning `node`'s state into the query `seq`'s.

    Each entry is "<ancestral><1-based position><derived>". For positions
    already mutated on the node, the node's nucleotide is the ancestral
    state; otherwise the ancestral state is read from the module-level
    `root_seq` global (set in __main__).
    """
    # Fix: dropped the unused shared_differences/shared_sites counters that
    # were copied over from calculate_distance but never read here.
    mutations = []
    for qmut in seq['mutations']:
        pos = qmut['pos']
        der = qmut['queryNuc']
        if pos in node['mutations']:
            # only positions where the query differs from the node matter
            if der != node['mutations'][pos]:
                mutations.append(node['mutations'][pos] + str(pos + 1) + der)
        else:
            mutations.append(root_seq[pos] + str(pos + 1) + der)
    return mutations
def closest_match(node, seq):
    """Depth-first search for the tree node with minimal distance to `seq`.

    Returns (best_distance, best_node); on ties the node encountered first
    (i.e. the ancestor) wins, matching strict '<' comparison.
    """
    best_dist = calculate_distance(node, seq)
    best_node = node
    for child in node.get('children', []):
        cand_dist, cand_node = closest_match(child, seq)
        if cand_dist < best_dist:
            best_dist, best_node = cand_dist, cand_node
    return best_dist, best_node
def METHOD_NAME(base_node, seq):
    """Attach the query sequence `seq` as a new child node of `base_node`."""
    base_node.setdefault('children', [])
    muts = get_differences(base_node, seq)
    child = get_node_struct(seq)
    child['branch_attrs']['mutations']['nuc'] = muts
    # divergence = parent's divergence plus branch length in mutations
    child['node_attrs']['div'] = base_node['node_attrs']['div'] + len(muts)
    # child inherits the parent's cumulative mutations plus its own branch
    child['mutations'] = deepcopy(base_node['mutations'])
    for mut in muts:
        child['mutations'][int(mut[1:-1]) - 1] = mut[-1]
    base_node['children'].append(child)
def remove_mutations(node):
    """Recursively strip the helper 'mutations' annotation from the tree."""
    node.pop('mutations', None)
    for child in node.get('children', []):
        remove_mutations(child)
if __name__ == '__main__':
    # Load the reference tree: use the cached local copy when present,
    # otherwise download it once and cache it for subsequent runs.
    if os.path.isfile('tree.json'):
        with open('tree.json', 'r') as fh:
            T = json.load(fh)
    else:
        r = requests.get('https://nextstrain-neherlab.s3.amazonaws.com/ncov_small.json')
        T = r.json()
        with open('tree.json', 'w') as fh:
            json.dump(T, fh)
    # root_seq is read as a module-level global by get_differences()
    root_seq = get_root_seq()
    with open('nextclades.json', 'r') as fh:
        data = json.load(fh)
    # start at the root of the tree
    focal_node = T['tree']
    # annotate every node with its cumulative mutation map
    mutations_on_tree(focal_node, {})
    # attach every successfully analyzed query sequence to its closest node
    for seq in data[:]:
        if 'errors' in seq and seq['errors']:
            continue
        match, match_node = closest_match(focal_node, seq)
        print(seq['seqName'], match_node['name'], match)
        print(get_differences(match_node, seq))
        METHOD_NAME(match_node, seq)
    # drop the helper annotations before export
    remove_mutations(focal_node)
    # register colorings so the new nodes are visible in auspice
    T['meta']['colorings'].append({'key': 'QCStatus',
                                   'title': 'QC Status',
                                   'type': 'categorical'},
                                  )
    T['meta']['colorings'].append({'key': 'new_node',
                                   'title': 'New Node',
                                   'type': 'categorical'},
                                  )
    T['meta']['display_defaults'] = {'branch_label': 'clade',
                                     'color_by': 'new_node',
                                     'distance_measure': 'div',
                                     'geo_resolution': 'country',
                                     'map_triplicate': True,
                                     'transmission_lines': False}
    with open('tree_clades.json', 'w') as fh:
        json.dump(T, fh)
298,984 | message | from pubnub.endpoints.file_operations.file_based_endpoint import FileOperationEndpoint
from pubnub.crypto import PubNubFileCrypto
from pubnub.enums import HttpMethod, PNOperationType
from pubnub.models.consumer.file import PNSendFileResult
from pubnub.endpoints.file_operations.publish_file_message import PublishFileMessage
from pubnub.endpoints.file_operations.fetch_upload_details import FetchFileUploadS3Data
from pubnub.request_handlers.requests_handler import RequestsRequestHandler
from pubnub.endpoints.mixins import TimeTokenOverrideMixin
class SendFileNative(FileOperationEndpoint, TimeTokenOverrideMixin):
    """Endpoint that uploads a file to PubNub file storage.

    sync() performs the full three-step flow:
      1. fetch the pre-signed S3 upload form (FetchFileUploadS3Data),
      2. POST the (optionally encrypted) file to the returned URL,
      3. announce the uploaded file on the channel (PublishFileMessage).
    Setter methods return self so calls can be chained fluently.
    """

    def __init__(self, pubnub):
        super(SendFileNative, self).__init__(pubnub)
        self._file_name = None
        self._pubnub = pubnub
        self._file_upload_envelope = None  # envelope from FetchFileUploadS3Data
        self._message = None
        self._should_store = None
        self._ttl = 0
        self._meta = None
        self._cipher_key = None
        self._file_object = None
        self._replicate = None
        self._ptto = None

    def file_object(self, fd):
        # Accepts a file-like object or raw payload (see encrypt_payload).
        self._file_object = fd
        return self

    def build_params_callback(self):
        # The pre-signed S3 URL already carries all query parameters.
        return lambda a: {}

    def build_path(self):
        # POST goes directly to the pre-signed S3 upload URL.
        return self._file_upload_envelope.result.data["url"]

    def encrypt_payload(self):
        """Return the payload to upload, encrypted if a cipher key is configured."""
        if self._cipher_key or self._pubnub.config.cipher_key:
            try:
                payload = self._file_object.read()
            except AttributeError:
                # not file-like: assume the raw payload was passed in directly
                payload = self._file_object

            return PubNubFileCrypto(self._pubnub.config).encrypt(
                self._cipher_key or self._pubnub.config.cipher_key,
                payload
            )
        else:
            return self._file_object

    def build_file_upload_request(self):
        """Assemble the multipart form body for the pre-signed S3 POST."""
        file = self.encrypt_payload()
        multipart_body = {}
        for form_field in self._file_upload_envelope.result.data["form_fields"]:
            multipart_body[form_field["key"]] = (None, form_field["value"])
        # the file part must come after the S3 form fields
        multipart_body["file"] = (self._file_name, file, None)

        return multipart_body

    def http_method(self):
        return HttpMethod.POST

    def use_compression(self, compress=True):
        self._use_compression = bool(compress)
        return self

    def is_compressable(self):
        return True

    def custom_params(self):
        return {}

    def validate_params(self):
        self.validate_subscribe_key()
        self.validate_channel()
        self.validate_file_object()
        self.validate_file_name()

    def use_base_path(self):
        # The request targets S3, not the PubNub origin.
        return False

    def non_json_response(self):
        # S3 replies with an empty/XML body rather than JSON.
        return True

    def is_auth_required(self):
        # Authorization is embedded in the pre-signed upload form.
        return False

    def should_store(self, should_store):
        self._should_store = bool(should_store)
        return self

    def ttl(self, ttl):
        self._ttl = ttl
        return self

    def meta(self, meta):
        self._meta = meta
        return self

    def METHOD_NAME(self, METHOD_NAME):
        # Message published to the channel along with the file announcement.
        self._message = METHOD_NAME
        return self

    def file_name(self, file_name):
        self._file_name = file_name
        return self

    def cipher_key(self, cipher_key):
        self._cipher_key = cipher_key
        return self

    def create_response(self, envelope, data=None):
        return PNSendFileResult(envelope, self._file_upload_envelope)

    def operation_type(self):
        return PNOperationType.PNSendFileAction

    def request_headers(self):
        return {}

    def name(self):
        return "Send file to S3"

    def sync(self):
        # Step 1: obtain the pre-signed S3 upload form.
        self._file_upload_envelope = FetchFileUploadS3Data(self._pubnub).\
            channel(self._channel).\
            file_name(self._file_name).sync()

        # Step 2: upload the file itself (this endpoint's own request).
        response_envelope = super(SendFileNative, self).sync()

        # Step 3: announce the uploaded file on the channel.
        publish_file_response = PublishFileMessage(self._pubnub).\
            channel(self._channel).\
            meta(self._meta).\
            METHOD_NAME(self._message).\
            file_id(response_envelope.result.file_id).\
            file_name(response_envelope.result.name).\
            should_store(self._should_store).\
            ttl(self._ttl).\
            replicate(self._replicate).\
            ptto(self._ptto).\
            cipher_key(self._cipher_key).sync()

        # surface the publish timetoken on the upload result
        response_envelope.result.timestamp = publish_file_response.result.timestamp
        return response_envelope

    def pn_async(self, callback):
        return RequestsRequestHandler(self._pubnub).async_file_based_operation(self.sync, callback, "File Download")
298,985 | get mask | import torch
import torch.nn.functional as F
from einops import rearrange
from torch import einsum, matmul, nn
# normalization
# they use layernorm without bias, something that pytorch does not offer
class LayerNorm(nn.Module):
    """LayerNorm with a learnable scale but a fixed zero bias.

    PyTorch's nn.LayerNorm always learns a bias; this variant keeps beta as
    a constant zero buffer (bias-free layernorm, as used by PaLM).
    """

    def __init__(self, dim, eps=1e-5):
        super().__init__()
        self.eps = eps
        self.gamma = nn.Parameter(torch.ones(dim))
        # beta is a buffer, not a Parameter: it stays zero and is not trained
        self.register_buffer("beta", torch.zeros(dim))

    def forward(self, x):
        # Fix: self.eps was stored but never forwarded, so a custom eps was
        # silently ignored (F.layer_norm fell back to its own 1e-5 default).
        return F.layer_norm(x, x.shape[-1:], self.gamma, self.beta, self.eps)
# parallel with residual
# discovered by Wang et al + EleutherAI from GPT-J fame
class ParallelResidual(nn.Module):
    """Apply several branches to the same input and add every output to it.

    Residual-parallel block (attention and FFN computed side by side),
    popularized by GPT-J / EleutherAI.
    """

    def __init__(self, *fns):
        super().__init__()
        self.fns = nn.ModuleList(fns)

    def forward(self, x):
        # each branch sees the ORIGINAL input; outputs accumulate onto x
        out = x
        for fn in self.fns:
            out = out + fn(x)
        return out
# rotary positional embedding
# https://arxiv.org/abs/2104.09864
class RotaryEmbedding(nn.Module):
    """Rotary positional embedding (RoPE), https://arxiv.org/abs/2104.09864."""

    def __init__(self, dim):
        super().__init__()
        # one inverse frequency per pair of channels
        inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
        self.register_buffer("inv_freq", inv_freq)

    def forward(self, max_seq_len, *, device):
        positions = torch.arange(max_seq_len, device=device).type_as(self.inv_freq)
        # outer product positions x inv_freq, spelled as an (n,1) @ (1,d/2)
        # matmul instead of einsum/torch.outer
        n, half = positions.shape[0], self.inv_freq.shape[0]
        freqs = matmul(positions.reshape(n, 1), self.inv_freq.reshape(1, half))
        # duplicate along the channel axis so each pair shares its angle
        return torch.cat((freqs, freqs), dim=-1)
def rotate_half(x):
    """Rotate channel halves on the last dim: (x1, x2) -> (-x2, x1)."""
    # split the last dimension into two halves (equivalent to the einops
    # pattern "... (j d) -> ... j d" with j=2)
    halves = x.reshape(*x.shape[:-1], 2, -1)
    x1, x2 = halves.unbind(dim=-2)
    return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(pos, t):
    """Apply rotary position angles `pos` to tensor `t` (rotate_half inlined)."""
    t1, t2 = t.reshape(*t.shape[:-1], 2, -1).unbind(dim=-2)
    rotated = torch.cat((-t2, t1), dim=-1)
    return (t * pos.cos()) + (rotated * pos.sin())
# feedforward
# classic Noam Shazeer paper, except here they use SwiGLU instead of the more popular GEGLU
# https://arxiv.org/abs/2002.05202
class SwiGLU(nn.Module):
    """SwiGLU activation: split the input in two halves, gate one with SiLU.

    https://arxiv.org/abs/2002.05202
    """

    def forward(self, x):
        value, gate = x.chunk(2, dim=-1)
        return value * F.silu(gate)
def FeedForward(dim, mult=4):
    """Pre-norm SwiGLU feedforward block.

    The first projection is doubled because SwiGLU splits its input into a
    value half and a gate half.
    """
    hidden = int(dim * mult)
    layers = [
        LayerNorm(dim),
        nn.Linear(dim, hidden * 2, bias=False),
        SwiGLU(),
        nn.Linear(hidden, dim, bias=False),
    ]
    return nn.Sequential(*layers)
# attention
class Attention(nn.Module):
    """Multi-query causal self-attention with rotary position embeddings.

    Queries have `heads` heads; a single key/value head is shared across all
    of them (multi-query attention, https://arxiv.org/abs/1911.02150).
    """

    def __init__(self, dim, dim_head=64, heads=8):
        super().__init__()
        inner_dim = dim_head * heads
        self.norm = LayerNorm(dim)
        self.heads = heads
        self.scale = dim_head**-0.5
        self.rotary_emb = RotaryEmbedding(dim_head)
        self.to_q = nn.Linear(dim, inner_dim, bias=False)
        # single shared key/value head (multi-query attention)
        self.to_kv = nn.Linear(dim, dim_head * 2, bias=False)
        self.to_out = nn.Linear(inner_dim, dim, bias=False)
        # for caching causal mask and rotary embeddings
        self.register_buffer("mask", None, persistent=False)
        self.register_buffer("pos_emb", None, persistent=False)

    def METHOD_NAME(self, n, device):
        """Return (and cache) an n x n boolean causal mask (True above diagonal)."""
        if self.mask is not None and self.mask.shape[-1] >= n:
            return self.mask[:n, :n]

        mask = torch.ones((n, n), device=device, dtype=torch.bool).triu(1)
        self.register_buffer("mask", mask, persistent=False)
        return mask

    def get_rotary_embedding(self, n, device):
        """Return (and cache) rotary position embeddings for n positions."""
        if self.pos_emb is not None and self.pos_emb.shape[-2] >= n:
            return self.pos_emb[:n]

        pos_emb = self.rotary_emb(n, device=device)
        # Fix: the cache was previously registered under the name "position",
        # so self.pos_emb stayed None and the cache check above never hit —
        # the embedding was recomputed on every forward pass.
        self.register_buffer("pos_emb", pos_emb, persistent=False)
        return pos_emb

    def forward(self, x):
        """
        einstein notation
        b - batch
        h - heads
        n, i, j - sequence length (base sequence length, source, target)
        d - feature dimension
        """
        n, device, h = x.shape[1], x.device, self.heads

        # pre layernorm
        x = self.norm(x)

        # queries, keys, values (keys/values are single-headed)
        q, k, v = (self.to_q(x), *self.to_kv(x).chunk(2, dim=-1))

        # split heads
        # they use multi-query single-key-value attention, yet another Noam Shazeer paper
        # they found no performance loss past a certain scale, and more efficient decoding obviously
        # https://arxiv.org/abs/1911.02150
        q = rearrange(q, "b n (h d) -> b h n d", h=h)

        # rotary embeddings
        positions = self.get_rotary_embedding(n, device)
        q, k = map(lambda t: apply_rotary_pos_emb(positions, t), (q, k))

        # scale
        q = q * self.scale

        b, h, i, d, j = q.size(0), q.size(1), q.size(2), q.size(3), k.size(1)
        # similarity: einsum("b h i d, b j d -> b h i j") written as a matmul
        # by flattening the head dimension into the query length
        sim = matmul(q.reshape(b, h * i, d), k.transpose(1, 2))
        sim = sim.reshape(b, h, i, j)

        # causal mask
        causal_mask = self.METHOD_NAME(n, device)
        sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)

        # attention (max-subtraction for numerical stability)
        sim = sim - sim.amax(dim=-1, keepdim=True).detach()
        attn = sim.softmax(dim=-1)

        b_, h_, i_, j_, d_ = attn.size(0), attn.size(1), attn.size(2), attn.size(3), v.size(2)
        # aggregate values: einsum("b h i j, b j d -> b h i d") as a matmul
        out = matmul(attn.reshape(b_, h_ * i_, j_), v)
        out = out.reshape(b_, h_, i_, d_)

        # merge heads
        out = rearrange(out, "b h n d -> b n (h d)")
        return self.to_out(out)
# transformer
def PaLM(*, dim, num_tokens, depth, dim_head=64, heads=8, ff_mult=4):
    """Assemble a PaLM-style decoder-only transformer as an nn.Sequential."""
    blocks = [
        ParallelResidual(
            Attention(dim=dim, dim_head=dim_head, heads=heads),
            FeedForward(dim=dim, mult=ff_mult),
        )
        for _ in range(depth)
    ]
    embedding = nn.Embedding(num_tokens, dim)
    to_logits = nn.Linear(dim, num_tokens, bias=False)
    net = nn.Sequential(embedding, *blocks, LayerNorm(dim), to_logits)

    # tie the output projection to the token embedding weights
    # (embedding-tied logits: uncommon, but works)
    to_logits.weight = embedding.weight
    nn.init.normal_(embedding.weight, std=0.02)
    return net
298,986 | create bundle info | #!/usr/local/autopkg/python
#
# Copyright 2010 Per Olofsson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""See docstring for PkgInfoCreator class"""
import math
import os
import plistlib
from xml.etree import ElementTree
from autopkglib import Processor, ProcessorError
__all__ = ["PkgInfoCreator"]
class PkgInfoCreator(Processor):
    """Creates an PackageInfo file for a package."""

    description = __doc__
    input_variables = {
        "template_path": {"required": True, "description": "An Info.plist template."},
        "version": {"required": True, "description": "Version of the package."},
        "pkgroot": {"required": True, "description": "Virtual root of the package."},
        "infofile": {
            "required": True,
            "description": "Path to the info file to create.",
        },
        "pkgtype": {"required": True, "description": "'flat' or 'bundle'."},
    }
    output_variables = {}

    def find_template(self):
        """Searches for the template, looking in the recipe directory
        and parent recipe directories if needed.

        Returns the resolved path; raises ProcessorError if not found.
        """
        template_path = self.env["template_path"]
        if os.path.exists(template_path):
            return template_path
        elif not template_path.startswith("/"):
            # relative path: search the recipe dir and parent recipe dirs
            # NOTE(review): RECIPE_DIR may be unset, which would make
            # os.path.join fail below — confirm the runtime always sets it.
            recipe_dir = self.env.get("RECIPE_DIR")
            search_dirs = [recipe_dir]
            if self.env.get("PARENT_RECIPES"):
                # also look in the directories containing the parent recipes
                parent_recipe_dirs = list(
                    {os.path.dirname(item) for item in self.env["PARENT_RECIPES"]}
                )
                search_dirs.extend(parent_recipe_dirs)
            for directory in search_dirs:
                test_item = os.path.join(directory, template_path)
                if os.path.exists(test_item):
                    return test_item
        raise ProcessorError(f"Can't find {template_path}")

    def main(self):
        """Entry point: load the template and write the PackageInfo file."""
        if self.env["pkgtype"] not in ("bundle", "flat"):
            raise ProcessorError(f"Unknown pkgtype {self.env['pkgtype']}")
        template = self.load_template(self.find_template(), self.env["pkgtype"])
        if self.env["pkgtype"] == "bundle":
            raise ProcessorError("Bundle package creation no longer supported!")
        else:
            self.create_flat_info(template)

    def convert_bundle_info_to_flat(self, info):
        """Converts pkg info from bundle format to flat format"""
        # Since we now only support flat packages, we might be able to
        # get rid of this in the near future, but all existing recipes
        # would need to convert to only flat-style Resources/data
        conversion_map = {
            "None": "none",
            "RecommendRestart": "restart",
            "RequireLogout": "logout",
            "RequireRestart": "restart",
            "RequireShutdown": "shutdown",
        }

        pkg_info = ElementTree.Element("pkg-info")
        pkg_info.set("format-version", "2")
        # map bundle-style plist keys onto flat-format XML attributes
        for bundle, flat in (
            ("IFPkgFlagDefaultLocation", "install-location"),
            ("CFBundleShortVersionString", "version"),
            ("CFBundleIdentifier", "identifier"),
        ):
            if bundle in info:
                pkg_info.set(flat, info[bundle])

        if "IFPkgFlagAuthorizationAction" in info:
            if info["IFPkgFlagAuthorizationAction"] == "RootAuthorization":
                pkg_info.set("auth", "root")
            else:
                pkg_info.set("auth", "none")

        if "IFPkgFlagRestartAction" in info:
            pkg_info.set(
                "postinstall-action", conversion_map[info["IFPkgFlagRestartAction"]]
            )

        payload = ElementTree.SubElement(pkg_info, "payload")
        if "IFPkgFlagInstalledSize" in info:
            payload.set("installKBytes", str(info["IFPkgFlagInstalledSize"]))

        return ElementTree.ElementTree(pkg_info)

    def convert_flat_info_to_bundle(self, info):
        """Converts pkg info from flat format to bundle format"""
        # since we now only support flat packages, just raise an exception
        raise ProcessorError("Bundle package creation no longer supported!")

    def load_template(self, template_path, template_type):
        """Load a package info template in Info.plist or PackageInfo format.

        `template_path` is the path resolved by find_template();
        `template_type` is "bundle" or "flat".
        """
        if template_path.endswith(".plist"):
            # Try to load Info.plist in bundle format.
            try:
                # Fix: open the resolved template_path — find_template() may
                # have located the file in a parent recipe directory, in
                # which case the raw self.env["template_path"] value does
                # not point at an existing file.
                with open(template_path, "rb") as f:
                    info = plistlib.load(f)
            except Exception:
                raise ProcessorError(
                    f"Malformed Info.plist template {template_path}"
                )
            if template_type == "bundle":
                return info
            else:
                return self.convert_bundle_info_to_flat(info)
        else:
            # Try to load PackageInfo in flat format.
            try:
                info = ElementTree.parse(template_path)
            except Exception:
                raise ProcessorError(
                    f"Malformed PackageInfo template {template_path}"
                )
            if template_type == "flat":
                return info
            else:
                return self.convert_flat_info_to_bundle(info)

    def get_pkgroot_size(self, pkgroot):
        """Return the size of pkgroot (in kilobytes) and the number of files."""
        size = 0
        nfiles = 0
        for (dirpath, _, filenames) in os.walk(pkgroot):
            # Count the current directory and the number of files in it.
            nfiles += 1 + len(filenames)
            for filename in filenames:
                path = os.path.join(dirpath, filename)
                # Add up file size rounded up to the nearest 4 kB, which
                # appears to match what du -sk returns, and what PackageMaker
                # uses.
                size += int(math.ceil(float(os.lstat(path).st_size) / 4096.0))

        return (size, nfiles)

    def create_flat_info(self, template):
        """Create PackageInfo file for flat package"""
        info = template

        pkg_info = info.getroot()
        if pkg_info.tag != "pkg-info":
            raise ProcessorError("PackageInfo root should be pkg-info")

        pkg_info.set("version", self.env["version"])

        payload = pkg_info.find("payload")
        if payload is None:
            payload = ElementTree.SubElement(pkg_info, "payload")
        # record payload size/file count so Installer can show progress
        size, nfiles = self.get_pkgroot_size(self.env["pkgroot"])
        payload.set("installKBytes", str(size))
        payload.set("numberOfFiles", str(nfiles))

        info.write(self.env["infofile"])

    def METHOD_NAME(self, template):
        """Create Info.plist data for bundle-style pkg"""
        # We don't support the creation of bundle-style pkgs
        # any longer, so raise an exception
        raise ProcessorError("Bundle package creation no longer supported!")
if __name__ == "__main__":
    # Allow running the processor stand-alone from the command line.
    PROCESSOR = PkgInfoCreator()
    PROCESSOR.execute_shell()
298,987 | ratings | # mypy: disallow_untyped_defs=False
import inspect
from collections import OrderedDict
from typing import Dict, List, Optional, Tuple, Union
from urllib.parse import urljoin
import requests
from bs4 import BeautifulSoup
from recipe_scrapers.settings import settings
from ._grouping_utils import IngredientGroup
from ._schemaorg import SchemaOrg
# some sites close their content for 'bots', so user-agent must be supplied
HEADERS = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:86.0) Gecko/20100101 Firefox/86.0"
}
class AbstractScraper:
    """Base class for all recipe-site scrapers.

    Fetches (or accepts) the page HTML, parses it with BeautifulSoup and
    schema.org extraction, and declares the field accessors that concrete
    per-site scrapers override.
    """

    # raw HTML of the recipe page, as fetched or as passed in via `html`
    page_data: Union[str, bytes]

    def __init__(
        self,
        url: Union[str, None],
        proxies: Optional[
            Dict[str, str]
        ] = None,  # allows us to specify optional proxy server
        timeout: Optional[
            Union[float, Tuple[float, float], Tuple[float, None]]
        ] = None,  # allows us to specify optional timeout for request
        wild_mode: Optional[bool] = False,
        html: Union[str, bytes, None] = None,
    ):
        """Fetch `url` (unless `html` is supplied) and prepare the parsers."""
        if html:
            self.page_data = html
            self.url = url
        else:
            assert url is not None, "url required for fetching recipe data"
            resp = requests.get(
                url,
                headers=HEADERS,
                proxies=proxies,
                timeout=timeout,
            )
            self.page_data = resp.content
            # keep the final URL after any redirects
            self.url = resp.url

        self.wild_mode = wild_mode
        self.soup = BeautifulSoup(self.page_data, "html.parser")
        self.schema = SchemaOrg(self.page_data)

        # attach the plugins as instructed in settings.PLUGINS
        # NOTE: plugins wrap methods on the *class* (guarded by the
        # plugins_initialized flag), so all later instances of this scraper
        # class share the decorated methods.
        if not hasattr(self.__class__, "plugins_initialized"):
            for name, func in inspect.getmembers(self, inspect.ismethod):
                current_method = getattr(self.__class__, name)
                # reversed so the first plugin listed ends up outermost
                for plugin in reversed(settings.PLUGINS):
                    if plugin.should_run(self.host(), name):
                        current_method = plugin.run(current_method)
                setattr(self.__class__, name, current_method)
            setattr(self.__class__, "plugins_initialized", True)

    @classmethod
    def host(cls) -> str:
        """get the host of the url, so we can use the correct scraper"""
        raise NotImplementedError("This should be implemented.")

    def canonical_url(self):
        """Return the page's canonical URL if declared, else the fetched URL."""
        canonical_link = self.soup.find("link", {"rel": "canonical", "href": True})
        if canonical_link:
            # resolve a possibly relative canonical href against the page URL
            return urljoin(self.url, canonical_link["href"])
        return self.url

    def title(self):
        raise NotImplementedError("This should be implemented.")

    def category(self):
        raise NotImplementedError("This should be implemented.")

    def total_time(self):
        """total time it takes to preparate and cook the recipe in minutes"""
        raise NotImplementedError("This should be implemented.")

    def cook_time(self):
        """cook time of the recipe in minutes"""
        raise NotImplementedError("This should be implemented.")

    def prep_time(self):
        """preparation time of the recipe in minutes"""
        raise NotImplementedError("This should be implemented.")

    def yields(self):
        """The number of servings or items in the recipe"""
        raise NotImplementedError("This should be implemented.")

    def image(self):
        raise NotImplementedError("This should be implemented.")

    def nutrients(self):
        raise NotImplementedError("This should be implemented.")

    def language(self):
        """
        Human language the recipe is written in.
        May be overridden by individual scrapers.
        """
        candidate_languages = OrderedDict()
        # NOTE(review): find() returns None when no <html lang=...> exists,
        # which would raise AttributeError on the next line — confirm every
        # supported site declares the attribute.
        html = self.soup.find("html", {"lang": True})
        candidate_languages[html.get("lang")] = True

        # Deprecated: check for a meta http-equiv header
        # See: https://www.w3.org/International/questions/qa-http-and-lang
        meta_language = self.soup.find(
            "meta",
            {
                "http-equiv": lambda x: x and x.lower() == "content-language",
                "content": True,
            },
        )
        if meta_language:
            # the header may list several languages; take the first
            language = meta_language.get("content").split(",", 1)[0]
            if language:
                candidate_languages[language] = True

        # If other langs exist, remove 'en' commonly generated by HTML editors
        if len(candidate_languages) > 1:
            candidate_languages.pop("en", None)

        # Return the first candidate language
        return candidate_languages.popitem(last=False)[0]

    def ingredients(self):
        raise NotImplementedError("This should be implemented.")

    def ingredient_groups(self) -> List[IngredientGroup]:
        # default: a single unnamed group holding all ingredients
        return [IngredientGroup(purpose=None, ingredients=self.ingredients())]

    def instructions(self) -> str:
        """instructions to prepare the recipe"""
        raise NotImplementedError("This should be implemented.")

    def instructions_list(self) -> List[str]:
        """instructions to prepare the recipe"""
        # split the newline-joined instructions, dropping empty lines
        return [
            instruction
            for instruction in self.instructions().split("\n")
            if instruction
        ]

    def METHOD_NAME(self):
        raise NotImplementedError("This should be implemented.")

    def author(self):
        raise NotImplementedError("This should be implemented.")

    def cuisine(self):
        raise NotImplementedError("This should be implemented.")

    def description(self):
        raise NotImplementedError("This should be implemented.")

    def reviews(self):
        raise NotImplementedError("This should be implemented.")

    def links(self):
        """Return the attribute dicts of all anchors with a usable href."""
        invalid_href = {"#", ""}
        links_html = self.soup.findAll("a", href=True)

        return [link.attrs for link in links_html if link["href"] not in invalid_href]

    def site_name(self):
        """Return the og:site_name meta content, or None if absent."""
        meta = self.soup.find("meta", property="og:site_name")
        return meta.get("content") if meta else None

    def to_json(self):
        """Collect every public accessor's value into a dict, skipping failures."""
        json_dict = {}
        public_method_names = [
            method
            for method in dir(self)
            if callable(getattr(self, method))
            if not method.startswith("_") and method not in ["soup", "links", "to_json"]
        ]
        for method in public_method_names:
            try:
                if method == "ingredient_groups":
                    # dataclass instances are not JSON-serializable directly
                    json_dict[method] = [i.__dict__ for i in getattr(self, method)()]
                else:
                    json_dict[method] = getattr(self, method)()
            except Exception:
                # accessors a site doesn't support simply don't appear
                pass
        return json_dict
298,988 | generate test row | from abc import abstractmethod
from typing import Any, Dict, List, Optional, Sequence, Tuple
import pandas as pd
import torch
from torch import Tensor
from torch.cuda.amp import autocast
from torch.nn.modules.loss import _Loss
from torch.utils.data import DataLoader, Sampler
from clinicadl.utils.caps_dataset.data import CapsDataset
from clinicadl.utils.metric_module import MetricModule
from clinicadl.utils.network.network import Network
# TODO: add function to check that the output size of the network corresponds to what is expected to
# perform the task
class TaskManager:
    """Task-specific behaviour for training/evaluation.

    Concrete subclasses (one per learning task, e.g. classification or
    regression) define the prediction columns, evaluation metrics, sampler,
    criterion and ensembling rules used by the generic training loop.
    """

    def __init__(self, mode: str, n_classes: int = None):
        # mode: input granularity handled by this manager (image/patch/...)
        self.mode = mode
        self.metrics_module = MetricModule(self.evaluation_metrics, n_classes=n_classes)

    @property
    @abstractmethod
    def columns(self):
        """
        List of the columns' names in the TSV file containing the predictions.
        """
        pass

    @property
    @abstractmethod
    def evaluation_metrics(self):
        """
        Evaluation metrics which can be used to evaluate the task.
        """
        pass

    @property
    @abstractmethod
    def save_outputs(self):
        """
        Boolean value indicating if the output values should be saved as tensor for this task.
        """
        pass

    @abstractmethod
    def METHOD_NAME(
        self, idx: int, data: Dict[str, Any], outputs: Tensor
    ) -> List[List[Any]]:
        """
        Computes an individual row of the prediction TSV file.
        Args:
            idx: index of the individual input and output in the batch.
            data: input batch generated by a DataLoader on a CapsDataset.
            outputs: output batch generated by a forward pass in the model.
        Returns:
            list of items to be contained in a row of the prediction TSV file.
        """
        pass

    @abstractmethod
    def compute_metrics(self, results_df: pd.DataFrame) -> Dict[str, float]:
        """
        Compute the metrics based on the result of generate_test_row
        Args:
            results_df: results generated based on _results_test_row
        Returns:
            dictionary of metrics
        """
        pass

    @abstractmethod
    def ensemble_prediction(
        self,
        performance_df: pd.DataFrame,
        validation_df: pd.DataFrame,
        selection_threshold: float = None,
        use_labels: bool = True,
        method: str = "soft",
    ) -> Tuple[pd.DataFrame, Dict[str, float]]:
        """
        Compute the results at the image-level by assembling the results on parts of the image.
        Args:
            performance_df: results that need to be assembled.
            validation_df: results on the validation set used to compute the performance
                of each separate part of the image.
            selection_threshold: with soft-voting method, allows to exclude some parts of the image
                if their associated performance is too low.
            use_labels: If True, metrics are computed and the label column values must be different
                from None.
            method: method to assemble the results. Current implementation proposes soft or hard-voting.
        Returns:
            the results and metrics on the image level
        """
        pass

    @staticmethod
    @abstractmethod
    def generate_label_code(df: pd.DataFrame, label: str) -> Optional[Dict[str, int]]:
        """
        Generates a label code that links the output node number to label value.
        Args:
            df: meta-data of the training set.
            label: name of the column containing the labels.
        Returns:
            label_code
        """
        pass

    @staticmethod
    @abstractmethod
    def output_size(
        input_size: Sequence[int], df: pd.DataFrame, label: str
    ) -> Sequence[int]:
        """
        Computes the output_size needed to perform the task.
        Args:
            input_size: size of the input.
            df: meta-data of the training set.
            label: name of the column containing the labels.
        Returns:
            output_size
        """
        pass

    @staticmethod
    @abstractmethod
    def generate_sampler(
        dataset: CapsDataset, sampler_option: str = "random", n_bins: int = 5
    ) -> Sampler:
        """
        Returns sampler according to the wanted options.
        Args:
            dataset: the dataset to sample from.
            sampler_option: choice of sampler.
            n_bins: number of bins to used for a continuous variable (regression task).
        Returns:
            callable given to the training data loader.
        """
        pass

    @staticmethod
    @abstractmethod
    def get_criterion(criterion: str = None) -> _Loss:
        """
        Gives the optimization criterion.
        Must check that it is compatible with the task.
        Args:
            criterion: name of the loss as written in Pytorch.
        Raises:
            ClinicaDLArgumentError: if the criterion is not compatible with the task.
        """
        pass

    @staticmethod
    @abstractmethod
    def get_default_network() -> Network:
        """Returns the default network to use when no architecture is specified."""
        pass

    def test(
        self,
        model: Network,
        dataloader: DataLoader,
        criterion: _Loss,
        use_labels: bool = True,
        amp: bool = False,
    ) -> Tuple[pd.DataFrame, Dict[str, float]]:
        """
        Computes the predictions and evaluation metrics.
        Args:
            model: the model trained.
            dataloader: wrapper of a CapsDataset.
            criterion: function to calculate the loss.
            use_labels: If True the true_label will be written in output DataFrame
                and metrics dict will be created.
            amp: If True, enables Pytorch's automatic mixed precision.
        Returns:
            the results and metrics on the image level.
        """
        model.eval()
        dataloader.dataset.eval()
        results_df = pd.DataFrame(columns=self.columns)
        total_loss = {}
        with torch.no_grad():
            for i, data in enumerate(dataloader):
                # initialize the loss list to save the loss components
                with autocast(enabled=amp):
                    outputs, loss_dict = model.compute_outputs_and_loss(
                        data, criterion, use_labels=use_labels
                    )
                if i == 0:
                    # the first batch defines which loss components are tracked
                    for loss_component in loss_dict.keys():
                        total_loss[loss_component] = 0
                for loss_component in total_loss.keys():
                    total_loss[loss_component] += (
                        loss_dict[loss_component].float().item()
                    )

                # Generate detailed DataFrame
                # NOTE(review): row-by-row pd.concat is O(n^2); consider
                # accumulating rows and concatenating once after the loop.
                for idx in range(len(data["participant_id"])):
                    row = self.METHOD_NAME(idx, data, outputs.float())
                    row_df = pd.DataFrame(row, columns=self.columns)
                    results_df = pd.concat([results_df, row_df])

                del outputs, loss_dict
            results_df.reset_index(inplace=True, drop=True)

        if not use_labels:
            metrics_dict = None
        else:
            metrics_dict = self.compute_metrics(results_df)
            # expose the summed loss components alongside the task metrics
            for loss_component in total_loss.keys():
                metrics_dict[loss_component] = total_loss[loss_component]
        torch.cuda.empty_cache()

        return results_df, metrics_dict
298,989 | test 06 self cannot upgrade group |
from django.test import TestCase
from django.core.exceptions import PermissionDenied
from django.contrib.auth.models import Group
from hs_access_control.models import PrivilegeCodes
from hs_core import hydroshare
from hs_core.testing import MockIRODSTestCaseMixin
from hs_access_control.tests.utilities import global_reset, is_equal_to_as_set
class T05ShareResource(MockIRODSTestCaseMixin, TestCase):
    def setUp(self):
        """Create the users, resource and group shared by all tests below."""
        super(T05ShareResource, self).setUp()
        global_reset()
        self.group, _ = Group.objects.get_or_create(name='Hydroshare Author')
        # superuser (not exercised directly in these tests)
        self.admin = hydroshare.create_account(
            'admin@gmail.com',
            username='admin',
            first_name='administrator',
            last_name='couch',
            superuser=True,
            groups=[]
        )
        # resource/group owner
        self.cat = hydroshare.create_account(
            'cat@gmail.com',
            username='cat',
            first_name='not a dog',
            last_name='last_name_cat',
            superuser=False,
            groups=[]
        )
        # grantee whose privileges are shared/unshared/downgraded
        self.dog = hydroshare.create_account(
            'dog@gmail.com',
            username='dog',
            first_name='a little arfer',
            last_name='last_name_dog',
            superuser=False,
            groups=[]
        )
        # use this as non owner
        self.mouse = hydroshare.create_account(
            'mouse@gmail.com',
            username='mouse',
            first_name='first_name_mouse',
            last_name='last_name_mouse',
            superuser=False,
            groups=[]
        )
        # resource owned by cat, shared with dog in the tests
        self.holes = hydroshare.create_resource(
            resource_type='CompositeResource',
            owner=self.cat,
            title='all about dog holes',
            metadata=[],
        )
        self.meowers = self.cat.uaccess.create_group(
            title='some random meowers', description="some random group")
def test_01_self_unshare_resource(self):
"""A user can unshare a resource with self"""
holes = self.holes
cat = self.cat
dog = self.dog
cat.uaccess.share_resource_with_user(holes, dog, PrivilegeCodes.CHANGE)
self.assertTrue(dog in holes.raccess.edit_users)
self.assertTrue(dog in holes.raccess.view_users)
self.assertTrue(
is_equal_to_as_set(
[dog],
dog.uaccess.get_resource_unshare_users(holes)))
dog.uaccess.unshare_resource_with_user(holes, dog)
self.assertFalse(dog in holes.raccess.edit_users)
self.assertFalse(dog in holes.raccess.view_users)
self.assertTrue(
is_equal_to_as_set(
[], dog.uaccess.get_resource_unshare_users(holes)))
def test_02_self_downgrade_resource(self):
"""can downgrade privilege for a resource to which one has access"""
holes = self.holes
cat = self.cat
dog = self.dog
cat.uaccess.share_resource_with_user(holes, dog, PrivilegeCodes.CHANGE)
self.assertTrue(dog in holes.raccess.edit_users)
self.assertTrue(dog in holes.raccess.view_users)
self.assertTrue(
is_equal_to_as_set(
[dog],
dog.uaccess.get_resource_unshare_users(holes)))
dog.uaccess.share_resource_with_user(holes, dog, PrivilegeCodes.VIEW)
self.assertFalse(dog in holes.raccess.edit_users)
self.assertTrue(dog in holes.raccess.view_users)
self.assertTrue(
is_equal_to_as_set(
[dog],
dog.uaccess.get_resource_unshare_users(holes)))
def test_03_self_cannot_upgrade_resource(self):
"""cannot upgrade privilege for a resource to which one has access"""
holes = self.holes
cat = self.cat
dog = self.dog
cat.uaccess.share_resource_with_user(holes, dog, PrivilegeCodes.VIEW)
self.assertFalse(dog in holes.raccess.edit_users)
self.assertTrue(dog in holes.raccess.view_users)
self.assertTrue(
is_equal_to_as_set(
[dog],
dog.uaccess.get_resource_unshare_users(holes)))
with self.assertRaises(PermissionDenied):
dog.uaccess.share_resource_with_user(
holes, dog, PrivilegeCodes.VIEW)
with self.assertRaises(PermissionDenied):
dog.uaccess.share_resource_with_user(
holes, dog, PrivilegeCodes.CHANGE)
self.assertTrue(dog in holes.raccess.view_users)
self.assertTrue(
is_equal_to_as_set(
[dog],
dog.uaccess.get_resource_unshare_users(holes)))
def test_04_self_unshare_group(self):
"""A user can unshare a group with self"""
meowers = self.meowers
cat = self.cat
dog = self.dog
cat.uaccess.share_group_with_user(meowers, dog, PrivilegeCodes.CHANGE)
self.assertTrue(dog in meowers.gaccess.edit_users)
self.assertTrue(dog in meowers.gaccess.members)
self.assertTrue(
is_equal_to_as_set(
[dog],
dog.uaccess.get_group_unshare_users(meowers)))
dog.uaccess.unshare_group_with_user(meowers, dog)
self.assertFalse(dog in meowers.gaccess.edit_users)
self.assertFalse(dog in meowers.gaccess.members)
self.assertTrue(
is_equal_to_as_set(
[], dog.uaccess.get_group_unshare_users(meowers)))
def test_05_self_can_downgrade_group(self):
"""can downgrade privilege for a group of which one is a member """
meowers = self.meowers
cat = self.cat
dog = self.dog
cat.uaccess.share_group_with_user(meowers, dog, PrivilegeCodes.CHANGE)
self.assertTrue(dog in meowers.gaccess.edit_users)
self.assertTrue(dog in meowers.gaccess.members)
self.assertTrue(
is_equal_to_as_set(
[dog],
dog.uaccess.get_group_unshare_users(meowers)))
dog.uaccess.share_group_with_user(meowers, dog, PrivilegeCodes.VIEW)
self.assertFalse(dog in meowers.gaccess.edit_users)
self.assertTrue(dog in meowers.gaccess.members)
self.assertTrue(
is_equal_to_as_set(
[dog],
dog.uaccess.get_group_unshare_users(meowers)))
def METHOD_NAME(self):
"""cannot upgrade privilege for a group of which one is a member """
meowers = self.meowers
cat = self.cat
dog = self.dog
cat.uaccess.share_group_with_user(meowers, dog, PrivilegeCodes.VIEW)
self.assertFalse(dog in meowers.gaccess.edit_users)
self.assertTrue(dog in meowers.gaccess.members)
self.assertTrue(
is_equal_to_as_set(
[dog],
dog.uaccess.get_group_unshare_users(meowers)))
with self.assertRaises(PermissionDenied):
dog.uaccess.share_group_with_user(
meowers, dog, PrivilegeCodes.VIEW)
with self.assertRaises(PermissionDenied):
dog.uaccess.share_group_with_user(
meowers, dog, PrivilegeCodes.CHANGE)
self.assertTrue(dog in meowers.gaccess.members)
self.assertTrue(
is_equal_to_as_set(
[dog],
dog.uaccess.get_group_unshare_users(meowers))) |
"""module with utils for e2e tests"""
import asyncio
import json
from dataclasses import dataclass
from pathlib import Path
from subprocess import CompletedProcess
from typing import Iterable, List, Optional, Union
import websockets.server as ws_server
BASE_DATA_PATH = Path(f"{__file__}").parent / Path("files")
DEFAULT_SOURCES = Path(f"{__file__}").parent / Path("../sources")
DEFAULT_INVENTORY = BASE_DATA_PATH / "inventories/default_inventory.yml"
@dataclass
class Command:
    """
    Represents the command and their arguments and
    provides methods to render it for cmd runners
    """

    # Path to the rulebook file to run; the only required field.
    rulebook: Path
    # argv[0] of the rendered command line.
    program_name: str = "ansible-rulebook"
    # Working directory the runner should execute in (not part of argv).
    cwd: Path = BASE_DATA_PATH
    # Inventory file passed via -i.
    inventory: Path = DEFAULT_INVENTORY
    # Event-source plugin directory passed via -S; None/falsy omits the flag.
    sources: Optional[Path] = DEFAULT_SOURCES
    # Extra-vars file passed via --vars when set.
    vars_file: Optional[Path] = None
    # Comma-separated env var names passed via --env-vars when set.
    envvars: Optional[str] = None
    # Activation/process id passed via --id when set.
    proc_id: Union[str, int, None] = None
    # verbose/debug map to -v / -vv; both are ignored when verbosity > 0.
    verbose: bool = False
    debug: bool = False
    # Websocket URL passed via --websocket-address when set.
    websocket: Optional[str] = None
    # Project tarball passed via --project-tarball when set.
    project_tarball: Optional[Path] = None
    # Adds --worker when True.
    worker_mode: bool = False
    # Explicit -v repetition count; overrides verbose/debug (see __post_init__).
    verbosity: int = 0
    # --heartbeat seconds; 0 omits the flag.
    heartbeat: int = 0
    # --execution-strategy value when set.
    execution_strategy: Optional[str] = None
    # Adds --hot-reload when True.
    hot_reload: bool = False

    def __post_init__(self):
        # verbosity overrides verbose and debug
        if self.verbosity > 0:
            self.verbose = False
            self.debug = False

    def __str__(self) -> str:
        """Render the command as a single shell-style string."""
        return self.to_string()

    def __iter__(self) -> Iterable:
        """Iterate over the rendered argv items (subprocess-friendly)."""
        return (item for item in self.to_list())

    def to_list(self) -> List:
        """Render the command as an argv list; flag order is fixed and part
        of the expected output of these e2e tests."""
        result = [self.program_name]
        result.extend(["-i", str(self.inventory.absolute())])
        if self.sources:
            result.extend(["-S", str(self.sources.absolute())])
        if self.vars_file:
            result.extend(["--vars", str(self.vars_file.absolute())])
        if self.envvars:
            result.extend(["--env-vars", self.envvars])
        if self.proc_id:
            result.extend(["--id", str(self.proc_id)])
        if self.websocket:
            result.extend(["--websocket-address", self.websocket])
        if self.project_tarball:
            result.extend(
                ["--project-tarball", str(self.project_tarball.absolute())]
            )
        if self.worker_mode:
            result.append("--worker")
        if self.rulebook:
            result.extend(["--rulebook", str(self.rulebook.absolute())])
        if self.verbose:
            result.append("-v")
        if self.debug:
            result.append("-vv")
        if self.verbosity > 0:
            result.append(f"-{'v'*self.verbosity}")
        if self.heartbeat > 0:
            result.extend(["--heartbeat", str(self.heartbeat)])
        if self.execution_strategy:
            result.extend(["--execution-strategy", self.execution_strategy])
        if self.hot_reload:
            result.append("--hot-reload")
        return result

    def to_string(self) -> str:
        """Join the argv list into one space-separated string."""
        return " ".join(self.to_list())
def jsonify_output(output: str) -> List[dict]:
    """
    Receives an str from the cmd output when json_mode is enabled
    and returns the list of dicts (one JSON object per line).
    """
    return [json.loads(line) for line in output.splitlines()]


def assert_playbook_output(result: CompletedProcess) -> List[dict]:
    """
    Common logic to assert a successful execution of a run_playbook action.

    Asserts the process exited 0, wrote nothing to stderr, and (when any
    output was produced) that the last event reports ok with no failures.
    Returns the stdout deserialized as a list of dicts.
    """
    assert result.returncode == 0
    assert not result.stderr
    output = jsonify_output(result.stdout.decode())
    if output:
        assert output[-1]["event_data"]["ok"]
        assert not output[-1]["event_data"]["failures"]
    return output
async def msg_handler(
    websocket: ws_server.WebSocketServerProtocol, queue: asyncio.Queue
):
    """
    Handler for a websocket server that passes json messages
    from ansible-rulebook in the given queue.

    Each queued item is a dict with the request path and the decoded payload.
    (Removed a stray '|' artifact that broke the last statement.)
    """
    async for message in websocket:
        payload = json.loads(message)
        data = {"path": websocket.path, "payload": payload}
        await queue.put(data)
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle import ParamAttr
from paddle.regularizer import L2Decay
from paddlex.ppdet.core.workspace import register, serializable
from ..shape_spec import ShapeSpec
from ..backbones.esnet import SEModule
from .csp_pan import ConvBNLayer, Channel_T, DPModule
__all__ = ['ESPAN']
class ES_Block(nn.Layer):
    """Two-branch block used by ESPAN: a 1x1 residual projection in parallel
    with a pointwise -> depthwise -> SE -> linear branch; the branches are
    concatenated and fused by a final 1x1 conv back to ``out_channels``."""

    def __init__(self,
                 in_channels,
                 mid_channels,
                 out_channels,
                 kernel_size=5,
                 stride=1,
                 act='leaky_relu'):
        super(ES_Block, self).__init__()
        # 1x1 shortcut branch projecting input directly to out_channels.
        self._residual = ConvBNLayer(
            in_channel=in_channels,
            out_channel=out_channels,
            kernel_size=1,
            stride=1,
            groups=1,
            act=act)
        # Pointwise conv producing half of mid_channels for the main branch.
        self._conv_pw = ConvBNLayer(
            in_channel=in_channels,
            out_channel=mid_channels // 2,
            kernel_size=1,
            stride=1,
            groups=1,
            act=act)
        # Depthwise conv (groups == channels); no activation before SE.
        self._conv_dw = ConvBNLayer(
            in_channel=mid_channels // 2,
            out_channel=mid_channels // 2,
            kernel_size=kernel_size,
            stride=stride,
            groups=mid_channels // 2,
            act=None)
        # SE over the pw/dw concatenation (mid_channels total).
        self._se = SEModule(mid_channels)
        # 1x1 conv mapping the SE output to out_channels.
        self._conv_linear = ConvBNLayer(
            in_channel=mid_channels,
            out_channel=out_channels,
            kernel_size=1,
            stride=1,
            groups=1,
            act=act)
        # Fuses the concatenated residual + main branches (2*out_channels).
        self._out_conv = ConvBNLayer(
            in_channel=out_channels * 2,
            out_channel=out_channels,
            kernel_size=1,
            stride=1,
            groups=1,
            act=act)

    def forward(self, inputs):
        x1 = self._residual(inputs)            # shortcut branch
        x2 = self._conv_pw(inputs)             # main branch: pointwise
        x3 = self._conv_dw(x2)                 # main branch: depthwise
        x3 = paddle.concat([x2, x3], axis=1)   # keep pw features alongside dw
        x3 = self._se(x3)
        x3 = self._conv_linear(x3)
        out = paddle.concat([x1, x3], axis=1)  # join the two branches
        out = self._out_conv(out)
        return out
@register
@serializable
class ESPAN(nn.Layer):
    """Path Aggregation Network with ES module.

    Args:
        in_channels (List[int]): Number of input channels per scale.
        out_channels (int): Number of output channels (used at each scale)
        kernel_size (int): The conv2d kernel size of this Module.
        num_features (int): Number of output features of CSPPAN module.
        use_depthwise (bool): Whether to depthwise separable convolution in
            blocks. Default: True
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=5,
                 num_features=3,
                 use_depthwise=True,
                 act='hard_swish',
                 spatial_scales=[0.125, 0.0625, 0.03125]):
        super(ESPAN, self).__init__()
        # Channel_T maps every input level to a common channel count.
        self.conv_t = Channel_T(in_channels, out_channels, act=act)
        in_channels = [out_channels] * len(spatial_scales)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.spatial_scales = spatial_scales
        self.num_features = num_features
        conv_func = DPModule if use_depthwise else ConvBNLayer

        # Optional extra pyramid level built by stride-2 convs.
        if self.num_features == 4:
            self.first_top_conv = conv_func(
                in_channels[0], in_channels[0], kernel_size, stride=2, act=act)
            self.second_top_conv = conv_func(
                in_channels[0], in_channels[0], kernel_size, stride=2, act=act)
            self.spatial_scales.append(self.spatial_scales[-1] / 2)

        # build top-down blocks
        self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
        self.top_down_blocks = nn.LayerList()
        for idx in range(len(in_channels) - 1, 0, -1):
            self.top_down_blocks.append(
                ES_Block(
                    in_channels[idx - 1] * 2,
                    in_channels[idx - 1],
                    in_channels[idx - 1],
                    kernel_size=kernel_size,
                    stride=1,
                    act=act))

        # build bottom-up blocks
        self.downsamples = nn.LayerList()
        self.bottom_up_blocks = nn.LayerList()
        for idx in range(len(in_channels) - 1):
            self.downsamples.append(
                conv_func(
                    in_channels[idx],
                    in_channels[idx],
                    kernel_size=kernel_size,
                    stride=2,
                    act=act))
            self.bottom_up_blocks.append(
                ES_Block(
                    in_channels[idx] * 2,
                    in_channels[idx + 1],
                    in_channels[idx + 1],
                    kernel_size=kernel_size,
                    stride=1,
                    act=act))

    def forward(self, inputs):
        """
        Args:
            inputs (tuple[Tensor]): input features.

        Returns:
            tuple[Tensor]: CSPPAN features.
        """
        assert len(inputs) == len(self.in_channels)
        inputs = self.conv_t(inputs)

        # top-down path
        inner_outs = [inputs[-1]]
        for idx in range(len(self.in_channels) - 1, 0, -1):
            feat_heigh = inner_outs[0]
            feat_low = inputs[idx - 1]
            upsample_feat = self.upsample(feat_heigh)
            inner_out = self.top_down_blocks[len(self.in_channels) - 1 - idx](
                paddle.concat([upsample_feat, feat_low], 1))
            inner_outs.insert(0, inner_out)

        # bottom-up path
        outs = [inner_outs[0]]
        for idx in range(len(self.in_channels) - 1):
            feat_low = outs[-1]
            feat_height = inner_outs[idx + 1]
            downsample_feat = self.downsamples[idx](feat_low)
            out = self.bottom_up_blocks[idx](paddle.concat(
                [downsample_feat, feat_height], 1))
            outs.append(out)

        top_features = None
        if self.num_features == 4:
            top_features = self.first_top_conv(inputs[-1])
            top_features = top_features + self.second_top_conv(outs[-1])
            outs.append(top_features)

        return tuple(outs)

    @property
    def out_shape(self):
        return [
            ShapeSpec(
                channels=self.out_channels, stride=1. / s)
            for s in self.spatial_scales
        ]

    @classmethod
    def from_config(cls, cfg, input_shape):
        """Infer constructor kwargs from upstream feature-map shapes.

        Renamed from a placeholder: the ppdet config system instantiates
        registered modules through a classmethod named ``from_config``.
        """
        return {'in_channels': [i.channels for i in input_shape], }
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Split a tractogram into multiple files, 2 options available :
Split into X files, or split into files of Y streamlines.
By default, streamlines to add to each chunk will be chosen randomly.
Optionally, you can split streamlines...
- sequentially (the first n/nb_chunks streamlines in the first chunk and so
on).
- randomly, but per Quickbundles clusters.
"""
import argparse
import logging
import os
from dipy.io.stateful_tractogram import set_sft_logger_level
from dipy.io.streamline import save_tractogram
import numpy as np
from scilpy.io.streamlines import load_tractogram_with_reference
from scilpy.io.utils import (add_overwrite_arg, add_reference_arg,
assert_inputs_exist, assert_outputs_exist,
assert_output_dirs_exist_and_empty,
add_verbose_arg)
from scilpy.tractograms.tractogram_operations import (
split_sft_sequentially,
split_sft_randomly,
split_sft_randomly_per_cluster)
def _build_arg_parser():
    """Construct and return the argparse parser for this script.

    Exactly one of --chunk_size / --nb_chunks is required; splitting order
    options (--split_per_cluster / --do_not_randomize) are mutually exclusive.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter,
        description=__doc__)

    parser.add_argument('in_tractogram',
                        help='Tractogram input file name.')
    parser.add_argument('out_prefix',
                        help='Prefix for the output tractogram, index will be '
                             'appended \nautomatically (ex, _0.trk), based on '
                             'input type.')
    parser.add_argument('--out_dir', default='',
                        help='Put all output tractogram in a specific directory.')

    size_group = parser.add_mutually_exclusive_group(required=True)
    size_group.add_argument('--chunk_size', type=int,
                            help='The maximum number of streamlines per file.')
    size_group.add_argument('--nb_chunks', type=int,
                            help='Divide the file in equal parts.')

    order_group = parser.add_mutually_exclusive_group()
    order_group.add_argument(
        '--split_per_cluster', action='store_true',
        help='If set, splitting will be done per cluster (computed with \n'
             'Quickbundles) to ensure that at least some streamlines are \n'
             'kept from each bundle in each chunk. Else, random splitting is\n'
             'performed (default).')
    order_group.add_argument(
        '--do_not_randomize', action='store_true',
        help="If set, splitting is done sequentially through the original \n"
             "sft instead of using random indices.")

    parser.add_argument('--qbx_thresholds', nargs='+', type=float,
                        default=[40, 30, 20], metavar='t',
                        help="If you chose option '--split_per_cluster', you may "
                             "set the \nQBx threshold value(s) here. Default: "
                             "%(default)s")
    parser.add_argument('--seed', default=None, type=int,
                        help='Use a specific random seed for the subsampling.')

    add_reference_arg(parser)
    add_overwrite_arg(parser)
    add_verbose_arg(parser)

    return parser
def main():
    """Split the input tractogram into chunks and save one file per chunk."""
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, args.in_tractogram)
    _, out_extension = os.path.splitext(args.in_tractogram)
    assert_output_dirs_exist_and_empty(parser, args, [], optional=args.out_dir)

    # Check only the first potential output filename, we don't know how many
    # there are yet.
    assert_outputs_exist(parser, args, os.path.join(
        args.out_dir, '{}_0{}'.format(args.out_prefix, out_extension)))

    log_level = logging.WARNING
    if args.verbose:
        log_level = logging.DEBUG
        set_sft_logger_level('INFO')
    logging.getLogger().setLevel(log_level)

    logging.debug("Loading sft.")
    sft = load_tractogram_with_reference(parser, args, args.in_tractogram)
    streamlines_count = len(sft.streamlines)

    if args.nb_chunks:
        chunk_size = streamlines_count // args.nb_chunks
        nb_chunks = args.nb_chunks
    else:
        chunk_size = args.chunk_size
        # Ceiling division: the former int(n / size) + 1 produced an extra
        # empty chunk whenever n was an exact multiple of the chunk size.
        nb_chunks = -(-streamlines_count // chunk_size)

    if chunk_size == 0:
        parser.error('Cannot split into chunks of 0 streamlines (more '
                     'chunks requested than there are streamlines?).')

    # Check other outputs
    out_names = ['{0}_{1}{2}'.format(args.out_prefix, i, out_extension) for
                 i in range(nb_chunks)]
    assert_outputs_exist(parser, args,
                         [os.path.join(args.out_dir, out_names[i]) for i in
                          range(1, nb_chunks)])

    # All chunks will be equal except the last one.
    # int64 (was int16): chunks larger than 32767 streamlines must not
    # silently overflow.
    chunk_sizes = np.ones((nb_chunks,), dtype=np.int64) * chunk_size
    chunk_sizes[-1] += (streamlines_count - chunk_size * nb_chunks)

    if args.do_not_randomize:
        sfts = split_sft_sequentially(sft, chunk_sizes)
    elif args.split_per_cluster:
        # With this version, will contain an additional sft with non-included
        # streamlines. Should be of size close to 0. Not using it.
        sfts = split_sft_randomly_per_cluster(
            sft, chunk_sizes, args.seed, args.qbx_thresholds)
    else:
        sfts = split_sft_randomly(sft, chunk_sizes, args.seed)

    for i in range(nb_chunks):
        out_name = os.path.join(args.out_dir, out_names[i])
        save_tractogram(sfts[i], out_name)


if __name__ == "__main__":
    main()
import os
import sys
import click
import logging
from .config import Config
import warnings
log = logging.getLogger("a2ml")
CONTEXT_SETTINGS = dict(auto_envvar_prefix='A2ML')
PROVIDERS = ['auger', 'google', 'azure', 'external']
PROVIDERS_META = '|'.join(PROVIDERS)
class Context(object):
    """The Context class provides an environment to run A2ML"""

    def __init__(self, name='auger', path=None, debug=False):
        """Initializes the Context instance

        Args:
            name (str): The name of the config file. Default is 'config'
            path(str): The path to your config file. If the config file is in the root directory leave as None.
            debug (bool): True | False. Default is False.

        Returns:
            object: Context object

        Example:
            .. code-block:: python

                ctx = Context()
        """
        super(Context, self).__init__()

        self.config = Config(name=name, path=path)
        self.name = self.config.name
        self.notificator = None
        self.request_id = None
        self.provider_info = None

        if len(self.name) > 0:
            self.name = f'[{self.name}] '  # "{:<9}".format('[%s]' % self.name)

        self.debug = self.config.get('debug', debug)
        self.set_runs_on_server(False)

        # Route Python warnings through this context so they are logged and
        # published to the notificator in debug mode.
        def warn(*args, **kwargs):
            self.system_warning(*args, **kwargs)

        warnings.warn = warn

    def set_runs_on_server(self, value):
        # Kept in sync on the config so config consumers see the same flag.
        self._runs_on_server = value
        self.config.runs_on_server = value

    def is_runs_on_server(self):
        return self._runs_on_server

    def use_auger_cloud(self):
        # 'use_a2ml_hub' is the legacy config key; 'use_auger_cloud' wins.
        return self.config.get('use_auger_cloud', self.config.get('use_a2ml_hub', True))

    def get_name(self):
        """Return the raw (unformatted) config name.

        Renamed from a placeholder; ``self.name`` holds the bracketed
        log-prefix form, this accessor returns the plain name.
        """
        return self.config.name

    def get_providers(self, provider=None):
        """Returns the list of providers to operate on.

        Args:
            provider (str, optional): Comma-separated provider name(s)
                overriding the config's 'providers' entry.

        Returns:
            list[str]: e.g. ['azure', 'auger']

        Raises:
            Exception: if a provider is not supported or the config entry
                is neither a string nor a list.

        Examples:
            .. code-block:: python

                ctx = Context()
                ctx.get_providers()
        """
        if provider:
            providers = provider
        else:
            providers = self.config.get('providers', ['auger'])

        if isinstance(providers, (str,)):
            providers = [p.strip() for p in providers.split(',')]

        if isinstance(providers, (list,)):
            for p in providers:
                if p not in PROVIDERS:
                    raise Exception('Provider %s is not supported.' % p)
            return providers

        raise Exception('Expecting list of providers in config.yaml\providers')

    def get_model_provider(self, model_id):
        """Guess the provider from a model id; Azure AutoML ids carry an
        'AutoML_' prefix, anything else is treated as an auger model."""
        if not model_id:
            return None

        if model_id.startswith("AutoML_"):
            return "azure"

        return "auger"

    def is_external_provider(self):
        providers = self.get_providers()
        return providers and providers[0] == 'external'

    def copy(self, name):
        """Creates a copy of an existing Context

        Note: the current implementation mutates this context's 'providers'
        config entry and returns self rather than building a new instance
        (the original object-copy implementation is kept below, commented).

        Args:
            name (str): The name of the config file. Default is 'config'

        Returns:
            object: Context object

        Example:
            .. code-block:: python

                ctx = Context()
                new_ctx = ctx.copy()
        """
        try:
            self.config.set("providers", name, config_name='config')
        except Exception as e:
            # In case if command run in folder without config, do not set it
            pass

        return self

        # new = Context(name, self.config.path, self.debug)
        # new.set_runs_on_server(self._runs_on_server)
        # new.notificator = self.notificator
        # new.request_id = self.request_id
        # new.config.parts = self.config.parts
        # new.config.parts_changes = self.config.parts_changes

        # try:
        #     new.config.set("providers", name, config_name='config')
        # except Exception as e:
        #     # In case if command run in folder without config, do not set it
        #     pass

        # if hasattr(self, 'credentials'):
        #     new.credentials = self.credentials

        # return new

    def info(self, msg, *args, **kwargs):
        log.info('%s%s' % (self.name, msg), *args, **kwargs)
        self.publish_log('info', '%s%s' % (self.name, msg), *args, **kwargs)

    def log(self, msg, *args, **kwargs):
        log.info('%s%s' % (self.name, msg), *args, **kwargs)
        self.publish_log('info', '%s%s' % (self.name, msg), *args, **kwargs)

    def log_debug(self, msg, *args, **kwargs):
        if self.debug:
            log.debug('%s%s' % (self.name, msg), *args, **kwargs)
            self.publish_log('debug', '%s%s' % (self.name, msg), *args, **kwargs)

    def error(self, msg, *args, **kwargs):
        log.error('%s%s' % (self.name, msg), *args, **kwargs)
        self.publish_log('error', '%s%s' % (self.name, msg), *args, **kwargs)

    def exception(self, msg, *args, **kwargs):
        # Log first, then raise so the message is never lost.
        self.error(msg, *args, **kwargs)
        raise Exception(msg, *args, **kwargs)

    def system_warning(self, *args, **kwargs):
        # Only surfaces Python warnings when debug is enabled.
        if self.debug and args:
            name = '[warning] '
            msg = args[0]
            log.warning('%s%s' % (name, msg))
            self.publish_log('warning', '%s%s' % (name, msg))

    def publish_log(self, level, msg, *args, **kwargs):
        if self.notificator:
            self.notificator.publish_log(self.request_id, level, msg, args, kwargs)

    @staticmethod
    def setup_logger(format='%(asctime)s %(name)s | %(message)s'):
        logging.basicConfig(
            stream=sys.stdout,
            datefmt='%H:%M:%S',
            format=format,
            level=logging.INFO)
# Click decorator injecting the shared Context into CLI commands; ensure=True
# creates a default Context when none exists yet in the click context chain.
pass_context = click.make_pass_decorator(Context, ensure=True)
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.abs
import csv
import logging
import os.path as op
from typing import List, Optional
import numpy as np
import torch
from fairseq.data import Dictionary
from fairseq.data.audio.speech_to_text_dataset import (
S2TDataConfig
)
from fairseq.data.audio.text_to_speech_dataset import (
TextToSpeechDataset, TextToSpeechDatasetCreator
)
logger = logging.getLogger(__name__)
class FrmTextToSpeechDataset(TextToSpeechDataset):
    """Text-to-speech dataset with optional per-epoch curriculum chunking:
    training targets (and the matching frames) are cropped to a chunk whose
    size grows with the epoch via :meth:`set_epoch`."""

    def __init__(
        self,
        split: str,
        is_train_split: bool,
        data_cfg: S2TDataConfig,
        audio_paths: List[str],
        n_frames: List[int],
        src_texts: Optional[List[str]] = None,
        tgt_texts: Optional[List[str]] = None,
        speakers: Optional[List[str]] = None,
        src_langs: Optional[List[str]] = None,
        tgt_langs: Optional[List[str]] = None,
        ids: Optional[List[str]] = None,
        tgt_dict: Optional[Dictionary] = None,
        pre_tokenizer=None,
        bpe_tokenizer=None,
        n_frames_per_step=1,
        speaker_to_id=None,
        do_chunk=False,
        chunk_bound=-1,
        chunk_init=50,
        chunk_incr=5,
        add_eos=True,
        dedup=True,
        ref_fpu=-1
    ):
        # It assumes texts are encoded at a fixed frame-rate
        super().__init__(
            split=split,
            is_train_split=is_train_split,
            data_cfg=data_cfg,
            audio_paths=audio_paths,
            n_frames=n_frames,
            src_texts=src_texts,
            tgt_texts=tgt_texts,
            speakers=speakers,
            src_langs=src_langs,
            tgt_langs=tgt_langs,
            ids=ids,
            tgt_dict=tgt_dict,
            pre_tokenizer=pre_tokenizer,
            bpe_tokenizer=bpe_tokenizer,
            n_frames_per_step=n_frames_per_step,
            speaker_to_id=speaker_to_id
        )

        self.do_chunk = do_chunk
        self.chunk_bound = chunk_bound
        self.chunk_init = chunk_init
        self.chunk_incr = chunk_incr
        self.add_eos = add_eos
        self.dedup = dedup
        self.ref_fpu = ref_fpu

        # Updated per epoch by set_epoch; <= 0 disables chunking.
        self.chunk_size = -1

        if do_chunk:
            assert self.chunk_incr >= 0
            assert self.pre_tokenizer is None

    def __getitem__(self, index):
        index, source, target, speaker_id, _, _, _ = super().__getitem__(index)
        # Strip trailing EOS so fpu is computed on the raw text units.
        if target[-1].item() == self.tgt_dict.eos_index:
            target = target[:-1]

        fpu = source.size(0) / target.size(0)  # frame-per-unit
        fps = self.n_frames_per_step
        # Sanity-check the frame rate against the reference (10% tolerance).
        assert (
            self.ref_fpu == -1 or
            abs((fpu * fps - self.ref_fpu) / self.ref_fpu) < 0.1
        ), f"{fpu*fps} != {self.ref_fpu}"

        # only chunk training split
        if self.is_train_split and self.do_chunk and self.chunk_size > 0:
            # Keep the (optional) language tag intact; crop only the text.
            lang = target[:int(self.data_cfg.prepend_tgt_lang_tag)]
            text = target[int(self.data_cfg.prepend_tgt_lang_tag):]
            size = len(text)
            chunk_size = min(self.chunk_size, size)
            chunk_start = np.random.randint(size - chunk_size + 1)
            text = text[chunk_start:chunk_start + chunk_size]
            target = torch.cat((lang, text), 0)

            # Crop the matching frame span from the audio features.
            f_size = int(np.floor(chunk_size * fpu))
            f_start = int(np.floor(chunk_start * fpu))
            assert (f_size > 0)
            source = source[f_start:f_start + f_size, :]

        if self.dedup:
            target = torch.unique_consecutive(target)

        if self.add_eos:
            eos_idx = self.tgt_dict.eos_index
            target = torch.cat((target, torch.LongTensor([eos_idx])), 0)

        return index, source, target, speaker_id

    def set_epoch(self, epoch):
        """Grow the training chunk size for the new epoch.

        Renamed from a placeholder: fairseq's training loop invokes
        ``dataset.set_epoch(epoch)`` by name at every epoch start.
        """
        if self.is_train_split and self.do_chunk:
            old = self.chunk_size
            self.chunk_size = self.chunk_init + epoch * self.chunk_incr
            if self.chunk_bound > 0:
                self.chunk_size = min(self.chunk_size, self.chunk_bound)
            logger.info((
                f"{self.split}: setting chunk size "
                f"from {old} to {self.chunk_size}"
            ))
class FrmTextToSpeechDatasetCreator(TextToSpeechDatasetCreator):
    # inherit for key names

    @classmethod
    def from_tsv(
        cls,
        root: str,
        data_cfg: S2TDataConfig,
        split: str,
        tgt_dict,
        pre_tokenizer,
        bpe_tokenizer,
        is_train_split: bool,
        n_frames_per_step: int,
        speaker_to_id,
        do_chunk: bool = False,
        chunk_bound: int = -1,
        chunk_init: int = 50,
        chunk_incr: int = 5,
        add_eos: bool = True,
        dedup: bool = True,
        ref_fpu: float = -1
    ) -> FrmTextToSpeechDataset:
        """Build a FrmTextToSpeechDataset from ``<root>/<split>.tsv``.

        Missing optional columns fall back to the class-level DEFAULT_*
        values inherited from TextToSpeechDatasetCreator.
        (Removed a stray '|' artifact that broke the final statement.)
        """
        tsv_path = op.join(root, f"{split}.tsv")
        if not op.isfile(tsv_path):
            raise FileNotFoundError(f"Dataset not found: {tsv_path}")
        with open(tsv_path) as f:
            reader = csv.DictReader(
                f,
                delimiter="\t",
                quotechar=None,
                doublequote=False,
                lineterminator="\n",
                quoting=csv.QUOTE_NONE,
            )
            s = [dict(e) for e in reader]
            assert len(s) > 0

        ids = [ss[cls.KEY_ID] for ss in s]
        audio_paths = [
            op.join(data_cfg.audio_root, ss[cls.KEY_AUDIO]) for ss in s
        ]
        n_frames = [int(ss[cls.KEY_N_FRAMES]) for ss in s]
        tgt_texts = [ss[cls.KEY_TGT_TEXT] for ss in s]
        src_texts = [ss.get(cls.KEY_SRC_TEXT, cls.DEFAULT_SRC_TEXT) for ss in s]
        speakers = [ss.get(cls.KEY_SPEAKER, cls.DEFAULT_SPEAKER) for ss in s]
        src_langs = [ss.get(cls.KEY_SRC_LANG, cls.DEFAULT_LANG) for ss in s]
        tgt_langs = [ss.get(cls.KEY_TGT_LANG, cls.DEFAULT_LANG) for ss in s]

        return FrmTextToSpeechDataset(
            split=split,
            is_train_split=is_train_split,
            data_cfg=data_cfg,
            audio_paths=audio_paths,
            n_frames=n_frames,
            src_texts=src_texts,
            tgt_texts=tgt_texts,
            speakers=speakers,
            src_langs=src_langs,
            tgt_langs=tgt_langs,
            ids=ids,
            tgt_dict=tgt_dict,
            pre_tokenizer=pre_tokenizer,
            bpe_tokenizer=bpe_tokenizer,
            n_frames_per_step=n_frames_per_step,
            speaker_to_id=speaker_to_id,
            do_chunk=do_chunk,
            chunk_bound=chunk_bound,
            chunk_init=chunk_init,
            chunk_incr=chunk_incr,
            add_eos=add_eos,
            dedup=dedup,
            ref_fpu=ref_fpu
        )
# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# utility for nncc
import os
import sys
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.python.platform import gfile
from tensorflow.python.tools import freeze_graph
from tensorflow.python.tools import optimize_for_inference_lib
# --------
def file_validity_check(fn, ext_must_be=''):
    """Check that the file exists and, optionally, has the given extension.

    Returns True on success; prints a diagnostic and returns False otherwise.
    """
    if not os.path.exists(fn):
        print("# error: file does not exist " + fn)
        return False

    if ext_must_be != '':
        extension = os.path.splitext(fn)[1]
        # extension still carries the leading '.', e.g. '.pb' — compare from index 1
        if extension[1:].lower() != ext_must_be:
            print("# error: wrong extension {}. Should be {} ".format(
                extension, ext_must_be))
            return False

    return True
# --------
def importGraphIntoSession(sess, filename, graphNameAfterImporting):
    """Load a GraphDef from a .pb (binary) or .pbtxt (text) file and import
    it into the given session's graph under the given name prefix."""
    # this should be called inside
    # with tf.Session() as sess:
    assert sess
    (_, _, ext) = METHOD_NAME(filename)

    if (ext.lower() == 'pb'):
        # binary protobuf
        with gfile.FastGFile(filename, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())

    elif (ext.lower() == 'pbtxt'):
        # text protobuf
        with open(filename, 'r') as reader:
            graph_def = tf.GraphDef()
            text_format.Parse(reader.read(), graph_def)
    else:
        # NOTE(review): on an unknown extension graph_def stays unbound and
        # the import below raises NameError — consider raising here instead.
        print("# Error: unknown extension - " + ext)

    tf.import_graph_def(graph_def, name=graphNameAfterImporting)
# --------
def METHOD_NAME(path):
    """Split *path* into (directory, basename without extension, extension).

    The extension is returned without its leading dot, e.g.
    '/tmp/.ssh/my.key.dat' -> ('/tmp/.ssh', 'my.key', 'dat').
    """
    directory, basename = os.path.split(path)
    stem, dot_ext = os.path.splitext(basename)
    # drop the leading '.' so '.dat' becomes 'dat'
    return (directory, stem, dot_ext[1:])
# --------
def convertPbtxt2Pb(pbtxtPath):
    ''' convert pbtxt file to pb file. e.g., /tmp/a.pbtxt --> /tmp/a.pb
        Returns the path of the written .pb file (same directory, same stem). '''
    with open(pbtxtPath) as f:
        txt = f.read()
    # Parse the text-format GraphDef, then re-serialize it as binary.
    gdef = text_format.Parse(txt, tf.GraphDef())

    (directory, filename, ext) = METHOD_NAME(pbtxtPath)
    tf.train.write_graph(gdef, directory, filename + '.pb', as_text=False)

    return os.path.join(directory, filename + '.pb')
# --------
def convertPb2Pbtxt(pbPath):
    ''' convert pb file to pbtxt file. e.g., /tmp/a.pb --> /tmp/a.pbtxt
        Returns the path of the written .pbtxt file (same directory, same stem). '''
    (directory, filename, ext) = METHOD_NAME(pbPath)

    with gfile.FastGFile(pbPath, 'rb') as f:
        content = f.read()

    # Parse the binary GraphDef and re-serialize it in text format.
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(content)
    # NOTE(review): importing into the default graph is a side effect —
    # presumably only needed to validate the GraphDef; confirm.
    tf.import_graph_def(graph_def, name='')

    tf.train.write_graph(graph_def, directory, filename + '.pbtxt', as_text=True)

    return os.path.join(directory, filename + '.pbtxt')
# --------
def savePbAndCkpt(sess, directory, fn_prefix):
    ''' save files related to session's graph into directory.
        - fn_prefix.pb : binary protocol buffer file
        - fn_prefix.pbtxt : text format of protocol buffer file
        - fn_prefix.ckpt.* : checkpoint files containing values of variables

        returns (path of pb file, path of pbtxt file, path of ckpt files)
    '''
    tf.train.write_graph(sess.graph_def, directory, fn_prefix + '.pb', as_text=False)
    tf.train.write_graph(sess.graph_def, directory, fn_prefix + '.pbtxt', as_text=True)

    # save a checkpoint file, which will store the above assignment
    # (subdirectory name fixed: was misspelled 'checkoiint')
    saver = tf.train.Saver()
    saver.save(sess, os.path.join(directory, 'checkpoint', fn_prefix + '.ckpt'))

    return (os.path.join(directory, fn_prefix + '.pb'),
            os.path.join(directory, fn_prefix + '.pbtxt'),
            os.path.join(directory, 'checkpoint', fn_prefix + '.ckpt'))
def optimizeGraph(input_graph_path, input_node_name, output_node_name):
    ''' this function calls optimize_for_inference of tensorflow and generates '*_optimized.pb'.

        - input_graph_path : must be a path to pb file
        - input_node_name : name of input operation node (comma-separated for several)
        - output_node_name : name of head(top) operation node (comma-separated for several)

        Returns the path of the written optimized graph file.
    '''
    (directory, fn, ext) = METHOD_NAME(input_graph_path)
    output_optimized_graph_path = os.path.join(directory, fn + '_optimized.pb')

    # Optimize for inference
    input_graph_def = tf.GraphDef()
    with tf.gfile.Open(input_graph_path, "rb") as f:
        data = f.read()
        input_graph_def.ParseFromString(data)

    # Node name strings may list several nodes separated by ','.
    output_graph_def = optimize_for_inference_lib.optimize_for_inference(
        input_graph_def, input_node_name.split(","), output_node_name.split(","),
        tf.float32.as_datatype_enum)

    # Save the optimized graph
    f = tf.gfile.FastGFile(output_optimized_graph_path, "w")
    f.write(output_graph_def.SerializeToString())

    return output_optimized_graph_path
# --------
def freezeGraph(input_graph_path, checkpoint_path, output_node_name):
    '''Call freeze_graph of tensorflow and generate '*_frozen.pb' and '*_frozen.pbtxt'.

    - input_graph_path : must be a path to a pb file
    - checkpoint_path : path of *.ckpt, e.g., '/tmp/inception_v3/graph.ckpt'
    - output_node_name : name of head(top) operation node

    Returns (path of frozen pb file, path of frozen pbtxt file).
    '''
    input_saver_def_path = ""
    input_binary = True
    # Standard tensor/op names produced by tf.train.Saver.
    restore_op_name = "save/restore_all"
    filename_tensor_name = "save/Const:0"
    clear_devices = True
    (directory, fn, ext) = METHOD_NAME(input_graph_path)
    output_frozen_graph_path = os.path.join(directory, fn + '_frozen.pb')
    # Validate the input before handing it to freeze_graph.
    if not file_validity_check(input_graph_path, 'pb'):
        print("Error: {} not found or not have pb extension".format(input_graph_path))
        # FIX: exit with a non-zero status; this is an error path and the
        # previous sys.exit(0) signalled success to the calling shell.
        sys.exit(1)
    freeze_graph.freeze_graph(input_graph_path, input_saver_def_path, input_binary,
                              checkpoint_path, output_node_name, restore_op_name,
                              filename_tensor_name, output_frozen_graph_path,
                              clear_devices, "")
    pbtxtPath = convertPb2Pbtxt(output_frozen_graph_path)
    return (output_frozen_graph_path, pbtxtPath)
# --------
def generateTensorboardLog(pbFiles, graphNames, directory):
    '''Generate logs for tensorboard; afterwards the graph(s) can be viewed inside tensorboard.

    This function creates a new Session(), so call this outside of 'with Session():'

    parameters:
        - pbFiles: if multiple graphs need to be shown, pass the list of pb (or pbtxt) files
        - graphNames: graph names, parallel to pbFiles
        - directory: parent directory of '/.tensorboard' directory where log files are saved

    how to run tensorboard:
        $ tensorboard --logdir=directory_in_parameter
    '''
    assert len(pbFiles) == len(graphNames)

    # without this, graph used previous session is reused : https://stackoverflow.com/questions/42706761/closing-session-in-tensorflow-doesnt-reset-graph
    tf.reset_default_graph()

    with tf.Session() as sess:
        # Walk both lists in lockstep instead of keeping a manual counter.
        for pbFile, graphName in zip(pbFiles, graphNames):
            importGraphIntoSession(sess, pbFile, graphName)
        tbLogPath = directory
        summary_writer = tf.summary.FileWriter(tbLogPath)
        summary_writer.add_graph(sess.graph)
        summary_writer.flush()
        summary_writer.close()
    return tbLogPath
#--------
def isScalar(x):
    '''Return True if x has a scalar (rank-0, i.e. empty) shape.

    keyword argument:
    x - base_freezer.Tensor
    '''
    # FIX: compare the shape value itself to an empty list. The previous code
    # evaluated type(x.getShape()) == [], a type-vs-list comparison that is
    # always False, so scalars were never detected.
    return x.getShape() == []
298,996 | do delete | """
Stub implementation of YouTube for acceptance tests.
To start this stub server on its own from Vagrant:
1.) Locally, modify your Vagrantfile so that it contains:
config.vm.network :forwarded_port, guest: 8031, host: 8031
2.) From within Vagrant dev environment do:
cd common/djangoapps/terrain
python -m stubs.start youtube 8031
3.) Locally, try accessing http://localhost:8031/ and see that
you get "Unused url" message inside the browser.
"""
import json
import time
from collections import OrderedDict
import requests
from six.moves.urllib.parse import urlparse
from .http import StubHttpRequestHandler, StubHttpService
class StubYouTubeHandler(StubHttpRequestHandler):
    """
    A handler for Youtube GET requests.
    """

    # Default number of seconds to delay the response to simulate network latency.
    DEFAULT_DELAY_SEC = 0.5

    def METHOD_NAME(self):  # pylint: disable=invalid-name
        """
        Allow callers to delete all the server configurations using the /del_config URL.
        """
        if self.path in ("/del_config", "/del_config/"):
            self.server.config = {}
            self.log_message("Reset Server Configuration.")
            self.send_response(200)
        else:
            self.send_response(404)

    def do_GET(self):
        """
        Handle a GET request from the client and sends response back.
        """
        self.log_message(
            f"Youtube provider received GET request to path {self.path}"
        )

        if 'get_config' in self.path:
            # Expose the current server configuration as JSON.
            self.send_json_response(self.server.config)

        elif 'test_transcripts_youtube' in self.path:
            # Serve canned transcript XML depending on which test id appears in the URL.
            if 't__eq_exist' in self.path:
                status_message = "".join([
                    '<?xml version="1.0" encoding="utf-8" ?>',
                    '<transcript><text start="1.0" dur="1.0">',
                    'Equal transcripts</text></transcript>'
                ]).encode('utf-8')
                self.send_response(
                    200, content=status_message, headers={'Content-type': 'application/xml'}
                )
            elif 't_neq_exist' in self.path:
                status_message = "".join([
                    '<?xml version="1.0" encoding="utf-8" ?>',
                    '<transcript><text start="1.1" dur="5.5">',
                    'Transcripts sample, different that on server',
                    '</text></transcript>'
                ]).encode('utf-8')
                self.send_response(
                    200, content=status_message, headers={'Content-type': 'application/xml'}
                )
            else:
                self.send_response(404)

        elif 'test_youtube' in self.path:
            # The youtube id is the last path segment.
            params = urlparse(self.path)
            youtube_id = params.path.split('/').pop()
            if self.server.config.get('youtube_api_private_video'):
                # FIX: _send_private_video_response previously accepted only
                # `message`, so this two-argument call raised TypeError at
                # runtime (it was even lint-amnestied as too-many-function-args).
                # The helper now takes youtube_id for symmetry with
                # _send_video_response.
                self._send_private_video_response(youtube_id, "I'm youtube private video.")
            else:
                self._send_video_response(youtube_id, "I'm youtube.")

        elif 'get_youtube_api' in self.path:
            # Delay the response to simulate network latency
            time.sleep(self.server.config.get('time_to_response', self.DEFAULT_DELAY_SEC))
            if self.server.config.get('youtube_api_blocked'):
                self.send_response(404, content=b'', headers={'Content-type': 'text/plain'})
            else:
                # Get the response to send from YouTube.
                # We need to do this every time because Google sometimes sends different responses
                # as part of their own experiments, which has caused our tests to become "flaky"
                self.log_message("Getting iframe api from youtube.com")
                iframe_api_response = requests.get('https://www.youtube.com/iframe_api').content.strip(b"\n")
                self.send_response(200, content=iframe_api_response, headers={'Content-type': 'text/html'})

        else:
            self.send_response(
                404, content=b"Unused url", headers={'Content-type': 'text/plain'}
            )

    def _send_video_response(self, youtube_id, message):
        """
        Send message back to the client for video player requests.
        Requires sending back callback id.
        """
        # Delay the response to simulate network latency
        time.sleep(self.server.config.get('time_to_response', self.DEFAULT_DELAY_SEC))

        # Construct the response content
        callback = self.get_params['callback']
        # NOTE(review): list(OrderedDict(...)) iterates the dict's *keys*, so
        # 'items' becomes ['contentDetails'] rather than a list of dicts.
        # Preserved as-is because consuming tests may rely on the current
        # payload shape -- confirm the intent before changing.
        data = OrderedDict({
            'items': list(
                OrderedDict({
                    'contentDetails': OrderedDict({
                        'id': youtube_id,
                        'duration': 'PT2M20S',
                    })
                })
            )
        })
        response = f"{callback}({json.dumps(data)})".encode('utf-8')
        self.send_response(200, content=response, headers={'Content-type': 'text/html'})
        self.log_message(f"Youtube: sent response {message}")

    def _send_private_video_response(self, youtube_id, message):
        """
        Send private video error message back to the client for video player requests.

        `youtube_id` is accepted for symmetry with `_send_video_response`
        (and to match the call site); the error payload does not include it.
        """
        # Construct the response content
        callback = self.get_params['callback']
        data = OrderedDict({
            "error": OrderedDict({
                "code": 403,
                "errors": [
                    {
                        "code": "ServiceForbiddenException",
                        "domain": "GData",
                        "internalReason": "Private video"
                    }
                ],
                "message": message,
            })
        })
        response = f"{callback}({json.dumps(data)})".encode('utf-8')
        self.send_response(200, content=response, headers={'Content-type': 'text/html'})
        self.log_message(f"Youtube: sent response {message}")
class StubYouTubeService(StubHttpService):
    """
    A stub Youtube provider server that responds to GET requests to localhost.
    """

    # Request handler class that the base StubHttpService instantiates for
    # each incoming connection.
    HANDLER_CLASS = StubYouTubeHandler
298,997 | run | # This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Philippe Biondi <phil@secdev.org>
# This program is published under a GPLv2 license
"""
Answering machines.
"""
########################
# Answering machines #
########################
from __future__ import absolute_import
from __future__ import print_function
from scapy.sendrecv import send, sniff
from scapy.config import conf
from scapy.error import log_interactive
import scapy.modules.six as six
class ReferenceAM(type):
    """Metaclass that publishes each answering machine as a module-level helper.

    When a class created with this metaclass declares a non-empty
    ``function_name``, a function of that name is installed into ``globals()``;
    calling it instantiates the machine (forwarding all arguments) and
    immediately invokes the instance.
    """
    def __new__(cls, name, bases, dct):
        obj = super(ReferenceAM, cls).__new__(cls, name, bases, dct)
        if obj.function_name:
            # `obj=obj` binds the freshly created class at definition time
            # (avoids the late-binding closure pitfall).
            # NOTE(review): because `obj` is the first parameter, a *positional*
            # argument passed to the generated helper would replace the class --
            # presumably callers are expected to use keyword arguments only;
            # confirm before relying on positional calls.
            globals()[obj.function_name] = lambda obj=obj, *args, **kargs: obj(*args, **kargs)()  # noqa: E501
        return obj
class AnsweringMachine(six.with_metaclass(ReferenceAM, object)):
    """Base class for protocol answering machines.

    Sniffs packets (optionally restricted by ``filter``); for every packet
    accepted by ``is_request()``, builds a reply with ``make_reply()`` and
    emits it via ``send_reply()``.  Keyword arguments passed to ``__init__``
    and ``__call__`` are partitioned into sniff options, send options and
    protocol options (the latter consumed by subclass ``parse_options()``).
    """
    # Name under which ReferenceAM publishes a module-level helper ("" = none).
    function_name = ""
    # Default BPF filter applied when sniffing (None = no filter).
    filter = None
    # Default sniff()/send() keyword arguments and the keys recognized as such.
    sniff_options = {"store": 0}
    sniff_options_list = ["store", "iface", "count", "promisc", "filter", "type", "prn", "stop_filter"]  # noqa: E501
    send_options = {"verbose": 0}
    send_options_list = ["iface", "inter", "loop", "verbose"]
    send_function = staticmethod(send)

    def __init__(self, **kargs):
        # mode controls where __setattr__ stores attributes:
        # 0 -> the instance __dict__, 1 -> optam1, 2 -> optam2.
        self.mode = 0
        if self.filter:
            kargs.setdefault("filter", self.filter)
        # Every sniffed packet is fed to self.reply unless the caller overrides prn.
        kargs.setdefault("prn", self.reply)
        # Option overlays: optam0 holds __init__-time protocol options,
        # optam1/optam2 collect attributes set while parse_options runs in
        # mode 1 / mode 2 respectively (see __setattr__/__getattr__).
        self.optam1 = {}
        self.optam2 = {}
        self.optam0 = {}
        doptsend, doptsniff = self.parse_all_options(1, kargs)
        # Defaults = class-level options updated with __init__ keyword args.
        self.defoptsend = self.send_options.copy()
        self.defoptsend.update(doptsend)
        self.defoptsniff = self.sniff_options.copy()
        self.defoptsniff.update(doptsniff)
        self.optsend, self.optsniff = [{}, {}]

    def __getattr__(self, attr):
        # Fall back to the per-call (optam2) then per-instance (optam1) overlays.
        for dct in [self.optam2, self.optam1]:
            if attr in dct:
                return dct[attr]
        raise AttributeError(attr)

    def __setattr__(self, attr, val):
        # In mode 1/2, attribute writes are captured into the matching overlay
        # dict instead of the instance __dict__ (used while parse_options runs).
        mode = self.__dict__.get("mode", 0)
        if mode == 0:
            self.__dict__[attr] = val
        else:
            [self.optam1, self.optam2][mode - 1][attr] = val

    def parse_options(self):
        # Hook for subclasses: consume protocol-specific keyword options.
        pass

    def parse_all_options(self, mode, kargs):
        """Split kargs into (send options, sniff options); remaining keys are
        protocol options handed to parse_options() under the given mode."""
        sniffopt = {}
        sendopt = {}
        for k in list(kargs):  # use list(): kargs is modified in the loop
            if k in self.sniff_options_list:
                sniffopt[k] = kargs[k]
            if k in self.send_options_list:
                sendopt[k] = kargs[k]
            if k in self.sniff_options_list + self.send_options_list:
                del kargs[k]
        if mode != 2 or kargs:
            if mode == 1:
                self.optam0 = kargs
            elif mode == 2 and kargs:
                # Per-call options are layered on top of the __init__-time ones.
                k = self.optam0.copy()
                k.update(kargs)
                self.parse_options(**k)
                kargs = k
            # Temporarily switch mode so attribute writes inside parse_options
            # land in the right overlay dict, then restore the previous mode.
            omode = self.__dict__.get("mode", 0)
            self.__dict__["mode"] = mode
            self.parse_options(**kargs)
            self.__dict__["mode"] = omode
        return sendopt, sniffopt

    def is_request(self, req):
        # Default: treat every sniffed packet as a request. Subclasses override.
        return 1

    def make_reply(self, req):
        # Default: echo the request. Subclasses override.
        return req

    def send_reply(self, reply):
        self.send_function(reply, **self.optsend)

    def print_reply(self, req, reply):
        print("%s ==> %s" % (req.summary(), reply.summary()))

    def reply(self, pkt):
        # Callback invoked by sniff() for every captured packet.
        if not self.is_request(pkt):
            return
        reply = self.make_reply(pkt)
        self.send_reply(reply)
        if conf.verb >= 0:
            self.print_reply(pkt, reply)

    def METHOD_NAME(self, *args, **kargs):
        # Deprecated entry point kept for backward compatibility; delegates to
        # __call__.
        log_interactive.warning("run() method deprecated. The instance is now callable")  # noqa: E501
        self(*args, **kargs)

    def __call__(self, *args, **kargs):
        # Merge per-call options over the defaults, then start sniffing until
        # interrupted.
        optsend, optsniff = self.parse_all_options(2, kargs)
        self.optsend = self.defoptsend.copy()
        self.optsend.update(optsend)
        self.optsniff = self.defoptsniff.copy()
        self.optsniff.update(optsniff)
        try:
            self.sniff()
        except KeyboardInterrupt:
            print("Interrupted by user")

    def sniff(self):
        sniff(**self.optsniff)
298,998 | test acceptable prop value ranges defaults | from datetime import datetime, timedelta
import pytest
from tests.factories.rest.market import OfferProposalFactory
from yapapi.props import Activity
from yapapi.props.builder import DemandBuilder
from yapapi.strategy import (
PROP_DEBIT_NOTE_ACCEPTANCE_TIMEOUT,
PROP_DEBIT_NOTE_INTERVAL_SEC,
PROP_PAYMENT_TIMEOUT_SEC,
MarketStrategy,
PropValueRange,
)
from yapapi.strategy.base import (
DEFAULT_DEBIT_NOTE_INTERVAL_SEC,
DEFAULT_PAYMENT_TIMEOUT_SEC,
DEFAULT_PROPERTY_VALUE_RANGES,
MIN_EXPIRATION_FOR_MID_AGREEMENT_PAYMENTS,
)
class BadStrategy(MarketStrategy):  # noqa
    # Deliberately leaves the abstract `score_offer` unimplemented so that
    # instantiation fails with TypeError (see test_bad_strategy_instantiate).
    pass
class GoodStrategy(MarketStrategy):
    # Minimal concrete strategy: implements the single abstract method so the
    # class can be instantiated by the tests below.
    async def score_offer(self, offer) -> float:
        pass
class PropValueOverrideStrategy(GoodStrategy):
    # Overrides the acceptable range for the debit-note interval property only;
    # all other properties fall back to DEFAULT_PROPERTY_VALUE_RANGES.
    acceptable_prop_value_range_overrides = {
        PROP_DEBIT_NOTE_INTERVAL_SEC: PropValueRange(120, 1200)
    }
def test_bad_strategy_instantiate():
    """A strategy missing the abstract score_offer must refuse to instantiate."""
    with pytest.raises(TypeError) as exc_info:
        BadStrategy()
    message = str(exc_info.value)
    assert message.startswith("Can't instantiate abstract class BadStrategy")
def test_good_strategy_instantiate():
    # A strategy implementing all abstract methods instantiates without error.
    GoodStrategy()
def METHOD_NAME():
    """Default value ranges equal the module defaults but are a distinct copy."""
    strategy = GoodStrategy()
    assert strategy.acceptable_prop_value_ranges == DEFAULT_PROPERTY_VALUE_RANGES
    # Must be a copy, not the shared module-level dict (mutations would leak).
    assert strategy.acceptable_prop_value_ranges is not DEFAULT_PROPERTY_VALUE_RANGES
def test_acceptable_prop_value_range_override():
    """Overridden ranges replace the defaults; untouched keys keep defaults."""
    defaults = DEFAULT_PROPERTY_VALUE_RANGES
    overrides = PropValueOverrideStrategy.acceptable_prop_value_range_overrides
    effective_ranges = PropValueOverrideStrategy().acceptable_prop_value_ranges

    # sanity check
    assert overrides[PROP_DEBIT_NOTE_INTERVAL_SEC] != defaults[PROP_DEBIT_NOTE_INTERVAL_SEC]

    # The overridden key takes the override value...
    assert effective_ranges[PROP_DEBIT_NOTE_INTERVAL_SEC] == overrides[PROP_DEBIT_NOTE_INTERVAL_SEC]
    # ...while non-overridden keys keep their default ranges.
    assert effective_ranges[PROP_PAYMENT_TIMEOUT_SEC] == defaults[PROP_PAYMENT_TIMEOUT_SEC]
    assert (
        effective_ranges[PROP_DEBIT_NOTE_ACCEPTANCE_TIMEOUT]
        == defaults[PROP_DEBIT_NOTE_ACCEPTANCE_TIMEOUT]
    )
@pytest.mark.parametrize(
    # Renamed `min`/`max` -> `lo`/`hi`: the original parameter names shadowed
    # the `min`/`max` builtins (the argnames string must match the function
    # signature, so both are updated together).
    "lo, hi, val, contains, clamped, clamp_error",
    [
        (None, None, 0, True, 0, False),
        (None, None, 12345, True, 12345, False),
        (None, None, -0.12345, True, -0.12345, False),
        (-42, None, 0, True, 0, False),
        (42, None, 42, True, 42, False),
        (0, None, -66, False, 0, False),
        (None, 42, 0, True, 0, False),
        (None, -42, 0, False, -42, False),
        (None, 0, 0, True, 0, False),
        (-42.5, 66.7, 0, True, 0, False),
        (-42.5, 66.7, -42.5, True, -42.5, False),
        (-42.5, 66.7, 66.7, True, 66.7, False),
        (-42.5, 66.7, -66, False, -42.5, False),
        (-42.5, 66.7, 88, False, 66.7, False),
        # Inverted bounds: clamp() cannot produce a valid value and must raise.
        (256, 0, 128, False, None, True),
    ],
)
def test_prop_value_range(lo, hi, val, contains, clamped, clamp_error):
    """PropValueRange membership and clamping across open/closed/inverted bounds."""
    # Local renamed from `range` (shadowed the builtin).
    value_range = PropValueRange(lo, hi)
    assert (val in value_range) == contains
    if clamp_error:
        with pytest.raises(ValueError):
            value_range.clamp(val)
    else:
        assert value_range.clamp(val) == clamped
# Expirations (seconds) on either side of the mid-agreement-payments threshold.
SHORT_EXPIRATION = 1000
LONG_EXPIRATION = MIN_EXPIRATION_FOR_MID_AGREEMENT_PAYMENTS + 1000
# Debit-note intervals / payment timeouts just inside and just outside the
# acceptable default ranges.
ACCEPTABLE_DEBIT_NOTE_INTERVAL = DEFAULT_DEBIT_NOTE_INTERVAL_SEC + 10
# NOTE(review): "INTERAL" is a typo for "INTERVAL"; kept as-is because the name
# is referenced by the parametrized test cases below.
UNACCEPTABLE_DEBIT_NOTE_INTERAL = DEFAULT_DEBIT_NOTE_INTERVAL_SEC - 10
ACCEPTABLE_PAYMENT_TIMEOUT = DEFAULT_PAYMENT_TIMEOUT_SEC + 10
UNACCEPTABLE_PAYMENT_TIMEOUT = DEFAULT_PAYMENT_TIMEOUT_SEC - 10
@pytest.mark.parametrize(
    "offer_props, expiration_secs, expected_props",
    [
        # provider is unaware of mid-agreement payments, don't negotiate it from our end either
        ({}, SHORT_EXPIRATION, {}),
        # provider is unaware of mid-agreement payments, don't negotiate it from our end either
        ({}, LONG_EXPIRATION, {}),
        # provider would like mid-agreement payments but it doesn't make sense from requestor pov
        ({PROP_DEBIT_NOTE_INTERVAL_SEC: ACCEPTABLE_DEBIT_NOTE_INTERVAL}, SHORT_EXPIRATION, {}),
        # provider would like mid-agreement payments but it doesn't make sense from requestor pov
        ({PROP_DEBIT_NOTE_INTERVAL_SEC: UNACCEPTABLE_DEBIT_NOTE_INTERAL}, SHORT_EXPIRATION, {}),
        # provider would like mid-agreement payments and the debit note interval is okay
        (
            {PROP_DEBIT_NOTE_INTERVAL_SEC: ACCEPTABLE_DEBIT_NOTE_INTERVAL},
            LONG_EXPIRATION,
            {PROP_DEBIT_NOTE_INTERVAL_SEC: ACCEPTABLE_DEBIT_NOTE_INTERVAL},
        ),
        # provider would like mid-agreement payments but the debit note interval is unacceptable
        (
            {PROP_DEBIT_NOTE_INTERVAL_SEC: UNACCEPTABLE_DEBIT_NOTE_INTERAL},
            LONG_EXPIRATION,
            {PROP_DEBIT_NOTE_INTERVAL_SEC: DEFAULT_DEBIT_NOTE_INTERVAL_SEC},
        ),
        # full set of m-a p props from provider end but too short expiration for requestor
        (
            {
                PROP_DEBIT_NOTE_INTERVAL_SEC: ACCEPTABLE_DEBIT_NOTE_INTERVAL,
                PROP_PAYMENT_TIMEOUT_SEC: ACCEPTABLE_PAYMENT_TIMEOUT,
            },
            SHORT_EXPIRATION,
            {},
        ),
        # full set of m-a p props from the provider and all intervals within acceptable bounds
        (
            {
                PROP_DEBIT_NOTE_INTERVAL_SEC: ACCEPTABLE_DEBIT_NOTE_INTERVAL,
                PROP_PAYMENT_TIMEOUT_SEC: ACCEPTABLE_PAYMENT_TIMEOUT,
            },
            LONG_EXPIRATION,
            {
                PROP_DEBIT_NOTE_INTERVAL_SEC: ACCEPTABLE_DEBIT_NOTE_INTERVAL,
                PROP_PAYMENT_TIMEOUT_SEC: ACCEPTABLE_PAYMENT_TIMEOUT,
            },
        ),
        # full set of m-a p props from the provider but payment timeout not acceptable for requestor
        (
            {
                PROP_DEBIT_NOTE_INTERVAL_SEC: ACCEPTABLE_DEBIT_NOTE_INTERVAL,
                PROP_PAYMENT_TIMEOUT_SEC: UNACCEPTABLE_PAYMENT_TIMEOUT,
            },
            LONG_EXPIRATION,
            {
                PROP_DEBIT_NOTE_INTERVAL_SEC: ACCEPTABLE_DEBIT_NOTE_INTERVAL,
                PROP_PAYMENT_TIMEOUT_SEC: DEFAULT_PAYMENT_TIMEOUT_SEC,
            },
        ),
    ],
)
@pytest.mark.asyncio
async def test_respond_to_provider_offer(offer_props, expiration_secs, expected_props):
    """Negotiation of mid-agreement-payment properties against a provider offer.

    Builds a demand with the given expiration, feeds it a factory-made offer
    carrying `offer_props`, and checks that the strategy's counter-demand ends
    up with exactly `expected_props` (clamped to defaults where the provider's
    values are unacceptable).
    """
    strategy = GoodStrategy()
    demand = DemandBuilder()
    expiration = datetime.now() + timedelta(seconds=expiration_secs)
    demand.add(Activity(expiration=expiration))
    offer_kwargs = {"proposal__proposal__properties": offer_props}
    offer = OfferProposalFactory(**offer_kwargs)

    updated_demand = await strategy.respond_to_provider_offer(demand, offer)
    # The expiration property is always present and derived from `expiration`
    # above; drop it so the comparison covers only the negotiated m-a p props.
    del updated_demand.properties["golem.srv.comp.expiration"]
    assert updated_demand.properties == expected_props
298,999 | perform set team test | # Copyright 2021-2023 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
import configparser
import os
import pytest
from vdk.internal.control.exception.vdk_exception import VDKException
from vdk.internal.control.job.job_config import JobConfig
from vdk.internal.control.utils import cli_utils
from vdk.internal.test_utils import find_test_resource
class TestSetTeam:
    """Tests for JobConfig team handling and notification settings."""

    @pytest.fixture(autouse=True)
    def setup_method(self, tmpdir):
        # Work on a throwaway copy of the packaged test job so each test can
        # freely mutate its config.ini.
        self.tmp_copy_job_test_path = os.path.join(tmpdir, "my-tmp-test-job")
        self.test_job_path = find_test_resource("test-job")
        self.tmp_copy_job_test_config_ini_path = os.path.join(
            self.tmp_copy_job_test_path, "config.ini"
        )
        cli_utils.copy_directory(self.test_job_path, self.tmp_copy_job_test_path)

    def test_set_team(self):
        self.METHOD_NAME(
            "my_unique_team_name", JobConfig(self.tmp_copy_job_test_path)
        )

    def test_set_empty_team(self):
        self.METHOD_NAME("", JobConfig(self.tmp_copy_job_test_path))

    def test_set_team_with_spaces(self):
        self.METHOD_NAME(
            "my unique team name", JobConfig(self.tmp_copy_job_test_path)
        )

    def test_set_team_with_no_team_in_config_ini(self):
        # Empty out config.ini (removing the team option). Opening in "w" mode
        # already truncates the file, so no explicit truncate() is needed, and
        # the context manager guarantees the handle is closed.
        with open(self.tmp_copy_job_test_config_ini_path, "w"):
            pass
        job_config = JobConfig(self.tmp_copy_job_test_path)
        assert (
            not job_config.get_team()
        ), "empty config.ini file should not provide a team"
        assert not job_config.set_team_if_exists(
            "my unique team name"
        ), "set_team_if_exists was supposed to return False if there is no team option in config.ini"

    def METHOD_NAME(self, team_name, job_config):
        """Set `team_name` via set_team_if_exists and verify it was written."""
        assert job_config.set_team_if_exists(
            team_name
        ), f"team option was expected to be present in: {self.tmp_copy_job_test_config_ini_path}"
        with open(self.tmp_copy_job_test_config_ini_path) as f:
            assert (
                team_name in f.read()
            ), f"set_team_if_exists failed to write team {team_name} in: {self.tmp_copy_job_test_config_ini_path}"

    def test_notification_delay_period_minutes(self):
        self.__create_config_ini(
            self.tmp_copy_job_test_config_ini_path,
            [("notification_delay_period_minutes", "100")],
        )
        job_config = JobConfig(self.tmp_copy_job_test_path)
        assert job_config.get_notification_delay_period_minutes() == 100

    def test_invalid_notification_delay_period_minutes(self):
        # A non-numeric delay must be rejected with VDKException.
        self.__create_config_ini(
            self.tmp_copy_job_test_config_ini_path,
            [("notification_delay_period_minutes", "invalid_value")],
        )
        job_config = JobConfig(self.tmp_copy_job_test_path)
        with pytest.raises(VDKException):
            job_config.get_notification_delay_period_minutes()

    def test_negative_notification_delay_period_minutes(self):
        # A negative delay must be rejected with VDKException.
        self.__create_config_ini(
            self.tmp_copy_job_test_config_ini_path,
            [("notification_delay_period_minutes", "-100")],
        )
        job_config = JobConfig(self.tmp_copy_job_test_path)
        with pytest.raises(VDKException):
            job_config.get_notification_delay_period_minutes()

    def test_parse_notification_contacts(self):
        # Contacts may be separated by ';' or ',' with arbitrary surrounding
        # whitespace; all four notification lists must be parsed the same way.
        self.__create_config_ini(
            self.tmp_copy_job_test_config_ini_path,
            [
                ("notified_on_job_deploy", "a@abv.bg; b@dir.bg, c@abv.bg ; d@dir.bg"),
                (
                    "notified_on_job_success",
                    "a@abv.bg; b@dir.bg, c@abv.bg ; d@dir.bg",
                ),
                (
                    "notified_on_job_failure_user_error",
                    "a@abv.bg; b@dir.bg, c@abv.bg ; d@dir.bg",
                ),
                (
                    "notified_on_job_failure_platform_error",
                    "a@abv.bg; b@dir.bg, c@abv.bg ; d@dir.bg",
                ),
            ],
        )
        job_config = JobConfig(self.tmp_copy_job_test_path)
        expected = ["a@abv.bg", "b@dir.bg", "c@abv.bg", "d@dir.bg"]
        assert job_config.get_contacts_notified_on_job_deploy() == expected
        assert job_config.get_contacts_notified_on_job_success() == expected
        assert job_config.get_contacts_notified_on_job_failure_user_error() == expected
        assert job_config.get_contacts_notified_on_job_failure_platform_error() == expected

    @staticmethod
    def __create_config_ini(file, contacts):
        """Write the given (key, value) pairs into a [contacts] section of `file`."""
        config = configparser.ConfigParser()
        config.add_section("contacts")
        for k, v in contacts:
            config.set("contacts", k, v)
        with open(file, "w") as f:
            config.write(f)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.