id int64 0 300k | label stringlengths 1 74 ⌀ | text stringlengths 4k 8k |
|---|---|---|
5,200 | drop | # stash.py
# Copyright (C) 2018 Jelmer Vernooij <jelmer@samba.org>
#
# Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU
# General Public License as public by the Free Software Foundation; version 2.0
# or (at your option) any later version. You can redistribute it and/or
# modify it under the terms of either of these two licenses.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# You should have received a copy of the licenses; if not, see
# <http://www.gnu.org/licenses/> for a copy of the GNU General Public License
# and <http://www.apache.org/licenses/LICENSE-2.0> for a copy of the Apache
# License, Version 2.0.
#
"""Stash handling."""
import os
from .file import GitFile
from .index import commit_tree, iter_fresh_objects
from .reflog import drop_reflog_entry, read_reflog
DEFAULT_STASH_REF = b"refs/stash"
class Stash:
    """A Git stash.
    Note that this doesn't currently update the working tree.
    """
    def __init__(self, repo, ref=DEFAULT_STASH_REF) -> None:
        """Wrap the stash stored under *ref* (default ``refs/stash``) of *repo*."""
        self._ref = ref
        self._repo = repo
    @property
    def _reflog_path(self):
        # Stash entries are kept in the ref's reflog file under the common dir.
        return os.path.join(
            self._repo.commondir(), "logs", os.fsdecode(self._ref)
        )
    def stashes(self):
        """Return the stash entries, newest first (empty list if no reflog exists)."""
        try:
            with GitFile(self._reflog_path, "rb") as f:
                # Reflog files are stored oldest-first; reverse so index 0 is newest.
                return reversed(list(read_reflog(f)))
        except FileNotFoundError:
            return []
    @classmethod
    def from_repo(cls, repo):
        """Create a new stash from a Repo object."""
        return cls(repo)
    def METHOD_NAME(self, index):
        """Drop entry with specified index."""
        with open(self._reflog_path, "rb+") as f:
            drop_reflog_entry(f, index, rewrite=True)
        if len(self) == 0:
            # Last entry gone: remove the reflog file and the stash ref itself.
            os.remove(self._reflog_path)
            del self._repo.refs[self._ref]
            return
        if index == 0:
            # The newest entry was dropped; repoint the ref at the new top entry.
            self._repo.refs[self._ref] = self[0].new_sha
    def pop(self, index):
        """Apply and drop the stash entry at *index*. Not implemented yet."""
        raise NotImplementedError(self.pop)
    def push(self, committer=None, author=None, message=None):
        """Create a new stash.
        Args:
          committer: Optional committer name to use
          author: Optional author name to use
          message: Optional commit message
        """
        # First, create the index commit.
        commit_kwargs = {}
        if committer is not None:
            commit_kwargs["committer"] = committer
        if author is not None:
            commit_kwargs["author"] = author
        index = self._repo.open_index()
        index_tree_id = index.commit(self._repo.object_store)
        index_commit_id = self._repo.do_commit(
            ref=None,
            tree=index_tree_id,
            message=b"Index stash",
            merge_heads=[self._repo.head()],
            no_verify=True,
            **commit_kwargs
        )
        # Then, the working tree one.
        stash_tree_id = commit_tree(
            self._repo.object_store,
            iter_fresh_objects(
                index,
                os.fsencode(self._repo.path),
                object_store=self._repo.object_store,
            ),
        )
        if message is None:
            message = b"A stash on " + self._repo.head()
        # TODO(jelmer): Just pass parents into do_commit()?
        self._repo.refs[self._ref] = self._repo.head()
        cid = self._repo.do_commit(
            ref=self._ref,
            tree=stash_tree_id,
            message=message,
            merge_heads=[index_commit_id],
            no_verify=True,
            **commit_kwargs
        )
        return cid
    def __getitem__(self, index):
        # Entries are ordered newest-first, matching ``git stash list``.
        return list(self.stashes())[index]
    def __len__(self) -> int:
        return len(list(self.stashes())) |
5,201 | fluence | """Calculate."""
# --- import -------------------------------------------------------------
import numpy as np
from .. import units as wt_units
# --- define -------------------------------------------------------------
__all__ = ["fluence", "mono_resolution", "nm_width", "symmetric_sqrt"]
# --- functions ----------------------------------------------------------
def METHOD_NAME(
    power_mW,
    color,
    beam_radius,
    reprate_Hz,
    pulse_width,
    color_units="wn",
    beam_radius_units="mm",
    pulse_width_units="fs_t",
    area_type="even",
) -> tuple:
    """Calculate the fluence of a beam.

    Parameters
    ----------
    power_mW : number
        Time integrated power of beam.
    color : number
        Color of beam in units.
    beam_radius : number
        Radius of beam in units.
    reprate_Hz : number
        Laser repetition rate in inverse seconds (Hz).
    pulse_width : number
        Pulsewidth of laser in units.
    color_units : string (optional)
        Valid wt.units color unit identifier. Default is wn.
    beam_radius_units : string (optional)
        Valid wt.units distance unit identifier. Default is mm.
    pulse_width_units : string (optional)
        Valid wt.units time unit identifier. Default is fs_t.
    area_type : string (optional)
        Type of calculation to accomplish for Gaussian area.
        "even" specifies a flat-top calculation.
        "average" specifies a Gaussian average within the FWHM.
        Default is even.

    Returns
    -------
    tuple
        Fluence in uJ/cm^2, photons/cm^2, and peak intensity in GW/cm^2.

    Raises
    ------
    NotImplementedError
        If *area_type* is not "even" or "average".
    """
    # calculate beam area
    if area_type == "even":
        radius_cm = wt_units.converter(beam_radius, beam_radius_units, "cm")
        area_cm2 = np.pi * radius_cm**2  # cm^2
    elif area_type == "average":
        radius_cm = wt_units.converter(beam_radius, beam_radius_units, "cm")
        area_cm2 = np.pi * radius_cm**2  # cm^2
        area_cm2 /= 0.7213  # weight by average intensity felt by oscillator inside of FWHM
    else:
        raise NotImplementedError
    # calculate fluence in uJ/cm^2
    ujcm2 = power_mW / reprate_Hz  # mJ (energy per pulse)
    ujcm2 *= 1e3  # uJ
    ujcm2 /= area_cm2  # uJ/cm^2
    # calculate fluence in photons/cm^2
    energy = wt_units.converter(color, color_units, "eV")  # eV per photon
    photonscm2 = ujcm2 * 1e-6  # J/cm2
    photonscm2 /= 1.60218e-19  # eV/cm2 (divide by Joules per eV)
    photonscm2 /= energy  # photons/cm2
    # calculate peak intensity in GW/cm^2
    pulse_width_s = wt_units.converter(pulse_width, pulse_width_units, "s_t")  # seconds
    GWcm2 = ujcm2 / 1e6  # J/cm2
    GWcm2 /= pulse_width_s  # W/cm2
    GWcm2 /= 1e9
    # finish
    return ujcm2, photonscm2, GWcm2
def mono_resolution(
    grooves_per_mm, slit_width, focal_length, output_color, output_units="wn"
) -> float:
    """Calculate the resolution of a monochromator.

    Parameters
    ----------
    grooves_per_mm : number
        Grooves per millimeter.
    slit_width : number
        Slit width in microns.
    focal_length : number
        Focal length in mm.
    output_color : number
        Output color in nm.
    output_units : string (optional)
        Output units. Default is wn.

    Returns
    -------
    float
        Resolution.
    """
    # Linear-dispersion bandpass admitted by the slit, in nm.
    bandpass_nm = 1e6 * slit_width / (grooves_per_mm * focal_length)
    blue_edge = wt_units.converter(output_color - bandpass_nm / 2, "nm", output_units)
    red_edge = wt_units.converter(output_color + bandpass_nm / 2, "nm", output_units)
    # Width of the bandpass expressed in the requested output units.
    return abs(red_edge - blue_edge)
def nm_width(center, width, units="wn") -> float:
    """Given a center and width, in energy units, get back a width in nm.

    Parameters
    ----------
    center : number
        Center (in energy units).
    width : number
        Width (in energy units).
    units : string (optional)
        Input units. Default is wn.

    Returns
    -------
    number
        Width in nm.
    """
    half_width = width / 2.0
    # Low-energy edge maps to the long-wavelength (red) side and vice versa.
    red = wt_units.converter(center - half_width, units, "nm")
    blue = wt_units.converter(center + half_width, units, "nm")
    return red - blue
def symmetric_sqrt(x, out=None):
    """Compute the 'symmetric' square root: sign(x) * sqrt(abs(x)).

    Parameters
    ----------
    x : array_like or number
        Input array.
    out : ndarray, None, or tuple of ndarray and None (optional)
        A location into which the result is stored. If provided, it must
        have a shape that the inputs broadcast to. If not provided or None,
        a freshly-allocated array is returned.

    Returns
    -------
    np.ndarray
        Symmetric square root of arr.
    """
    # Capture the signs first: if `out` aliases x, the sqrt below overwrites it.
    signs = np.sign(x)
    out = np.multiply(np.sqrt(np.abs(x), out=out), signs, out=out)
return out |
5,202 | connection | # Copyright © 2012-2023 Forschungszentrum Jülich GmbH
# SPDX-License-Identifier: LGPL-3.0-or-later
import sqlite3
from pathlib import Path
from jupedsim.native.simulation import Simulation
from jupedsim.serialization import TrajectoryWriter
class SqliteTrajectoryWriter(TrajectoryWriter):
    """Write trajectory data into a sqlite db"""

    def __init__(self, output_file: Path):
        """SqliteTrajectoryWriter constructor

        Parameters
        ----------
        output_file : pathlib.Path
            name of the output file.
            Note: the file will not be written until the first call to 'begin_writing'

        Returns
        -------
        SqliteTrajectoryWriter
        """
        self._output_file = output_file
        # Frame counter, advanced once per successful write_iteration_state().
        self._frame = 0
        # isolation_level=None puts sqlite3 into autocommit mode so the
        # explicit BEGIN/COMMIT/ROLLBACK statements below manage transactions.
        self._con = sqlite3.connect(self._output_file, isolation_level=None)

    def begin_writing(self, fps: float, geometry_as_wkt: str) -> None:
        """Begin writing trajectory data.

        This method is intended to handle all data writing that has to be done
        once before the trajectory data can be written. E.g. Meta information
        such as framerate etc...

        Raises
        ------
        TrajectoryWriter.Exception
            If the database schema cannot be created.
        """
        cur = self._con.cursor()
        try:
            cur.execute("BEGIN")
            cur.execute("DROP TABLE IF EXISTS trajectory_data")
            cur.execute(
                "CREATE TABLE trajectory_data ("
                " frame INTEGER NOT NULL,"
                " id INTEGER NOT NULL,"
                " pos_x REAL NOT NULL,"
                " pos_y REAL NOT NULL,"
                " ori_x REAL NOT NULL,"
                " ori_y REAL NOT NULL)"
            )
            cur.execute("DROP TABLE IF EXISTS metadata")
            cur.execute(
                "CREATE TABLE metadata(key TEXT NOT NULL UNIQUE, value TEXT NOT NULL)"
            )
            cur.executemany(
                "INSERT INTO metadata VALUES(?, ?)",
                (("version", "1"), ("fps", fps)),
            )
            cur.execute("DROP TABLE IF EXISTS geometry")
            cur.execute("CREATE TABLE geometry(wkt TEXT NOT NULL)")
            cur.execute("INSERT INTO geometry VALUES(?)", (geometry_as_wkt,))
            cur.execute(
                "CREATE INDEX frame_id_idx ON trajectory_data(frame, id)"
            )
            cur.execute("COMMIT")
        except sqlite3.Error as e:
            cur.execute("ROLLBACK")
            # Chain the sqlite3 error so the root cause stays visible.
            raise TrajectoryWriter.Exception(f"Error creating database: {e}") from e

    def write_iteration_state(self, simulation: Simulation) -> None:
        """Write trajectory data of one simulation iteration.

        This method is intended to handle serialization of the trajectory data
        of a single iteration.

        Raises
        ------
        TrajectoryWriter.Exception
            If the frame data cannot be inserted.
        """
        if not self._con:
            raise TrajectoryWriter.Exception("Database not opened.")
        cur = self._con.cursor()
        try:
            cur.execute("BEGIN")
            frame_data = [
                (
                    self._frame,
                    agent.id,
                    agent.position[0],
                    agent.position[1],
                    agent.orientation[0],
                    agent.orientation[1],
                )
                for agent in simulation.agents()
            ]
            cur.executemany(
                "INSERT INTO trajectory_data VALUES(?, ?, ?, ?, ?, ?)",
                frame_data,
            )
            cur.execute("COMMIT")
        except sqlite3.Error as e:
            cur.execute("ROLLBACK")
            raise TrajectoryWriter.Exception(f"Error writing to database: {e}") from e
        # Only advance the frame counter once the transaction committed.
        self._frame += 1

    def end_writing(self) -> None:
        """End writing trajectory data.

        This method is intended to handle finalizing writing of trajectory
        data, e.g. write closing tags, or footer meta data. Stores the
        bounding box of all recorded positions and closes the connection.

        Raises
        ------
        TrajectoryWriter.Exception
            If the summary metadata cannot be written.
        """
        if not self._con:
            raise TrajectoryWriter.Exception("Database not opened.")
        cur = self._con.cursor()
        try:
            cur.execute("BEGIN")
            res = cur.execute(
                "SELECT MIN(pos_x), MAX(pos_x), MIN(pos_y), MAX(pos_y) FROM trajectory_data"
            )
            xmin, xmax, ymin, ymax = res.fetchone()
            cur.execute(
                "INSERT INTO metadata(key, value) VALUES(?,?)",
                ("xmin", str(xmin)),
            )
            cur.execute(
                "INSERT INTO metadata(key, value) VALUES(?,?)",
                ("xmax", str(xmax)),
            )
            cur.execute(
                "INSERT INTO metadata(key, value) VALUES(?,?)",
                ("ymin", str(ymin)),
            )
            cur.execute(
                "INSERT INTO metadata(key, value) VALUES(?,?)",
                ("ymax", str(ymax)),
            )
            cur.execute("COMMIT")
        except sqlite3.Error as e:
            cur.execute("ROLLBACK")
            raise TrajectoryWriter.Exception(f"Error writing to database: {e}") from e
        self._con.close()
    def METHOD_NAME(self) -> sqlite3.Connection:
        # Expose the underlying sqlite3 connection (e.g. for inspection/tests).
        return self._con |
5,203 | test order line remove by app | from unittest.mock import patch
import graphene
import pytest
from django.db.models import Sum
from .....order import OrderStatus
from .....order import events as order_events
from .....order.models import OrderEvent
from .....warehouse.models import Stock
from ....tests.utils import assert_no_permission, get_graphql_content
from ..utils import assert_proper_webhook_called_once
ORDER_LINE_DELETE_MUTATION = """
mutation OrderLineDelete($id: ID!) {
orderLineDelete(id: $id) {
errors {
field
message
}
orderLine {
id
}
order {
id
total{
gross{
currency
amount
}
net {
currency
amount
}
}
}
}
}
"""
@patch("saleor.plugins.manager.PluginsManager.product_variant_back_in_stock")
def test_order_line_remove_with_back_in_stock_webhook(
    back_in_stock_webhook_mock,
    order_with_lines,
    permission_group_manage_orders,
    staff_api_client,
):
    """Removing a line releases its stock allocation and fires the back-in-stock webhook."""
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    Stock.objects.update(quantity=3)
    first_stock = Stock.objects.first()
    # Sanity check: the entire quantity is currently allocated (0 available).
    assert (
        first_stock.quantity
        - (
            first_stock.allocations.aggregate(Sum("quantity_allocated"))[
                "quantity_allocated__sum"
            ]
            or 0
        )
    ) == 0
    query = ORDER_LINE_DELETE_MUTATION
    order = order_with_lines
    order.status = OrderStatus.UNCONFIRMED
    order.save(update_fields=["status"])
    line = order.lines.first()
    line_id = graphene.Node.to_global_id("OrderLine", line.id)
    variables = {"id": line_id}
    response = staff_api_client.post_graphql(query, variables)
    content = get_graphql_content(response)
    data = content["data"]["orderLineDelete"]
    assert OrderEvent.objects.count() == 1
    assert OrderEvent.objects.last().type == order_events.OrderEvents.REMOVED_PRODUCTS
    assert data["orderLine"]["id"] == line_id
    assert line not in order.lines.all()
    first_stock.refresh_from_db()
    # After deletion the allocation is released, so all 3 units are available.
    assert (
        first_stock.quantity
        - (
            first_stock.allocations.aggregate(Sum("quantity_allocated"))[
                "quantity_allocated__sum"
            ]
            or 0
        )
    ) == 3
    back_in_stock_webhook_mock.assert_called_once_with(Stock.objects.first())
@pytest.mark.parametrize("status", (OrderStatus.DRAFT, OrderStatus.UNCONFIRMED))
@patch("saleor.plugins.manager.PluginsManager.draft_order_updated")
@patch("saleor.plugins.manager.PluginsManager.order_updated")
def test_order_line_remove(
    order_updated_webhook_mock,
    draft_order_updated_webhook_mock,
    status,
    order_with_lines,
    permission_group_manage_orders,
    staff_api_client,
):
    """A staff user can delete a line from editable orders; the matching webhook fires once."""
    query = ORDER_LINE_DELETE_MUTATION
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    order = order_with_lines
    order.status = status
    order.save(update_fields=["status"])
    line = order.lines.first()
    line_id = graphene.Node.to_global_id("OrderLine", line.id)
    variables = {"id": line_id}
    response = staff_api_client.post_graphql(query, variables)
    content = get_graphql_content(response)
    data = content["data"]["orderLineDelete"]
    assert OrderEvent.objects.count() == 1
    assert OrderEvent.objects.last().type == order_events.OrderEvents.REMOVED_PRODUCTS
    assert data["orderLine"]["id"] == line_id
    assert line not in order.lines.all()
    # Exactly one of draft_order_updated/order_updated fires, depending on status.
    assert_proper_webhook_called_once(
        order, status, draft_order_updated_webhook_mock, order_updated_webhook_mock
    )
def test_order_line_remove_by_usr_no_channel_access(
    order_with_lines,
    permission_group_all_perms_channel_USD_only,
    staff_api_client,
    channel_PLN,
):
    """A staff user restricted to the USD channel cannot delete lines from a PLN order."""
    # given
    permission_group_all_perms_channel_USD_only.user_set.add(staff_api_client.user)
    order = order_with_lines
    order.channel = channel_PLN
    order.status = OrderStatus.UNCONFIRMED
    order.save(update_fields=["status", "channel"])
    removed_line = order.lines.first()
    variables = {
        "id": graphene.Node.to_global_id("OrderLine", removed_line.id),
    }
    # when
    response = staff_api_client.post_graphql(ORDER_LINE_DELETE_MUTATION, variables)
    # then
    assert_no_permission(response)
@patch("saleor.plugins.manager.PluginsManager.draft_order_updated")
@patch("saleor.plugins.manager.PluginsManager.order_updated")
def METHOD_NAME(
    order_updated_webhook_mock,
    draft_order_updated_webhook_mock,
    order_with_lines,
    permission_manage_orders,
    app_api_client,
    channel_PLN,
):
    """An app with manage-orders permission can delete a line regardless of channel."""
    # given
    query = ORDER_LINE_DELETE_MUTATION
    order = order_with_lines
    order.channel = channel_PLN
    order.status = OrderStatus.UNCONFIRMED
    order.save(update_fields=["status", "channel"])
    line = order.lines.first()
    line_id = graphene.Node.to_global_id("OrderLine", line.id)
    variables = {"id": line_id}
    # when
    response = app_api_client.post_graphql(
        query, variables, permissions=(permission_manage_orders,)
    )
    # then
    content = get_graphql_content(response)
    data = content["data"]["orderLineDelete"]
    assert OrderEvent.objects.count() == 1
    assert OrderEvent.objects.last().type == order_events.OrderEvents.REMOVED_PRODUCTS
    assert data["orderLine"]["id"] == line_id
    assert line not in order.lines.all()
    assert_proper_webhook_called_once(
        order,
        OrderStatus.UNCONFIRMED,
        draft_order_updated_webhook_mock,
        order_updated_webhook_mock,
    )
@patch("saleor.plugins.manager.PluginsManager.draft_order_updated")
@patch("saleor.plugins.manager.PluginsManager.order_updated")
def test_invalid_order_when_removing_lines(
    order_update_webhook_mock,
    draft_order_update_webhook_mock,
    staff_api_client,
    order_with_lines,
    permission_group_manage_orders,
):
    """Line deletion fails and fires no webhooks for a non-editable order.

    NOTE(review): the order keeps its fixture default status here (not
    DRAFT/UNCONFIRMED), which presumably is what makes it non-editable —
    confirm against the order_with_lines fixture.
    """
    query = ORDER_LINE_DELETE_MUTATION
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    order = order_with_lines
    line = order.lines.first()
    line_id = graphene.Node.to_global_id("OrderLine", line.id)
    variables = {"id": line_id}
    response = staff_api_client.post_graphql(query, variables)
    content = get_graphql_content(response)
    data = content["data"]["orderLineDelete"]
    assert data["errors"]
    order_update_webhook_mock.assert_not_called()
    draft_order_update_webhook_mock.assert_not_called()
def test_draft_order_properly_recalculate_total_after_shipping_product_removed(
    staff_api_client,
    draft_order,
    permission_group_manage_orders,
):
    """After removing the only non-shipping line, the total still includes shipping."""
    permission_group_manage_orders.user_set.add(staff_api_client.user)
    order = draft_order
    line = order.lines.get(product_sku="SKU_AA")
    line.is_shipping_required = True
    line.save()
    query = ORDER_LINE_DELETE_MUTATION
    line_2 = order.lines.get(product_sku="SKU_B")
    line_2_id = graphene.Node.to_global_id("OrderLine", line_2.id)
    variables = {"id": line_2_id}
    response = staff_api_client.post_graphql(query, variables)
    content = get_graphql_content(response)
    data = content["data"]["orderLineDelete"]
    order.refresh_from_db()
    # Remaining line price + shipping price must equal the recalculated net total.
    assert data["order"]["total"]["net"]["amount"] == float(
        line.total_price_net_amount
    ) + float(order.shipping_price_net_amount) |
5,204 | graph | #!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import sys
from PyQt5 import QtCore, QtWidgets
from peacock.PostprocessorViewer.plugins.LineSettingsWidget import main
from peacock.utils import Testing
class TestLineSettingsWidget(Testing.PeacockImageTestCase):
    """
    Test class for the LineSettingsWidget.
    """
    #: QApplication: The main App for QT, this must be static to work correctly.
    qapp = QtWidgets.QApplication(sys.argv)
    def setUp(self):
        """
        Creates the GUI containing the ArtistToggleWidget and the matplotlib figure axes.
        """
        # Create widgets
        self._widget, self._toggle, self._window = main('data')
        # Create plotting function
        def METHOD_NAME():
            # Re-plot with the toggle's current settings on the selected axis.
            settings = self._toggle.settings()
            ax = self._window.axes()[settings.pop('axis')]
            self._window.clear()
            if self._toggle.isValid():
                ax.plot([0,1,2,4], [0,1,4,16], **settings)
            self._window.draw()
        self._toggle.clicked.connect(METHOD_NAME)
    def click(self):
        """
        Enable the plot.
        """
        self._toggle.CheckBox.setCheckState(QtCore.Qt.Checked)
        self._toggle.CheckBox.clicked.emit()
    def testEmpty(self):
        """
        Test that an empty plot is possible.
        """
        # Test for empty plot
        self.assertImage('testEmpty.png')
        # Test that controls are disabled
        self.assertFalse(self._toggle.ColorButton.isEnabled(), "ColorButton should be disabled.")
        self.assertFalse(self._toggle.PlotAxis.isEnabled(), "PlotAxis should be disabled.")
        self.assertFalse(self._toggle.LineStyle.isEnabled(), "LineStyle should be disabled.")
        self.assertFalse(self._toggle.LineWidth.isEnabled(), "LineWidth should be disabled.")
        self.assertFalse(self._toggle.MarkerStyle.isEnabled(), "MarkerStyle should be disabled.")
        self.assertFalse(self._toggle.MarkerSize.isEnabled(), "MarkerSize should be disabled.")
        self.assertIn('rgb(0, 0, 255, 255)', str(self._toggle.ColorButton.styleSheet()))
    def testToggle(self):
        """
        Test that a line appears.
        """
        self.click()
        self.assertImage('testToggle.png')
    def testColor(self):
        """
        Test that the line color can be changed.
        Note: This doesn't use the QColorDialog (it is assumed that this is working correctly)
        """
        self.click()
        self._toggle._settings['color'] = [1,0,0]
        self._toggle.update()
        self.assertImage('testColor.png')
    def testLineStyle(self):
        """
        Test that line style toggle is working.
        """
        self.click()
        self._toggle.LineStyle.setCurrentIndex(3)
        self.assertImage('testLineStyle.png')
    def testLineWidth(self):
        """
        Test that line width toggle is working.
        """
        self.click()
        self._toggle.LineWidth.setValue(6)
        self.assertImage('testLineWidth.png')
    def testMarkerStyleSize(self):
        """
        Test that markers style toggle is working.
        """
        self.click()
        # Disable line
        self._toggle.LineStyle.setCurrentIndex(4)
        self.assertFalse(self._toggle.LineWidth.isEnabled(), "LineWidth should be disabled.")
        self._toggle.MarkerStyle.setCurrentIndex(2)
        self._toggle.MarkerSize.setValue(12)
        self.assertTrue(self._toggle.MarkerSize.isEnabled(), "MarkerSize should be enabled.")
        self.assertImage('testMarkerStyleSize.png')
    def testRepr(self):
        """
        Test that repr() method is working.
        """
        self.click()
        output, imports = self._toggle.repr()
        exact = "axes0.plot(x, y, label='data', linestyle='-', linewidth=1, color=[0, 0, 1], marker='', markersize=1)"
        self.assertEqual(output[1], exact)
if __name__ == '__main__':
    # Allow running this test module directly with verbose output.
    import unittest
    unittest.main(module=__name__, verbosity=2) |
5,205 | create thing | # This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis/
#
# Copyright the Hypothesis Authors.
# Individual contributors are listed in AUTHORS.rst and the git log.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
import re
import time
import traceback
import pytest
from hypothesis import (
HealthCheck,
assume,
event,
example,
given,
settings,
stateful,
strategies as st,
target,
)
from hypothesis.statistics import collector, describe_statistics
def call_for_statistics(test_function):
    """Run *test_function* under the statistics collector and return the
    single statistics payload it reports."""
    collected = []
    with collector.with_value(collected.append):
        try:
            test_function()
        except Exception:
            # A failing test still reports statistics; print the error for
            # debugging instead of aborting collection.
            traceback.print_exc()
    assert len(collected) == 1, collected
    return collected[0]
def unique_events(stats):
    """Return the set of distinct events reported across every generated
    test case in the generate phase of *stats*."""
    # set().union flattens all per-test-case event lists in a single pass,
    # avoiding the quadratic list concatenation of sum(lists, []).
    return set().union(*(t["events"] for t in stats["generate-phase"]["test-cases"]))
def test_notes_hard_to_satisfy():
    """An assumption that is almost never satisfied shows up in the stop reason."""
    @given(st.integers())
    @settings(suppress_health_check=list(HealthCheck))
    def test(i):
        assume(i == 13)
    stats = call_for_statistics(test)
    assert "satisfied assumptions" in stats["stopped-because"]
def test_can_callback_with_a_string():
    """event() accepts a plain string, which then appears in the statistics."""
    @given(st.integers())
    def test(i):
        event("hi")
    stats = call_for_statistics(test)
    assert any("hi" in s for s in unique_events(stats))
# Module-level state for test_formats_are_evaluated_only_once: Foo instances
# record themselves in `seen` and stamp a fresh counter value on each str().
counter = 0
seen = []
class Foo:
    # All Foo instances compare and hash equal, so the statistics machinery
    # can deduplicate them; only str() distinguishes individual calls.
    def __eq__(self, other):
        return True
    def __ne__(self, other):
        return False
    def __hash__(self):
        return 0
    def __str__(self):
        seen.append(self)
        global counter
        counter += 1
        return f"COUNTER {counter}"
def test_formats_are_evaluated_only_once():
    """Each event() payload is str()-formatted exactly once."""
    global counter
    counter = 0
    @given(st.integers())
    def test(i):
        event(Foo())
    stats = call_for_statistics(test)
    # All Foo instances are equal/hash-equal, so only the first is formatted.
    assert "COUNTER 1" in unique_events(stats)
    assert "COUNTER 2" not in unique_events(stats)
def test_does_not_report_on_examples():
    """Events raised during @example-provided runs are excluded from statistics."""
    @example("hi")
    @given(st.integers())
    def test(i):
        if isinstance(i, str):
            event("boo")
    stats = call_for_statistics(test)
    assert not unique_events(stats)
def test_exact_timing():
    # NOTE(review): `time` here appears to be the suite's deterministic fake
    # clock (cf. time.freeze() below) — the exact "~ 529ms" figure relies on
    # it; confirm against the test-suite's time shim.
    @settings(suppress_health_check=[HealthCheck.too_slow], deadline=None)
    @given(st.integers())
    def test(i):
        time.sleep(0.5)
    stats = describe_statistics(call_for_statistics(test))
    assert "~ 529ms" in stats
def test_apparently_instantaneous_tests():
    """With a frozen clock, test runtimes are reported as sub-millisecond."""
    time.freeze()
    @given(st.integers())
    def test(i):
        pass
    stats = describe_statistics(call_for_statistics(test))
    assert "< 1ms" in stats
def test_flaky_exit():
    """A test that fails once and then passes on replay is reported as flaky."""
    first = [True]
    @settings(derandomize=True)
    @given(st.integers())
    def test(i):
        if i > 1001:
            if first[0]:
                # Fail only the very first time a large example is seen.
                first[0] = False
                print("Hi")
                raise AssertionError
    stats = call_for_statistics(test)
    assert stats["stopped-because"] == "test was flaky"
@pytest.mark.parametrize("draw_delay", [False, True])
@pytest.mark.parametrize("test_delay", [False, True])
def test_draw_timing(draw_delay, test_delay):
    """Time spent inside strategies is attributed to generation, not the test body."""
    time.freeze()
    @st.composite
    def s(draw):
        if draw_delay:
            time.sleep(0.05)
        draw(st.integers())
    @given(s())
    def test(_):
        if test_delay:
            time.sleep(0.05)
    stats = describe_statistics(call_for_statistics(test))
    if not draw_delay:
        assert "< 1ms" in stats
    else:
        # ~50ms per example should be attributed to data generation.
        match = re.search(r"of which ~ (?P<gentime>\d+)", stats)
        assert 49 <= int(match.group("gentime")) <= 51
def test_has_lambdas_in_output():
    """The source text of filter lambdas is recovered and shown in events."""
    @settings(max_examples=100, database=None)
    @given(st.integers().filter(lambda x: x % 2 == 0))
    def test(i):
        pass
    stats = call_for_statistics(test)
    assert any("lambda x: x % 2 == 0" in e for e in unique_events(stats))
def test_stops_after_x_shrinks(monkeypatch):
    # the max_shrinks argument is deprecated, but we still stop after some
    # number - which we can reduce to zero to check that this works.
    from hypothesis.internal.conjecture import engine
    monkeypatch.setattr(engine, "MAX_SHRINKS", 0)
    @given(st.integers(min_value=0))
    def test(n):
        assert n < 10
    stats = call_for_statistics(test)
    assert "shrunk example" in stats["stopped-because"]
def test_stateful_states_are_deduped():
    """State-machine steps with equivalent events collapse to a small unique set."""
    class DemoStateMachine(stateful.RuleBasedStateMachine):
        Stuff = stateful.Bundle("stuff")
        @stateful.rule(target=Stuff, name=st.text())
        def create_stuff(self, name):
            return name
        @stateful.rule(item=Stuff)
        def do(self, item):
            return
    stats = call_for_statistics(DemoStateMachine.TestCase().runTest)
    assert len(unique_events(stats)) <= 2
def test_stateful_with_one_of_bundles_states_are_deduped():
    """Deduplication also applies when a rule draws from a union of bundles."""
    class DemoStateMachine(stateful.RuleBasedStateMachine):
        Things = stateful.Bundle("things")
        Stuff = stateful.Bundle("stuff")
        StuffAndThings = Things | Stuff
        @stateful.rule(target=Things, name=st.text())
        def METHOD_NAME(self, name):
            return name
        @stateful.rule(target=Stuff, name=st.text())
        def create_stuff(self, name):
            return name
        @stateful.rule(item=StuffAndThings)
        def do(self, item):
            return
    stats = call_for_statistics(DemoStateMachine.TestCase().runTest)
    assert len(unique_events(stats)) <= 4
def test_statistics_for_threshold_problem():
    """Target scores are reported, including labels absent from the failing example."""
    @settings(max_examples=100)
    @given(st.floats(min_value=0, allow_infinity=False))
    def threshold(error):
        target(error, label="error")
        assert error <= 10
        # Only reached for passing examples, yet must still be listed.
        target(0.0, label="never in failing example")
    stats = call_for_statistics(threshold)
    assert " - Highest target scores:" in describe_statistics(stats)
    assert "never in failing example" in describe_statistics(stats)
    # Check that we report far-from-threshold failing examples
    assert stats["targets"]["error"] > 10
def test_statistics_with_events_and_target():
    """Both the Events and the Highest-target-score sections appear together."""
    @given(st.sampled_from("1234"))
    def test(value):
        event(value)
        target(float(value), label="a target")
    stats = describe_statistics(call_for_statistics(test))
    assert "- Events:" in stats
    assert "- Highest target score: " in stats
@given(st.booleans())
def test_event_with_non_weakrefable_keys(b):
    # Tuples cannot be weak-referenced; event() must still accept them.
    event((b,)) |
5,206 | test remove composite map | """
Test Composite Map
"""
import numpy as np
import pytest
import astropy.units as u
from astropy.tests.helper import assert_quantity_allclose
import sunpy.data.test
import sunpy.map
from sunpy.tests.helpers import figure_test
# Ignore missing metadata warnings
# (the bundled test maps trigger these warnings; suppressed suite-wide).
pytestmark = [pytest.mark.filterwarnings('ignore:Missing metadata for observer'),
              pytest.mark.filterwarnings(r'ignore:Unable to treat `\.meta` as a FITS header')]
@pytest.fixture
def composite_test_map(aia171_test_map, hmi_test_map):
    """Composite of the AIA 171 and HMI test maps with harmonized metadata."""
    # The test maps have wildly different observation times, which throws off compositing
    hmi_test_map.meta['date-obs'] = aia171_test_map.meta['date-obs']
    # Also set the HMI observer location to be the same as the AIA observer location
    del hmi_test_map.meta['crln_obs']
    del hmi_test_map.meta['crlt_obs']
    hmi_test_map.meta['hgln_obs'] = aia171_test_map.observer_coordinate.lon.to_value('deg')
    hmi_test_map.meta['hglt_obs'] = aia171_test_map.observer_coordinate.lat.to_value('deg')
    return sunpy.map.Map(aia171_test_map, hmi_test_map, composite=True)
def test_type_of_arguments_composite_map(composite_test_map):
    # Non-map arguments must be rejected with a descriptive error.
    with pytest.raises(ValueError) as excinfo:
        sunpy.map.CompositeMap(23, composite=True)
    assert str(excinfo.value) == 'CompositeMap expects pre-constructed map objects.'
@figure_test
def test_plot_composite_map(composite_test_map):
    composite_test_map.plot()
@figure_test
def test_plot_composite_map_contours(composite_test_map):
    # Contour levels on layer 1 (HMI), from -75% to +75% in 25% steps.
    composite_test_map.set_levels(1, np.arange(-75, 76, 25) << u.percent)
    composite_test_map.plot()
@figure_test
def test_plot_composite_map_linewidths(composite_test_map):
    composite_test_map.set_levels(1, np.arange(-75, 76, 25) << u.percent)
    composite_test_map.plot(linewidths=0.5)
@figure_test
def test_plot_composite_map_linestyles(composite_test_map):
    composite_test_map.set_levels(1, np.arange(-75, 76, 25) << u.percent)
    composite_test_map.plot(linestyles='--')
@figure_test
def test_plot_composite_map_colors(composite_test_map):
    composite_test_map.set_levels(1, np.arange(-75, 76, 25) << u.percent)
    composite_test_map.plot(colors='red')
def test_plot_composite_map_mplkwargs(composite_test_map):
    # Unknown matplotlib kwargs are reported by name; known ones are not.
    composite_test_map.set_levels(1, np.arange(-75, 76, 25) << u.percent)
    with pytest.raises(TypeError) as e:
        composite_test_map.plot(linestyles='--', unused_a=1, unused_b=2)
    assert 'plot() got unexpected keyword arguments' in str(e.value)
    assert 'unused_a' in str(e.value)
    assert 'unused_b' in str(e.value)
    assert 'linestyles' not in str(e.value)
def METHOD_NAME(composite_test_map):
    # After removing the first map, the old last index is out of range.
    composite_test_map.remove_map(0)
    with pytest.raises(IndexError):
        composite_test_map.get_map(1)
def test_get_composite_map(composite_test_map, aia171_test_map, hmi_test_map):
    assert composite_test_map.get_map(0) == aia171_test_map
    assert composite_test_map.get_map(1) == hmi_test_map
def test_get_alpha_composite_map(composite_test_map, aia171_test_map, hmi_test_map):
    # Without an index, get_alpha returns the alphas of all layers in order.
    assert composite_test_map.get_alpha() == [aia171_test_map.alpha, hmi_test_map.alpha]
def test_get_alpha_with_index_composite_map(composite_test_map, aia171_test_map, hmi_test_map):
    assert composite_test_map.get_alpha(0) == aia171_test_map.alpha
    assert composite_test_map.get_alpha(1) == hmi_test_map.alpha
def test_get_levels_composite_map(composite_test_map, aia171_test_map, hmi_test_map):
    assert composite_test_map.get_levels() == [aia171_test_map.levels, hmi_test_map.levels]
def test_get_levels_with_index_composite_map(composite_test_map, aia171_test_map, hmi_test_map):
    assert composite_test_map.get_levels(0) == aia171_test_map.levels
    assert composite_test_map.get_levels(1) == hmi_test_map.levels
@figure_test
def test_set_alpha_composite_map(composite_test_map):
    composite_test_map.set_alpha(1, 0.5)
    composite_test_map.plot()
@pytest.mark.parametrize(('index', 'alpha'), [(0, 5.0), (1, -3.0)])
def test_set_alpha_out_of_range_composite_map(composite_test_map, index, alpha):
    # Alpha values outside [0, 1] are rejected for any layer.
    with pytest.raises(Exception) as excinfo:
        composite_test_map.set_alpha(index, alpha)
    assert str(excinfo.value) == 'Alpha value must be between 0 and 1.'
def test_set_levels_percent(composite_test_map):
    # Plain numbers are stored as-is; percent=True attaches percent units.
    numbers = np.arange(10, 100, 10)
    composite_test_map.set_levels(0, numbers)
    np.testing.assert_allclose(composite_test_map.get_levels(0), numbers)
    implicit_percentage = np.arange(10, 100, 10)
    composite_test_map.set_levels(0, implicit_percentage, percent=True)
    assert_quantity_allclose(composite_test_map.get_levels(0), implicit_percentage << u.percent)
@figure_test
def test_peek_composite_map(composite_test_map):
    composite_test_map.peek() |
5,207 | trigger | from __future__ import annotations
import weakref
from functools import wraps
from typing import Callable, TypeVar, cast
# See https://mypy.readthedocs.io/en/stable/generics.html#declaring-decorators
TCallable = TypeVar("TCallable", bound=Callable)
# Registry of event name -> callbacks, plus the set of currently disabled events.
__events = {}
__disabled_events = set()
__all__ = ["Callables", "disable", "enable", "noop", "register", "subscribe", "trigger"]
def __dir__():
    # Restrict introspection/tab-completion to the public API.
    return __all__
def noop(*args, **kwargs):
    """Do nothing (accepts and ignores any arguments)."""
    pass
class Callables:
    """An ordered collection of weakly-referenced callbacks.

    Plain functions are held through a single weak reference; bound methods
    are held as a (function, instance) pair of weak references so that the
    registry never keeps a subscriber object alive.
    """

    def __init__(self):
        # Each entry is (weakref-to-function, weakref-to-instance-or-None).
        self._callbacks = []

    @property
    def callbacks(self):
        """
        Get the current list of living callbacks.
        """
        self._flush()
        return self._callbacks

    def append(self, callback):
        """
        Append a new bound method as a callback to the list of callables.
        """
        try:
            # Bound method: weakly reference function and instance separately.
            entry = (weakref.ref(callback.__func__), weakref.ref(callback.__self__))
        except AttributeError:
            # Plain function (no __func__/__self__ attributes).
            entry = (weakref.ref(callback), None)
        self._callbacks.append(entry)

    def _flush(self):
        """
        Drop entries whose weakly-referenced instance has been collected.

        Note: must interact with self._callbacks directly, and not
        self.callbacks, to avoid infinite recursion.
        """
        self._callbacks = [
            (func, arg)
            for func, arg in self._callbacks
            if arg is None or arg() is not None
        ]

    def __call__(self, *args, **kwargs):
        for func, arg in self._callbacks:
            # weakrefs must be de-referenced before calling.
            if arg is None:
                func()(*args, **kwargs)
            else:
                instance = arg()
                if instance is not None:
                    func()(instance, *args, **kwargs)
        # Flush after calling all the callbacks, not before: callbacks early
        # in the iteration can invalidate weakrefs held by later entries, and
        # flushing once at the end avoids redundant per-iteration checks.
        self._flush()

    def __iter__(self):
        return iter(self.callbacks)

    def __getitem__(self, index):
        return self.callbacks[index]

    def __len__(self):
        return len(self.callbacks)

    def __repr__(self):
        return f"Callables({self.callbacks})"
def subscribe(event: str):
    """
    Subscribe a function or object method as a callback to an event.

    .. note::

        This is meant to be used as a decorator.

    Args:
        event (:obj:`str`): The name of the event to subscribe to.

    Returns:
        :obj:`function`: Decorated function.

    Example:
        >>> import pyhf
        >>> @pyhf.events.subscribe("myevent")
        ... def test(a, b):
        ...     print(a + b)
        ...
        >>> pyhf.events.trigger("myevent")(1, 2)
        3
    """
    global __events

    def _attach(func: TCallable) -> TCallable:
        # Create the event's callback registry on first subscription.
        __events.setdefault(event, Callables()).append(func)
        return func

    return cast(TCallable, _attach)
def register(event: str) -> Callable[[TCallable], TCallable]:
    """
    Register a function or object method to trigger an event. This creates two
    events: ``{event_name}::before`` and ``{event_name}::after``.

    .. note::

        This is meant to be used as a decorator.

    Args:
        event (:obj:`str`): The name of the event to subscribe to.

    Returns:
        :obj:`function`: Decorated function.

    Example:
        >>> import pyhf
        >>> @pyhf.events.register("test_func")
        ... def test(a, b):
        ...     print(a + b)
        ...
        >>> @pyhf.events.subscribe("test_func::before")
        ... def precall():
        ...     print("before call")
        ...
        >>> @pyhf.events.subscribe("test_func::after")
        ... def postcall():
        ...     print("after call")
        ...
        >>> test(1, 2)
        before call
        3
        after call
    """

    def _register(func: TCallable) -> TCallable:
        @wraps(func)
        def register_wrapper(*args, **kwargs):  # type: ignore
            # Fire the "before" hook, run the wrapped callable, then the
            # "after" hook, passing the wrapped callable's result through.
            METHOD_NAME(f"{event:s}::before")()
            result = func(*args, **kwargs)
            METHOD_NAME(f"{event:s}::after")()
            return result

        return register_wrapper

    return cast(TCallable, _register)
def METHOD_NAME(event: str) -> Callables:
    """
    Trigger an event if not disabled.

    Returns the event's registered ``Callables`` collection, or ``noop`` when
    the event is disabled or has no subscribers.
    """
    global __events, __disabled_events, noop
    if event in __disabled_events or event not in __events:
        return noop
    return __events.get(event)
def disable(event: str):
    """
    Disable an event from firing.
    """
    global __disabled_events
    # Membership in __disabled_events makes the trigger lookup return noop.
    __disabled_events.add(event)
def enable(event: str):
    """
    Enable an event to be fired if disabled.

    Raises:
        KeyError: If the event was not previously disabled.
    """
    global __disabled_events
    # Fixed: a stray table-separator token ("|") fused onto this line made the
    # module unparseable.
    __disabled_events.remove(event)
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
import os
import sys
import unittest
# noinspection PyProtectedMember
from numpy.testing import assert_allclose
from numpy.testing import assert_array_less
from numpy.testing import assert_equal
from numpy.testing import assert_raises
from sklearn.base import clone
from scipy.stats import rankdata
# temporary solution for relative imports in case pyod is not installed
# if pyod is installed, no need to use the following line
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from pyod.models.sos import SOS
from pyod.utils.data import generate_data
# TODO: check performance is turned off due to poor performance.
class TestSOS(unittest.TestCase):
    """Smoke tests for the SOS (Stochastic Outlier Selection) detector.

    ROC-performance assertions are commented out because SOS performs poorly
    on this synthetic data (see the module-level TODO).
    """

    def setUp(self):
        # Small synthetic train/test split with a fixed seed for determinism.
        self.n_train = 200
        self.n_test = 100
        self.contamination = 0.1
        # self.roc_floor = 0.8
        self.X_train, self.X_test, self.y_train, self.y_test = generate_data(
            n_train=self.n_train, n_test=self.n_test, n_features=5,
            contamination=self.contamination, random_state=42)

        self.clf = SOS(contamination=self.contamination)
        self.clf.fit(self.X_train)

    def test_parameters(self):
        """Fitting must expose the standard pyod fitted attributes."""
        assert (hasattr(self.clf, 'decision_scores_') and
                self.clf.decision_scores_ is not None)
        assert (hasattr(self.clf, 'labels_') and
                self.clf.labels_ is not None)
        assert (hasattr(self.clf, 'threshold_') and
                self.clf.threshold_ is not None)

    def METHOD_NAME(self):
        """There is exactly one training score per training sample."""
        assert_equal(len(self.clf.decision_scores_), self.X_train.shape[0])

    def test_prediction_scores(self):
        pred_scores = self.clf.decision_function(self.X_test)

        # check score shapes
        assert_equal(pred_scores.shape[0], self.X_test.shape[0])

        # check performance
        # assert (roc_auc_score(self.y_test, pred_scores) >= self.roc_floor)

    def test_prediction_labels(self):
        pred_labels = self.clf.predict(self.X_test)
        assert_equal(pred_labels.shape, self.y_test.shape)

    def test_prediction_proba(self):
        pred_proba = self.clf.predict_proba(self.X_test)
        assert (pred_proba.min() >= 0)
        assert (pred_proba.max() <= 1)

    def test_prediction_proba_linear(self):
        pred_proba = self.clf.predict_proba(self.X_test, method='linear')
        assert (pred_proba.min() >= 0)
        assert (pred_proba.max() <= 1)

    def test_prediction_proba_unify(self):
        pred_proba = self.clf.predict_proba(self.X_test, method='unify')
        assert (pred_proba.min() >= 0)
        assert (pred_proba.max() <= 1)

    def test_prediction_proba_parameter(self):
        """Unknown probability-conversion methods must be rejected."""
        with assert_raises(ValueError):
            self.clf.predict_proba(self.X_test, method='something')

    def test_prediction_labels_confidence(self):
        pred_labels, confidence = self.clf.predict(self.X_test,
                                                   return_confidence=True)
        assert_equal(pred_labels.shape, self.y_test.shape)
        assert_equal(confidence.shape, self.y_test.shape)
        assert (confidence.min() >= 0)
        assert (confidence.max() <= 1)

    def test_prediction_proba_linear_confidence(self):
        pred_proba, confidence = self.clf.predict_proba(self.X_test,
                                                        method='linear',
                                                        return_confidence=True)
        assert (pred_proba.min() >= 0)
        assert (pred_proba.max() <= 1)

        assert_equal(confidence.shape, self.y_test.shape)
        assert (confidence.min() >= 0)
        assert (confidence.max() <= 1)

    def test_fit_predict(self):
        pred_labels = self.clf.fit_predict(self.X_train)
        assert_equal(pred_labels.shape, self.y_train.shape)

    def test_fit_predict_score(self):
        self.clf.fit_predict_score(self.X_test, self.y_test)
        self.clf.fit_predict_score(self.X_test, self.y_test,
                                   scoring='roc_auc_score')
        self.clf.fit_predict_score(self.X_test, self.y_test,
                                   scoring='prc_n_score')
        with assert_raises(NotImplementedError):
            self.clf.fit_predict_score(self.X_test, self.y_test,
                                       scoring='something')

    def test_predict_rank(self):
        # Fixed local typo: pred_socres -> pred_scores.
        pred_scores = self.clf.decision_function(self.X_test)
        pred_ranks = self.clf._predict_rank(self.X_test)

        # assert the order is preserved
        assert_allclose(rankdata(pred_ranks), rankdata(pred_scores), atol=2)
        assert_array_less(pred_ranks, self.X_train.shape[0] + 1)
        assert_array_less(-0.1, pred_ranks)

    def test_predict_rank_normalized(self):
        pred_scores = self.clf.decision_function(self.X_test)
        pred_ranks = self.clf._predict_rank(self.X_test, normalized=True)

        # assert the order is preserved
        assert_allclose(rankdata(pred_ranks), rankdata(pred_scores), atol=2)
        assert_array_less(pred_ranks, 1.01)
        assert_array_less(-0.1, pred_ranks)

    def test_model_clone(self):
        # Previously the clone result was created but never checked; a
        # scikit-learn clone must carry over the constructor parameters.
        clone_clf = clone(self.clf)
        assert clone_clf.get_params() == self.clf.get_params()

    def tearDown(self):
        pass
# Allow running this test module directly. (Fixed: a stray table-separator
# token ("|") fused onto the call made the module unparseable.)
if __name__ == '__main__':
    unittest.main()
# Copyright (c) Facebook, Inc. and its affiliates.
import collections
import collections.abc
import copy
import warnings
from collections import OrderedDict
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from mmf.common.sample import detach_tensor, SampleList
class Report(OrderedDict):
    """Ordered mapping that merges a batch ``SampleList`` with model output.

    Values are reachable both as items (``report["losses"]``) and as
    attributes (``report.losses``) via the ``__getattr__``/``__setattr__``
    overrides below.
    """

    def __init__(
        self, batch: SampleList = None, model_output: Dict[str, Any] = None, *args
    ):
        """Build a report from a batch, model output and extra mappings.

        Args:
            batch: SampleList for the current batch, or a sequence of
                (key, value) tuples. When None an empty report is created.
            model_output: Mapping of model outputs merged on top of the batch.
            *args: Additional mappings merged in order; duplicate keys warn.

        Raises:
            TypeError: If any argument is not a Mapping.
        """
        super().__init__(self)
        if batch is None:
            return
        if model_output is None:
            model_output = {}
        if self._check_and_load_tuple(batch):
            return

        all_args = [batch, model_output] + [*args]
        for idx, arg in enumerate(all_args):
            if not isinstance(arg, collections.abc.Mapping):
                raise TypeError(
                    "Argument {:d}, {} must be of instance of "
                    "collections.abc.Mapping".format(idx, arg)
                )

        self.batch_size = batch.get_batch_size()
        self.warning_string = (
            "Updating forward report with key {}"
            "{}, but it already exists in {}. "
            "Please consider using a different key, "
            "as this can cause issues during loss and "
            "metric calculations."
        )

        for idx, arg in enumerate(all_args):
            for key, item in arg.items():
                # Only warn about collisions for the extra mappings (idx >= 2);
                # model_output intentionally overrides batch keys silently.
                if key in self and idx >= 2:
                    log = self.warning_string.format(
                        key, "", "in previous arguments to report"
                    )
                    warnings.warn(log)
                self[key] = item

    def get_batch_size(self) -> int:
        """Return the batch size recorded at construction time."""
        return self.batch_size

    @property
    def batch_size(self) -> int:
        return self._batch_size

    @batch_size.setter
    def batch_size(self, batch_size: int):
        self._batch_size = batch_size

    def _check_and_load_tuple(self, batch):
        # Accept a sequence of (key, value) pairs in place of a mapping;
        # returns True when the batch was consumed that way.
        if isinstance(batch, collections.abc.Mapping):
            return False

        if isinstance(batch[0], (tuple, list)) and isinstance(batch[0][0], str):
            for kv_pair in batch:
                self[kv_pair[0]] = kv_pair[1]
            return True
        else:
            return False

    def __setattr__(self, key: str, value: Any):
        # Attribute writes are stored as mapping items.
        self[key] = value

    def __getattr__(self, key: str):
        try:
            return self[key]
        except KeyError:
            raise AttributeError(key)

    def fields(self) -> List[str]:
        """Return the report's keys as a plain list."""
        return list(self.keys())

    def METHOD_NAME(self, fn: Callable, fields: Optional[List[str]] = None):
        """Applies a function `fn` on all items in a report. Can apply to specific
        fields if `fields` parameter is passed

        Args:
            fn (Callable): A callable to called on each item in report
            fields (List[str], optional): Use to apply on specific fields.
                Defaults to None.

        Returns:
            Report: Update report after apply fn
        """
        for key in self.keys():
            if fields is not None and isinstance(fields, (list, tuple)):
                if key not in fields:
                    continue
            # fn is applied to the value as a whole first, then element-wise
            # for mutable sequences and dicts (e.g. detaching each tensor
            # inside a list of tensors).
            self[key] = fn(self[key])
            if isinstance(self[key], collections.abc.MutableSequence):
                for idx, item in enumerate(self[key]):
                    self[key][idx] = fn(item)
            elif isinstance(self[key], dict):
                for subkey in self[key].keys():
                    self[key][subkey] = fn(self[key][subkey])
        return self

    def detach(self) -> "Report":
        """Similar to tensor.detach, detach all items in a report from their graphs.
        This is useful in clearing up memory sometimes.

        Returns:
            Report: Detached report is returned back.
        """
        return self.METHOD_NAME(detach_tensor)

    def to(
        self,
        device: Union[torch.device, str],
        non_blocking: bool = True,
        fields: Optional[List[str]] = None,
    ):
        """Move report to a specific device defined 'device' parameter.
        This is similar to how one moves a tensor or sample_list to a device

        Args:
            device (torch.device): Device can be str defining device or torch.device
            non_blocking (bool, optional): Whether transfer should be non_blocking.
                Defaults to True.
            fields (List[str], optional): Use this is you only want to move some
                specific fields to the device instead of full report. Defaults to None.

        Raises:
            TypeError: If device type is not correct

        Returns:
            Report: Updated report is returned back
        """
        if not isinstance(device, torch.device):
            if not isinstance(device, str):
                raise TypeError(
                    "device must be either 'str' or "
                    "'torch.device' type, {} found".format(type(device))
                )
            device = torch.device(device)

        def fn(x):
            # Anything with a .to (tensors, sample lists) is moved; the rest
            # is passed through untouched.
            if hasattr(x, "to"):
                x = x.to(device, non_blocking=non_blocking)
            return x

        return self.METHOD_NAME(fn, fields)

    def accumulate_tensor_fields_and_loss(
        self, report: "Report", field_list: List[str]
    ):
        """Concatenate the listed tensor/list fields from `report` and fold in
        its losses; missing fields only warn so metrics degrade gracefully."""
        for key in field_list:
            if key == "__prediction_report__":
                continue
            if key not in self.keys():
                warnings.warn(
                    f"{key} not found in report. Metrics calculation "
                    + "might not work as expected."
                )
                continue
            if isinstance(self[key], torch.Tensor):
                self[key] = torch.cat((self[key], report[key]), dim=0)
            elif isinstance(self[key], list):
                # plain `list` check (equivalent to the former typing.List one)
                self[key].extend(report[key])

        self._accumulate_loss(report)

    def _accumulate_loss(self, report: "Report"):
        for key, value in report.losses.items():
            if key not in self.losses.keys():
                warnings.warn(
                    f"{key} not found in report. Loss calculation "
                    + "might not work as expected."
                )
                # Seed the accumulator on first sight of this loss. Previously
                # the value was also added below, counting it twice.
                self.losses[key] = value
            elif isinstance(self.losses[key], torch.Tensor):
                self.losses[key] += value

    def copy(self) -> "Report":
        """Get a copy of the current Report

        Returns:
            Report: Copy of current Report.
        """
        report = Report()
        fields = self.fields()

        for field in fields:
            report[field] = copy.deepcopy(self[field])

        return report
import os
import logging
import ruamel.yaml
from a2ml.api.utils.config_yaml import ConfigYaml
from a2ml.api.utils import fsclient
log = logging.getLogger("a2ml")
'''Config to serialize pass to server side deserialize, then pass back and save on CLI side'''
class SerializableConfigYaml(ConfigYaml):
    """ConfigYaml variant that can round-trip through pickle.

    The underlying ruamel.yaml document is re-serialized to text when pickled
    and re-parsed on unpickling, since ruamel node objects themselves are not
    reliably picklable.
    """

    # For pickle serialization
    def __getstate__(self):
        # NOTE(review): ruamel's YAML.dump normally requires an explicit
        # stream argument; confirm that dumping without one returns the YAML
        # text here rather than raising.
        return {
            'filename': self.filename,
            'yaml': ruamel.yaml.YAML(typ='rt').dump(self.yaml)
        }

    def __setstate__(self, state):
        self.filename = state['filename']
        self.yaml = ruamel.yaml.YAML(typ='rt').load(state['yaml'])

    def write(self, filename=None, client_side=True):
        # Only persist to disk on the CLI side; the server sends the config
        # back to the client instead of writing locally.
        if client_side:
            super().write(filename)
class ConfigParts(object):
    """Collection of named YAML config parts (config/auger/azure/google)."""

    def __init__(self):
        self.parts = {}
        self.part_names = ['config', 'auger', 'azure', 'google']
        self.is_loaded = False

    def load(self, path=None):
        """Load every part whose YAML file exists under ``path`` (cwd by default)."""
        base = os.getcwd() if path is None else path
        for part_name in self.part_names:
            candidate = os.path.join(base, '%s.yaml' % part_name)
            if not fsclient.is_s3_path(candidate):
                candidate = os.path.abspath(candidate)
            if fsclient.is_file_exists(candidate):
                self.parts[part_name] = self._load(candidate)
        self.is_loaded = True

    def ismultipart(self):
        """True when more than one part file was loaded."""
        return len(self.parts) > 1

    def keys(self):
        """Names of the parts that are currently loaded."""
        return self.parts.keys()

    def part(self, name, create_if_not_exist=False):
        """Return the named part.

        Unknown names yield a fresh empty part; it is retained for future
        lookups only when ``create_if_not_exist`` is true.
        """
        existing = self.parts.get(name)
        if existing is not None:
            return existing
        fresh = SerializableConfigYaml()
        if create_if_not_exist:
            self.parts[name] = fresh
        return fresh

    def _load(self, name):
        # Parse a single YAML file into a serializable part.
        loaded = SerializableConfigYaml()
        loaded.load_from_file(name)
        return loaded
class Config(object):
def __init__(self, name='config', path=None):
super(Config, self).__init__()
self.runs_on_server = False
self.name = name
self.path = path
self.parts = ConfigParts()
self.parts_changes = ConfigParts()
self.load(path)
def get(self, path, default=None, config_name=None, parts=None):
if parts is None:
parts = self.parts
if len(parts.keys()) == 0:
return default
if 'config' in parts.keys():
default = parts.part('config').get(path, default)
if not config_name:
config_name = self.name
return parts.part(config_name).get(path, default)
def get_list(self, path, default=None, config_name=None):
data = self.get(path, default, config_name)
res = data
if data:
if isinstance(data, str):
res = data.split(",")
res = [item.strip() for item in res]
else:
res = list(data)
return res
def get_path(self):
path = self.path
if path is None:
path = os.getcwd()
return path
def get_model_path(self, model_id):
return os.path.join(self.get_path(), 'models', model_id)
def set(self, path, value, config_name=None):
if not config_name:
config_name = self.name
self.parts.part(config_name, True).set(path, value)
if self.runs_on_server:
self.parts_changes.part(config_name, True).set(path, value)
def clean_changes(self):
self.parts_changes = ConfigParts()
def remove(self, path, config_name=None):
if not config_name:
config_name = self.name
self.parts.part(config_name).remove(path)
def write(self, config_name=None):
if not config_name:
config_name = self.name
self.parts.part(config_name).write(client_side=not self.runs_on_server)
def METHOD_NAME(self):
for part_name in self.parts.parts.keys():
self.write(part_name)
def ismultipart(self):
return self.parts.ismultipart()
def load(self, path=None, reload=False):
if (not self.parts.is_loaded) or reload:
self.parts.load(path)
return self
def set_validation_source(self, validation_source):
self.set('experiment/validation_source', validation_source)
self.remove('experiment/validation_dataset' |
##############################################################
# Copyright 2023 Lawrence Livermore National Security, LLC
# (c.f. AUTHORS, NOTICE.LLNS, COPYING)
#
# This file is part of the Flux resource manager framework.
# For details, see https://github.com/flux-framework.
#
# SPDX-License-Identifier: LGPL-3.0
##############################################################
import argparse
import json
import logging
import math
import sys
import flux
import flux.job
import flux.util
LOGGER = logging.getLogger("flux-update")
class JobspecUpdates:
    """
    Convenience class for building a jobspec-update payload from a
    set of KEY=VALUE pairs on the command line, and a method to send
    the update as a request to the job manager.
    """

    # Mapping of short key names, i.e. as given on the command line,
    # to full dotted-path location in jobspec.
    #
    # Note: If a key doesn't exist in this mapping, but also does not start
    # with 'attributes.', 'resources.' or 'tasks.', then 'attributes.system'
    # is assumed.
    #
    key_aliases = {"name": "attributes.system.job.name"}

    def __init__(self, jobid, flux_handle=None):
        # Handle is created lazily by the flux_handle property when not given.
        self._flux_handle = flux_handle
        self.jobid = jobid
        # Accumulated updates (dotted path -> value); built by METHOD_NAME().
        self.updates = None
        # Cached jobspec, fetched on first _fetch_jobspec() call.
        self.jobspec = None

    @property
    def flux_handle(self):
        # Lazily open a connection to the local Flux instance.
        if self._flux_handle is None:
            self._flux_handle = flux.Flux()
        return self._flux_handle

    def _apply_jobspec_updates(self, eventlog):
        """
        Apply jobspec updates from eventlog to internal jobspec:
        """
        for entry in eventlog.splitlines():
            event = flux.job.EventLogEvent(entry)
            if event.name == "jobspec-update":
                # Each context item is a dotted-path key with its new value.
                for key, value in event.context.items():
                    self.jobspec.setattr(key, value)

    def _fetch_jobspec(self, key):
        """
        Fetch dotted key 'key' in jobspec for this job, fetching jobspec
        and eventlog (to apply jobspec-updates) if necessary.
        """
        if self.jobspec is None:
            lookup = flux.job.job_kvs_lookup(
                self.flux_handle, jobid=self.jobid, keys=["jobspec", "eventlog"]
            )
            self.jobspec = flux.job.JobspecV1(**lookup["jobspec"])
            self._apply_jobspec_updates(lookup["eventlog"])
        return self.jobspec.getattr(key)

    def update_attributes_system_duration(self, value):
        """
        Handle a duration update.

        If update begins with "+" or "-", then get duration from jobspec and
        increase or decrease by the amount of the remaining argument. O/w,
        treat value as an explicit new duration.
        """
        result = None
        if value.startswith(("-", "+")):
            # relative update, fetch value first
            duration = self._fetch_jobspec("attributes.system.duration")
            if duration == 0:
                # A zero duration means "unlimited" in jobspec, so there is
                # no base value to adjust relative to.
                raise ValueError(
                    f"duration for {self.jobid} is unlimited, "
                    f"can't update by {value}"
                )
            arg = flux.util.parse_fsd(value[1:])
            if value.startswith("-"):
                result = duration - arg
                if result <= 0.0:
                    duration = flux.util.fsd(duration)
                    raise ValueError(
                        f"current duration for {self.jobid} ({duration})"
                        f" cannot be reduced by {value[1:]}"
                    )
            else:
                result = duration + arg
        else:
            result = flux.util.parse_fsd(value)

        # An unlimited duration is represented as 0. in jobspec, so
        # check for infinity here and replace with 0.
        #
        if math.isinf(result):
            result = 0.0
        return result

    def METHOD_NAME(self, key, value):
        """
        Append an update to the current updates object.
        """
        if self.updates is None:
            self.updates = {}

        # Handle any special keys aliases
        if key in self.key_aliases:
            key = self.key_aliases[key]

        # If key doesn't start with attributes, resources, or tasks,
        # assume 'attributes.system.' for convenience:
        if not key.startswith(("attributes.", "resources.", "tasks.")):
            key = f"attributes.system.{key}"

        try:
            # Use any function update_attributes_system_blah() if
            # registered to process the value:
            #
            function_signature = "update_" + key.replace(".", "_")
            value = getattr(self, function_signature)(value)
        except AttributeError:
            # Otherwise, attempt to load value as JSON:
            #
            try:
                value = json.loads(value)
            except json.decoder.JSONDecodeError:
                # Otherwise, load value as string:
                #
                value = str(value)

        self.updates[key] = value

    def items(self):
        """
        Convenience wrapper to return a copy of the current update
        dictionary key, value pairs
        """
        # NOTE(review): assumes METHOD_NAME() was called at least once;
        # self.updates is None until then — confirm callers guarantee this.
        return self.updates.items()

    def to_json(self):
        """Serialize the accumulated updates as a JSON object string."""
        return json.dumps(self.updates)

    def send_rpc(self):
        """Send the updates to the job manager; returns the pending RPC."""
        payload = {"id": self.jobid, "updates": self.updates}
        return self.flux_handle.rpc("job-manager.update", payload)
def parse_args():
    """Parse the flux-update command line: a jobid plus KEY=VALUE updates."""
    parser = argparse.ArgumentParser(
        prog="flux-update", formatter_class=flux.util.help_formatter()
    )
    parser.add_argument(
        "-n",
        "--dry-run",
        action="store_true",
        help="Do not apply any updates, just emit update payload to stdout",
    )
    parser.add_argument(
        "-v",
        "--verbose",
        action="store_true",
        default=0,
        help="Be more verbose. Log updated items after success.",
    )
    parser.add_argument(
        "jobid",
        metavar="JOBID",
        type=flux.job.JobID,
        help="Target jobid",
    )
    parser.add_argument(
        "updates",
        metavar="KEY=VALUE",
        type=str,
        nargs="+",
        help="Requested jobspec updates in KEY=VALUE form",
    )
    return parser.parse_args()
@flux.util.CLIMain(LOGGER)
def main():
    """Entry point: parse arguments, build the update payload, send the RPC."""
    # Re-wrap stdio with surrogateescape so arbitrary (non-UTF-8) bytes in job
    # data can round-trip through printed output without raising.
    sys.stdout = open(
        sys.stdout.fileno(), "w", encoding="utf8", errors="surrogateescape"
    )
    sys.stderr = open(
        sys.stderr.fileno(), "w", encoding="utf8", errors="surrogateescape"
    )
    args = parse_args()

    updates = JobspecUpdates(args.jobid)

    for arg in args.updates:
        key, _, value = arg.partition("=")
        updates.METHOD_NAME(key, value)

    if args.dry_run:
        # Emit the payload without contacting the job manager.
        print(updates.to_json())
        sys.exit(0)

    updates.send_rpc().get()

    if args.verbose:
        for key, value in updates.items():
            LOGGER.info(f"updated {key} to {value}")
# Allow running as a plain script as well as via the flux-update wrapper.
if __name__ == "__main__":
    main()
# vi: ts=4 sw=4 expandtab
"light-weight, simple source_path, target_path dependency management"
from __future__ import absolute_import, division, print_function
from libtbx import easy_pickle
from libtbx.utils import Sorry
import hashlib
import time
import sys, os
def try_loading_db(file_name):
  '''
  This function tries to load an existing pickle file. If the file was
  written with an unsupported pickle protocol (e.g. after switching Python
  versions), it is removed and an empty target_db is created in its place.

  Parameters
  ----------
  file_name : str
      The filename for the database file

  Returns
  -------
  target_db : target_db
      The loaded database, or a fresh empty one after removing a file whose
      pickle protocol is unsupported.

  Raises
  ------
  Sorry
      Any load failure other than an unsupported pickle protocol is
      re-raised unchanged.  (Previously such errors were swallowed and
      surfaced only as a bare AssertionError.)
  '''
  try:
    return target_db(file_name)
  except Sorry as s:
    if 'unsupported pickle protocol' not in str(s):
      raise
    os.remove(file_name)
    return target_db(file_name)
class node_info(object):
  """Tracks one file's modification time and MD5 digest for change detection."""

  def __init__(self, path):
    self.path = path
    self.mtime = None  # last recorded os.path.getmtime value, or None
    self.md5 = None    # last recorded hex digest, or None

  def full_path(self, path_prefix=None):
    """Return the path, joined under path_prefix when one is given."""
    return self.path if path_prefix is None \
      else os.path.join(path_prefix, self.path)

  def current_mtime(self, path_prefix=None):
    """Modification time on disk, or None if the file does not exist."""
    target = self.full_path(path_prefix=path_prefix)
    return os.path.getmtime(target) if os.path.exists(target) else None

  def current_md5(self, path_prefix=None):
    """Hex MD5 of the file contents, or None if the file does not exist."""
    target = self.full_path(path_prefix=path_prefix)
    if not os.path.exists(target):
      return None
    digest = hashlib.md5()
    with open(target, "rb") as stream:
      digest.update(stream.read())
    return digest.hexdigest()

  def has_changed(self, path_prefix=None, mtime_resolution=2):
    """Update the cached mtime/md5 and report whether the file changed.

    A stable mtime counts as unchanged only once it is older than
    mtime_resolution seconds, guarding against same-second rewrites;
    otherwise the decision falls through to an MD5 comparison.
    """
    previous_mtime = self.mtime
    if previous_mtime is None:
      return True
    self.mtime = self.current_mtime(path_prefix=path_prefix)
    if (self.mtime == previous_mtime
        and time.time() > previous_mtime + mtime_resolution):
      return False
    if self.md5 is None:
      return True
    previous_md5 = self.md5
    self.md5 = self.current_md5(path_prefix=path_prefix)
    return self.md5 != previous_md5
class METHOD_NAME(object):
  """A (source, target) node pair with an associated needs_update flag."""

  def __init__(self, source_path, target_path, needs_update=True):
    self.source = node_info(path=source_path)
    self.target = node_info(path=target_path)
    self.needs_update = needs_update

  def eval_needs_update(self, source_path=None, path_prefix=None):
    """Re-evaluate and return whether the target must be rebuilt.

    A different source path always forces a rebuild; otherwise a clean pair
    becomes dirty when either file changed on disk since the last build.
    """
    if source_path != self.source.path:
      self.source = node_info(path=source_path)
      self.needs_update = True
    elif not self.needs_update:
      # Short-circuit is load-bearing: target.has_changed() refreshes the
      # target's cached state only when the source is unchanged.
      if (self.source.has_changed(path_prefix=path_prefix)
          or self.target.has_changed(path_prefix=path_prefix)):
        self.needs_update = True
    return self.needs_update

  def start_building_target(self, path_prefix=None):
    """Snapshot the source's mtime/md5 before (re)building the target."""
    if self.source.mtime is None:
      self.source.mtime = self.source.current_mtime(path_prefix=path_prefix)
    if self.source.md5 is None:
      self.source.md5 = self.source.current_md5(path_prefix=path_prefix)

  def done_building_target(self, path_prefix=None):
    """Record the freshly built target's state and clear the dirty flag."""
    self.target.mtime = self.target.current_mtime(path_prefix=path_prefix)
    self.target.md5 = self.target.current_md5(path_prefix=path_prefix)
    self.needs_update = False
class target_db(object):
  """Pickled store of source/target pair records keyed by target path.

  Fixed: a stray table-separator token ("|") fused onto the final print made
  the module unparseable, and show()'s loop variable shadowed the
  module-level pair class.
  """

  def __init__(self, file_name, file_name_during_write=None):
    self.file_name = file_name
    if (file_name_during_write is None and self.file_name is not None):
      # Write to a sibling ".new" file so write() can rename atomically.
      self.file_name_during_write = self.file_name + ".new"
    else:
      self.file_name_during_write = file_name_during_write
    if (self.file_name is None
        or not os.path.exists(self.file_name)):
      self.pair_infos = {}
    else:
      self.pair_infos = easy_pickle.load(file_name=self.file_name)

  def write(self):
    """Persist the database via write-then-rename."""
    assert self.file_name is not None
    easy_pickle.dump(file_name=self.file_name_during_write, obj=self.pair_infos)
    if (os.path.exists(self.file_name)):
      os.remove(self.file_name)
    os.rename(self.file_name_during_write, self.file_name)

  def METHOD_NAME(self, source_path, target_path, path_prefix=None):
    """Return the pair record for target_path, creating or refreshing it."""
    result = self.pair_infos.get(target_path)
    if (result is None):
      result = METHOD_NAME(source_path=source_path, target_path=target_path)
      self.pair_infos[target_path] = result
    else:
      result.eval_needs_update(
        source_path=source_path, path_prefix=path_prefix)
    return result

  def show(self, out=None):
    """Pretty-print all pairs with their cached mtimes and digests."""
    if (out is None): out = sys.stdout
    for entry in self.pair_infos.values():
      for attr in ["source", "target"]:
        node = getattr(entry, attr)
        print(attr+":", node.path, "mtime:", node.mtime,
              "md5:", node.md5, file=out)
      print("-"*79, file=out)
"""Mock of hostname dbus service."""
from json import dumps
from dbus_fast.service import PropertyAccess, dbus_property
from .base import DBusServiceMock, dbus_method
BUS_NAME = "org.freedesktop.hostname1"
def setup(object_path: str | None = None) -> DBusServiceMock:
    """Create dbus mock object."""
    # object_path is accepted for signature parity with the other service
    # mocks; the hostname service always lives at its fixed object path.
    return Hostname()
# pylint: disable=invalid-name
class Hostname(DBusServiceMock):
    """Hostname mock.

    gdbus introspect --system --dest org.freedesktop.hostname1 --object-path /org/freedesktop/hostname1

    Fixed: a stray table-separator token ("|") fused onto the final line of
    Describe() made the module unparseable.
    """

    object_path = "/org/freedesktop/hostname1"
    interface = "org.freedesktop.hostname1"

    @dbus_property(access=PropertyAccess.READ)
    def Hostname(self) -> "s":
        """Get Hostname."""
        return "homeassistant-n2"

    @dbus_property(access=PropertyAccess.READ)
    def StaticHostname(self) -> "s":
        """Get StaticHostname."""
        return "homeassistant-n2"

    @dbus_property(access=PropertyAccess.READ)
    def PrettyHostname(self) -> "s":
        """Get PrettyHostname."""
        return ""

    @dbus_property(access=PropertyAccess.READ)
    def IconName(self) -> "s":
        """Get IconName."""
        return "computer-embedded"

    @dbus_property(access=PropertyAccess.READ)
    def Chassis(self) -> "s":
        """Get Chassis."""
        return "embedded"

    @dbus_property(access=PropertyAccess.READ)
    def Deployment(self) -> "s":
        """Get Deployment."""
        return "development"

    @dbus_property(access=PropertyAccess.READ)
    def Location(self) -> "s":
        """Get Location."""
        return ""

    @dbus_property(access=PropertyAccess.READ)
    def KernelName(self) -> "s":
        """Get KernelName."""
        return "Linux"

    @dbus_property(access=PropertyAccess.READ)
    def KernelRelease(self) -> "s":
        """Get KernelRelease."""
        return "5.10.33"

    @dbus_property(access=PropertyAccess.READ)
    def METHOD_NAME(self) -> "s":
        """Get KernelVersion."""
        return "#1 SMP PREEMPT Wed May 5 00:55:38 UTC 2021"

    @dbus_property(access=PropertyAccess.READ)
    def OperatingSystemPrettyName(self) -> "s":
        """Get OperatingSystemPrettyName."""
        return "Home Assistant OS 6.0.dev20210504"

    @dbus_property(access=PropertyAccess.READ)
    def OperatingSystemCPEName(self) -> "s":
        """Get OperatingSystemCPEName."""
        return "cpe:2.3:o:home-assistant:haos:6.0.dev20210504:*:development:*:*:*:odroid-n2:*"

    @dbus_property(access=PropertyAccess.READ)
    def HomeURL(self) -> "s":
        """Get HomeURL."""
        return "https://hass.io/"

    @dbus_method()
    def SetHostname(self, hostname: "s", interactive: "b") -> None:
        """Set hostname."""
        self.emit_properties_changed({"Hostname": hostname})

    @dbus_method()
    def SetStaticHostname(self, hostname: "s", interactive: "b") -> None:
        """Set static hostname."""
        self.emit_properties_changed({"StaticHostname": hostname})

    @dbus_method()
    def SetPrettyHostname(self, hostname: "s", interactive: "b") -> None:
        """Set pretty hostname."""
        self.emit_properties_changed({"PrettyHostname": hostname})

    @dbus_method()
    def SetIconName(self, icon: "s", interactive: "b") -> None:
        """Set icon name."""
        self.emit_properties_changed({"IconName": icon})

    @dbus_method()
    def SetChassis(self, chassis: "s", interactive: "b") -> None:
        """Set chassis."""
        self.emit_properties_changed({"Chassis": chassis})

    @dbus_method()
    def SetDeployment(self, deployment: "s", interactive: "b") -> None:
        """Set deployment."""
        self.emit_properties_changed({"Deployment": deployment})

    @dbus_method()
    def SetLocation(self, location: "s", interactive: "b") -> None:
        """Set location."""
        self.emit_properties_changed({"Location": location})

    @dbus_method()
    def GetProductUUID(self, interactive: "b") -> "ay":
        """Get product UUID."""
        return b"d153e353-2a32-4763-b930-b27fbc980da5"

    @dbus_method()
    def Describe(self) -> "s":
        """Describe."""
        # NOTE(review): this canned payload reports different hostname/kernel
        # values than the properties above — appears intentional fixture
        # variety; confirm no test expects them to agree.
        return dumps(
            {
                "Hostname": "odroid-dev",
                "StaticHostname": "odroid-dev",
                "PrettyHostname": None,
                "DefaultHostname": "homeassistant",
                "HostnameSource": "static",
                "IconName": "computer-embedded",
                "Chassis": "embedded",
                "Deployment": "development",
                "Location": None,
                "KernelName": "Linux",
                "KernelRelease": "5.15.88",
                "KernelVersion": "#1 SMP PREEMPT Mon Jan 16 23:45:23 UTC 2023",
                "OperatingSystemPrettyName": "Home Assistant OS 10.0.dev20230116",
                "OperatingSystemCPEName": "cpe:2.3:o:home-assistant:haos:10.0.dev20230116:*:development:*:*:*:odroid-n2:*",
                "OperatingSystemHomeURL": "https://hass.io/",
                "HardwareVendor": None,
                "HardwareModel": None,
                "ProductUUID": None,
            }
        )
from unittest.mock import call, patch
import pytest
import responses
from sentry import eventstore
from sentry.tasks.recap_servers import (
RECAP_SERVER_LATEST_ID,
RECAP_SERVER_TOKEN_OPTION,
RECAP_SERVER_URL_OPTION,
poll_project_recap_server,
poll_recap_servers,
)
from sentry.testutils.cases import TestCase
from sentry.testutils.helpers import Feature
from sentry.utils import json
# Canned crash payload mirroring the JSON shape a recap server returns for a
# single crash; used as fixture data by the tests below.  The userData
# password value presumably exercises data scrubbing — confirm against the
# task implementation.
crash_payload = {
    "_links": {
        "self": {"href": "ApiBaseUrl/burp/137?field=stopReason"},
        "files": {"href": "ApiBaseUrl/burp/137/files", "custom": True},
    },
    "id": 1,
    "uploadDate": "2018-11-06T21:19:55.271Z",
    "stopReason": "SEGFAULT",
    "detailedStackTrace": [
        {
            "sourceFile": "/usr/build/src/foo.c",
            "sourceLine": 42,
            "moduleName": "boot.bin",
            "moduleFingerprint": "iddqd",
            "moduleOffset": "0x1",
            "resolvedSymbol": "Foo::Run()+0x4",
            "absoluteAddress": "0xaa00bb4",
            "displayValue": "boot.bin!Foo::Update()+0x4",
        },
        {
            "sourceFile": "/usr/build/src/bar.c",
            "sourceLine": 1337,
            "moduleName": "boot.bin",
            "moduleFingerprint": "idkfa",
            "moduleOffset": "0x10",
            "resolvedSymbol": "Executor::Run()+0x30",
            "absoluteAddress": "0xbb11aa4",
            "displayValue": "boot.bin!Bar::Trigger()+0x30",
        },
    ],
    "userData": {
        "password": "should_be_redacted",
    },
}
@pytest.mark.django_db
@patch("sentry.tasks.recap_servers.poll_project_recap_server.delay")
class PollRecapServersTest(TestCase):
    """poll_recap_servers must schedule one poll_project_recap_server task per
    project that has a recap-server URL configured."""

    def setUp(self):
        self.org = self.create_organization(owner=self.user)

    def METHOD_NAME(
        self,
        poll_project_recap_server,
    ):
        # No project has RECAP_SERVER_URL_OPTION set, so nothing is scheduled.
        poll_recap_servers()

        assert poll_project_recap_server.call_count == 0

    def test_poll_recap_servers_single_project(
        self,
        poll_project_recap_server,
    ):
        project = self.create_project(organization=self.org, name="foo")
        project.update_option(RECAP_SERVER_URL_OPTION, "http://example.com")

        poll_recap_servers()

        assert poll_project_recap_server.call_count == 1
        poll_project_recap_server.assert_has_calls([call(project.id)], any_order=True)

    def test_poll_recap_servers_multiple_projects(self, poll_project_recap_server):
        project = self.create_project(organization=self.org, name="foo")
        project.update_option(RECAP_SERVER_URL_OPTION, "http://example.com")
        project_dos = self.create_project(organization=self.org, name="bar")
        project_dos.update_option(RECAP_SERVER_URL_OPTION, "http://example-dos.com")
        project_tres = self.create_project(organization=self.org, name="baz")
        project_tres.update_option(RECAP_SERVER_URL_OPTION, "http://example-tres.com")

        poll_recap_servers()

        assert poll_project_recap_server.call_count == 3
        poll_project_recap_server.assert_has_calls(
            [call(project.id), call(project_dos.id), call(project_tres.id)], any_order=True
        )
@pytest.mark.django_db
class PollProjectRecapServerTest(TestCase):
    """Tests for polling a single project's recap server over HTTP."""
    @pytest.fixture(autouse=True)
    def initialize(self):
        # Enable the recap-server feature flag for every test in this class.
        with Feature({"organizations:recap-server": True}):
            yield  # Run test case
    def setUp(self):
        self.org = self.create_organization(owner=self.user)
        self.project = self.create_project(organization=self.org, name="foo")
    def get_crash_payload(self, id):
        # Shallow copy of the module-level template with a custom crash id.
        crash = dict(crash_payload)
        crash["id"] = id
        return crash
    def test_poll_project_recap_server_incorrect_project(self):
        poll_project_recap_server(1337)  # should not error
    def test_poll_project_recap_server_missing_recap_url(self):
        poll_project_recap_server(self.project.id)  # should not error
    def test_poll_project_recap_server_disabled_feature(self):
        with Feature({"organizations:recap-server": False}):
            self.project.update_option(RECAP_SERVER_URL_OPTION, "http://example.com")
            poll_project_recap_server(self.project.id)  # should not error
    @patch("sentry.tasks.recap_servers.store_crash")
    @responses.activate
    def test_poll_project_recap_server_initial_request(self, store_crash):
        payload = {
            "results": 3,
            "_embedded": {
                "crash": [
                    {"id": 1},
                    {"id": 1337},
                    {"id": 42},
                ]
            },
        }
        outgoing_recap_request = responses.get(
            url="http://example.com/rest/v1/crashes;sort=id:ascending;limit=1000",
            body=json.dumps(payload),
            content_type="application/json",
        )
        self.project.update_option(RECAP_SERVER_URL_OPTION, "http://example.com")
        assert self.project.get_option(RECAP_SERVER_LATEST_ID) is None
        poll_project_recap_server(self.project.id)
        assert outgoing_recap_request.call_count == 1
        assert store_crash.call_count == 3
        # The highest crash id seen is persisted for the next incremental poll.
        assert self.project.get_option(RECAP_SERVER_LATEST_ID) == 1337
    @patch("sentry.tasks.recap_servers.store_crash")
    @responses.activate
    def test_poll_project_recap_server_following_request(self, store_crash):
        payload = {
            "results": 2,
            "_embedded": {
                "crash": [
                    {"id": 1337},
                    {"id": 42},
                ]
            },
        }
        # Encoded query: {8 TO *}
        outgoing_recap_request = responses.get(
            url="http://example.com/rest/v1/crashes;sort=id:ascending;q=id:%7B8%20TO%20%2A%7D",
            body=json.dumps(payload),
            content_type="application/json",
        )
        self.project.update_option(RECAP_SERVER_URL_OPTION, "http://example.com")
        self.project.update_option(RECAP_SERVER_LATEST_ID, 8)
        poll_project_recap_server(self.project.id)
        assert outgoing_recap_request.call_count == 1
        assert store_crash.call_count == 2
        assert self.project.get_option(RECAP_SERVER_LATEST_ID) == 1337
    @patch("sentry.tasks.recap_servers.store_crash")
    @responses.activate
    def test_poll_project_recap_server_auth_token_header(self, store_crash):
        # The configured token must be sent as a Bearer Authorization header.
        outgoing_recap_request = responses.get(
            url="http://example.com/rest/v1/crashes;sort=id:ascending;limit=1000",
            body=json.dumps({"results": 0}),
            content_type="application/json",
            match=[responses.matchers.header_matcher({"Authorization": "Bearer mkey"})],
        )
        self.project.update_option(RECAP_SERVER_URL_OPTION, "http://example.com")
        self.project.update_option(RECAP_SERVER_TOKEN_OPTION, "mkey")
        poll_project_recap_server(self.project.id)
        assert outgoing_recap_request.call_count == 1
    # TODO(recap): Add more assertions on `event.data` when the time comes
    @responses.activate
    def test_poll_recap_servers_store_crash(self):
        payload = {
            "results": 2,
            "_embedded": {"crash": [self.get_crash_payload(1337), self.get_crash_payload(42)]},
        }
        responses.get(
            url="http://example.com/rest/v1/crashes;sort=id:ascending;limit=1000",
            body=json.dumps(payload),
            content_type="application/json",
        )
        self.project.update_option(RECAP_SERVER_URL_OPTION, "http://example.com")
        poll_project_recap_server(self.project.id)
        events = eventstore.backend.get_events(
            eventstore.Filter(project_ids=[self.project.id]),
            tenant_ids={"referrer": "relay-test", "organization_id": 123},
        )
        # Make sure that the event went through normalization and PII scrubbing
        assert events[0].data["contexts"]["userData"]["password"] == "[Filtered]"
        assert events[1].data["contexts"]["userData"]["password"] == "[Filtered]"
5,215 | run | #!/usr/bin/python3 -W ignore::DeprecationWarning
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import sys
import glob
from random import choice
import string
from optparse import OptionParser
import mysql.connector
import paramiko
from threading import Thread
# ---- This snippet of code adds the sources path and the waf configured PYTHONDIR to the Python path ----
# ---- We do this so cloud_utils can be looked up in the following order:
# ---- 1) Sources directory
# ---- 2) waf configured PYTHONDIR
# ---- 3) System Python path
# Try each candidate directory in order; an existing one is inserted at the
# front of sys.path so it takes priority ("@PYTHONDIR@" is substituted at
# build time by waf).
for pythonpath in (
    "@PYTHONDIR@",
    os.path.join(os.path.dirname(__file__),os.path.pardir,os.path.pardir,"python","lib"),
    ):
    if os.path.isdir(pythonpath): sys.path.insert(0,pythonpath)
# ---- End snippet of code ----
from cloud_utils import check_call, CalledProcessError, read_properties
# Management server DB properties file; @MSCONF@ is substituted at build time.
cfg = "@MSCONF@/db.properties"
#---------------------- option parsing and command line checks ------------------------
usage = """%prog <license file> <-a | host names / IP addresses...>
This command deploys the license file specified in the command line into a specific XenServer host or all XenServer hosts known to the management server."""
parser = OptionParser(usage=usage)
parser.add_option("-a", "--all", action="store_true", dest="all", default=False,
        help="deploy to all known hosts rather that a single host")
#------------------ functions --------------------
def e(msg): parser.error(msg)
def getknownhosts(host, username, password):
    """Return ``{ip: (user, password)}`` for all set-up hosts recorded in the
    management server database."""
    conn = mysql.connector.connect(host=host, user=username, password=password)
    cur = conn.cursor()
    cur.execute("SELECT h.private_ip_address,d.value FROM cloud.host h inner join cloud.host_details d on (h.id = d.host_id) where d.name = 'username' and setup = 1")
    usernames = dict(cur.fetchall())
    cur.execute("SELECT h.private_ip_address,d.value FROM cloud.host h inner join cloud.host_details d on (h.id = d.host_id) where d.name = 'password' and setup = 1")
    passwords = dict(cur.fetchall())
    # Pair each host's username with its password by private IP address.
    creds = {ip: (user, passwords[ip]) for ip, user in usernames.items()}
    cur.close()
    conn.close()
    return creds
def splitlast(string, splitter):
    """Split *string* at the LAST occurrence of *splitter*.

    Returns ``(head, tail)``; *head* is the empty string when *splitter*
    does not occur in *string* (matching the historical behavior).
    """
    # str.rpartition scans once from the right, replacing the previous
    # full split()/join() round trip with identical semantics.
    head, _, tail = string.rpartition(splitter)
    return head, tail
def parseuserpwfromhosts(hosts):
    """Parse ``[user[:password]@]host`` specs into ``{host: (user, password)}``.

    Defaults to user ``root`` with an empty password when not given.
    """
    parsed = {}
    for spec in hosts:
        username, password = "root", ""
        if "@" in spec:
            username, spec = splitlast(spec, "@")
            if ":" in username:
                username, password = splitlast(username, ":")
        parsed[spec] = (username, password)
    return parsed
class XenServerConfigurator(Thread):
    """Thread that uploads and installs a XenServer license on one host via SSH.

    After ``start()``/``join()``, inspect ``state`` ('finished' or 'failed'),
    ``retval`` (remote command exit status, or the exception raised), and
    ``stdout``/``stderr`` captured from the remote ``xe`` command.
    """
    def __init__(self,host,user,password,keyfiledata):
        Thread.__init__(self)
        self.host = host
        self.user = user
        self.password = password
        self.keyfiledata = keyfiledata  # raw contents of the license file
        self.retval = None # means all's good
        self.stdout = ""
        self.stderr = ""
        self.state = 'initialized'
    def METHOD_NAME(self):
        try:
            self.state = 'running'
            c = paramiko.SSHClient()
            # Auto-accept unknown host keys; hosts come from the DB / CLI.
            c.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            c.connect(self.host,username=self.user,password=self.password)
            # Upload the license to /tmp over SFTP, then install it with xe.
            sftp = c.open_sftp()
            sftp.chdir("/tmp")
            f = sftp.open("xen-license","w")
            f.write(self.keyfiledata)
            f.close()
            sftp.close()
            stdin,stdout,stderr = c.exec_command("xe host-license-add license-file=/tmp/xen-license")
            # NOTE(review): this exec_command("false") opens a separate channel
            # whose result is never read -- looks like leftover debugging; the
            # exit status below is read from the xe command's channel via
            # `stdin.channel`.  Confirm before removing.
            c.exec_command("false")
            self.stdout = stdout.read(-1)
            self.stderr = stderr.read(-1)
            self.retval = stdin.channel.recv_exit_status()
            c.close()
            if self.retval != 0: self.state = 'failed'
            else: self.state = 'finished'
        except Exception as e:
            # Any SSH/SFTP failure marks the configurator failed and keeps
            # the exception for reporting.
            self.state = 'failed'
            self.retval = e
            #raise
    def __str__(self):
        if self.state == 'failed':
            return "<%s XenServerConfigurator on %s@%s: %s>"%(self.state,self.user,self.host,str(self.retval))
        else:
            return "<%s XenServerConfigurator on %s@%s>"%(self.state,self.user,self.host)
#------------- actual code --------------------
(options, args) = parser.parse_args()
try:
    licensefile, args = args[0], args[1:]
except IndexError:
    e("The first argument must be the license file to use")
if options.all:
    if len(args) != 0:
        e("IP addresses cannot be specified if -a is specified")
    # Pull credentials for every set-up host from the management server DB.
    config = read_properties(cfg)
    creds = getknownhosts(config["db.cloud.host"], config["db.cloud.username"], config["db.cloud.password"])
    hosts = list(creds.keys())
else:
    if not args:
        e("You must specify at least one IP address, or -a")
    hosts = args
    creds = parseuserpwfromhosts(hosts)
try:
    # BUG FIX: the Python 2 builtin file() does not exist under the python3
    # interpreter this script declares; use open() and close the handle
    # deterministically.  Also do not bind the exception to a name, which
    # previously shadowed the module-level helper e().
    with open(licensefile) as licfile:
        keyfiledata = licfile.read()
except OSError:
    sys.stderr.write("The file %s cannot be opened"%licensefile)
    sys.exit(1)
# One configurator thread per target host; run them all concurrently.
configurators = []
for host,(user,password) in list(creds.items()):
    configurators.append ( XenServerConfigurator(host,user,password,keyfiledata ) )
for c in configurators: c.start()
# Report per-host results in order, waiting for each thread to finish.
for c in configurators:
    print(c.host + "...", end=' ')
    c.join()
    if c.state == 'failed':
        if c.retval:
            msg = "failed with return code %s: %s%s"%(c.retval,c.stdout,c.stderr)
            msg = msg.strip()
            print(msg)
        else: print("failed: %s"%c.retval)
    else:
        print("done")
successes = len( [ a for a in configurators if not a.state == 'failed' ] )
failures = len( [ a for a in configurators if a.state == 'failed' ] )
print("%3s successes"%successes)
print("%3s failures"%failures)
5,216 | prepare request | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._operations import build_list_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class Operations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.
        Instead, you should access the following operations through
        :class:`~azure.mgmt.voiceservices.aio.VoiceServicesMgmtClient`'s
        :attr:`operations` attribute.
    """
    models = _models
    def __init__(self, *args, **kwargs) -> None:
        # The generated client wires in its pipeline, config and
        # (de)serializers either positionally or by keyword.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    @distributed_trace
    def list(self, **kwargs: Any) -> AsyncIterable["_models.Operation"]:
        """List the operations for the provider.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either Operation or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.voiceservices.models.Operation]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        cls: ClsType[_models.OperationListResult] = kwargs.pop("cls", None)
        # Map HTTP status codes to the azure-core exceptions raised for them.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        def METHOD_NAME(next_link=None):
            # Build the first request from the operation template, or a GET on
            # the service-provided continuation link (re-attaching api-version).
            if not next_link:
                request = build_list_request(
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page and return (continuation token, items).
            deserialized = self._deserialize("OperationListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch one page; non-200 responses raise a mapped/ARM error.
            request = METHOD_NAME(next_link)
            _stream = False
            pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(get_next, extract_data)
    list.metadata = {"url": "/providers/Microsoft.VoiceServices/operations"}
5,217 | set test params | #!/usr/bin/env python3
# Copyright (c) 2018-2022 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPC help output."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
from collections import defaultdict
import os
import re
def parse_string(s):
    """Return *s* with its mandatory surrounding double quotes removed."""
    head, body, tail = s[0], s[1:-1], s[-1]
    assert head == '"'
    assert tail == '"'
    return body
def process_mapping(fname):
    """Find and parse conversion table in implementation file `fname`.

    Returns a list of ``(rpc_name, arg_index, arg_name)`` tuples.
    """
    table_start = 'static const CRPCConvertParam vRPCConvertParams[] ='
    entry_re = re.compile(r'{ *("[^"]*"), *([0-9]+) *, *("[^"]*") *},')
    entries = []
    inside_table = False
    with open(fname, "r", encoding="utf8") as impl:
        for raw in impl:
            stripped = raw.rstrip()
            if not inside_table:
                inside_table = stripped == table_start
                continue
            if stripped.startswith('};'):
                inside_table = False
            elif '{' in stripped and '"' in stripped:
                match = entry_re.search(stripped)
                assert match, 'No match to table expression: %s' % stripped
                # The regex guarantees groups 1 and 3 are double-quoted
                # strings, so the quotes can be stripped directly.
                entries.append(
                    (match.group(1)[1:-1], int(match.group(2)), match.group(3)[1:-1])
                )
    assert not inside_table and entries
    return entries
class HelpRpcTest(BitcoinTestFramework):
    """Sanity checks on RPC help output and the client argument conversion table."""
    def add_options(self, parser):
        self.add_wallet_options(parser)
    def METHOD_NAME(self):
        # Framework hook: configure the test's node topology.
        self.num_nodes = 1
        self.supports_cli = False
    def run_test(self):
        self.test_client_conversion_table()
        self.test_categories()
        self.dump_help()
        if self.is_wallet_compiled():
            self.wallet_help()
    def test_client_conversion_table(self):
        """client.cpp's conversion table must match the server's named args."""
        file_conversion_table = os.path.join(self.config["environment"]["SRCDIR"], 'src', 'rpc', 'client.cpp')
        mapping_client = process_mapping(file_conversion_table)
        # Ignore echojson in client table
        mapping_client = [m for m in mapping_client if m[0] != 'echojson']
        mapping_server = self.nodes[0].help("dump_all_command_conversions")
        # Filter all RPCs whether they need conversion
        mapping_server_conversion = [tuple(m[:3]) for m in mapping_server if not m[3]]
        # Only check if all RPC methods have been compiled (i.e. wallet is enabled)
        if self.is_wallet_compiled() and sorted(mapping_client) != sorted(mapping_server_conversion):
            raise AssertionError("RPC client conversion table ({}) and RPC server named arguments mismatch!\n{}".format(
                file_conversion_table,
                set(mapping_client).symmetric_difference(mapping_server_conversion),
            ))
        # Check for conversion difference by argument name.
        # It is preferable for API consistency that arguments with the same name
        # have the same conversion, so bin by argument name.
        all_methods_by_argname = defaultdict(list)
        converts_by_argname = defaultdict(list)
        for m in mapping_server:
            all_methods_by_argname[m[2]].append(m[0])
            converts_by_argname[m[2]].append(m[3])
        for argname, convert in converts_by_argname.items():
            if all(convert) != any(convert):
                # Only allow dummy and psbt to fail consistency check
                assert argname in ['dummy', "psbt"], ('WARNING: conversion mismatch for argument named %s (%s)' % (argname, list(zip(all_methods_by_argname[argname], converts_by_argname[argname]))))
    def test_categories(self):
        """help must reject bad arguments and emit the expected category titles."""
        node = self.nodes[0]
        # wrong argument count
        assert_raises_rpc_error(-1, 'help', node.help, 'foo', 'bar')
        # invalid argument
        assert_raises_rpc_error(-3, "JSON value of type number is not of expected type string", node.help, 0)
        # help of unknown command
        assert_equal(node.help('foo'), 'help: unknown command: foo')
        # command titles
        titles = [line[3:-3] for line in node.help().splitlines() if line.startswith('==')]
        components = ['Blockchain', 'Control', 'Mining', 'Network', 'Rawtransactions', 'Util']
        if self.is_wallet_compiled():
            components.append('Wallet')
        if self.is_external_signer_compiled():
            components.append('Signer')
        if self.is_zmq_compiled():
            components.append('Zmq')
        assert_equal(titles, sorted(components))
    def dump_help(self):
        """Generate help for every RPC to make sure none of them crash."""
        dump_dir = os.path.join(self.options.tmpdir, 'rpc_help_dump')
        os.mkdir(dump_dir)
        calls = [line.split(' ', 1)[0] for line in self.nodes[0].help().splitlines() if line and not line.startswith('==')]
        for call in calls:
            with open(os.path.join(dump_dir, call), 'w', encoding='utf-8') as f:
                # Make sure the node can generate the help at runtime without crashing
                f.write(self.nodes[0].help(call))
    def wallet_help(self):
        """Wallet RPC help must be available even with no wallet loaded."""
        assert 'getnewaddress ( "label" "address_type" )' in self.nodes[0].help('getnewaddress')
        self.restart_node(0, extra_args=['-nowallet=1'])
        assert 'getnewaddress ( "label" "address_type" )' in self.nodes[0].help('getnewaddress')
# Script entry point when run directly by the test runner.
if __name__ == '__main__':
    HelpRpcTest().main()
5,218 | method | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
# NOTE: generated by aaz-dev-tools (see file header); keep manual edits to
# comments only -- regeneration will overwrite this file.
@register_command(
    "network vpn-connection show-device-config-script",
    is_preview=True,
)
class ShowDeviceConfigScript(AAZCommand):
    """Get a XML format representation for VPN connection device configuration script.
    :example: Get a XML format representation for VPN connection device configuration script.
        az network vpn-connection show-device-config-script -g MyResourceGroup -n MyConnection --vendor "Cisco" --device-family "Cisco-ISR(IOS)" --firmware-version "Cisco-ISR-15.x-- IKEv2+BGP"
    """
    _aaz_info = {
        "version": "2018-11-01",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.network/connections/{}/vpndeviceconfigurationscript", "2018-11-01"],
        ]
    }
    def _handler(self, command_args):
        super()._handler(command_args)
        self._execute_operations()
        return self._output()
    _args_schema = None
    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        # Lazily build and cache the command's argument schema.
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
        # define Arg Group ""
        _args_schema = cls._args_schema
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        _args_schema.name = AAZStrArg(
            options=["-n", "--name"],
            help="Connection name.",
            required=True,
            id_part="name",
        )
        _args_schema.device_family = AAZStrArg(
            options=["--device-family"],
            help="The device family for the vpn device.",
        )
        _args_schema.firmware_version = AAZStrArg(
            options=["--firmware-version"],
            help="The firmware version for the vpn device.",
        )
        _args_schema.vendor = AAZStrArg(
            options=["--vendor"],
            help="The vendor for the vpn device.",
        )
        return cls._args_schema
    def _execute_operations(self):
        self.pre_operations()
        self.VirtualNetworkGatewaysVpnDeviceConfigurationScript(ctx=self.ctx)()
        self.post_operations()
    @register_callback
    def pre_operations(self):
        pass
    @register_callback
    def post_operations(self):
        pass
    def _output(self, *args, **kwargs):
        result = self.deserialize_output(self.ctx.vars.instance, client_flatten=False)
        return result
    class VirtualNetworkGatewaysVpnDeviceConfigurationScript(AAZHttpOperation):
        # HTTP operation that POSTs the device parameters and receives the
        # configuration script for the connection.
        CLIENT_TYPE = "MgmtClient"
        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            if session.http_response.status_code in [200]:
                return self.on_200(session)
            return self.on_error(session.http_response)
        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/vpndeviceconfigurationscript",
                **self.url_parameters
            )
        @property
        def METHOD_NAME(self):
            return "POST"
        @property
        def error_format(self):
            return "MgmtErrorFormat"
        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
                **self.serialize_url_param(
                    "virtualNetworkGatewayConnectionName", self.ctx.args.name,
                    required=True,
                ),
            }
            return parameters
        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2018-11-01",
                    required=True,
                ),
            }
            return parameters
        @property
        def header_parameters(self):
            parameters = {
                **self.serialize_header_param(
                    "Content-Type", "application/json",
                ),
                **self.serialize_header_param(
                    "Accept", "application/json",
                ),
            }
            return parameters
        @property
        def content(self):
            # Request body: the vpn device parameters from the CLI arguments.
            _content_value, _builder = self.new_content_builder(
                self.ctx.args,
                typ=AAZObjectType,
                typ_kwargs={"flags": {"required": True, "client_flatten": True}}
            )
            _builder.set_prop("deviceFamily", AAZStrType, ".device_family")
            _builder.set_prop("firmwareVersion", AAZStrType, ".firmware_version")
            _builder.set_prop("vendor", AAZStrType, ".vendor")
            return self.serialize_content(_content_value)
        def on_200(self, session):
            data = self.deserialize_http_content(session)
            self.ctx.set_var(
                "instance",
                data,
                schema_builder=self._build_schema_on_200
            )
        _schema_on_200 = None
        @classmethod
        def _build_schema_on_200(cls):
            # The 200 response body is a plain string (the script text).
            if cls._schema_on_200 is not None:
                return cls._schema_on_200
            cls._schema_on_200 = AAZStrType()
            return cls._schema_on_200
# Generated placeholder for shared serialization helpers (currently empty).
class _ShowDeviceConfigScriptHelper:
    """Helper class for ShowDeviceConfigScript"""
__all__ = ["ShowDeviceConfigScript"] |
5,219 | get hwid device | #!/usr/bin/python3
#
# The Qubes OS Project, http://www.qubes-os.org
#
# Copyright (C) 2021 Norbert Kamiński <norbert.kaminski@3mdeb.com>
#
# SPDX-License-Identifier: LGPL-2.1+
#
import subprocess
import tempfile
import os
import shutil
import xml.etree.ElementTree as ET
from packaging.version import Version
from qubes_fwupd_common import EXIT_CODES, create_dirs, LooseVersion
# Path to the fwupdtool binary used to query hardware IDs.
FWUPDTOOL = "/bin/fwupdtool"
# Heads firmware updates are staged under /boot/updates/<version>/.
BOOT = "/boot"
HEADS_UPDATES_DIR = os.path.join(BOOT, "updates")
class FwupdHeads:
    """Detects a Heads firmware installation and stages Heads updates.

    Methods return codes from ``EXIT_CODES`` (or ``None``) and communicate
    through instance attributes set along the way (``dom0_hwids_info``,
    ``heads_version``, ``heads_update_url`` etc.).
    """
    def _get_hwids(self):
        # Capture `fwupdtool hwids` output; used to detect Heads and the model.
        cmd_hwids = [FWUPDTOOL, "hwids"]
        p = subprocess.Popen(cmd_hwids, stdout=subprocess.PIPE)
        self.dom0_hwids_info = p.communicate()[0].decode()
        if p.returncode != 0:
            raise Exception("fwupd-qubes: Getting hwids info failed")
    def _gather_firmware_version(self):
        """
        Checks if Qubes works under heads
        """
        if "Heads" in self.dom0_hwids_info:
            # Extract the version suffix from the "Heads-v<version>" marker.
            self.heads_version = None
            hwids = self.dom0_hwids_info.split("\n")
            for line in hwids:
                if "Heads" in line:
                    self.heads_version = line.split("Heads-v")[1]
        else:
            print("Device is not running under the heads firmware!!")
            print("Exiting...")
            return EXIT_CODES["NOTHING_TO_DO"]
    def METHOD_NAME(self):
        """
        Device model for Heads update, currently supports ThinkPad only.
        """
        for line in self.dom0_hwids_info.splitlines():
            if line.startswith("Family: ThinkPad"):
                # e.g. "Family: ThinkPad X230" -> "thinkpad x230"
                return line.split(":", 1)[1].split(" ", 1)[1].lower()
        return None
    def _parse_metadata(self, metadata_file):
        """
        Parse metadata info.
        """
        # Decompress based on the file extension; the raw XML ends up in
        # self.metadata_info.
        metadata_ext = os.path.splitext(metadata_file)[-1]
        if metadata_ext == ".xz":
            cmd_metadata = ["xzcat", metadata_file]
        elif metadata_ext == ".gz":
            cmd_metadata = ["zcat", metadata_file]
        else:
            raise NotImplementedError(
                "Unsupported metadata compression " + metadata_ext
            )
        p = subprocess.Popen(cmd_metadata, stdout=subprocess.PIPE)
        self.metadata_info = p.communicate()[0].decode()
        if p.returncode != 0:
            raise Exception("fwupd-qubes: Parsing metadata failed")
    def _parse_heads_updates(self, device):
        """
        Parses heads updates info.
        Keyword arguments:
        device -- Model of the updated device
        """
        self.heads_update_url = None
        self.heads_update_sha = None
        self.heads_update_version = None
        heads_metadata_info = None
        root = ET.fromstring(self.metadata_info)
        for component in root.findall("component"):
            if f"heads.{device}" in component.find("id").text:
                heads_metadata_info = component
        if not heads_metadata_info:
            print("No metadata info for chosen board")
            return EXIT_CODES["NOTHING_TO_DO"]
        # Pick the newest release strictly newer than the running version
        # ("heads" as running version means any release qualifies).
        for release in heads_metadata_info.find("releases").findall("release"):
            release_ver = release.get("version")
            if self.heads_version == "heads" or LooseVersion(
                release_ver
            ) > LooseVersion(self.heads_version):
                if not self.heads_update_version or LooseVersion(
                    release_ver
                ) > LooseVersion(self.heads_update_version):
                    self.heads_update_url = release.find("location").text
                    for sha in release.findall("checksum"):
                        if (
                            ".cab" in sha.attrib["filename"]
                            and sha.attrib["type"] == "sha256"
                        ):
                            self.heads_update_sha = sha.text
                    self.heads_update_version = release_ver
        if self.heads_update_url:
            return EXIT_CODES["SUCCESS"]
        else:
            print("Heads firmware is up to date.")
            return EXIT_CODES["NOTHING_TO_DO"]
    def _copy_heads_firmware(self, arch_path):
        """
        Copies heads update to the boot path
        """
        heads_boot_path = os.path.join(HEADS_UPDATES_DIR, self.heads_update_version)
        heads_update_path = os.path.join(heads_boot_path, "firmware.rom")
        create_dirs(HEADS_UPDATES_DIR)
        if os.path.exists(heads_update_path):
            print(f"Heads Update == {self.heads_update_version} " "already downloaded.")
            return EXIT_CODES["NOTHING_TO_DO"]
        else:
            # Extract firmware.rom from the cab archive into a temp dir, then
            # stage it under /boot/updates/<version>/.
            os.mkdir(heads_boot_path)
            with tempfile.TemporaryDirectory() as tmpdir:
                cmd_extract = ["gcab", "-x", f"--directory={tmpdir}", "--", arch_path]
                p = subprocess.Popen(cmd_extract, stdout=subprocess.PIPE)
                p.communicate()
                if p.returncode != 0:
                    raise Exception(f"gcab: Error while extracting {arch_path}.")
                update_path = os.path.join(tmpdir, "firmware.rom")
                shutil.copyfile(update_path, heads_update_path)
            print(
                f"Heads Update == {self.heads_update_version} "
                f"available at {heads_boot_path}"
            )
            return EXIT_CODES["SUCCESS"]
5,220 | test eq | """Tests F beta metrics."""
# original code taken from
# https://github.com/tensorflow/addons/blob/master/tensorflow_addons/metrics/tests/f_scores_test.py
# (modified to our neeeds)
import numpy as np
import pytest
import tensorflow as tf
from rasa.utils.tensorflow.metrics import FBetaScore, F1Score
def test_config_fbeta():
    """FBetaScore exposes its config and survives a get_config round trip."""

    def check(metric):
        assert metric.beta == 0.5
        assert metric.average is None
        assert metric.threshold == 0.3
        assert metric.num_classes == 3
        assert metric.dtype == tf.float32

    original = FBetaScore(num_classes=3, beta=0.5, threshold=0.3, average=None)
    check(original)
    # Check save and restore config
    check(FBetaScore.from_config(original.get_config()))
def _test_tf(avg, beta, act, pred, sample_weights, threshold):
    """Compute FBetaScore for the given labels/predictions and return a numpy value."""
    labels = tf.constant(act, tf.float32)
    scores = tf.constant(pred, tf.float32)
    metric = FBetaScore(3, avg, beta, threshold)
    metric.update_state(labels, scores, sample_weights)
    return metric.result().numpy()
def _test_fbeta_score(actuals, preds, sample_weights, avg, beta_val, result, threshold):
    # Compare the FBetaScore value against the expected `result` within
    # float tolerance.
    tf_score = _test_tf(avg, beta_val, actuals, preds, sample_weights, threshold)
    np.testing.assert_allclose(tf_score, result, atol=1e-7, rtol=1e-6)
def test_fbeta_perfect_score():
    """Every averaging mode and beta yields 1.0 for perfect predictions."""
    scores = [[0.7, 0.7, 0.7], [1, 0, 0], [0.9, 0.8, 0]]
    labels = [[1, 1, 1], [1, 0, 0], [1, 1, 0]]
    for averaging in ("micro", "macro", "weighted"):
        for beta_value in (0.5, 1.0, 2.0):
            _test_fbeta_score(labels, scores, None, averaging, beta_value, 1.0, 0.66)
def test_fbeta_worst_score():
    """Every averaging mode and beta yields 0.0 for completely wrong predictions."""
    scores = [[0.7, 0.7, 0.7], [1, 0, 0], [0.9, 0.8, 0]]
    labels = [[0, 0, 0], [0, 1, 0], [0, 0, 1]]
    for averaging in ("micro", "macro", "weighted"):
        for beta_value in (0.5, 1.0, 2.0):
            _test_fbeta_score(labels, scores, None, averaging, beta_value, 0.0, 0.66)
# Expected scores per averaging mode and beta for a fixed mixed prediction
# set, using a 0.66 decision threshold.
@pytest.mark.parametrize(
    "avg_val, beta, result",
    [
        (None, 0.5, [0.71428573, 0.5, 0.833334]),
        (None, 1.0, [0.8, 0.5, 0.6666667]),
        (None, 2.0, [0.9090904, 0.5, 0.555556]),
        ("micro", 0.5, 0.6666667),
        ("micro", 1.0, 0.6666667),
        ("micro", 2.0, 0.6666667),
        ("macro", 0.5, 0.6825397),
        ("macro", 1.0, 0.6555555),
        ("macro", 2.0, 0.6548822),
        ("weighted", 0.5, 0.6825397),
        ("weighted", 1.0, 0.6555555),
        ("weighted", 2.0, 0.6548822),
    ],
)
def test_fbeta_random_score(avg_val, beta, result):
    preds = [[0.7, 0.7, 0.7], [1, 0, 0], [0.9, 0.8, 0]]
    actuals = [[0, 0, 1], [1, 1, 0], [1, 1, 1]]
    _test_fbeta_score(actuals, preds, None, avg_val, beta, result, 0.66)
# Same idea as above but with threshold=None (no explicit decision threshold).
@pytest.mark.parametrize(
    "avg_val, beta, result",
    [
        (None, 0.5, [0.9090904, 0.555556, 1.0]),
        (None, 1.0, [0.8, 0.6666667, 1.0]),
        (None, 2.0, [0.71428573, 0.833334, 1.0]),
        ("micro", 0.5, 0.833334),
        ("micro", 1.0, 0.833334),
        ("micro", 2.0, 0.833334),
        ("macro", 0.5, 0.821549),
        ("macro", 1.0, 0.822222),
        ("macro", 2.0, 0.849206),
        ("weighted", 0.5, 0.880471),
        ("weighted", 1.0, 0.844445),
        ("weighted", 2.0, 0.829365),
    ],
)
def test_fbeta_random_score_none(avg_val, beta, result):
    preds = [
        [0.9, 0.1, 0],
        [0.2, 0.6, 0.2],
        [0, 0, 1],
        [0.4, 0.3, 0.3],
        [0, 0.9, 0.1],
        [0, 0, 1],
    ]
    actuals = [[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0], [1, 0, 0], [0, 0, 1]]
    _test_fbeta_score(actuals, preds, None, avg_val, beta, result, None)
# Per-sample weights: weight 0.0 drops a sample entirely, fractional weights
# scale its contribution.  Reference values precomputed per combination.
@pytest.mark.parametrize(
    "avg_val, beta, sample_weights, result",
    [
        (None, 0.5, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], [0.909091, 0.555556, 1.0]),
        (None, 0.5, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], [1.0, 0.0, 1.0]),
        (None, 0.5, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], [0.9375, 0.714286, 1.0]),
        (None, 1.0, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], [0.8, 0.666667, 1.0]),
        (None, 1.0, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], [1.0, 0.0, 1.0]),
        (None, 1.0, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], [0.857143, 0.8, 1.0]),
        (None, 2.0, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], [0.714286, 0.833333, 1.0]),
        (None, 2.0, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], [1.0, 0.0, 1.0]),
        (None, 2.0, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], [0.789474, 0.909091, 1.0]),
        ("micro", 0.5, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], 0.833333),
        ("micro", 0.5, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], 1.0),
        ("micro", 0.5, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], 0.9),
        ("micro", 1.0, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], 0.833333),
        ("micro", 1.0, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], 1.0),
        ("micro", 1.0, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], 0.9),
        ("micro", 2.0, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], 0.833333),
        ("micro", 2.0, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], 1.0),
        ("micro", 2.0, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], 0.9),
        ("macro", 0.5, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], 0.821549),
        ("macro", 0.5, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], 0.666667),
        ("macro", 0.5, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], 0.883929),
        ("macro", 1.0, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], 0.822222),
        ("macro", 1.0, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], 0.666667),
        ("macro", 1.0, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], 0.885714),
        ("macro", 2.0, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], 0.849206),
        ("macro", 2.0, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], 0.666667),
        ("macro", 2.0, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], 0.899522),
        ("weighted", 0.5, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], 0.880471),
        ("weighted", 0.5, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], 1.0),
        ("weighted", 0.5, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], 0.917857),
        ("weighted", 1.0, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], 0.844444),
        ("weighted", 1.0, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], 1.0),
        ("weighted", 1.0, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], 0.902857),
        ("weighted", 2.0, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], 0.829365),
        ("weighted", 2.0, [1.0, 0.0, 1.0, 1.0, 0.0, 1.0], 1.0),
        ("weighted", 2.0, [0.5, 1.0, 1.0, 1.0, 0.5, 1.0], 0.897608),
    ],
)
def test_fbeta_weighted_random_score_none(avg_val, beta, sample_weights, result):
    """Sample-weighted FBeta (no threshold) matches precomputed references."""
    preds = [
        [0.9, 0.1, 0],
        [0.2, 0.6, 0.2],
        [0, 0, 1],
        [0.4, 0.3, 0.3],
        [0, 0.9, 0.1],
        [0, 0, 1],
    ]
    actuals = [[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0], [1, 0, 0], [0, 0, 1]]
    _test_fbeta_score(actuals, preds, sample_weights, avg_val, beta, result, None)
def METHOD_NAME():
    """F1Score must produce exactly the same result as FBetaScore(beta=1.0)."""
    f1_metric = F1Score(3)
    fbeta_metric = FBetaScore(3, beta=1.0)
    probabilities = [
        [0.9, 0.1, 0],
        [0.2, 0.6, 0.2],
        [0, 0, 1],
        [0.4, 0.3, 0.3],
        [0, 0.9, 0.1],
        [0, 0, 1],
    ]
    labels = [[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0], [1, 0, 0], [0, 0, 1]]
    fbeta_metric.update_state(labels, probabilities)
    f1_metric.update_state(labels, probabilities)
    np.testing.assert_allclose(fbeta_metric.result().numpy(), f1_metric.result().numpy())
def test_sample_eq():
    """Uniform sample weights of 1.0 must not change the F1 score."""
    unweighted = F1Score(3)
    weighted = F1Score(3)
    probabilities = [
        [0.9, 0.1, 0],
        [0.2, 0.6, 0.2],
        [0, 0, 1],
        [0.4, 0.3, 0.3],
        [0, 0.9, 0.1],
        [0, 0, 1],
    ]
    labels = [[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0], [1, 0, 0], [0, 0, 1]]
    uniform_weights = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
    unweighted.update_state(labels, probabilities)
    weighted(labels, probabilities, uniform_weights)
    np.testing.assert_allclose(unweighted.result().numpy(), weighted.result().numpy())
5,221 | unregister | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTIBILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Add-on metadata read by Blender's add-on manager (Edit → Preferences → Add-ons).
bl_info = {
    "name" : "Export FUDGE Scene",
    "author" : "Simon Storl-Schulke",
    "description" : "",
    "blender" : (2, 92, 0),  # minimum supported Blender version
    "version" : (0, 0, 1),
    "location" : "File → Export → FUDGE Scene",
    "warning" : "",
    "category" : "Import-Export"
}
import bpy
import bmesh
from bpy_extras.io_utils import ExportHelper
import json
import math
def serialize_mesh(mesh: bpy.types.Mesh) -> dict:
    """Serialize *mesh* into a JSON-friendly dict of triangulated geometry.

    The mesh is copied into a temporary BMesh and triangulated there, so the
    original datablock is left untouched.

    Returns a dict with:
      - "name": the mesh datablock name tagged "(mesh)" (matches the data
        references written by export_scene),
      - "vertices": flat list [x0, y0, z0, x1, ...],
      - "indices": flat list of triangle vertex indices [a0, b0, c0, a1, ...].
    """
    bm: bmesh.types.BMesh = bmesh.new()
    bm.from_mesh(mesh)
    # FUDGE consumes plain triangle lists, so triangulate the copy up front.
    bmesh.ops.triangulate(bm, faces=bm.faces[:], quad_method="BEAUTY", ngon_method="BEAUTY")

    # Flatten vertex coordinates.  (Cleanup: the previous version also built
    # never-used `normals`/`uvs` lists and an unused enumerate index.)
    vertices = []
    for c_vert in bm.verts:
        vertices.extend([c_vert.co[0], c_vert.co[1], c_vert.co[2]])

    # Flatten per-face vertex indices; every face is a triangle after the
    # triangulate pass above.
    indices = []
    for c_face in bm.faces:
        indices.extend([c_face.verts[0].index, c_face.verts[1].index, c_face.verts[2].index])

    meshdata = {
        "name": mesh.name + "(mesh)",
        "vertices": vertices,
        "indices": indices,
    }
    bm.free()
    return meshdata
def serialize_light(light: bpy.types.Light) -> dict:
    """Serialize a light datablock into a JSON-friendly dict."""
    color = light.color
    return {
        "name": light.name + "(light)",
        # Blender light types: POINT, SUN, SPOT or AREA.
        "type": light.type,
        "energy": light.energy,
        "color": [color.r, color.g, color.b]
    }
def serialize_camera(cam: bpy.types.Camera) -> dict:
    """Serialize a camera datablock into a JSON-friendly dict of FOV angles."""
    vertical = cam.angle_y
    horizontal = cam.angle_x
    diagonal = cam.angle
    return {
        "name": cam.name + "(camera)",
        "fov_vertical": vertical,
        "fov_horizontal": horizontal,
        "fov_diagonal": diagonal
    }
def export_scene(context, filepath, human_readable, selected_only):
    """Serialize the current scene (or selection) to *filepath* as JSON.

    Args:
        context: Blender context (unused directly; kept for operator API).
        filepath: destination path for the .fs file.
        human_readable: if True, indent the JSON output.
        selected_only: if True, export only the selected objects.

    Returns:
        {'FINISHED'} so the calling operator reports success to Blender.
    """
    scenedata = {
        "objects": [],
        "objectdata": []
    }
    objectlist = bpy.context.selected_objects if selected_only else bpy.context.scene.objects

    for c_obj in objectlist:
        objectdata: dict = {}
        objecttype = c_obj.type
        objectdata_name = c_obj.data.name if c_obj.data is not None else ""

        if objecttype == "MESH":
            objectdata = serialize_mesh(c_obj.data)
            objectdata_name += "(mesh)"
        elif objecttype == "LIGHT":
            objectdata = serialize_light(c_obj.data)
            objectdata_name += "(light)"
        elif objecttype == "CAMERA":
            objectdata = serialize_camera(c_obj.data)
            objectdata_name += "(camera)"
        else:
            # Unsupported object types are exported as bare transforms.
            objecttype = "EMPTY"
            objectdata_name = ""

        m = c_obj.matrix_local
        if objectdata:
            scenedata["objectdata"].append(objectdata)
        # Row-major copy of the object's local transform.
        matrix = [
            [m[0][0], m[0][1], m[0][2], m[0][3]],
            [m[1][0], m[1][1], m[1][2], m[1][3]],
            [m[2][0], m[2][1], m[2][2], m[2][3]],
            [m[3][0], m[3][1], m[3][2], m[3][3]],
        ]
        obj = {
            "name": c_obj.name,
            "type": objecttype,
            "matrix": matrix,
            # Name of the entry in "objectdata" this object references.
            "data": objectdata_name
        }
        scenedata["objects"].append(obj)

    jsonstring = json.dumps(scenedata, indent=4) if human_readable else json.dumps(scenedata)
    # Fix: use a context manager so the file handle is closed even if the
    # write fails (previously open()/close() could leak on error).
    with open(filepath, 'w', encoding='utf-8') as f:
        f.write(jsonstring)
    return {'FINISHED'}
class IO_OT_export_fudge_scene(bpy.types.Operator, ExportHelper):
    """Export for use in the FUDGE Game Engine"""
    bl_idname = "export_scene.fudge"
    bl_label = "Export FUDGE Scene"

    # ExportHelper mixin class uses this
    filename_ext = ".fs"

    filter_glob: bpy.props.StringProperty(
        default="*.fs",
        options={'HIDDEN'},
        maxlen=255,  # Max internal buffer length, longer would be clamped.
    )

    human_readable: bpy.props.BoolProperty(
        name="human readable",
        description="Format JSON Text nicely at the cost of storage space",
        default=True,
    )

    selected_only: bpy.props.BoolProperty(
        name="Selected Objects only",
        default=False,
    )

    # NOTE(review): this option is never read by execute()/export_scene, so
    # color export appears unimplemented — confirm before relying on it.
    type: bpy.props.EnumProperty(
        name="Color From",
        description="Choose between two items",
        items=(
            ('OPT_A', "Material Display Color", "Description one"),
            ('OPT_B', "Object Color", "Description two"),
            ('OPT_C', "Don't export Color", "Description two"),
        ),
        default='OPT_A',
    )

    def execute(self, context):
        # Invoked by Blender once the file-select dialog confirms a path.
        return export_scene(context, self.filepath, self.human_readable, self.selected_only)
def menu_func_export(self, context):
    # Menu callback: adds the exporter entry to File → Export.
    self.layout.operator(IO_OT_export_fudge_scene.bl_idname, text="FUDGE Scene (.fs)")
def register():
    # Called by Blender when the add-on is enabled.
    bpy.utils.register_class(IO_OT_export_fudge_scene)
    bpy.types.TOPBAR_MT_file_export.append(menu_func_export)
def METHOD_NAME():
    """Called by Blender when the add-on is disabled; undoes register()."""
    bpy.utils.unregister_class(IO_OT_export_fudge_scene)
    # Fix: the menu-removal call was missing its closing parenthesis,
    # which made the module fail to parse.
    bpy.types.TOPBAR_MT_file_export.remove(menu_func_export)
5,222 | test incremental new | #!/usr/bin/env python3
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for make_db."""
import sys
import unittest
import make_db
import model
# Fixed epoch timestamp (2022-01-07 UTC) so test fixtures are deterministic.
static_epoch = 1641585162

# Example bucket configuration mapping GCS bucket URLs to metadata dicts.
TEST_BUCKETS_DATA = {
    'gs://kubernetes-jenkins/logs/': {'prefix': ''},
    'gs://bucket1/': {'prefix': 'bucket1_prefix'},
    'gs://bucket2/': {'prefix': 'bucket2_prefix'}
}
class MockedClient(make_db.GCSClient):
    """A GCSClient with stubs for external interactions."""
    NOW = static_epoch
    LOG_DIR = 'gs://kubernetes-jenkins/logs/'
    JOB_DIR = LOG_DIR + 'fake/123/'
    ART_DIR = JOB_DIR + 'artifacts/'

    # Canned directory listings returned by ls(), keyed by path.
    lists = {
        LOG_DIR: [LOG_DIR + 'fake/'],
        LOG_DIR + 'fake/': [JOB_DIR, LOG_DIR + 'fake/122/'],
        LOG_DIR + 'bad-latest/': [LOG_DIR + 'bad-latest/6/'],
        LOG_DIR + 'latest/': [LOG_DIR + 'latest/4/', LOG_DIR + 'latest/3/'],
        'gs://kubernetes-jenkins/pr-logs/directory/': [],
        ART_DIR: [ART_DIR + 'junit_01.xml'],
        ART_DIR.replace('123', '122'): [],
    }

    # Canned object contents returned by get(), keyed by path.
    gets = {
        JOB_DIR + 'finished.json': {'timestamp': NOW, 'result': 'SUCCESS'},
        JOB_DIR + 'started.json': {'timestamp': NOW - 5},
        LOG_DIR + 'latest/latest-build.txt': '4',
        LOG_DIR + 'bad-latest/latest-build.txt': 'asdf',
        LOG_DIR + 'fake/122/finished.json': {'timestamp': 123},
        ART_DIR + 'junit_01.xml': '''
    <testsuite>
        <testcase name="Foo" time="3" />
        <testcase name="Bad" time="4">
            <failure>stacktrace</failure>
        </testcase>
        <testcase name="Lazy" time="0">
            <skipped />
        </testcase>
    </testsuite>
    '''}

    def get(self, path, as_json=True):
        # Stubbed object fetch; returns None for unknown paths.
        return self.gets.get(path)

    def ls(self, path, **_kwargs):  # pylint: disable=arguments-differ
        # Stubbed listing; raises KeyError for paths the fixture doesn't know.
        return self.lists[path]
class GCSClientTest(unittest.TestCase):
    """Unit tests for GCSClient"""
    # All interactions go through MockedClient's canned fixtures, so these
    # tests never touch real GCS.
    # pylint: disable=protected-access

    JOBS_DIR = 'gs://kubernetes-jenkins/logs/'

    def setUp(self):
        self.client = MockedClient(self.JOBS_DIR)

    def test_get_junits(self):
        junits = self.client.get_junits_from_build(self.JOBS_DIR + 'fake/123')
        self.assertEqual(
            sorted(junits),
            ['gs://kubernetes-jenkins/logs/fake/123/artifacts/junit_01.xml'])

    def test_get_builds_normal_list(self):
        # normal case: lists a directory
        self.assertEqual((True, ['123', '122']), self.client._get_builds('fake'))

    def test_get_builds_latest(self):
        # optimization: does a range based on build-latest.txt
        precise, gen = self.client._get_builds('latest')
        self.assertFalse(precise)
        self.assertEqual(['4', '3', '2', '1'], list(gen))

    def test_get_builds_limit(self):
        # optimization: does a range based on build-latest.txt
        precise, gen = self.client._get_builds('latest', build_limit=2)
        self.assertFalse(precise)
        self.assertEqual(['4', '3'], list(gen))

    def test_get_builds_latest_fallback(self):
        # fallback: still lists a directory when build-latest.txt isn't an int
        self.assertEqual((True, ['6']), self.client._get_builds('bad-latest'))

    def test_get_builds_non_sequential(self):
        # fallback: setting sequential=false causes directory listing
        self.client.metadata = {'sequential': False}
        self.assertEqual((True, ['4', '3']),
                         self.client._get_builds('latest'))

    def test_get_builds_exclude_list_no_match(self):
        # special case: job is not in excluded list
        self.client.metadata = {'exclude_jobs': ['notfake']}
        self.assertEqual([('fake', '123'), ('fake', '122')], list(self.client.get_builds(set())))

    def test_get_builds_exclude_list_match(self):
        # special case: job is in excluded list
        self.client.metadata = {'exclude_jobs': ['fake']}
        self.assertEqual([], list(self.client.get_builds(set())))

    def test_get_builds_exclude_list_match_using_regexp(self):
        # special case: job is in excluded list (patterns are regexes)
        self.client.metadata = {'exclude_jobs': ['.*(flaky|flake|fake).*']}
        self.assertEqual([], list(self.client.get_builds(set())))
        # special case: job is in excluded list
        self.client.metadata = {'exclude_jobs': ['.*(flaky|flake).*']}
        self.assertEqual([('fake', '123'), ('fake', '122')], list(self.client.get_builds(set())))
class MainTest(unittest.TestCase):
    """End-to-end test of the main function's output."""
    JOBS_DIR = GCSClientTest.JOBS_DIR

    def test_remove_system_out(self):
        # Non-XML input passes through untouched; <system-out> elements are
        # stripped from well-formed XML.
        self.assertEqual(make_db.remove_system_out('not<xml<lol'), 'not<xml<lol')
        self.assertEqual(
            make_db.remove_system_out('<a><b>c<system-out>bar</system-out></b></a>'),
            '<a><b>c</b></a>')

    @staticmethod
    def get_expected_builds():
        # Expected database contents after processing the MockedClient
        # fixtures: path -> (started, finished, junit payloads).
        return {
            MockedClient.JOB_DIR.replace('123', '122')[:-1]:
                (None, {'timestamp': 123}, []),
            MockedClient.JOB_DIR[:-1]:
                ({'timestamp': MockedClient.NOW - 5},
                 {'timestamp': MockedClient.NOW, 'result': 'SUCCESS'},
                 [MockedClient.gets[MockedClient.ART_DIR + 'junit_01.xml']])
        }

    def assert_main_output(self, threads, expected=None, db=None,
                           client=MockedClient):
        # Run make_db.main against the mocked client and compare the resulting
        # rows with *expected*.  Returns the db so callers can layer
        # incremental runs on top of it.
        if expected is None:
            expected = self.get_expected_builds()
        if db is None:
            db = model.Database(':memory:')
        make_db.main(db, {self.JOBS_DIR: {}}, threads, True, sys.maxsize, False, client)
        result = {path: (started, finished, db.test_results_for_build(path))
                  for _rowid, path, started, finished in db.get_builds()}
        self.assertEqual(result, expected)
        return db

    def test_clean(self):
        self.maxDiff = None
        # Output must be identical regardless of worker-thread count.
        for threads in [1, 32]:
            self.assert_main_output(threads)

    def METHOD_NAME(self):
        # Incremental run: a second pass sees a newer build (124) and adds it
        # without disturbing the rows created by the first pass.
        db = self.assert_main_output(1)
        new_junit = '''
    <testsuite>
        <testcase name="New" time="8"/>
        <testcase name="Foo" time="2.3"/>
    </testsuite>
    '''

        class MockedClientNewer(MockedClient):
            NOW = static_epoch
            LOG_DIR = 'gs://kubernetes-jenkins/logs/'
            JOB_DIR = LOG_DIR + 'fake/124/'
            ART_DIR = JOB_DIR + 'artifacts/'
            lists = {
                LOG_DIR: [LOG_DIR + 'fake/'],
                LOG_DIR + 'fake/': [JOB_DIR, LOG_DIR + 'fake/123/'],
                ART_DIR: [ART_DIR + 'junit_01.xml'],
                'gs://kubernetes-jenkins/pr-logs/directory/': [],
            }
            gets = {
                JOB_DIR + 'finished.json': {'timestamp': NOW},
                ART_DIR + 'junit_01.xml': new_junit,
            }

        expected = self.get_expected_builds()
        expected[MockedClientNewer.JOB_DIR[:-1]] = (
            None, {'timestamp': MockedClientNewer.NOW}, [new_junit])
        self.assert_main_output(1, expected, db, MockedClientNewer)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
5,223 | test beam schema survives roundtrip | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for arrow_type_compatibility."""
import logging
import unittest
from typing import Any
from typing import Optional
import pyarrow as pa
import pytest
from parameterized import parameterized
from parameterized import parameterized_class
from apache_beam.typehints import row_type
from apache_beam.typehints import typehints
from apache_beam.typehints.arrow_type_compatibility import arrow_schema_from_beam_schema
from apache_beam.typehints.arrow_type_compatibility import beam_schema_from_arrow_schema
from apache_beam.typehints.batch import BatchConverter
from apache_beam.typehints.batch_test import temp_seed
from apache_beam.typehints.schemas_test import get_test_beam_schemas_protos
@pytest.mark.uses_pyarrow
class ArrowTypeCompatibilityTest(unittest.TestCase):
  """Round-trip tests between Beam schema protos and Arrow schemas."""
  @parameterized.expand([(beam_schema, )
                         for beam_schema in get_test_beam_schemas_protos()])
  def METHOD_NAME(self, beam_schema):
    # Beam -> Arrow -> Beam must be lossless for every test schema.
    roundtripped = beam_schema_from_arrow_schema(
        arrow_schema_from_beam_schema(beam_schema))
    self.assertEqual(beam_schema, roundtripped)
# Each parameterized_class entry pins a (batch type, element type, example
# batch) triple; every test method below runs once per entry.
@parameterized_class([
    {
        'batch_typehint': pa.Table,
        'element_typehint': row_type.RowTypeConstraint.from_fields([
            ('foo', Optional[int]),
            ('bar', Optional[float]),
            ('baz', Optional[str]),
        ]),
        'batch': pa.Table.from_pydict({
            'foo': pa.array(range(100), type=pa.int64()),
            'bar': pa.array([i / 100 for i in range(100)], type=pa.float64()),
            'baz': pa.array([str(i) for i in range(100)], type=pa.string()),
        }),
    },
    {
        'batch_typehint': pa.Table,
        'element_typehint': row_type.RowTypeConstraint.from_fields([
            ('foo', Optional[int]),
            (
                'nested',
                Optional[row_type.RowTypeConstraint.from_fields([
                    ("bar", Optional[float]),  # noqa: F821
                    ("baz", Optional[str]),  # noqa: F821
                ])]),
        ]),
        'batch': pa.Table.from_pydict({
            'foo': pa.array(range(100), type=pa.int64()),
            'nested': pa.array([
                None if i % 11 else {
                    'bar': i / 100, 'baz': str(i)
                } for i in range(100)
            ]),
        }),
    },
    {
        'batch_typehint': pa.Array,
        'element_typehint': int,
        'batch': pa.array(range(100), type=pa.int64()),
    },
    {
        'batch_typehint': pa.Array,
        'element_typehint': row_type.RowTypeConstraint.from_fields([
            ("bar", Optional[float]),  # noqa: F821
            ("baz", Optional[str]),  # noqa: F821
        ]),
        'batch': pa.array([
            {
                'bar': i / 100, 'baz': str(i)
            } if i % 7 else None for i in range(100)
        ]),
    }
])
@pytest.mark.uses_pyarrow
class ArrowBatchConverterTest(unittest.TestCase):
  """Behavioral tests for the Arrow BatchConverter on each fixture above."""

  def create_batch_converter(self):
    return BatchConverter.from_typehints(
        element_type=self.element_typehint, batch_type=self.batch_typehint)

  def setUp(self):
    self.converter = self.create_batch_converter()
    self.normalized_batch_typehint = typehints.normalize(self.batch_typehint)
    self.normalized_element_typehint = typehints.normalize(
        self.element_typehint)

  def equality_check(self, left, right):
    # pa.Array's == is elementwise; use .equals for whole-array comparison.
    if isinstance(left, pa.Array):
      self.assertTrue(left.equals(right))
    else:
      self.assertEqual(left, right)

  def test_typehint_validates(self):
    typehints.validate_composite_type_param(self.batch_typehint, '')
    typehints.validate_composite_type_param(self.element_typehint, '')

  def test_type_check(self):
    typehints.check_constraint(self.normalized_batch_typehint, self.batch)

  def test_type_check_element(self):
    for element in self.converter.explode_batch(self.batch):
      typehints.check_constraint(self.normalized_element_typehint, element)

  def test_explode_rebatch(self):
    # Exploding to elements and rebatching must reproduce the original batch.
    exploded = list(self.converter.explode_batch(self.batch))
    rebatched = self.converter.produce_batch(exploded)

    typehints.check_constraint(self.normalized_batch_typehint, rebatched)
    self.equality_check(self.batch, rebatched)

  def test_estimate_byte_size_implemented(self):
    # Just verify that we can call byte size
    self.assertGreater(self.converter.estimate_byte_size(self.batch), 0)

  @parameterized.expand([
      (2, ),
      (3, ),
      (10, ),
  ])
  def test_estimate_byte_size_partitions(self, N):
    elements = list(self.converter.explode_batch(self.batch))

    # Split elements into N contiguous partitions, create a batch out of each
    batches = [
        self.converter.produce_batch(
            elements[len(elements) * i // N:len(elements) * (i + 1) // N])
        for i in range(N)
    ]

    # Some estimate_byte_size implementations use random samples,
    # set a seed temporarily to make this test deterministic
    with temp_seed(12345):
      partitioned_size_estimate = sum(
          self.converter.estimate_byte_size(batch) for batch in batches)
      size_estimate = self.converter.estimate_byte_size(self.batch)

    # Assert that size estimate for partitions is within 10% of size estimate
    # for the whole partition.
    self.assertLessEqual(
        abs(partitioned_size_estimate / size_estimate - 1), 0.1)

  @parameterized.expand([
      (2, ),
      (3, ),
      (10, ),
  ])
  def test_combine_batches(self, N):
    elements = list(self.converter.explode_batch(self.batch))

    # Split elements into N contiguous partitions, create a batch out of each
    batches = [
        self.converter.produce_batch(
            elements[len(elements) * i // N:len(elements) * (i + 1) // N])
        for i in range(N)
    ]

    # Combine the batches, output should be equivalent to the original batch
    combined = self.converter.combine_batches(batches)
    self.equality_check(self.batch, combined)

  def test_equals(self):
    self.assertTrue(self.converter == self.create_batch_converter())
    self.assertTrue(self.create_batch_converter() == self.converter)

  def test_hash(self):
    self.assertEqual(hash(self.create_batch_converter()), hash(self.converter))
class ArrowBatchConverterErrorsTest(unittest.TestCase):
  """Invalid (batch, element) typehint pairs must raise a clear TypeError."""
  @parameterized.expand([
      (
          pa.RecordBatch,
          row_type.RowTypeConstraint.from_fields([
              ("bar", Optional[float]),  # noqa: F821
              ("baz", Optional[str]),  # noqa: F821
          ]),
          r'batch type must be pa\.Table or pa\.Array',
      ),
      (
          pa.Table,
          Any,
          r'Element type must be compatible with Beam Schemas',
      ),
  ])
  def test_construction_errors(
      self, batch_typehint, element_typehint, error_regex):
    with self.assertRaisesRegex(TypeError, error_regex):
      BatchConverter.from_typehints(
          element_type=element_typehint, batch_type=batch_typehint)
# Allow running this test module directly; log at INFO for visibility.
if __name__ == '__main__':
  logging.getLogger().setLevel(logging.INFO)
  unittest.main()
5,224 | validate | # #
# Copyright 2013-2023 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
This describes the easyconfig format versions 2.x
This is a mix between version 1.0 and configparser-style configuration
Authors:
* Stijn De Weirdt (Ghent University)
* Kenneth Hoste (Ghent University)
"""
import copy
import re
from easybuild.framework.easyconfig.format.pyheaderconfigobj import EasyConfigFormatConfigObj
from easybuild.framework.easyconfig.format.format import EBConfigObj
from easybuild.framework.easyconfig.format.version import EasyVersion
from easybuild.tools.build_log import EasyBuildError
class FormatTwoZero(EasyConfigFormatConfigObj):
    """
    Support for easyconfig format 2.0

    Simple extension of FormatOneZero with configparser blocks

    Doesn't set version and toolchain/toolchain version like in FormatOneZero;
    referencing 'version' directly in pyheader doesn't work => use templating '%(version)s'

    NOT in 2.0
    - order preservation: need more recent ConfigObj (more recent Python as minimal version)
    - nested sections (need other ConfigParser/ConfigObj, eg INITools)
    - type validation
    - command line generation (--try-X command line options)
    """
    VERSION = EasyVersion('2.0')
    USABLE = True

    PYHEADER_ALLOWED_BUILTINS = ['len', 'False', 'True']
    PYHEADER_MANDATORY = ['name', 'homepage', 'description', 'software_license', 'software_license_urls', 'docurls']
    # version/toolchain must come from the configobj sections, never the pyheader
    PYHEADER_BLACKLIST = ['version', 'toolchain']

    # Matches docstring fields like "@author: Some Name" (multiline mode).
    NAME_DOCSTRING_REGEX_TEMPLATE = r'^\s*@%s\s*:\s*(?P<name>\S.*?)\s*$'  # non-greedy match in named pattern
    AUTHOR_DOCSTRING_REGEX = re.compile(NAME_DOCSTRING_REGEX_TEMPLATE % 'author', re.M)
    MAINTAINER_DOCSTRING_REGEX = re.compile(NAME_DOCSTRING_REGEX_TEMPLATE % 'maintainer', re.M)

    AUTHOR_REQUIRED = True
    MAINTAINER_REQUIRED = False

    def METHOD_NAME(self):
        """Format validation"""
        # Checks docstring metadata and the pyheader variables.
        self._check_docstring()
        self._validate_pyheader()

    def _check_docstring(self):
        """
        Verify docstring.
        field :author: people who contributed to the easyconfig
        field @maintainer: people who can be contacted in case of problems
        """
        authors = []
        maintainers = []
        for auth_reg in self.AUTHOR_DOCSTRING_REGEX.finditer(self.docstring):
            res = auth_reg.groupdict()
            authors.append(res['name'])

        for maint_reg in self.MAINTAINER_DOCSTRING_REGEX.finditer(self.docstring):
            res = maint_reg.groupdict()
            maintainers.append(res['name'])

        if self.AUTHOR_REQUIRED and not authors:
            raise EasyBuildError("No author in docstring (regex: '%s')", self.AUTHOR_DOCSTRING_REGEX.pattern)

        if self.MAINTAINER_REQUIRED and not maintainers:
            raise EasyBuildError("No maintainer in docstring (regex: '%s')", self.MAINTAINER_DOCSTRING_REGEX.pattern)

    def get_config_dict(self):
        """Return the best matching easyconfig dict"""
        self.log.experimental(self.__class__.__name__)
        # the toolchain name/version should not be specified in the pyheader,
        # but other toolchain options are allowed

        cfg = copy.deepcopy(self.pyheader_localvars)
        self.log.debug("Config dict based on Python header: %s" % cfg)

        co = EBConfigObj(self.configobj)

        # requested version/toolchain, if any, come from self.specs
        version = self.specs.get('version', None)
        tc_spec = self.specs.get('toolchain', {})
        toolchain_name = tc_spec.get('name', None)
        toolchain_version = tc_spec.get('version', None)

        # parse and interpret, dealing with defaults etc
        version, tcname, tcversion = co.get_version_toolchain(version, toolchain_name, toolchain_version)

        # format 2.0 will squash
        self.log.debug('Squashing with version %s and toolchain %s' % (version, (tcname, tcversion)))
        res = co.squash(version, tcname, tcversion)

        cfg.update(res)
        self.log.debug("Config dict after processing applicable easyconfig sections: %s" % cfg)
        # FIXME what about updating dict values/appending to list values?
        # FIXME how do we allow both redefining and updating? = and +=?

        # update config with correct version/toolchain (to avoid using values specified in default section)
        cfg.update({
            'version': version,
            'toolchain': {'name': tcname, 'version': tcversion},
        })
        self.log.debug("Final config dict (including correct version/toolchain): %s" % cfg)

        return cfg

    def extract_comments(self, rawtxt):
        """Extract comments from raw content."""
        # this is fine-ish, it only implies that comments will be lost for format v2 easyconfig files that are dumped
        self.log.warning("Extraction of comments not supported yet for easyconfig format v2")
5,225 | test set exc info | # Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# The Universal Permissive License (UPL), Version 1.0
#
# Subject to the condition set forth below, permission is hereby granted to any
# person obtaining a copy of this software, associated documentation and/or
# data (collectively the "Software"), free of charge and under any and all
# copyright rights in the Software, and any and all patent rights owned or
# freely licensable by each licensor hereunder covering either (i) the
# unmodified Software as contributed to or provided by such licensor, or (ii)
# the Larger Works (as defined below), to deal in both
#
# (a) the Software, and
#
# (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if
# one is included with the Software each a "Larger Work" to which the Software
# is contributed by such licensors),
#
# without restriction, including without limitation the rights to copy, create
# derivative works of, display, perform, and distribute the Software and make,
# use, sell, offer for sale, import, export, have made, and have sold the
# Software and the Larger Work(s), and to sublicense the foregoing rights on
# either these or other terms.
#
# This license is subject to the following condition:
#
# The above copyright notice and either this complete permission notice or at a
# minimum a reference to the UPL must be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
from . import CPyExtType, CPyExtTestCase, CPyExtFunction, CPyExtFunctionOutVars, unhandled_error_compare, GRAALPYTHON
# Directory containing this test file (used by the C-extension test helpers).
__dir__ = __file__.rpartition("/")[0]

# Raise and catch an exception once at import time to capture a real
# traceback object; tests reuse TB wherever a valid traceback is required.
try:
    raise TypeError
except:
    TB = sys.exc_info()[2]
class TestExceptionobject(object):
    """Tests for the PyErr_GetExcInfo/PyErr_SetExcInfo C API on GraalPython."""

    def test_exc_info(self):
        # Native helper: returns the currently-handled exception type via
        # PyErr_GetExcInfo, or None when no exception is being handled.
        TestExcInfo = CPyExtType("TestExcInfo",
                                 """
                                 PyObject* get_exc_info(PyObject* self) {
                                     PyObject* typ;
                                     PyObject* val;
                                     PyObject* tb;
                                     PyErr_GetExcInfo(&typ, &val, &tb);
                                     Py_XDECREF(val);
                                     Py_XDECREF(tb);
                                     if (typ == NULL) {
                                         Py_RETURN_NONE;
                                     }
                                     return typ;
                                 }
                                 """,
                                 tp_methods='{"get_exc_info", (PyCFunction)get_exc_info, METH_NOARGS, ""}'
                                 )
        tester = TestExcInfo()
        try:
            raise IndexError
        except IndexError:
            exc_type = tester.get_exc_info()
            assert exc_type == IndexError

            # do a second time because this time we won't do a stack walk
            # disabled due to GR-34711
            # exc_type = tester.get_exc_info()
            # assert exc_type == IndexError
        else:
            assert False

    def METHOD_NAME(self):
        # Native helper: overwrites the handled-exception state with the given
        # (type, value, traceback) triple and reports whether PyErr_GetExcInfo
        # reads the same objects back.
        TestSetExcInfo = CPyExtType("TestSetExcInfo",
                                    """
                                    PyObject* set_exc_info(PyObject* self, PyObject* args) {
                                        PyObject* typ = PyTuple_GetItem(args, 0);
                                        PyObject* val = PyTuple_GetItem(args, 1);
                                        PyObject* tb = PyTuple_GetItem(args, 2);
                                        PyObject* typ1 = NULL;
                                        PyObject* val1 = NULL;
                                        PyObject* tb1 = NULL;
                                        Py_XINCREF(typ);
                                        Py_XINCREF(val);
                                        Py_XINCREF(tb);
                                        PyErr_SetExcInfo(typ, val, tb);
                                        PyErr_GetExcInfo(&typ1, &val1, &tb1);
                                        // ignore the traceback for now
                                        if(typ == typ1 && val == val1) {
                                            return Py_True;
                                        }
                                        return Py_False;
                                    }
                                    """,
                                    tp_methods='{"set_exc_info", (PyCFunction)set_exc_info, METH_O, ""}'
                                    )
        tester = TestSetExcInfo()
        try:
            raise IndexError
        except:
            typ, val, tb = sys.exc_info()
            assert typ == IndexError

            # overwrite exception info
            expected = (ValueError, ValueError(), None)
            res = tester.set_exc_info(expected)
            assert res

            # TODO uncomment once supported
            # actual = sys.exc_info()
            # assert actual == expected
        else:
            assert False
def raise_exception_with_cause():
    """Return an IndexError whose __cause__ is a RuntimeError.

    Used as a fixture value by tests that inspect exception chaining.
    """
    try:
        raise RuntimeError()
    except RuntimeError as runtime_error:
        cause = runtime_error
    try:
        raise IndexError from cause
    except IndexError as chained:
        return chained
class TestExceptionobjectFunctions(CPyExtTestCase):
    """Declarative CPyExtFunction tests for exception-object C API calls."""

    def compile_module(self, name):
        # Create the native module for the named test before delegating to the
        # shared CPyExtTestCase compilation machinery.
        type(self).mro()[1].__dict__["test_%s" % name].create_module(name)
        super().compile_module(name)

    # PyException_SetTraceback returns 0 on success.
    test_PyException_SetTraceback = CPyExtFunction(
        lambda args: 0,
        lambda: (
            (
                AssertionError(), TB
            ),
        ),
        resultspec="i",
        argspec="OO",
        arguments=["PyObject* exc", "PyObject* tb"],
    )

    # PyException_GetCause must return the exception's __cause__.
    test_PyException_GetCause = CPyExtFunction(
        lambda args: args[0].__cause__,
        lambda: (
            (raise_exception_with_cause(),),
        ),
        resultspec="O",
        argspec="O",
        arguments=["PyObject* exc"],
    )
5,226 | test type singleton2 | # Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import unittest
import dns.rdataclass
import dns.rdatatype
class RdTypeAndClassTestCase(unittest.TestCase):
    """Exercise dns.rdataclass / dns.rdatatype conversions and predicates:
    text round-trips, RFC 3597 generic "CLASS<n>"/"TYPE<n>" forms, 16-bit
    bounds checking, and the meta/singleton predicates."""

    # Classes
    def test_class_meta1(self):
        self.assertTrue(dns.rdataclass.is_metaclass(dns.rdataclass.ANY))

    def test_class_meta2(self):
        self.assertFalse(dns.rdataclass.is_metaclass(dns.rdataclass.IN))

    def test_class_bytext1(self):
        self.assertEqual(dns.rdataclass.from_text("IN"), dns.rdataclass.IN)

    def test_class_bytext2(self):
        # RFC 3597 generic form maps onto the same code point as the mnemonic.
        self.assertEqual(dns.rdataclass.from_text("CLASS1"), dns.rdataclass.IN)

    def test_class_bytext_bounds1(self):
        # Class values are 16-bit: 0 and 65535 are the inclusive limits.
        self.assertEqual(dns.rdataclass.from_text("CLASS0"), 0)
        self.assertEqual(dns.rdataclass.from_text("CLASS65535"), 65535)

    def test_class_bytext_bounds2(self):
        def bad():
            dns.rdataclass.from_text("CLASS65536")
        self.assertRaises(ValueError, bad)

    def test_class_bytext_unknown(self):
        def bad():
            dns.rdataclass.from_text("XXX")
        self.assertRaises(dns.rdataclass.UnknownRdataclass, bad)

    def test_class_totext1(self):
        self.assertEqual(dns.rdataclass.to_text(dns.rdataclass.IN), "IN")

    def test_class_totext2(self):
        # Unknown-but-valid values render in the generic RFC 3597 form.
        self.assertEqual(dns.rdataclass.to_text(999), "CLASS999")

    def test_class_totext_bounds1(self):
        def bad():
            dns.rdataclass.to_text(-1)
        self.assertRaises(ValueError, bad)

    def test_class_totext_bounds2(self):
        def bad():
            dns.rdataclass.to_text(65536)
        self.assertRaises(ValueError, bad)

    # Types
    def test_type_meta1(self):
        self.assertTrue(dns.rdatatype.is_metatype(dns.rdatatype.ANY))

    def test_type_meta2(self):
        self.assertTrue(dns.rdatatype.is_metatype(dns.rdatatype.OPT))

    def test_type_meta3(self):
        self.assertFalse(dns.rdatatype.is_metatype(dns.rdatatype.A))

    def test_type_singleton1(self):
        self.assertTrue(dns.rdatatype.is_singleton(dns.rdatatype.SOA))

    def test_type_singleton2(self):
        self.assertFalse(dns.rdatatype.is_singleton(dns.rdatatype.A))

    def test_type_bytext1(self):
        self.assertEqual(dns.rdatatype.from_text("A"), dns.rdatatype.A)

    def test_type_bytext2(self):
        self.assertEqual(dns.rdatatype.from_text("TYPE1"), dns.rdatatype.A)

    def test_type_bytext_bounds1(self):
        self.assertEqual(dns.rdatatype.from_text("TYPE0"), 0)
        self.assertEqual(dns.rdatatype.from_text("TYPE65535"), 65535)

    def test_type_bytext_bounds2(self):
        def bad():
            dns.rdatatype.from_text("TYPE65536")
        self.assertRaises(ValueError, bad)

    def test_type_bytext_unknown(self):
        def bad():
            dns.rdatatype.from_text("XXX")
        self.assertRaises(dns.rdatatype.UnknownRdatatype, bad)

    def test_type_totext1(self):
        self.assertEqual(dns.rdatatype.to_text(dns.rdatatype.A), "A")

    def test_type_totext2(self):
        self.assertEqual(dns.rdatatype.to_text(999), "TYPE999")

    def test_type_totext_bounds1(self):
        def bad():
            dns.rdatatype.to_text(-1)
        self.assertRaises(ValueError, bad)

    def test_type_totext_bounds2(self):
        def bad():
            dns.rdatatype.to_text(65536)
        self.assertRaises(ValueError, bad)

    def test_type0_totext(self):
        # Value 0 has no mnemonic; it renders generically, like other unknowns.
        self.assertEqual(dns.rdatatype.to_text(0), "TYPE0")
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
5,227 | test clear with unicode str | import unittest
from unittest import mock
from ZEO.runzeo import ZEOServer
class TestStorageServer:
    """Minimal stand-in for a ZEO storage server that records method calls.

    Constructing with a true *fail_create_server* raises RuntimeError,
    simulating a server that cannot be created.
    """

    def __init__(self, fail_create_server):
        if fail_create_server:
            raise RuntimeError()
        self.called = []

    def close(self):
        """Record that the server was asked to shut down."""
        self.called += ["close"]
class TestZEOServer(ZEOServer):
    """ZEOServer subclass that records which lifecycle hooks main() invokes.

    Optionally makes create_server or loop_forever fail with RuntimeError so
    tests can check the shutdown sequence on error paths.
    """

    def __init__(self, fail_create_server=False, fail_loop_forever=False):
        ZEOServer.__init__(self, None)
        self.called = []
        self.fail_create_server = fail_create_server
        self.fail_loop_forever = fail_loop_forever

    def _record(self, hook_name):
        # Every overridden hook funnels through here so tests can assert
        # the exact invocation order.
        self.called.append(hook_name)

    def setup_default_logging(self):
        self._record("setup_default_logging")

    def check_socket(self):
        self._record("check_socket")

    def clear_socket(self):
        self._record("clear_socket")

    def make_pidfile(self):
        self._record("make_pidfile")

    def open_storages(self):
        self._record("open_storages")

    def setup_signals(self):
        self._record("setup_signals")

    def create_server(self):
        self._record("create_server")
        self.server = TestStorageServer(self.fail_create_server)

    def loop_forever(self):
        self._record("loop_forever")
        if self.fail_loop_forever:
            raise RuntimeError()

    def close_server(self):
        self._record("close_server")
        ZEOServer.close_server(self)

    def remove_pidfile(self):
        self._record("remove_pidfile")
class AttributeErrorTests(unittest.TestCase):
    """Regression test: ZEOServer.main used to raise AttributeError
    ("'ZEOServer' object has no attribute 'server'") when create_server
    failed before the server attribute was assigned."""

    def testFailCreateServer(self):
        zeo = TestZEOServer(fail_create_server=True)
        # The original RuntimeError must propagate, not an AttributeError.
        with self.assertRaises(RuntimeError):
            zeo.main()
class CloseServerTests(unittest.TestCase):
    """Verify that ZEOServer.main invokes close_server (and the rest of the
    shutdown sequence) on the normal path and on both failure paths."""

    def testCallSequence(self):
        # The close_server hook is called after loop_forever
        # has returned
        zeo = TestZEOServer()
        zeo.main()
        self.assertEqual(zeo.called, [
            "setup_default_logging",
            "check_socket",
            "clear_socket",
            "make_pidfile",
            "open_storages",
            "setup_signals",
            "create_server",
            "loop_forever",
            "close_server",  # New
            "clear_socket",
            "remove_pidfile",
        ])
        # The default implementation closes the storage server
        self.assertEqual(hasattr(zeo, "server"), True)
        self.assertEqual(zeo.server.called, ["close"])

    def testFailLoopForever(self):
        # The close_server hook is called if loop_forever exits
        # with an exception
        zeo = TestZEOServer(fail_loop_forever=True)
        self.assertRaises(RuntimeError, zeo.main)
        self.assertEqual(zeo.called, [
            "setup_default_logging",
            "check_socket",
            "clear_socket",
            "make_pidfile",
            "open_storages",
            "setup_signals",
            "create_server",
            "loop_forever",
            "close_server",
            "clear_socket",
            "remove_pidfile",
        ])
        # The storage server has been closed
        self.assertEqual(hasattr(zeo, "server"), True)
        self.assertEqual(zeo.server.called, ["close"])

    def testFailCreateServer(self):
        # The close_server hook is called if create_server exits
        # with an exception; note "loop_forever" never runs here.
        zeo = TestZEOServer(fail_create_server=True)
        self.assertRaises(RuntimeError, zeo.main)
        self.assertEqual(zeo.called, [
            "setup_default_logging",
            "check_socket",
            "clear_socket",
            "make_pidfile",
            "open_storages",
            "setup_signals",
            "create_server",
            "close_server",
            "clear_socket",
            "remove_pidfile",
        ])
        # The server attribute is present but None
        self.assertEqual(hasattr(zeo, "server"), True)
        self.assertEqual(zeo.server, None)
@mock.patch('os.unlink')
class TestZEOServerSocket(unittest.TestCase):
    """Tests for ZEOServer.clear_socket: the socket file must be unlinked
    only when the configured address is a str, never for bytes or a
    (host, port) tuple."""

    def _unlinked(self, unlink, options):
        server = ZEOServer(options)
        server.clear_socket()
        unlink.assert_called_once()

    def _not_unlinked(self, unlink, options):
        server = ZEOServer(options)
        server.clear_socket()
        unlink.assert_not_called()

    def test_clear_with_native_str(self, unlink):
        class Options:
            address = "a str that does not exist"
        self._unlinked(unlink, Options)

    def test_clear_with_unicode_str(self, unlink):
        # On Py3 every str is unicode; this case is kept distinct from the
        # native-str one for historical (Py2 u"" literal) parity.
        class Options:
            address = u"a str that does not exist"
        self._unlinked(unlink, Options)

    def test_clear_with_bytes(self, unlink):
        class Options:
            address = b'a byte str that does not exist'
        # bytes are not a string type under Py3
        assertion = self._not_unlinked
        assertion(unlink, Options)

    def test_clear_with_tuple(self, unlink):
        class Options:
            address = ('abc', 1)
        self._not_unlinked(unlink, Options)
5,228 | test wf record with submitter source read | # -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
from __future__ import absolute_import, division, print_function
import pytest
from time import sleep
from flask import current_app
from invenio_db import db
from inspirehep.modules.records.api import InspireRecord
from inspirehep.modules.workflows.utils import (
get_document_url_for_reference_extraction,
insert_wf_record_source,
get_all_wf_record_sources,
read_wf_record_source,
timeout_with_config,
TimeoutError
)
from utils import override_config
from invenio_workflows import workflow_object_class
@pytest.fixture()
def dummy_record(workflow_app):
    # Minimal valid Literature (thesis) record; force-deleted on teardown so
    # each test starts from a clean database.
    record = InspireRecord.create({
        '$schema': 'http://localhost:5000/schemas/records/hep.json',
        '_collections': ['Literature'],
        'document_type': ['thesis'],
        'titles': [{'title': 'foo'}],
    })
    yield record
    record._delete(force=True)
def test_wf_record_source_read_and_write(dummy_record):
    """A root written for source 'arxiv' reads back with identical json/source."""
    insert_wf_record_source(
        json_data=dummy_record,
        record_uuid=dummy_record.id,
        source='arxiv'
    )
    db.session.commit()
    retrieved_root = read_wf_record_source(
        record_uuid=dummy_record.id, source='arxiv')
    assert dummy_record == retrieved_root.json
    assert 'arxiv' == retrieved_root.source
def test_wf_record_with_desy_source_read_and_write(dummy_record):
    """Writing with source 'desy' reads back under source 'publisher'.

    NOTE(review): the asymmetry (insert 'desy', read 'desy', assert
    'publisher') presumably reflects a source-normalization step inside
    insert_wf_record_source — confirm against its implementation.
    """
    insert_wf_record_source(
        json_data=dummy_record,
        record_uuid=dummy_record.id,
        source='desy'
    )
    db.session.commit()
    retrieved_root = read_wf_record_source(
        record_uuid=dummy_record.id, source='desy')
    assert dummy_record == retrieved_root.json
    assert 'publisher' == retrieved_root.source
def test_wf_record_with_submitter_source_read(dummy_record):
    """A root written for source 'submitter' reads back unchanged.

    Unlike 'desy' (which is normalized to 'publisher'), 'submitter' is
    stored and returned as-is.
    """
    insert_wf_record_source(
        json_data=dummy_record,
        record_uuid=dummy_record.id,
        source='submitter'
    )
    db.session.commit()
    retrieved_root = read_wf_record_source(
        record_uuid=dummy_record.id, source='submitter')
    assert dummy_record == retrieved_root.json
    assert 'submitter' == retrieved_root.source
# NOTE(review): the doubled "test_test_" prefix looks like a typo but is kept
# to avoid churning test identifiers; discovery still works either way.
def test_test_wf_record_source_update(dummy_record):
    """Inserting twice for the same (record, source) overwrites the stored root."""
    insert_wf_record_source(
        json_data=dummy_record,
        record_uuid=dummy_record.id,
        source='arxiv'
    )
    db.session.commit()
    # update the content
    dummy_record['document_type'] = ['article']
    insert_wf_record_source(
        json_data=dummy_record,
        record_uuid=dummy_record.id,
        source='arxiv'
    )
    db.session.commit()
    retrieved_root = read_wf_record_source(record_uuid=dummy_record.id, source='arxiv')
    assert dummy_record == retrieved_root.json
def test_empty_root(dummy_record):
    """Reading a source that was never written returns None."""
    record_uuid = dummy_record.id
    retrieved_root = read_wf_record_source(record_uuid=record_uuid, source='publisher')
    assert retrieved_root is None
def test_wf_record_source_does_not_match_db_content(dummy_record):
    """Committing the record itself does not create a workflow-source root."""
    dummy_record.commit()
    db.session.commit()  # write in the db
    retrieved_root = read_wf_record_source(record_uuid=dummy_record.id, source='publisher')
    assert retrieved_root is None
def test_get_all_wf_record_sources(dummy_record):
    """get_all_wf_record_sources returns one entry per distinct source."""
    insert_wf_record_source(
        json_data=dummy_record,
        record_uuid=dummy_record.id,
        source='arxiv'
    )
    insert_wf_record_source(
        json_data=dummy_record,
        record_uuid=dummy_record.id,
        source='publisher'
    )
    db.session.commit()
    entries = get_all_wf_record_sources(dummy_record.id)
    assert len(entries) == 2
def test_timeout_with_config(workflow_app):
    """timeout_with_config reads its limit from app config and raises
    TimeoutError when the wrapped call overruns it."""
    @timeout_with_config('MAX_NAP_TIME')
    def long_nap():
        sleep(5)

    with override_config(MAX_NAP_TIME=1), pytest.raises(TimeoutError):
        long_nap()
def test_get_document_url_for_reference_extraction(workflow_app):
    """The extracted document URL is scheme + server name + the relative
    'url' of the workflow's document entry that carries one."""
    with override_config(SERVER_NAME='inspirebeta.net', PREFERRED_URL_SCHEME='https'):
        file_url = "/api/files/be4ca558-1948-478c-a7f1-05af95b94282/2205.02190.pdf"
        data = {
            'documents': [
                {
                    'key': 'table_of_contents.pdf',
                    'url': file_url
                },
                {
                    # deliberately has no 'url' key
                    'key': 'document.pdf',
                },
            ],
        }
        wf = workflow_object_class.create(
            data=data,
            id_user=None,
            data_type='hep'
        )
        url = get_document_url_for_reference_extraction(wf)
        expected_url = '{0}://{1}{2}'.format(
            current_app.config['PREFERRED_URL_SCHEME'],
            current_app.config['SERVER_NAME'],
            file_url
        )
        assert expected_url == url
5,229 | decrypt plaq | # -*- coding: utf-8 -*-
import base64
from Crypto import Random
from Crypto.Cipher import XOR
from Crypto.Cipher import AES
import hashlib
import logging
from multiprocessing import Pool
import sys, os, re
import traceback
import struct
from datetime import datetime
from dateutil.relativedelta import relativedelta
import pandas as pd
import numpy as np
# Config
SEP=';'  # CSV field separator used for both input and output files
AES_BLOCK_SIZE = 16
CHUNK_SIZE = 20000 # size of each chunk
MAX_INPUT_ROWS = None # number of lines to process in the recipe, None if no limit
#VERBOSECHUNKSIZE = 10000 # display log every VERBOSECHUNKSIZE line
NUM_THREADS = 2 # number of parallel threads
# Column layout of the encrypted transfer file (order matters on output).
COMMON_TRANSFER_SCHEMA = [
    {'name': 'ida1', 'type': 'string'},
    {'name': 'ida2', 'type': 'string'},
    {'name': 'v', 'type': 'string'},
    {'name': 'utac_ask_ct', 'type': 'string'},
    {'name': 'utac_encrypted_immat', 'type': 'string'},
    {'name': 'utac_encrypted_vin', 'type': 'string'},
    {'name': 'controle_qualite', 'type': 'string'}
]
TRANSFER_COLUMNS = [c['name'] for c in COMMON_TRANSFER_SCHEMA]
# When True, encrypt_df adds round-trip self-check columns to its output.
_test_encrypt_decrypt = False
def pad(s):
    """Return *s* (bytes) with PKCS#7-style padding appended.

    Between 1 and AES_BLOCK_SIZE bytes are added, each equal to the padding
    length, so the result length is a multiple of AES_BLOCK_SIZE.
    """
    n = AES_BLOCK_SIZE - len(s) % AES_BLOCK_SIZE
    return s + bytes([n]) * n
def unpad(s):
    """Decode *s* (utf-8 bytes) and strip its trailing padding; returns str.

    The last character's code point gives the number of padding characters
    to remove (inverse of ``pad``).
    """
    text = s.decode('utf8')
    return text[:-ord(text[-1])]
def encrypt_string(key, string):
    """AES-CBC-encrypt *string* with *key*.

    Returns urlsafe-base64 of (random IV || ciphertext) as a str, so the IV
    travels with the message for ``decrypt_string``.
    """
    iv = Random.new().read(AES.block_size)
    cipher = AES.new(key, AES.MODE_CBC, iv)
    ciphertext = cipher.encrypt(pad(string.encode('utf8')))
    return base64.urlsafe_b64encode(iv + ciphertext).decode('utf8')
def decrypt_string(key, string):
    """Invert ``encrypt_string``: returns the decrypted, unpadded text (str)."""
    raw = base64.urlsafe_b64decode(string)
    iv, ciphertext = raw[:16], raw[16:]
    cipher = AES.new(key, AES.MODE_CBC, iv)
    return unpad(cipher.decrypt(ciphertext))
def encrypt_df(df):
    """Encrypt the given dataframe in-place.

    Builds pseudonymous identifiers from owner + vehicle fields, hashes them
    (sha256, with month salts for ida1/ida2), AES-encrypts column 'v' with a
    per-vehicle key, and returns the frame restricted to TRANSFER_COLUMNS.

    NOTE(review): the function both mutates *df* in place AND returns a new
    column-filtered frame; callers that ignore the return value (see
    process_chunk) keep the intermediate working columns — verify intent.
    """
    global _test_encrypt_decrypt
    month = datetime.today().strftime('%Y%m')
    prev_month = (datetime.today() - relativedelta(months=1)).strftime('%Y%m')
    df.fillna("", inplace=True)
    # Owner identity: company name + SIREN + birth name (+ first name in SIV).
    df['id_personne'] = df['pers_raison_soc_tit'] + df['pers_siren_tit'] + df['pers_nom_naissance_tit']
    if ('pers_prenom_tit' in list(df)):
        df['id_personne'] = df['id_personne'] + (df['pers_prenom_tit']) #SIV
    # Vehicle identity: plate + registration-document discriminator.
    df['id_vehicle'] = df['plaq_immat']
    if ('numero_formule' in list(df)):
        df['id_vehicle'] = df['id_vehicle'] + df['numero_formule'] #SIV
    else:
        df['id_vehicle'] = df['id_vehicle'] + df['date_emission_CI'] #FNI
    df['idv'] = df['id_personne'] + df['id_vehicle']
    df['ida'] = df['idv']
    df['key'] = df['id_vehicle']
    # Normalize before hashing: lowercase, strip non-word characters.
    for col in ['idv', 'ida', 'key']:
        df[col]=df[col].str.lower()
        df[col]=df[col].str.replace(r'\W', '', regex=True)
    # idv: stable hash; ida1/ida2: month-salted hashes (current / previous month).
    df['idv']=df['idv'].apply(lambda x: base64.urlsafe_b64encode(hashlib.sha256((x).encode('utf8','ignore')).digest()).decode('utf8'))
    df['ida1']=df['ida'].apply(lambda x: base64.urlsafe_b64encode(hashlib.sha256((x+month).encode('utf8','ignore')).digest()).decode('utf8'))
    df['ida2']=df['ida'].apply(lambda x: base64.urlsafe_b64encode(hashlib.sha256((x+prev_month).encode('utf8','ignore')).digest()).decode('utf8'))
    # Per-row AES key derived from the vehicle identity.
    df['key']=df['key'].apply(lambda x: hashlib.sha256(x.encode('utf8')).digest())
    if _test_encrypt_decrypt:
        df['v_orig']=df['v']
    df['v']=df.apply(lambda row: encrypt_string(row['key'], row['v']), axis=1)
    if _test_encrypt_decrypt:
        # Round-trip self-check: decrypt and compare with the original value.
        # df['v_crypt']=df.apply(lambda row: encrypt_string(row['hash2'],row['v']), axis=1)
        df['v_decrypt']=df.apply(lambda row: decrypt_string(row['key'],row['v']), axis=1)
        df['v_test']=df.apply(lambda row: (row['v_decrypt'] == row['v_orig']), axis=1)
    df['key']=df['key'].apply(lambda x: base64.b64encode(x).decode('utf8'))
    df = df[['idv', 'ida1', 'ida2', 'v', 'utac_ask_ct', 'utac_encrypted_immat', 'utac_encrypted_vin', 'controle_qualite']]
    return df
def chunk_row_range(chunk_index):
    """Return "first-last" (1-based, inclusive) row numbers of the chunk."""
    first = chunk_index * CHUNK_SIZE + 1
    last = (chunk_index + 1) * CHUNK_SIZE
    return "%d-%d" % (first, last)
def process_chunk(arg):
    """Encrypt one (index, dataframe) chunk (for use with Pool.imap_unordered).

    Returns the chunk index together with the encrypted dataframe so results
    can be written back in any completion order. On failure the chunk is
    returned unencrypted and a warning is logged.
    """
    i, df = arg
    try:
        # encrypt_df returns a new frame restricted to the transfer columns;
        # the return value used to be discarded, leaking the intermediate
        # working columns into the output.
        df = encrypt_df(df)
        # if last_row_index % VERBOSECHUNKSIZE == 0:
        print("chunk {} encrypted".format(chunk_row_range(i)))
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate out of worker processes.
        logging.warning("chunk {} failed:".format(chunk_row_range(i)))
        exc_type, exc_obj, exc_tb = sys.exc_info()
        traceback.print_exception(exc_type, exc_obj, exc_tb)
    # Return i and df for writing to the output dataset
    return i, df
def encrypt_file(input_file, output_file, output_schema=COMMON_TRANSFER_SCHEMA, test_encrypt_decrypt=False):
    """Encrypt *input_file* (a `;`-separated CSV) chunk by chunk and write a
    gzipped, headerless CSV to *output_file*.

    When *test_encrypt_decrypt* is true, round-trip self-check columns are
    appended to the schema and produced by encrypt_df.
    """
    global _test_encrypt_decrypt
    _test_encrypt_decrypt = test_encrypt_decrypt
    # Extend the schema WITHOUT mutating the caller's (module-level default)
    # list: the previous `output_schema += [...]` appended to
    # COMMON_TRANSFER_SCHEMA itself on every self-test run.
    if test_encrypt_decrypt:
        output_schema = list(output_schema) + [
            {'name': 'v_orig', 'type': 'string'},
            {'name': 'v_decrypt', 'type': 'string'},
            {'name': 'v_test', 'type': 'string'}
        ]
    # Read input dataset as a number of fixed-size dataframes
    chunks = pd.read_csv(input_file, sep=SEP, iterator=True, chunksize=CHUNK_SIZE, encoding='utf8', dtype={'pers_siren_tit': object})
    # Encrypt each chunk and concatenate in input order.
    df_list = [encrypt_df(df) for df in chunks]
    output_ds = pd.concat(df_list)
    output_ds.to_csv(output_file, sep=SEP, compression='gzip', index=False, header=False)
def encrypt_plaq(key, plaintext):
    # WARNING(security): XOR is not a real cipher and offers no
    # confidentiality against a motivated attacker; kept only for
    # compatibility with existing consumers of these values.
    cipher = XOR.new(key)
    return base64.b64encode(cipher.encrypt(plaintext))
def decrypt_plaq(key, ciphertext):
    """Invert ``encrypt_plaq``: base64-decode then XOR-decrypt with *key*.

    WARNING(security): XOR is not a real cipher; legacy compatibility only.
    """
    cipher = XOR.new(key)
    return cipher.decrypt(base64.b64decode(ciphertext))
if __name__ == '__main__':
    # Usage: python <script> <input_dir> <output_dir>
    input_dir=sys.argv[1]
    output_dir=sys.argv[2]
    # Encrypt every gzipped CSV found in the input directory, writing a file
    # of the same name into the output directory.
    for file in [f for f in os.listdir(input_dir) if re.match(r'.*csv.gz$', f)]:
        print(file)
        encrypt_file(os.path.join(input_dir, file), os.path.join(output_dir, file) )
5,230 | get exc inh matrix | # -*- coding: utf-8 -*-
#
# network_params.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""PyNEST Microcircuit: Network Parameters
---------------------------------------------
A dictionary with base network and neuron parameters is enhanced with derived
parameters.
"""
import numpy as np
def METHOD_NAME(val_exc, val_inh, num_pops):
    """Creates a matrix for excitatory and inhibitory values.

    Even-indexed (source) columns receive the excitatory value and
    odd-indexed columns the inhibitory value, row-wise identical.

    Parameters
    ----------
    val_exc
        Excitatory value.
    val_inh
        Inhibitory value.
    num_pops
        Number of populations.

    Returns
    -------
    matrix
        A matrix of size (num_pops x num_pops).
    """
    source_is_exc = np.arange(num_pops) % 2 == 0
    template_row = np.where(source_is_exc, float(val_exc), float(val_inh))
    return np.tile(template_row, (num_pops, 1))
net_dict = {
# factor to scale the number of neurons
"N_scaling": 0.1,
# factor to scale the indegrees
"K_scaling": 0.1,
# neuron model
"neuron_model": "iaf_psc_exp",
# names of the simulated neuronal populations
"populations": ["L23E", "L23I", "L4E", "L4I", "L5E", "L5I", "L6E", "L6I"],
# number of neurons in the different populations (same order as
# 'populations')
"full_num_neurons": np.array([20683, 5834, 21915, 5479, 4850, 1065, 14395, 2948]),
# mean rates of the different populations in the non-scaled version of the
# microcircuit (in spikes/s; same order as in 'populations');
# necessary for the scaling of the network.
# The values were obtained by running this PyNEST microcircuit without MPI,
# 'local_num_threads' 4 and both 'N_scaling' and 'K_scaling' set to 1.
"full_mean_rates": np.array([0.903, 2.965, 4.414, 5.876, 7.569, 8.633, 1.105, 7.829]),
# connection probabilities (the first index corresponds to the targets
# and the second to the sources)
"conn_probs": np.array(
[
[0.1009, 0.1689, 0.0437, 0.0818, 0.0323, 0.0, 0.0076, 0.0],
[0.1346, 0.1371, 0.0316, 0.0515, 0.0755, 0.0, 0.0042, 0.0],
[0.0077, 0.0059, 0.0497, 0.135, 0.0067, 0.0003, 0.0453, 0.0],
[0.0691, 0.0029, 0.0794, 0.1597, 0.0033, 0.0, 0.1057, 0.0],
[0.1004, 0.0622, 0.0505, 0.0057, 0.0831, 0.3726, 0.0204, 0.0],
[0.0548, 0.0269, 0.0257, 0.0022, 0.06, 0.3158, 0.0086, 0.0],
[0.0156, 0.0066, 0.0211, 0.0166, 0.0572, 0.0197, 0.0396, 0.2252],
[0.0364, 0.001, 0.0034, 0.0005, 0.0277, 0.008, 0.0658, 0.1443],
]
),
# mean amplitude of excitatory postsynaptic potential (in mV)
"PSP_exc_mean": 0.15,
# relative standard deviation of the weight
"weight_rel_std": 0.1,
# relative inhibitory weight
"g": -4,
# mean delay of excitatory connections (in ms)
"delay_exc_mean": 1.5,
# mean delay of inhibitory connections (in ms)
"delay_inh_mean": 0.75,
# relative standard deviation of the delay of excitatory and
# inhibitory connections
"delay_rel_std": 0.5,
# turn Poisson input on or off (True or False)
# if False: DC input is applied for compensation
"poisson_input": True,
# indegree of external connections to the different populations (same order
# as in 'populations')
"K_ext": np.array([1600, 1500, 2100, 1900, 2000, 1900, 2900, 2100]),
# rate of the Poisson generator (in spikes/s)
"bg_rate": 8.0,
# delay from the Poisson generator to the network (in ms)
"delay_poisson": 1.5,
# initial conditions for the membrane potential, options are:
# 'original': uniform mean and standard deviation for all populations as
# used in earlier implementations of the model
# 'optimized': population-specific mean and standard deviation, allowing a
# reduction of the initial activity burst in the network
# (default)
"V0_type": "optimized",
# parameters of the neuron model
"neuron_params": {
# membrane potential average for the neurons (in mV)
"V0_mean": {"original": -58.0, "optimized": [-68.28, -63.16, -63.33, -63.45, -63.11, -61.66, -66.72, -61.43]},
# standard deviation of the average membrane potential (in mV)
"V0_std": {"original": 10.0, "optimized": [5.36, 4.57, 4.74, 4.94, 4.94, 4.55, 5.46, 4.48]},
# reset membrane potential of the neurons (in mV)
"E_L": -65.0,
# threshold potential of the neurons (in mV)
"V_th": -50.0,
# membrane potential after a spike (in mV)
"V_reset": -65.0,
# membrane capacitance (in pF)
"C_m": 250.0,
# membrane time constant (in ms)
"tau_m": 10.0,
# time constant of postsynaptic currents (in ms)
"tau_syn": 0.5,
# refractory period of the neurons after a spike (in ms)
"t_ref": 2.0,
},
}
# derive matrix of mean PSPs,
# the mean PSP of the connection from L4E to L23E is doubled
PSP_matrix_mean = METHOD_NAME(
    net_dict["PSP_exc_mean"], net_dict["PSP_exc_mean"] * net_dict["g"], len(net_dict["populations"])
)
# special case: L4E (source, column 2) -> L23E (target, row 0) is doubled
PSP_matrix_mean[0, 2] = 2.0 * net_dict["PSP_exc_mean"]
updated_dict = {
    # matrix of mean PSPs
    "PSP_matrix_mean": PSP_matrix_mean,
    # matrix of mean delays
    "delay_matrix_mean": METHOD_NAME(
        net_dict["delay_exc_mean"], net_dict["delay_inh_mean"], len(net_dict["populations"])
    ),
}
# merge the derived parameters into the base network dictionary
net_dict.update(updated_dict)
5,231 | test recallscore | #!/usr/bin/env python
"""Tests for `octis` package."""
import pytest
from click.testing import CliRunner
from octis.evaluation_metrics.topic_significance_metrics import *
from octis.evaluation_metrics.classification_metrics import F1Score, PrecisionScore
from octis.evaluation_metrics.classification_metrics import AccuracyScore, RecallScore
from octis.evaluation_metrics.diversity_metrics import TopicDiversity, InvertedRBO, KLDivergence, LogOddsRatio, \
WordEmbeddingsInvertedRBO
from octis.evaluation_metrics.similarity_metrics import WordEmbeddingsRBOMatch, PairwiseJaccardSimilarity, RBO, \
WordEmbeddingsCentroidSimilarity, WordEmbeddingsPairwiseSimilarity
from octis.evaluation_metrics.coherence_metrics import *
from octis.dataset.dataset import Dataset
from octis.models.LDA import LDA
import os
@pytest.fixture
def root_dir():
    # Absolute directory of this test file; used to locate bundled datasets.
    return os.path.dirname(os.path.abspath(__file__))
@pytest.fixture
def dataset(root_dir):
    # Load the preprocessed M10 benchmark dataset shipped with the repo.
    dataset = Dataset()
    dataset.load_custom_dataset_from_folder(root_dir + "/../preprocessed_datasets/" + '/M10')
    return dataset
@pytest.fixture
def model_output(dataset):
    # Tiny LDA run (3 topics, 5 iterations) — just enough output for the
    # metric API tests; topic quality is irrelevant here.
    model = LDA(num_topics=3, iterations=5)
    output = model.train_model(dataset)
    return output
def test_f1score(dataset, model_output):
    """F1Score.score returns a plain float for an LDA model output."""
    metric = F1Score(dataset=dataset)
    score = metric.score(model_output)
    assert type(score) == np.float64 or type(score) == float
def test_accuracyscore(dataset, model_output):
    """AccuracyScore.score returns a plain float for an LDA model output."""
    metric = AccuracyScore(dataset=dataset)
    score = metric.score(model_output)
    assert type(score) == np.float64 or type(score) == float
def test_precisionscore(dataset, model_output):
    """PrecisionScore.score returns a plain float for an LDA model output."""
    metric = PrecisionScore(dataset=dataset)
    score = metric.score(model_output)
    assert type(score) == np.float64 or type(score) == float
def test_recallscore(dataset, model_output):
    """RecallScore.score returns a plain float for an LDA model output."""
    metric = RecallScore(dataset=dataset)
    score = metric.score(model_output)
    assert type(score) == np.float64 or type(score) == float
def test_svm_persistency(dataset, model_output):
    """Classification metrics reuse the fitted SVM when their configuration
    matches, and refit (same_svm False) when it differs."""
    metric = F1Score(dataset=dataset)
    metric.score(model_output)
    metric = AccuracyScore(dataset=dataset)
    metric.score(model_output)
    assert metric.same_svm
    metric = F1Score(dataset=dataset, average="macro")
    metric.score(model_output)
    assert not metric.same_svm
def test_npmi_coherence_measures(dataset, model_output):
    """NPMI coherence is a float in the theoretical range [-1, 1]."""
    metric = Coherence(topk=10, texts=dataset.get_corpus())
    score = metric.score(model_output)
    assert type(score) == np.float64 or type(score) == float
    assert -1 <= score <= 1
def test_we_coherence_measures(dataset, model_output):
    """Word-embedding coherence metrics return floats in [-1, 1]."""
    metric = WECoherenceCentroid(topk=5)
    score = metric.score(model_output)
    assert type(score) == np.float64 or type(score) == np.float32 or type(score) == float
    assert -1 <= score <= 1
    metric = WECoherencePairwise(topk=10)
    score = metric.score(model_output)
    assert type(score) == np.float64 or type(score) == np.float32 or type(score) == float
    assert -1 <= score <= 1
def test_we_coherence_measures_oov(dataset):
    """Word-embedding coherence must stay finite and in range even when the
    topic words are all out-of-vocabulary for the embedding model."""
    model_output = {'topics':
                    [['dsa', 'dsadgfd', '11111', '22222', 'bbbbbbbb'],
                     ['aaaaa', 'bbb', 'cc', 'd', 'EEE']]}
    metric = WECoherenceCentroid(topk=5)
    score = metric.score(model_output)
    assert type(score) == np.float64 or type(score) == np.float32 or type(score) == float
    assert -1 <= score <= 1
    print(score)
    metric = WECoherencePairwise(topk=10)
    score = metric.score(model_output)
    assert type(score) == np.float64 or type(score) == np.float32 or type(score) == float
    assert -1 <= score <= 1
    print(score)
def test_diversity_measures(dataset, model_output):
    """Topic-diversity metrics all return floats in [0, 1]."""
    metric = TopicDiversity(topk=10)
    score = metric.score(model_output)
    assert type(score) == np.float64 or type(score) == float
    assert 0 <= score <= 1
    metric = KLDivergence()
    score = metric.score(model_output)
    assert type(score) == np.float64 or type(score) == float
    assert 0 <= score <= 1
    metric = LogOddsRatio()
    score = metric.score(model_output)
    assert type(score) == np.float64 or type(score) == float
    assert 0 <= score <= 1
    metric = WordEmbeddingsInvertedRBO(normalize=True)
    score = metric.score(model_output)
    assert type(score) == np.float64 or type(score) == float
    assert 0 <= score <= 1
def test_similarity_measures(dataset, model_output):
    """Topic-similarity metrics all return floats in [0, 1]."""
    metric = RBO(topk=10)
    score = metric.score(model_output)
    assert type(score) == np.float64 or type(score) == float
    assert 0 <= score <= 1
    metric = WordEmbeddingsRBOMatch(topk=10, normalize=True)
    score = metric.score(model_output)
    assert type(score) == np.float64 or type(score) == float
    assert 0 <= score <= 1
    metric = PairwiseJaccardSimilarity(topk=10)
    score = metric.score(model_output)
    assert type(score) == np.float64 or type(score) == float
    assert 0 <= score <= 1
    metric = WordEmbeddingsCentroidSimilarity(topk=10)
    score = metric.score(model_output)
    assert type(score) == np.float64 or type(score) == float
    assert 0 <= score <= 1
    metric = WordEmbeddingsPairwiseSimilarity(topk=10)
    score = metric.score(model_output)
    assert type(score) == np.float64 or type(score) == float
    assert 0 <= score <= 1
def test_irbo(dataset, model_output):
    """Inverted RBO returns a float in [0, 1]."""
    metric = InvertedRBO(topk=10)
    score = metric.score(model_output)
    assert type(score) == np.float64 or type(score) == float
    assert 0 <= score <= 1
def test_kl_b(dataset, model_output):
    """KL-background topic significance is a non-negative float."""
    metric = KL_background()
    score = metric.score(model_output)
    assert type(score) == np.float64 or type(score) == float
    assert score >= 0
def test_kl_v(dataset, model_output):
    """KL-vacuous topic significance is a non-negative float."""
    metric = KL_vacuous()
    score = metric.score(model_output)
    assert type(score) == np.float64 or type(score) == float
    assert score >= 0
def test_kl_u(dataset, model_output):
    """KL-uniform topic significance is a non-negative float."""
    metric = KL_uniform()
    score = metric.score(model_output)
    assert type(score) == np.float64 or type(score) == float
    assert score >= 0
5,232 | get scope value | # -*- coding: utf-8 -*-
from django.utils import translation
from django.utils.translation import ugettext_lazy as _
from pipeline.core.flow.io import ObjectItemSchema, StringItemSchema
from api.collections.monitor import BKMonitorClient
from pipeline_plugins.base.utils.inject import supplier_account_for_business
from pipeline_plugins.components.collections.sites.open.monitor.base import (
MonitorBaseService,
)
from pipeline_plugins.components.utils.sites.open.choose_time_tools import choose_time
from pipeline_plugins.components.utils.sites.open.utils import (
get_module_id_list_by_name,
)
from pipeline_plugins.variables.utils import (
get_list_by_selected_names,
get_service_template_list,
get_service_template_list_by_names,
get_set_list,
)
ALL_SELECTED_STR = "all"
class MonitorAlarmShieldServiceBase(MonitorBaseService):
    def inputs_format(self):
        """Declare the plugin's input fields (shield scope, target metrics,
        and the begin/end/duration time controls) for the pipeline UI."""
        return [
            self.InputItem(
                name=_("屏蔽范围类型"),
                key="bk_alarm_shield_info",
                type="object",
                schema=ObjectItemSchema(description=_("屏蔽范围类型"), property_schemas={}),
            ),
            self.InputItem(
                name=_("策略 ID"),
                key="bk_alarm_shield_target",
                type="string",
                schema=StringItemSchema(description=_("需要执行屏蔽的指标")),
            ),
            self.InputItem(
                name=_("时间选择"),
                key="bk_alarm_time_type",
                type="string",
                schema=StringItemSchema(description=_("开始屏蔽的时间")),
            ),
            self.InputItem(
                name=_("屏蔽开始时间"),
                key="bk_alarm_shield_begin_time",
                type="string",
                schema=StringItemSchema(description=_("开始屏蔽的时间")),
            ),
            self.InputItem(
                name=_("屏蔽结束时间"),
                key="bk_alarm_end_time",
                type="string",
                schema=StringItemSchema(description=_("结束屏蔽的时间")),
            ),
            self.InputItem(
                name=_("屏蔽持续时间"),
                key="bk_alarm_shield_duration",
                type="string",
                schema=StringItemSchema(description=_("屏蔽持续的时间")),
            ),
        ]
def METHOD_NAME(self, bk_biz_id, scope_type, combine):
scope = {"business": "bk_alarm_shield_business", "IP": "bk_alarm_shield_IP", "node": "bk_alarm_shield_node"}
scope_value = combine.get(scope[scope_type])
return scope_value
    def execute(self, data, parent_data):
        """Build and send an alarm-shield request to bk-monitor.

        Reads scope/target/time inputs from *data*, resolves the time window,
        assembles the request body and delegates to send_request. Returns
        False on an invalid time configuration, else the send result.
        """
        bk_biz_id = parent_data.get_one_of_inputs("biz_cc_id")
        executor = parent_data.get_one_of_inputs("executor")
        client = BKMonitorClient(username=executor)
        combine = data.get_one_of_inputs("bk_alarm_shield_info")
        scope_type = combine.get("bk_alarm_shield_scope")
        scope_value = self.METHOD_NAME(bk_biz_id, scope_type, combine)
        target = data.get_one_of_inputs("bk_alarm_shield_target")
        begin_time = data.get_one_of_inputs("bk_alarm_shield_begin_time")
        end_time = data.get_one_of_inputs("bk_alarm_shield_end_time")
        time_type = int(data.get_one_of_inputs("bk_alarm_time_type"))
        shield_duration = data.get_one_of_inputs("bk_alarm_shield_duration")
        try:
            # Normalize the (type, begin, end, duration) inputs to a concrete window.
            begin_time, end_time = choose_time(time_type, begin_time, end_time, shield_duration)
        except ValueError:
            return False
        # Propagate the pipeline language to the client and translations.
        if parent_data.get_one_of_inputs("language"):
            setattr(client, "language", parent_data.get_one_of_inputs("language"))
            translation.activate(parent_data.get_one_of_inputs("language"))
        supplier_account = supplier_account_for_business(bk_biz_id)
        request_body = self.get_request_body(
            bk_biz_id, begin_time, end_time, scope_type, scope_value, executor, supplier_account
        )
        # "all" means shield every metric; otherwise restrict to the chosen ones.
        if "all" not in target:
            request_body["dimension_config"].update({"metric_id": target})
        result_flag = self.send_request(request_body, data, client)
        return result_flag
    def get_dimension_config(self, shied_type, shied_value, bk_biz_id, username, bk_supplier_account):
        """Dispatch to the dimension builder matching *shied_type*
        ('business' | 'IP' | 'node'); raises KeyError on unknown types."""
        dimension_map = {
            "business": self.get_biz_dimension,
            "IP": self.get_ip_dimension,
            "node": self.get_node_dimension,
        }
        return dimension_map[shied_type](shied_value, bk_biz_id, username, bk_supplier_account)
def get_request_body(self, bk_biz_id, begin_time, end_time, shied_type, shied_value, username, bk_supplier_account):
    """Assemble the complete shield request body for the given scope."""
    dimension_config = self.get_dimension_config(
        shied_type, shied_value, bk_biz_id, username, bk_supplier_account
    )
    return self.build_request_body(
        begin_time=begin_time,
        bk_biz_id=bk_biz_id,
        shied_type=shied_type,
        dimension_config=dimension_config,
        end_time=end_time,
    )
def get_ip_dimension(self, scope_value, bk_biz_id, username, bk_supplier_account):
    """Build the dimension config for an IP-scoped shield.

    NOTE(review): ``bk_supplier_account`` is accepted to match the dispatch
    signature but is NOT forwarded to the base implementation — confirm
    this is intentional.
    """
    ip_dimension = super(MonitorAlarmShieldServiceBase, self).get_ip_dimension_config(
        scope_value, bk_biz_id, username
    )
    return ip_dimension
@staticmethod
def get_biz_dimension(scope_value, bk_biz_id, username, bk_supplier_account):
    """Build the dimension config for a business-wide shield.

    All arguments are accepted only to match the dispatch signature used by
    ``get_dimension_config``; none of them affect the result.
    """
    return {"scope_type": "biz"}
@staticmethod
def get_node_dimension(scope_value, bk_biz_id, username, bk_supplier_account):
    """Build the dimension config for a node (set/module) scoped shield."""

    def picked(prefix):
        # Each of set/module is entered either via a select widget or as
        # free text; the *_method key says which widget was used.
        if scope_value["{}_method".format(prefix)] == "select":
            return scope_value["{}_select".format(prefix)]
        return scope_value["{}_text".format(prefix)]

    chosen_sets = picked("bk_set")
    chosen_modules = picked("bk_module")

    # Start from every set in the business; narrow down unless "all" was
    # selected.
    set_list = get_set_list(username, bk_biz_id, bk_supplier_account)
    if ALL_SELECTED_STR not in chosen_sets:
        set_list = get_list_by_selected_names(chosen_sets, set_list)

    # Same narrowing for service templates.
    service_template_list = get_service_template_list(username, bk_biz_id, bk_supplier_account)
    if ALL_SELECTED_STR not in chosen_modules:
        service_template_list = get_service_template_list_by_names(
            chosen_modules, service_template_list
        )

    # Resolve the (set, service template) selection into concrete module ids.
    module_ids = get_module_id_list_by_name(bk_biz_id, username, set_list, service_template_list)
    target = [
        {"bk_obj_id": "module", "bk_inst_id": module["bk_module_id"]}
        for module in module_ids
    ]
    return {"scope_type": "node", "target": target}
5,233 | wrapper | from __future__ import annotations
import contextvars
import functools
import logging
import typing as t
from timeit import default_timer
from typing import TYPE_CHECKING
from simple_di import Provide
from simple_di import inject
from bentoml._internal.configuration.containers import BentoMLContainer
from bentoml._internal.context import component_context
from bentoml.grpc.utils import import_generated_stubs
from bentoml.grpc.utils import import_grpc
from bentoml.grpc.utils import to_http_status
from bentoml.grpc.utils import wrap_rpc_handler
START_TIME_VAR: contextvars.ContextVar[float] = contextvars.ContextVar("START_TIME_VAR")
if TYPE_CHECKING:
import grpc
from grpc import aio
from bentoml._internal.server.metrics.prometheus import PrometheusClient
from bentoml.grpc.types import AsyncHandlerMethod
from bentoml.grpc.types import BentoServicerContext
from bentoml.grpc.types import HandlerCallDetails
from bentoml.grpc.types import Request
from bentoml.grpc.types import Response
from bentoml.grpc.types import RpcMethodHandler
from bentoml.grpc.v1 import service_pb2 as pb
else:
pb, _ = import_generated_stubs()
grpc, aio = import_grpc()
logger = logging.getLogger(__name__)
class PrometheusServerInterceptor(aio.ServerInterceptor):
    """
    An async interceptor for Prometheus metrics.
    """

    def __init__(self, *, namespace: str = "bentoml_api_server"):
        # Metric objects are created lazily on the first intercepted call.
        self._is_setup = False
        self.namespace = namespace

    @inject
    def _setup(
        self,
        metrics_client: PrometheusClient = Provide[BentoMLContainer.metrics_client],
        duration_buckets: tuple[float, ...] = Provide[
            BentoMLContainer.duration_buckets
        ],
    ):  # pylint: disable=attribute-defined-outside-init
        # Histogram of request latencies, bucketed per container configuration.
        self.metrics_request_duration = metrics_client.Histogram(
            namespace=self.namespace,
            name="request_duration_seconds",
            documentation="API GRPC request duration in seconds",
            labelnames=[
                "api_name",
                "service_name",
                "service_version",
                "http_response_code",
            ],
            buckets=duration_buckets,
        )
        # Monotonic counter of completed requests.
        self.metrics_request_total = metrics_client.Counter(
            namespace=self.namespace,
            name="request_total",
            documentation="Total number of GRPC requests",
            labelnames=[
                "api_name",
                "service_name",
                "service_version",
                "http_response_code",
            ],
        )
        # Gauge of requests currently in flight; "livesum" aggregates across
        # worker processes.
        self.metrics_request_in_progress = metrics_client.Gauge(
            namespace=self.namespace,
            name="request_in_progress",
            documentation="Total number of GRPC requests in progress now",
            labelnames=["api_name", "service_name", "service_version"],
            multiprocess_mode="livesum",
        )
        self._is_setup = True

    async def intercept_service(
        self,
        continuation: t.Callable[[HandlerCallDetails], t.Awaitable[RpcMethodHandler]],
        handler_call_details: HandlerCallDetails,
    ) -> RpcMethodHandler:
        """Wrap unary-unary handlers so each call records Prometheus metrics."""
        if not self._is_setup:
            self._setup()

        handler = await continuation(handler_call_details)

        if handler and (handler.response_streaming or handler.request_streaming):
            # Streaming RPCs are not instrumented; pass them through untouched.
            return handler

        # Record the interception time in a context variable so the wrapped
        # handler below can compute the request duration.
        START_TIME_VAR.set(default_timer())

        def METHOD_NAME(behaviour: AsyncHandlerMethod[Response]):
            @functools.wraps(behaviour)
            async def new_behaviour(
                request: Request, context: BentoServicerContext
            ) -> Response | t.Awaitable[Response]:
                if not isinstance(request, pb.Request):
                    # Non-BentoML requests (e.g. reflection) are not measured.
                    return await behaviour(request, context)

                api_name = request.api_name

                # instrument request total count
                self.metrics_request_total.labels(
                    api_name=api_name,
                    service_name=component_context.bento_name,
                    service_version=component_context.bento_version,
                    http_response_code=to_http_status(
                        t.cast(grpc.StatusCode, context.code())
                    ),
                ).inc()

                # instrument request duration
                assert START_TIME_VAR.get() != 0
                total_time = max(default_timer() - START_TIME_VAR.get(), 0)
                self.metrics_request_duration.labels(  # type: ignore (unfinished prometheus types)
                    api_name=api_name,
                    service_name=component_context.bento_name,
                    service_version=component_context.bento_version,
                    http_response_code=to_http_status(
                        t.cast(grpc.StatusCode, context.code())
                    ),
                ).observe(
                    total_time
                )
                # Reset so a stale start time is never reused.
                START_TIME_VAR.set(0)

                # instrument request in progress
                with self.metrics_request_in_progress.labels(
                    api_name=api_name,
                    service_version=component_context.bento_version,
                    service_name=component_context.bento_name,
                ).track_inprogress():
                    response = await behaviour(request, context)
                return response

            return new_behaviour

        return wrap_rpc_handler(METHOD_NAME, handler)
5,234 | set up | from homeassistant.components.binary_sensor import BinarySensorDeviceClass
from homeassistant.components.climate.const import ClimateEntityFeature, HVACMode
from homeassistant.components.number.const import NumberDeviceClass
from homeassistant.const import PRECISION_TENTHS, UnitOfTemperature
from ..const import BEOK_TR9B_PAYLOAD
from ..mixins.binary_sensor import MultiBinarySensorTests
from ..mixins.climate import TargetTemperatureTests
from ..mixins.lock import BasicLockTests
from ..mixins.number import MultiNumberTests
from ..mixins.select import MultiSelectTests
from ..mixins.switch import BasicSwitchTests
from .base_device_tests import TuyaDeviceTestCase
# Tuya datapoint (DPS) ids reported by the Beok TR9B thermostat.
POWER_DPS = "1"
HVACMODE_DPS = "2"
ANTIFROST_DPS = "10"
TEMPERATURE_DPS = "16"
MAXTEMP_DPS = "19"
UNIT_DPS = "23"
CURRENTTEMP_DPS = "24"
MINTEMP_DPS = "26"
SCHED_DPS = "31"
VALVE_DPS = "36"
LOCK_DPS = "40"
ERROR_DPS = "45"
# Datapoints observed on the device with unknown meaning; surfaced as extra
# state attributes by the tests below.
UNKNOWN101_DPS = "101"
UNKNOWN102_DPS = "102"
class TestBeokTR9BThermostat(
    MultiBinarySensorTests,
    BasicLockTests,
    MultiNumberTests,
    MultiSelectTests,
    BasicSwitchTests,
    TargetTemperatureTests,
    TuyaDeviceTestCase,
):
    """Tests for the Beok TR9B thermostat device configuration."""

    __test__ = True

    def METHOD_NAME(self):
        """Load the device config and wire up all mixin-based test helpers."""
        self.setUpForConfig(
            "beok_tr9b_thermostat.yaml",
            BEOK_TR9B_PAYLOAD,
        )
        self.subject = self.entities.get("climate")
        self.setUpTargetTemperature(
            TEMPERATURE_DPS,
            self.subject,
            min=41.0,
            max=99.0,
            scale=10,
            step=10,
        )
        self.setUpBasicLock(LOCK_DPS, self.entities.get("lock_child_lock"))
        self.setUpMultiSelect(
            [
                {
                    "dps": SCHED_DPS,
                    "name": "select_schedule",
                    "options": {
                        "5_2": "Weekday+Weekend",
                        "6_1": "Mon-Sat+Sun",
                        "7": "Daily",
                    },
                },
                {
                    "dps": UNIT_DPS,
                    "name": "select_temperature_unit",
                    "options": {
                        "c": "Celsius",
                        "f": "Fahrenheit",
                    },
                },
            ],
        )
        self.setUpBasicSwitch(
            ANTIFROST_DPS,
            self.entities.get("switch_anti_frost"),
        )
        self.setUpMultiBinarySensors(
            [
                {
                    "dps": ERROR_DPS,
                    "name": "binary_sensor_error",
                    "device_class": BinarySensorDeviceClass.PROBLEM,
                    "testdata": (1, 0),
                },
                {
                    "dps": VALVE_DPS,
                    "name": "binary_sensor_valve",
                    "device_class": BinarySensorDeviceClass.OPENING,
                    "testdata": ("open", "close"),
                },
            ],
        )
        self.setUpMultiNumber(
            [
                {
                    "dps": MINTEMP_DPS,
                    "name": "number_low_temperature_limit",
                    "device_class": NumberDeviceClass.TEMPERATURE,
                    "min": 5.0,
                    "max": 1000.0,
                    "step": 1.0,
                    "scale": 10,
                    "unit": UnitOfTemperature.CELSIUS,
                },
                {
                    "dps": MAXTEMP_DPS,
                    "name": "number_high_temperature_limit",
                    "device_class": NumberDeviceClass.TEMPERATURE,
                    "min": 5.0,
                    "max": 1000.0,
                    "step": 1.0,
                    "scale": 10,
                    "unit": UnitOfTemperature.CELSIUS,
                },
            ],
        )
        self.mark_secondary(
            [
                "binary_sensor_error",
                "binary_sensor_valve",
                "lock_child_lock",
                "number_low_temperature_limit",
                "number_high_temperature_limit",
                "select_schedule",
                "select_temperature_unit",
                "switch_anti_frost",
            ],
        )

    def test_supported_features(self):
        self.assertEqual(
            self.subject.supported_features,
            ClimateEntityFeature.TARGET_TEMPERATURE,
        )

    def test_temperature_unit(self):
        """Unit and target step follow the device's configured unit DPS."""
        self.dps[UNIT_DPS] = "c"
        self.assertEqual(
            self.subject.temperature_unit,
            UnitOfTemperature.CELSIUS,
        )
        self.assertEqual(self.subject.target_temperature_step, 0.5)
        self.dps[UNIT_DPS] = "f"
        self.assertEqual(
            self.subject.temperature_unit,
            UnitOfTemperature.FAHRENHEIT,
        )
        self.assertEqual(self.subject.target_temperature_step, 1.0)

    def test_precision(self):
        self.assertEqual(self.subject.precision, PRECISION_TENTHS)

    def test_current_temperature(self):
        # The raw DPS value is scaled by 10.
        self.dps[CURRENTTEMP_DPS] = 685
        self.assertEqual(self.subject.current_temperature, 68.5)

    def test_hvac_mode(self):
        self.dps[POWER_DPS] = False
        self.dps[HVACMODE_DPS] = "auto"
        self.assertEqual(self.subject.hvac_mode, HVACMode.OFF)
        self.dps[POWER_DPS] = True
        self.assertEqual(self.subject.hvac_mode, HVACMode.AUTO)
        self.dps[HVACMODE_DPS] = "manual"
        self.assertEqual(self.subject.hvac_mode, HVACMode.HEAT)

    def test_hvac_modes(self):
        self.assertCountEqual(
            self.subject.hvac_modes,
            [
                HVACMode.HEAT,
                HVACMode.AUTO,
                HVACMode.OFF,
            ],
        )

    # Override - since min and max are set by attributes, the range
    # allowed when setting is wider than normal. The thermostat seems
    # to be configurable as at least a water heater (to 212F), as tuya
    # doc says max 1000.0 (after scaling)
    async def test_set_target_temperature_fails_outside_valid_range(self):
        # Fix: these regex literals had useless f-string prefixes (no
        # placeholders); plain strings are equivalent.
        with self.assertRaisesRegex(
            ValueError,
            "temperature \\(4.5\\) must be between 5.0 and 1000.0",
        ):
            await self.subject.async_set_target_temperature(4.5)
        with self.assertRaisesRegex(
            ValueError,
            "temperature \\(1001\\) must be between 5.0 and 1000.0",
        ):
            await self.subject.async_set_target_temperature(1001)

    def test_extra_state_attributes(self):
        self.dps[ERROR_DPS] = 8
        self.dps[UNKNOWN101_DPS] = 101
        self.dps[UNKNOWN102_DPS] = 102
        self.assertDictEqual(
            self.subject.extra_state_attributes,
            {"Error Code": 8, "unknown_101": 101, "unknown_102": 102},
        )

    def test_icons(self):
        self.dps[LOCK_DPS] = True
        self.assertEqual(self.basicLock.icon, "mdi:hand-back-right-off")
        self.dps[LOCK_DPS] = False
        self.assertEqual(self.basicLock.icon, "mdi:hand-back-right")
5,235 | process | from maya import cmds
import pyblish.api
import openpype.hosts.maya.api.action
from openpype.hosts.maya.api.lib import (
maintained_selection,
delete_after,
undo_chunk,
get_attribute,
set_attribute
)
from openpype.pipeline.publish import (
OptionalPyblishPluginMixin,
RepairAction,
ValidateMeshOrder,
PublishValidationError
)
class ValidateMeshArnoldAttributes(pyblish.api.InstancePlugin,
                                   OptionalPyblishPluginMixin):
    """Validate the mesh has default Arnold attributes.

    It compares all Arnold attributes from a default mesh. This is to ensure
    later published looks can discover non-default Arnold attributes.
    """

    order = ValidateMeshOrder
    hosts = ["maya"]
    families = ["model"]
    label = "Mesh Arnold Attributes"
    actions = [
        openpype.hosts.maya.api.action.SelectInvalidAction,
        RepairAction
    ]
    optional = True

    # cache (will be `dict` when cached); shared across instances since it
    # lives on the class.
    arnold_mesh_defaults = None

    @classmethod
    def get_default_attributes(cls):
        """Return {attribute name: default value} read from a pristine mesh."""
        if cls.arnold_mesh_defaults is not None:
            # Use from cache
            return cls.arnold_mesh_defaults

        # Get default arnold attribute values for mesh type.
        defaults = {}
        with delete_after() as tmp:
            # Build a throwaway transform+mesh just to read its plug defaults.
            transform = cmds.createNode("transform", skipSelect=True)
            tmp.append(transform)
            mesh = cmds.createNode("mesh", parent=transform, skipSelect=True)
            arnold_attributes = cmds.listAttr(mesh,
                                              string="ai*",
                                              fromPlugin=True) or []
            for attr in arnold_attributes:
                plug = "{}.{}".format(mesh, attr)
                try:
                    defaults[attr] = get_attribute(plug)
                except PublishValidationError:
                    # Attribute could not be read via get_attribute; skip it.
                    cls.log.debug("Ignoring arnold attribute: {}".format(attr))

        cls.arnold_mesh_defaults = defaults  # assign cache
        return defaults

    @classmethod
    def get_invalid_attributes(cls, instance, compute=False):
        """Return mesh plugs whose value differs from the Arnold default.

        With ``compute=False`` the result previously stored on the instance
        is returned (empty list if never computed).
        """
        invalid = []
        if compute:
            meshes = cmds.ls(instance, type="mesh", long=True)
            if not meshes:
                return []

            # Compare the values against the defaults
            defaults = cls.get_default_attributes()
            for mesh in meshes:
                for attr_name, default_value in defaults.items():
                    plug = "{}.{}".format(mesh, attr_name)
                    if get_attribute(plug) != default_value:
                        invalid.append(plug)

            # Cache on the instance so actions/repair reuse the same result.
            instance.data["nondefault_arnold_attributes"] = invalid

        return instance.data.get("nondefault_arnold_attributes", [])

    @classmethod
    def get_invalid(cls, instance):
        """Return the sorted node names that own non-default attributes."""
        invalid_attrs = cls.get_invalid_attributes(instance, compute=False)
        invalid_nodes = set(attr.split(".", 1)[0] for attr in invalid_attrs)
        return sorted(invalid_nodes)

    @classmethod
    def repair(cls, instance):
        """Reset all cached non-default attributes back to their defaults."""
        with maintained_selection():
            with undo_chunk():
                defaults = cls.get_default_attributes()
                attributes = cls.get_invalid_attributes(
                    instance, compute=False
                )
                for attr in attributes:
                    node, attr_name = attr.split(".", 1)
                    value = defaults[attr_name]
                    set_attribute(
                        node=node,
                        attribute=attr_name,
                        value=value
                    )

    def METHOD_NAME(self, instance):
        """Plugin entry point: validate the instance, raising on non-defaults."""
        if not self.is_active(instance.data):
            return
        if not cmds.pluginInfo("mtoa", query=True, loaded=True):
            # Arnold attributes only exist if plug-in is loaded
            return

        invalid = self.get_invalid_attributes(instance, compute=True)
        if invalid:
            raise PublishValidationError(
                "Non-default Arnold attributes found in instance:"
                " {0}".format(invalid)
            )
5,236 | decompress | from django import forms
from django.core.exceptions import ValidationError
from parties.models import Party
from people.forms.fields import BallotInputWidget
from utils.widgets import SelectWithAttrs
def party_and_description_dict_from_string(value):
    """
    Given an input string in the form of "party" or "party__description"
    return a dict containing party and description name, id and objects.

    Falsy input is returned unchanged.  Raises ValidationError when the
    party id does not match a current party.
    """
    if not value:
        return value
    if "__" in value:
        party_id, description_id = value.split("__")
    else:
        party_id = value
        description_id = None
    try:
        # Keep the try narrow: only the party lookup should map to the
        # "not a current party" error (the description lookup raises a
        # different DoesNotExist class).
        party = Party.objects.current().get(ec_id__iexact=party_id.strip())
    except Party.DoesNotExist:
        raise ValidationError(f"'{value}' is not a current party identifier")
    ret = {
        "party_obj": party,
        "party_id": party.ec_id,
        "party_name": party.name,
        "description_id": None,
        "description_obj": None,
        "description_text": None,
    }
    if description_id:
        description = party.descriptions.get(pk=description_id)
        ret.update(
            {
                "description_id": description.pk,
                "description_obj": description,
                "description_text": description.description,
            }
        )
    return ret
class PartyIdentifierInput(forms.CharField):
    """CharField whose cleaned value is the party/description dict."""

    def clean(self, value):
        # Delegates validation and conversion to the shared parser.
        return party_and_description_dict_from_string(value)
class PartyChoiceField(forms.ChoiceField):
    """ChoiceField whose cleaned value is the party/description dict."""

    def clean(self, value):
        value = super().clean(value)
        return party_and_description_dict_from_string(value)

    def validate(self, value):
        """
        Because we don't always show all parties on the initial page load (we
        leave JavaScript to add the non-current parties sometimes), we need to
        ignore any input value for this field type. The MultiWidget will
        raise a ValidationError if the party isn't actually found, so there's
        no problem with ignoring validation here.
        """
        # NOTE(review): returning a bool (instead of raising) means Django
        # ignores this method's result; also ``value.strip()`` assumes a
        # string input — confirm callers never pass None here.
        try:
            Party.objects.current().get(ec_id__iexact=value.strip())
            return True
        except Party.DoesNotExist:
            return False
class PartySelectField(forms.MultiWidget):
    """Composite widget: a party <select> plus a free-text identifier input."""

    def __init__(self, choices, attrs=None):
        widgets = [
            SelectWithAttrs(
                choices=choices,
                # Disabled server-side; enabled client-side by JavaScript.
                attrs={"disabled": True, "class": "party_widget_select"},
            ),
            forms.TextInput(attrs={"class": "party_widget_input"}),
        ]
        super().__init__(widgets, attrs)

    def METHOD_NAME(self, value):
        """Split a single field value into the per-widget value list."""
        if value:
            return value
        return ["", ""]
class PreviousPartyAffiliationsField(forms.MultipleChoiceField):
    """Multiple-choice field listing prior party affiliations.

    Only populated for Welsh-run ballots; otherwise the choice list is empty.
    """

    def __init__(self, required=False, choices=None, *args, **kwargs):
        # The related membership determines which ballot (and therefore which
        # parties) the default choices are built from.
        self.membership = kwargs.pop("membership", None)
        choices = choices or self.get_previous_party_affiliations_choices()
        super().__init__(required=required, choices=choices, *args, **kwargs)

    def widget_attrs(self, widget):
        """
        Sets the class used to initialise select2
        """
        return {"class": "previous-party-affiliations"}

    def get_previous_party_affiliations_choices(self):
        """
        Return a party choices made up of parties that have been active any time
        within a year of the election date. Only applicable to welsh run ballots
        """
        if self.membership is None:
            return []

        ballot = self.membership.ballot
        if not ballot.is_welsh_run:
            return []

        parties = Party.objects.register("GB").active_in_last_year(
            date=ballot.election.election_date
        )
        return parties.values_list("ec_id", "name")
class PartyIdentifierField(forms.MultiValueField):
    """MultiValueField combining a party <select> and a free-text party id.

    The last non-empty value wins, so a typed identifier overrides the
    select widget.
    """

    def compress(self, data_list):
        # Keep only non-empty values. Guarding the filtered list (not just
        # data_list) fixes an IndexError when data_list was non-empty but
        # contained only empty strings.
        values = [v for v in data_list if v]
        if values:
            return self.to_python(values[-1])
        return None

    def __init__(self, *args, choices=None, **kwargs):
        if not choices:
            choices = [("", "")]
        kwargs["require_all_fields"] = False
        kwargs["label"] = kwargs.get("label", "Party")
        fields = (
            PartyChoiceField(required=False, disabled=True),
            PartyIdentifierInput(required=False),
        )
        super().__init__(fields, *args, **kwargs)
        self.widget = PartySelectField(choices=choices)
        # Keep the select widget and the underlying field in sync.
        self.widget.widgets[0].choices = choices
        self.fields[0].choices = choices

    def to_python(self, value):
        # Values are already normalised by the sub-fields; pass through
        # (the old empty-value branch was redundant).
        return value
class PopulatePartiesMixin:
    """Form mixin that fills every PartyIdentifierField with party choices.

    Choices are cached on the form instance so multiple party fields share
    a single queryset evaluation.
    """

    _cached_choices = None

    def __init__(self, **kwargs):
        # Callers may pre-compute choices (e.g. to share across forms).
        party_choices = kwargs.pop("party_choices", None)
        if party_choices:
            self._cached_choices = party_choices
        super().__init__(**kwargs)
        self.populate_parties()

    def populate_parties(self):
        register = None
        # Derive the party register (e.g. "GB") from any ballot field's
        # initial value, so only relevant parties are offered.
        for field_name, field_class in self.fields.items():
            if not isinstance(field_class.widget, BallotInputWidget):
                continue
            if field_name in self.initial:
                ballot = field_class.to_python(self.initial[field_name])
                register = ballot.post.party_set.slug.upper()

        # Populate the choices
        for field_name, field_class in self.fields.items():
            if not isinstance(field_class, PartyIdentifierField):
                continue
            choices_kwargs = {"include_description_ids": True}
            if field_name in self.initial:
                initial_for_field = self.initial[field_name]
                if not isinstance(initial_for_field, (list, tuple)):
                    raise ValueError("list or tuple required for initial")
                if len(initial_for_field) != 2:
                    continue
                extra_party_id = initial_for_field[1]
                if extra_party_id:
                    # Ensure a pre-selected (possibly non-current) party is
                    # present in the generated choice list.
                    choices_kwargs["extra_party_ids"] = ([extra_party_id],)
                    # Set the initial value of the select
                    self.initial[field_name][0] = extra_party_id
            if not self._cached_choices:
                base_qs = Party.objects.all().current()
                if register:
                    base_qs = base_qs.register(register)
                self._cached_choices = base_qs.party_choices(**choices_kwargs)
            choices = self._cached_choices
            # Rebuild the field so both its widget and sub-field share the
            # same choice list.
            self.fields[field_name] = PartyIdentifierField(choices=choices)
            self.fields[field_name].fields[0].choices = choices
5,237 | mapping file provider | # Copyright 2023 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Definition of j2objc_library rule.
"""
load(":common/cc/cc_helper.bzl", "cc_helper")
load(":common/objc/attrs.bzl", "common_attrs")
load(":common/objc/transitions.bzl", "apple_crosstool_transition")
load(":common/cc/semantics.bzl", "semantics")
load(":common/objc/providers.bzl", "J2ObjcEntryClassInfo", "J2ObjcMappingFileInfo")
load(":common/cc/cc_info.bzl", "CcInfo")
load(":common/cc/cc_common.bzl", "cc_common")
load(":common/objc/compilation_support.bzl", "compilation_support")
load(":common/objc/j2objc_aspect.bzl", "j2objc_aspect")
_MIGRATION_TAG = "__J2OBJC_LIBRARY_MIGRATION_DO_NOT_USE_WILL_BREAK__"
def _jre_deps_aspect_impl(_, ctx):
    """Validates that every jre_deps entry is tagged as a J2ObjC JRE library."""
    if "j2objc_jre_lib" not in ctx.rule.attr.tags:
        fail("in jre_deps attribute of j2objc_library rule: objc_library rule '%s' is misplaced here (Only J2ObjC JRE libraries are allowed)" %
             str(ctx.label).removeprefix("@"))
    return []

# Aspect applied to jre_deps to enforce the tag check above.
jre_deps_aspect = aspect(
    implementation = _jre_deps_aspect_impl,
)
def _check_entry_classes(ctx):
    """Fails when dead-code stripping is enabled but no entry classes are set."""
    entry_classes = ctx.attr.entry_classes
    remove_dead_code = ctx.fragments.j2objc.remove_dead_code()
    if remove_dead_code and not entry_classes:
        fail("Entry classes must be specified when flag --compilation_mode=opt is on in order to perform J2ObjC dead code stripping.")

def _entry_class_provider(entry_classes, deps):
    """Merges this rule's entry classes with those of its dependencies."""
    transitive_entry_classes = [dep[J2ObjcEntryClassInfo].entry_classes for dep in deps if J2ObjcEntryClassInfo in dep]
    return J2ObjcEntryClassInfo(entry_classes = depset(entry_classes, transitive = transitive_entry_classes))

def METHOD_NAME(deps):
    """Merges the J2ObjC mapping files provided by all dependencies."""
    infos = [dep[J2ObjcMappingFileInfo] for dep in deps if J2ObjcMappingFileInfo in dep]
    transitive_header_mapping_files = [info.header_mapping_files for info in infos]
    transitive_class_mapping_files = [info.class_mapping_files for info in infos]
    transitive_dependency_mapping_files = [info.dependency_mapping_files for info in infos]
    transitive_archive_source_mapping_files = [info.archive_source_mapping_files for info in infos]
    return J2ObjcMappingFileInfo(
        header_mapping_files = depset([], transitive = transitive_header_mapping_files),
        class_mapping_files = depset([], transitive = transitive_class_mapping_files),
        dependency_mapping_files = depset([], transitive = transitive_dependency_mapping_files),
        archive_source_mapping_files = depset([], transitive = transitive_archive_source_mapping_files),
    )
def j2objc_library_lockdown(ctx):
    """Fails unless migration is off or the caller carries the migration tag."""
    if not ctx.fragments.j2objc.j2objc_library_migration():
        return
    if _MIGRATION_TAG not in ctx.attr.tags:
        fail("j2objc_library is locked. Please do not use this rule since it will be deleted in the future.")

def _j2objc_library_impl(ctx):
    """Implementation of j2objc_library: merges providers from deps."""
    j2objc_library_lockdown(ctx)
    _check_entry_classes(ctx)
    common_variables = compilation_support.build_common_variables(
        ctx = ctx,
        toolchain = None,
        deps = ctx.attr.deps + ctx.attr.jre_deps,
        empty_compilation_artifacts = True,
        direct_cc_compilation_contexts = [dep[CcInfo].compilation_context for dep in ctx.attr.deps if CcInfo in dep],
    )
    return [
        _entry_class_provider(ctx.attr.entry_classes, ctx.attr.deps),
        METHOD_NAME(ctx.attr.deps),
        common_variables.objc_provider,
        CcInfo(
            compilation_context = common_variables.objc_compilation_context.create_cc_compilation_context(),
            linking_context = cc_common.merge_linking_contexts(linking_contexts = common_variables.objc_linking_context.cc_linking_contexts),
        ),
    ]
# Attributes specific to the j2objc_library rule.
J2OBJC_ATTRS = {
    "deps": attr.label_list(
        allow_rules = ["j2objc_library", "java_library", "java_import", "java_proto_library"],
        aspects = [j2objc_aspect],
    ),
    "entry_classes": attr.string_list(),
    "jre_deps": attr.label_list(
        allow_rules = ["objc_library"],
        aspects = [jre_deps_aspect],
    ),
}

j2objc_library = rule(
    _j2objc_library_impl,
    attrs = common_attrs.union(
        J2OBJC_ATTRS,
        common_attrs.CC_TOOLCHAIN_RULE,
    ),
    cfg = apple_crosstool_transition,
    fragments = ["apple", "cpp", "j2objc", "objc"] + semantics.additional_fragments(),
    toolchains = cc_helper.use_cpp_toolchain(),
    provides = [CcInfo, J2ObjcEntryClassInfo, J2ObjcMappingFileInfo],
)
5,238 | get async driver | # Copyright (c) "Neo4j"
# Neo4j Sweden AB [https://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import sys
from functools import wraps
import pytest
import pytest_asyncio
from neo4j import (
AsyncGraphDatabase,
GraphDatabase,
)
from neo4j.debug import watch
from . import env
# from neo4j.debug import watch
#
# watch("neo4j")
@pytest.fixture(scope="session")
def uri():
    # Connection URI of the server under test, from the environment config.
    return env.NEO4J_SERVER_URI


@pytest.fixture(scope="session")
def bolt_uri(uri):
    # URI restricted to the "bolt" scheme; skips the test otherwise.
    if env.NEO4J_SCHEME != "bolt":
        pytest.skip("Test requires bolt scheme")
    return uri


@pytest.fixture(scope="session")
def _forced_bolt_uri():
    # Bolt URI built directly from host/port, regardless of the configured
    # scheme.
    return f"bolt://{env.NEO4J_HOST}:{env.NEO4J_PORT}"
@pytest.fixture(scope="session")
def neo4j_uri(uri):
    """URI restricted to the "neo4j" scheme; skips the test otherwise."""
    if env.NEO4J_SCHEME != "neo4j":
        pytest.skip("Test requires neo4j scheme")
    # Fix: request the ``uri`` fixture as a parameter — the previous body
    # returned the module-level fixture *function* object, not the URI.
    return uri
@pytest.fixture(scope="session")
def _forced_neo4j_uri():
    # neo4j (routing) URI built directly from host/port.
    return f"neo4j://{env.NEO4J_HOST}:{env.NEO4J_PORT}"


@pytest.fixture(scope="session")
def auth():
    # (user, password) credentials tuple for the server under test.
    return env.NEO4J_USER, env.NEO4J_PASS
@pytest.fixture
def driver(uri, auth):
    # Sync driver for whatever scheme the environment configures.
    with GraphDatabase.driver(uri, auth=auth) as driver:
        yield driver


@pytest.fixture
def bolt_driver(bolt_uri, auth):
    with GraphDatabase.driver(bolt_uri, auth=auth) as driver:
        yield driver


@pytest.fixture
def neo4j_driver(neo4j_uri, auth):
    with GraphDatabase.driver(neo4j_uri, auth=auth) as driver:
        yield driver


# Thin alias so the async fixtures below read symmetrically to the sync ones.
@wraps(AsyncGraphDatabase.driver)
def METHOD_NAME(*args, **kwargs):
    return AsyncGraphDatabase.driver(*args, **kwargs)


@pytest_asyncio.fixture
async def async_driver(uri, auth):
    async with METHOD_NAME(uri, auth=auth) as driver:
        yield driver


@pytest_asyncio.fixture
async def async_bolt_driver(bolt_uri, auth):
    async with METHOD_NAME(bolt_uri, auth=auth) as driver:
        yield driver


@pytest_asyncio.fixture
async def async_neo4j_driver(neo4j_uri, auth):
    async with METHOD_NAME(neo4j_uri, auth=auth) as driver:
        yield driver
@pytest.fixture
def _forced_bolt_driver(_forced_bolt_uri, auth):
    """Driver over a direct bolt connection, ignoring the configured scheme.

    Fix: ``auth`` must be requested as a fixture parameter — the previous
    body passed the module-level fixture *function* object as credentials.
    """
    with GraphDatabase.driver(_forced_bolt_uri, auth=auth) as driver:
        yield driver


@pytest.fixture
def _forced_neo4j_driver(_forced_neo4j_uri, auth):
    """Driver over a forced neo4j (routing) connection; same auth fix."""
    with GraphDatabase.driver(_forced_neo4j_uri, auth=auth) as driver:
        yield driver
@pytest.fixture(scope="session")
def server_info(_forced_bolt_driver):
    # Server metadata fetched once per session over a direct bolt connection.
    return _forced_bolt_driver.get_server_info()


@pytest.fixture(scope="session")
def bolt_protocol_version(server_info):
    return server_info.protocol_version


def mark_requires_min_bolt_version(version="3.5"):
    """Skip marker for tests that need at least the given server version.

    NOTE(review): this compares version *strings* lexicographically, so e.g.
    "3.10" < "3.5" — confirm env.NEO4J_VERSION never has two-digit parts.
    """
    return pytest.mark.skipif(
        env.NEO4J_VERSION < version,
        reason=f"requires server version '{version}' or higher, "
        f"found '{env.NEO4J_VERSION}'"
    )


def mark_requires_edition(edition):
    """Skip marker for tests that need a specific server edition."""
    return pytest.mark.skipif(
        env.NEO4J_EDITION != edition,
        reason=f"requires server edition '{edition}', "
        f"found '{env.NEO4J_EDITION}'"
    )
@pytest.fixture
def session(driver):
    with driver.session() as session:
        yield session


@pytest.fixture
def bolt_session(bolt_driver):
    with bolt_driver.session() as session:
        yield session


@pytest.fixture
def neo4j_session(neo4j_driver):
    with neo4j_driver.session() as session:
        yield session


# async support for pytest-benchmark
# https://github.com/ionelmc/pytest-benchmark/issues/66
@pytest_asyncio.fixture
async def aio_benchmark(benchmark, event_loop):
    # Wraps a sync or async callable so pytest-benchmark can time either.
    def _wrapper(func, *args, **kwargs):
        if asyncio.iscoroutinefunction(func):
            @benchmark
            def _():
                return event_loop.run_until_complete(func(*args, **kwargs))
        else:
            benchmark(func, *args, **kwargs)

    return _wrapper


@pytest.fixture
def watcher():
    # Stream driver debug logging to stdout for the duration of a test.
    with watch("neo4j", out=sys.stdout, colour=True):
        yield
5,239 | n | #
# GPT - Grid Python Toolkit
# Copyright (C) 2020 Christoph Lehner (christoph.lehner@ur.de, https://github.com/lehner/gpt)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import gpt
# General block matrix with domain D and its complement C
#
# ( DD DC ) ( 1 DC CC^-1 ) ( Mpc 0 ) ( DD 0 )
# M = ( CD CC ) = ( 0 1 ) ( 0 1 ) ( CD CC )
#
# Mpc = 1 - DC CC^-1 CD DD^-1 (Schur complement two)
#
# Then
#
# det(M) = det(DD) det(CC) det(Mpc)
#
# and
#
# ( DD 0 )^-1 ( Mpc 0 )^-1 ( 1 DC CC^-1 )^-1
# M^-1 = ( CD CC ) ( 0 1 ) ( 0 1 )
#
# ( DD^-1 0 ) ( Mpc^-1 0 ) ( 1 - DC CC^-1 )
# = ( -CC^-1 CD DD^-1 CC^-1 ) ( 0 1 ) ( 0 1 )
#
# M^-1 = L Mpc^-1 R + S
#
# R = ( 1 - DC CC^-1 ) ; R^dag = ( 1 - DC CC^-1 )^dag
#
# ( DD^-1 )
# L = ( -CC^-1 CD DD^-1 )
#
# ( 0 0 )
# S = ( 0 CC^-1 )
#
# A2A:
#
# M^-1 = L |n><n| R + S = v w^dag + S ; -> v = L |n>, w = R^dag |n>
#
class schur_complement_two:
    """Schur-complement-two decomposition of a block operator (see the
    derivation in the header comments above this class).

    Exposes the factors L, R, S and the preconditioned operator Mpc such
    that M^-1 = L Mpc^-1 R + S.
    """

    def __init__(self, op, domain_decomposition):
        dd_op = domain_decomposition(op)
        # Block operators of the decomposition: DD/CC act within the domain D
        # and its complement C; CD/DC couple the two.
        DD = dd_op.DD
        CC = dd_op.CC
        CD = dd_op.CD
        DC = dd_op.DC
        D_domain = dd_op.D_domain
        C_domain = dd_op.C_domain
        op_vector_space = op.vector_space[0]
        D_vector_space = DD.vector_space[0]
        C_vector_space = CC.vector_space[0]
        # Scratch lattices shared (and overwritten) by the closures below.
        tmp_d = [D_vector_space.lattice() for i in range(2)]
        tmp_c = [C_vector_space.lattice() for i in range(2)]

        def METHOD_NAME(o_d, i_d):
            # Mpc = 1 - DC CC^-1 CD DD^-1 applied to a D-domain vector.
            DD.inv_mat(tmp_d[0], i_d)
            CD.mat(tmp_c[0], tmp_d[0])
            CC.inv_mat(tmp_c[1], tmp_c[0])
            DC.mat(o_d, tmp_c[1])
            o_d @= i_d - o_d

        def _N_dag(o_d, i_d):
            # Adjoint of Mpc: same chain with adjoint factors in reverse.
            DC.adj_mat(tmp_c[0], i_d)
            CC.adj_inv_mat(tmp_c[1], tmp_c[0])
            CD.adj_mat(tmp_d[0], tmp_c[1])
            DD.adj_inv_mat(o_d, tmp_d[0])
            o_d @= i_d - o_d

        def _L(o, i_d):
            # L = (DD^-1 ; -CC^-1 CD DD^-1), promoted into the full lattice.
            DD.inv_mat(tmp_d[0], i_d)
            D_domain.promote(o, tmp_d[0])
            CD.mat(tmp_c[0], tmp_d[0])
            CC.inv_mat(tmp_c[1], tmp_c[0])
            tmp_c[1] @= -tmp_c[1]
            C_domain.promote(o, tmp_c[1])

        def _L_pseudo_inverse(o_d, i):
            # Pseudo-inverse of L: DD applied to the D-domain projection.
            D_domain.project(tmp_d[0], i)
            DD.mat(o_d, tmp_d[0])

        def _S(o, i):
            # S = (0 0 ; 0 CC^-1): C components get CC^-1, D components zero.
            C_domain.project(tmp_c[0], i)
            CC.inv_mat(tmp_c[1], tmp_c[0])
            C_domain.promote(o, tmp_c[1])
            tmp_d[0][:] = 0
            D_domain.promote(o, tmp_d[0])

        self.L = gpt.matrix_operator(
            mat=_L,
            inv_mat=_L_pseudo_inverse,
            vector_space=(op_vector_space, D_vector_space),
        )

        def _R(o_d, i):
            # R = (1  -DC CC^-1) mapping a full vector into the D domain.
            C_domain.project(tmp_c[0], i)
            D_domain.project(tmp_d[0], i)
            CC.inv_mat(tmp_c[1], tmp_c[0])
            DC.mat(o_d, tmp_c[1])
            o_d @= tmp_d[0] - o_d

        def _R_dag(o, i_d):
            # Adjoint of R, promoting back into the full lattice.
            D_domain.promote(o, i_d)
            DC.adj_mat(tmp_c[0], i_d)
            tmp_c[0] @= -tmp_c[0]
            CC.adj_inv_mat(tmp_c[1], tmp_c[0])
            C_domain.promote(o, tmp_c[1])

        self.R = gpt.matrix_operator(
            mat=_R, adj_mat=_R_dag, vector_space=(D_vector_space, op_vector_space)
        )
        self.S = gpt.matrix_operator(mat=_S, vector_space=(op_vector_space, op_vector_space))
        # inherit() lets derived operators (e.g. updated gauge fields) rebuild
        # their own preconditioned form.
        self.Mpc = gpt.matrix_operator(
            mat=METHOD_NAME, adj_mat=_N_dag, vector_space=(D_vector_space, D_vector_space)
        ).inherit(op, lambda nop: schur_complement_two(nop, domain_decomposition).Mpc)
5,240 | test databricks notebook run context in context | from unittest import mock
from mlflow.entities import SourceType
from mlflow.tracking.context.databricks_notebook_context import DatabricksNotebookRunContext
from mlflow.utils.mlflow_tags import (
MLFLOW_DATABRICKS_NOTEBOOK_ID,
MLFLOW_DATABRICKS_NOTEBOOK_PATH,
MLFLOW_DATABRICKS_WEBAPP_URL,
MLFLOW_DATABRICKS_WORKSPACE_ID,
MLFLOW_DATABRICKS_WORKSPACE_URL,
MLFLOW_SOURCE_NAME,
MLFLOW_SOURCE_TYPE,
)
from tests.helper_functions import multi_context
def METHOD_NAME():
    """in_context() must simply forward whatever is_in_databricks_notebook reports."""
    target = "mlflow.utils.databricks_utils.is_in_databricks_notebook"
    with mock.patch(target) as in_notebook_mock:
        context = DatabricksNotebookRunContext()
        assert context.in_context() == in_notebook_mock.return_value
def test_databricks_notebook_run_context_tags():
    """tags() must expose the notebook/webapp/workspace metadata reported by
    databricks_utils, falling back to the dbutils-derived workspace URL when
    ``get_workspace_url`` returns None."""
    patch_notebook_id = mock.patch("mlflow.utils.databricks_utils.get_notebook_id")
    patch_notebook_path = mock.patch("mlflow.utils.databricks_utils.get_notebook_path")
    patch_webapp_url = mock.patch("mlflow.utils.databricks_utils.get_webapp_url")
    patch_workspace_url = mock.patch(
        "mlflow.utils.databricks_utils.get_workspace_url",
        return_value="https://dev.databricks.com",
    )
    patch_workspace_url_none = mock.patch(
        "mlflow.utils.databricks_utils.get_workspace_url", return_value=None
    )
    patch_workspace_info = mock.patch(
        "mlflow.utils.databricks_utils.get_workspace_info_from_dbutils",
        return_value=("https://databricks.com", "123456"),
    )
    # Case 1: get_workspace_url yields a value -> it is used verbatim.
    with multi_context(
        patch_notebook_id,
        patch_notebook_path,
        patch_webapp_url,
        patch_workspace_url,
        patch_workspace_info,
    ) as (
        notebook_id_mock,
        notebook_path_mock,
        webapp_url_mock,
        workspace_url_mock,
        workspace_info_mock,
    ):
        assert DatabricksNotebookRunContext().tags() == {
            MLFLOW_SOURCE_NAME: notebook_path_mock.return_value,
            MLFLOW_SOURCE_TYPE: SourceType.to_string(SourceType.NOTEBOOK),
            MLFLOW_DATABRICKS_NOTEBOOK_ID: notebook_id_mock.return_value,
            MLFLOW_DATABRICKS_NOTEBOOK_PATH: notebook_path_mock.return_value,
            MLFLOW_DATABRICKS_WEBAPP_URL: webapp_url_mock.return_value,
            MLFLOW_DATABRICKS_WORKSPACE_URL: workspace_url_mock.return_value,
            MLFLOW_DATABRICKS_WORKSPACE_ID: workspace_info_mock.return_value[1],
        }
    # Case 2: get_workspace_url yields None -> fall back to the dbutils URL.
    with multi_context(
        patch_notebook_id,
        patch_notebook_path,
        patch_webapp_url,
        patch_workspace_url_none,
        patch_workspace_info,
    ) as (
        notebook_id_mock,
        notebook_path_mock,
        webapp_url_mock,
        workspace_url_mock,
        workspace_info_mock,
    ):
        assert DatabricksNotebookRunContext().tags() == {
            MLFLOW_SOURCE_NAME: notebook_path_mock.return_value,
            MLFLOW_SOURCE_TYPE: SourceType.to_string(SourceType.NOTEBOOK),
            MLFLOW_DATABRICKS_NOTEBOOK_ID: notebook_id_mock.return_value,
            MLFLOW_DATABRICKS_NOTEBOOK_PATH: notebook_path_mock.return_value,
            MLFLOW_DATABRICKS_WEBAPP_URL: webapp_url_mock.return_value,
            MLFLOW_DATABRICKS_WORKSPACE_URL: workspace_info_mock.return_value[0],  # fallback value
            MLFLOW_DATABRICKS_WORKSPACE_ID: workspace_info_mock.return_value[1],
        }
def test_databricks_notebook_run_context_tags_nones():
    """When every Databricks lookup returns None, only the source name/type
    tags are emitted (workspace/webapp tags are omitted entirely)."""
    none_patches = [
        mock.patch("mlflow.utils.databricks_utils.get_notebook_id", return_value=None),
        mock.patch("mlflow.utils.databricks_utils.get_notebook_path", return_value=None),
        mock.patch("mlflow.utils.databricks_utils.get_webapp_url", return_value=None),
        mock.patch(
            "mlflow.utils.databricks_utils.get_workspace_info_from_dbutils",
            return_value=(None, None),
        ),
    ]
    with none_patches[0], none_patches[1], none_patches[2], none_patches[3]:
        expected = {
            MLFLOW_SOURCE_NAME: None,
            MLFLOW_SOURCE_TYPE: SourceType.to_string(SourceType.NOTEBOOK),
        }
        assert DatabricksNotebookRunContext().tags() == expected
5,241 | test phase started draft no email | from datetime import timedelta
import pytest
from dateutil.parser import parse
from django.contrib.contenttypes.models import ContentType
from django.core import mail
from django.core.management import call_command
from freezegun import freeze_time
from adhocracy4.actions.models import Action
from adhocracy4.actions.verbs import Verbs
from adhocracy4.phases.models import Phase
from adhocracy4.test.helpers import freeze_phase
from adhocracy4.test.helpers import setup_phase
from meinberlin.apps.budgeting import phases
from meinberlin.apps.offlineevents.models import OfflineEvent
from meinberlin.config import settings
START = Verbs.START.value

# Hours before an offline event at which the "starting soon" action/email is
# created. Read from settings, defaulting to 72h when the deployment does not
# configure ACTIONS_OFFLINE_EVENT_STARTING_HOURS.
# (Replaces a dead `= 0` initialisation plus hasattr/if-else with the
# equivalent getattr-with-default idiom.)
EVENT_STARTING_HOURS = getattr(settings, "ACTIONS_OFFLINE_EVENT_STARTING_HOURS", 72)
@pytest.mark.django_db
def test_event_soon_email(offline_event_factory):
    """An offline event entering its EVENT_STARTING_HOURS window triggers
    exactly one follower-notification email."""
    EVENT_DATE = parse("2020-01-05 17:00:00 UTC")
    # The action fires EVENT_STARTING_HOURS before the event; freeze time 30min
    # into that window so the management command picks the event up.
    ACTION_DATE = EVENT_DATE - timedelta(hours=EVENT_STARTING_HOURS)
    CURRENT_DATE = ACTION_DATE + timedelta(minutes=30)
    content_type = ContentType.objects.get_for_model(OfflineEvent)
    offline_event_factory(
        date=EVENT_DATE,
    )
    # Sanity check: no start action exists before the command runs.
    action_count = Action.objects.filter(
        verb=START, obj_content_type=content_type
    ).count()
    assert action_count == 0
    with freeze_time(CURRENT_DATE):
        call_command("create_offlineevent_system_actions")
    # NotifyFollowersOnUpcomingEventEmail
    assert len(mail.outbox) == 1
    assert mail.outbox[0].subject.startswith("Einladung zu einer Veranstaltung")
@pytest.mark.django_db
def test_event_soon_draft_no_email(offline_event_factory):
    """Events belonging to a draft project must generate neither a start
    action nor a notification email."""
    EVENT_DATE = parse("2020-01-05 17:00:00 UTC")
    ACTION_DATE = EVENT_DATE - timedelta(hours=EVENT_STARTING_HOURS)
    CURRENT_DATE = ACTION_DATE + timedelta(minutes=30)
    content_type = ContentType.objects.get_for_model(OfflineEvent)
    event = offline_event_factory(
        date=EVENT_DATE,
    )
    # Mark the owning project as draft — this is what suppresses the email.
    project = event.project
    project.is_draft = True
    project.save()
    project.refresh_from_db()
    action_count = Action.objects.filter(
        verb=START, obj_content_type=content_type
    ).count()
    assert action_count == 0
    with freeze_time(CURRENT_DATE):
        call_command("create_offlineevent_system_actions")
    # No action may have been created for the draft project's event.
    action_count = Action.objects.filter(
        verb=START, obj_content_type=content_type
    ).count()
    assert action_count == 0
    # NotifyFollowersOnUpcomingEventEmail
    assert len(mail.outbox) == 0
@pytest.mark.django_db
def test_phase_started_email(apiclient, phase_factory, proposal_factory):
    """A phase that has just started produces one start action and one
    "Los geht's" notification email."""
    phase, module, project, proposal = setup_phase(
        phase_factory, proposal_factory, phases.VotingPhase
    )
    # Extend the phase so it is still running while time is frozen inside it.
    phase.end_date += timedelta(hours=48)
    phase.save()
    phase.refresh_from_db()
    content_type = ContentType.objects.get_for_model(Phase)
    action_count = Action.objects.filter(
        verb=START, obj_content_type=content_type
    ).count()
    assert action_count == 0
    with freeze_phase(phase):
        call_command("create_system_actions")
        action_count = Action.objects.filter(
            verb=START, obj_content_type=content_type
        ).count()
        assert action_count == 1
        assert len(mail.outbox) == 1
        assert mail.outbox[0].subject.startswith("Los geht's:")
@pytest.mark.django_db
def METHOD_NAME(apiclient, phase_factory, proposal_factory):
    """A started phase inside a *draft* project must create no start action
    and send no email (draft counterpart of test_phase_started_email)."""
    phase, module, project, proposal = setup_phase(
        phase_factory, proposal_factory, phases.VotingPhase
    )
    # Mark the owning project as draft — this is what suppresses the action.
    project = phase.module.project
    project.is_draft = True
    project.save()
    project.refresh_from_db()
    phase.end_date += timedelta(hours=48)
    phase.save()
    phase.refresh_from_db()
    content_type = ContentType.objects.get_for_model(Phase)
    action_count = Action.objects.filter(
        verb=START, obj_content_type=content_type
    ).count()
    assert action_count == 0
    with freeze_phase(phase):
        call_command("create_system_actions")
        action_count = Action.objects.filter(
            verb=START, obj_content_type=content_type
        ).count()
        assert action_count == 0
        assert len(mail.outbox) == 0
5,242 | exit code | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""AiiDA specific implementation of plumpy's ProcessSpec."""
from typing import Optional
import plumpy.process_spec
from aiida.orm import Dict
from .METHOD_NAME import ExitCode, ExitCodesNamespace
from .ports import CalcJobOutputPort, InputPort, PortNamespace
__all__ = ('ProcessSpec', 'CalcJobProcessSpec')
class ProcessSpec(plumpy.process_spec.ProcessSpec):
    """Default process spec for process classes defined in `aiida-core`.

    This sub class defines custom classes for input ports and port namespaces. It also adds support for the definition
    of exit codes and retrieving them subsequently.
    """

    METADATA_KEY: str = 'metadata'
    METADATA_OPTIONS_KEY: str = 'options'
    INPUT_PORT_TYPE = InputPort
    PORT_NAMESPACE_TYPE = PortNamespace

    def __init__(self) -> None:
        super().__init__()
        self._exit_codes = ExitCodesNamespace()

    @property
    def metadata_key(self) -> str:
        """Return the key under which process metadata is stored in the inputs."""
        return self.METADATA_KEY

    @property
    def options_key(self) -> str:
        """Return the key under which the options are stored within the metadata namespace."""
        return self.METADATA_OPTIONS_KEY

    @property
    def exit_codes(self) -> ExitCodesNamespace:
        """
        Return the namespace of exit codes defined for this ProcessSpec

        :returns: ExitCodesNamespace of ExitCode named tuples
        """
        return self._exit_codes

    def METHOD_NAME(self, status: int, label: str, message: str, invalidates_cache: bool = False) -> None:
        """
        Add an exit code to the ProcessSpec

        :param status: the exit status integer
        :param label: a label by which the exit code can be addressed
        :param message: a more detailed description of the exit code
        :param invalidates_cache: when set to `True`, a process exiting
            with this exit code will not be considered for caching
        :raises TypeError: if any argument is of an incorrect type
        :raises ValueError: if ``status`` is negative
        """
        if not isinstance(status, int):
            raise TypeError(f'status should be of integer type and not of {type(status)}')

        if status < 0:
            # BUG FIX: previously interpolated `type(status)` here, which is
            # useless after the isinstance check above — report the value itself.
            raise ValueError(f'status should be a positive integer, received {status}')

        if not isinstance(label, str):
            raise TypeError(f'label should be of str type and not of {type(label)}')

        if not isinstance(message, str):
            raise TypeError(f'message should be of str type and not of {type(message)}')

        if not isinstance(invalidates_cache, bool):
            raise TypeError(f'invalidates_cache should be of type bool and not of {type(invalidates_cache)}')

        self._exit_codes[label] = ExitCode(status, message, invalidates_cache=invalidates_cache)

    # override return type to aiida's PortNamespace subclass
    @property
    def ports(self) -> PortNamespace:
        return super().ports  # type: ignore[return-value]

    @property
    def inputs(self) -> PortNamespace:
        return super().inputs  # type: ignore[return-value]

    @property
    def outputs(self) -> PortNamespace:
        return super().outputs  # type: ignore[return-value]
class CalcJobProcessSpec(ProcessSpec):
    """Process spec intended for the `CalcJob` process class."""

    OUTPUT_PORT_TYPE = CalcJobOutputPort

    def __init__(self) -> None:
        super().__init__()
        # Name of the output port whose node acts as the default output, if any.
        self._default_output_node: Optional[str] = None

    @property
    def default_output_node(self) -> Optional[str]:
        """Return the name of the default output port, or ``None`` if unset."""
        return self._default_output_node

    @default_output_node.setter
    def default_output_node(self, port_name: str) -> None:
        """Set the default output port; it must exist and have ``Dict`` as valid type."""
        if port_name not in self.outputs:
            raise ValueError(f'{port_name} is not a registered output port')

        actual = self.outputs[port_name].valid_type
        expected = Dict

        if actual is not expected:
            raise ValueError(
                f'the valid type of a default output has to be a {expected} but it is {actual}'
            )

        self._default_output_node = port_name
5,243 | update ok | #!/usr/bin/python
# -*- coding: latin-1 -*-
# Copyright 2016 Telefonica Investigacion y Desarrollo, S.A.U
#
# This file is part of Orion Context Broker.
#
# Orion Context Broker is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Orion Context Broker is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Orion Context Broker. If not, see http://www.gnu.org/licenses/.
#
# For those usages not covered by this license please contact with
# iot_support at tid dot es
# This script is inspired by old attrsvector2attrsobject.py
__author__ = 'fermin'
from pymongo import MongoClient
import json
import sys
from time import sleep
CONDITIONS = "conditions"
def METHOD_NAME(doc, n_conditions):
    """
    Check that csub document was updated correctly at DB.
    :param doc: the doc to check
    :param n_conditions: the expected number of conditions in the document
    :return: True if the check is ok, False otherwise
    """
    # Document must still carry a conditions field after the update.
    if (not CONDITIONS in doc):
        # NOTE(review): leftover debug output — consider removing or making it descriptive.
        print "debug1"
        return False
    # The rewritten conditions vector must have the expected length.
    ck_conditions = len(doc[CONDITIONS])
    if (n_conditions != ck_conditions):
        # NOTE(review): leftover debug output.
        print "debug 2: %d %d" % (n_conditions, ck_conditions)
        return False
    return True
# ---- main migration script (Python 2) --------------------------------------
# Usage: <script> <database-name>. Rewrites every csub document so that all
# per-condition condValues are merged into a single flat `conditions` vector.
if len(sys.argv) != 2:
    print "invalid number of arguments, please check https://fiware-orion.readthedocs.io/en/master/admin/upgrading_crossing_1-3-0/index.html"
    sys.exit()

DB = sys.argv[1]
COL = 'csubs'

# Warn user
print "WARNING!!!! This script modifies your '%s' database. It is STRONGLY RECOMMENDED that you" % DB
print "do a backup of your database before using it as described in https://fiware-orion.readthedocs.io/en/master/admin/database_admin/index.html#backup. Use this script at your own risk."
print "If you are sure you want to continue type 'yes' and press Enter"

confirm = raw_input()
if (confirm != 'yes'):
    sys.exit()

client = MongoClient('localhost', 27017)
db = client[DB]

# Counters for the final summary report; one per skip/outcome reason.
need_fix = False
skipped_conditions_not_found = 0
skipped_conditions_not_vector = 0
skipped_conditions_empty = 0
skipped_conditions_without_object = 0
skipped_conditions_without_value = 0
skipped_conditions_without_type = 0
skipped_value_not_vector = 0
skipped_invalid_type = 0
changed = 0
error = 0

processed = 0
total = db[COL].count()

print "- processing csubs collection (%d csubs) merging all condValues into a single conditions vector... " % total

# The sort() is a way of ensuring that a modified document doesn't enters again at the end of the cursor (we have
# observed that this may happen with large collections, e.g ~50,000 entities). In addition, we have to use
# batch_size so the cursor doesn't expires at server (see http://stackoverflow.com/questions/10298354/mongodb-cursor-id-not-valid-error).
# The used batch_size value is an heuristic
for doc in db[COL].find().sort([('_id', 1)]).batch_size(100):

    processed += 1

    sys.stdout.write('- processing csub: %d/%d \r' % (processed, total) )
    sys.stdout.flush()

    # NOTE(review): the three counters below are never used anywhere in this
    # script — they look like leftovers from the script this one was derived from.
    n_sum_md_names = 0
    n_sum_mds = 0
    n_attrs = 0

    # Does conditions field exist?
    if not CONDITIONS in doc:
        print '- %d: csub without conditions field: %s. Skipping' % (processed, doc['_id'])
        skipped_conditions_not_found += 1
        continue  # csubs loop

    # Is it a vector?
    if not isinstance(doc[CONDITIONS], list):
        print '- %d: csub conditions field is not a vector: %s. Skipping' % (processed, doc['_id'])
        skipped_conditions_not_vector += 1
        continue  # csubs loop

    # Is empty?
    if len(doc[CONDITIONS]) == 0:
        print '- %d: csub conditions is empty: %s. Skipping' % (processed, doc['_id'])
        skipped_conditions_empty += 1
        continue  # csubs loop

    # Validate each condition and flatten all condition values into one list.
    to_skip = False
    new_conditions = [ ]
    for cond in doc[CONDITIONS]:
        # Is it a object?
        if not isinstance(cond, dict):
            print '- %d: csub has condition that is not an object: %s. Skipping' % (processed, doc['_id'])
            skipped_conditions_without_object += 1
            to_skip = True
            break  # conds loop

        # Has type?
        if not 'type' in cond:
            print '- %d: csub has condition without type: %s. Skipping' % (processed, doc['_id'])
            skipped_conditions_without_type += 1
            to_skip = True
            break  # conds loop

        # Is ONCHANGE type?
        if cond['type'] != "ONCHANGE":
            print '- %d: csub has invalid type "%s": %s. Skipping' % (processed, cond['type'], doc['_id'])
            skipped_invalid_type += 1
            to_skip = True
            break  # conds loop

        # Has value?
        if not 'value' in cond:
            print '- %d: csub has condition without value: %s. Skipping' % (processed, doc['_id'])
            skipped_conditions_without_value += 1
            to_skip = True
            break  # conds loop

        # Is value a vector?
        if not isinstance(cond['value'], list):
            print '- %d: csub has condition which value is not a vector: %s. Skipping' % (processed, doc['_id'])
            skipped_value_not_vector += 1
            to_skip = True
            break  # conds loop

        for value in cond['value']:
            new_conditions.append(value)

    # Need to skip to next csub?
    if to_skip:
        continue  # csub loop

    # Update document with the new condtions field
    db[COL].update({'_id': doc['_id']}, {'$set': {CONDITIONS: new_conditions}})

    # Check update was ok (this is not an exhaustive checking that is better than nothing :)
    check_doc = db[COL].find_one(doc['_id'])
    if METHOD_NAME(check_doc, len(new_conditions)):
        changed += 1
    else:
        print "- %d: ERROR: document <%s> change attempt failed!" % (processed, json.dumps(check_doc['_id']))
        need_fix = True
        error += 1

# Final summary report.
skipped = skipped_conditions_not_found + skipped_conditions_not_vector + skipped_conditions_empty + skipped_conditions_without_object + skipped_conditions_without_value + skipped_conditions_without_type + skipped_invalid_type + skipped_value_not_vector

print '- processing entity: %d/%d' % (processed, total)
print '- documents processed:   %d' % processed
print '  * changed:             %d' % changed
print '  * skipped:             %d' % skipped
print '    - conditions not found     %d' % skipped_conditions_not_found
print '    - conditions not a vector  %d' % skipped_conditions_not_vector
print '    - empty conditions:        %d' % skipped_conditions_empty
print '    - condition not object     %d' % skipped_conditions_without_object
print '    - condition w/o value      %d' % skipped_conditions_without_value
print '    - condition w/o type       %d' % skipped_conditions_without_type
print '    - invalid type             %d' % skipped_invalid_type
print '    - value not a vector       %d' % skipped_value_not_vector
print '  * error:               %d' % error

if skipped > 0:
    print "------------------------------------------------------"
    print "WARNING: some csub were skipped. Please check the documentation at https://fiware-orion.readthedocs.io/en/master/admin/upgrading_crossing_1-3-0/index.html"
5,244 | missing callback | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Functions to delete entities from the database, preserving provenance integrity."""
import logging
from typing import Callable, Iterable, Set, Tuple, Union
from aiida.common.log import AIIDA_LOGGER
from aiida.manage import get_manager
from aiida.orm import Group, Node, QueryBuilder
from aiida.tools.graph.graph_traversers import get_nodes_delete
__all__ = ('DELETE_LOGGER', 'delete_nodes', 'delete_group_nodes')
DELETE_LOGGER = AIIDA_LOGGER.getChild('delete')
def delete_nodes(
    pks: Iterable[int],
    dry_run: Union[bool, Callable[[Set[int]], bool]] = True,
    backend=None,
    **traversal_rules: bool
) -> Tuple[Set[int], bool]:
    """Delete nodes given a list of "starting" PKs.

    This command will delete not only the specified nodes, but also the ones that are
    linked to these and should be also deleted in order to keep a consistent provenance
    according to the rules explained in the Topics - Provenance section of the documentation.
    In summary:

    1. If a DATA node is deleted, any process nodes linked to it will also be deleted.

    2. If a CALC node is deleted, any incoming WORK node (callers) will be deleted as
    well whereas any incoming DATA node (inputs) will be kept. Outgoing DATA nodes
    (outputs) will be deleted by default but this can be disabled.

    3. If a WORK node is deleted, any incoming WORK node (callers) will be deleted as
    well, but all DATA nodes will be kept. Outgoing WORK or CALC nodes will be kept by
    default, but deletion of either of both kind of connected nodes can be enabled.

    These rules are 'recursive', so if a CALC node is deleted, then its output DATA
    nodes will be deleted as well, and then any CALC node that may have those as
    inputs, and so on.

    :param pks: a list of starting PKs of the nodes to delete
        (the full set will be based on the traversal rules)

    :param dry_run:
        If True, return the pks to delete without deleting anything.
        If False, delete the pks without confirmation
        If callable, a function that return True/False, based on the pks, e.g. ``dry_run=lambda pks: True``

    :param traversal_rules: graph traversal rules.
        See :const:`aiida.common.links.GraphTraversalRules` for what rule names
        are toggleable and what the defaults are.

    :returns: (pks to delete, whether they were deleted)

    """
    backend = backend or get_manager().get_profile_storage()

    # pylint: disable=too-many-arguments,too-many-branches,too-many-locals,too-many-statements

    def METHOD_NAME(_pks: Iterable[int]):
        # Invoked by the traverser for PKs that do not exist: warn but continue.
        for _pk in _pks:
            DELETE_LOGGER.warning(f'warning: node with pk<{_pk}> does not exist, skipping')

    # Expand the starting PKs to the full closure mandated by the traversal rules.
    pks_set_to_delete = get_nodes_delete(
        pks, get_links=False, missing_callback=METHOD_NAME, backend=backend, **traversal_rules
    )['nodes']

    DELETE_LOGGER.report('%s Node(s) marked for deletion', len(pks_set_to_delete))

    # At DEBUG level, list every node that would be removed.
    if pks_set_to_delete and DELETE_LOGGER.level == logging.DEBUG:
        builder = QueryBuilder(
            backend=backend
        ).append(Node, filters={'id': {
            'in': pks_set_to_delete
        }}, project=('uuid', 'id', 'node_type', 'label'))
        DELETE_LOGGER.debug('Node(s) to delete:')
        for uuid, pk, type_string, label in builder.iterall():
            try:
                short_type_string = type_string.split('.')[-2]
            except IndexError:
                short_type_string = type_string
            DELETE_LOGGER.debug(f'  {uuid} {pk} {short_type_string} {label}')

    if dry_run is True:
        DELETE_LOGGER.report('This was a dry run, exiting without deleting anything')
        return (pks_set_to_delete, False)

    # confirm deletion
    if callable(dry_run) and dry_run(pks_set_to_delete):
        DELETE_LOGGER.report('This was a dry run, exiting without deleting anything')
        return (pks_set_to_delete, False)

    if not pks_set_to_delete:
        return (pks_set_to_delete, True)

    DELETE_LOGGER.report('Starting node deletion...')
    # Delete atomically so a failure leaves the graph untouched.
    with backend.transaction():
        backend.delete_nodes_and_connections(pks_set_to_delete)
    DELETE_LOGGER.report('Deletion of nodes completed.')

    return (pks_set_to_delete, True)
def delete_group_nodes(
    pks: Iterable[int],
    dry_run: Union[bool, Callable[[Set[int]], bool]] = True,
    backend=None,
    **traversal_rules: bool
) -> Tuple[Set[int], bool]:
    """Delete nodes contained in a list of groups (not the groups themselves!).

    This command will delete not only the nodes, but also the ones that are
    linked to these and should be also deleted in order to keep a consistent provenance
    according to the rules explained in the concepts section of the documentation.
    In summary:

    1. If a DATA node is deleted, any process nodes linked to it will also be deleted.

    2. If a CALC node is deleted, any incoming WORK node (callers) will be deleted as
    well whereas any incoming DATA node (inputs) will be kept. Outgoing DATA nodes
    (outputs) will be deleted by default but this can be disabled.

    3. If a WORK node is deleted, any incoming WORK node (callers) will be deleted as
    well, but all DATA nodes will be kept. Outgoing WORK or CALC nodes will be kept by
    default, but deletion of either of both kind of connected nodes can be enabled.

    These rules are 'recursive', so if a CALC node is deleted, then its output DATA
    nodes will be deleted as well, and then any CALC node that may have those as
    inputs, and so on.

    :param pks: a list of the groups

    :param dry_run:
        If True, return the pks to delete without deleting anything.
        If False, delete the pks without confirmation
        If callable, a function that return True/False, based on the pks, e.g. ``dry_run=lambda pks: True``

    :param traversal_rules: graph traversal rules. See :const:`aiida.common.links.GraphTraversalRules` what rule names
        are toggleable and what the defaults are.

    :returns: (node pks to delete, whether they were deleted)
    """
    # Collect the distinct PKs of all nodes belonging to any of the given groups,
    # then defer to delete_nodes for traversal and the actual deletion.
    group_node_query = QueryBuilder(backend=backend).append(
        Group,
        filters={
            'id': {
                'in': list(pks)
            }
        },
        tag='groups',
    ).append(Node, project='id', with_group='groups')
    group_node_query.distinct()
    node_pks = group_node_query.all(flat=True)
    return delete_nodes(node_pks, dry_run=dry_run, backend=backend, **traversal_rules)
5,245 | run slow | import pytest
import time
import numpy as np
import pandas as pd
from deephyper.evaluator import RunningJob
from deephyper.problem import HpProblem
from deephyper.search.hps import CBO
from deephyper.stopper import SuccessiveHalvingStopper
def run(job: RunningJob) -> dict:
    """Toy multi-fidelity objective: accumulate ``x`` once per budget step
    until the stopper interrupts, then report the objective plus budget metadata."""
    assert isinstance(job.stopper, SuccessiveHalvingStopper)

    max_budget = 50
    objective = 0
    for step in range(1, max_budget + 1):
        objective += job["x"]
        job.record(step, objective)
        if job.stopped():
            break

    metadata = {"budget": step, "stopped": step < max_budget}
    return {"objective": job.objective, "metadata": metadata}
@pytest.mark.fast
@pytest.mark.hps
def test_successive_halving_stopper(tmp_path):
    """Successive halving with reduction_factor=3 must stop runs at the rung
    budgets 1, 3, 9 and let survivors reach the full 50 steps."""
    # define the variable you want to optimize
    problem = HpProblem()
    problem.add_hyperparameter((0.0, 10.0), "x")

    stopper = SuccessiveHalvingStopper(max_steps=50, reduction_factor=3)
    # DUMMY surrogate keeps the search random (and fast) — the stopper is what
    # is under test here.
    search = CBO(
        problem,
        run,
        surrogate_model="DUMMY",
        stopper=stopper,
        random_state=42,
        log_dir=tmp_path,
    )
    results = search.search(max_evals=30)

    assert "m:budget" in results.columns
    assert "m:stopped" in results.columns
    assert "p:x" in results.columns
    assert "objective" in results.columns

    # Only the successive-halving rungs (plus the full budget) may appear.
    budgets = np.sort(np.unique(results["m:budget"].to_numpy())).tolist()
    assert budgets == [1, 3, 9, 50]
def METHOD_NAME(job: RunningJob) -> dict:
    """Slow variant of ``run``: identical accumulation but sleeps 1ms per step
    so concurrent evaluators (e.g. the Ray test) overlap in time."""
    assert isinstance(job.stopper, SuccessiveHalvingStopper)

    max_budget = 50
    objective_i = 0
    for budget_i in range(1, max_budget + 1):
        objective_i += job["x"]
        # Simulate per-step training cost.
        time.sleep(0.001)
        job.record(budget_i, objective_i)
        if job.stopped():
            break

    # print(f"job {job.id} stopped at budget {budget_i} with objective {objective_i}")

    return {
        "objective": job.objective,
        "metadata": {"budget": budget_i, "stopped": budget_i < max_budget},
    }
@pytest.mark.slow
@pytest.mark.hps
@pytest.mark.ray
def test_successive_halving_stopper_with_ray(tmp_path):
    """Integration test: the stopper must still produce rung-shaped budgets
    when evaluations run in parallel through a Ray evaluator."""
    import os
    import ray

    from deephyper.evaluator import Evaluator

    # Start from a clean Ray session.
    if ray.is_initialized():
        ray.shutdown()

    evaluator = Evaluator.create(
        METHOD_NAME,
        method="ray",
        method_kwargs={
            "num_cpus": 4,
            "num_cpus_per_task": 1,
            # Ship this test's directory so the run function can be imported
            # inside the Ray workers.
            "ray_kwargs": {
                "runtime_env": {
                    "working_dir": os.path.dirname(os.path.abspath(__file__))
                }
            },
        },
    )
    assert evaluator.num_workers == 4

    # define the variable you want to optimize
    problem = HpProblem()
    problem.add_hyperparameter((0.0, 10.0), "x")

    stopper = SuccessiveHalvingStopper(max_steps=50, reduction_factor=3)
    search = CBO(
        problem,
        evaluator,
        surrogate_model="RF",
        stopper=stopper,
        random_state=42,
        log_dir=tmp_path,
    )
    # Time-bounded search: exact eval count is nondeterministic under Ray.
    results = search.search(timeout=5)
    print(results)

    assert "m:budget" in results.columns
    assert "m:stopped" in results.columns
    assert "p:x" in results.columns
    assert "objective" in results.columns

    # With a timeout we cannot pin the exact rungs reached — only that the
    # observed budgets come from the successive-halving schedule.
    budgets = np.sort(np.unique(results["m:budget"].to_numpy())).tolist()
    assert any(b in budgets for b in [1, 3, 9, 27, 50])
def run_with_failures(job: RunningJob) -> dict:
    """Variant of ``run`` that reports the failure sentinel "F" once the
    accumulated objective reaches 450, to exercise failure filtering."""
    assert isinstance(job.stopper, SuccessiveHalvingStopper)

    max_budget = 50
    objective_i = 0
    for budget_i in range(1, max_budget + 1):
        objective_i += job["x"]

        # Simulated failure: any objective >= 450 is recorded as "F".
        if objective_i >= 450:
            objective_i = "F"

        job.record(budget_i, objective_i)
        if job.stopped():
            break

    return {
        "objective": job.objective,
        "metadata": {"budget": budget_i, "stopped": budget_i < max_budget},
    }
@pytest.mark.fast
@pytest.mark.hps
def test_successive_halving_stopper_with_failing_evaluations(tmp_path):
    """With filter_failures="mean", the search must tolerate "F" objectives and
    still optimize close to (but below) the failure threshold of 450."""
    # define the variable you want to optimize
    problem = HpProblem()
    problem.add_hyperparameter((0.0, 10.0), "x")

    stopper = SuccessiveHalvingStopper(max_steps=50, reduction_factor=3)
    search = CBO(
        problem,
        run_with_failures,
        surrogate_model="RF",
        stopper=stopper,
        random_state=42,
        filter_failures="mean",
        log_dir=tmp_path,
    )
    results = search.search(max_evals=50)

    assert "m:budget" in results.columns
    assert "m:stopped" in results.columns
    assert "p:x" in results.columns
    assert "objective" in results.columns

    # Mixed "F"/numeric objectives force a string dtype; drop the failures
    # before interpreting the numeric results.
    assert pd.api.types.is_string_dtype(results.objective)
    results = results[~results.objective.str.startswith("F")]
    results.objective = results.objective.astype(float)

    # The constraint inside the run-function should make the job fail if > 450
    assert results.objective.max() < 450
    # Test the optimization worked
    assert results.objective.max() > 449
if __name__ == "__main__":
# test_successive_halving_stopper(tmp_path=".")
test_successive_halving_stopper_with_ray(tmp_path=".")
# test_successive_halving_stopper_with_failing_evaluations(tmp_path=".") |
5,246 | kafka consumer | from __future__ import absolute_import
import uuid
import pytest
from test.testutil import env_kafka_version, random_string
from test.fixtures import KafkaFixture, ZookeeperFixture
@pytest.fixture(scope="module")
def zookeeper():
"""Return a Zookeeper fixture"""
zk_instance = ZookeeperFixture.instance()
yield zk_instance
zk_instance.close()
@pytest.fixture(scope="module")
def kafka_broker(kafka_broker_factory):
"""Return a Kafka broker fixture"""
return kafka_broker_factory()[0]
@pytest.fixture(scope="module")
def kafka_broker_factory(zookeeper):
"""Return a Kafka broker fixture factory"""
assert env_kafka_version(), 'KAFKA_VERSION must be specified to run integration tests'
_brokers = []
def factory(**broker_params):
params = {} if broker_params is None else broker_params.copy()
params.setdefault('partitions', 4)
num_brokers = params.pop('num_brokers', 1)
brokers = tuple(KafkaFixture.instance(x, zookeeper, **params)
for x in range(num_brokers))
_brokers.extend(brokers)
return brokers
yield factory
for broker in _brokers:
broker.close()
@pytest.fixture
def kafka_client(kafka_broker, request):
    """Return a KafkaClient fixture"""
    # Client id embeds the test name for easier broker-side log correlation.
    (client,) = kafka_broker.get_clients(cnt=1, client_id='%s_client' % (request.node.name,))
    yield client
    client.close()
@pytest.fixture
def METHOD_NAME(kafka_consumer_factory):
    """Return a KafkaConsumer fixture"""
    # Default-configured consumer; use the factory fixture for custom params.
    return kafka_consumer_factory()
@pytest.fixture
def kafka_consumer_factory(kafka_broker, topic, request):
    """Return a KafkaConsumer factory fixture"""
    # One-element list so the closure can record the consumer for teardown.
    _consumer = [None]

    def factory(**kafka_consumer_params):
        # NOTE(review): **kwargs can never be None, so the None check is dead code.
        params = {} if kafka_consumer_params is None else kafka_consumer_params.copy()
        params.setdefault('client_id', 'consumer_%s' % (request.node.name,))
        params.setdefault('auto_offset_reset', 'earliest')
        _consumer[0] = next(kafka_broker.get_consumers(cnt=1, topics=[topic], **params))
        return _consumer[0]

    yield factory

    if _consumer[0]:
        _consumer[0].close()
@pytest.fixture
def kafka_producer(kafka_producer_factory):
    """Return a KafkaProducer fixture"""
    # Default-configured producer; use the factory fixture for custom params.
    yield kafka_producer_factory()
@pytest.fixture
def kafka_producer_factory(kafka_broker, request):
    """Return a KafkaProduce factory fixture"""
    # One-element list so the closure can record the producer for teardown.
    _producer = [None]

    def factory(**kafka_producer_params):
        # NOTE(review): **kwargs can never be None, so the None check is dead code.
        params = {} if kafka_producer_params is None else kafka_producer_params.copy()
        params.setdefault('client_id', 'producer_%s' % (request.node.name,))
        _producer[0] = next(kafka_broker.get_producers(cnt=1, **params))
        return _producer[0]

    yield factory

    if _producer[0]:
        _producer[0].close()
@pytest.fixture
def kafka_admin_client(kafka_admin_client_factory):
    """Return a KafkaAdminClient fixture"""
    # Default-configured admin client; use the factory fixture for custom params.
    yield kafka_admin_client_factory()
@pytest.fixture
def kafka_admin_client_factory(kafka_broker):
    """Return a KafkaAdminClient factory fixture"""
    # One-element list so the closure can record the client for teardown.
    _admin_client = [None]

    def factory(**kafka_admin_client_params):
        # NOTE(review): **kwargs can never be None, so the None check is dead code.
        params = {} if kafka_admin_client_params is None else kafka_admin_client_params.copy()
        _admin_client[0] = next(kafka_broker.get_admin_clients(cnt=1, **params))
        return _admin_client[0]

    yield factory

    if _admin_client[0]:
        _admin_client[0].close()
@pytest.fixture
def topic(kafka_broker, request):
    """Return a topic fixture"""
    # Unique per test invocation: test name plus a random suffix.
    topic_name = '%s_%s' % (request.node.name, random_string(10))
    kafka_broker.create_topics([topic_name])
    return topic_name
@pytest.fixture
def conn(mocker):
    """Return a connection mocker fixture"""
    # Imported lazily so collecting this conftest does not require a broker.
    from kafka.conn import ConnectionStates
    from kafka.future import Future
    from kafka.protocol.metadata import MetadataResponse
    conn = mocker.patch('kafka.client_async.BrokerConnection')
    # The patched class returns itself, so constructing a connection yields
    # this same mock instance.
    conn.return_value = conn
    conn.state = ConnectionStates.CONNECTED
    # Any send() immediately succeeds with a canned two-broker metadata response.
    conn.send.return_value = Future().success(
        MetadataResponse[0](
            [(0, 'foo', 12), (1, 'bar', 34)],  # brokers
            []))  # topics
    conn.blacked_out.return_value = False

    def _set_conn_state(state):
        # Test helper: force the mock into a given connection state.
        conn.state = state
        return state
    conn._set_conn_state = _set_conn_state
    conn.connect.side_effect = lambda: conn.state
    conn.connect_blocking.return_value = True
    # State predicates mirror the real BrokerConnection API.
    conn.connecting = lambda: conn.state in (ConnectionStates.CONNECTING,
                                            ConnectionStates.HANDSHAKE)
    conn.connected = lambda: conn.state is ConnectionStates.CONNECTED
    conn.disconnected = lambda: conn.state is ConnectionStates.DISCONNECTED
    return conn
@pytest.fixture()
def send_messages(topic, kafka_producer, request):
    """A factory that returns a send_messages function with a pre-populated
    topic / producer.

    The returned function sends one message per item of ``number_range`` to
    ``topic``/``partition``, waits for all produce futures to succeed, and
    returns the encoded payloads in send order.
    """
    def _send_messages(number_range, partition=0, topic=topic, producer=kafka_producer, request=request):
        """
        messages is typically `range(0,100)`
        partition is an int
        """
        messages_and_futures = []  # [(message, produce_future),]
        for i in number_range:
            # request.node.name provides the test name (including parametrized values)
            encoded_msg = '{}-{}-{}'.format(i, request.node.name, uuid.uuid4()).encode('utf-8')
            # BUGFIX: use the `producer` argument rather than the closed-over
            # `kafka_producer` fixture — previously a caller-supplied producer
            # override was silently ignored.
            future = producer.send(topic, value=encoded_msg, partition=partition)
            messages_and_futures.append((encoded_msg, future))
        producer.flush()
        for (msg, f) in messages_and_futures:
            assert f.succeeded()
        return [msg for (msg, f) in messages_and_futures]
    return _send_messages
5,247 | test merge sub dicts | # -*- coding: utf-8 -*-
# Copyright: (c) 2017, Will Thames <will.thames@xvt.com.au>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
from ansible.module_utils.common.dict_transformations import (
_camel_to_snake,
_snake_to_camel,
camel_dict_to_snake_dict,
dict_merge,
recursive_diff,
)
# CamelCase inputs mapped to their expected snake_case form for the default
# (lossy) conversion: runs of capitals collapse into one word
# ("HTTPEndpoints" -> "http_endpoints"), so this transform is not reversible.
EXPECTED_SNAKIFICATION = {
    'alllower': 'alllower',
    'TwoWords': 'two_words',
    'AllUpperAtEND': 'all_upper_at_end',
    'AllUpperButPLURALs': 'all_upper_but_plurals',
    'TargetGroupARNs': 'target_group_arns',
    'HTTPEndpoints': 'http_endpoints',
    'PLURALs': 'plurals'
}

# Expected snake_case form with `reversible=True`: each capital becomes its own
# underscore-separated letter ("AllUpperAtEND" -> "all_upper_at_e_n_d") so the
# original CamelCase string can be reconstructed.
EXPECTED_REVERSIBLE = {
    'TwoWords': 'two_words',
    'AllUpperAtEND': 'all_upper_at_e_n_d',
    'AllUpperButPLURALs': 'all_upper_but_p_l_u_r_a_ls',
    'TargetGroupARNs': 'target_group_a_r_ns',
    'HTTPEndpoints': 'h_t_t_p_endpoints',
    'PLURALs': 'p_l_u_r_a_ls'
}
class TestCaseCamelToSnake:
    """Checks for the camel -> snake direction of the conversion."""

    def test_camel_to_snake(self):
        for camel, snake in EXPECTED_SNAKIFICATION.items():
            assert _camel_to_snake(camel) == snake

    def test_reversible_camel_to_snake(self):
        for camel, snake in EXPECTED_REVERSIBLE.items():
            assert _camel_to_snake(camel, reversible=True) == snake
class TestCaseSnakeToCamel:
    """Checks for the snake -> camel direction of the conversion."""

    def test_snake_to_camel_reversed(self):
        for camel, snake in EXPECTED_REVERSIBLE.items():
            assert _snake_to_camel(snake, capitalize_first=True) == camel
class TestCaseCamelToSnakeAndBack:
    """Round-trip check: camel -> reversible snake -> camel is the identity."""

    def test_camel_to_snake_and_back(self):
        for camel in EXPECTED_REVERSIBLE:
            round_tripped = _snake_to_camel(
                _camel_to_snake(camel, reversible=True), capitalize_first=True
            )
            assert round_tripped == camel
class TestCaseCamelDictToSnakeDict:
    """Checks for whole-dict conversion with the `ignore_list` option."""

    def test_ignore_list(self):
        camel_dict = {
            'Hello': {'One': 'one', 'Two': 'two'},
            'World': {'Three': 'three', 'Four': 'four'},
        }
        snake_dict = camel_dict_to_snake_dict(camel_dict, ignore_list='World')
        # Keys under 'Hello' are snakified; the ignored subtree keeps its
        # original CamelCase keys (only the top-level key itself is converted).
        assert snake_dict['hello'] == {'one': 'one', 'two': 'two'}
        assert snake_dict['world'] == {'Three': 'three', 'Four': 'four'}
class TestCaseDictMerge:
    def test_dict_merge(self):
        """dict_merge overlays `other` onto `base`: scalars and lists are
        replaced wholesale, while nested dicts are merged recursively."""
        base = dict(obj2=dict(), b1=True, b2=False, b3=False,
                    one=1, two=2, three=3, obj1=dict(key1=1, key2=2),
                    l1=[1, 3], l2=[1, 2, 3], l4=[4],
                    nested=dict(n1=dict(n2=2)))
        other = dict(b1=True, b2=False, b3=True, b4=True,
                     one=1, three=4, four=4, obj1=dict(key1=2),
                     l1=[2, 1], l2=[3, 2, 1], l3=[1],
                     nested=dict(n1=dict(n2=2, n3=3)))
        result = dict_merge(base, other)
        # scalar assertions
        assert 'one' in result
        assert 'two' in result
        assert result['three'] == 4
        assert result['four'] == 4
        # dict assertions
        assert 'obj1' in result
        assert 'key1' in result['obj1']
        assert 'key2' in result['obj1']
        # list assertions
        # this line differs from the network_utils/common test of the function of the
        # same name as this method does not merge lists
        # BUGFIX: these previously read `assert result['l1'], [2, 1]` -- the
        # comma makes the list the (unused) assertion *message*, so the checks
        # always passed regardless of the merge result.
        assert result['l1'] == [2, 1]
        assert 'l2' in result
        assert result['l3'] == [1]
        assert 'l4' in result
        # nested assertions
        assert 'obj1' in result
        assert result['obj1']['key1'] == 2
        assert 'key2' in result['obj1']
        # bool assertions
        assert 'b1' in result
        assert 'b2' in result
        assert result['b3']
        assert result['b4']
class TestCaseAzureIncidental:
    """Cases originally hit incidentally by the Azure collection."""

    def test_dict_merge_invalid_dict(self):
        ''' if b is not a dict, return b '''
        assert dict_merge({}, None) is None

    def METHOD_NAME(self):
        '''merge sub dicts '''
        left = {'a': {'a1': 1}}
        right = {'a': {'b1': 2}}
        merged = dict_merge(left, right)
        assert merged == {'a': {'a1': 1, 'b1': 2}}
class TestCaseRecursiveDiff:
    """Checks for recursive_diff on nested structures and invalid inputs."""

    def test_recursive_diff(self):
        base = {'foo': {'bar': [{'baz': {'qux': 'ham_sandwich'}}]}}
        same = {'foo': {'bar': [{'baz': {'qux': 'ham_sandwich'}}]}}
        changed = {'foo': {'bar': [{'baz': {'qux': 'turkey_sandwich'}}]}}
        diff = recursive_diff(base, changed)
        # A genuine difference yields a 2-tuple-like result (left/right views).
        assert diff is not None
        assert len(diff) == 2
        # Structurally equal inputs produce no diff at all.
        assert recursive_diff(base, same) is None

    @pytest.mark.parametrize(
        'p1, p2', (
            ([1, 2], [2, 3]),
            ({1: 2}, [2, 3]),
            ([1, 2], {2: 3}),
            ({2: 3}, 'notadict'),
            ('notadict', {2: 3}),
        )
    )
    def test_recursive_diff_negative(self, p1, p2):
        # Non-dict operands (on either side) are rejected with TypeError.
        with pytest.raises(TypeError, match="Unable to diff"):
            recursive_diff(p1, p2)
5,248 | set max evals | """The callback module contains sub-classes of the ``Callback`` class used to trigger custom actions on the start and completion of jobs by the ``Evaluator``. Callbacks can be used with any Evaluator implementation.
"""
import deephyper.core.exceptions
import numpy as np
import pandas as pd
from deephyper.evaluator._evaluator import _test_ipython_interpretor
if _test_ipython_interpretor():
from tqdm.notebook import tqdm
else:
from tqdm import tqdm
class Callback:
    """Base class for evaluator callbacks.

    Subclasses override any subset of the hooks below; the default
    implementations do nothing.
    """

    def on_launch(self, job):
        """Called each time a ``Job`` is created by the ``Evaluator``.

        Args:
            job (Job): The created job.
        """

    def on_done(self, job):
        """Called each time a Job is completed by the Evaluator.

        Args:
            job (Job): The completed job.
        """

    def on_done_other(self, job):
        """Called each time a Job is collected from an other process.

        Args:
            job (Job): The completed Job.
        """
class ProfilingCallback(Callback):
    """Collect profiling data. Each time a ``Job`` is completed by the ``Evaluator`` a the different timestamps corresponding to the submit and gather (and run function start and end if the ``profile`` decorator is used on the run function) are collected.

    An example usage can be:

    >>> profiler = ProfilingCallback()
    >>> evaluator.create(method="ray", method_kwargs={..., "callbacks": [profiler]})
    ...
    >>> profiler.profile
    """

    def __init__(self):
        self.history = []

    def on_launch(self, job):
        ...

    def on_done(self, job):
        # Prefer the run-function timestamps when available (recorded by the
        # ``profile`` decorator); otherwise fall back to submit/gather times.
        if job.timestamp_start is None or job.timestamp_end is None:
            start, end = job.timestamp_submit, job.timestamp_gather
        else:
            start, end = job.timestamp_start, job.timestamp_end
        # +1 when a job starts, -1 when it ends.
        self.history.extend([(start, 1), (end, -1)])

    @property
    def profile(self):
        """Return a DataFrame of (timestamp, number of jobs running)."""
        timeline = []
        running = 0
        for timestamp, delta in sorted(self.history):
            running += delta
            timeline.append([timestamp, running])
        return pd.DataFrame(timeline, columns=["timestamp", "n_jobs_running"])
class LoggerCallback(Callback):
    """Print information when jobs are completed by the ``Evaluator``.

    An example usage can be:

    >>> evaluator.create(method="ray", method_kwargs={..., "callbacks": [LoggerCallback()]})
    """

    def __init__(self):
        self._best_objective = None  # best (sum of) objective observed so far
        self._n_done = 0  # number of completed jobs

    def on_done_other(self, job):
        self.on_done(job)

    @staticmethod
    def _is_failure(objective):
        # Failed evaluations are encoded as strings starting with "F".
        # startswith() also tolerates an empty string (indexing would raise).
        return isinstance(objective, str) and objective.startswith("F")

    def on_done(self, job):
        self._n_done += 1
        if np.ndim(job.objective) > 0:
            # Multi-objective result.
            # BUGFIX: the failure test used ``np.any(<generator>)``, which
            # treats the generator object as a single truthy scalar and was
            # therefore always True. Check failures first with builtin any()
            # so np.isreal is never evaluated on failure strings.
            if any(self._is_failure(res) for res in job.objective):
                print(f"[{self._n_done:05d}] -- received failure: {job.objective}")
            elif np.isreal(job.objective).all():
                total = np.sum(job.objective)
                if self._best_objective is None:
                    self._best_objective = total
                else:
                    self._best_objective = max(total, self._best_objective)
                print(
                    f"[{self._n_done:05d}] -- best sum(objective): {self._best_objective:.5f} -- received sum(objective): {total:.5f}"
                )
        elif self._is_failure(job.objective):
            print(f"[{self._n_done:05d}] -- received failure: {job.objective}")
        elif np.isreal(job.objective):
            if self._best_objective is None:
                self._best_objective = job.objective
            else:
                self._best_objective = max(job.objective, self._best_objective)
            print(
                f"[{self._n_done:05d}] -- best objective: {self._best_objective:.5f} -- received objective: {job.objective:.5f}"
            )
class TqdmCallback(Callback):
    """Print information when jobs are completed by the ``Evaluator``.

    An example usage can be:

    >>> evaluator.create(method="ray", method_kwargs={..., "callbacks": [TqdmCallback()]})
    """

    def __init__(self):
        self._best_objective = None
        self._n_done = 0
        self._n_failures = 0
        self._max_evals = None
        self._tqdm = None

    def METHOD_NAME(self, max_evals):
        # Reset the bar so it is rebuilt with the new total on the next job.
        self._max_evals = max_evals
        self._tqdm = None

    def on_done_other(self, job):
        self.on_done(job)

    def on_done(self, job):
        # Build the progress bar lazily so a total set after construction
        # (via METHOD_NAME) is honored.
        if self._tqdm is None:
            self._tqdm = tqdm(total=self._max_evals) if self._max_evals else tqdm()

        self._n_done += 1
        self._tqdm.update(1)

        objective = job.objective
        if np.ndim(objective) > 0:
            # Multi-objective: track the best *sum* over all objectives.
            if all(np.isreal(obj_i) for obj_i in objective):
                total = np.sum(objective)
                self._best_objective = (
                    total if self._best_objective is None
                    else max(total, self._best_objective)
                )
            else:
                self._n_failures += 1
            self._tqdm.set_postfix(
                {"failures": self._n_failures, "sum(objective)": self._best_objective}
            )
        else:
            if np.isreal(objective):
                self._best_objective = (
                    objective if self._best_objective is None
                    else max(objective, self._best_objective)
                )
            else:
                self._n_failures += 1
            self._tqdm.set_postfix(
                objective=self._best_objective, failures=self._n_failures
            )
class SearchEarlyStopping(Callback):
    """Stop the search gracefully when it does not improve for a given number of evaluations.

    Args:
        patience (int, optional): The number of not improving evaluations to wait for before stopping the search. Defaults to 10.
        objective_func (callable, optional): A function that takes a ``Job`` has input and returns the maximized scalar value monitored by this callback. Defaults to ``lambda j: j.result``.
    """

    def __init__(self, patience: int = 10, objective_func=lambda j: j.result):
        self._best_objective = None
        self._n_lower = 0
        self._patience = patience
        self._objective_func = objective_func

    def on_done_other(self, job):
        self.on_done(job)

    def on_done(self, job):
        objective = self._objective_func(job)
        # Multi-objective results are compared through their sum.
        if np.ndim(objective) > 0:
            objective = np.sum(objective)

        if self._best_objective is None:
            # First result seen becomes the baseline.
            self._best_objective = objective
            return

        if objective > self._best_objective:
            # Report the improvement (message shows the previous best).
            print(
                f"Objective has improved from {self._best_objective:.5f} -> {objective:.5f}"
            )
            self._best_objective = objective
            self._n_lower = 0
            return

        self._n_lower += 1
        if self._n_lower >= self._patience:
            print(
                f"Stopping the search because it did not improve for the last {self._patience} evaluations!"
            )
            raise deephyper.core.exceptions.SearchTerminationError
5,249 | test get enabled initial | import unittest
from django.test import RequestFactory, TestCase, override_settings
from django.urls import reverse
from wagtail.admin.views.home import WhatsNewInWagtailVersionPanel
from wagtail.test.utils import WagtailTestUtils
from wagtail.users.models import UserProfile
class TestWhatsNewInWagtailVersionPanel(WagtailTestUtils, TestCase):
    """Unit tests for the WhatsNewInWagtailVersionPanel home-page panel."""

    @classmethod
    def setUpTestData(cls):
        cls.panel = WhatsNewInWagtailVersionPanel()
        cls.dismissible_id = cls.panel.get_dismissible_id()
        cls.request_factory = RequestFactory()
        cls.user = cls.create_user(username="tester")
        cls.profile = UserProfile.get_for_user(cls.user)

    def get_parent_context(self):
        request = self.request_factory.get("/")
        request.user = self.user
        return {"request": request}

    def _dismissible_data_attrs(self):
        # Stimulus controller attributes that make the banner dismissible.
        return [
            'data-controller="w-dismissible"',
            'data-w-dismissible-dismissed-class="w-dismissible--dismissed"',
            f'data-w-dismissible-id-value="{self.dismissible_id}"',
        ]

    def _assert_banner_rendered(self, result):
        for data_attr in self._dismissible_data_attrs():
            self.assertIn(data_attr, result)
        self.assertIn("Things in Wagtail 4 have changed!", result)

    def test_get_whats_new_banner_setting_default(self):
        self.assertTrue(self.panel.get_whats_new_banner_setting())

    @override_settings(WAGTAIL_ENABLE_WHATS_NEW_BANNER=False)
    def test_get_whats_new_banner_setting_false(self):
        self.assertFalse(self.panel.get_whats_new_banner_setting())

    def test_render_html_user_initial(self):
        self._assert_banner_rendered(self.panel.render_html(self.get_parent_context()))

    @override_settings(WAGTAIL_ENABLE_WHATS_NEW_BANNER=False)
    def test_render_html_setting_false(self):
        self.assertEqual(self.panel.render_html(self.get_parent_context()), "")

    def test_render_html_user_no_profile(self):
        # Without a UserProfile the banner is still shown (nothing dismissed).
        self.profile.delete()
        self.user.refresh_from_db()
        self._assert_banner_rendered(self.panel.render_html(self.get_parent_context()))

    def test_render_html_user_dismissed(self):
        self.profile.dismissibles[self.dismissible_id] = True
        self.profile.save(update_fields=["dismissibles"])
        self.assertEqual(self.panel.render_html(self.get_parent_context()), "")
@unittest.skip("Wagtail 4 banner has been removed.")
class TestWhatsNewOnDashboard(WagtailTestUtils, TestCase):
    """Test 'What's New In Wagtail' banner rendered by `wagtailadmin_home` view"""

    def setUp(self):
        self.user = self.login()
        self.profile = UserProfile.get_for_user(self.user)
        self.dismissible_id = WhatsNewInWagtailVersionPanel().get_dismissible_id()

    def get(self):
        return self.client.get(reverse("wagtailadmin_home"))

    def _dismissible_data_attrs(self):
        # Stimulus controller attributes that make the banner dismissible.
        return [
            'data-controller="w-dismissible"',
            'data-w-dismissible-dismissed-class="w-dismissible--dismissed"',
            f'data-w-dismissible-id-value="{self.dismissible_id}"',
        ]

    def _assert_banner_shown(self, response):
        html_content = response.content.decode("utf-8")
        for data_attr in self._dismissible_data_attrs():
            self.assertIn(data_attr, html_content)
        self.assertContains(response, "Things in Wagtail 4 have changed!")

    def _assert_banner_hidden(self, response):
        html_content = response.content.decode("utf-8")
        for data_attr in self._dismissible_data_attrs():
            self.assertNotIn(data_attr, html_content)
        self.assertNotContains(response, "Things in Wagtail 4 have changed!")

    def METHOD_NAME(self):
        self._assert_banner_shown(self.get())

    @override_settings(WAGTAIL_ENABLE_WHATS_NEW_BANNER=False)
    def test_get_disabled_initial(self):
        self._assert_banner_hidden(self.get())

    def test_render_html_user_no_profile(self):
        # Without a UserProfile the banner is still shown (nothing dismissed).
        self.profile.delete()
        self.user.refresh_from_db()
        self._assert_banner_shown(self.get())

    def test_get_enabled_dismissed(self):
        self.profile.dismissibles[self.dismissible_id] = True
        self.profile.save(update_fields=["dismissibles"])
        self._assert_banner_hidden(self.get())
5,250 | sign | import pytest
from eth_account import Account
from eth_account.messages import encode_defunct
from eth_keys import KeyAPI
from eth_utils import is_same_address
@pytest.fixture
def c(w3, get_contract):
    """Deploy the example wallet contract with five owners and threshold 3."""
    accounts = w3.eth.accounts[:7]
    owners = accounts[1:6]
    with open("examples/wallet/wallet.vy") as source:
        wallet = get_contract(source.read(), *[owners, 3])
    # Fund the contract so future transactions can cover gas costs.
    w3.eth.send_transaction({"to": wallet.address, "value": 10**17})
    return wallet
@pytest.fixture
def sign(keccak):
    """Return a helper producing a [v, r, s] owner signature for approve()."""
    def METHOD_NAME(seq, to, value, data, key):
        # Pack the request the same way the wallet example hashes it:
        # 32-byte sequence number, 20-byte destination left-padded to 32
        # bytes, 32-byte value, then the raw calldata.
        packed = seq.to_bytes(32, "big") + b"\x00" * 12 + to + value.to_bytes(32, "big") + data
        inner_hash = keccak(packed)
        # Apply the standard Ethereum signed-message prefix before signing.
        prefixed_hash = keccak(b"\x19Ethereum Signed Message:\n32" + inner_hash)
        signature = KeyAPI().ecdsa_sign(prefixed_hash, key)
        v = 28 if signature.v == 1 else 27
        return [v, signature.r, signature.s]
    return METHOD_NAME
def test_approve(w3, c, tester, assert_tx_failed, sign):
    """Exercise the wallet's `approve` entry point: a legitimate 3-of-5
    approval succeeds, while malformed requests (too few signatures, an
    invalid signer, a wrong sequence number, insufficient value) revert.
    Note: the approvals are order-dependent -- each successful approve
    consumes one sequence number on chain.
    """
    a0, a1, a2, a3, a4, a5, a6 = w3.eth.accounts[:7]
    k0, k1, k2, k3, k4, k5, k6, k7 = tester.backend.account_keys[:8]
    # Destination, amount and calldata of the transfer being approved.
    to, value, data = b"\x35" * 20, 10**16, b""
    to_address = w3.to_checksum_address(to)
    def pack_and_sign(seq, *args):
        # Sign with each provided key; a falsy entry stands for "no signature".
        sigs = [sign(seq, to, value, data, k) if k else [0, 0, 0] for k in args]
        return sigs
    # Legitimate approval
    sigs = pack_and_sign(0, k1, 0, k3, 0, k5)
    c.approve(0, "0x" + to.hex(), value, data, sigs, transact={"value": value, "from": a1})
    # Approve fails if only 2 signatures are given
    sigs = pack_and_sign(1, k1, 0, k3, 0, 0)
    assert_tx_failed(
        lambda: c.approve(1, to_address, value, data, sigs, transact={"value": value, "from": a1})
    )  # noqa: E501
    # Approve fails if an invalid signature is given
    # (k7 belongs to an account that is not one of the wallet owners)
    sigs = pack_and_sign(1, k1, 0, k7, 0, k5)
    assert_tx_failed(
        lambda: c.approve(1, to_address, value, data, sigs, transact={"value": value, "from": a1})
    )  # noqa: E501
    # Approve fails if transaction number is incorrect (the first argument should be 1)
    sigs = pack_and_sign(0, k1, 0, k3, 0, k5)
    assert_tx_failed(
        lambda: c.approve(0, to_address, value, data, sigs, transact={"value": value, "from": a1})
    )  # noqa: E501
    # Approve fails if not enough value is sent
    sigs = pack_and_sign(1, k1, 0, k3, 0, k5)
    assert_tx_failed(
        lambda: c.approve(1, to_address, value, data, sigs, transact={"value": 0, "from": a1})
    )  # noqa: E501
    sigs = pack_and_sign(1, k1, 0, k3, 0, k5)
    # this call should succeed
    c.approve(1, to_address, value, data, sigs, call={"value": value, "from": a1})
    print("Basic tests passed")
def test_javascript_signatures(w3, get_contract):
    """Verify that externally produced (e.g. browser/JS) signatures are
    accepted by the wallet. The raw signatures below were generated offline
    for the fixed accounts/recipient, so the hashed payload must match them
    byte-for-byte.
    """
    a3 = w3.eth.accounts[2]
    # The zero address will cause `approve` to default to valid signatures
    zero_address = "0x0000000000000000000000000000000000000000"
    accounts = [
        "0x776ba14735ff84789320718cf0aa43e91f7a8ce1",
        "0x095ce4e4240fa66ff90282c26847456e3f3b5002",
    ]
    # The address that will receive the transaction
    recipient = "0x776Ba14735FF84789320718cf0aa43e91F7A8Ce1"
    # These are the matching sigs to the accounts
    raw_sigs = [
        "0x4a89507bf71749fb338ed13fba623a683d9ecab0fb9c389a4298525c043e38281a00ab65628bb18a382eb8c8b4fb4dae95ccc993cf49f617c60d8051180778601c",  # noqa: E501
        "0xc84fe5d2a600e033930e0cf73f26e78f4c65b134f9c9992f60f08ce0863abdbe0548a6e8aa2d952659f29c67106b59fdfcd64d67df03c1df620c70c85578ae701b",  # noqa: E501
    ]
    # Turns the raw sigs into sigs
    # (last byte is v, first 32 bytes are r, next 32 bytes are s)
    sigs = [
        (w3.to_int(x[64:]), w3.to_int(x[:32]), w3.to_int(x[32:64]))  # v  # r  # s
        for x in map(lambda z: w3.to_bytes(hexstr=z[2:]), raw_sigs)
    ]
    # Hash of (seq=0, recipient padded to 32 bytes, value=25, empty calldata),
    # i.e. the same packing the wallet's approve() performs.
    h = w3.keccak(
        (0).to_bytes(32, "big")
        + b"\x00" * 12
        + w3.to_bytes(hexstr=recipient[2:])
        + (25).to_bytes(32, "big")
        + b""
    )  # noqa: E501
    h2 = encode_defunct(h)
    # Check to make sure the signatures are valid
    assert is_same_address(Account.recover_message(h2, sigs[0]), accounts[0])
    assert is_same_address(Account.recover_message(h2, sigs[1]), accounts[1])
    # Set the owners to zero addresses
    with open("examples/wallet/wallet.vy") as f:
        owners = [w3.to_checksum_address(x) for x in accounts + [a3, zero_address, zero_address]]
        x2 = get_contract(f.read(), *[owners, 2])
    w3.eth.send_transaction({"to": x2.address, "value": 10**17})
    # There's no need to pass in signatures because the owners are 0 addresses
    # causing them to default to valid signatures
    x2.approve(
        0, recipient, 25, b"", sigs + [[0, 0, 0]] * 3, call={"to": x2.address, "value": 10**17}
    )
    print("Javascript signature tests passed")
5,251 | check daemon status | """
Check daemon status inside PMON container. Each daemon status is checked under the conditions below in this script:
* Daemon Running Status
* Daemon Stop status
* Daemon Restart status
This script is to cover the test case in the SONiC platform daemon and service test plan:
https://github.com/sonic-net/sonic-mgmt/blob/master/docs/testplan/PMON-Services-Daemons-test-plan.md
"""
import logging
import time
import pytest
from tests.common.helpers.assertions import pytest_assert
from tests.common.platform.daemon_utils import check_pmon_daemon_enable_status
from tests.common.platform.processes_utils import check_critical_processes
from tests.common.utilities import wait_until, skip_release
logger = logging.getLogger(__name__)
# Marks applied to every test in this module: run on any topology, skip the
# sanity check, and disable the loganalyzer.
pytestmark = [
    pytest.mark.topology('any'),
    pytest.mark.sanity_check(skip_sanity=True),
    pytest.mark.disable_loganalyzer
]

# Process states reported by get_pmon_daemon_status for pmon daemons.
expected_running_status = "RUNNING"
expected_stopped_status = "STOPPED"
expected_exited_status = "EXITED"

# The pmon daemon under test.
daemon_name = "ledd"

# Signal arguments passed to stop_pmon_daemon; None presumably requests a
# plain (signal-less) service stop -- see stop_pmon_daemon for the contract.
SIG_STOP_SERVICE = None
SIG_TERM = "-15"
SIG_KILL = "-9"
@pytest.fixture(scope="module", autouse=True)
def setup(duthosts, rand_one_dut_hostname):
    """Skip the whole module when ledd is not enabled on this platform."""
    duthost = duthosts[rand_one_dut_hostname]
    if check_pmon_daemon_enable_status(duthost, daemon_name) is False:
        pytest.skip("{} is not enabled in {} {}".format(daemon_name, duthost.facts["platform"], duthost.os_version))
@pytest.fixture(scope="module", autouse=True)
def teardown_module(duthosts, rand_one_dut_hostname):
    """After the module: restart ledd if needed and re-check critical processes."""
    duthost = duthosts[rand_one_dut_hostname]
    yield
    status, _ = duthost.get_pmon_daemon_status(daemon_name)
    if status != "RUNNING":
        duthost.start_pmon_daemon(daemon_name)
        # Give the daemon time to come up before the health check below.
        time.sleep(10)
    logger.info("Tearing down: to make sure all the critical services, interfaces and transceivers are good")
    check_critical_processes(duthost, watch_secs=10)
@pytest.fixture()
def METHOD_NAME(duthosts, rand_one_dut_hostname):
    """Make sure ledd is running before the test starts."""
    duthost = duthosts[rand_one_dut_hostname]
    status, _ = duthost.get_pmon_daemon_status(daemon_name)
    if status != "RUNNING":
        duthost.start_pmon_daemon(daemon_name)
        # Allow the daemon time to reach RUNNING state.
        time.sleep(10)
def check_expected_daemon_status(duthost, expected_daemon_status):
    """Return True when the daemon's current status matches the expectation."""
    status, _ = duthost.get_pmon_daemon_status(daemon_name)
    return status == expected_daemon_status
def test_pmon_ledd_running_status(duthosts, rand_one_dut_hostname):
    """
    @summary: This test case is to check ledd status on dut
    """
    duthost = duthosts[rand_one_dut_hostname]
    status, pid = duthost.get_pmon_daemon_status(daemon_name)
    logger.info("{} daemon is {} with pid {}".format(daemon_name, status, pid))
    # The daemon must be RUNNING with a real (non -1) pid.
    pytest_assert(status == expected_running_status,
                  "{} expected running status is {} but is {}"
                  .format(daemon_name, expected_running_status, status))
    pytest_assert(pid != -1,
                  "{} expected pid is a positive integer but is {}".format(daemon_name, pid))
def test_pmon_ledd_stop_and_start_status(METHOD_NAME, duthosts, rand_one_dut_hostname):
    """
    @summary: This test case is to check the ledd stopped and restarted status
    """
    duthost = duthosts[rand_one_dut_hostname]
    pre_daemon_status, pre_daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
    logger.info("{} daemon is {} with pid {}".format(daemon_name, pre_daemon_status, pre_daemon_pid))
    # Stop via supervisor (no signal): status should become STOPPED with no pid.
    duthost.stop_pmon_daemon(daemon_name, SIG_STOP_SERVICE)
    time.sleep(2)
    daemon_status, daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
    pytest_assert(daemon_status == expected_stopped_status,
                  "{} expected stopped status is {} but is {}"
                  .format(daemon_name, expected_stopped_status, daemon_status))
    pytest_assert(daemon_pid == -1,
                  "{} expected pid is -1 but is {}".format(daemon_name, daemon_pid))
    # Restart and verify the daemon comes back RUNNING under a fresh pid.
    duthost.start_pmon_daemon(daemon_name)
    time.sleep(10)
    post_daemon_status, post_daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
    pytest_assert(post_daemon_status == expected_running_status,
                  "{} expected restarted status is {} but is {}"
                  .format(daemon_name, expected_running_status, post_daemon_status))
    pytest_assert(post_daemon_pid != -1,
                  "{} expected pid is a positive integer but is {}"
                  .format(daemon_name, post_daemon_pid))
    # NOTE(review): assumes pids grow monotonically; could fail after pid
    # wrap-around on a long-running DUT -- confirm this is acceptable.
    pytest_assert(post_daemon_pid > pre_daemon_pid,
                  "Restarted {} pid should be bigger than {} but it is {}"
                  .format(daemon_name, pre_daemon_pid, post_daemon_pid))
def test_pmon_ledd_term_and_start_status(METHOD_NAME, duthosts, rand_one_dut_hostname):
    """
    @summary: This test case is to check the ledd terminated and restarted status
    """
    duthost = duthosts[rand_one_dut_hostname]
    # Supervisor auto-restart on SIGTERM is not implemented on these branches.
    skip_release(duthost, ["201811", "201911"])
    pre_daemon_status, pre_daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
    logger.info("{} daemon is {} with pid {}".format(daemon_name, pre_daemon_status, pre_daemon_pid))
    # Terminate the process directly with SIGTERM; it is expected to be
    # restarted automatically.
    duthost.stop_pmon_daemon(daemon_name, SIG_TERM, pre_daemon_pid)
    # Insert 2 seconds delay between termination of the LEDD process by SIG_TERM and status verification,
    # to avoid misleading check result which caused by latency of termination process.
    time.sleep(2)
    # TODO: To arm the wait_until API with a delay parameter, by which to delay specified time
    # before invoking the check function.
    wait_until(120, 10, 0, check_expected_daemon_status,
               duthost, expected_running_status)
    post_daemon_status, post_daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
    pytest_assert(post_daemon_status == expected_running_status,
                  "{} expected restarted status is {} but is {}"
                  .format(daemon_name, expected_running_status, post_daemon_status))
    pytest_assert(post_daemon_pid != -1,
                  "{} expected pid is a positive integer but is {}".format(daemon_name, post_daemon_pid))
    # NOTE(review): assumes pids grow monotonically; could fail after pid
    # wrap-around on a long-running DUT -- confirm this is acceptable.
    pytest_assert(post_daemon_pid > pre_daemon_pid,
                  "Restarted {} pid should be bigger than {} but it is {}"
                  .format(daemon_name, pre_daemon_pid, post_daemon_pid))
def test_pmon_ledd_kill_and_start_status(METHOD_NAME, duthosts, rand_one_dut_hostname):
    """
    @summary: This test case is to check the ledd killed unexpectedly (automatically restarted) status
    """
    duthost = duthosts[rand_one_dut_hostname]
    pre_daemon_status, pre_daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
    logger.info("{} daemon is {} with pid {}".format(daemon_name, pre_daemon_status, pre_daemon_pid))
    # SIGKILL the process: immediately afterwards it must not report RUNNING.
    duthost.stop_pmon_daemon(daemon_name, SIG_KILL, pre_daemon_pid)
    daemon_status, daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
    pytest_assert(daemon_status != expected_running_status,
                  "{} unexpected killed status is not {}".format(daemon_name, daemon_status))
    # The supervisor is expected to restart the killed daemon automatically.
    time.sleep(10)
    post_daemon_status, post_daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
    pytest_assert(post_daemon_status == expected_running_status,
                  "{} expected restarted status is {} but is {}"
                  .format(daemon_name, expected_running_status, post_daemon_status))
    pytest_assert(post_daemon_pid != -1,
                  "{} expected pid is a positive integer but is {}".format(daemon_name, post_daemon_pid))
    # NOTE(review): assumes pids grow monotonically; could fail after pid
    # wrap-around on a long-running DUT -- confirm this is acceptable.
    pytest_assert(post_daemon_pid > pre_daemon_pid,
                  "Restarted {} pid should be bigger than {} but it is {}"
                  .format(daemon_name, pre_daemon_pid, post_daemon_pid))
5,252 | configure | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
# Authors: Amador Pahim <amador@pahim.org>
# Bestoun S. Ahmed <bestoon82@gmail.com>
# Cleber Rosa <crosa@redhat.com>
import logging
import os
import sys
from avocado_varianter_cit.Cit import LOG, Cit
from avocado_varianter_cit.Parser import Parser
from avocado.core import exit_codes, varianter
from avocado.core.output import LOG_UI
from avocado.core.plugin_interfaces import CLI, Varianter
from avocado.core.settings import settings
from avocado.core.tree import TreeNode
#: The default order of combinations
DEFAULT_ORDER_OF_COMBINATIONS = 2


class VarianterCitCLI(CLI):

    """
    CIT Varianter options

    Registers ``--cit-parameter-file`` and ``--cit-order-of-combinations``
    on the ``run`` and ``variants`` subcommands.
    """

    name = "cit"
    description = "CIT Varianter options for the 'run' subcommand"

    def METHOD_NAME(self, parser):
        # Attach the CIT option group to every subcommand that supports it.
        for name in ("run", "variants"):
            subparser = parser.subcommands.choices.get(name, None)
            if subparser is None:
                # Subcommand not present in this invocation; nothing to add.
                continue
            subparser.add_argument_group("CIT varianter options")
            settings.register_option(
                section=f"{name}.cit",
                key="parameter_file",
                metavar="PATH",
                help_msg="Paths to a parameter file",
                parser=subparser,
                default=None,
                long_arg="--cit-parameter-file",
            )
            help_msg = "Order of combinations. Maximum number is 6"
            settings.register_option(
                section=f"{name}.cit",
                key="combination_order",
                key_type=int,
                parser=subparser,
                help_msg=help_msg,
                metavar="ORDER",
                default=DEFAULT_ORDER_OF_COMBINATIONS,
                long_arg="--cit-order-of-combinations",
            )

    def run(self, config):
        # Raise the CIT logger verbosity when variants debugging is requested.
        if config.get("variants.debug"):
            LOG.setLevel(logging.DEBUG)
class VarianterCit(Varianter):
    """
    Processes the parameters file into variants
    """

    name = "cit"
    description = "CIT Varianter"

    def initialize(self, config):
        """Parse the CIT parameter file and precompute all variants.

        Leaves ``self.variants`` as ``None`` when no parameter file is given,
        and exits the process on unreadable/unparsable files or an invalid
        combination order.
        """
        subcommand = config.get("subcommand")

        self.variants = None  # pylint: disable=W0201
        order = config.get(f"{subcommand}.cit.combination_order")
        if order and order > 6:
            LOG_UI.error("The order of combinations is bigger then 6")
            self.error_exit(config)

        section_key = f"{subcommand}.cit.parameter_file"
        cit_parameter_file = config.get(section_key)
        if cit_parameter_file is None:
            # No parameter file configured: this varianter stays inactive.
            return
        else:
            cit_parameter_file = os.path.expanduser(cit_parameter_file)
            if not os.access(cit_parameter_file, os.R_OK):
                LOG_UI.error(
                    "parameter file '%s' could not be found or is not readable",
                    cit_parameter_file,
                )
                self.error_exit(config)

        try:
            # BUGFIX: use a context manager so the parameter file is always
            # closed; previously the file object returned by open() leaked.
            with open(cit_parameter_file, encoding="utf-8") as parameter_file:
                parameters, constraints = Parser.parse(parameter_file)
        except ValueError as details:
            LOG_UI.error("Cannot parse parameter file: %s", details)
            self.error_exit(config)

        # Compute the covering array: each row of final_list holds one index
        # per parameter into that parameter's list of values.
        input_data = [len(parameter[1]) for parameter in parameters]

        cit = Cit(input_data, order, constraints)
        final_list = cit.compute()
        self.headers = [  # pylint: disable=W0201
            parameter[0] for parameter in parameters
        ]
        # Translate index combinations back into concrete parameter values.
        results = [
            [parameters[j][1][final_list[i][j]] for j in range(len(final_list[i]))]
            for i in range(len(final_list))
        ]
        self.variants = []  # pylint: disable=W0201
        for combination in results:
            self.variants.append(dict(zip(self.headers, combination)))

    @staticmethod
    def error_exit(config):
        """Exit the process with the code appropriate for the subcommand."""
        if config.get("subcommand") == "run":
            sys.exit(exit_codes.AVOCADO_JOB_FAIL)
        else:
            sys.exit(exit_codes.AVOCADO_FAIL)

    def __iter__(self):
        """Yield one dict per variant: id, parameter TreeNode and paths."""
        if self.variants is None:
            return

        variant_ids = []
        for variant in self.variants:
            variant_ids.append("-".join([variant.get(key) for key in self.headers]))

        for vid, variant in zip(variant_ids, self.variants):
            yield {
                "variant_id": vid,
                "variant": [TreeNode("", variant)],
                "paths": ["/"],
            }

    def __len__(self):
        return sum(1 for _ in self.variants) if self.variants else 0

    def to_str(self, summary, variants, **kwargs):
        """
        Return human readable representation

        The summary/variants accepts verbosity where 0 means silent and
        maximum is up to the plugin.

        :param summary: How verbose summary to output (int)
        :param variants: How verbose list of variants to output (int)
        :param kwargs: Other free-form arguments
        :rtype: str
        """
        if not self.variants:
            return ""
        out = []

        if variants:
            # variants == 0 means disable, but in plugin it's brief
            out.append(f"CIT Variants ({len(self)}):")
            for variant in self:
                out.extend(
                    varianter.variant_to_str(variant, variants - 1, kwargs, False)
                )
        return "\n".join(out)
5,253 | main | #!/usr/bin/env python3
# Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates a human readable file with the list of readers supported by the
Connector App."""
import argparse
import string
import sys
# List of section titles that should be picked up for forming the USB devices
# list in the file.
CCID_SUPPORTED_READERS_CONFIG_SECTIONS_TO_PICK = ('supported', 'shouldwork')
def load_usb_devices(ccid_supported_readers_file):
  """Parse the CCID supported readers config into a list of reader names.

  Only readers that appear under one of the sections listed in
  CCID_SUPPORTED_READERS_CONFIG_SECTIONS_TO_PICK are returned; readers found
  in other sections are counted but dropped.

  The config format: sections are introduced by special "# section: <title>"
  comment lines, followed by "vendor_id:product_id:name" reader lines
  (vendor_id and product_id are hexadecimal integers). Empty lines and other
  comment lines (starting with "#") are ignored.

  Args:
    ccid_supported_readers_file: The opened config file containing
      descriptions of the readers supported by CCID.

  Returns list of strings: (name), which are the names of the usb devices.

  Raises:
    RuntimeError: On a malformed config, or when no supported reader was
      found at all.
  """
  # Prefix marker used for determining section titles.
  section_marker = '# section:'
  # Prefix marker used for determining comment lines.
  comment_marker = '#'
  picked = []
  skipped = []
  section = None
  for raw_line in ccid_supported_readers_file:
    if not raw_line.strip():
      # Ignore empty line
      continue
    # Only the trailing newline is stripped: prefix checks below must see the
    # line otherwise unmodified.
    line = raw_line.strip('\n')
    if line.startswith(section_marker):
      # A section title line switches the current section.
      section = line[len(section_marker):].strip()
      if not section:
        raise RuntimeError('Failed to extract section title from the CCID '
                           'supported readers config')
      continue
    if line.startswith(comment_marker):
      # Ignore comment line
      continue
    # Reader description line: "vendor_id:product_id:name".
    fields = line.split(':', 2)
    if len(fields) != 3:
      raise RuntimeError('Failed to parse the reader description from the CCID '
                         'supported readers config: "{0}"'.format(line))
    name = fields[2]
    if (name in picked) or (name in skipped):
      # Already seen this reader name — keep only the first occurrence.
      continue
    if section is None:
      raise RuntimeError('Unexpected reader definition met in the CCID '
                         'supported readers config before any section title')
    if section in CCID_SUPPORTED_READERS_CONFIG_SECTIONS_TO_PICK:
      picked.append(name)
    else:
      skipped.append(name)
  if not picked:
    raise RuntimeError('No supported USB devices were extracted from the CCID '
                       'supported readers config')
  sys.stderr.write('Extracted {0} supported USB devices from the CCID '
                   'supported readers config, and ignored {1} '
                   'items.\n'.format(
                       len(picked), len(skipped)))
  return picked
def create_readers_list(ccid_supported_readers_file):
  """Return the supported reader names, case-insensitively sorted, joined by newlines."""
  names = load_usb_devices(ccid_supported_readers_file)
  names.sort(key=str.lower)
  return '\n'.join(names)
def METHOD_NAME():
  """Command-line entry point: write the human readable readers list file.

  Reads the CCID supported readers config and writes the sorted list of
  supported reader names, prefixed by a do-not-edit banner, to the target
  file.
  """
  parser = argparse.ArgumentParser(
      description="Creates a human readable file with the list of readers "
      "supported by the Connector App.")
  parser.add_argument(
      '--ccid-supported-readers-config-path',
      type=argparse.FileType('r'),
      required=True,
      metavar='"path/to/ccid_supported_readers_file"',
      dest='ccid_supported_readers_file')
  parser.add_argument(
      '--target-file-path',
      type=argparse.FileType('w'),
      required=True,
      metavar='"path/to/target_file"',
      dest='target_file')
  options = parser.parse_args()
  listing = create_readers_list(options.ccid_supported_readers_file)
  # Banner warning readers that this file is generated.
  options.target_file.write(
      "# NOTE: Auto-generated by create_human_readable_supported_readers.py.\n"
      "# DO NOT MODIFY MANUALLY!\n\n")
  options.target_file.write("Supported readers:\n\n")
  options.target_file.write(listing)


if __name__ == '__main__':
  METHOD_NAME()
5,254 | test get weight | # License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo.tests.common import TransactionCase
class TestGetWeight(TransactionCase):
    """Test get_weight functions."""
    # some helpers
    def _create_order(self, customer):
        # Minimal sale order for the given customer partner.
        return self.env["sale.order"].create({"partner_id": customer.id})
    def _create_order_line(self, order, products):
        # One order line per product, attached to the given order.
        for product in products:
            self.env["sale.order.line"].create(
                {"product_id": product.id, "order_id": order.id}
            )
    def _create_ul(self):
        # Two logistic units (boxes) with distinct tare weights.
        vals = [
            {"name": "Cardboard box", "type": "box", "weight": 0.200},
            {"name": "Wood box", "type": "box", "weight": 1.30},
        ]
        return [self.env["product.ul"].create(val) for val in vals]
    def _create_operation(self, picking, values):
        # Stock move line bound to the picking's locations; "values" overrides
        # or extends the defaults.
        vals = {
            "picking_id": picking.id,
            "location_id": picking.location_id.id,
            "location_dest_id": picking.location_dest_id.id,
        }
        vals.update(values)
        return self.env["stock.move.line"].create(vals)
    def _create_product(self, vals):
        return self.env["product.product"].create(vals)
    def _get_products(self, weights):
        """A recordset of products without any specific uom.
        It means : no uom or kg or unit
        Params:
            weights: recordset will be size of weights and each
            product will get a size according of weights[i]
        """
        kg_id = self.env.ref("uom.product_uom_kgm").id
        unit_id = self.env.ref("uom.product_uom_unit").id
        products = self.env["product.product"].search(
            [["uom_id", "in", (False, kg_id, unit_id)]], limit=len(weights)
        )
        for idx, product in enumerate(products):
            # by default there is no weight on products
            product.weight = weights[idx]
        return products
    def _generate_picking(self, products):
        """Create a picking from products."""
        # NOTE(review): uses the first partner found in the DB — assumes demo
        # data provides at least one partner.
        customer = self.env["res.partner"].search([], limit=1)
        order = self._create_order(customer)
        self._create_order_line(order, products)
        order.action_confirm()
        picking = order.picking_ids
        picking.button_validate()
        return picking
    def METHOD_NAME(self):
        """Test quant.package.weight computed field and
        pack.operation.get_weight."""
        # prepare some data
        weights = [2, 30, 1, 24, 39]
        products = self._get_products(weights)
        picking = self._generate_picking(products)
        package = self.env["stock.quant.package"].create({})
        operations = self.env["stock.move.line"]
        for product in products:
            operations |= self._create_operation(
                picking,
                {
                    "product_uom_qty": 1,
                    "product_id": product.id,
                    "product_uom_id": product.uom_id.id,
                    "result_package_id": package.id,
                },
            )
        # end of prepare data
        # test operation.get_weight()
        for operation in operations:
            self.assertEqual(
                operation.get_weight(),
                operation.product_id.weight * operation.product_uom_qty,
            )
        # test package.weight
        self.assertEqual(package.weight, sum([product.weight for product in products]))
    def test_total_weight(self):
        """Test quant.package.weight computed field when a total
        weight is defined"""
        # prepare some data
        weights = [2, 30, 1, 24, 39]
        products = self._get_products(weights)
        picking = self._generate_picking(products)
        package = self.env["stock.quant.package"].create({})
        operations = self.env["stock.move.line"]
        for product in products:
            operations |= self._create_operation(
                picking,
                {
                    "product_uom_qty": 1,
                    "product_id": product.id,
                    "product_uom_id": product.uom_id.id,
                    "result_package_id": package.id,
                },
            )
        # An explicit shipping weight must take precedence over the computed
        # sum of operation weights.
        package.shipping_weight = 1542.0
        # end of prepare data
        # test operation.get_weight()
        for operation in operations:
            self.assertEqual(
                operation.get_weight(),
                operation.product_id.weight * operation.product_uom_qty,
            )
        # test package.weight
        self.assertEqual(package.weight, package.shipping_weight)
    def test_get_weight_with_qty(self):
        """Ensure qty are taken in account."""
        # prepare some data
        weights = [2, 30, 1, 24, 39]
        products = self._get_products(weights)
        picking = self._generate_picking(products)
        package = self.env["stock.quant.package"].create({})
        operations = self.env["stock.move.line"]
        for idx, product in enumerate(products):
            operations |= self._create_operation(
                picking,
                {
                    "product_uom_qty": idx,  # nice one
                    "product_id": product.id,
                    "product_uom_id": product.uom_id.id,
                    "result_package_id": package.id,
                },
            )
        # end of prepare data
        # test operation.get_weight()
        for operation in operations:
            self.assertEqual(
                operation.get_weight(),
                operation.product_id.weight * operation.product_uom_qty,
            )
        # test package._weight
        self.assertEqual(
            package.weight, sum([operation.get_weight() for operation in operations])
        )
    def test_get_weight_with_uom(self):
        """Check with differents uom."""
        # prepare some data
        weights = [0.3, 14.01, 0.59]
        package = self.env["stock.quant.package"].create({})
        tonne_id = self.env.ref("uom.product_uom_ton")
        kg_id = self.env.ref("uom.product_uom_kgm")
        gr_id = self.env.ref("uom.product_uom_gram")
        products = []
        products.append(
            self._create_product(
                {
                    "name": "Expected Odoo dev documentation",
                    "uom_id": tonne_id.id,
                    "uom_po_id": tonne_id.id,
                    "weight": weights[0],
                }
            )
        )
        products.append(
            self._create_product(
                {
                    "name": "OCA documentation",
                    "uom_id": kg_id.id,
                    "uom_po_id": kg_id.id,
                    "weight": weights[1],
                }
            )
        )
        products.append(
            self._create_product(
                {
                    "name": "Actual Odoo dev documentation",
                    "uom_id": gr_id.id,
                    "uom_po_id": gr_id.id,
                    "weight": weights[2],
                }
            )
        )
        # NOTE(review): the gram factor below is 0.01 rather than 0.001 —
        # harmless while the value is unused (see final assert), but confirm
        # before enabling the commented-out check.
        products_weight = (
            weights[0] * 1000 + weights[1] * 1 + weights[2] * 0.01  # tonne # kg # g
        )
        picking = self._generate_picking(products)
        operations = self.env["stock.move.line"]
        for product in products:
            operations |= self._create_operation(
                picking,
                {
                    "product_uom_qty": 1,
                    "product_id": product.id,
                    "product_uom_id": product.uom_id.id,
                    "result_package_id": package.id,
                },
            )
        # end of prepare data
        # because uom conversion is not implemented
        self.assertEqual(package.weight, False)
        # if one day, uom conversion is implemented:
        # self.assertEqual(package.get_weight(), products_weight)
        # Tautological assertion: only keeps products_weight "used" so flake8
        # does not flag it as an unused variable. It verifies nothing.
        self.assertEqual(products_weight, products_weight)  # flake8 warning
5,255 | test sub component sc 2uu | # -*- coding: utf-8 -*-
#
# Copyright 2009-2023 NTESS. Under the terms
# of Contract DE-NA0003525 with NTESS, the U.S.
# Government retains certain rights in this software.
#
# Copyright (c) 2009-2023, NTESS
# All rights reserved.
#
# This file is part of the SST software package. For license
# information, see the LICENSE file in the top level directory of the
# distribution.
import os
import sys
from sst_unittest import *
from sst_unittest_support import *
################################################################################
# Code to support a single instance module initialize, must be called setUp method
module_init = 0
module_sema = threading.Semaphore()
def initializeTestModule_SingleInstance(class_inst):
    """Run the module-level initialization code exactly once.

    Called from every test's setUp(); the semaphore serializes concurrent
    callers while the module_init flag makes the body idempotent.

    :param class_inst: the test-case instance triggering initialization
        (unused by the default implementation, kept for the callers).
    """
    global module_init
    # "with" guarantees the semaphore is released even if the init code
    # raises; the original acquire()/release() pair would leak the semaphore
    # on an exception and deadlock every later test.
    with module_sema:
        if module_init != 1:
            # Put your single instance Init Code Here
            module_init = 1
################################################################################
class testcase_SubComponent(SSTTestCase):
    """SST integration tests for the SubComponent example configurations.

    Each test_* method runs one SDL config through SST and diffs the output
    against a reference file; all tests are skipped above 2 ranks.
    """
    def initializeClass(self, testName):
        # NOTE(review): super(type(self), self) is an anti-pattern — it
        # recurses infinitely if this class is ever subclassed. Prefer
        # super(testcase_SubComponent, self) or plain super().
        super(type(self), self).initializeClass(testName)
        # Put test based setup code here. it is called before testing starts
        # NOTE: This method is called once for every test
    def setUp(self):
        super(type(self), self).setUp()
        initializeTestModule_SingleInstance(self)
        # Put test based setup code here. it is called once before every test
    def tearDown(self):
        # Put test based teardown code here. it is called once after every test
        super(type(self), self).tearDown()
    #####
    # Skip reason shared by every test below (string has a typo: "suports").
    rankerr = "Test only suports ranks <= 2"
    @unittest.skipIf(testing_check_get_num_ranks() > 2, rankerr)
    def test_SubComponent_sc_2a(self):
        self.subcomponent_test_template("sc_2a")
    @unittest.skipIf(testing_check_get_num_ranks() > 2, rankerr)
    def test_SubComponent_sc_2u2u(self):
        self.subcomponent_test_template("sc_2u2u")
    @unittest.skipIf(testing_check_get_num_ranks() > 2, rankerr)
    def test_SubComponent_sc_2u(self):
        self.subcomponent_test_template("sc_2u")
    @unittest.skipIf(testing_check_get_num_ranks() > 2, rankerr)
    def test_SubComponent_sc_a(self):
        self.subcomponent_test_template("sc_a")
    @unittest.skipIf(testing_check_get_num_ranks() > 2, rankerr)
    def test_SubComponent_sc_u2u(self):
        self.subcomponent_test_template("sc_u2u")
    @unittest.skipIf(testing_check_get_num_ranks() > 2, rankerr)
    def test_SubComponent_sc_u(self):
        self.subcomponent_test_template("sc_u")
    @unittest.skipIf(testing_check_get_num_ranks() > 2, rankerr)
    def test_SubComponent_sc_2u2a(self):
        self.subcomponent_test_template("sc_2u2a")
    @unittest.skipIf(testing_check_get_num_ranks() > 2, rankerr)
    def test_SubComponent_sc_2ua(self):
        self.subcomponent_test_template("sc_2ua")
    @unittest.skipIf(testing_check_get_num_ranks() > 2, rankerr)
    def METHOD_NAME(self):
        self.subcomponent_test_template("sc_2uu")
    @unittest.skipIf(testing_check_get_num_ranks() > 2, rankerr)
    def test_SubComponent_sc_u2a(self):
        self.subcomponent_test_template("sc_u2a")
    @unittest.skipIf(testing_check_get_num_ranks() > 2, rankerr)
    def test_SubComponent_sc_ua(self):
        self.subcomponent_test_template("sc_ua")
    @unittest.skipIf(testing_check_get_num_ranks() > 2, rankerr)
    def test_SubComponent_sc_uu(self):
        self.subcomponent_test_template("sc_uu")
    #####
    def subcomponent_test_template(self, testtype):
        """Run the SDL file for *testtype* and diff its output against the
        matching reference file (sorted, ignoring the no-components warning).
        """
        testsuitedir = self.get_testsuite_dir()
        outdir = test_output_get_run_dir()
        # Set the various file paths
        sdlfile = "{0}/subcomponent_tests/test_{1}.py".format(testsuitedir, testtype)
        reffile = "{0}/subcomponent_tests/refFiles/test_{1}.out".format(testsuitedir, testtype)
        outfile = "{0}/test_SubComponent_{1}.out".format(outdir, testtype)
        self.run_sst(sdlfile, outfile)
        # Perform the test
        filter1 = StartsWithFilter("WARNING: No components are")
        cmp_result = testing_compare_filtered_diff(testtype, outfile, reffile, sort=True, filters=[filter1])
        self.assertTrue(cmp_result, "Output/Compare file {0} does not match Reference File {1}".format(outfile, reffile))
|
5,256 | node has no consumers | # Copyright 2021 Collate
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Defines the topology for ingesting sources
"""
from typing import Any, Generic, List, Optional, Type, TypeVar
from pydantic import BaseModel, Extra, create_model
T = TypeVar("T", bound=BaseModel)
class NodeStage(BaseModel, Generic[T]):
    """
    Handles the processing stages of each node.
    Each stage is equipped with a processing function
    and a context key, which will be updated at the
    source.
    """
    class Config:
        # Reject unknown fields so topology definitions fail fast on typos.
        extra = Extra.forbid
    type_: Type[T] # Entity type produced by this stage
    processor: str # has the producer results as an argument. Here is where filters happen
    context: Optional[str] = None # context key storing stage state, if needed
    ack_sink: bool = True # Validate that the request is present in OM and update the context with the results
    nullable: bool = False # The yielded value can be null
    must_return: bool = False # The sink MUST return a value back after ack. Useful to validate services are correct.
    cache_all: bool = (
        False # If we need to cache all values being yielded in the context
    )
    clear_cache: bool = False # If we need to clean cache values in the context for each produced element
    overwrite: bool = True # If we want to overwrite existing data from OM
    consumer: Optional[
        List[str]
    ] = None # keys in the source context to fetch state from the parent's context
class TopologyNode(BaseModel):
    """
    Each node has a producer function, which will
    yield an Entity to be passed to the Sink. Afterwards,
    the producer function will update the Source context
    with the updated element from the OM API.
    """
    class Config:
        # Reject unknown fields so topology definitions fail fast on typos.
        extra = Extra.forbid
    # method name in the source to use to generate the data to process
    # does not accept input parameters
    producer: str
    # list of functions to execute - in order - for each element produced by the producer
    # each stage accepts the producer results as an argument
    stages: List[NodeStage]
    children: Optional[List[str]] = None # nodes to call execute next
    post_process: Optional[
        List[str]
    ] = None # Method to be run after the node has been fully processed
class ServiceTopology(BaseModel):
    """
    Bounds all service topologies.

    Concrete topologies subclass this and declare their TopologyNode fields;
    extra attributes are allowed so each service can define its own nodes.
    """
    class Config:
        extra = Extra.allow
class TopologyContext(BaseModel):
    """
    Bounds all topology contexts.

    Instances are built dynamically (see create_source_context) with one
    optional field per stage that declares a context key.
    """
    class Config:
        extra = Extra.allow
    def __repr__(self):
        # NOTE(review): assumes every stored value exposes .name.__root__
        # (OpenMetadata entities) — repr will raise for other value types.
        ctx = {key: value.name.__root__ for key, value in self.__dict__.items()}
        return f"TopologyContext({ctx})"
def get_topology_nodes(topology: ServiceTopology) -> List[TopologyNode]:
    """
    Fetch all nodes from a ServiceTopology
    :param topology: ServiceTopology
    :return: List of nodes
    """
    # Only the values are needed; the original iterated .items() and
    # discarded the key.
    return list(topology.__dict__.values())
def METHOD_NAME(node: TopologyNode) -> bool:
    """
    Validate if a node has no consumers
    :param node: TopologyNode to inspect
    :return: True when no stage of the node declares a consumer
    """
    return all(stage.consumer is None for stage in node.stages)
def get_topology_root(topology: ServiceTopology) -> List[TopologyNode]:
    """
    Fetch the roots from a ServiceTopology.
    A node is root if it has no consumers, i.e., can be
    computed at the top of the Tree.
    :param topology: ServiceTopology
    :return: List of nodes that can be roots
    """
    return [
        node
        for node in get_topology_nodes(topology)
        if METHOD_NAME(node)
    ]
def get_ctx_default(stage: NodeStage) -> Optional[List[Any]]:
    """
    If we cache all, default value is an empty list
    :param stage: Node Stage
    :return: None or []
    """
    if stage.cache_all:
        return []
    return None
def create_source_context(topology: ServiceTopology) -> TopologyContext:
    """
    Dynamically build a context based on the topology nodes.
    Builds a Pydantic BaseModel class.
    :param topology: ServiceTopology
    :return: TopologyContext
    """
    nodes = get_topology_nodes(topology)
    # One optional field per stage that declares a context key; the default
    # is [] for cache_all stages and None otherwise (see get_ctx_default).
    ctx_fields = {
        stage.context: (Optional[stage.type_], get_ctx_default(stage))
        for node in nodes
        for stage in node.stages
        if stage.context
    }
    # Instantiate the generated model immediately — callers get a context
    # object, not a class.
    return create_model("GeneratedContext", **ctx_fields, __base__=TopologyContext)()
def get_topology_node(name: str, topology: ServiceTopology) -> TopologyNode:
    """
    Fetch a topology node by name
    :param name: node name
    :param topology: service topology with all nodes
    :return: TopologyNode
    :raises ValueError: when no node with that name exists
    """
    node = topology.__dict__.get(name)
    if node:
        return node
    raise ValueError(f"{name} node not found in {topology}")
5,257 | get group output | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetGroupResult',
'AwaitableGetGroupResult',
'get_group',
'get_group_output',
]
@pulumi.output_type
class GetGroupResult:
    """
    A group created in a Migration project.
    """
    # Generated Pulumi output type: the constructor validates each invoke
    # result field and stores it via pulumi.set for the getters below.
    def __init__(__self__, e_tag=None, id=None, name=None, properties=None, type=None):
        if e_tag and not isinstance(e_tag, str):
            raise TypeError("Expected argument 'e_tag' to be a str")
        pulumi.set(__self__, "e_tag", e_tag)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if properties and not isinstance(properties, dict):
            raise TypeError("Expected argument 'properties' to be a dict")
        pulumi.set(__self__, "properties", properties)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="eTag")
    def e_tag(self) -> Optional[str]:
        """
        For optimistic concurrency control.
        """
        return pulumi.get(self, "e_tag")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Path reference to this group. /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Migrate/assessmentProjects/{projectName}/groups/{groupName}
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Name of the group.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def properties(self) -> 'outputs.GroupPropertiesResponse':
        """
        Properties of the group.
        """
        return pulumi.get(self, "properties")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Type of the object = [Microsoft.Migrate/assessmentProjects/groups].
        """
        return pulumi.get(self, "type")
class AwaitableGetGroupResult(GetGroupResult):
    """Awaitable wrapper so get_group results can be used with ``await``.

    The generator body never yields (the ``if False`` branch only makes this
    a coroutine-compatible object) and immediately returns a plain
    GetGroupResult copy.
    """
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetGroupResult(
            e_tag=self.e_tag,
            id=self.id,
            name=self.name,
            properties=self.properties,
            type=self.type)
def get_group(group_name: Optional[str] = None,
              project_name: Optional[str] = None,
              resource_group_name: Optional[str] = None,
              opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetGroupResult:
    """
    Get information related to a specific group in the project. Returns a json object of type 'group' as specified in the models section.

    :param str group_name: Unique name of a group within a project.
    :param str project_name: Name of the Azure Migrate project.
    :param str resource_group_name: Name of the Azure Resource Group that project is part of.
    :return: An awaitable wrapper around the group invoke result.
    """
    __args__ = dict()
    __args__['groupName'] = group_name
    __args__['projectName'] = project_name
    __args__['resourceGroupName'] = resource_group_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # Synchronous invoke against the fixed 2019-10-01 API version endpoint.
    __ret__ = pulumi.runtime.invoke('azure-native:migrate/v20191001:getGroup', __args__, opts=opts, typ=GetGroupResult).value
    return AwaitableGetGroupResult(
        e_tag=pulumi.get(__ret__, 'e_tag'),
        id=pulumi.get(__ret__, 'id'),
        name=pulumi.get(__ret__, 'name'),
        properties=pulumi.get(__ret__, 'properties'),
        type=pulumi.get(__ret__, 'type'))
# Output-lifted variant of get_group: accepts pulumi.Input/Output arguments
# and returns a pulumi.Output. The decorator supplies the implementation, so
# the body is intentionally just "...".
@_utilities.lift_output_func(get_group)
def METHOD_NAME(group_name: Optional[pulumi.Input[str]] = None,
                project_name: Optional[pulumi.Input[str]] = None,
                resource_group_name: Optional[pulumi.Input[str]] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetGroupResult]:
    """
    Get information related to a specific group in the project. Returns a json object of type 'group' as specified in the models section.

    :param str group_name: Unique name of a group within a project.
    :param str project_name: Name of the Azure Migrate project.
    :param str resource_group_name: Name of the Azure Resource Group that project is part of.
    """
    ...
5,258 | is authenticated | # Stubs for paramiko.transport (Python 3.5)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from typing import Any
import threading
from paramiko.util import ClosingContextManager
class Transport(threading.Thread, ClosingContextManager):
    """Type stub for paramiko's SSH Transport.

    Declarations only — attribute types use the dated ``# type:`` comment
    syntax and most are ``Any``; method bodies are ``...`` placeholders.
    """
    active = ...  # type: Any
    sock = ...  # type: Any
    packetizer = ...  # type: Any
    local_version = ...  # type: Any
    remote_version = ...  # type: Any
    local_cipher = ...  # type: Any
    local_kex_init = ...  # type: Any
    local_mac = ...  # type: Any
    local_compression = ...  # type: Any
    session_id = ...  # type: Any
    host_key_type = ...  # type: Any
    host_key = ...  # type: Any
    use_gss_kex = ...  # type: Any
    gss_kex_used = ...  # type: Any
    kexgss_ctxt = ...  # type: Any
    gss_host = ...  # type: Any
    kex_engine = ...  # type: Any
    H = ...  # type: Any
    K = ...  # type: Any
    initial_kex_done = ...  # type: Any
    in_kex = ...  # type: Any
    authenticated = ...  # type: Any
    lock = ...  # type: Any
    channel_events = ...  # type: Any
    channels_seen = ...  # type: Any
    default_max_packet_size = ...  # type: Any
    default_window_size = ...  # type: Any
    saved_exception = ...  # type: Any
    clear_to_send = ...  # type: Any
    clear_to_send_lock = ...  # type: Any
    clear_to_send_timeout = ...  # type: Any
    log_name = ...  # type: Any
    logger = ...  # type: Any
    auth_handler = ...  # type: Any
    global_response = ...  # type: Any
    completion_event = ...  # type: Any
    banner_timeout = ...  # type: Any
    handshake_timeout = ...  # type: Any
    server_mode = ...  # type: Any
    server_object = ...  # type: Any
    server_key_dict = ...  # type: Any
    server_accepts = ...  # type: Any
    server_accept_cv = ...  # type: Any
    subsystem_table = ...  # type: Any
    def __init__(self, sock, default_window_size=..., default_max_packet_size=..., gss_kex=False, gss_deleg_creds=True): ...
    def atfork(self): ...
    def get_security_options(self): ...
    def set_gss_host(self, gss_host): ...
    def start_client(self, event=None): ...
    def start_server(self, event=None, server=None): ...
    def add_server_key(self, key): ...
    def get_server_key(self): ...
    @staticmethod
    def load_server_moduli(filename=None): ...
    def close(self): ...
    def get_remote_server_key(self): ...
    def is_active(self): ...
    def open_session(self, window_size=None, max_packet_size=None, timeout=None): ...
    def open_x11_channel(self, src_addr=None): ...
    def open_forward_agent_channel(self): ...
    def open_forwarded_tcpip_channel(self, src_addr, dest_addr): ...
    def open_channel(self, kind, dest_addr=None, src_addr=None, window_size=None, max_packet_size=None, timeout=None): ...
    def request_port_forward(self, address, port, handler=None): ...
    def cancel_port_forward(self, address, port): ...
    def open_sftp_client(self): ...
    def send_ignore(self, byte_count=None): ...
    def renegotiate_keys(self): ...
    def set_keepalive(self, interval): ...
    def global_request(self, kind, data=None, wait=True): ...
    def accept(self, timeout=None): ...
    def connect(self, hostkey=None, username='', password=None, pkey=None, gss_host=None, gss_auth=False, gss_kex=False, gss_deleg_creds=True): ...
    def get_exception(self): ...
    def set_subsystem_handler(self, name, handler, *larg, **kwarg): ...
    def METHOD_NAME(self): ...
    def get_username(self): ...
    def get_banner(self): ...
    def auth_none(self, username): ...
    def auth_password(self, username, password, event=None, fallback=True): ...
    def auth_publickey(self, username, key, event=None): ...
    def auth_interactive(self, username, handler, submethods=''): ...
    def auth_interactive_dumb(self, username, handler=None, submethods=''): ...
    def auth_gssapi_with_mic(self, username, gss_host, gss_deleg_creds): ...
    def auth_gssapi_keyex(self, username): ...
    def set_log_channel(self, name): ...
    def get_log_channel(self): ...
    def set_hexdump(self, hexdump): ...
    def get_hexdump(self): ...
    def use_compression(self, compress=True): ...
    def getpeername(self): ...
    def stop_thread(self): ...
    # NOTE(review): "sys" as a class attribute looks like a stubgen artifact;
    # confirm against the paramiko source before relying on it.
    sys = ...  # type: Any
    def run(self): ...
class SecurityOptions:
    """Type stub for paramiko's SecurityOptions (algorithm preference lists).

    Each property exposes a get/set pair; bodies are ``...`` placeholders.
    """
    def __init__(self, transport): ...
    @property
    def ciphers(self): ...
    @ciphers.setter
    def ciphers(self, x): ...
    @property
    def digests(self): ...
    @digests.setter
    def digests(self, x): ...
    @property
    def key_types(self): ...
    @key_types.setter
    def key_types(self, x): ...
    @property
    def kex(self): ...
    @kex.setter
    def kex(self, x): ...
    @property
    def compression(self): ...
    @compression.setter
    def compression(self, x): ...
class ChannelMap:
    """Type stub for paramiko's internal channel-id -> channel mapping."""
    def __init__(self): ...
    def put(self, chanid, chan): ...
    def get(self, chanid): ...
    def delete(self, chanid): ...
    def values(self): ...
    def __len__(self): ...
5,259 | init2 ack | # SPDX-License-Identifier: MIT
import struct
from ..utils import *
from .asc import StandardASC
from .asc.base import *
# Register layouts for messages exchanged with the PMP coprocessor.
# TYPE = 56, 44 presumably declares the bit range (56..44) of the message
# type field in the 64-bit mailbox word, per the Register64 convention; the
# subclasses pin TYPE to a Constant discriminating each message.
class PMPMessage(Register64):
    TYPE = 56, 44
class PMP_Startup(PMPMessage):
    TYPE = 56, 44, Constant(0x00)
class PMP_Configure(PMPMessage):
    TYPE = 56, 44, Constant(0x10)
    DVA = 47, 0
class PMP_Configure_Ack(PMPMessage):
    TYPE = 56, 44, Constant(0x20)
    UNK = 47, 0
class PMP_Init1(PMPMessage):
    TYPE = 56, 44, Constant(0x200)
    UNK1 = 43, 16
    UNK2 = 15, 0
class PMP_Init1_Ack(PMPMessage):
    TYPE = 56, 44, Constant(0x201)
    UNK1 = 43, 16
    UNK2 = 15, 0
class PMP_Init2(PMPMessage):
    TYPE = 56, 44, Constant(0x202)
    UNK1 = 43, 16
    UNK2 = 15, 0
class PMP_Init2_Ack(PMPMessage):
    TYPE = 56, 44, Constant(0x203)
    UNK1 = 43, 16
    UNK2 = 15, 0
class PMP_Unk(PMPMessage):
    TYPE = 56, 44, Constant(0x100)
    UNK1 = 43, 16
    UNK2 = 15, 0
class PMP_Unk_Ack(PMPMessage):
    TYPE = 56, 44, Constant(0x110)
    UNK1 = 43, 16
    UNK2 = 15, 0
class PMP_DevPwr(PMPMessage):
    TYPE = 56, 44, Constant(0x20e)
    DEV = 31, 16
    STATE = 15, 0
class PMP_DevPwr_Sync(PMPMessage):
    TYPE = 56, 44, Constant(0x208)
    DEV = 31, 16
    STATE = 15, 0
class PMP_DevPwr_Ack(PMPMessage):
    TYPE = 56, 44, Constant(0x209)
    DEV = 31, 16
    STATE = 15, 0
class PMPEndpoint(ASCBaseEndpoint):
    """ASC mailbox endpoint driving the PMP coprocessor handshake.

    Flow visible here: Startup -> Configure (shared memory + IO maps) ->
    Configure_Ack -> Init1/Init2/Unk exchanges, each spun on asc.work()
    until acknowledged.
    """
    BASE_MESSAGE = PMPMessage
    SHORT = "pmpep"
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Shared-memory buffer (CPU address) and its device-visible address.
        self.shmem = self.shmem_dva = None
        # Handshake progress flags, flipped by the ack handlers below.
        self.init_complete = False
        self.init1_acked = False
        self.init2_acked = False
        self.unk_acked = False
    @msg_handler(0x00, PMP_Startup)
    def Startup(self, msg):
        # First message from the coprocessor: allocate shared memory and
        # send the configuration.
        self.log("Starting up")
        self.shmem, self.shmem_dva = self.asc.ioalloc(0x10000)
        self.send_init_config()
        return True
    def send_init_config(self):
        """Populate the shared-memory config block and send PMP_Configure."""
        self.asc.p.memset32(self.shmem, 0, 0x10000)
        # DRAM energy model from the ADT goes at offset 0x2000.
        dram_config = self.asc.u.adt["arm-io/pmp/iop-pmp-nub"].energy_model_dram_configs
        self.asc.iface.writemem(self.shmem + 0x2000, dram_config)
        node = self.asc.u.adt["arm-io/pmp"]
        maps = []
        dva = 0xc0000000
        # Map each MMIO region from reg index 3 onward into the DART and
        # record (dva, size) pairs; zero-size regions become (0, 0) entries.
        for i in range(3, len(node.reg)):
            addr, size = node.get_reg(i)
            if size == 0:
                maps.append(struct.pack("<QQ", 0, 0))
                continue
            self.asc.dart.iomap_at(0, dva, addr, size)
            self.log(f"map {addr:#x} -> {dva:#x} [{size:#x}]")
            maps.append(struct.pack("<QQ", dva, size))
            dva += align(size, 0x4000)
        chexdump(b"".join(maps))
        # The map table goes at offset 0xe000 of the shared buffer.
        self.asc.iface.writemem(self.shmem + 0xe000, b"".join(maps))
        self.send(PMP_Configure(DVA=self.shmem_dva))
        # Busy-wait on the mailbox until Configure_Ack arrives.
        while not self.init_complete:
            self.asc.work()
        return True
    @msg_handler(0x20, PMP_Configure_Ack)
    def Configure_Ack(self, msg):
        self.init_complete = True
        # Dump the regions the coprocessor filled in for debugging.
        props = self.asc.iface.readmem(self.shmem, 0x2000)
        devinfo = self.asc.iface.readmem(self.shmem + 0x4000, 0x1000)
        status = self.asc.iface.readmem(self.shmem + 0xc000, 0x100)
        print("PMP Props:")
        chexdump(props)
        print("PMP Device Info:")
        chexdump(devinfo)
        print("PMP Status:")
        chexdump(status)
        # Continue the handshake, waiting for each ack in turn.
        self.send(PMP_Init1(UNK1=1, UNK2=3))
        while not self.init1_acked:
            self.asc.work()
        self.send(PMP_Init2(UNK1=1, UNK2=0))
        while not self.init2_acked:
            self.asc.work()
        self.send(PMP_Unk(UNK1=0x3bc, UNK2=2))
        while not self.unk_acked:
            self.asc.work()
        return True
    @msg_handler(0x201, PMP_Init1_Ack)
    def Init1_Ack(self, msg):
        self.init1_acked = True
        return True
    @msg_handler(0x203, PMP_Init2_Ack)
    def METHOD_NAME(self, msg):
        self.init2_acked = True
        return True
    @msg_handler(0x110, PMP_Unk_Ack)
    def Unk_Ack(self, msg):
        self.unk_acked = True
        return True
class PMPClient(StandardASC):
    """Standard ASC client for the PMP coprocessor; see ENDPOINTS/PMPEndpoint."""
    pass
ENDPOINTS = {
0x20: PMPEndpoint,
} |
5,260 | list edge module provisioning token output | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'ListEdgeModuleProvisioningTokenResult',
'AwaitableListEdgeModuleProvisioningTokenResult',
'list_edge_module_provisioning_token',
'list_edge_module_provisioning_token_output',
]
@pulumi.output_type
class ListEdgeModuleProvisioningTokenResult:
    """
    Provisioning token properties. A provisioning token allows for a single instance of Azure Video analyzer IoT edge module to be initialized and authorized to the cloud account. The provisioning token itself is short lived and it is only used for the initial handshake between IoT edge module and the cloud. After the initial handshake, the IoT edge module will agree on a set of authentication keys which will be auto-rotated as long as the module is able to periodically connect to the cloud. A new provisioning token can be generated for the same IoT edge module in case the module state lost or reset.
    """
    # Generated Pulumi output type: validate each invoke result field and
    # store it via pulumi.set for the getters below.
    def __init__(__self__, expiration_date=None, token=None):
        if expiration_date and not isinstance(expiration_date, str):
            raise TypeError("Expected argument 'expiration_date' to be a str")
        pulumi.set(__self__, "expiration_date", expiration_date)
        if token and not isinstance(token, str):
            raise TypeError("Expected argument 'token' to be a str")
        pulumi.set(__self__, "token", token)
    @property
    @pulumi.getter(name="expirationDate")
    def expiration_date(self) -> str:
        """
        The expiration date of the registration token. The Azure Video Analyzer IoT edge module must be initialized and connected to the Internet prior to the token expiration date.
        """
        return pulumi.get(self, "expiration_date")
    @property
    @pulumi.getter
    def token(self) -> str:
        """
        The token blob to be provided to the Azure Video Analyzer IoT edge module through the Azure IoT Edge module twin properties.
        """
        return pulumi.get(self, "token")
class AwaitableListEdgeModuleProvisioningTokenResult(ListEdgeModuleProvisioningTokenResult):
    """Awaitable wrapper so the invoke result works in both sync and async code."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The value is already resolved; the dead `yield` only makes this a
        # generator so `await` is legal, then the result is returned at once.
        if False:
            yield self
        return ListEdgeModuleProvisioningTokenResult(
            expiration_date=self.expiration_date,
            token=self.token)
def list_edge_module_provisioning_token(account_name: Optional[str] = None,
                                        edge_module_name: Optional[str] = None,
                                        expiration_date: Optional[str] = None,
                                        resource_group_name: Optional[str] = None,
                                        opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListEdgeModuleProvisioningTokenResult:
    """
    Creates a new provisioning token. A provisioning token allows for a single instance of Azure Video analyzer IoT edge module to be initialized and authorized to the cloud account. The provisioning token itself is short lived and it is only used for the initial handshake between IoT edge module and the cloud. After the initial handshake, the IoT edge module will agree on a set of authentication keys which will be auto-rotated as long as the module is able to periodically connect to the cloud. A new provisioning token can be generated for the same IoT edge module in case the module state lost or reset.
    Azure REST API version: 2021-11-01-preview.
    :param str account_name: The Azure Video Analyzer account name.
    :param str edge_module_name: The Edge Module name.
    :param str expiration_date: The desired expiration date of the registration token. The Azure Video Analyzer IoT edge module must be initialized and connected to the Internet prior to the token expiration date.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    # Marshal snake_case python arguments into the camelCase names the
    # Azure-native provider expects.
    __args__ = dict()
    __args__['accountName'] = account_name
    __args__['edgeModuleName'] = edge_module_name
    __args__['expirationDate'] = expiration_date
    __args__['resourceGroupName'] = resource_group_name
    # Merge caller-supplied invoke options over the provider defaults, then
    # dispatch the invoke to the Pulumi engine.
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:videoanalyzer:listEdgeModuleProvisioningToken', __args__, opts=opts, typ=ListEdgeModuleProvisioningTokenResult).value
    return AwaitableListEdgeModuleProvisioningTokenResult(
        expiration_date=pulumi.get(__ret__, 'expiration_date'),
        token=pulumi.get(__ret__, 'token'))
@_utilities.lift_output_func(list_edge_module_provisioning_token)
def METHOD_NAME(account_name: Optional[pulumi.Input[str]] = None,
                edge_module_name: Optional[pulumi.Input[str]] = None,
                expiration_date: Optional[pulumi.Input[str]] = None,
                resource_group_name: Optional[pulumi.Input[str]] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[ListEdgeModuleProvisioningTokenResult]:
    """
    Creates a new provisioning token. A provisioning token allows for a single instance of Azure Video analyzer IoT edge module to be initialized and authorized to the cloud account. The provisioning token itself is short lived and it is only used for the initial handshake between IoT edge module and the cloud. After the initial handshake, the IoT edge module will agree on a set of authentication keys which will be auto-rotated as long as the module is able to periodically connect to the cloud. A new provisioning token can be generated for the same IoT edge module in case the module state lost or reset.
    Azure REST API version: 2021-11-01-preview.
    :param str account_name: The Azure Video Analyzer account name.
    :param str edge_module_name: The Edge Module name.
    :param str expiration_date: The desired expiration date of the registration token. The Azure Video Analyzer IoT edge module must be initialized and connected to the Internet prior to the token expiration date.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    # Body intentionally empty: _utilities.lift_output_func wraps the plain
    # invoke above and supplies the Output-returning implementation.
    ...
"""Module holding utility and convenience functions for zmq event monitoring.
example
import sys
import zmq
# import zmq.asyncio
# from zmq_monitor_class import event_monitor_async
from zmq_monitor_class import event_monitor
context = zmq.Context()
socket = context.socket(zmq.SUB)
socket.setsockopt(zmq.SUBSCRIBE, b"")
event_monitor(socket)
# event_monitor_async(socket, zmq.asyncio.asyncio.get_event_loop() )
socket.connect ("tcp://127.0.0.1:7103")
while True:
m=socket.recv()
# print(socket.recv())
"""
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
import asyncio
import threading
import time
from typing import Any, Dict
import zmq
from zmq.utils.monitor import recv_monitor_message
def METHOD_NAME(
    monitor: zmq.asyncio.Socket, loop: asyncio.BaseEventLoop
) -> None:
    """A thread that prints events
    This is a convenience method. It could serve as an example for your code of a monitor,
    For example if you don't need the prints, then copy paste this part of code to your code and modify it to your needs.
    parameters:
    monitor: a zmq monitor socket, from calling: my_zmq_socket.get_monitor_socket()
    loop: an asyncio event loop, from calling zmq.asyncio.asyncio.get_event_loop() , whens starting a thread it does not contains an event loop
    """
    print("libzmq-%s" % zmq.zmq_version())
    if zmq.zmq_version_info() < (4, 0):
        raise RuntimeError("monitoring in libzmq version < 4.0 is not supported")
    # Build a reverse map from event number to zmq.EVENT_* name for printing.
    EVENT_MAP = {}
    print("Event names:")
    for name in dir(zmq):
        if name.startswith('EVENT_'):
            value = getattr(zmq, name)
            print("%21s : %4i" % (name, value))
            EVENT_MAP[value] = name
    print("\n")
    # The caller runs this function in a fresh thread, which has no event
    # loop of its own; install the one the caller passed in.
    asyncio.set_event_loop(loop)
    async def run_loop() -> None:
        # NOTE(review): the outer `while True` has no break, so control only
        # leaves via an exception other than RuntimeError -- confirm whether
        # the close/"done" lines below are reachable in practice.
        while True:
            try:
                while monitor.poll():
                    evt: Dict[str, Any] = {}
                    mon_evt = await recv_monitor_message(monitor)
                    evt.update(mon_evt)
                    evt['description'] = EVENT_MAP[evt['event']]
                    print(f"Event: {evt}")
                    if evt['event'] == zmq.EVENT_MONITOR_STOPPED:
                        break
            except RuntimeError as e:
                # poll()/recv on a closed or stopped monitor raises; back off
                # briefly and retry.
                print(e)
                time.sleep(1)
        monitor.close()
        print()
        print("event monitor thread done!")
    asyncio.ensure_future(run_loop())
def event_monitor_thread(monitor: zmq.Socket) -> None:
    """A thread that prints events
    This is a convenience method. It could serve as an example for your code of a monitor,
    For example if you don't need the prints, then copy paste this part of code to your code and modify it to your needs.
    parameters:
    monitor: a zmq monitor socket, from calling: my_zmq_socket.get_monitor_socket()
    """
    print("libzmq-%s" % zmq.zmq_version())
    if zmq.zmq_version_info() < (4, 0):
        raise RuntimeError("monitoring in libzmq version < 4.0 is not supported")
    # Build a reverse map from event number to zmq.EVENT_* name for printing.
    EVENT_MAP = {}
    print("Event names:")
    for name in dir(zmq):
        if name.startswith('EVENT_'):
            value = getattr(zmq, name)
            print("%21s : %4i" % (name, value))
            EVENT_MAP[value] = name
    print()
    print()
    # NOTE(review): the outer `while True` has no break, so control only
    # leaves via an exception other than RuntimeError -- confirm whether the
    # close/"done" lines below are reachable in practice.
    while True:
        try:
            while monitor.poll():
                evt: Dict[str, Any] = {}
                mon_evt = recv_monitor_message(monitor)
                evt.update(mon_evt)
                evt['description'] = EVENT_MAP[evt['event']]
                print(f"Event: {evt}")
                if evt['event'] == zmq.EVENT_MONITOR_STOPPED:
                    break
        except RuntimeError as e:
            # poll()/recv on a closed or stopped monitor raises; back off
            # briefly and retry.
            print(e)
            time.sleep(1)
    monitor.close()
    print()
    print("event monitor thread done!")
def event_monitor(socket: zmq.Socket) -> None:
    """Add printing event monitor to a zmq socket, it creates a thread by calling event_monitor_thread
    usage:
    event_monitor(socket)
    parameters:
    socket: the zmq socket to monitor; its monitor socket is obtained via socket.get_monitor_socket()
    """
    monitor = socket.get_monitor_socket()
    # Daemonization is not set, so this monitor thread keeps the process
    # alive until the monitor loop ends -- TODO confirm that is intended.
    t = threading.Thread(target=event_monitor_thread, args=(monitor,))
    t.start()
def event_monitor_async(socket: zmq.Socket, loop: asyncio.BaseEventLoop) -> None:
    """Add printing event monitor to a zmq socket, it creates a thread by calling event_monitor_thread_async
    See notes in description of : event_monitor_thread
    usage:
    loop = zmq.asyncio.asyncio.get_event_loop()
    event_monitor_async(socket, zmq.asyncio.asyncio.get_event_loop() )
    parameters:
    socket: the zmq socket to monitor; its monitor socket is obtained via socket.get_monitor_socket()
    loop: the asyncio event loop the monitor coroutine is scheduled on
    """
    monitor = socket.get_monitor_socket()
    # The async worker installs `loop` in the new thread and schedules its
    # coroutine on it.
    t = threading.Thread(target=METHOD_NAME, args=(monitor, loop))
    t.start()
# Public helpers; the *_thread worker functions are internal.
__all__ = ['event_monitor', 'event_monitor_async']
import functools
import socket
import struct
from time import sleep
from . import const
def METHOD_NAME(control_type: int):
    """
    Decorator factory: prefix the wrapped method's payload with a one-byte
    control code and, when the device's control socket is available, send the
    resulting package. Keeping send logic here lets the payload builders be
    unit tested in isolation.
    Args:
        control_type: event to send, TYPE_*
    """
    # The one-byte header only depends on control_type, so build it once.
    header = struct.pack(">B", control_type)

    def wrapper(f):
        @functools.wraps(f)
        def inner(self, *args, **kwargs):
            package = header + f(self, *args, **kwargs)
            if self.parent.control_socket is not None:
                with self.parent.control_socket_lock:
                    self.parent.control_socket.send(package)
            return package

        return inner

    return wrapper
class ControlSender:
    """Builds and sends scrcpy control messages (key, touch, scroll,
    clipboard, ...) for a device session.

    The ``parent`` object is expected to expose ``control_socket``,
    ``control_socket_lock`` and ``resolution`` (width, height) -- these are
    the only attributes read here; confirm against the owning client class.
    Most methods are wrapped by the module-level inject decorator, which
    prepends the one-byte message type and performs the actual send.
    """
    def __init__(self, parent):
        self.parent = parent
    @METHOD_NAME(const.TYPE_INJECT_KEYCODE)
    def keycode(
        self, keycode: int, action: int = const.ACTION_DOWN, repeat: int = 0
    ) -> bytes:
        """
        Send keycode to device
        Args:
            keycode: const.KEYCODE_*
            action: ACTION_DOWN | ACTION_UP
            repeat: repeat count
        """
        # Trailing 0 is the meta-state field of the protocol frame.
        return struct.pack(">Biii", action, keycode, repeat, 0)
    @METHOD_NAME(const.TYPE_INJECT_TEXT)
    def text(self, text: str) -> bytes:
        """
        Send text to device
        Args:
            text: text to send
        """
        # Frame is length-prefixed UTF-8.
        buffer = text.encode("utf-8")
        return struct.pack(">i", len(buffer)) + buffer
    @METHOD_NAME(const.TYPE_INJECT_TOUCH_EVENT)
    def touch(
        self, x: int, y: int, action: int = const.ACTION_DOWN, touch_id: int = -1
    ) -> bytes:
        """
        Touch screen
        Args:
            x: horizontal position
            y: vertical position
            action: ACTION_DOWN | ACTION_UP | ACTION_MOVE
            touch_id: Default using virtual id -1, you can specify it to emulate multi finger touch
        """
        # Clamp negative coordinates; 0xFFFF is full pressure, final 1 is the
        # buttons bitfield.
        x, y = max(x, 0), max(y, 0)
        return struct.pack(
            ">BqiiHHHi",
            action,
            touch_id,
            int(x),
            int(y),
            int(self.parent.resolution[0]),
            int(self.parent.resolution[1]),
            0xFFFF,
            1,
        )
    @METHOD_NAME(const.TYPE_INJECT_SCROLL_EVENT)
    def scroll(self, x: int, y: int, h: int, v: int) -> bytes:
        """
        Scroll screen
        Args:
            x: horizontal position
            y: vertical position
            h: horizontal movement
            v: vertical movement
        """
        x, y = max(x, 0), max(y, 0)
        return struct.pack(
            ">iiHHii",
            int(x),
            int(y),
            int(self.parent.resolution[0]),
            int(self.parent.resolution[1]),
            int(h),
            int(v),
        )
    @METHOD_NAME(const.TYPE_BACK_OR_SCREEN_ON)
    def back_or_turn_screen_on(self, action: int = const.ACTION_DOWN) -> bytes:
        """
        If the screen is off, it is turned on only on ACTION_DOWN
        Args:
            action: ACTION_DOWN | ACTION_UP
        """
        return struct.pack(">B", action)
    @METHOD_NAME(const.TYPE_EXPAND_NOTIFICATION_PANEL)
    def expand_notification_panel(self) -> bytes:
        """
        Expand notification panel
        """
        # Message type alone carries the command; empty payload.
        return b""
    @METHOD_NAME(const.TYPE_EXPAND_SETTINGS_PANEL)
    def expand_settings_panel(self) -> bytes:
        """
        Expand settings panel
        """
        return b""
    @METHOD_NAME(const.TYPE_COLLAPSE_PANELS)
    def collapse_panels(self) -> bytes:
        """
        Collapse all panels
        """
        return b""
    def get_clipboard(self, copy_key=const.COPY_KEY_NONE) -> str:
        """
        Get clipboard
        """
        # Since this function need socket response, we can't auto inject it any more
        s: socket.socket = self.parent.control_socket
        with self.parent.control_socket_lock:
            # Flush socket: drain any pending bytes so the reply we read
            # below really belongs to this request.
            s.setblocking(False)
            while True:
                try:
                    s.recv(1024)
                except BlockingIOError:
                    break
            s.setblocking(True)
            # Read package: 1-byte status (0 = ok), 4-byte big-endian length,
            # then the UTF-8 clipboard content.
            package = struct.pack(">BB", const.TYPE_GET_CLIPBOARD, copy_key)
            s.send(package)
            (code,) = struct.unpack(">B", s.recv(1))
            assert code == 0
            (length,) = struct.unpack(">i", s.recv(4))
            return s.recv(length).decode("utf-8")
    @METHOD_NAME(const.TYPE_SET_CLIPBOARD)
    def set_clipboard(self, text: str, paste: bool = False) -> bytes:
        """
        Set clipboard
        Args:
            text: the string you want to set
            paste: paste now
        """
        buffer = text.encode("utf-8")
        return struct.pack(">?i", paste, len(buffer)) + buffer
    @METHOD_NAME(const.TYPE_SET_SCREEN_POWER_MODE)
    def set_screen_power_mode(self, mode: int = const.POWER_MODE_NORMAL) -> bytes:
        """
        Set screen power mode
        Args:
            mode: POWER_MODE_OFF | POWER_MODE_NORMAL
        """
        return struct.pack(">b", mode)
    @METHOD_NAME(const.TYPE_ROTATE_DEVICE)
    def rotate_device(self) -> bytes:
        """
        Rotate device
        """
        return b""
    def swipe(
        self,
        start_x: int,
        start_y: int,
        end_x: int,
        end_y: int,
        move_step_length: int = 5,
        move_steps_delay: float = 0.005,
    ) -> None:
        """
        Swipe on screen
        Args:
            start_x: start horizontal position
            start_y: start vertical position
            end_x: start horizontal position
            end_y: end vertical position
            move_step_length: length per step
            move_steps_delay: sleep seconds after each step
        :return:
        """
        # Emulate a finger drag: one DOWN, a series of MOVEs stepping toward
        # the clamped end point, then an UP once both axes have arrived.
        self.touch(start_x, start_y, const.ACTION_DOWN)
        next_x = start_x
        next_y = start_y
        if end_x > self.parent.resolution[0]:
            end_x = self.parent.resolution[0]
        if end_y > self.parent.resolution[1]:
            end_y = self.parent.resolution[1]
        decrease_x = True if start_x > end_x else False
        decrease_y = True if start_y > end_y else False
        while True:
            if decrease_x:
                next_x -= move_step_length
                if next_x < end_x:
                    next_x = end_x
            else:
                next_x += move_step_length
                if next_x > end_x:
                    next_x = end_x
            if decrease_y:
                next_y -= move_step_length
                if next_y < end_y:
                    next_y = end_y
            else:
                next_y += move_step_length
                if next_y > end_y:
                    next_y = end_y
            self.touch(next_x, next_y, const.ACTION_MOVE)
            if next_x == end_x and next_y == end_y:
                self.touch(next_x, next_y, const.ACTION_UP)
                break
            sleep(move_steps_delay)
    def tap(self, x, y, hold_time: float = 0.07) -> None:
        """
        Tap on screen
        Args:
            x: horizontal position
            y: vertical position
            hold_time: hold time
        """
        self.touch(x, y, const.ACTION_DOWN)
        sleep(hold_time)
        self.touch(x, y, const.ACTION_UP)
import jwt
import pytest
from ...manager import get_plugins_manager
from ..plugin import OpenIDConnectPlugin
@pytest.fixture()
def METHOD_NAME():
    """Factory fixture: builds the raw name/value configuration list that
    ``PluginsManager.save_plugin_configuration`` expects for the OpenID
    Connect plugin. All entries are always present; callers override via
    keyword arguments."""
    def fun(
        client_id=None,
        client_secret=None,
        enable_refresh_token=True,
        oauth_authorization_url=None,
        oauth_token_url=None,
        json_web_key_set_url=None,
        oauth_logout_url=None,
        user_info_url=None,
        use_oauth_scope_permissions=False,
        audience=None,
        staff_user_domains=None,
        default_group_name_for_new_staff_users=None,
    ):
        return [
            {"name": "client_id", "value": client_id},
            {"name": "client_secret", "value": client_secret},
            {"name": "enable_refresh_token", "value": enable_refresh_token},
            {"name": "oauth_authorization_url", "value": oauth_authorization_url},
            {"name": "oauth_token_url", "value": oauth_token_url},
            {"name": "json_web_key_set_url", "value": json_web_key_set_url},
            {"name": "oauth_logout_url", "value": oauth_logout_url},
            {"name": "user_info_url", "value": user_info_url},
            {
                "name": "use_oauth_scope_permissions",
                "value": use_oauth_scope_permissions,
            },
            {"name": "audience", "value": audience},
            {"name": "staff_user_domains", "value": staff_user_domains},
            {
                "name": "default_group_name_for_new_staff_users",
                "value": default_group_name_for_new_staff_users,
            },
        ]
    return fun
@pytest.fixture
def openid_plugin(settings, METHOD_NAME):
    """Factory fixture: registers the OpenID Connect plugin with the given
    settings, saves its configuration through the plugins manager, and
    returns the loaded plugin instance."""
    def fun(
        active=True,
        client_id="client_id",
        client_secret="client_secret",
        enable_refresh_token=True,
        oauth_authorization_url="https://saleor.io/oauth/authorize",
        oauth_token_url="https://saleor.io/oauth/token",
        json_web_key_set_url="https://saleor.io/.well-known/jwks.json",
        oauth_logout_url="",
        use_oauth_scope_permissions=False,
        user_info_url="https://saleor.io/userinfo",
        audience="perms",
        staff_user_domains="",
        default_group_name_for_new_staff_users="OpenID test group",
    ):
        settings.PLUGINS = ["saleor.plugins.openid_connect.plugin.OpenIDConnectPlugin"]
        manager = get_plugins_manager()
        manager.save_plugin_configuration(
            OpenIDConnectPlugin.PLUGIN_ID,
            None,
            {
                "active": active,
                "configuration": METHOD_NAME(
                    client_id=client_id,
                    client_secret=client_secret,
                    enable_refresh_token=enable_refresh_token,
                    oauth_authorization_url=oauth_authorization_url,
                    oauth_token_url=oauth_token_url,
                    json_web_key_set_url=json_web_key_set_url,
                    oauth_logout_url=oauth_logout_url,
                    use_oauth_scope_permissions=use_oauth_scope_permissions,
                    user_info_url=user_info_url,
                    audience=audience,
                    staff_user_domains=staff_user_domains,
                    default_group_name_for_new_staff_users=default_group_name_for_new_staff_users,
                ),
            },
        )
        # Re-create the manager so the freshly saved configuration is loaded.
        manager = get_plugins_manager()
        return manager.all_plugins[0]
    return fun
@pytest.fixture
def decoded_access_token():
    """Sample decoded OAuth access-token payload (Auth0-style) including
    saleor:* permission scopes."""
    return {
        "iss": "https://saleor-test.eu.auth0.com/",
        "sub": "google-oauth2|114622651317794521039",
        "aud": ["perms", "https://saleor.io/userinfo"],
        "iat": 1615374231,
        "exp": 1615460631,
        "azp": "mnrVS8QkVOjtvC2zeapSkLLkwowr37Lt",
        "scope": (
            "openid profile email saleor:manage_apps saleor:manage_orders "
            "saleor:manage_products saleor:staff"
        ),
    }
@pytest.fixture
def user_info_response():
    """Sample OIDC userinfo endpoint response for a verified Google user."""
    return {
        "sub": "google-oauth2|114622651317794521011",
        "given_name": "John",
        "family_name": "Doe",
        "nickname": "doe",
        "name": "John Doe",
        "picture": "https://lh3.googleusercontent.com/a-/123",
        "locale": "pl",
        "updated_at": "2021-03-08T12:40:53.894Z",
        "email": "test@example.com",
        "email_verified": True,
    }
@pytest.fixture()
def id_payload():
    """Sample OIDC ID-token claims used as the payload for the signed
    ``id_token`` fixture below."""
    return {
        "given_name": "Saleor",
        "family_name": "Admin",
        "nickname": "saloer",
        "name": "Saleor Admin",
        "picture": "",
        "locale": "pl",
        "updated_at": "2020-09-22T08:50:50.110Z",
        "email": "admin@example.com",
        "email_verified": True,
        "iss": "https://saleor.io/",
        "sub": "google-oauth2|",
        "aud": "",
        "iat": 1600764712,
        "exp": 1600800712,
    }
@pytest.fixture()
def id_token(id_payload):
    """RS256-signed JWT built from ``id_payload``.
    The embedded RSA key is a well-known throwaway test key (it also appears
    in public JWT examples) -- never use it outside tests."""
    private_key = """-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEAnzyis1ZjfNB0bBgKFMSvvkTtwlvBsaJq7S5wA+kzeVOVpVWw
kWdVha4s38XM/pa/yr47av7+z3VTmvDRyAHcaT92whREFpLv9cj5lTeJSibyr/Mr
m/YtjCZVWgaOYIhwrXwKLqPr/11inWsAkfIytvHWTxZYEcXLgAXFuUuaS3uF9gEi
NQwzGTU1v0FqkqTBr4B8nW3HCN47XUu0t8Y0e+lf4s4OxQawWD79J9/5d3Ry0vbV
3Am1FtGJiJvOwRsIfVChDpYStTcHTCMqtvWbV6L11BWkpzGXSW4Hv43qa+GSYOD2
QU68Mb59oSk2OB+BtOLpJofmbGEGgvmwyCI9MwIDAQABAoIBACiARq2wkltjtcjs
kFvZ7w1JAORHbEufEO1Eu27zOIlqbgyAcAl7q+/1bip4Z/x1IVES84/yTaM8p0go
amMhvgry/mS8vNi1BN2SAZEnb/7xSxbflb70bX9RHLJqKnp5GZe2jexw+wyXlwaM
+bclUCrh9e1ltH7IvUrRrQnFJfh+is1fRon9Co9Li0GwoN0x0byrrngU8Ak3Y6D9
D8GjQA4Elm94ST3izJv8iCOLSDBmzsPsXfcCUZfmTfZ5DbUDMbMxRnSo3nQeoKGC
0Lj9FkWcfmLcpGlSXTO+Ww1L7EGq+PT3NtRae1FZPwjddQ1/4V905kyQFLamAA5Y
lSpE2wkCgYEAy1OPLQcZt4NQnQzPz2SBJqQN2P5u3vXl+zNVKP8w4eBv0vWuJJF+
hkGNnSxXQrTkvDOIUddSKOzHHgSg4nY6K02ecyT0PPm/UZvtRpWrnBjcEVtHEJNp
bU9pLD5iZ0J9sbzPU/LxPmuAP2Bs8JmTn6aFRspFrP7W0s1Nmk2jsm0CgYEAyH0X
+jpoqxj4efZfkUrg5GbSEhf+dZglf0tTOA5bVg8IYwtmNk/pniLG/zI7c+GlTc9B
BwfMr59EzBq/eFMI7+LgXaVUsM/sS4Ry+yeK6SJx/otIMWtDfqxsLD8CPMCRvecC
2Pip4uSgrl0MOebl9XKp57GoaUWRWRHqwV4Y6h8CgYAZhI4mh4qZtnhKjY4TKDjx
QYufXSdLAi9v3FxmvchDwOgn4L+PRVdMwDNms2bsL0m5uPn104EzM6w1vzz1zwKz
5pTpPI0OjgWN13Tq8+PKvm/4Ga2MjgOgPWQkslulO/oMcXbPwWC3hcRdr9tcQtn9
Imf9n2spL/6EDFId+Hp/7QKBgAqlWdiXsWckdE1Fn91/NGHsc8syKvjjk1onDcw0
NvVi5vcba9oGdElJX3e9mxqUKMrw7msJJv1MX8LWyMQC5L6YNYHDfbPF1q5L4i8j
8mRex97UVokJQRRA452V2vCO6S5ETgpnad36de3MUxHgCOX3qL382Qx9/THVmbma
3YfRAoGAUxL/Eu5yvMK8SAt/dJK6FedngcM3JEFNplmtLYVLWhkIlNRGDwkg3I5K
y18Ae9n7dHVueyslrb6weq7dTkYDi3iOYRW8HRkIQh06wEdbxt0shTzAJvvCQfrB
jg/3747WSsf/zBTcHihTRBdAv6OmdhV4/dD5YBfLAkLrd+mX7iE=
-----END RSA PRIVATE KEY-----"""
    return jwt.encode(
        id_payload,
        private_key,
        "RS256",
    )
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from abc import abstractmethod
from ansible.errors import AnsibleFileNotFound
from ansible.plugins import AnsiblePlugin
from ansible.utils.display import Display
# Module-level Display instance shared by all lookup plugins in-process.
display = Display()
__all__ = ['LookupBase']
class LookupBase(AnsiblePlugin):
    """Base class for Ansible lookup plugins.
    Subclasses implement :meth:`run`; the rest are shared helpers for
    flattening terms and resolving files relative to the task's search path.
    """
    def __init__(self, loader=None, templar=None, **kwargs):
        super(LookupBase, self).__init__()
        # DataLoader and Templar supplied by the executor; may be None in tests.
        self._loader = loader
        self._templar = templar
        # Backwards compat: self._display isn't really needed, just import the global display and use that.
        self._display = display
    def get_basedir(self, variables):
        # Inside a role, paths resolve relative to the role; otherwise fall
        # back to the loader's play basedir.
        if 'role_path' in variables:
            return variables['role_path']
        else:
            return self._loader.get_basedir()
    @staticmethod
    def METHOD_NAME(terms):
        # Flatten one level: [[1, 2], 3] -> [1, 2, 3]. Not recursive.
        ret = []
        for term in terms:
            if isinstance(term, (list, tuple)):
                ret.extend(term)
            else:
                ret.append(term)
        return ret
    @staticmethod
    def _combine(a, b):
        # Cartesian product of a and b, with each pair flattened one level.
        results = []
        for x in a:
            for y in b:
                results.append(LookupBase.METHOD_NAME([x, y]))
        return results
    @staticmethod
    def _flatten_hash_to_list(terms):
        # {'k': v} -> [{'key': 'k', 'value': v}, ...]
        ret = []
        for key in terms:
            ret.append({'key': key, 'value': terms[key]})
        return ret
    @abstractmethod
    def run(self, terms, variables=None, **kwargs):
        """
        When the playbook specifies a lookup, this method is run. The
        arguments to the lookup become the arguments to this method. One
        additional keyword argument named ``variables`` is added to the method
        call. It contains the variables available to ansible at the time the
        lookup is templated. For instance::
        "{{ lookup('url', 'https://toshio.fedorapeople.org/one.txt', validate_certs=True) }}"
        would end up calling the lookup plugin named url's run method like this::
        run(['https://toshio.fedorapeople.org/one.txt'], variables=available_variables, validate_certs=True)
        Lookup plugins can be used within playbooks for looping. When this
        happens, the first argument is a list containing the terms. Lookup
        plugins can also be called from within playbooks to return their
        values into a variable or parameter. If the user passes a string in
        this case, it is converted into a list.
        Errors encountered during execution should be returned by raising
        AnsibleError() with a message describing the error.
        Any strings returned by this method that could ever contain non-ascii
        must be converted into python's unicode type as the strings will be run
        through jinja2 which has this requirement. You can use::
        from ansible.module_utils.common.text.converters import to_text
        result_string = to_text(result_string)
        """
        pass
    def find_file_in_search_path(self, myvars, subdir, needle, ignore_missing=False):
        '''
        Return a file (needle) in the task's expected search path.
        '''
        if 'ansible_search_path' in myvars:
            paths = myvars['ansible_search_path']
        else:
            paths = [self.get_basedir(myvars)]
        result = None
        try:
            result = self._loader.path_dwim_relative_stack(paths, subdir, needle)
        except AnsibleFileNotFound:
            # Missing file is only a warning unless the caller opts out;
            # result stays None in that case.
            if not ignore_missing:
                self._display.warning("Unable to find '%s' in expected paths (use -vvvvv to see paths)" % needle)
        return result
    def _deprecate_inline_kv(self):
        # TODO: place holder to deprecate in future version allowing for long transition period
        # self._display.deprecated('Passing inline k=v values embedded in a string to this lookup. Use direct ,k=v, k2=v2 syntax instead.', version='2.18')
        pass
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2023, John McNamara, jmcnamara@cpan.org
#
import unittest
from io import StringIO
from ...styles import Styles
from ...format import Format
class TestWriteFont(unittest.TestCase):
    """
    Test the Styles _write_font() method.
    Each test builds a Format from one property dict and asserts the exact
    <font> XML the Styles writer emits for it.
    """
    def setUp(self):
        # Redirect the styles writer into an in-memory buffer so each test
        # can inspect the generated XML directly.
        self.fh = StringIO()
        self.styles = Styles()
        self.styles._set_filehandle(self.fh)
    def test_write_font_1(self):
        """Test the _write_font() method. Default properties."""
        properties = {}
        xf_format = Format(properties)
        self.styles._write_font(xf_format)
        exp = """<font><sz val="11"/><color theme="1"/><name val="Calibri"/><family val="2"/><scheme val="minor"/></font>"""
        got = self.fh.getvalue()
        self.assertEqual(got, exp)
    def test_write_font_2(self):
        """Test the _write_font() method. Bold."""
        properties = {"bold": 1}
        xf_format = Format(properties)
        self.styles._write_font(xf_format)
        exp = """<font><b/><sz val="11"/><color theme="1"/><name val="Calibri"/><family val="2"/><scheme val="minor"/></font>"""
        got = self.fh.getvalue()
        self.assertEqual(got, exp)
    def test_write_font_3(self):
        """Test the _write_font() method. Italic."""
        properties = {"italic": 1}
        xf_format = Format(properties)
        self.styles._write_font(xf_format)
        exp = """<font><i/><sz val="11"/><color theme="1"/><name val="Calibri"/><family val="2"/><scheme val="minor"/></font>"""
        got = self.fh.getvalue()
        self.assertEqual(got, exp)
    def METHOD_NAME(self):
        """Test the _write_font() method. Underline."""
        properties = {"underline": 1}
        xf_format = Format(properties)
        self.styles._write_font(xf_format)
        exp = """<font><u/><sz val="11"/><color theme="1"/><name val="Calibri"/><family val="2"/><scheme val="minor"/></font>"""
        got = self.fh.getvalue()
        self.assertEqual(got, exp)
    def test_write_font_5(self):
        """Test the _write_font() method. Strikeout."""
        properties = {"font_strikeout": 1}
        xf_format = Format(properties)
        self.styles._write_font(xf_format)
        exp = """<font><strike/><sz val="11"/><color theme="1"/><name val="Calibri"/><family val="2"/><scheme val="minor"/></font>"""
        got = self.fh.getvalue()
        self.assertEqual(got, exp)
    def test_write_font_6(self):
        """Test the _write_font() method. Superscript."""
        properties = {"font_script": 1}
        xf_format = Format(properties)
        self.styles._write_font(xf_format)
        exp = """<font><vertAlign val="superscript"/><sz val="11"/><color theme="1"/><name val="Calibri"/><family val="2"/><scheme val="minor"/></font>"""
        got = self.fh.getvalue()
        self.assertEqual(got, exp)
    def test_write_font_7(self):
        """Test the _write_font() method. Subscript."""
        properties = {"font_script": 2}
        xf_format = Format(properties)
        self.styles._write_font(xf_format)
        exp = """<font><vertAlign val="subscript"/><sz val="11"/><color theme="1"/><name val="Calibri"/><family val="2"/><scheme val="minor"/></font>"""
        got = self.fh.getvalue()
        self.assertEqual(got, exp)
    def test_write_font_8(self):
        """Test the _write_font() method. Font name."""
        properties = {"font_name": "Arial"}
        xf_format = Format(properties)
        self.styles._write_font(xf_format)
        exp = """<font><sz val="11"/><color theme="1"/><name val="Arial"/><family val="2"/></font>"""
        got = self.fh.getvalue()
        self.assertEqual(got, exp)
    def test_write_font_9(self):
        """Test the _write_font() method. Font size."""
        properties = {"size": 12}
        xf_format = Format(properties)
        self.styles._write_font(xf_format)
        exp = """<font><sz val="12"/><color theme="1"/><name val="Calibri"/><family val="2"/><scheme val="minor"/></font>"""
        got = self.fh.getvalue()
        self.assertEqual(got, exp)
    def test_write_font_10(self):
        """Test the _write_font() method. Outline."""
        properties = {"font_outline": 1}
        xf_format = Format(properties)
        self.styles._write_font(xf_format)
        exp = """<font><outline/><sz val="11"/><color theme="1"/><name val="Calibri"/><family val="2"/><scheme val="minor"/></font>"""
        got = self.fh.getvalue()
        self.assertEqual(got, exp)
    def test_write_font_11(self):
        """Test the _write_font() method. Shadow."""
        properties = {"font_shadow": 1}
        xf_format = Format(properties)
        self.styles._write_font(xf_format)
        exp = """<font><shadow/><sz val="11"/><color theme="1"/><name val="Calibri"/><family val="2"/><scheme val="minor"/></font>"""
        got = self.fh.getvalue()
        self.assertEqual(got, exp)
    def test_write_font_12(self):
        """Test the _write_font() method. Colour = red."""
        properties = {"color": "#FF0000"}
        xf_format = Format(properties)
        self.styles._write_font(xf_format)
        exp = """<font><sz val="11"/><color rgb="FFFF0000"/><name val="Calibri"/><family val="2"/><scheme val="minor"/></font>"""
        got = self.fh.getvalue()
        self.assertEqual(got, exp)
    def test_write_font_13(self):
        """Test the _write_font() method. All font attributes to check order."""
        properties = {
            "bold": 1,
            "color": "#FF0000",
            "font_outline": 1,
            "font_script": 1,
            "font_shadow": 1,
            "font_strikeout": 1,
            "italic": 1,
            "size": 12,
            "underline": 1,
        }
        xf_format = Format(properties)
        self.styles._write_font(xf_format)
        exp = """<font><b/><i/><strike/><outline/><shadow/><u/><vertAlign val="superscript"/><sz val="12"/><color rgb="FFFF0000"/><name val="Calibri"/><family val="2"/><scheme val="minor"/></font>"""
        got = self.fh.getvalue()
        self.assertEqual(got, exp)
    def test_write_font_14(self):
        """Test the _write_font() method. Double underline."""
        properties = {"underline": 2}
        xf_format = Format(properties)
        self.styles._write_font(xf_format)
        exp = """<font><u val="double"/><sz val="11"/><color theme="1"/><name val="Calibri"/><family val="2"/><scheme val="minor"/></font>"""
        got = self.fh.getvalue()
        self.assertEqual(got, exp)
    def test_write_font_15(self):
        """Test the _write_font() method. Single accounting underline."""
        properties = {"underline": 33}
        xf_format = Format(properties)
        self.styles._write_font(xf_format)
        exp = """<font><u val="singleAccounting"/><sz val="11"/><color theme="1"/><name val="Calibri"/><family val="2"/><scheme val="minor"/></font>"""
        got = self.fh.getvalue()
        self.assertEqual(got, exp)
    def test_write_font_16(self):
        """Test the _write_font() method. Double accounting underline."""
        properties = {"underline": 34}
        xf_format = Format(properties)
        self.styles._write_font(xf_format)
        exp = """<font><u val="doubleAccounting"/><sz val="11"/><color theme="1"/><name val="Calibri"/><family val="2"/><scheme val="minor"/></font>"""
        got = self.fh.getvalue()
        self.assertEqual(got, exp)
    def test_write_font_17(self):
        """Test the _write_font() method. Hyperlink."""
        properties = {"hyperlink": 1}
        xf_format = Format(properties)
        self.styles._write_font(xf_format)
        exp = """<font><u/><sz val="11"/><color theme="10"/><name val="Calibri"/><family val="2"/></font>"""
        got = self.fh.getvalue()
        self.assertEqual(got, exp)
from __future__ import print_function
import os
import fnmatch
import re
import sys
import urllib.parse
import argparse
HOSTNAME = 'https://azure-sdk-impressions.azurewebsites.net'
README_PATTERNS = ['*/readme.md', '*/readme.rst']
# Regexes that locate an already-inserted tracking pixel in each format.
MARKDOWN_REGEX = r'!\[Impressions\]\([^\)]+\)'
RST_REGEX = r'\.\. image:: ' + HOSTNAME + r'/api/impressions/[^\s]+'
# Format strings receive (repo_id, url-encoded readme path).
# BUG FIX: the markdown format string was empty, so markdown readmes had
# their pixel replaced/appended with an empty string that MARKDOWN_REGEX
# could never find again; emit a real image tag mirroring the RST one.
TRACKING_PIXEL_MD_FORMAT_STRING = '![Impressions](' + HOSTNAME + '/api/impressions/{0}{1})'
TRACKING_PIXEL_RST_FORMAT_STRING = '.. image:: ' + HOSTNAME + '/api/impressions/{0}{1}'
# walks a target directory with support for multiple glob patterns
def walk_directory_for_pattern(target_directory, target_patterns):
    """Walk *target_directory* and return every file path matching any of the
    given glob patterns (patterns are normalized for the current OS)."""
    root = os.path.normpath(target_directory)
    patterns = [os.path.normpath(pattern) for pattern in target_patterns]
    return [
        os.path.join(folder, name)
        for folder, _subfolders, files in os.walk(root)
        for name in files
        if check_match(os.path.join(folder, name), patterns)
    ]
# a set of glob patterns against a single file path
def check_match(file_path, normalized_target_patterns):
    """Return True if *file_path* matches at least one glob pattern."""
    for pattern in normalized_target_patterns:
        if fnmatch.fnmatch(file_path, pattern):
            return True
    return False
# returns all readmes that match either of the readme patterns.
def get_all_readme_files(folder_location):
    # Thin wrapper over the generic walker with the readme-specific globs.
    # NOTE(review): patterns are lowercase ('*/readme.md'); matching is
    # case-sensitive on Linux via fnmatch -- confirm README.md is intended
    # to match here.
    return walk_directory_for_pattern(folder_location, README_PATTERNS)
# runs across provided set of readmes, inserts or updates pixels in all
def update_readmes_with_tracking(readme_files, target_directory, repo_id):
    """Insert or refresh the tracking pixel in each readme in *readme_files*.

    Args:
        readme_files: paths of readme files to process (.md or .rst).
        target_directory: repo root, stripped from each path for the pixel URL.
        repo_id: identifier prefixed onto the readme path in the pixel URL.

    Fixes over the previous version: the invalid ``except e:`` handler (which
    raised NameError whenever it triggered) is gone -- ``os.path.splitext``
    does not raise for plain strings; files that are neither .md nor .rst are
    now skipped instead of hitting an unbound/stale ``updated_content``; the
    regexes are compiled once instead of per file.
    """
    md_regex = re.compile(MARKDOWN_REGEX, re.IGNORECASE | re.MULTILINE)
    rs_regex = re.compile(RST_REGEX, re.IGNORECASE | re.MULTILINE)
    for file_path in readme_files:
        with open(file_path, 'r') as f:
            data = f.read()
        extension = os.path.splitext(file_path)[1]
        if extension == '.rst':
            updated_content = replace_tracking_pixel_rst(file_path, data, rs_regex, target_directory, repo_id)
        elif extension == '.md':
            updated_content = replace_tracking_pixel_md(file_path, data, md_regex, target_directory, repo_id)
        else:
            print('Unsupported file extension for {0}, skipping.'.format(file_path))
            continue
        # Only rewrite the file when the pixel actually changed.
        if updated_content != data:
            with open(file_path, 'w') as f:
                f.write(updated_content)
# insert/update tracking pixel, rst specific
def replace_tracking_pixel_rst(file_path, file_content, compiled_regex, target_directory, repo_id):
    # If a pixel directive is already present, rewrite it in place; otherwise
    # append a fresh one at the end of the document.
    existing_matches = compiled_regex.search(file_content)
    if existing_matches:
        return compiled_regex.sub(METHOD_NAME(TRACKING_PIXEL_RST_FORMAT_STRING, file_path, target_directory, repo_id), file_content)
    else:
        return file_content + '\n\n' + METHOD_NAME(TRACKING_PIXEL_RST_FORMAT_STRING, file_path, target_directory, repo_id) + '\n'
# insert/update tracking pixel, markdown specific
def replace_tracking_pixel_md(file_path, file_content, compiled_regex, target_directory, repo_id):
    """Return file_content with the markdown tracking pixel updated or appended."""
    pixel = METHOD_NAME(TRACKING_PIXEL_MD_FORMAT_STRING, file_path, target_directory, repo_id)
    if compiled_regex.search(file_content) is None:
        # No pixel yet: append one at the end of the document.
        return file_content + '\n\n' + pixel + '\n'
    # Pixel already present: rewrite every occurrence in place.
    return compiled_regex.sub(pixel, file_content)
# creates the pixel tag
def METHOD_NAME(fmt_string, file_path, target_directory, repo_id):
    """Build the pixel tag: repo_id followed by the URL-encoded relative .png path."""
    # Strip the target-directory prefix so only the in-repo path remains.
    relative = file_path.replace(os.path.normpath(target_directory), '')
    # Swap whatever extension the readme has for .png.
    base, _ext = os.path.splitext(relative)
    # Forward slashes keep the URL stable across Windows and POSIX, then URI-encode.
    encoded = urllib.parse.quote_plus((base + '.png').replace('\\', '/'))
    return fmt_string.format(repo_id, encoded)
if __name__ == '__main__':
    # CLI entry point: scan a repo directory and stamp tracking pixels into its readmes.
    parser = argparse.ArgumentParser(description ='Script that will take any repository + identifier as input, and add or update a tracking pixel image in all readme files contained within the input directory.')
    parser.add_argument(
        '-d',
        '--scan-directory',
        dest = 'scan_directory',
        help = 'The repo directory that this tool should be scanning.',
        required = True)
    parser.add_argument(
        '-i',
        '--id',
        dest = 'repo_id',
        help = 'The repository identifier. Will be prefixed onto the readme path.',
        required = True)
    args = parser.parse_args()
    # Absolute path makes the prefix-stripping in the pixel builder predictable.
    args.scan_directory = os.path.abspath(args.scan_directory)
    target_readme_files = get_all_readme_files(args.scan_directory)
    update_readmes_with_tracking(target_readme_files, args.scan_directory, args.repo_id)
""" Tests for zmq shell / display publisher. """
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import os
import unittest
import warnings
from queue import Queue
from threading import Thread
from unittest.mock import MagicMock
import pytest
import zmq
from jupyter_client.session import Session
from traitlets import Int
from ipykernel.zmqshell import ( # type:ignore
InteractiveShell,
KernelMagics,
ZMQDisplayPublisher,
ZMQInteractiveShell,
)
class NoReturnDisplayHook:
    """Counting display hook that consumes messages.

    Tracks how many times it was invoked via ``call_count`` but returns
    ``None``, signalling to the publisher that the message was consumed.
    """
    # Class-level default; the first call shadows it with an instance attribute.
    call_count = 0
    def __call__(self, obj):
        # Record the invocation; implicitly return None (message consumed).
        self.call_count = self.call_count + 1
class ReturnDisplayHook(NoReturnDisplayHook):
    """
    A dummy DisplayHook with the same counting ability
    as its base class, but which also returns the same
    message when it is called.
    """
    def __call__(self, obj):
        # Count via the base class, then echo the message back so the
        # publisher treats this hook as a pure transformation.
        super().__call__(obj)
        return obj
class CounterSession(Session):
    """
    This is a simple subclass to allow us to count
    the calls made to the session object by the display
    publisher.
    """
    # Traitlets integer trait; starts at 0 for each instance.
    send_count = Int(0)
    def send(self, *args, **kwargs):
        """
        A trivial override to just augment the existing call
        with an increment to the send counter.
        """
        self.send_count += 1
        super().send(*args, **kwargs)
class ZMQDisplayPublisherTests(unittest.TestCase):
    """
    Tests the ZMQDisplayPublisher in zmqshell.py
    """
    def setUp(self):
        # Fresh ZMQ context/socket and a counting session for every test.
        self.context = zmq.Context()
        self.socket = self.context.socket(zmq.PUB)
        self.session = CounterSession()
        self.disp_pub = ZMQDisplayPublisher(session=self.session, pub_socket=self.socket)
    def tearDown(self):
        """
        We need to close the socket in order to proceed with the
        tests.
        TODO - There is still an open file handler to '/dev/null',
        presumably created by zmq.
        """
        self.disp_pub.clear_output()
        self.socket.close()
        self.context.term()
    def METHOD_NAME(self):
        """
        Since there's no explicit constructor, here we confirm
        that keyword args get assigned correctly, and override
        the defaults.
        """
        assert self.disp_pub.session == self.session
        assert self.disp_pub.pub_socket == self.socket
    def test_thread_local_hooks(self):
        """
        Confirms that the thread_local attribute is correctly
        initialised with an empty list for the display hooks
        """
        assert self.disp_pub._hooks == []
        def hook(msg):
            return msg
        self.disp_pub.register_hook(hook)
        assert self.disp_pub._hooks == [hook]
        q: Queue = Queue()
        def set_thread_hooks():
            q.put(self.disp_pub._hooks)
        # Hooks are thread-local: a different thread must see an empty list.
        t = Thread(target=set_thread_hooks)
        t.start()
        thread_hooks = q.get(timeout=10)
        assert thread_hooks == []
    def test_publish(self):
        """
        Publish should prepare the message and eventually call
        `send` by default.
        """
        data = dict(a=1)
        assert self.session.send_count == 0
        self.disp_pub.publish(data)
        assert self.session.send_count == 1
    def test_display_hook_halts_send(self):
        """
        If a hook is installed, and on calling the object
        it does *not* return a message, then we assume that
        the message has been consumed, and should not be
        processed (`sent`) in the normal manner.
        """
        data = dict(a=1)
        hook = NoReturnDisplayHook()
        self.disp_pub.register_hook(hook)
        assert hook.call_count == 0
        assert self.session.send_count == 0
        self.disp_pub.publish(data)
        assert hook.call_count == 1
        assert self.session.send_count == 0
    def test_display_hook_return_calls_send(self):
        """
        If a hook is installed and on calling the object
        it returns a new message, then we assume that this
        is just a message transformation, and the message
        should be sent in the usual manner.
        """
        data = dict(a=1)
        hook = ReturnDisplayHook()
        self.disp_pub.register_hook(hook)
        assert hook.call_count == 0
        assert self.session.send_count == 0
        self.disp_pub.publish(data)
        assert hook.call_count == 1
        assert self.session.send_count == 1
    def test_unregister_hook(self):
        """
        Once a hook is unregistered, it should not be called
        during `publish`.
        """
        data = dict(a=1)
        hook = NoReturnDisplayHook()
        self.disp_pub.register_hook(hook)
        assert hook.call_count == 0
        assert self.session.send_count == 0
        self.disp_pub.publish(data)
        assert hook.call_count == 1
        assert self.session.send_count == 0
        #
        # After unregistering the `NoReturn` hook, any calls
        # to publish should *not* go through the DisplayHook,
        # but should instead hit the usual `session.send` call
        # at the end.
        #
        # As a result, the hook call count should *not* increase,
        # but the session send count *should* increase.
        #
        first = self.disp_pub.unregister_hook(hook)
        self.disp_pub.publish(data)
        self.assertTrue(first)
        assert hook.call_count == 1
        assert self.session.send_count == 1
        #
        # If a hook is not installed, `unregister_hook`
        # should return false.
        #
        second = self.disp_pub.unregister_hook(hook)
        self.assertFalse(second)
def test_magics(tmp_path):
    # Smoke test: exercise the kernel magics against a throwaway shell and file.
    context = zmq.Context()
    socket = context.socket(zmq.PUB)
    shell = InteractiveShell()
    shell.user_ns["hi"] = 1
    magics = KernelMagics(shell)
    tmp_file = tmp_path / "test.txt"
    tmp_file.write_text("hi", "utf8")
    magics.edit(str(tmp_file))
    magics.clear([])
    magics.less(str(tmp_file))
    # `man` shells out to the man pager, which only exists on POSIX systems.
    if os.name == "posix":
        magics.man("ls")
    magics.autosave("10")
    socket.close()
    context.destroy()
def test_zmq_interactive_shell(kernel):
    # Smoke test for ZMQInteractiveShell; `kernel` is a pytest fixture.
    shell = ZMQInteractiveShell()
    # enable_gui requires an attached kernel, so it must fail here.
    with pytest.raises(RuntimeError):
        shell.enable_gui("tk")
    with warnings.catch_warnings():
        # data_pub is deprecated; silence the warning while touching it.
        warnings.simplefilter("ignore", DeprecationWarning)
        shell.data_pub_class = MagicMock()  # type:ignore
        shell.data_pub
    shell.kernel = kernel
    shell.set_next_input("hi")
    assert shell.get_parent() is None
    if os.name == "posix":
        shell.system_piped("ls")
    else:
        shell.system_piped("dir")
    shell.ask_exit()
if __name__ == "__main__":
    # Allow running this test module directly with the unittest runner.
    unittest.main()
# -*- coding: utf-8
"""
This module is designed to hold functions for calculating absorption chillers.
This file is part of project oemof (github.com/oemof/oemof-thermal). It's copyrighted
by the contributors recorded in the version control history of the file,
available from its original location:
oemof-thermal/src/oemof/thermal/absorption_heatpumps_and_chillers.py
SPDX-License-Identifier: MIT
"""
def calc_characteristic_temp(t_hot, t_cool, t_chill, coef_a, coef_e, method):
    r"""
    Calculates the characteristic temperature difference.
    .. calc_characteristic_temp-equations:
    :math:`\Delta\Delta T = t_{G} - a \cdot t_{AC} + e \cdot t_{E}`
    Parameters
    ----------
    t_hot : list
        External arithmetic mean fluid temperature of hot water at heat exchanger (generator) [K]
    t_cool : list
        External arithmetic mean fluid temperature of cooling water at
        heat exchanger (absorber and condenser) [K]
    t_chill : list
        External arithmetic mean fluid temperature of chilled water at
        heat exchanger (evaporater) [K]
    coef_a : numeric
        Characteristic parameter [-]
    coef_e : numeric
        Characteristic parameter [-]
    method : string
        Method to calculate characteristic temperature difference
        (only 'kuehn_and_ziegler' is supported)
    Returns
    -------
    ddts : list
        Characteristic temperature difference [K]
    **Reference**
    [1] A. Kühn, F. Ziegler.
    “Operational results of a 10 kW absorption chiller and adaptation of
    the characteristic equation”,
    Proc. of the 1st Int. Conf. Solar Air Conditioning, 6-7 October 2005,
    Bad Staffelstein, Germany.
    [2] A. Kühn, C. Özgür-Popanda, and F. Ziegler.
    “A 10 kW indirectly fired absorption heat pump :
    Concepts for a reversible operation,”
    in Thermally driven heat pumps for heating and cooling,
    Universitätsverlag der TU Berlin, 2013, pp. 173–184.
    [http://dx.doi.org/10.14279/depositonce-4872]
    [3] Maria Puig-Arnavat, Jesús López-Villada, \
    Joan Carles Bruno, Alberto Coronas.
    Analysis and parameter identification for characteristic equations \
    of single- and double-effect absorption chillers by means of \
    multivariable regression.
    In: International Journal of Refrigeration, 33 (2010) 70-78.
    """
    # All three temperature arguments must be lists (validated in input order,
    # preserving the original behavior; error messages are unchanged).
    for name, temps in (('t_hot', t_hot), ('t_cool', t_cool), ('t_chill', t_chill)):
        if not isinstance(temps, list):
            raise TypeError("Argument '{}' is not of "
                            "type list!".format(name))
    length = max(len(t_hot), len(t_cool), len(t_chill))
    # Broadcast single-entry lists to the common length (generator g,
    # absorber/condenser ac, evaporator e).
    list_t_g = _broadcast_temperatures(t_hot, length, 't_hot')
    list_t_ac = _broadcast_temperatures(t_cool, length, 't_cool')
    list_t_e = _broadcast_temperatures(t_chill, length, 't_chill')
    if method != 'kuehn_and_ziegler':
        raise ValueError("Unrecognized input for argument 'method'. "
                         "Possible options: 'kuehn_and_ziegler'.")
    return [t_g - coef_a * t_ac + coef_e * t_e
            for t_g, t_ac, t_e in zip(list_t_g, list_t_ac, list_t_e)]
def _broadcast_temperatures(temps, length, name):
    """Repeat a single-entry temperature list to *length*, or validate its length.

    Replaces three identical copy-pasted blocks in the original; also fixes the
    typo "does not to match" in the error message.
    """
    if len(temps) == 1:
        return temps * length
    if len(temps) == length:
        return temps
    raise ValueError("Length of argument '{}' does not match requirements".format(name))
def calc_heat_flux(ddts, coef_s, coef_r, method):
    r"""
    Calculates the heat flux at external heat exchanger.
    .. calc_heat_flux-equations:
    :math:`\dot{Q} = s \cdot \Delta\Delta T + r`
    Parameters
    ----------
    ddts : list
        Characteristic temperature differences [K]
    coef_s : numeric
        Characteristic parameter [-]
    coef_r : numeric
        Characteristic parameter [-]
    method : string
        Method to calculate the heat flux (only 'kuehn_and_ziegler')
    Returns
    -------
    Q_dots : list
        Heat flux [W]
    """
    # Guard clause: reject unknown methods before computing anything.
    if method != 'kuehn_and_ziegler':
        raise ValueError("Unrecognized input for argument 'method'. "
                         "Possible options: 'kuehn_and_ziegler'.")
    return [coef_s * ddt + coef_r for ddt in ddts]
def METHOD_NAME(Q_dots_evap, Q_dots_gen):
    r"""
    Calculates the coefficients of performance ('COPs'),
    the maximum chiller capacity as normed value ('Q_chill_max'),
    and the maximum chiller capacity as
    absolute value ('Q_chill_nominal').
    .. COP-equations:
    :math:`COP= \frac{\dot{Q}_{evap}}{\dot{Q}_{gen}}`
    .. Q_chill_max-equations:
    :math:`\dot{Q}_{chill, max} = \frac{\dot{Q}_{evap}}{max(\dot{Q}_{evap})}`
    .. Q_chill_max-equations:
    :math:`\dot{Q}_{chill, nominal} = max(\dot{Q}_{evap})`
    Parameters
    ----------
    Q_dots_evap : list
        Heat flux at Evaporator
    Q_dots_gen : list
        Heat flux at Generator
    Returns
    -------
    AC_specs : dict
        Absorption chiller specifications
        ('COPs', 'Q_chill_max', 'Q_chill_nominal')
    """
    # Hoist the nominal (maximum evaporator) heat flux; the original
    # recomputed max() for every element.
    q_nominal = max(Q_dots_evap)
    cops = [q_e / q_g for q_e, q_g in zip(Q_dots_evap, Q_dots_gen)]
    normed_capacity = [q_e / q_nominal for q_e in Q_dots_evap]
    return {
        'COPs': cops,
        'Q_chill_max': normed_capacity,
        'Q_chill_nominal': q_nominal,
    }
import django_filters
import django_tables2
from django.contrib.auth.mixins import PermissionRequiredMixin, LoginRequiredMixin
from django.db import transaction
from django.db.models import Q
from django.urls import reverse
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from django.views import generic
from django_filters.views import FilterView
from django_tables2 import SingleTableView
from tapir.accounts.models import TapirUser
from tapir.coop.forms import IncomingPaymentForm
from tapir.coop.models import IncomingPayment, ShareOwner, CreatePaymentLogEntry
from tapir.core.config import TAPIR_TABLE_CLASSES, TAPIR_TABLE_TEMPLATE
from tapir.core.views import TapirFormMixin
from tapir.settings import (
PERMISSION_COOP_VIEW,
PERMISSION_ACCOUNTING_MANAGE,
PERMISSION_ACCOUNTING_VIEW,
)
from tapir.utils.filters import ShareOwnerModelChoiceFilter, TapirUserModelChoiceFilter
from tapir.utils.forms import DateFromToRangeFilterTapir
from tapir.utils.user_utils import UserUtils
class IncomingPaymentTable(django_tables2.Table):
    # Table listing incoming payments; member cells are anonymized for viewers
    # without the accounting-view permission.
    class Meta:
        model = IncomingPayment
        template_name = TAPIR_TABLE_TEMPLATE
        fields = [
            "id",
            "paying_member",
            "credited_member",
            "amount",
            "payment_date",
            "creation_date",
            "comment",
            "created_by",
        ]
        # Newest payments first.
        order_by = "-payment_date"
        attrs = {"class": TAPIR_TABLE_CLASSES}
    id = django_tables2.Column(verbose_name=_("Payment ID"))
    def before_render(self, request):
        # Stash the request so the render_* callbacks can check the viewer's permissions.
        self.request = request
    def render_id(self, value, record: IncomingPayment):
        return f"#{record.id}"
    def render_member(self, logged_in_member: TapirUser, other_member: ShareOwner):
        # Shared helper (not a django-tables2 column hook): show a profile link
        # only to the member themselves or to accounting viewers.
        if logged_in_member.share_owner == other_member or logged_in_member.has_perm(
            PERMISSION_ACCOUNTING_VIEW
        ):
            other_member = other_member.get_info()
            return UserUtils.build_html_link_for_viewer(other_member, logged_in_member)
        return _("Other member")
    def render_paying_member(self, value, record: IncomingPayment):
        return self.render_member(self.request.user, record.paying_member)
    def render_credited_member(self, value, record: IncomingPayment):
        return self.render_member(self.request.user, record.credited_member)
    def METHOD_NAME(self, value, record: IncomingPayment):
        # Renders the "created_by" column through the same anonymizing helper.
        return self.render_member(self.request.user, record.created_by.share_owner)
    def render_payment_date(self, value, record: IncomingPayment):
        return record.payment_date.strftime("%d.%m.%Y")
    def render_creation_date(self, value, record: IncomingPayment):
        return record.creation_date.strftime("%d.%m.%Y")
class IncomingPaymentFilter(django_filters.FilterSet):
    # Filter set for the payment list: date ranges plus member/creator dropdowns.
    class Meta:
        model = IncomingPayment
        # All filters are declared explicitly below.
        fields = []
    payment_date = DateFromToRangeFilterTapir(
        field_name="payment_date",
    )
    creation_date = DateFromToRangeFilterTapir(
        field_name="creation_date",
    )
    paying_member = ShareOwnerModelChoiceFilter()
    credited_member = ShareOwnerModelChoiceFilter()
    created_by = TapirUserModelChoiceFilter()
class IncomingPaymentListView(LoginRequiredMixin, FilterView, SingleTableView):
    # Filterable, paginated list of incoming payments.
    table_class = IncomingPaymentTable
    model = IncomingPayment
    template_name = "coop/incoming_payment_list.html"
    filterset_class = IncomingPaymentFilter
    def get_queryset(self):
        # Members without the accounting permission only see payments where
        # they are either the payer or the credited member.
        queryset = IncomingPayment.objects.all()
        if not self.request.user.has_perm(PERMISSION_ACCOUNTING_VIEW):
            tapir_user: TapirUser = self.request.user
            logged_in_share_owner = tapir_user.share_owner
            return queryset.filter(
                Q(paying_member=logged_in_share_owner)
                | Q(credited_member=logged_in_share_owner)
            )
        return queryset
    def get_context_data(self, **kwargs):
        context_data = super().get_context_data(**kwargs)
        # The filter form is only shown to users allowed to browse members.
        context_data["enable_filter"] = self.request.user.has_perm(PERMISSION_COOP_VIEW)
        return context_data
class IncomingPaymentCreateView(
    LoginRequiredMixin, PermissionRequiredMixin, TapirFormMixin, generic.CreateView
):
    # Form view for accounting managers to register a new incoming payment.
    permission_required = PERMISSION_ACCOUNTING_MANAGE
    model = IncomingPayment
    form_class = IncomingPaymentForm
    def get_success_url(self):
        return reverse("coop:incoming_payment_list")
    def form_valid(self, form):
        # Save the payment and its audit log entry atomically.
        with transaction.atomic():
            payment: IncomingPayment = form.instance
            payment.creation_date = timezone.now().date()
            payment.created_by = self.request.user
            payment.save()
            CreatePaymentLogEntry().populate(
                actor=self.request.user,
                share_owner=form.cleaned_data["credited_member"],
                amount=form.cleaned_data["amount"],
                payment_date=form.cleaned_data["payment_date"],
            ).save()
        # NOTE(review): super().form_valid() saves the form again outside the
        # atomic block — presumably a harmless re-save of the same instance; confirm.
        return super().form_valid(form)
    def get_context_data(self, **kwargs):
        context = super().get_context_data()
        context["page_title"] = _("Register payment")
        context["card_title"] = _("Register a new incoming payment")
        return context
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class BgpServiceCommunitiesOperations(object):
    """BgpServiceCommunitiesOperations operations.

    NOTE: auto-generated by AutoRest (see file header); manual edits here may
    be overwritten on regeneration.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Client API version. Constant value: "2018-04-01".
    """
    models = models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.api_version = "2018-04-01"
        self.config = config
    def METHOD_NAME(
            self, custom_headers=None, raw=False, **operation_config):
        """Gets all the available bgp service communities.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of BgpServiceCommunity
        :rtype:
         ~azure.mgmt.network.v2018_04_01.models.BgpServiceCommunityPaged[~azure.mgmt.network.v2018_04_01.models.BgpServiceCommunity]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            # First page: build the URL from the operation metadata; subsequent
            # pages reuse the service-provided next_link verbatim.
            if not next_link:
                # Construct URL
                url = self.METHOD_NAME.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)
            # Anything other than HTTP 200 is surfaced as a CloudError.
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.BgpServiceCommunityPaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.BgpServiceCommunityPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    METHOD_NAME.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/bgpServiceCommunities'}
import importlib
from importlib import abc
from importlib import util
import sys
import types
import unittest
from . import util as test_util
class CollectInit:
    """Loader stub that records the arguments it was constructed with."""
    def __init__(self, *args, **kwargs):
        # Stash positional and keyword args for later assertions.
        self.args, self.kwargs = args, kwargs
    def exec_module(self, module):
        # Returning self lets tests observe that this loader executed.
        return self
class LazyLoaderFactoryTests(unittest.TestCase):
    # Tests for importlib.util.LazyLoader.factory().
    def test_init(self):
        factory = util.LazyLoader.factory(CollectInit)
        # E.g. what importlib.machinery.FileFinder instantiates loaders with
        # plus keyword arguments.
        lazy_loader = factory('module name', 'module path', kw='kw')
        # The factory must forward all arguments to the wrapped loader class.
        loader = lazy_loader.loader
        self.assertEqual(('module name', 'module path'), loader.args)
        self.assertEqual({'kw': 'kw'}, loader.kwargs)
    def test_validation(self):
        # No exec_module(), no lazy loading.
        with self.assertRaises(TypeError):
            util.LazyLoader.factory(object)
class TestingImporter(abc.MetaPathFinder, abc.Loader):
    # Combined finder/loader that serves exactly one lazily-loaded module.
    module_name = 'lazy_loader_test'
    # The module body rebinds __name__ to this value; tests assert on it.
    mutated_name = 'changed'
    # Set to the module object once exec_module() has actually run.
    loaded = None
    source_code = 'attr = 42; __name__ = {!r}'.format(mutated_name)
    def find_spec(self, name, path, target=None):
        # Only claim our single test module; defer everything else.
        if name != self.module_name:
            return None
        return util.spec_from_loader(name, util.LazyLoader(self))
    def exec_module(self, module):
        exec(self.source_code, module.__dict__)
        # Record that the (lazy) load actually happened.
        self.loaded = module
class LazyLoaderTests(unittest.TestCase):
    # Behavioral tests for importlib.util.LazyLoader itself.
    def test_init(self):
        with self.assertRaises(TypeError):
            # Classes that don't define exec_module() trigger TypeError.
            util.LazyLoader(object)
    def new_module(self, source_code=None):
        # Build a lazily-loaded module without touching sys.modules.
        loader = TestingImporter()
        if source_code is not None:
            loader.source_code = source_code
        spec = util.spec_from_loader(TestingImporter.module_name,
                                     util.LazyLoader(loader))
        module = spec.loader.create_module(spec)
        if module is None:
            module = types.ModuleType(TestingImporter.module_name)
        module.__spec__ = spec
        module.__loader__ = spec.loader
        spec.loader.exec_module(module)
        # Module is now lazy.
        self.assertIsNone(loader.loaded)
        return module
    def test_e2e(self):
        # End-to-end test to verify the load is in fact lazy.
        importer = TestingImporter()
        assert importer.loaded is None
        with test_util.uncache(importer.module_name):
            with test_util.import_state(meta_path=[importer]):
                module = importlib.import_module(importer.module_name)
                self.assertIsNone(importer.loaded)
                # Trigger load.
                self.assertEqual(module.__loader__, importer)
                self.assertIsNotNone(importer.loaded)
                self.assertEqual(module, importer.loaded)
    def test_attr_unchanged(self):
        # An attribute only mutated as a side-effect of import should not be
        # changed needlessly.
        module = self.new_module()
        self.assertEqual(TestingImporter.mutated_name, module.__name__)
    def METHOD_NAME(self):
        # A new attribute should persist.
        module = self.new_module()
        module.new_attr = 42
        self.assertEqual(42, module.new_attr)
    def test_mutated_preexisting_attr(self):
        # Changing an attribute that already existed on the module --
        # e.g. __name__ -- should persist.
        module = self.new_module()
        module.__name__ = 'bogus'
        self.assertEqual('bogus', module.__name__)
    def test_mutated_attr(self):
        # Changing an attribute that comes into existence after an import
        # should persist.
        module = self.new_module()
        module.attr = 6
        self.assertEqual(6, module.attr)
    def test_delete_eventual_attr(self):
        # Deleting an attribute should stay deleted.
        module = self.new_module()
        del module.attr
        self.assertFalse(hasattr(module, 'attr'))
    def test_delete_preexisting_attr(self):
        module = self.new_module()
        del module.__name__
        self.assertFalse(hasattr(module, '__name__'))
    def test_module_substitution_error(self):
        # If sys.modules holds a different object by load time, accessing an
        # attribute on the lazy module must raise a "substituted" ValueError.
        with test_util.uncache(TestingImporter.module_name):
            fresh_module = types.ModuleType(TestingImporter.module_name)
            sys.modules[TestingImporter.module_name] = fresh_module
            module = self.new_module()
            with self.assertRaisesRegex(ValueError, "substituted"):
                module.__name__
    def test_module_already_in_sys(self):
        with test_util.uncache(TestingImporter.module_name):
            module = self.new_module()
            sys.modules[TestingImporter.module_name] = module
            # Force the load; just care that no exception is raised.
            module.__name__
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()
"""distutils.command.check
Implements the Distutils 'check' command.
"""
__revision__ = "$Id$"
from distutils.core import Command
from distutils.dist import PKG_INFO_ENCODING
from distutils.errors import DistutilsSetupError
# Optional dependency: docutils is only needed for the --restructuredtext
# check. HAS_DOCUTILS records whether it is importable.
# NOTE(review): "from StringIO import StringIO" is Python-2-only — this module
# targets Python 2 (see also the `unicode` builtin used below).
try:
    # docutils is installed
    from docutils.utils import Reporter
    from docutils.parsers.rst import Parser
    from docutils import frontend
    from docutils import nodes
    from StringIO import StringIO
    class SilentReporter(Reporter):
        # Reporter that collects system messages instead of printing them,
        # so parse problems can be re-emitted as distutils warnings.
        def __init__(self, source, report_level, halt_level, stream=None,
                     debug=0, encoding='ascii', error_handler='replace'):
            self.messages = []
            Reporter.__init__(self, source, report_level, halt_level, stream,
                              debug, encoding, error_handler)
        def system_message(self, level, message, *children, **kwargs):
            self.messages.append((level, message, children, kwargs))
            return nodes.system_message(message, level=level,
                                        type=self.levels[level],
                                        *children, **kwargs)
    HAS_DOCUTILS = True
except ImportError:
    # docutils is not installed
    HAS_DOCUTILS = False
class check(Command):
    """This command checks the meta-data of the package.

    Verifies required metadata fields and, optionally, that the long
    description parses as reStructuredText. With --strict, any warning
    aborts the build.
    """
    description = ("perform some checks on the package")
    user_options = [('metadata', 'm', 'Verify meta-data'),
                    ('restructuredtext', 'r',
                     ('Checks if long string meta-data syntax '
                      'are reStructuredText-compliant')),
                    ('strict', 's',
                     'Will exit with an error if a check fails')]
    boolean_options = ['metadata', 'restructuredtext', 'strict']
    def initialize_options(self):
        """Sets default values for options."""
        self.restructuredtext = 0
        self.metadata = 1
        self.strict = 0
        # Running tally of warnings; --strict fails the build if non-zero.
        self._warnings = 0
    def finalize_options(self):
        pass
    def warn(self, msg):
        """Counts the number of warnings that occurs."""
        self._warnings += 1
        return Command.warn(self, msg)
    def run(self):
        """Runs the command."""
        # perform the various tests
        if self.metadata:
            self.check_metadata()
        if self.restructuredtext:
            if HAS_DOCUTILS:
                self.check_restructuredtext()
            elif self.strict:
                raise DistutilsSetupError('The docutils package is needed.')
        # let's raise an error in strict mode, if we have at least
        # one warning
        if self.strict and self._warnings > 0:
            raise DistutilsSetupError('Please correct your package.')
    def check_metadata(self):
        """Ensures that all required elements of meta-data are supplied.
        name, version, URL, (author and author_email) or
        (maintainer and maintainer_email)).
        Warns if any are missing.
        """
        metadata = self.distribution.metadata
        missing = []
        for attr in ('name', 'version', 'url'):
            if not (hasattr(metadata, attr) and getattr(metadata, attr)):
                missing.append(attr)
        if missing:
            self.warn("missing required meta-data: %s" % ', '.join(missing))
        # Author/maintainer info: either pair is acceptable, but a name
        # without the matching e-mail address is flagged.
        if metadata.author:
            if not metadata.author_email:
                self.warn("missing meta-data: if 'author' supplied, " +
                          "'author_email' must be supplied too")
        elif metadata.maintainer:
            if not metadata.maintainer_email:
                self.warn("missing meta-data: if 'maintainer' supplied, " +
                          "'maintainer_email' must be supplied too")
        else:
            self.warn("missing meta-data: either (author and author_email) " +
                      "or (maintainer and maintainer_email) " +
                      "must be supplied")
    def check_restructuredtext(self):
        """Checks if the long string fields are reST-compliant."""
        data = self.distribution.get_long_description()
        # Python 2: `unicode` builtin; decode byte strings before parsing.
        if not isinstance(data, unicode):
            data = data.decode(PKG_INFO_ENCODING)
        for warning in self.METHOD_NAME(data):
            line = warning[-1].get('line')
            if line is None:
                warning = warning[1]
            else:
                warning = '%s (line %s)' % (warning[1], line)
            self.warn(warning)
    def METHOD_NAME(self, data):
        """Returns warnings when the provided data doesn't compile."""
        # Parse with a SilentReporter so docutils messages are captured
        # rather than written to stderr.
        source_path = StringIO()
        parser = Parser()
        settings = frontend.OptionParser(components=(Parser,)).get_default_values()
        settings.tab_width = 4
        settings.pep_references = None
        settings.rfc_references = None
        reporter = SilentReporter(source_path,
                          settings.report_level,
                          settings.halt_level,
                          stream=settings.warning_stream,
                          debug=settings.debug,
                          encoding=settings.error_encoding,
                          error_handler=settings.error_encoding_error_handler)
        document = nodes.document(settings, reporter, source=source_path)
        document.note_source(source_path, -1)
        try:
            parser.parse(data, document)
        except AttributeError as e:
            reporter.messages.append(
                (-1, 'Could not finish the parsing: %s.' % e, '', {}))
        return reporter.messages
"""A large part of the tests in this file were adapted from:
https://github.com/encode/starlette/blob/master/tests/test_responses.py, and are meant to ensure our compatibility with
their API.
"""
from itertools import cycle
from typing import TYPE_CHECKING, AsyncIterator, Iterator
import anyio
from httpx_sse import aconnect_sse
from litestar import get
from litestar.background_tasks import BackgroundTask
from litestar.response import ServerSentEvent
from litestar.response.streaming import ASGIStreamingResponse
from litestar.testing import TestClient, create_async_test_client
if TYPE_CHECKING:
from litestar.types import Message, Receive, Scope, Send
def test_streaming_response_unknown_size() -> None:
    # Without a known size, the response must not advertise a content-length.
    app = ASGIStreamingResponse(iterator=iter(["hello", "world"]))
    client = TestClient(app)
    response = client.get("/")
    assert "content-length" not in response.headers
def test_streaming_response_known_size() -> None:
    # An explicitly supplied content-length header must be passed through untouched.
    app = ASGIStreamingResponse(iterator=iter(["hello", "world"]), headers={"content-length": "10"})
    client = TestClient(app)
    response = client.get("/")
    assert response.headers["content-length"] == "10"
async def test_streaming_response_stops_if_receiving_http_disconnect_with_async_iterator(anyio_backend: str) -> None:
    # Streams an infinite async iterator and simulates a client disconnect;
    # the response must stop on its own (the move_on_after guard must not fire).
    streamed = 0
    disconnected = anyio.Event()
    async def receive_disconnect() -> dict:
        await disconnected.wait()
        return {"type": "http.disconnect"}
    async def send(message: "Message") -> None:
        nonlocal streamed
        if message["type"] == "http.response.body":
            streamed += len(message.get("body", b""))
        # Simulate disconnection after download has started
        if streamed >= 16:
            disconnected.set()
    async def stream_indefinitely() -> AsyncIterator[bytes]:
        while True:
            # Need a sleep for the event loop to switch to another task
            await anyio.sleep(0)
            yield b"chunk "
    response = ASGIStreamingResponse(iterator=stream_indefinitely())
    with anyio.move_on_after(1) as cancel_scope:
        await response({}, receive_disconnect, send)  # type: ignore
    assert not cancel_scope.cancel_called, "Content streaming should stop itself."
async def test_streaming_response_stops_if_receiving_http_disconnect_with_sync_iterator(anyio_backend: str) -> None:
    # Same disconnect scenario as above, but with an endless *sync* iterator.
    streamed = 0
    disconnected = anyio.Event()
    async def receive_disconnect() -> dict:
        await disconnected.wait()
        return {"type": "http.disconnect"}
    async def send(message: "Message") -> None:
        nonlocal streamed
        if message["type"] == "http.response.body":
            streamed += len(message.get("body", b""))
        # Simulate disconnection after download has started
        if streamed >= 16:
            disconnected.set()
    response = ASGIStreamingResponse(iterator=cycle(["1", "2", "3"]))
    with anyio.move_on_after(1) as cancel_scope:
        await response({}, receive_disconnect, send)  # type: ignore
    assert not cancel_scope.cancel_called, "Content streaming should stop itself."
def test_streaming_response() -> None:
    """Stream a generator body; the background task runs only after streaming ends."""
    filled_by_bg_task = ""

    async def app(scope: "Scope", receive: "Receive", send: "Send") -> None:
        async def numbers(minimum: int, maximum: int) -> AsyncIterator[str]:
            for i in range(minimum, maximum + 1):
                yield str(i)
                if i != maximum:
                    yield ", "
                    await anyio.sleep(0)

        async def numbers_for_cleanup(start: int = 1, stop: int = 5) -> None:
            nonlocal filled_by_bg_task
            async for thing in numbers(start, stop):
                filled_by_bg_task += thing

        cleanup_task = BackgroundTask(numbers_for_cleanup, start=6, stop=9)
        generator = numbers(1, 5)
        response = ASGIStreamingResponse(iterator=generator, media_type="text/plain", background=cleanup_task)
        await response(scope, receive, send)
        # The background task must not have run while the body was streaming.
        assert not filled_by_bg_task

    client = TestClient(app)
    response = client.get("/")
    assert response.text == "1, 2, 3, 4, 5"
    # The background task ran after the response completed.
    assert filled_by_bg_task == "6, 7, 8, 9"
def test_streaming_response_custom_iterator() -> None:
    """Objects implementing the async-iterator protocol directly can be streamed."""

    async def app(scope: "Scope", receive: "Receive", send: "Send") -> None:
        class CountToFive:
            """Async iterator that yields "1" through "5", then stops."""

            def __init__(self) -> None:
                self._count = 0

            def __aiter__(self) -> "CountToFive":
                return self

            async def __anext__(self) -> str:
                self._count += 1
                if self._count > 5:
                    raise StopAsyncIteration()
                return str(self._count)

        response = ASGIStreamingResponse(iterator=CountToFive(), media_type="text/plain")
        await response(scope, receive, send)

    res = TestClient(app).get("/")
    assert res.text == "12345"
def test_streaming_response_custom_iterable() -> None:
    """Objects whose ``__aiter__`` is an async generator can be streamed."""

    async def app(scope: "Scope", receive: "Receive", send: "Send") -> None:
        class OneToFive:
            async def __aiter__(self) -> AsyncIterator[str]:
                for number in range(1, 6):
                    yield str(number)

        response = ASGIStreamingResponse(iterator=OneToFive(), media_type="text/plain")
        await response(scope, receive, send)

    res = TestClient(app).get("/")
    assert res.text == "12345"
def METHOD_NAME() -> None:
    """A plain (synchronous) generator can also be used as the response body."""

    async def app(scope: "Scope", receive: "Receive", send: "Send") -> None:
        def comma_separated(lo: int, hi: int) -> Iterator[str]:
            for value in range(lo, hi + 1):
                yield str(value)
                if value != hi:
                    yield ", "

        response = ASGIStreamingResponse(iterator=comma_separated(1, 5), media_type="text/plain")
        await response(scope, receive, send)

    res = TestClient(app).get("/")
    assert res.text == "1, 2, 3, 4, 5"
async def test_sse_steaming_response() -> None:
    """A ServerSentEvent built from a sync generator is delivered as one SSE event."""

    @get(
        path="/test",
    )
    def handler() -> ServerSentEvent:
        def numbers(minimum: int, maximum: int) -> Iterator[str]:
            for i in range(minimum, maximum + 1):
                yield str(i)
                if i != maximum:
                    yield ", "

        generator = numbers(1, 5)
        return ServerSentEvent(content=generator, event_id=123, event_type="special", retry_duration=1000)

    async with create_async_test_client(handler) as client:
        async with aconnect_sse(client, "GET", f"{client.base_url}/test") as event_source:
            events = [sse async for sse in event_source.aiter_sse()]
            assert len(events) == 1
            (sse,) = events
            assert sse.event == "special"
            # Each yielded chunk becomes its own "data:" line, hence the newlines.
            assert sse.data == "1\n, \n2\n, \n3\n, \n4\n, \n5"
            assert sse.id == "123"
            assert sse.retry == 1000
5,274 | optout email | from __future__ import annotations
from collections import defaultdict
from typing import Any, Sequence
from django.utils import timezone
from sentry.models.user import User
from .base import Newsletter
class NewsletterSubscription:
    """In-memory record of one user's subscription state for a single mailing list.

    Supports dict-style access (``sub["subscribed"]`` / ``sub.get(...)``) to
    mirror the payload shape of a real newsletter backend.
    """

    def __init__(
        self,
        user,
        list_id,
        list_name=None,
        list_description=None,
        email=None,
        verified=None,
        subscribed=False,
        subscribed_date=None,
        unsubscribed_date=None,
        **kwargs,
    ):
        from sentry.models import UserEmail

        self.email = user.email or email
        self.list_id = list_id
        self.list_description = list_description
        self.list_name = list_name
        # is the email address verified?
        self.verified = (
            UserEmail.objects.get_primary_email(user).is_verified if verified is None else verified
        )
        # are they subscribed to ``list_id``
        self.subscribed = subscribed
        # Fix: always define BOTH date attributes. Previously only one of them
        # was set (depending on the initial state), so __getitem__ on the other
        # raised AttributeError.
        self.subscribed_date = subscribed_date
        self.unsubscribed_date = unsubscribed_date
        if subscribed:
            self.subscribed_date = subscribed_date or timezone.now()
        elif subscribed is False:
            self.unsubscribed_date = unsubscribed_date or timezone.now()

    def __getitem__(self, key):
        # Dict-style read access over the instance attributes.
        return getattr(self, key)

    def get(self, key, default=None):
        return getattr(self, key, default)

    def update(
        self, verified=None, subscribed=None, subscribed_date=None, unsubscribed_date=None, **kwargs
    ):
        """Apply a partial update; ``None`` means "leave unchanged" for each field.

        When ``subscribed`` flips and no explicit timestamp is given, the
        corresponding date is stamped with the current time.
        """
        if verified is not None:
            self.verified = verified
        if subscribed is not None:
            self.subscribed = subscribed
        if subscribed_date is not None:
            self.subscribed_date = subscribed_date
        elif subscribed:
            self.subscribed_date = timezone.now()
        if unsubscribed_date is not None:
            self.unsubscribed_date = unsubscribed_date
        elif subscribed is False:
            self.unsubscribed_date = timezone.now()
class DummyNewsletter(Newsletter):
    """In-memory ``Newsletter`` backend, primarily used for test cases.

    Subscriptions live in a per-process dict, so it is not suitable for any
    real production use-case.
    """

    def __init__(self, enabled: bool = False) -> None:
        # user -> {list_id -> NewsletterSubscription}
        self._subscriptions: dict[User, dict[int, NewsletterSubscription]] = defaultdict(dict)
        self._enabled = enabled

    def enable(self):
        self._enabled = True

    def disable(self):
        self._enabled = False

    def clear(self):
        """Drop all stored subscriptions (useful between test cases)."""
        self._subscriptions = defaultdict(dict)

    def is_enabled(self):
        return self._enabled

    def get_subscriptions(self, user: User):
        return {"subscriptions": list((self._subscriptions.get(user) or {}).values())}

    def update_subscription(
        self,
        user: User,
        list_id: int | None = None,
        create: bool | None = False,
        **kwargs: Any,
    ) -> dict[int, NewsletterSubscription]:
        """Update (and with ``create=True`` lazily create) one list subscription.

        Raises ``KeyError`` when the subscription does not exist and ``create``
        is falsy.
        """
        if not list_id:
            list_id = self.get_default_list_id()

        if create:
            self._subscriptions[user].setdefault(
                list_id, NewsletterSubscription(user, list_id, subscribed=True)
            )
        self._subscriptions[user][list_id].update(**kwargs)
        return self._subscriptions[user]

    def update_subscriptions(
        self,
        user: User,
        list_ids: Sequence[int] | None = None,
        create: bool | None = False,
        **kwargs: Any,
    ):
        """Apply ``update_subscription`` across ``list_ids`` (defaults to all lists)."""
        if not list_ids:
            list_ids = self.get_default_list_ids()

        for list_id in list_ids:
            self.update_subscription(user, list_id, create, **kwargs)
        return self._subscriptions[user]

    def METHOD_NAME(self, email: str, **kwargs: Any) -> None:
        """Mark every stored subscription matching ``email`` as unsubscribed."""
        unsubscribe_date = timezone.now()
        for by_list in self._subscriptions.values():
            for subscription in by_list.values():
                if subscription.email == email:
                    # Bug fix: the keyword must be ``unsubscribed_date`` (the
                    # parameter name on NewsletterSubscription.update). The old
                    # ``unsubscribe_date`` spelling fell into **kwargs, so the
                    # supplied timestamp was silently ignored.
                    subscription.update(subscribed=False, unsubscribed_date=unsubscribe_date)
5,275 | exists | # -*- coding: utf-8 -*-
# Copyright European Organization for Nuclear Research (CERN) since 2012
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rucio.common import exception
from rucio.rse.protocols import protocol
class Default(protocol.RSEProtocol):
    """ Implementing access to RSEs using the local filesystem.

        This is a dummy protocol: "storage" is just an in-memory list of PFNs
        (``self.files``) populated by ``put``; most operations are no-ops.
    """

    def __init__(self, protocol_attr, rse_settings, logger=None):
        """ Initializes the object with information about the referred RSE.

            :param protocol_attr: Properties derived from the RSE Repository
            :param rse_settings: RSE settings from the repository
            :param logger: optional logger passed through to the base class
        """
        super(Default, self).__init__(protocol_attr, rse_settings, logger=logger)
        self.attributes.pop('determinism_type', None)
        # PFNs registered via put(); serves as the in-memory "storage".
        self.files = []

    def path2pfn(self, path):
        """
            Returns a fully qualified PFN for the file referred by path.

            :param path: The path to the file.

            :returns: Fully qualified PFN.
        """
        # NOTE(review): relies on self.rse['scheme'] / self.rse['hostname']
        # being populated by the base class -- confirm against
        # protocol.RSEProtocol's constructor.
        return ''.join([self.rse['scheme'], '://%s' % self.rse['hostname'], path])

    def METHOD_NAME(self, pfn):
        """ Checks if the requested file is known by the referred RSE.

            :param pfn: Physical file name

            :returns: True if the file exists, False if it doesn't
        """
        # Pure in-memory membership test; never raises in this dummy.
        return pfn in self.files

    def connect(self):
        """ Establishes the actual connection to the referred RSE.

            No-op for the in-memory protocol; kept for interface compatibility.

            :raise RSEAccessDenied
        """
        pass

    def close(self):
        """ Closes the connection to RSE."""
        pass

    def get(self, pfn, dest, transfer_timeout=None):
        """ Provides access to files stored inside connected the RSE.

            :param pfn: Physical file name of requested file
            :param dest: Name and path of the files when stored at the client
            :param transfer_timeout: Transfer timeout (in seconds) - dummy

            :raises SourceNotFound: if the pfn was never put() into this dummy
        """
        # No data is actually transferred; only existence is checked.
        if pfn not in self.files:
            raise exception.SourceNotFound(pfn)

    def put(self, source, target, source_dir=None, transfer_timeout=None):
        """ Allows to store files inside the referred RSE.

            :param source: Physical file name (ignored by this dummy)
            :param target: Name of the file on the storage system e.g. with prefixed scope
            :param source_dir: Path of the source files locally (ignored)
            :param transfer_timeout: Transfer timeout (in seconds) - dummy
        """
        # Only the target name is recorded; no bytes are copied.
        self.files.append(target)

    def delete(self, pfn):
        """ Deletes a file from the connected RSE.

            :param pfn: Physical file name
        """
        # NOTE(review): no-op -- the pfn is NOT removed from self.files, so
        # exists() keeps returning True after a delete. Presumably intentional
        # for this dummy; confirm with the test-suite expectations.
        pass

    def bulk_delete(self, pfns):
        """
            Submits an async task to bulk delete files.

            :param pfns: list of pfns to delete (no-op here)
        """
        pass

    def rename(self, pfn, new_pfn):
        """ Allows to rename a file stored inside the connected RSE.

            :param pfn: Current physical file name
            :param new_pfn: New physical file name
        """
        # No-op: self.files is left unchanged.
        pass
5,276 | hdx send mail members | import logging
import ckan.logic.auth.create as create
import ckan.logic.auth.update as update
import ckan.plugins.toolkit as tk
from ckanext.hdx_users.helpers.permissions import Permissions
log = logging.getLogger(__name__)
get_action = tk.get_action
auth_allow_anonymous_access = tk.auth_allow_anonymous_access
_ = tk._
def package_create(context, data_dict=None):
    """Defer to core package_create auth, temporarily hiding any 'groups' key."""
    if not (data_dict and 'groups' in data_dict):
        return create.package_create(context, data_dict)
    # Strip groups before delegating, then restore them for downstream code.
    temp_groups = data_dict.pop('groups')
    log.debug('Removed groups from data_dict: ' + str(data_dict))
    result = create.package_create(context, data_dict)
    data_dict['groups'] = temp_groups
    return result
def package_update(context, data_dict=None):
    """Defer to core package_update auth, temporarily hiding any 'groups' key."""
    if not (data_dict and 'groups' in data_dict):
        return update.package_update(context, data_dict)
    # Strip groups before delegating, then restore them for downstream code.
    temp_groups = data_dict.pop('groups')
    log.debug('Removed groups from data_dict: ' + str(data_dict))
    result = update.package_update(context, data_dict)
    data_dict['groups'] = temp_groups
    return result
def hdx_resource_id_list(context, data_dict=None):
    '''
    Only sysadmins are allowed to call this action
    (sysadmins bypass auth functions, so this always denies).
    '''
    denial_msg = _('Only sysadmins can get the entire list of resource ids')
    return {'success': False, 'msg': denial_msg}
def hdx_send_mail_contributor(context, data_dict):
    '''
    Only a logged in user has access.
    '''
    if context.get('auth_user_obj') or context.get('user_obj'):
        return {'success': True}
    return {
        'success': False,
        'msg': _('Not authorized to perform this request')
    }
def METHOD_NAME(context, data_dict):
    '''
    Only a logged in user has access and member of dataset's owner_org.
    '''
    user_obj = context.get('auth_user_obj') or context.get('user_obj')
    if user_obj:
        members = get_action('hdx_member_list')(context, {'org_id': data_dict.get('org_id')})
        # Membership info must exist AND flag the current user as a member.
        if members and members.get('is_member'):
            return {'success': True}
    return {
        'success': False,
        'msg': _('Not authorized to perform this request')
    }
# def hdx_create_screenshot_for_cod(context, data_dict=None):
# '''
# Only sysadmins are allowed to call this action
# '''
# return {'success': False, 'msg': _('Only sysadmins can create a screenshot of a dataset\'s viz')}
@auth_allow_anonymous_access
def hdx_resource_download(context, resource_dict):
    """Anyone (even anonymous) may download, except quarantined resources."""
    if not resource_dict.get('in_quarantine', False):
        return {'success': True}
    return {'success': False, 'msg': _('Only sysadmins can download quarantined resources')}
def hdx_mark_qa_completed(context, data_dict=None):
    '''
    Allowed only for users holding the manage-QA permission.
    Delegates to the shared permission helper instead of duplicating its body.
    '''
    return _check_hdx_user_permission(context, Permissions.PERMISSION_MANAGE_QA)
def hdx_mark_resource_in_quarantine(context, data_dict=None):
    '''
    Allowed only for users holding the manage-QA permission.
    Delegates to the shared permission helper instead of duplicating its body.
    '''
    return _check_hdx_user_permission(context, Permissions.PERMISSION_MANAGE_QA)
def hdx_qa_resource_patch(context, data_dict=None):
    '''
    Only sysadmins are allowed to call this action
    (sysadmins bypass auth functions, so this always denies).
    '''
    denial_msg = _('Only sysadmins can change the qa script related flags')
    return {'success': False, 'msg': denial_msg}
def hdx_fs_check_resource_revise(context, data_dict=None):
    '''
    Only sysadmins are allowed to call this action
    (sysadmins bypass auth functions, so this always denies).
    '''
    denial_msg = _('Only sysadmins can change the file structure check info')
    return {'success': False, 'msg': denial_msg}
def package_qa_checklist_update(context, data_dict=None):
    '''
    Only sysadmins are allowed to call this action
    (sysadmins bypass auth functions, so this always denies).
    '''
    denial_msg = _('Only sysadmins can change the qa_completed flag')
    return {'success': False, 'msg': denial_msg}
def hdx_cod_update(context, data_dict):
    # Requires the "manage COD" permission.
    return _check_hdx_user_permission(context, Permissions.PERMISSION_MANAGE_COD)


def hdx_dataseries_update(context, data_dict):
    # Requires the "manage dataseries" permission.
    return _check_hdx_user_permission(context, Permissions.PERMISSION_MANAGE_DATASERIES)


def _check_hdx_user_permission(context, permission):
    """Shared helper: auth succeeds iff the current user holds ``permission``."""
    username_or_id = context.get('user')
    result = Permissions(username_or_id).has_permission(permission)
    return {'success': result}
def hdx_p_coded_resource_update(context, data_dict):
    '''
    Allowed only for users holding the manage-p-codes permission.
    Delegates to the shared permission helper instead of duplicating its body.
    '''
    return _check_hdx_user_permission(context, Permissions.PERMISSION_MANAGE_P_CODES)
def hdx_send_mail_request_tags(context, data_dict):
    '''
    Only a logged in user has access.
    '''
    if context.get('auth_user_obj') or context.get('user_obj'):
        return {'success': True}
    return {
        'success': False,
        'msg': _('Not authorized to perform this request')
    }
5,277 | test repo locale checkout path | import os
from unittest.mock import call, patch, Mock
from urllib.parse import urlparse
import pytest
from django.core.exceptions import ValidationError
from pontoon.base.models import repository_url_validator
from pontoon.test.factories import ProjectLocaleFactory
@pytest.mark.django_db
def test_repo_checkout_path(repo_git, settings):
    """checkout_path should be determined by the repo URL."""
    # im a bit unclear about the mix of os.path and urlparse here
    # how would this work on windows <> linux ?
    assert repo_git.checkout_path == os.path.join(
        *[repo_git.project.checkout_path] + urlparse(repo_git.url).path.split("/")
    )
    # Changing MEDIA_ROOT must be reflected in the computed checkout path,
    # i.e. the path is derived lazily rather than cached at creation time.
    settings.MEDIA_ROOT = "/media/root"
    assert repo_git.checkout_path == os.path.join(
        *[repo_git.project.checkout_path] + urlparse(repo_git.url).path.split("/")
    )
    assert repo_git.project.checkout_path.startswith("/media/root")
@pytest.mark.django_db
def test_repo_checkout_path_multi_locale(settings, repo_git):
    """
    The checkout_path for multi-locale repos should not include the
    locale_code variable.
    """
    repo_git.url = "https://example.com/path/to/{locale_code}/"
    repo_git.save()
    settings.MEDIA_ROOT = "/media/root"
    # The trailing "/{locale_code}/" URL segment is stripped from the path.
    assert repo_git.checkout_path == (
        "/media/root/projects/%s/path/to" % repo_git.project.slug
    )
@pytest.mark.django_db
def test_repo_checkout_path_source_repo(settings, repo_git):
    """
    The checkout_path for a source repo should end with a templates
    directory.
    """
    repo_git.source_repo = True
    repo_git.url = "https://example.com/path/to/locale/"
    repo_git.save()
    # Source repos get an extra "templates" suffix appended to the URL path.
    assert repo_git.checkout_path == (
        "%s/projects/%s/path/to/locale/templates"
        % (settings.MEDIA_ROOT, repo_git.project.slug)
    )
@pytest.mark.django_db
def METHOD_NAME(settings, repo_git, locale_a):
    """The locale code is appended to the project's checkout_path."""
    repo_git.url = "https://example.com/path/{locale_code}/"
    repo_git.save()
    expected = f"{settings.MEDIA_ROOT}/projects/{repo_git.project.slug}/path/{locale_a.code}"
    assert repo_git.locale_checkout_path(locale_a) == expected
@pytest.mark.django_db
def test_repo_path_non_multi_locale(repo_git, locale_a):
    """If the repo isn't multi-locale, throw a ValueError."""
    # Sanity check the fixture: no {locale_code} in its URL.
    assert repo_git.multi_locale is False
    with pytest.raises(ValueError):
        repo_git.locale_checkout_path(locale_a)
@pytest.mark.django_db
def test_repo_locale_url(repo_git, locale_a):
    """locale_url fills the {locale_code} placeholder with the locale's code."""
    repo_git.url = "https://example.com/path/to/{locale_code}/"
    repo_git.save()
    expected = f"https://example.com/path/to/{locale_a.code}/"
    assert repo_git.locale_url(locale_a) == expected
@pytest.mark.django_db
def test_repo_locale_url_non_multi_locale(repo_git, locale_a):
    """If the repo isn't multi-locale, throw a ValueError."""
    # The fixture's URL has no {locale_code} placeholder to substitute.
    with pytest.raises(ValueError):
        repo_git.locale_url(locale_a)
@pytest.mark.django_db
def test_repo_url_for_path(project_locale_a, repo_git, locale_b):
    """
    Return the first locale_checkout_path for locales active for the
    repo's project that matches the given path.
    """
    # Add a second active locale so url_for_path must pick the matching one.
    ProjectLocaleFactory.create(
        project=repo_git.project,
        locale=locale_b,
    )
    repo_git.url = "https://example.com/path/to/{locale_code}/"
    repo_git.save()
    assert (
        repo_git.url_for_path(
            os.path.join(
                repo_git.locale_checkout_path(project_locale_a.locale), "foo/bar.po"
            )
        )
        == "https://example.com/path/to/%s/" % project_locale_a.locale.code
    )
@pytest.mark.django_db
def test_repo_url_for_path_no_match(repo_git, locale_a, settings):
    """url_for_path raises ValueError for a path outside any locale checkout."""
    repo_git.url = "https://example.com/path/to/{locale_code}/"
    repo_git.save()
    settings.MEDIA_ROOT = "/media/root"
    # "match" is not an active locale's checkout directory.
    with pytest.raises(ValueError):
        repo_git.url_for_path("/media/root/path/to/match/foo/bar.po")
@pytest.mark.django_db
def test_repo_pull(repo_git):
    """A single-locale pull updates from VCS once and returns its revision."""
    with patch(
        "pontoon.sync.vcs.repositories.update_from_vcs"
    ) as m_update_from_vcs, patch(
        "pontoon.sync.vcs.repositories.get_revision"
    ) as m_get_revision:
        repo_git.url = "https://example.com"
        m_get_revision.return_value = "asdf"
        # "single_locale" is the pseudo-key used for non-multi-locale repos.
        assert repo_git.pull() == {"single_locale": "asdf"}
        # update_from_vcs(vcs_type, url, checkout_path, branch)
        assert m_update_from_vcs.call_args[0] == (
            "git",
            "https://example.com",
            repo_git.checkout_path,
            "",
        )
@pytest.mark.django_db
def test_repo_pull_multi_locale(project_locale_a, repo_git, locale_b):
    """
    If the repo is multi-locale, pull all of the repos for the
    active locales.
    """
    locale_a = project_locale_a.locale
    ProjectLocaleFactory.create(
        project=repo_git.project,
        locale=locale_b,
    )
    with patch("pontoon.sync.vcs.repositories.update_from_vcs") as m_update_from_vcs:
        with patch("pontoon.sync.vcs.repositories.get_revision") as m_get_revision:
            repo_git.url = "https://example.com/{locale_code}/"
            # Stub the per-locale URL/path helpers so no real FS/VCS is touched.
            repo_git.locale_url = lambda locale: "https://example.com/%s" % locale.code
            repo_git.locale_checkout_path = lambda locale: "/media/%s" % locale.code
            # Return path as the revision so different locales return
            # different values.
            m_get_revision.side_effect = lambda type, path: path
            assert repo_git.pull() == {
                locale_a.code: "/media/%s" % locale_a.code,
                locale_b.code: "/media/%s" % locale_b.code,
            }
            # NOTE(review): asserts locale_b is pulled before locale_a --
            # presumably tied to the locales queryset ordering; confirm if
            # this ever turns flaky.
            assert m_update_from_vcs.call_args_list == [
                call(
                    "git",
                    "https://example.com/%s" % locale_b.code,
                    "/media/%s" % locale_b.code,
                    "",
                ),
                call(
                    "git",
                    "https://example.com/%s" % locale_a.code,
                    "/media/%s" % locale_a.code,
                    "",
                ),
            ]
@pytest.mark.django_db
def test_repo_commit(repo_git):
    """commit() forwards to commit_to_vcs with the repo's own URL."""
    repo_git.url = "https://example.com"
    with patch("pontoon.sync.vcs.repositories.commit_to_vcs") as m:
        repo_git.commit("message", "author", "path")
        # commit_to_vcs(vcs_type, path, message, author, branch, url)
        assert m.call_args[0] == (
            "git",
            "path",
            "message",
            "author",
            "",
            "https://example.com",
        )
@pytest.mark.django_db
def test_repo_commit_multi_locale(repo_git):
    """
    If the repo is multi-locale, use the url from url_for_path for
    committing.
    """
    repo_git.url = "https://example.com/{locale_code}/"
    repo_git.url_for_path = Mock(return_value="https://example.com/for_path")
    with patch("pontoon.sync.vcs.repositories.commit_to_vcs") as m:
        repo_git.commit("message", "author", "path")
        assert m.call_args[0] == (
            "git",
            "path",
            "message",
            "author",
            "",
            "https://example.com/for_path",
        )
        # The commit path itself is what gets resolved to a per-locale URL.
        assert repo_git.url_for_path.call_args[0] == ("path",)
def test_repository_url_validator():
    """Valid http and SCP-style git URLs pass; option-injection strings fail."""
    for good_url in ("https://example.com/", "git@github.com:user/repository.git"):
        assert repository_url_validator(good_url) is None
    # Strings that look like CLI options must be rejected outright.
    with pytest.raises(ValidationError):
        repository_url_validator("--evil=parameter")
5,278 | sample program configs | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('../')
from auto_scan_test import AutoScanTest, IgnoreReasons
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import unittest
from functools import partial
import numpy as np
import hypothesis
from hypothesis import given, settings, seed, example, assume
import hypothesis.strategies as st
import argparse
class TestScatterNdAddOp(AutoScanTest):
    """Auto-scan test for the ``scatter_nd_add`` operator on the Host target."""

    def __init__(self, *args, **kwargs):
        AutoScanTest.__init__(self, *args, **kwargs)
        # Only FP32 / NCHW on Host is exercised, with 1 and 4 worker threads.
        self.enable_testing_on_place(
            TargetType.Host,
            PrecisionType.FP32,
            DataLayoutType.NCHW,
            thread=[1, 4])

    def is_program_valid(self,
                         program_config: ProgramConfig,
                         predictor_config: CxxConfig) -> bool:
        # Only float32 data with int32 indices is accepted; other drawn dtype
        # combinations are rejected and re-drawn by the auto-scan harness.
        in_dtype = program_config.inputs["input_data"].dtype
        index_dtype = program_config.inputs["index"].dtype
        if in_dtype == "float32" and index_dtype == "int32":
            return True
        else:
            return False

    def METHOD_NAME(self, draw):
        """Draw one random ``scatter_nd_add`` program (input, index, updates)."""
        def judge_update_shape(ref_shape, index_shape):
            # Updates shape = index_shape[:-1] + ref_shape[index_shape[-1]:].
            update_shape = []
            for i in range(len(index_shape) - 1):
                update_shape.append(index_shape[i])
            for i in range(index_shape[-1], len(ref_shape), 1):
                update_shape.append(ref_shape[i])
            return update_shape
        input_type = draw(st.sampled_from(["int32", "int64", "float32"]))
        index_type = draw(st.sampled_from(["int32", "int64"]))
        out_dtype_dict = {
            "int32": np.int32,
            "int64": np.int64,
            "float32": np.float32
        }
        in_shape = draw(
            st.lists(
                st.integers(
                    min_value=2, max_value=8), min_size=3, max_size=7))
        # 100 random full-rank index tuples, one column per input dimension.
        index_np = np.vstack(
            [np.random.randint(
                0, s, size=100) for s in in_shape]).T.astype(index_type)
        test_update_0d = draw(st.sampled_from([True, False]))
        if test_update_0d:
            # 0-d-updates case: 1-D input with a single scalar index.
            # NOTE(review): index_np computed above from the previous in_shape
            # is discarded and rebuilt here.
            in_shape = draw(
                st.lists(
                    st.integers(
                        min_value=2, max_value=8),
                    min_size=1,
                    max_size=1))
            index_np = np.array([1]).reshape([1]).astype(index_type)
        update_shape = judge_update_shape(in_shape, index_np.shape)
        if test_update_0d == False:
            assume(index_np.shape[-1] <= len(in_shape))
        def generate_data(*args, **kwargs):
            # Random tensor generator dispatched on the requested dtype.
            if kwargs["type"] == "int32":
                return np.random.randint(kwargs["low"], kwargs["high"],
                                         kwargs["shape"]).astype(np.int32)
            elif kwargs["type"] == "int64":
                return np.random.randint(kwargs["low"], kwargs["high"],
                                         kwargs["shape"]).astype(np.int64)
            elif kwargs["type"] == "float32":
                return np.random.random(kwargs["shape"]).astype(np.float32)
        def generate_index_data(*args, **kwargs):
            # The index tensor was fixed above so its shape matches updates.
            return index_np
        scatter_nd_add_op = OpConfig(
            type="scatter_nd_add",
            inputs={
                "X": ["input_data"],
                "Index": ["index"],
                "Updates": ["updates"]
            },
            outputs={"Out": ["output_data"]},
            outputs_dtype={"output_data": out_dtype_dict[input_type]},
            attrs={})
        program_config = ProgramConfig(
            ops=[scatter_nd_add_op],
            weights={},
            inputs={
                "input_data": TensorConfig(data_gen=partial(
                    generate_data,
                    type=input_type,
                    low=-10,
                    high=10,
                    shape=in_shape)),
                "index": TensorConfig(data_gen=partial(generate_index_data)),
                "updates": TensorConfig(data_gen=partial(
                    generate_data,
                    type=input_type,
                    low=-10,
                    high=10,
                    shape=update_shape)),
            },
            outputs=["output_data"])
        return program_config

    def sample_predictor_configs(self):
        # (configs, op list, (atol, rtol)) for result comparison.
        return self.get_predictor_configs(), ["scatter_nd_add"], (1e-5, 1e-5)

    def add_ignore_pass_case(self):
        pass

    def test(self, *args, **kwargs):
        target_str = self.get_target()
        max_examples = 25
        if target_str == "Host":
            # Make sure to generate enough valid cases for Host
            max_examples = 400
        self.run_and_statis(
            quant=False, min_success_num=25, max_examples=max_examples)
# Run the auto-scan suite when executed directly (argv cleared for unittest).
if __name__ == "__main__":
    unittest.main(argv=[''])
5,279 | test add torrent success | import pytest
from ddt import ddt
from mock import patch, Mock, MagicMock
from requests import Response
from monitorrent.plugins.clients.utorrent import UTorrentClientPlugin
from tests import DbTestCase, use_vcr
@ddt
class UTorrentPluginTest(DbTestCase):
real_host = "http://localhost"
real_port = 8080
real_login = "admin"
real_password = "password"
bad_host = "http://fake.com"
bad_port = 1234
bad_login = "fake"
bad_password = "more_fake"
def test_settings(self):
plugin = UTorrentClientPlugin()
settings = {'host': 'localhost', 'username': 'monitorrent', 'password': 'monitorrent'}
self.assertIsNone(plugin.get_settings())
plugin.set_settings(settings)
readed_settings = plugin.get_settings()
self.assertEqual({'host': 'localhost', 'port': None, 'username': 'monitorrent'}, readed_settings)
@use_vcr
def test_check_connection_successfull(self):
plugin = UTorrentClientPlugin()
settings = {'host': self.real_host, 'port': self.real_port, 'username': self.real_login,
'password': self.real_password}
plugin.set_settings(settings)
self.assertTrue(plugin.check_connection())
def test_check_connection_failed(self):
import monitorrent.plugins.clients.utorrent
with patch.object(monitorrent.plugins.clients.utorrent.requests.Session, 'get', side_effect=Exception):
plugin = UTorrentClientPlugin()
settings = {'host': self.bad_host, 'port': self.bad_port, 'username': self.bad_login,
'password': self.bad_password}
plugin.set_settings(settings)
self.assertFalse(plugin.check_connection())
@use_vcr
def test_find_torrent(self):
plugin = UTorrentClientPlugin()
torrent_hash = "8347DD6415598A7409DFC3D1AB95078F959BFB93"
settings = {'host': self.real_host, 'username': self.real_login,
'password': self.real_password}
plugin.set_settings(settings)
torrent = plugin.find_torrent(torrent_hash)
self.assertIsNone(torrent['date_added'])
self.assertIsNotNone(torrent['name'])
def test_find_torrent_no_settings(self):
import monitorrent.plugins.clients.utorrent
with patch.object(monitorrent.plugins.clients.utorrent.requests.Session, 'get', side_effect=Exception):
plugin = UTorrentClientPlugin()
torrent_hash = "8347DD6415598A7409DFC3D1AB95078F959BFB93"
settings = {'host': self.bad_host, 'port': self.bad_port, 'username': self.bad_login,
'password': self.bad_password}
plugin.set_settings(settings)
torrent = plugin.find_torrent(torrent_hash)
self.assertFalse(torrent)
@patch('requests.Session.get')
def test_find_torrent_failed(self, get_mock):
response = Response()
response._content = b"<html><div id=''token'' style=''display:none;''>FKWBGjUDYXGNX7I-UBo5-UiWK1MUOaDmjjrorxOTzmEq3b0lWpr4no8v-FYAAAAA</div></html>"
get_mock.return_value = response
plugin = UTorrentClientPlugin()
torrent_hash = "8347DD6415598A7409DFC3D1AB95078F959BFB93"
settings = {'host': self.real_host, 'port': self.real_port, 'username': self.real_login,
'password': self.real_password}
plugin.set_settings(settings)
with pytest.raises(Exception) as e:
plugin.find_torrent(torrent_hash)
@patch('requests.Session.get')
def test_add_torrent_bad_settings(self, get_mock):
plugin = UTorrentClientPlugin()
torrent = b'torrent'
self.assertFalse(plugin.add_torrent(torrent, None))
@patch('requests.Session.get')
@patch('requests.Session.post')
def test_add_torrent_failed(self, post_mock, get_mock):
response = Response()
response._content = b"<html><div id=''token'' style=''display:none;''>FKWBGjUDYXGNX7I-UBo5-UiWK1MUOaDmjjrorxOTzmEq3b0lWpr4no8v-FYAAAAA</div></html>"
response.status_code = 200
get_mock.return_value = response
post_mock.side_effect = Exception('boom')
plugin = UTorrentClientPlugin()
settings = {'host': self.real_host, 'port': self.real_port, 'username': self.real_login,
'password': self.real_password}
plugin.set_settings(settings)
torrent = b'torrent'
with pytest.raises(Exception) as e:
plugin.add_torrent(torrent, None)
@patch('requests.Session.get')
@patch('requests.Session.post')
def METHOD_NAME(self, post_mock, get_mock):
response = Response()
response._content = b"<html><div id=''token'' style=''display:none;''>FKWBGjUDYXGNX7I-UBo5-UiWK1MUOaDmjjrorxOTzmEq3b0lWpr4no8v-FYAAAAA</div></html>"
response.status_code = 200
good_response = Response()
good_response.status_code = 200
get_mock.return_value = response
post_mock.return_value = good_response
plugin = UTorrentClientPlugin()
settings = {'host': self.real_host, 'port': self.real_port, 'username': self.real_login,
'password': self.real_password}
plugin.set_settings(settings)
torrent = b'torrent'
self.assertTrue(plugin.add_torrent(torrent, None))
@patch('requests.Session.get')
def test_remove_torrent_bad_settings(self, get_mock):
plugin = UTorrentClientPlugin()
torrent = b'torrent'
assert plugin.remove_torrent(torrent) is False
@patch('requests.Session.get')
def test_remove_torrent_failed(self, get_mock):
response = Response()
response._content = b"<html><div id=''token'' style=''display:none;''>FKWBGjUDYXGNX7I-UBo5-UiWK1MUOaDmjjrorxOTzmEq3b0lWpr4no8v-FYAAAAA</div></html>"
response.status_code = 200
get_mock.side_effect = [response, Exception('boom')]
plugin = UTorrentClientPlugin()
settings = {'host': self.real_host, 'port': self.real_port, 'username': self.real_login,
'password': self.real_password}
plugin.set_settings(settings)
torrent = b'torrent'
with pytest.raises(Exception) as e:
plugin.remove_torrent(torrent)
@patch('requests.Session.get')
def test_remove_torrent_success(self, get_mock):
response = Response()
response._content = b"<html><div id=''token'' style=''display:none;''>FKWBGjUDYXGNX7I-UBo5-UiWK1MUOaDmjjrorxOTzmEq3b0lWpr4no8v-FYAAAAA</div></html>"
response.status_code = 200
good_response = Response()
good_response.status_code = 200
get_mock.side_effect = [response, good_response]
plugin = UTorrentClientPlugin()
settings = {'host': self.real_host, 'port': self.real_port, 'username': self.real_login,
'password': self.real_password}
plugin.set_settings(settings)
torrent = b'torrent'
self.assertTrue(plugin.remove_torrent(torrent)) |
5,280 | remove prefix | # The GPEN implementation is also open-sourced by the authors,
# and available at https://github.com/yangxy/GPEN/blob/main/face_detect/retinaface_detection.py
import os
import cv2
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
from .models.retinaface import RetinaFace
from .utils import PriorBox, decode, decode_landm, py_cpu_nms
# Configuration for the ResNet-50-backed RetinaFace detector.
cfg_re50 = {
    'name': 'Resnet50',
    # Anchor sizes per feature-map level -- presumably pixels; TODO confirm.
    'min_sizes': [[16, 32], [64, 128], [256, 512]],
    # Downsampling stride of each feature-map level relative to the input.
    'steps': [8, 16, 32],
    # Variances used by decode()/decode_landm() when un-encoding box offsets.
    'variance': [0.1, 0.2],
    'clip': False,
    'pretrain': False,
    # Which ResNet stages feed the FPN (see RetinaFace's return_layers usage).
    'return_layers': {
        'layer2': 1,
        'layer3': 2,
        'layer4': 3
    },
    'in_channel': 256,
    'out_channel': 256
}
class RetinaFaceDetection(object):
    def __init__(self, model_path, device='cuda'):
        """Build a RetinaFace (ResNet-50) detector and load weights from ``model_path``."""
        # Let cuDNN pick the fastest conv algorithms for fixed-size inputs.
        cudnn.benchmark = True
        self.model_path = model_path
        self.device = device
        self.cfg = cfg_re50
        self.net = RetinaFace(cfg=self.cfg)
        self.load_model()
        self.net = self.net.to(device)
        # Per-channel mean (104, 117, 123) shaped (1, 3, 1, 1) for NCHW
        # broadcasting -- presumably BGR order (OpenCV input); TODO confirm.
        self.mean = torch.tensor([[[[104]], [[117]], [[123]]]]).to(device)
def check_keys(self, pretrained_state_dict):
ckpt_keys = set(pretrained_state_dict.keys())
model_keys = set(self.net.state_dict().keys())
used_pretrained_keys = model_keys & ckpt_keys
assert len(
used_pretrained_keys) > 0, 'load NONE from pretrained checkpoint'
return True
def METHOD_NAME(self, state_dict, prefix):
new_state_dict = dict()
# remove unnecessary 'module.'
for k, v in state_dict.items():
if k.startswith(prefix):
new_state_dict[k[len(prefix):]] = v
else:
new_state_dict[k] = v
return new_state_dict
def load_model(self, load_to_cpu=False):
pretrained_dict = torch.load(
self.model_path, map_location=torch.device('cpu'))
if 'state_dict' in pretrained_dict.keys():
pretrained_dict = self.METHOD_NAME(pretrained_dict['state_dict'],
'module.')
else:
pretrained_dict = self.METHOD_NAME(pretrained_dict, 'module.')
self.check_keys(pretrained_dict)
self.net.load_state_dict(pretrained_dict, strict=False)
self.net.eval()
def detect(self,
img_raw,
resize=1,
confidence_threshold=0.9,
nms_threshold=0.4,
top_k=5000,
keep_top_k=750,
save_image=False):
img = np.float32(img_raw)
im_height, im_width = img.shape[:2]
ss = 1.0
# tricky
if max(im_height, im_width) > 1500:
ss = 1000.0 / max(im_height, im_width)
img = cv2.resize(img, (0, 0), fx=ss, fy=ss)
im_height, im_width = img.shape[:2]
scale = torch.Tensor(
[img.shape[1], img.shape[0], img.shape[1], img.shape[0]])
img -= (104, 117, 123)
img = img.transpose(2, 0, 1)
img = torch.from_numpy(img).unsqueeze(0)
img = img.to(self.device)
scale = scale.to(self.device)
loc, conf, landms = self.net(img) # forward pass
del img
priorbox = PriorBox(self.cfg, image_size=(im_height, im_width))
priors = priorbox.forward()
priors = priors.to(self.device)
prior_data = priors.data
boxes = decode(loc.data.squeeze(0), prior_data, self.cfg['variance'])
boxes = boxes * scale / resize
boxes = boxes.cpu().numpy()
scores = conf.squeeze(0).data.cpu().numpy()[:, 1]
landms = decode_landm(
landms.data.squeeze(0), prior_data, self.cfg['variance'])
scale1 = torch.Tensor([
im_width, im_height, im_width, im_height, im_width, im_height,
im_width, im_height, im_width, im_height
])
scale1 = scale1.to(self.device)
landms = landms * scale1 / resize
landms = landms.cpu().numpy()
# ignore low scores
inds = np.where(scores > confidence_threshold)[0]
boxes = boxes[inds]
landms = landms[inds]
scores = scores[inds]
# keep top-K before NMS
order = scores.argsort()[::-1][:top_k]
boxes = boxes[order]
landms = landms[order]
scores = scores[order]
# do NMS
dets = np.hstack((boxes, scores[:, np.newaxis])).astype(
np.float32, copy=False)
keep = py_cpu_nms(dets, nms_threshold)
dets = dets[keep, :]
landms = landms[keep]
# keep top-K faster NMS
dets = dets[:keep_top_k, :]
landms = landms[:keep_top_k, :]
landms = landms.reshape((-1, 5, 2))
landms = landms.transpose((0, 2, 1))
landms = landms.reshape(
-1,
10,
)
return dets / ss, landms / ss
def detect_tensor(self,
img,
resize=1,
confidence_threshold=0.9,
nms_threshold=0.4,
top_k=5000,
keep_top_k=750,
save_image=False):
im_height, im_width = img.shape[-2:]
ss = 1000 / max(im_height, im_width)
img = F.interpolate(img, scale_factor=ss)
im_height, im_width = img.shape[-2:]
scale = torch.Tensor([im_width, im_height, im_width,
im_height]).to(self.device)
img -= self.mean
loc, conf, landms = self.net(img) # forward pass
priorbox = PriorBox(self.cfg, image_size=(im_height, im_width))
priors = priorbox.forward()
priors = priors.to(self.device)
prior_data = priors.data
boxes = decode(loc.data.squeeze(0), prior_data, self.cfg['variance'])
boxes = boxes * scale / resize
boxes = boxes.cpu().numpy()
scores = conf.squeeze(0).data.cpu().numpy()[:, 1]
landms = decode_landm(
landms.data.squeeze(0), prior_data, self.cfg['variance'])
scale1 = torch.Tensor([
img.shape[3], img.shape[2], img.shape[3], img.shape[2],
img.shape[3], img.shape[2], img.shape[3], img.shape[2],
img.shape[3], img.shape[2]
])
scale1 = scale1.to(self.device)
landms = landms * scale1 / resize
landms = landms.cpu().numpy()
# ignore low scores
inds = np.where(scores > confidence_threshold)[0]
boxes = boxes[inds]
landms = landms[inds]
scores = scores[inds]
# keep top-K before NMS
order = scores.argsort()[::-1][:top_k]
boxes = boxes[order]
landms = landms[order]
scores = scores[order]
# do NMS
dets = np.hstack((boxes, scores[:, np.newaxis])).astype(
np.float32, copy=False)
keep = py_cpu_nms(dets, nms_threshold)
dets = dets[keep, :]
landms = landms[keep]
# keep top-K faster NMS
dets = dets[:keep_top_k, :]
landms = landms[:keep_top_k, :]
landms = landms.reshape((-1, 5, 2))
landms = landms.transpose((0, 2, 1))
landms = landms.reshape(
-1,
10,
)
return dets / ss, landms / ss |
5,281 | test initialize | from unittest import mock
from xsdata.codegen.container import ClassContainer
from xsdata.codegen.container import Steps
from xsdata.codegen.models import Class
from xsdata.codegen.models import Status
from xsdata.models.config import GeneratorConfig
from xsdata.models.enums import Tag
from xsdata.utils.testing import AttrFactory
from xsdata.utils.testing import ClassFactory
from xsdata.utils.testing import FactoryTestCase
class ClassContainerTests(FactoryTestCase):
    """Tests for xsdata's ClassContainer registry and processing pipeline."""

    def setUp(self):
        super().setUp()
        self.container = ClassContainer(config=GeneratorConfig())

    def METHOD_NAME(self):
        """Classes are bucketed by qname and processors are wired per step."""
        classes = [
            ClassFactory.create(qname="{xsdata}foo", tag=Tag.ELEMENT),
            ClassFactory.create(qname="{xsdata}foo", tag=Tag.COMPLEX_TYPE),
            ClassFactory.create(qname="{xsdata}foobar", tag=Tag.COMPLEX_TYPE),
        ]
        config = GeneratorConfig()
        container = ClassContainer(config)
        container.extend(classes)
        # Two distinct qnames -> two buckets, three classes overall.
        self.assertEqual(2, len(container.data))
        self.assertEqual(3, len(list(container)))
        self.assertEqual(classes, list(container))
        actual = {
            step: [processor.__class__.__name__ for processor in processors]
            for step, processors in container.processors.items()
        }
        # Pin the exact processor names registered for each pipeline step.
        expected = {
            10: [
                "FlattenAttributeGroups",
            ],
            20: [
                "CalculateAttributePaths",
                "FlattenClassExtensions",
                "SanitizeEnumerationClass",
                "UpdateAttributesEffectiveChoice",
                "UnnestInnerClasses",
                "AddAttributeSubstitutions",
                "ProcessAttributeTypes",
                "MergeAttributes",
                "ProcessMixedContentClass",
            ],
            30: [
                "ResetAttributeSequences",
                "RenameDuplicateAttributes",
                "SanitizeAttributesDefaultValue",
            ],
            40: ["ValidateAttributesOverrides"],
            50: [
                "VacuumInnerClasses",
                "CreateCompoundFields",
                "ResetAttributeSequenceNumbers",
            ],
        }
        self.assertEqual(expected, actual)

    @mock.patch.object(ClassContainer, "process_class")
    def test_find(self, mock_process_class):
        """find() returns a matching class, processing it lazily if needed."""
        def process_class(x: Class, step: int):
            x.status = Status.FLATTENED

        class_a = ClassFactory.create(qname="a")
        class_b = ClassFactory.create(qname="b", status=Status.FLATTENED)
        class_c = ClassFactory.enumeration(2, qname="b", status=Status.FLATTENING)
        mock_process_class.side_effect = process_class
        self.container.extend([class_a, class_b, class_c])
        self.container.step = Steps.FLATTEN
        self.assertIsNone(self.container.find("nope"))
        self.assertEqual(class_a, self.container.find(class_a.qname))
        self.assertEqual(class_b, self.container.find(class_b.qname))
        # A predicate narrows down between classes sharing a qname.
        self.assertEqual(
            class_c, self.container.find(class_b.qname, lambda x: x.is_enumeration)
        )
        # Only class_a was unprocessed and needed a process_class call.
        mock_process_class.assert_called_once_with(class_a, Steps.FLATTEN)

    @mock.patch.object(ClassContainer, "process_class")
    def test_find_inner(self, mock_process_class):
        """find_inner() resolves inner classes, processing them lazily."""
        obj = ClassFactory.create()
        first = ClassFactory.create(qname="{a}a")
        second = ClassFactory.create(qname="{a}b", status=Status.FLATTENED)
        obj.inner.extend((first, second))

        def process_class(x: Class, step: int):
            x.status = Status.FLATTENED

        mock_process_class.side_effect = process_class
        self.container.step = Steps.FLATTEN
        self.assertEqual(first, self.container.find_inner(obj, "{a}a"))
        self.assertEqual(second, self.container.find_inner(obj, "{a}b"))
        mock_process_class.assert_called_once_with(first, Steps.FLATTEN)

    def test_first(self):
        """first() returns the first class for a qname or raises KeyError."""
        obj = ClassFactory.create()
        self.container.add(obj)
        self.assertEqual(obj, self.container.first(obj.qname))
        with self.assertRaises(KeyError) as cm:
            self.container.first("aa")

    def test_process_class(self):
        """Processing drives a class and its inner classes to FINALIZED."""
        target = ClassFactory.create(
            inner=[ClassFactory.elements(2), ClassFactory.elements(1)]
        )
        self.container.add(target)
        self.container.process()
        self.assertEqual(Status.FINALIZED, target.status)
        self.assertEqual(Status.FINALIZED, target.inner[0].status)
        self.assertEqual(Status.FINALIZED, target.inner[1].status)

    def test_process_classes(self):
        """process_classes() advances every class to the step's status."""
        target = ClassFactory.create(
            attrs=[AttrFactory.reference("enumeration", forward=True)],
            inner=[ClassFactory.enumeration(2, qname="enumeration")],
        )
        self.container.add(target)
        self.container.process_classes(Steps.FLATTEN)
        self.assertEqual(2, len(list(self.container)))
        for obj in self.container:
            self.assertEqual(Status.FLATTENED, obj.status)

    def test_filter_classes(self):
        """filter_classes() keeps complex types plus enums they reference."""
        complex_type = ClassFactory.elements(1)
        enum_1 = ClassFactory.enumeration(2)
        complex_type.attrs[0].types[0].reference = enum_1.ref
        simple_type = ClassFactory.simple_type()
        enum_2 = ClassFactory.enumeration(3)
        simple_type.attrs[0].types[0].reference = enum_2.ref
        element = ClassFactory.create(tag=Tag.ELEMENT, abstract=True)
        container = ClassContainer(config=GeneratorConfig())
        container.extend([complex_type, enum_1, enum_2, simple_type, element])
        expected = [complex_type, enum_1]
        container.filter_classes()
        self.assertEqual(expected, list(container))

    def test_remove_groups(self):
        """remove_groups() drops attribute-group and group classes."""
        classes = [
            ClassFactory.create(tag=Tag.ATTRIBUTE_GROUP),
            ClassFactory.create(tag=Tag.GROUP),
            ClassFactory.create(tag=Tag.ELEMENT),
        ]
        self.container.extend(classes)
        self.container.remove_groups()
        self.assertEqual(1, len(list(self.container)))
5,282 | test three conditions alert message | import urllib
from zerver.lib.test_classes import WebhookTestCase
class LibratoHookTests(WebhookTestCase):
    """Webhook tests for the Librato integration (alerts and snapshots)."""

    STREAM_NAME = "librato"
    URL_TEMPLATE = "/api/v1/external/librato?api_key={api_key}&stream={stream}"
    WEBHOOK_DIR_NAME = "librato"
    # Snapshot payloads arrive as raw JSON attachments; alert payloads are
    # form-encoded under a 'payload' key. Toggled per-test.
    IS_ATTACHMENT = False

    def get_body(self, fixture_name: str) -> str:
        """Return the fixture body, form-encoding it unless it is an attachment."""
        if self.IS_ATTACHMENT:
            return self.webhook_fixture_data("librato", fixture_name, file_type="json")
        return urllib.parse.urlencode(
            {"payload": self.webhook_fixture_data("librato", fixture_name, file_type="json")}
        )

    def test_alert_message_with_default_topic(self) -> None:
        """A triggered alert uses the 'Alert <name>' default topic."""
        expected_topic = "Alert alert.name"
        expected_message = "Alert [alert_name](https://metrics.librato.com/alerts#/6294535) has triggered! [Reaction steps](http://www.google.pl):\n * Metric `librato.cpu.percent.idle`, sum was below 44 by 300s, recorded at 2016-03-31 09:11:42 UTC.\n * Metric `librato.swap.swap.cached`, average was absent by 300s, recorded at 2016-03-31 09:11:42 UTC.\n * Metric `librato.swap.swap.cached`, derivative was above 9 by 300s, recorded at 2016-03-31 09:11:42 UTC."
        self.check_webhook(
            "alert",
            expected_topic,
            expected_message,
            content_type="application/x-www-form-urlencoded",
        )

    def test_alert_message_with_custom_topic(self) -> None:
        """A topic passed in the URL overrides the default alert topic."""
        custom_topic = "custom_name"
        self.url = self.build_webhook_url(topic=custom_topic)
        expected_message = "Alert [alert_name](https://metrics.librato.com/alerts#/6294535) has triggered! [Reaction steps](http://www.google.pl):\n * Metric `librato.cpu.percent.idle`, sum was below 44 by 300s, recorded at 2016-03-31 09:11:42 UTC.\n * Metric `librato.swap.swap.cached`, average was absent by 300s, recorded at 2016-03-31 09:11:42 UTC.\n * Metric `librato.swap.swap.cached`, derivative was above 9 by 300s, recorded at 2016-03-31 09:11:42 UTC."
        self.check_webhook(
            "alert",
            custom_topic,
            expected_message,
            content_type="application/x-www-form-urlencoded",
        )

    def METHOD_NAME(self) -> None:
        """An alert with three conditions lists every condition as a bullet."""
        expected_message = "Alert [alert_name](https://metrics.librato.com/alerts#/6294535) has triggered! [Reaction steps](http://www.use.water.pl):\n * Metric `collectd.interface.eth0.if_octets.tx`, absolute_value was above 4 by 300s, recorded at 2016-04-11 20:40:14 UTC.\n * Metric `collectd.load.load.longterm`, max was above 99, recorded at 2016-04-11 20:40:14 UTC.\n * Metric `librato.swap.swap.cached`, average was absent by 60s, recorded at 2016-04-11 20:40:14 UTC."
        expected_topic = "Alert TooHighTemperature"
        self.check_webhook(
            "three_conditions_alert",
            expected_topic,
            expected_message,
            content_type="application/x-www-form-urlencoded",
        )

    def test_alert_clear(self) -> None:
        """A cleared alert produces a 'has cleared' message."""
        expected_topic = "Alert Alert_name"
        expected_message = "Alert [alert_name](https://metrics.librato.com/alerts#/6309313) has cleared at 2016-04-12 13:11:44 UTC!"
        self.check_webhook(
            "alert_cleared",
            expected_topic,
            expected_message,
            content_type="application/x-www-form-urlencoded",
        )

    def test_snapshot(self) -> None:
        """Snapshot payloads (raw JSON attachments) render a snapshot message."""
        self.IS_ATTACHMENT = True
        expected_topic = "Snapshots"
        expected_message = "**Hamlet** sent a [snapshot](http://snapshots.librato.com/chart/nr5l3n0c-82162.png) of [metric](https://metrics.librato.com/s/spaces/167315/explore/1731491?duration=72039&end_time=1460569409)."
        self.check_webhook(
            "snapshot",
            expected_topic,
            expected_message,
            content_type="application/x-www-form-urlencoded",
        )
        # Reset the class-level flag so other tests see form-encoded bodies.
        self.IS_ATTACHMENT = False

    def test_bad_request(self) -> None:
        """Malformed JSON in the payload is rejected."""
        with self.assertRaises(AssertionError) as e:
            self.check_webhook(
                "bad",
                "",
                "",
                content_type="application/json",
            )
        self.assertIn("Malformed JSON input", e.exception.args[0])

    def test_bad_msg_type(self) -> None:
        """An unrecognized Librato message type is rejected."""
        with self.assertRaises(AssertionError) as e:
            self.check_webhook(
                "bad_msg_type",
                "",
                "",
                content_type="application/x-www-form-urlencoded",
            )
        self.assertIn("Unexpected message type", e.exception.args[0])
5,283 | quote c | # Copyright (c) 2021 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""Quoting helpers for Windows
This contains code to help with quoting values for use in the variable Windows
shell. Right now it should only be used in ansible.windows as the interface is
not final and could be subject to change.
"""
# FOR INTERNAL COLLECTION USE ONLY
# The interfaces in this file are meant for use within the ansible.windows collection
# and may not remain stable to outside uses. Changes may be made in ANY release, even a bugfix release.
# See also: https://github.com/ansible/community/issues/539#issuecomment-780839686
# Please open an issue if you have questions about this.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from ansible.module_utils.six import text_type
# Characters that force quoting for the raw Win32 command line:
# any whitespace, tab, or an embedded double quote.
_UNSAFE_C = re.compile(u'[\\s\t"]')
# cmd.exe metacharacters (plus whitespace) that force quoting/escaping.
_UNSAFE_CMD = re.compile(u'[\\s\\(\\)\\^\\|%!"<>&]')
# PowerShell has 5 characters it uses as a single quote, we need to double up on all of them.
# https://github.com/PowerShell/PowerShell/blob/b7cb335f03fe2992d0cbd61699de9d9aafa1d7c1/src/System.Management.Automation/engine/parser/CharTraits.cs#L265-L272
# https://github.com/PowerShell/PowerShell/blob/b7cb335f03fe2992d0cbd61699de9d9aafa1d7c1/src/System.Management.Automation/engine/parser/CharTraits.cs#L18-L21
_UNSAFE_PWSH = re.compile(u"(['\u2018\u2019\u201a\u201b])")
def METHOD_NAME(s):  # type: (text_type) -> text_type
    """Quote *s* for the raw Win32 process command line.

    Produces a value safe for anything that calls the Win32 CreateProcess
    API, following the escaping rules described in
    https://docs.microsoft.com/en-us/archive/blogs/twistylittlepassagesallalike/everyone-quotes-command-line-arguments-the-wrong-way

    Args:
        s: The string to quote.

    Returns:
        (text_type): The quoted string value.
    """
    if not s:
        return u'""'
    # Quoting is only required when the value contains whitespace, a tab,
    # or a double quote.
    if re.search(u'[\\s\t"]', s) is None:
        return s
    # Escape embedded double quotes, then double every run of backslashes
    # that now precedes an escaped quote so the backslashes survive parsing.
    escaped = re.sub(r'(\\+)\\"', r'\1\1\"', s.replace('"', '\\"'))
    # A trailing backslash run would escape the closing quote - double it.
    escaped = re.sub(r'(\\+)$', r'\1\1', escaped)
    return u'"' + escaped + u'"'
def quote_cmd(s):  # type: (text_type) -> text_type
    """Quote *s* so cmd.exe treats it as one literal argument.

    Uses the caret-escaping scheme from
    https://docs.microsoft.com/en-us/archive/blogs/twistylittlepassagesallalike/everyone-quotes-command-line-arguments-the-wrong-way#a-better-method-of-quoting

    Args:
        s: The string to quote.

    Returns:
        (text_type): The quoted string value.
    """
    if not s:
        return u'""'
    if re.search(u'[\\s\\(\\)\\^\\|%!"<>&]', s) is None:
        return s
    # Escape each cmd metachar so cmd does not interpret it - e.g.
    # 'file &whoami.exe' would otherwise execute whoami.exe and splice its
    # output into the argument. '^' itself must be handled first, and a
    # double quote needs the special '\^' prefix.
    escaped = s
    for meta in u'^()%!"<>&|':
        if meta not in escaped:
            continue
        prefix = u'\\^' if meta == u'"' else u'^'
        escaped = escaped.replace(meta, prefix + meta)
    return u'^"' + escaped + u'^"'
def quote_pwsh(s):  # type: (text_type) -> text_type
    """Single-quote *s* for safe use inside a PowerShell expression.

    See
    https://docs.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_quoting_rules?view=powershell-5.1

    Args:
        s: The string to quote.

    Returns:
        (text_type): The quoted string value.
    """
    if not s:
        return u"''"
    # PowerShell recognises five different characters as a single quote.
    # Double every occurrence, then wrap the whole value in plain single
    # quotes - always quoting sidesteps PowerShell's conflicting rules
    # about when quoting is required.
    doubled = re.sub(u"(['\u2018\u2019\u201a\u201b])", u'\\1\\1', s)
    return u"'" + doubled + u"'"
5,284 | get delta controls | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The SCAFFOLD-related functions are based on https://github.com/Xtra-Computing/NIID-Bench
# MIT License
#
# Copyright (c) 2021 Yiqun Diao, Qinbin Li
#
# Copyright (c) 2020 International Business Machines
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import copy
import torch
from torch.optim import Optimizer
def get_lr_values(optimizer: Optimizer):
    """Return the learning rate of every param group in *optimizer*.

    Rates are returned in param-group order, one entry per group.
    """
    param_groups = optimizer.state_dict()["param_groups"]
    learning_rates = []
    for group in param_groups:
        learning_rates.append(group["lr"])
    return learning_rates
class PTScaffoldHelper(object):
    """Helper to be used with SCAFFOLD components.

    Implements the functions used for the algorithm proposed in
    Karimireddy et al. "SCAFFOLD: Stochastic Controlled Averaging for Federated Learning"
    (https://arxiv.org/abs/1910.06378) using PyTorch.
    SCAFFOLD-related functions are based on https://github.com/Xtra-Computing/NIID-Bench.
    See also Li et al. "Federated Learning on Non-IID Data Silos: An Experimental Study"
    (https://arxiv.org/abs/2102.02079).
    """

    def __init__(self):
        # SCAFFOLD control terms
        self.cnt = 0  # number of local model updates since get_params()
        self.c_global = None  # global control variate (held as a model copy)
        self.c_local = None  # client-local control variate (model copy)
        self.c_delta_para = None  # last computed control delta (numpy arrays)

    def init(self, model):
        """Create the control-variate models as zero-initialized copies of ``model``."""
        # create models for SCAFFOLD correction terms
        self.c_global = copy.deepcopy(model)
        self.c_local = copy.deepcopy(model)
        # Initialize correction term with zeros
        c_init_para = model.state_dict()
        for k in c_init_para.keys():
            c_init_para[k] = torch.zeros_like(c_init_para[k])
        self.c_global.load_state_dict(c_init_para)
        self.c_local.load_state_dict(c_init_para)

    def get_params(self):
        """Return (global, local) control state dicts and reset the step counter."""
        self.cnt = 0
        # Adapted from https://github.com/Xtra-Computing/NIID-Bench/blob/main/experiments.py#L371
        c_global_para = self.c_global.state_dict()
        c_local_para = self.c_local.state_dict()
        return c_global_para, c_local_para

    def model_update(self, model, curr_lr, c_global_para, c_local_para):
        """Apply the SCAFFOLD correction to ``model`` after a local SGD step."""
        # Update model using scaffold controls
        # See https://github.com/Xtra-Computing/NIID-Bench/blob/main/experiments.py#L391
        net_para = model.state_dict()
        for key in net_para:
            net_para[key] = net_para[key] - curr_lr * (c_global_para[key] - c_local_para[key])
        model.load_state_dict(net_para)
        # Track how many corrected updates were applied; used as the divisor
        # in terms_update().
        self.cnt += 1

    def terms_update(self, model, curr_lr, c_global_para, c_local_para, model_global):
        """Recompute the local control variate and its delta after local training."""
        # Update the local scaffold controls
        # See https://github.com/Xtra-Computing/NIID-Bench/blob/main/experiments.py#L403
        c_new_para = self.c_local.state_dict()
        self.c_delta_para = copy.deepcopy(self.c_local.state_dict())
        global_model_para = model_global.state_dict()
        net_para = model.state_dict()
        for key in net_para:
            # c_i^+ = c_i - c + (x_global - x_local) / (K * lr)
            c_new_para[key] = (
                c_new_para[key] - c_global_para[key] + (global_model_para[key] - net_para[key]) / (self.cnt * curr_lr)
            )
            # Delta shipped to the server, detached to numpy on CPU.
            self.c_delta_para[key] = (c_new_para[key] - c_local_para[key]).cpu().numpy()
        self.c_local.load_state_dict(c_new_para)

    def load_global_controls(self, weights):
        """Overwrite the global control variate with server-provided weights."""
        self.c_global.load_state_dict(weights)

    def METHOD_NAME(self):
        """Return the last control-variate delta.

        Raises:
            ValueError: if terms_update() has not been called yet.
        """
        if self.c_delta_para is None:
            raise ValueError("c_delta_para hasn't been computed yet!")
        return self.c_delta_para
5,285 | get search status | from .guard_utils import GuardApiClient
import json
import base64
class ClientResponse:
    """Minimal response object mimicking the subset of an HTTP response
    the connector consumes (``read()``, ``code``, ``status_code``,
    ``message``). Instances are populated attribute-by-attribute by
    APIClient.
    """

    # Human-readable status/error message; filled in by the caller.
    message = None

    def read(self):
        """Return the raw body bytes (mirrors http response .read())."""
        return self._content

    def __repr__(self):
        # __repr__ must return a str; guard against an unset (None) message,
        # which previously raised "TypeError: __repr__ returned non-string".
        return self.message if self.message is not None else ""
class APIClient():
    """Synchronous-style connector API client for IBM Guardium.

    Encodes the query, target URL and user into a base64 "search id"
    (build_searchId) at create_search time, and decodes it back at
    get_search_results time, delegating the actual HTTP work to
    GuardApiClient.
    """

    def __init__(self, connection, configuration):
        # Placeholder client to allow template transmission calls.
        # Remove when implementing data source API client.
        host = connection["host"]
        port = connection.get('port', '')
        headers = dict()
        url_modifier_function = None
        # TODO switch on cert_verify
        # cert_verify = connection.get('selfSignedCert', True)
        # NOTE(review): connection.get(False) looks up the key False and so
        # always yields None - almost certainly meant to be
        # connection.get('selfSignedCert', True); confirm before fixing.
        cert_verify = connection.get(False)
        auth = connection.get('auth', None)
        url = "https://" + host + ":" + str(port)
        params = dict()
        params["client_id"] = connection["client_id"]
        params["url"] = url
        params["client_secret"] = connection["client_secret"]
        params["timeout"] = connection['options'].get('timeout')
        params["config_uname"] = configuration["auth"]["username"]
        params["config_pass"] = configuration["auth"]["password"]
        self.client_aux = GuardApiClient(params,
                                         host,
                                         port,
                                         headers,
                                         url_modifier_function,
                                         cert_verify,
                                         auth
                                         )

    async def get_token(self):
        """Fetch (and cache on client_aux) an OAuth access token."""
        await self.client_aux.get_token()

    async def ping_data_source(self):
        # Pings the data source
        token = await self.client_aux.request_token()
        if self.client_aux.validate_response(token, "", False):
            return {"code": 200, "success": True}
        else:
            return {"success": False}

    def METHOD_NAME(self, search_id):
        # Check the current status of the search
        # Synchronous connector: searches are complete as soon as created.
        return {"code": 200, "status": "COMPLETED"}

    def get_status(self, search_id):
        # It is a synchronous connector.
        # return {"code": 200, "status": "COMPLETED"}
        # Wrap the always-COMPLETED status in a ClientResponse so callers
        # can treat it like an HTTP response.
        respObj = ClientResponse()
        respObj.code = "200"
        respObj.error_type = ""
        respObj.status_code = 200
        content = '{"search_id": "' + search_id + \
            '", "progress":"Completed", "status":"COMPLETED", "data": {"message":"Completed for the search id ' \
            'provided."}} '
        respObj._content = bytes(content, 'utf-8')
        return respObj

    async def delete_search(self, search_id):
        # Optional since this may not be supported by the data source API
        # Delete the search
        # No server-side state exists for a search, so this is a no-op.
        return {"code": 200, "success": True}

    def create_search(self, query_expression):
        """Encode ``query_expression`` into a search id, returned as a
        ClientResponse (200 with the id, 404/401 on failure)."""
        respObj = ClientResponse()
        respObj.code = "401"
        respObj.error_type = ""
        respObj.status_code = 401
        # print("query="+query_expression)
        if self.client_aux.access_token:
            self.query = query_expression
            response = self.build_searchId()
            if response is not None:
                respObj.code = "200"
                respObj.error_type = ""
                respObj.status_code = 200
                content = '{"search_id": "' + \
                    str(response) + \
                    '", "data": {"message": "Search id generated."}}'
                respObj._content = bytes(content, 'utf-8')
            else:
                respObj.code = "404"
                respObj.error_type = "Not found"
                respObj.status_code = 404
                respObj.message = "Could not generate search id."
        else:
            respObj.error_type = "Unauthorized: Access token could not be generated."
            respObj.message = "Unauthorized: Access token could not be generated."
        #
        return respObj

    def build_searchId(self):
        # It should be called only ONCE when transmit query is called
        # Structure of the search id is
        # '{"query": ' + json.dumps(self.query) + ', "credential" : ' + json.dumps(self.credential) + '}'
        # The JSON blob (query + target URL + user) is base64-encoded so it
        # can round-trip through the framework as an opaque search id.
        s_id = None
        if self.query is None:
            raise IOError(3001,
                          "Could not generate search id because 'query' or 'authorization token' or 'credential info' "
                          "is not available.")
        else:
            id_str = '{"query": ' + json.dumps(
                self.query) + ', "target" : "' + self.client_aux.url + '", "user":"' + self.client_aux.user + '"}'
            # print(id_str)
            id_byt = id_str.encode('utf-8')
            s_id = base64.b64encode(id_byt).decode()
            self.search_id = s_id
            # print(s_id)
        return s_id

    async def get_search_results(self, search_id, index_from=None, fetch_size=None):
        # Sends a GET request from guardium
        # This function calls Guardium to get data
        """Decode ``search_id`` and fetch the result window
        [index_from, index_from + fetch_size] from Guardium."""
        if self.client_aux.access_token:
            self.search_id = search_id
            self.decode_searchId()
            # Guardium indices are 1-based; shift the 0-based framework values.
            indx = int(index_from) + 1
            fsize = int(fetch_size) + 1
            # Dispatch on query shape: report queries vs quick-search queries.
            if "reportName" in self.query:
                response = await self.client_aux.handle_report(self.query, indx, fsize)
            if "category" in self.query:
                # print("TADA")
                response = await self.client_aux.handle_qs(self.query, indx, fsize)
            status_code = response.code
            # Though the connector gets the authorization token just before fetching the actual result there is a
            # possibility that the token returned is only valid for a second and response_code = 401 is returned.
            # Catch that situation (though remote) and process again.
            if status_code != 200:
                error_msg = json.loads(str(response.read(), 'utf-8'))
                error_code = error_msg.get('error', None)
                if status_code == 401 and error_code == "invalid_token":
                    self.authorization = None
                    token = await self.client_aux.get_token()
                    if token:
                        # NOTE(review): retry passes fetch_size (not fsize) and
                        # only retries handle_report - confirm intent.
                        response = await self.client_aux.handle_report(self.query, indx, fetch_size)
                        status_code = response.code
                    else:
                        raise ValueError(3002, "Authorization Token not received ")
            # Now START and STOP are optional -- A situation can occur that data set can be empty -- handle this
            # situation here
            if status_code == 200:
                #
                # Determine if the response is empty if empty Guardium sends {"ID": 0,
                # "Message": "ID=0 The Query did not retrieve any records"}
                # Raise an error --> 1010: ErrorCode.TRANSMISSION_RESPONSE_EMPTY_RESULT
                # response_content = self.raiseErrorIfEmptyResult(response)
                return response
            else:
                raise ValueError(1020, "Error -- Status Code is NOT 200!")
        else:
            raise ValueError(3002, "Authorization Token not received ")

    def decode_searchId(self):
        # These value (self.credential, self.query) must be present. self.authorization may not.
        try:
            id_dec64 = base64.b64decode(self.search_id)
            jObj = json.loads(id_dec64.decode('utf-8'))
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
        # narrowing to (ValueError, binascii.Error) would be safer.
        except:
            raise IOError(
                3001, "Could not decode search id content - " + self.search_id)
        self.query = json.loads(jObj.get("query", None))
        self.credential = jObj.get("credential", None)
        self.authorization = jObj.get("authorization", None)
        return
5,286 | test cc 003 | #!/usr/bin/env python
#
# Copyright 2005,2007,2010,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr, gr_unittest, filter, blocks
class test_single_pole_iir_filter(gr_unittest.TestCase):
    """QA for filter.single_pole_iir_filter_ff / _cc.

    The single-pole IIR computes y[n] = alpha*x[n] + (1-alpha)*y[n-1].
    With alpha = 1.0 the filter is an identity, which the *_001 tests
    exploit; the other tests compare against precomputed outputs for
    alpha = 0.125, optionally on vectors of block_size samples.
    """

    def setUp(self):
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def test_ff_001(self):
        """alpha=1.0 makes the float filter pass input through unchanged."""
        src_data = (0, 1000, 2000, 3000, 4000, 5000)
        expected_result = src_data
        src = blocks.vector_source_f(src_data)
        op = filter.single_pole_iir_filter_ff(1.0)
        dst = blocks.vector_sink_f()
        self.tb.connect(src, op, dst)
        self.tb.run()
        result_data = dst.data()
        self.assertFloatTuplesAlmostEqual(expected_result, result_data)

    def test_ff_002(self):
        """alpha=0.125 float filter against hand-computed reference output."""
        src_data = (0, 1000, 2000, 3000, 4000, 5000)
        expected_result = (
            0,
            125,
            359.375,
            689.453125,
            1103.271484,
            1590.36255)
        src = blocks.vector_source_f(src_data)
        op = filter.single_pole_iir_filter_ff(0.125)
        dst = blocks.vector_sink_f()
        self.tb.connect(src, op, dst)
        self.tb.run()
        result_data = dst.data()
        self.assertFloatTuplesAlmostEqual(expected_result, result_data, 3)

    def test_ff_003(self):
        """Vector mode: independent filters over interleaved streams."""
        block_size = 2
        src_data = (0, 1000, 2000, 3000, 4000, 5000)
        expected_result = (0, 125, 250, 484.375, 718.75, 1048.828125)
        src = blocks.vector_source_f(src_data)
        s2p = blocks.stream_to_vector(gr.sizeof_float, block_size)
        op = filter.single_pole_iir_filter_ff(0.125, block_size)
        p2s = blocks.vector_to_stream(gr.sizeof_float, block_size)
        dst = blocks.vector_sink_f()
        self.tb.connect(src, s2p, op, p2s, dst)
        self.tb.run()
        result_data = dst.data()
        self.assertFloatTuplesAlmostEqual(expected_result, result_data, 3)

    def test_cc_001(self):
        """alpha=1.0 makes the complex filter an identity."""
        src_data = (
            0 + 0j,
            1000 + 1000j,
            2000 + 2000j,
            3000 + 3000j,
            4000 + 4000j,
            5000 + 5000j)
        expected_result = src_data
        src = blocks.vector_source_c(src_data)
        op = filter.single_pole_iir_filter_cc(1.0)
        dst = blocks.vector_sink_c()
        self.tb.connect(src, op, dst)
        self.tb.run()
        result_data = dst.data()
        self.assertComplexTuplesAlmostEqual(expected_result, result_data)

    def test_cc_002(self):
        """alpha=0.125 complex filter; real/imag parts filtered independently."""
        src_data = (complex(0, 0), complex(1000, -1000), complex(2000, -2000),
                    complex(3000, -3000), complex(4000, -4000),
                    complex(5000, -5000))
        expected_result = (complex(0, 0), complex(125, -125),
                           complex(359.375, -359.375),
                           complex(689.453125, -689.453125),
                           complex(1103.271484, -1103.271484),
                           complex(1590.36255, -1590.36255))
        src = blocks.vector_source_c(src_data)
        op = filter.single_pole_iir_filter_cc(0.125)
        dst = blocks.vector_sink_c()
        self.tb.connect(src, op, dst)
        self.tb.run()
        result_data = dst.data()
        self.assertComplexTuplesAlmostEqual(expected_result, result_data, 3)

    def METHOD_NAME(self):
        """Vector mode for the complex filter (block_size=2)."""
        block_size = 2
        src_data = (complex(0, 0), complex(1000, -1000), complex(2000, -2000),
                    complex(3000, -3000), complex(4000, -4000),
                    complex(5000, -5000))
        expected_result = (complex(0, 0), complex(125, -125),
                           complex(250, -250),
                           complex(484.375, -484.375),
                           complex(718.75, -718.75),
                           complex(1048.828125, -1048.828125))
        src = blocks.vector_source_c(src_data)
        s2p = blocks.stream_to_vector(gr.sizeof_gr_complex, block_size)
        op = filter.single_pole_iir_filter_cc(0.125, block_size)
        p2s = blocks.vector_to_stream(gr.sizeof_gr_complex, block_size)
        dst = blocks.vector_sink_c()
        self.tb.connect(src, s2p, op, p2s, dst)
        self.tb.run()
        result_data = dst.data()
        self.assertComplexTuplesAlmostEqual(expected_result, result_data, 3)
# Allow running this QA file directly with GNU Radio's unittest runner.
if __name__ == '__main__':
    gr_unittest.run(test_single_pole_iir_filter)
5,287 | set up | from unittest import skip
from django.test import TestCase
from django.test.utils import override_settings
from people.forms.forms import BasePersonForm, UpdatePersonForm
from people.tests.factories import PersonFactory
from .uk_examples import UK2015ExamplesMixin
@override_settings(TWITTER_APP_ONLY_BEARER_TOKEN=None)
class TestValidators(UK2015ExamplesMixin, TestCase):
def METHOD_NAME(self):
    """Create the UK 2015 example fixtures plus a person to edit."""
    super().METHOD_NAME()
    self.person = PersonFactory.create(name="John Doe")
@skip("PersonIdentifiers are on Person Form")
def test_malformed_email(self):
    """An invalid email value should fail BasePersonForm validation."""
    form = BasePersonForm(
        {"name": "John Bercow", "email": "foo bar!"},
        initial={"person": self.person},
    )
    self.assertFalse(form.is_valid())
    self.assertEqual(
        form.errors, {"email": ["Enter a valid email address."]}
    )
@skip("Until rebased over upstream master")
def test_update_person_form_standing_no_party_no_constituency(self):
form = UpdatePersonForm(
{
"name": "John Doe",
"source": "Just testing...",
"standing_2015": "standing",
},
initial={"person": self.person},
)
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors,
{
"__all__": [
"If you mark the candidate as standing in the 2015 General Election, you must select a post"
]
},
)
@skip("Until rebased over upstream master")
def test_update_person_form_standing_no_party_but_gb_constituency(self):
form = UpdatePersonForm(
{
"name": "John Doe",
"source": "Just testing...",
"standing_2015": "standing",
"constituency_2015": "65808",
},
initial={"person": self.person},
)
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors,
{
"__all__": [
"You must specify a party for the 2015 General Election"
]
},
)
def test_update_person_form_standing_party_and_gb_constituency(self):
form = UpdatePersonForm(
{
"name": "John Doe",
"source": "Just testing...",
"standing_2015": "standing",
"constituency_2015": "65808",
"party_GB_2015": self.conservative_party.id,
},
initial={"person": self.person},
)
self.assertTrue(form.is_valid())
# When 'not-standing' is selected, it shouldn't matter whether you
# specify party of constituency:
def test_update_person_form_not_standing_no_party_no_constituency(self):
form = UpdatePersonForm(
{
"name": "John Doe",
"source": "Just testing...",
"standing_2015": "not-standing",
},
initial={"person": self.person},
)
self.assertTrue(form.is_valid())
def test_update_person_form_not_standing_no_party_but_gb_constituency(self):
form = UpdatePersonForm(
{
"name": "John Doe",
"source": "Just testing...",
"standing_2015": "not-standing",
"constituency_2015": "65808",
},
initial={"person": self.person},
)
self.assertTrue(form.is_valid())
def test_update_person_form_not_standing_party_and_gb_constituency(self):
form = UpdatePersonForm(
{
"name": "John Doe",
"source": "Just testing...",
"standing_2015": "standing",
"constituency_2015": "65808",
"party_GB_2015": self.conservative_party.id,
},
initial={"person": self.person},
)
self.assertTrue(form.is_valid())
# Similarly, when 'not-sure' is selected, it shouldn't matter
# whether you specify party of constituency:
def test_update_person_form_not_sure_no_party_no_constituency(self):
form = UpdatePersonForm(
{
"name": "John Doe",
"source": "Just testing...",
"standing_2015": "not-sure",
},
initial={"person": self.person},
)
self.assertTrue(form.is_valid())
def test_update_person_form_not_sure_no_party_but_gb_constituency(self):
form = UpdatePersonForm(
{
"name": "John Doe",
"source": "Just testing...",
"standing_2015": "not-sure",
"constituency_2015": "65808",
},
initial={"person": self.person},
)
self.assertTrue(form.is_valid())
def test_update_person_form_not_sure_party_and_gb_constituency(self):
form = UpdatePersonForm(
{
"name": "John Doe",
"source": "Just testing...",
"standing_2015": "not-sure",
"constituency_2015": "65808",
"party_GB_2015": self.conservative_party.id,
},
initial={"person": self.person},
)
self.assertTrue(form.is_valid()) |
5,288 | test init | import os
import tempfile
from subprocess import CompletedProcess
from pytest_mock import MockerFixture
from reconcile.utils import lean_terraform_client
def METHOD_NAME(mocker: MockerFixture) -> None:
    """`init` shells out to `terraform init` and relays rc/stdout/stderr."""
    mocker.patch(
        "reconcile.utils.lean_terraform_client.os"
    ).environ.copy.return_value = {}
    subprocess_mock = mocker.patch(
        "reconcile.utils.lean_terraform_client.subprocess"
    )
    subprocess_mock.run.return_value = CompletedProcess(
        args=[], returncode=0, stdout=b"out", stderr=b"err"
    )
    rc, out, err = lean_terraform_client.init(
        "working_dir",
        env={"TF_LOG": "INFO"},
    )
    assert (rc, out, err) == (0, "out", "err")
    subprocess_mock.run.assert_called_once_with(
        ["terraform", "init", "-input=false", "-no-color"],
        capture_output=True,
        check=False,
        cwd="working_dir",
        env={"TF_LOG": "INFO"},
    )
def test_output(mocker: MockerFixture) -> None:
    """`output` runs `terraform output -json` and relays rc/stdout/stderr."""
    mocker.patch(
        "reconcile.utils.lean_terraform_client.os"
    ).environ.copy.return_value = {}
    subprocess_mock = mocker.patch(
        "reconcile.utils.lean_terraform_client.subprocess"
    )
    subprocess_mock.run.return_value = CompletedProcess(
        args=[], returncode=0, stdout=b"out", stderr=b"err"
    )
    rc, out, err = lean_terraform_client.output(
        "working_dir",
        env={"TF_LOG": "INFO"},
    )
    assert (rc, out, err) == (0, "out", "err")
    subprocess_mock.run.assert_called_once_with(
        ["terraform", "output", "-json"],
        capture_output=True,
        check=False,
        cwd="working_dir",
        env={"TF_LOG": "INFO"},
    )
def test_plan(mocker: MockerFixture) -> None:
    """`plan` runs `terraform plan -out=<file>` and relays rc/stdout/stderr."""
    mocker.patch(
        "reconcile.utils.lean_terraform_client.os"
    ).environ.copy.return_value = {}
    subprocess_mock = mocker.patch(
        "reconcile.utils.lean_terraform_client.subprocess"
    )
    subprocess_mock.run.return_value = CompletedProcess(
        args=[], returncode=0, stdout=b"out", stderr=b"err"
    )
    rc, out, err = lean_terraform_client.plan(
        working_dir="working_dir",
        out="tfplan",
        env={"TF_LOG": "INFO"},
    )
    assert (rc, out, err) == (0, "out", "err")
    subprocess_mock.run.assert_called_once_with(
        ["terraform", "plan", "-out=tfplan", "-input=false", "-no-color"],
        capture_output=True,
        check=False,
        cwd="working_dir",
        env={"TF_LOG": "INFO"},
    )
def test_apply(mocker: MockerFixture) -> None:
    """`apply` runs `terraform apply <plan>` and relays rc/stdout/stderr."""
    mocker.patch(
        "reconcile.utils.lean_terraform_client.os"
    ).environ.copy.return_value = {}
    subprocess_mock = mocker.patch(
        "reconcile.utils.lean_terraform_client.subprocess"
    )
    subprocess_mock.run.return_value = CompletedProcess(
        args=[], returncode=0, stdout=b"out", stderr=b"err"
    )
    rc, out, err = lean_terraform_client.apply(
        working_dir="working_dir",
        dir_or_plan="tfplan",
        env={"TF_LOG": "INFO"},
    )
    assert (rc, out, err) == (0, "out", "err")
    subprocess_mock.run.assert_called_once_with(
        ["terraform", "apply", "-input=false", "-no-color", "tfplan"],
        capture_output=True,
        check=False,
        cwd="working_dir",
        env={"TF_LOG": "INFO"},
    )
def test_show_json(mocker: MockerFixture) -> None:
    """`show_json` runs `terraform show -json` and returns the parsed stdout."""
    mocker.patch(
        "reconcile.utils.lean_terraform_client.os"
    ).environ.copy.return_value = {}
    subprocess_mock = mocker.patch(
        "reconcile.utils.lean_terraform_client.subprocess"
    )
    subprocess_mock.run.return_value = CompletedProcess(
        args=[], returncode=0, stdout=b"{}", stderr=b""
    )
    parsed = lean_terraform_client.show_json(
        working_dir="working_dir",
        path="tfplan",
    )
    assert parsed == {}
    subprocess_mock.run.assert_called_once_with(
        ["terraform", "show", "-no-color", "-json", "tfplan"],
        capture_output=True,
        check=False,
        cwd="working_dir",
        env={},
    )
def test_terraform_component() -> None:
    """End-to-end smoke test: init/output/plan/show/apply an empty config.

    NOTE(review): unlike the mocked tests above, this is an integration test
    that requires a real `terraform` binary on PATH — confirm CI provides it.
    """
    with tempfile.TemporaryDirectory() as working_dir:
        # An empty main.tf is a valid (no-op) Terraform configuration.
        with open(os.path.join(working_dir, "main.tf"), "w"):
            pass
        assert lean_terraform_client.init(working_dir)[0] == 0
        assert lean_terraform_client.output(working_dir)[0] == 0
        assert lean_terraform_client.plan(working_dir, "tfplan")[0] == 0
        assert lean_terraform_client.show_json(working_dir, "tfplan") is not None
        assert lean_terraform_client.apply(working_dir, "tfplan")[0] == 0
5,289 | get credentials interactive | # -*- coding: utf-8 -*-
#
# Copyright (c) 2014 SUSE
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# SUSE trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate SUSE trademarks that are incorporated
# in this software or its documentation.
from __future__ import print_function
from spacewalk.susemanager.helpers import cli_ask
from spacewalk.susemanager.helpers import timeout
try:
import xmlrpc.client as xmlrpc_client
except ImportError:
import xmlrpclib as xmlrpc_client
# pylint: disable=line-too-long
class MaximumNumberOfAuthenticationFailures(Exception):
    """Raised when interactive login fails MAX_NUM_OF_CREDENTIAL_FAILURES_ALLOWED times."""
    pass
class Authenticator(object):
    """
    Cache authentication, implements password-less connect.
    """

    MAX_NUM_OF_CREDENTIAL_FAILURES_ALLOWED = 3  # pylint: disable=invalid-name

    def __init__(self, connection, user, password, token):
        # XML-RPC connection proxy exposing auth.login().
        self.connection = connection
        # Cached session token (may be passed in pre-obtained).
        self._token = token
        self.user = user
        self.password = password
        # How many times the interactive prompt has been shown.
        self.credentials_prompts = 0
        # True once the cached (config-file) credentials have been consumed;
        # starts True when no credentials were supplied at all.
        self.cached_credentials_used = not self.has_credentials()

    def token(self):
        """
        Authenticate user.

        This method obtains a new token when `self.token` is `None`.

        The code uses the cached username/password when available.
        These cached credentials are used just once, they are discarded if they
        do not work.

        The code asks the user to enter a new pair of username/password
        when either the cached credentials are not available or when they have
        been discarded.

        If an interactively entered pair of credentials does not work it
        is discarded and a new one is requested to the user. The user has a
        limited number of attempts to enter the right username/password; then
        the code raises a `MaximumNumberOfAuthenticationFailures` exception.
        This mimics how other Unix programs handle credentials (e.g. `sudo`).
        """
        if not self._token:
            if not self.has_credentials():
                self.METHOD_NAME()
                # NOTE(review): METHOD_NAME() itself also increments
                # credentials_prompts, so the prompt is counted twice here —
                # confirm whether that double-counting is intentional.
                self.credentials_prompts += 1
            try:
                self._token = self.connection.auth.login(self.user, self.password)
            except xmlrpc_client.Fault as ex:
                # Fault 2950 is the server's "bad username/password" error.
                if ex.faultCode == 2950 and "Either the password or username is incorrect" in ex.faultString:
                    if self.has_credentials() and not self.cached_credentials_used:
                        # Try to reuse the credentials stored into the configuration file
                        # to obtain a token. However ensure these are no longer used if
                        # they are not valid.
                        self.cached_credentials_used = True
                        self._discard_credentials()
                    elif self.credentials_prompts < Authenticator.MAX_NUM_OF_CREDENTIAL_FAILURES_ALLOWED:
                        # The cached credentials are either invalid or have
                        # never been stored inside of the local configuration
                        # file. Ask the user to enter new credentials
                        self.credentials_prompts += 1
                        self.METHOD_NAME()
                    else:
                        # - The cached credentials failed or have never been
                        #   stored into the local configuration file.
                        # - The user has already tried to authenticate with
                        #   new credentials but they didn't work.
                        #   The credential prompt has been shown
                        #   MAX_NUM_OF_CREDENTIAL_FAILURES_ALLOWED times.
                        raise MaximumNumberOfAuthenticationFailures
                    # Retry login recursively with the (possibly new) credentials.
                    return self.token()
                else:
                    raise ex
        return self._token

    def has_token(self):
        # A non-empty token string counts as "authenticated".
        return self._token is not None and len(self._token) > 0

    def has_credentials(self):
        # Truthy only when both username and password are set.
        return self.user and self.password

    def discard_token(self):
        """
        Discard the cached token.
        """
        self._token = None

    def _discard_credentials(self):
        # Forget username/password so the next attempt prompts interactively.
        self.user = None
        self.password = None

    @timeout(60, "Timeout. No user input for 60 seconds. Exiting...")
    def METHOD_NAME(self):
        """
        Get credentials from CLI interactively.
        """
        print("Please enter the credentials of SUSE Manager Administrator.")
        self.user = cli_ask("Login")
        self.password = cli_ask("Password", password=True)
        self.credentials_prompts += 1
5,290 | test update runtime choices | # IMPORTATION STANDARD
# IMPORTATION THIRDPARTY
import pandas as pd
import pytest
# IMPORTATION INTERNAL
from openbb_terminal.core.session.current_user import (
PreferencesModel,
copy_user,
)
from openbb_terminal.econometrics.econometrics_controller import EconometricsController
# pylint: disable=W0621
@pytest.fixture()
def controller(mocker):
    """Return an EconometricsController with the current user and session mocked."""
    preferences = PreferencesModel(USE_PROMPT_TOOLKIT=True)
    mock_current_user = copy_user(preferences=preferences)
    mocker.patch(
        target="openbb_terminal.core.session.current_user.__current_user",
        new=mock_current_user,
    )
    # session patched truthy — presumably to exercise the interactive
    # (prompt-toolkit) code path; confirm against the controller module.
    mocker.patch("openbb_terminal.econometrics.econometrics_controller.session", True)
    return EconometricsController()
def METHOD_NAME(controller):
    """update_runtime_choices leaves the controller with a non-empty choices map."""
    controller.update_runtime_choices()
    assert controller.choices
@pytest.mark.parametrize(
    "other",
    [
        ["-f", "badpath.xlsx"],
        ["-f", "badpath.xlsx", "alias"],
        ["badpath.xlsx"],
        ["-f", "cancer", "-a", "dataset"],
        [],
        ["-ex"],
    ],
)
def test_call_load(controller, other):
    """Smoke-test call_load across valid, invalid and empty argument lists."""
    controller.call_load(other)
@pytest.mark.parametrize("other", [["dataset"], ["-n", "data"]])
def test_call_export(controller, other):
    """Smoke-test call_export after loading the bundled 'cancer' dataset."""
    controller.call_load(["-f", "cancer", "-a", "dataset"])
    controller.call_export(other)
@pytest.mark.skip
@pytest.mark.record_stdout
@pytest.mark.parametrize("other", [["data"], ["-n", "dataset"], []])
def test_call_remove(controller, other):
    """Smoke-test call_remove (currently skipped)."""
    controller.call_load(["-f", "cancer", "-a", "dataset"])
    controller.call_remove(other)
@pytest.mark.skip
@pytest.mark.parametrize("other", [["data"], ["-n", "data"], []])
def test_call_options(controller, other):
    """Smoke-test call_options (currently skipped)."""
    controller.call_options(other)
@pytest.mark.parametrize("other", [["cancer-dataset"], ["-c", "data"], []])
def test_call_plot(controller, other):
    """Smoke-test call_plot with valid, invalid and empty column specs."""
    controller.call_load(["-f", "cancer", "-a", "dataset"])
    controller.call_plot(other)
@pytest.mark.record_stdout
@pytest.mark.parametrize(
    "other",
    [
        ["data"],
        ["-n", "dataset", "-s", "badcol"],
        ["-n", "dataset", "-s", "cancer"],
        [],
    ],
)
def test_call_show(controller, other):
    """Smoke-test call_show, including showing an empty replacement DataFrame."""
    controller.call_load(["-f", "cancer", "-a", "dataset"])
    controller.call_show(other)
    # Swap in an empty dataset to cover the empty-DataFrame display path.
    controller.datasets = {"dataset": pd.DataFrame()}
    controller.call_show(["dataset"])
@pytest.mark.record_stdout
@pytest.mark.parametrize("other", [["data"], ["-n", "dataset"], []])
def test_call_desc(controller, other):
    """Smoke-test call_desc, including describing an empty DataFrame."""
    controller.call_load(["-f", "cancer", "-a", "dataset"])
    controller.call_desc(other)
    # Swap in an empty dataset to cover the empty-DataFrame describe path.
    controller.datasets = {"dataset": pd.DataFrame()}
    controller.call_desc(["-n", "dataset"])
@pytest.mark.record_stdout
@pytest.mark.parametrize(
    "other",
    [
        ["data"],
        ["-n", "dataset"],
        [],
        ["cancer-dataset", "int"],
        ["cancer-dataset", "badbad"],
        ["cancer-dataset", "date"],
    ],
)
def test_call_type(controller, other):
    """Smoke-test call_type with valid, unknown and date type conversions."""
    controller.call_load(["-f", "cancer", "-a", "dataset"])
    controller.call_type(other)
@pytest.mark.record_stdout
def test_call_index(controller):
    """Smoke-test call_index: add, re-add, drop, and drop with a bad column."""
    controller.call_load(["-f", "cancer", "-a", "dataset"])
    controller.call_index(["dataset", "cancer", "-a"])
    # Repeated add — presumably checks setting the same index twice is safe.
    controller.call_index(["dataset", "cancer", "-a"])
    controller.call_index(["dataset", "cancer", "-d"])
    # "oogabooga" is an intentionally nonexistent column name.
    controller.call_index(["dataset", "cancer", "oogabooga", "-d"])
@pytest.mark.record_stdout
@pytest.mark.parametrize("other", [["data"], ["-n", "dataset"], []])
def test_call_ols(controller, other):
    """Smoke-test call_ols with valid, invalid and empty argument lists."""
    controller.call_load(["-f", "cancer", "-a", "dataset"])
    controller.call_ols(other)
@pytest.mark.record_stdout
@pytest.mark.parametrize(
    "other", [["cancer-dataset"], ["cancer-datast"], ["-n", "dataset"], []]
)
def test_call_norm(controller, other):
    """Smoke-test call_norm, including a misspelled dataset name."""
    controller.call_load(["-f", "cancer", "-a", "dataset"])
    controller.call_norm(other)
@pytest.mark.record_stdout
@pytest.mark.parametrize(
    "other", [["cancer-dataset"], ["cancer-datast"], ["-n", "dataset"], []]
)
def test_call_root(controller, other):
    """Smoke-test call_root, including a misspelled dataset name."""
    controller.call_load(["-f", "cancer", "-a", "dataset"])
    controller.call_root(other)
@pytest.mark.record_stdout
@pytest.mark.parametrize(
    "other",
    [
        ["cancer-dataset"],
        ["-r", "cancer-dataset", "population-dataset", "-t", "pols", "-ee", "-te"],
    ],
)
def test_call_panel(controller, other):
    """Smoke-test call_panel with a bare column and a full pooled-OLS spec."""
    controller.call_load(["-f", "cancer", "-a", "dataset"])
    controller.call_panel(other)
@pytest.mark.record_stdout
def test_call_compare(controller):
    """Smoke-test call_compare with no prior regressions run."""
    controller.call_load(["-f", "cancer", "-a", "dataset"])
    controller.call_compare([])
@pytest.mark.record_stdout
@pytest.mark.parametrize("other", [["data"], ["-n", "dataset"], []])
def test_call_dwat(controller, other):
    """Smoke-test call_dwat (Durbin-Watson) argument handling."""
    controller.call_load(["-f", "cancer", "-a", "dataset"])
    controller.call_dwat(other)
@pytest.mark.record_stdout
@pytest.mark.parametrize("other", [["data"], ["-n", "dataset"], []])
def test_call_bgod(controller, other):
    """Smoke-test call_bgod argument handling."""
    controller.call_load(["-f", "cancer", "-a", "dataset"])
    controller.call_bgod(other)
@pytest.mark.record_stdout
@pytest.mark.parametrize("other", [["data"], ["-n", "dataset"], []])
def test_call_bpag(controller, other):
    """Smoke-test call_bpag argument handling."""
    controller.call_load(["-f", "cancer", "-a", "dataset"])
    controller.call_bpag(other)
@pytest.mark.record_stdout
@pytest.mark.parametrize("other", [["data"], ["-n", "dataset"], []])
def test_call_granger(controller, other):
    """Smoke-test call_granger argument handling."""
    controller.call_load(["-f", "cancer", "-a", "dataset"])
    controller.call_granger(other)
@pytest.mark.record_stdout
@pytest.mark.parametrize("other", [["data"], ["-n", "dataset"], []])
def test_call_coint(controller, other):
    """Smoke-test call_coint argument handling."""
    controller.call_load(["-f", "cancer", "-a", "dataset"])
    controller.call_coint(other)
5,291 | test patch with pass target context manager | import pytest
from localstack.utils.patch import Patch, get_defining_object, patch
def echo(arg):
    """Return *arg* rendered as an ``echo: <arg>`` string."""
    return "echo: {}".format(arg)
class MyEchoer:
    """Fixture class exposing instance, class and static echo variants."""

    def do_echo(self, arg):
        """Instance-method echo."""
        return "do_echo: {}".format(arg)

    @classmethod
    def do_class_echo(cls, arg):
        """Class-method echo."""
        return "do_class_echo: {}".format(arg)

    @staticmethod
    def do_static_echo(arg):
        """Static-method echo."""
        return "do_static_echo: {}".format(arg)
def test_patch_context_manager():
    """A Patch applied via `with` replaces echo inside the block and is undone on exit."""
    assert echo("foo") == "echo: foo"

    def monkey(arg):
        return f"monkey: {arg}"

    with Patch(get_defining_object(echo), "echo", monkey):
        assert echo("foo") == "monkey: foo"
    assert echo("foo") == "echo: foo"
def METHOD_NAME():
    """Patch via `with` where the replacement wraps the original callable.

    NOTE(review): `uppercase` takes the original as its first parameter —
    presumably Patch passes the target through by default here; confirm
    against Patch's implementation.
    """
    assert echo("foo") == "echo: foo"

    def uppercase(target, arg):
        return target(arg).upper()

    with Patch(get_defining_object(echo), "echo", uppercase):
        assert echo("foo") == "ECHO: FOO"
    assert echo("foo") == "echo: foo"
def test_patch_decorator():
@patch(target=echo, pass_target=False)
def monkey(arg):
return f"monkey: {arg}"
assert echo("foo") == "monkey: foo"
monkey.patch.undo()
assert echo("foo") == "echo: foo"
def test_patch_decorator_with_pass_target():
    """@patch (default pass_target) hands the original callable to the replacement."""
    @patch(target=echo)
    def uppercase(target, arg):
        return target(arg).upper()

    assert echo("foo") == "ECHO: FOO"
    uppercase.patch.undo()
    assert echo("foo") == "echo: foo"
def test_patch_decorator_on_method():
    """Patching an unbound method affects existing and new instances until undo."""
    @patch(target=MyEchoer.do_echo)
    def uppercase(target, self, arg):
        # Unbound target: self must be forwarded explicitly.
        return target(self, arg).upper()

    obj = MyEchoer()
    assert obj.do_echo("foo") == "DO_ECHO: FOO"
    uppercase.patch.undo()
    assert obj.do_echo("foo") == "do_echo: foo"
    assert MyEchoer().do_echo("foo") == "do_echo: foo"
def test_patch_decorator_on_bound_method_with_pass_target():
    """Patching a bound method affects only that instance.

    NOTE(review): the replacement signature here is (self, target, arg) —
    the reverse order of the unbound-method case above; presumably
    intentional for bound targets, confirm against Patch's implementation.
    """
    obj = MyEchoer()

    @patch(target=obj.do_echo)
    def uppercase(self, target, arg):
        # Bound target: self is already captured, so target takes only arg.
        return target(arg).upper()

    assert obj.do_echo("foo") == "DO_ECHO: FOO"
    # Other instances are untouched.
    assert MyEchoer().do_echo("foo") == "do_echo: foo"
    uppercase.patch.undo()
    assert obj.do_echo("foo") == "do_echo: foo"
    assert MyEchoer().do_echo("foo") == "do_echo: foo"
def test_patch_decorator_on_bound_method():
    """pass_target=False replacement on a bound method; other instances untouched."""
    obj = MyEchoer()

    @patch(target=obj.do_echo, pass_target=False)
    def monkey(self, arg):
        return f"monkey: {arg}"

    assert obj.do_echo("foo") == "monkey: foo"
    assert MyEchoer().do_echo("foo") == "do_echo: foo"
    monkey.patch.undo()
    assert obj.do_echo("foo") == "do_echo: foo"
    assert MyEchoer().do_echo("foo") == "do_echo: foo"
def test_patch_decorator_twice_on_method():
    """Stacked patches compose (last applied wraps the first) and undo in reverse order."""
    @patch(target=MyEchoer.do_echo)
    def monkey1(self, *args):
        return f"monkey: {args[-1]}"

    @patch(target=MyEchoer.do_echo)
    def monkey2(fn, self, *args):
        # fn is the previously-patched method (monkey1's wrapper).
        return f"monkey 2: {fn(*args)}"

    obj = MyEchoer()
    try:
        assert obj.do_echo("foo") == "monkey 2: monkey: foo"
        assert MyEchoer().do_echo("foo") == "monkey 2: monkey: foo"
    finally:
        # Undo in reverse application order to restore the original cleanly.
        monkey2.patch.undo()
        monkey1.patch.undo()
    assert obj.do_echo("foo") == "do_echo: foo"
    assert MyEchoer().do_echo("foo") == "do_echo: foo"
@pytest.mark.parametrize("pass_target", [True, False])
def test_patch_decorator_twice_on_bound_method(pass_target):
    """Stacked patches on a bound method compose and undo cleanly.

    NOTE(review): monkey1 ignores any passed-through target, so the test
    passes for both pass_target values — confirm that is the intent of the
    parametrization.
    """
    obj = MyEchoer()

    @patch(target=obj.do_echo, pass_target=pass_target)
    def monkey1(self, *args):
        return f"monkey: {args[-1]}"

    @patch(target=obj.do_echo, pass_target=True)
    def monkey2(self, fn, *args):
        # fn is the previously-patched bound method (monkey1's wrapper).
        return f"monkey 2: {fn(*args)}"

    assert obj.do_echo("foo") == "monkey 2: monkey: foo"
    # Other instances are untouched by bound-method patches.
    assert MyEchoer().do_echo("foo") == "do_echo: foo"
    monkey2.patch.undo()
    monkey1.patch.undo()
    assert obj.do_echo("foo") == "do_echo: foo"
    assert MyEchoer().do_echo("foo") == "do_echo: foo"
def test_patch_decorator_on_class_method():
    """Patching a classmethod works for both class-level and instance-level calls."""
    @patch(target=MyEchoer.do_class_echo)
    def uppercase(target, *args):
        if len(args) > 1:
            # this happens when the method is called on an object, the first arg will be the object
            arg = args[1]
        else:
            arg = args[0]
        return target(arg).upper()

    assert MyEchoer.do_class_echo("foo") == "DO_CLASS_ECHO: FOO"
    assert MyEchoer().do_class_echo("foo") == "DO_CLASS_ECHO: FOO"
    uppercase.patch.undo()
    assert MyEchoer.do_class_echo("foo") == "do_class_echo: foo"
    assert MyEchoer().do_class_echo("foo") == "do_class_echo: foo"
def test_get_defining_object():
    """get_defining_object resolves the module/class/instance owning a callable."""
    from localstack.utils import strings
    from localstack.utils.strings import short_uid

    # module
    assert get_defining_object(short_uid) == strings
    # unbound method (=function defined by a class)
    assert get_defining_object(MyEchoer.do_echo) == MyEchoer
    obj = MyEchoer()
    # bound method
    assert get_defining_object(obj.do_echo) == obj
    # class method referenced by an object
    assert get_defining_object(obj.do_class_echo) == MyEchoer
    # class method referenced by the class
    assert get_defining_object(MyEchoer.do_class_echo) == MyEchoer
    # static method (= function defined by a class)
    assert get_defining_object(MyEchoer.do_static_echo) == MyEchoer
5,292 | send | """Base class for a Comm"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import uuid
from traitlets.config import LoggingConfigurable
from ipykernel.kernelbase import Kernel
from ipykernel.jsonutil import json_clean
from traitlets import Instance, Unicode, Bytes, Bool, Dict, Any, default
class Comm(LoggingConfigurable):
    """Class for communicating between a Frontend and a Kernel

    Wraps the comm_open / comm_msg / comm_close message lifecycle on the
    IOPub channel. The primary side opens its peer on construction; the
    secondary side merely marks itself open.
    """
    # Owning kernel; resolved lazily from the singleton when not supplied.
    kernel = Instance('ipykernel.kernelbase.Kernel', allow_none=True)

    @default('kernel')
    def _default_kernel(self):
        # Fall back to the process-wide kernel instance, if one exists.
        if Kernel.initialized():
            return Kernel.instance()

    # Unique id shared by both sides of the comm.
    comm_id = Unicode()

    @default('comm_id')
    def _default_comm_id(self):
        return uuid.uuid4().hex

    primary = Bool(True, help="Am I the primary or secondary Comm?")
    target_name = Unicode('comm')
    target_module = Unicode(None, allow_none=True, help="""requirejs module from
        which to load comm target.""")
    # IOPub topic used as the zmq ident for this comm's messages.
    topic = Bytes()

    @default('topic')
    def _default_topic(self):
        return ('comm-%s' % self.comm_id).encode('ascii')

    _open_data = Dict(help="data dict, if any, to be included in comm_open")
    _close_data = Dict(help="data dict, if any, to be included in comm_close")
    _msg_callback = Any()
    _close_callback = Any()
    _closed = Bool(True)

    def __init__(self, target_name='', data=None, metadata=None, buffers=None, **kwargs):
        if target_name:
            kwargs['target_name'] = target_name
        super(Comm, self).__init__(**kwargs)
        if self.kernel:
            if self.primary:
                # I am primary, open my peer.
                self.open(data=data, metadata=metadata, buffers=buffers)
            else:
                self._closed = False

    def _publish_msg(self, msg_type, data=None, metadata=None, buffers=None, **keys):
        """Helper for sending a comm message on IOPub"""
        data = {} if data is None else data
        metadata = {} if metadata is None else metadata
        content = json_clean(dict(data=data, comm_id=self.comm_id, **keys))
        self.kernel.session.METHOD_NAME(self.kernel.iopub_socket, msg_type,
            content,
            metadata=json_clean(metadata),
            parent=self.kernel._parent_header,
            ident=self.topic,
            buffers=buffers,
        )

    def __del__(self):
        """trigger close on gc"""
        self.close()

    # publishing messages

    def open(self, data=None, metadata=None, buffers=None):
        """Open the frontend-side version of this comm"""
        if data is None:
            data = self._open_data
        comm_manager = getattr(self.kernel, 'comm_manager', None)
        if comm_manager is None:
            raise RuntimeError("Comms cannot be opened without a kernel "
                               "and a comm_manager attached to that kernel.")
        comm_manager.register_comm(self)
        try:
            self._publish_msg('comm_open',
                data=data, metadata=metadata, buffers=buffers,
                target_name=self.target_name,
                target_module=self.target_module,
            )
            self._closed = False
        # NOTE(review): bare except is deliberate here — on *any* publish
        # failure the comm is unregistered and the exception re-raised.
        except:
            comm_manager.unregister_comm(self)
            raise

    def close(self, data=None, metadata=None, buffers=None):
        """Close the frontend-side version of this comm"""
        if self._closed:
            # only close once
            return
        self._closed = True
        # nothing to send if we have no kernel
        # can be None during interpreter cleanup
        if not self.kernel:
            return
        if data is None:
            data = self._close_data
        self._publish_msg('comm_close',
            data=data, metadata=metadata, buffers=buffers,
        )
        self.kernel.comm_manager.unregister_comm(self)

    def METHOD_NAME(self, data=None, metadata=None, buffers=None):
        """Send a message to the frontend-side version of this comm"""
        self._publish_msg('comm_msg',
            data=data, metadata=metadata, buffers=buffers,
        )

    # registering callbacks

    def on_close(self, callback):
        """Register a callback for comm_close

        Will be called with the `data` of the close message.

        Call `on_close(None)` to disable an existing callback.
        """
        self._close_callback = callback

    def on_msg(self, callback):
        """Register a callback for comm_msg

        Will be called with the `data` of any comm_msg messages.

        Call `on_msg(None)` to disable an existing callback.
        """
        self._msg_callback = callback

    # handling of incoming messages

    def handle_close(self, msg):
        """Handle a comm_close message"""
        self.log.debug("handle_close[%s](%s)", self.comm_id, msg)
        if self._close_callback:
            self._close_callback(msg)

    def handle_msg(self, msg):
        """Handle a comm_msg message"""
        self.log.debug("handle_msg[%s](%s)", self.comm_id, msg)
        if self._msg_callback:
            shell = self.kernel.shell
            # Fire IPython pre/post-execute hooks around the user callback
            # so comm handlers behave like regular cell execution.
            if shell:
                shell.events.trigger('pre_execute')
            self._msg_callback(msg)
            if shell:
                shell.events.trigger('post_execute')
__all__ = ['Comm'] |
5,293 | test signup passes through privacy acceptance | import datetime
from unittest.mock import sentinel
import pytest
from sqlalchemy.exc import IntegrityError
from h.models import Activation, User
from h.services.exceptions import ConflictError
from h.services.user_signup import UserSignupService, user_signup_service_factory
class TestUserSignupService:
    """Unit tests for UserSignupService.signup: activation, identities,
    email sending, subscriptions, and concurrent-signup conflict handling."""

    def test_signup_returns_user(self, svc):
        user = svc.signup(username="foo", email="foo@bar.com")

        assert isinstance(user, User)

    def test_signup_creates_user_in_db(self, db_session, svc):
        svc.signup(username="foo", email="foo@bar.com")

        # Commit and close to prove the user was actually persisted,
        # not merely pending in the session.
        db_session.commit()
        db_session.close()

        user = db_session.query(User).filter_by(username="foo").one_or_none()

        assert user is not None

    def test_signup_creates_activation_for_user(self, svc):
        user = svc.signup(username="foo", email="foo@bar.com")

        assert isinstance(user.activation, Activation)

    def test_signup_does_not_create_activation_for_user_when_activation_not_required(
        self, svc
    ):
        user = svc.signup(require_activation=False, username="foo", email="foo@bar.com")

        assert user.activation is None

    def test_signup_sets_default_authority(self, svc):
        user = svc.signup(username="foo", email="foo@bar.com")

        assert user.authority == "example.org"

    def test_signup_allows_authority_override(self, svc):
        user = svc.signup(
            username="foo", email="foo@bar.com", authority="bar-client.com"
        )

        assert user.authority == "bar-client.com"

    def test_signup_allows_user_with_empty_identities(self, svc):
        user = svc.signup(require_activation=False, username="foo", identities=[])

        assert user.identities == []

    def METHOD_NAME(self, svc):
        """privacy_accepted timestamp is stored verbatim on the user."""
        now = datetime.datetime.utcnow()
        user = svc.signup(username="foo", email="foo@bar.com", privacy_accepted=now)

        assert user.privacy_accepted == now

    def test_signup_passes_through_comms_opt_in(self, svc):
        user = svc.signup(username="foo", email="foo@bar.com", comms_opt_in=True)

        assert user.comms_opt_in

    def test_signup_sets_provided_user_identities(self, svc):
        identity_data = [
            {"provider": "someprovider", "provider_unique_id": 1},
            {"provider": "someotherprovider", "provider_unique_id": "394ffa3"},
        ]

        user = svc.signup(username="foo", email="foo@bar.com", identities=identity_data)

        assert len(user.identities) == 2

    def test_signup_raises_with_invalid_identities(self, svc):
        # Duplicate (provider, provider_unique_id) pairs violate the
        # identity table's unique constraint.
        dupe_identity = {"provider": "a", "provider_unique_id": 1}
        with pytest.raises(
            IntegrityError, match="violates unique constraint.*identity"
        ):
            svc.signup(
                username="foo",
                email="foo@bar.com",
                identities=[dupe_identity, dupe_identity],
            )

    def test_signup_sets_password_using_password_service(
        self, svc, user_password_service
    ):
        user = svc.signup(username="foo", email="foo@bar.com", password="wibble")

        user_password_service.update_password.assert_called_once_with(user, "wibble")

    def test_signup_sends_email(self, svc, signup, tasks_mailer, pyramid_request):
        signup.generate.return_value = ["signup", "args"]

        user = svc.signup(username="foo", email="foo@bar.com")

        signup.generate.assert_called_once_with(
            request=pyramid_request,
            user_id=user.id,
            email="foo@bar.com",
            activation_code=user.activation.code,
        )
        tasks_mailer.send.delay.assert_called_once_with(*signup.generate.return_value)

    def test_signup_does_not_send_email_when_activation_not_required(
        self, svc, signup, tasks_mailer
    ):
        svc.signup(require_activation=False, username="foo", email="foo@bar.com")

        signup.generate.assert_not_called()
        tasks_mailer.send.delay.assert_not_called()

    def test_signup_creates_subscriptions(self, svc, subscription_service, factories):
        # An inactive pre-existing subscription is activated during signup.
        subscription = factories.Subscriptions(active=False)
        subscription_service.get_all_subscriptions.return_value = [subscription]

        user = svc.signup(username="foo", email="foo@bar.com")

        subscription_service.get_all_subscriptions.assert_called_once_with(
            user_id=user.userid
        )
        assert subscription.active

    def test_signup_logs_conflict_error_when_account_with_email_already_exists(
        self, svc, patch
    ):
        log = patch("h.services.user_signup.log")

        # Second identical signup triggers the conflict path; both calls sit
        # inside pytest.raises because only the second one raises.
        with pytest.raises(ConflictError):
            svc.signup(username="foo", email="foo@bar.com")
            svc.signup(username="foo", email="foo@bar.com")

        assert (
            "concurrent account signup conflict error occurred during user signup"
            in log.warning.call_args[0][0]
        )

    @pytest.mark.parametrize(
        "username,email",
        [
            # In the real world these values would be identical to the first signup but
            # since we need to force one to error before the other, only the email or
            # only the username matches. Assume that when one of these happens it means
            # the user issued identical signup requests concurrently.
            # Catches Integrity error on identical email.
            ("bar", "foo@bar.com"),
            # Catches Integrity error on identical username.
            ("foo", "foo1@bar.com"),
        ],
    )
    def test_signup_raises_conflict_error_when_account_already_exists(
        self, svc, username, email
    ):
        # This happens when two or more identical
        # concurrent signup requests race each other to the db.
        with pytest.raises(
            ConflictError,
            match=f"The email address {email} has already been registered.",
        ):
            svc.signup(username="foo", email="foo@bar.com")
            svc.signup(username=username, email=email)

    @pytest.fixture
    def svc(self, pyramid_request, user_password_service, subscription_service):
        # Service under test, wired to mocked collaborators.
        return UserSignupService(
            request=pyramid_request,
            default_authority="example.org",
            password_service=user_password_service,
            subscription_service=subscription_service,
        )

    @pytest.fixture(autouse=True)
    def tasks_mailer(self, patch):
        return patch("h.services.user_signup.tasks_mailer")

    @pytest.fixture(autouse=True)
    def signup(self, patch):
        return patch("h.services.user_signup.signup")
@pytest.mark.usefixtures("user_password_service")
class TestUserSignupServiceFactory:
    """Tests for user_signup_service_factory wiring."""

    def test_it(
        self,
        UserSignupService,
        pyramid_request,
        user_password_service,
        subscription_service,
    ):
        svc = user_signup_service_factory(sentinel.context, pyramid_request)

        # The factory must forward the request, authority and collaborating
        # services straight into the service constructor.
        UserSignupService.assert_called_once_with(
            request=pyramid_request,
            default_authority=pyramid_request.default_authority,
            password_service=user_password_service,
            subscription_service=subscription_service,
        )
        assert svc == UserSignupService.return_value

    @pytest.fixture
    def UserSignupService(self, patch):
        return patch("h.services.user_signup.UserSignupService")
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2023
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
import pytest
from telegram import (
InlineKeyboardButton,
InlineKeyboardMarkup,
InlineQueryResultCachedPhoto,
InlineQueryResultCachedVoice,
InputTextMessageContent,
MessageEntity,
)
from tests.auxil.slots import mro_slots
@pytest.fixture(scope="module")
def inline_query_result_cached_photo():
    # One fully-populated instance shared by all tests in this module
    # (module scope: the object is treated as immutable by the tests).
    return InlineQueryResultCachedPhoto(
        TestInlineQueryResultCachedPhotoBase.id_,
        TestInlineQueryResultCachedPhotoBase.photo_file_id,
        title=TestInlineQueryResultCachedPhotoBase.title,
        description=TestInlineQueryResultCachedPhotoBase.description,
        caption=TestInlineQueryResultCachedPhotoBase.caption,
        parse_mode=TestInlineQueryResultCachedPhotoBase.parse_mode,
        caption_entities=TestInlineQueryResultCachedPhotoBase.caption_entities,
        input_message_content=TestInlineQueryResultCachedPhotoBase.input_message_content,
        reply_markup=TestInlineQueryResultCachedPhotoBase.reply_markup,
    )
class TestInlineQueryResultCachedPhotoBase:
    """Expected attribute values shared by the test classes below."""

    id_ = "id"
    type_ = "photo"
    photo_file_id = "photo file id"
    title = "title"
    description = "description"
    caption = "caption"
    parse_mode = "HTML"
    caption_entities = [MessageEntity(MessageEntity.ITALIC, 0, 7)]
    input_message_content = InputTextMessageContent("input_message_content")
    reply_markup = InlineKeyboardMarkup([[InlineKeyboardButton("reply_markup")]])
class TestInlineQueryResultCachedPhotoWithoutRequest(TestInlineQueryResultCachedPhotoBase):
    """Offline tests for InlineQueryResultCachedPhoto (no Bot API requests)."""

    def test_slot_behaviour(self, inline_query_result_cached_photo):
        # Was a `METHOD_NAME` placeholder; named per the standard PTB slot test.
        inst = inline_query_result_cached_photo
        # Every declared slot must actually be set on the instance...
        for attr in inst.__slots__:
            assert getattr(inst, attr, "err") != "err", f"got extra slot '{attr}'"
        # ...and no slot may be declared twice anywhere in the MRO.
        assert len(mro_slots(inst)) == len(set(mro_slots(inst))), "duplicate slot"

    def test_expected_values(self, inline_query_result_cached_photo):
        assert inline_query_result_cached_photo.type == self.type_
        assert inline_query_result_cached_photo.id == self.id_
        assert inline_query_result_cached_photo.photo_file_id == self.photo_file_id
        assert inline_query_result_cached_photo.title == self.title
        assert inline_query_result_cached_photo.description == self.description
        assert inline_query_result_cached_photo.caption == self.caption
        assert inline_query_result_cached_photo.parse_mode == self.parse_mode
        # The constructor normalises caption_entities to a tuple.
        assert inline_query_result_cached_photo.caption_entities == tuple(self.caption_entities)
        assert (
            inline_query_result_cached_photo.input_message_content.to_dict()
            == self.input_message_content.to_dict()
        )
        assert (
            inline_query_result_cached_photo.reply_markup.to_dict() == self.reply_markup.to_dict()
        )

    def test_caption_entities_always_tuple(self):
        # Omitted caption_entities must come back as an empty tuple, not None.
        result = InlineQueryResultCachedPhoto(self.id_, self.photo_file_id)
        assert result.caption_entities == ()

    def test_to_dict(self, inline_query_result_cached_photo):
        inline_query_result_cached_photo_dict = inline_query_result_cached_photo.to_dict()

        assert isinstance(inline_query_result_cached_photo_dict, dict)
        assert (
            inline_query_result_cached_photo_dict["type"] == inline_query_result_cached_photo.type
        )
        assert inline_query_result_cached_photo_dict["id"] == inline_query_result_cached_photo.id
        assert (
            inline_query_result_cached_photo_dict["photo_file_id"]
            == inline_query_result_cached_photo.photo_file_id
        )
        assert (
            inline_query_result_cached_photo_dict["title"]
            == inline_query_result_cached_photo.title
        )
        assert (
            inline_query_result_cached_photo_dict["description"]
            == inline_query_result_cached_photo.description
        )
        assert (
            inline_query_result_cached_photo_dict["caption"]
            == inline_query_result_cached_photo.caption
        )
        assert (
            inline_query_result_cached_photo_dict["parse_mode"]
            == inline_query_result_cached_photo.parse_mode
        )
        assert inline_query_result_cached_photo_dict["caption_entities"] == [
            ce.to_dict() for ce in inline_query_result_cached_photo.caption_entities
        ]
        assert (
            inline_query_result_cached_photo_dict["input_message_content"]
            == inline_query_result_cached_photo.input_message_content.to_dict()
        )
        assert (
            inline_query_result_cached_photo_dict["reply_markup"]
            == inline_query_result_cached_photo.reply_markup.to_dict()
        )

    def test_equality(self):
        a = InlineQueryResultCachedPhoto(self.id_, self.photo_file_id)
        b = InlineQueryResultCachedPhoto(self.id_, self.photo_file_id)
        c = InlineQueryResultCachedPhoto(self.id_, "")
        d = InlineQueryResultCachedPhoto("", self.photo_file_id)
        e = InlineQueryResultCachedVoice(self.id_, "", "")

        assert a == b
        assert hash(a) == hash(b)
        assert a is not b

        # Equality is based on `id` only, so differing file ids still compare equal.
        assert a == c
        assert hash(a) == hash(c)

        assert a != d
        assert hash(a) != hash(d)

        assert a != e
        assert hash(a) != hash(e)
"""Utilities for extracting common archive formats"""
import zipfile
import tarfile
import os
import shutil
import posixpath
import contextlib
from distutils.errors import DistutilsError
from ._path import ensure_directory
# Public API of this module; star-imports receive exactly these names.
__all__ = [
    "unpack_archive", "unpack_zipfile", "unpack_tarfile", "default_filter",
    "UnrecognizedFormat", "extraction_drivers", "unpack_directory",
]
class UnrecognizedFormat(DistutilsError):
    """Couldn't recognize the archive type"""
def default_filter(src, dst):
    """Default progress/filter callback: extract every member, unchanged.

    Returns the destination path *dst* (a truthy value), not ``True`` — a
    falsy return from a filter tells the drivers to skip that member.
    (The previous docstring wrongly claimed it "returns True".)
    """
    return dst


def unpack_archive(
        filename, extract_dir, progress_filter=default_filter,
        drivers=None):
    """Unpack `filename` to `extract_dir`, or raise ``UnrecognizedFormat``

    `progress_filter` is a function taking two arguments: a source path
    internal to the archive ('/'-separated), and a filesystem path where it
    will be extracted. The callback must return the desired extract path
    (which may be the same as the one passed in), or else ``None`` to skip
    that file or directory. The callback can thus be used to report on the
    progress of the extraction, as well as to filter the items extracted or
    alter their extraction paths.

    `drivers`, if supplied, must be a non-empty sequence of functions with the
    same signature as this function (minus the `drivers` argument), that raise
    ``UnrecognizedFormat`` if they do not support extracting the designated
    archive type. The `drivers` are tried in sequence until one is found that
    does not raise an error, or until all are exhausted (in which case
    ``UnrecognizedFormat`` is raised). If you do not supply a sequence of
    drivers, the module's ``extraction_drivers`` constant will be used, which
    means that ``unpack_zipfile`` and ``unpack_tarfile`` will be tried, in that
    order.
    """
    for driver in drivers or extraction_drivers:
        try:
            driver(filename, extract_dir, progress_filter)
        except UnrecognizedFormat:
            # This driver doesn't handle the format; try the next one.
            continue
        else:
            # Driver succeeded; extraction is done.
            return
    else:
        raise UnrecognizedFormat(
            "Not a recognized archive type: %s" % filename
        )
def unpack_directory(filename, extract_dir, progress_filter=default_filter):
    """"Unpack" a directory, using the same interface as for archives

    Raises ``UnrecognizedFormat`` if `filename` is not a directory
    """
    if not os.path.isdir(filename):
        raise UnrecognizedFormat("%s is not a directory" % filename)

    # Maps each directory seen so far to its (archive-style '/'-separated
    # source prefix, destination directory on disk) pair.
    paths = {
        filename: ('', extract_dir),
    }
    for base, dirs, files in os.walk(filename):
        src, dst = paths[base]
        for d in dirs:
            # Register child dirs so later os.walk steps can look them up.
            paths[os.path.join(base, d)] = src + d + '/', os.path.join(dst, d)
        for f in files:
            target = os.path.join(dst, f)
            # Filter may rename the target or (by returning falsy) veto it.
            target = progress_filter(src + f, target)
            if not target:
                # skip non-files
                continue
            ensure_directory(target)
            f = os.path.join(base, f)
            # Copy contents and then permissions/timestamps.
            shutil.copyfile(f, target)
            shutil.copystat(f, target)
def unpack_zipfile(filename, extract_dir, progress_filter=default_filter):
    """Unpack zip `filename` to `extract_dir`

    Raises ``UnrecognizedFormat`` if `filename` is not a zipfile (as determined
    by ``zipfile.is_zipfile()``). See ``unpack_archive()`` for an explanation
    of the `progress_filter` argument.
    """
    if not zipfile.is_zipfile(filename):
        raise UnrecognizedFormat("%s is not a zip file" % (filename,))

    # Open the archive and hand off to the shared helper, which is also
    # usable by callers that already hold an open ZipFile.
    archive = zipfile.ZipFile(filename)
    with archive:
        _unpack_zipfile_obj(archive, extract_dir, progress_filter)
def _unpack_zipfile_obj(zipfile_obj, extract_dir, progress_filter=default_filter):
    """Internal/private API used by other parts of setuptools.

    Similar to ``unpack_zipfile``, but receives an already opened
    :obj:`zipfile.ZipFile` object instead of a filename.
    """
    for info in zipfile_obj.infolist():
        name = info.filename

        # don't extract absolute paths or ones with .. in them
        if name.startswith('/') or '..' in name.split('/'):
            continue

        target = os.path.join(extract_dir, *name.split('/'))
        # Filter may rename the target or (by returning falsy) skip it.
        target = progress_filter(name, target)
        if not target:
            continue
        if name.endswith('/'):
            # directory
            ensure_directory(target)
        else:
            # file
            ensure_directory(target)
            data = zipfile_obj.read(info.filename)
            with open(target, 'wb') as f:
                f.write(data)
        # Restore Unix permission bits, stored in the upper 16 bits of
        # external_attr (zero when the archive was built on non-Unix).
        unix_attributes = info.external_attr >> 16
        if unix_attributes:
            os.chmod(target, unix_attributes)
def _resolve_tar_file_or_dir(tar_obj, tar_member_obj):
    """Resolve any links and extract link targets as normal files.

    Follows chains of hard links and symlinks inside *tar_obj* until a
    regular file or directory member is reached, and returns that member.
    (Was a `METHOD_NAME` placeholder; named per its label.)

    Raises ``LookupError`` when the chain ends at a missing member or at a
    member that is neither a file nor a directory (device, fifo, ...).
    """
    while tar_member_obj is not None and (
            tar_member_obj.islnk() or tar_member_obj.issym()):
        linkpath = tar_member_obj.linkname
        if tar_member_obj.issym():
            # Symlink targets are relative to the linking member's directory;
            # hard-link targets are already archive-root-relative.
            base = posixpath.dirname(tar_member_obj.name)
            linkpath = posixpath.join(base, linkpath)
            linkpath = posixpath.normpath(linkpath)
        # NOTE(review): relies on TarFile's private _getmember lookup,
        # which returns None for a missing name.
        tar_member_obj = tar_obj._getmember(linkpath)

    is_file_or_dir = (
        tar_member_obj is not None and
        (tar_member_obj.isfile() or tar_member_obj.isdir())
    )
    if is_file_or_dir:
        return tar_member_obj

    raise LookupError('Got unknown file type')
def _iter_open_tar(tar_obj, extract_dir, progress_filter):
    """Emit member-destination pairs from a tar archive.

    Skips unsafe names (absolute or containing ".."), resolves link members
    to their real targets, and applies *progress_filter* to each member's
    preliminary destination. Closes *tar_obj* when iteration ends.
    """
    # don't do any chowning!
    tar_obj.chown = lambda *args: None

    with contextlib.closing(tar_obj):
        for member in tar_obj:
            name = member.name

            # don't extract absolute paths or ones with .. in them
            if name.startswith('/') or '..' in name.split('/'):
                continue

            prelim_dst = os.path.join(extract_dir, *name.split('/'))

            try:
                # Replaces the former `METHOD_NAME` placeholder call,
                # consistent with the resolver's filled-in name.
                member = _resolve_tar_file_or_dir(tar_obj, member)
            except LookupError:
                # Broken link chain or special file (device/fifo): skip it.
                continue

            final_dst = progress_filter(name, prelim_dst)
            if not final_dst:
                # Filter vetoed this member.
                continue

            if final_dst.endswith(os.sep):
                final_dst = final_dst[:-1]

            yield member, final_dst
def unpack_tarfile(filename, extract_dir, progress_filter=default_filter):
    """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir`

    Raises ``UnrecognizedFormat`` if `filename` is not a tarfile (as determined
    by ``tarfile.open()``). See ``unpack_archive()`` for an explanation
    of the `progress_filter` argument.
    """
    try:
        tarobj = tarfile.open(filename)
    except tarfile.TarError as e:
        raise UnrecognizedFormat(
            "%s is not a compressed or uncompressed tar file" % (filename,)
        ) from e

    for member, final_dst in _iter_open_tar(
        tarobj, extract_dir, progress_filter,
    ):
        try:
            # XXX Ugh — private TarFile API, but it extracts one member
            # to an explicit destination path.
            tarobj._extract_member(member, final_dst)
        except tarfile.ExtractError:
            # chown/chmod/mkfifo/mknode/makedev failed
            pass

    return True


# Drivers tried in order by unpack_archive() when none are supplied.
extraction_drivers = unpack_directory, unpack_zipfile, unpack_tarfile
from common.numpy_fast import mean
from common.kalman.simple_kalman import KF1D
# Default lead acceleration decay set to 50% at 1s
_LEAD_ACCEL_TAU = 1.5
# radar tracks
SPEED, ACCEL = 0, 1 # Kalman filter states enum
# stationary qualification parameters
v_ego_stationary = 4. # no stationary object flag below this speed
RADAR_TO_CENTER = 2.7 # (deprecated) RADAR is ~ 2.7m ahead from center of car
RADAR_TO_CAMERA = 1.52 # RADAR is ~ 1.5m ahead from center of mesh frame
class Track():
  """One radar track: raw relative kinematics plus a Kalman-filtered
  estimate of the lead vehicle's speed and acceleration."""

  def __init__(self, v_lead, kalman_params):
    self.cnt = 0  # number of update() calls received so far
    self.aLeadTau = _LEAD_ACCEL_TAU
    self.K_A = kalman_params.A
    self.K_C = kalman_params.C
    self.K_K = kalman_params.K
    # Filter state is [[speed], [accel]] of the lead.
    self.kf = KF1D([[v_lead], [0.0]], self.K_A, self.K_C, self.K_K)

  def update(self, d_rel, y_rel, v_rel, v_lead, measured):
    # relative values, copy
    self.dRel = d_rel  # LONG_DIST
    self.yRel = y_rel  # -LAT_DIST
    self.vRel = v_rel  # REL_SPEED
    self.vLead = v_lead
    self.measured = measured  # measured or estimate

    # computed velocity and accelerations
    if self.cnt > 0:
      self.kf.update(self.vLead)

    self.vLeadK = float(self.kf.x[SPEED][0])
    self.aLeadK = float(self.kf.x[ACCEL][0])

    # Learn if constant acceleration
    if abs(self.aLeadK) < 0.5:
      self.aLeadTau = _LEAD_ACCEL_TAU
    else:
      # Shrink the decay time constant while the lead is accelerating hard.
      self.aLeadTau *= 0.9

    self.cnt += 1

  def get_key_for_cluster(self):
    # Weigh y higher since radar is inaccurate in this dimension
    return [self.dRel, self.yRel*2, self.vRel]

  def reset_a_lead(self, aLeadK, aLeadTau):
    """Re-seed the Kalman filter with an externally supplied acceleration.

    (Was a `METHOD_NAME` placeholder; named per its label.)
    """
    self.kf = KF1D([[self.vLead], [aLeadK]], self.K_A, self.K_C, self.K_K)
    self.aLeadK = aLeadK
    self.aLeadTau = aLeadTau
class Cluster():
  """A group of radar Tracks treated as one object; most attributes are the
  mean of the member tracks' values."""

  def __init__(self):
    self.tracks = set()

  def add(self, t):
    # add the first track
    self.tracks.add(t)

  # TODO: make generic
  @property
  def dRel(self):
    return mean([t.dRel for t in self.tracks])

  @property
  def yRel(self):
    return mean([t.yRel for t in self.tracks])

  @property
  def vRel(self):
    return mean([t.vRel for t in self.tracks])

  @property
  def aRel(self):
    return mean([t.aRel for t in self.tracks])

  @property
  def vLead(self):
    return mean([t.vLead for t in self.tracks])

  @property
  def dPath(self):
    return mean([t.dPath for t in self.tracks])

  @property
  def vLat(self):
    return mean([t.vLat for t in self.tracks])

  @property
  def vLeadK(self):
    return mean([t.vLeadK for t in self.tracks])

  @property
  def aLeadK(self):
    # Kalman outputs are only valid after a track's first update.
    if all(t.cnt <= 1 for t in self.tracks):
      return 0.
    else:
      return mean([t.aLeadK for t in self.tracks if t.cnt > 1])

  @property
  def aLeadTau(self):
    if all(t.cnt <= 1 for t in self.tracks):
      return _LEAD_ACCEL_TAU
    else:
      return mean([t.aLeadTau for t in self.tracks if t.cnt > 1])

  @property
  def measured(self):
    # True if any member track was directly measured (not estimated).
    return any(t.measured for t in self.tracks)

  def get_RadarState(self, model_prob=0.0):
    return {
      "dRel": float(self.dRel),
      "yRel": float(self.yRel),
      "vRel": float(self.vRel),
      "vLead": float(self.vLead),
      "vLeadK": float(self.vLeadK),
      "aLeadK": float(self.aLeadK),
      "status": True,
      "fcw": self.is_potential_fcw(model_prob),
      "modelProb": model_prob,
      "radar": True,
      "aLeadTau": float(self.aLeadTau)
    }

  def get_RadarState_from_vision(self, lead_msg, v_ego, model_v_ego):
    # Build a radar-style lead dict from the vision model's lead prediction.
    lead_v_rel_pred = lead_msg.v[0] - model_v_ego
    return {
      "dRel": float(lead_msg.x[0] - RADAR_TO_CAMERA),
      "yRel": float(-lead_msg.y[0]),
      "vRel": float(lead_v_rel_pred),
      "vLead": float(v_ego + lead_v_rel_pred),
      "vLeadK": float(v_ego + lead_v_rel_pred),
      "aLeadK": 0.0,
      "aLeadTau": 0.3,
      "fcw": False,
      "modelProb": float(lead_msg.prob),
      "radar": False,
      "status": True
    }

  def __str__(self):
    ret = f"x: {self.dRel:4.1f}  y: {self.yRel:4.1f}  v: {self.vRel:4.1f}  a: {self.aLeadK:4.1f}"
    return ret

  def potential_low_speed_lead(self, v_ego):
    # stop for stuff in front of you and low speed, even without model confirmation
    # Radar points closer than 0.75, are almost always glitches on toyota radars
    return abs(self.yRel) < 1.0 and (v_ego < v_ego_stationary) and (0.75 < self.dRel < 25)

  def is_potential_fcw(self, model_prob):
    return model_prob > .9
# coding: utf-8
"""
OpenPerf API
REST API interface for OpenPerf # noqa: E501
OpenAPI spec version: 1
Contact: support@spirent.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class PacketAnalyzerFlowCountersLatency(object):
    """Swagger model for per-flow latency counters.

    NOTE: This class was auto generated by the swagger code generator
    program; keep edits minimal so regeneration stays straightforward.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'summary': 'PacketAnalyzerFlowSummaryCounters',
        'units': 'str'
    }

    attribute_map = {
        'summary': 'summary',
        'units': 'units'
    }

    def __init__(self, summary=None, units=None):  # noqa: E501
        """PacketAnalyzerFlowCountersLatency - a model defined in Swagger"""  # noqa: E501
        self._summary = None
        self._units = None
        self.discriminator = None
        # Assign through the properties so any setter validation applies.
        self.summary = summary
        self.units = units

    @property
    def summary(self):
        """Gets the summary of this PacketAnalyzerFlowCountersLatency.  # noqa: E501

        :return: The summary of this PacketAnalyzerFlowCountersLatency.  # noqa: E501
        :rtype: PacketAnalyzerFlowSummaryCounters
        """
        return self._summary

    @summary.setter
    def summary(self, summary):
        """Sets the summary of this PacketAnalyzerFlowCountersLatency.

        :param summary: The summary of this PacketAnalyzerFlowCountersLatency.  # noqa: E501
        :type: PacketAnalyzerFlowSummaryCounters
        """
        self._summary = summary

    @property
    def units(self):
        """Gets the units of this PacketAnalyzerFlowCountersLatency.  # noqa: E501

        Latency measurement units  # noqa: E501

        :return: The units of this PacketAnalyzerFlowCountersLatency.  # noqa: E501
        :rtype: str
        """
        return self._units

    @units.setter
    def units(self, units):
        """Sets the units of this PacketAnalyzerFlowCountersLatency.

        Latency measurement units  # noqa: E501

        :param units: The units of this PacketAnalyzerFlowCountersLatency.  # noqa: E501
        :type: str
        """
        self._units = units

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # `.items()` replaces the former `six.iteritems` call — identical
        # behaviour on Python 3, and drops the six dependency for this class.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(PacketAnalyzerFlowCountersLatency, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model.

        (Was a `METHOD_NAME` placeholder; named to match the `__repr__`
        delegation and the generator's standard model template.)
        """
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, PacketAnalyzerFlowCountersLatency):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
from typing import TYPE_CHECKING, Any, Optional
from PySide6.QtCore import QAbstractTableModel, QSize, Qt
from PySide6.QtGui import QBrush, QFont
from PySide6.QtWidgets import QAbstractItemView, QHeaderView, QTableView, QVBoxLayout
from angrmanagement.config import Conf
from angrmanagement.logic.debugger import DebuggerWatcher
from .view import BaseView
if TYPE_CHECKING:
import angr
import PySide6
from archinfo import Register
class QRegisterTableModel(QAbstractTableModel):
    """
    Register table model.
    """

    Headers = ["Name", "Value"]
    COL_REGISTER = 0
    COL_VALUE = 1

    def __init__(self, log_widget: "QRegisterTableWidget" = None):
        super().__init__()
        self._log_widget = log_widget
        self.state: angr.SimState = None
        # Previous state, kept so changed registers can be highlighted.
        self._last_state: angr.SimState = None

    def _filtered_register_list(self):
        # Only general-purpose registers are displayed.
        return [reg for reg in self.state.arch.register_list if reg.general_purpose]

    def rowCount(self, parent: "PySide6.QtCore.QModelIndex" = ...) -> int:  # pylint:disable=unused-argument
        return 0 if self.state is None else len(self._filtered_register_list())

    def columnCount(self, parent: "PySide6.QtCore.QModelIndex" = ...) -> int:  # pylint:disable=unused-argument
        return len(self.Headers)

    def headerData(
        self, section: int, orientation: "PySide6.QtCore.Qt.Orientation", role: int = ...
    ) -> Any:  # pylint:disable=unused-argument
        if role != Qt.DisplayRole:
            return None
        if section < len(self.Headers):
            return self.Headers[section]
        return None

    def data(self, index: "PySide6.QtCore.QModelIndex", role: int = ...) -> Any:
        if not index.isValid():
            return None
        row = index.row()
        reg = self._filtered_register_list()[row]
        col = index.column()
        if role == Qt.DisplayRole:
            return self._get_column_text(reg, col)
        elif role == Qt.ForegroundRole:
            # Highlight registers whose value changed since the last state.
            return QBrush(Qt.red) if self._did_data_change(reg) else None
        else:
            return None

    def _get_column_text(self, reg: "Register", col: int) -> Any:
        """Return the display text for column *col* of register *reg*.

        (Was a `METHOD_NAME` placeholder; underscore-prefixed to match the
        sibling private helpers on this class.)
        """
        mapping = {
            QRegisterTableModel.COL_REGISTER: lambda x: x.name,
            QRegisterTableModel.COL_VALUE: lambda x: repr(self.state.regs.get(x.name)),
        }
        func = mapping.get(col)
        if func is None:
            return None
        return func(reg)

    def _did_data_change(self, reg: "Register") -> bool:
        # No previous state to compare against (e.g. first refresh).
        if self._last_state is None:
            return False

        different = self.state.solver.eval(self.state.regs.get(reg.name) != self._last_state.regs.get(reg.name))
        return different

    def update_state(self, state):
        # Snapshot the current state before replacing it, for change tracking.
        self._last_state = self.state.copy() if self.state else None
        self.state = None if state is None else state
class QRegisterTableWidget(QTableView):
    """
    Register table widget.
    """

    def __init__(self, register_view, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.register_view = register_view

        hheader = self.horizontalHeader()
        hheader.setVisible(True)
        hheader.setStretchLastSection(True)

        vheader = self.verticalHeader()
        vheader.setVisible(False)
        vheader.setDefaultSectionSize(20)

        self.setSelectionBehavior(QAbstractItemView.SelectRows)
        self.setHorizontalScrollMode(QAbstractItemView.ScrollPerPixel)

        self.model: QRegisterTableModel = QRegisterTableModel(self)
        self.setModel(self.model)

        font = QFont(Conf.disasm_font)
        self.setFont(font)

        hheader.setSectionResizeMode(0, QHeaderView.ResizeToContents)

        # Refresh the model whenever the active debugger's state changes.
        self._dbg_manager = register_view.instance.debugger_mgr
        self._dbg_watcher = DebuggerWatcher(self._on_debugger_state_updated, self._dbg_manager.debugger)
        self._on_debugger_state_updated()

    #
    # Events
    #

    def closeEvent(self, event):
        # Stop watching the debugger before the widget goes away.
        self._dbg_watcher.shutdown()
        super().closeEvent(event)

    def _on_debugger_state_updated(self):
        dbg = self._dbg_manager.debugger
        self.model.update_state(None if dbg.am_none else dbg.simstate)
        self.model.layoutChanged.emit()
class RegistersView(BaseView):
    """
    Register table view.
    """

    def __init__(self, workspace, instance, default_docking_position, *args, **kwargs):
        super().__init__("registers", workspace, instance, default_docking_position, *args, **kwargs)

        self.base_caption = "Registers"
        self._tbl_widget: Optional[QRegisterTableWidget] = None
        self._init_widgets()
        self.reload()

        self.width_hint = 500
        self.height_hint = 0
        self.updateGeometry()

    def reload(self):
        # The table refreshes itself via its DebuggerWatcher; nothing to do.
        pass

    @staticmethod
    def minimumSizeHint(*args, **kwargs):  # pylint:disable=unused-argument
        return QSize(200, 200)

    def _init_widgets(self):
        vlayout = QVBoxLayout()
        vlayout.setContentsMargins(0, 0, 0, 0)
        self._tbl_widget = QRegisterTableWidget(self)
        vlayout.addWidget(self._tbl_widget)
        self.setLayout(vlayout)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The TensorBoard Scalars plugin.
See `http_api.md` in this directory for specifications of the routes for
this plugin.
"""
import csv
import io
import werkzeug.exceptions
from werkzeug import wrappers
from tensorboard import errors
from tensorboard import plugin_util
from tensorboard.backend import http_util
from tensorboard.data import provider
from tensorboard.plugins import base_plugin
from tensorboard.plugins.scalar import metadata
# Maximum number of points kept per time series when no sampling hint is set.
_DEFAULT_DOWNSAMPLING = 1000  # scalars per time series


class OutputFormat:
    """An enum used to list the valid output formats for API calls."""

    JSON = "json"
    CSV = "csv"
class ScalarsPlugin(base_plugin.TBPlugin):
    """Scalars Plugin for TensorBoard."""

    plugin_name = metadata.PLUGIN_NAME

    def __init__(self, context):
        """Instantiates ScalarsPlugin via TensorBoard core.

        Args:
          context: A base_plugin.TBContext instance.
        """
        self._downsample_to = (context.sampling_hints or {}).get(
            self.plugin_name, _DEFAULT_DOWNSAMPLING
        )
        self._data_provider = context.data_provider
        self._version_checker = plugin_util._MetadataVersionChecker(
            data_kind="scalar",
            latest_known_version=0,
        )

    def get_plugin_apps(self):
        # Route table: URL path -> WSGI handler.
        return {
            "/scalars": self.scalars_route,
            "/scalars_multirun": self.scalars_multirun_route,
            "/tags": self.tags_route,
        }

    def is_active(self):
        return False  # `list_plugins` as called by TB core suffices

    def frontend_metadata(self):
        return base_plugin.FrontendMetadata(element_name="tf-scalar-dashboard")

    def index_impl(self, ctx, experiment=None):
        """Return {runName: {tagName: {displayName: ..., description:
        ...}}}."""
        mapping = self._data_provider.list_scalars(
            ctx,
            experiment_id=experiment,
            plugin_name=metadata.PLUGIN_NAME,
        )
        result = {run: {} for run in mapping}
        for (run, tag_to_content) in mapping.items():
            for (tag, metadatum) in tag_to_content.items():
                md = metadata.parse_plugin_metadata(metadatum.plugin_content)
                if not self._version_checker.ok(md.version, run, tag):
                    continue
                # Sanitize user-authored markdown before serving it.
                description = plugin_util.markdown_to_safe_html(
                    metadatum.description
                )
                result[run][tag] = {
                    "displayName": metadatum.display_name,
                    "description": description,
                }
        return result

    def scalars_impl(self, ctx, tag, run, experiment, output_format):
        """Result of the form `(body, mime_type)`."""
        all_scalars = self._data_provider.read_scalars(
            ctx,
            experiment_id=experiment,
            plugin_name=metadata.PLUGIN_NAME,
            downsample=self._downsample_to,
            run_tag_filter=provider.RunTagFilter(runs=[run], tags=[tag]),
        )
        scalars = all_scalars.get(run, {}).get(tag, None)
        if scalars is None:
            raise errors.NotFoundError(
                "No scalar data for run=%r, tag=%r" % (run, tag)
            )
        values = [(x.wall_time, x.step, x.value) for x in scalars]
        if output_format == OutputFormat.CSV:
            string_io = io.StringIO()
            writer = csv.writer(string_io)
            writer.writerow(["Wall time", "Step", "Value"])
            writer.writerows(values)
            return (string_io.getvalue(), "text/csv")
        else:
            # Any other (or missing) format falls back to JSON.
            return (values, "application/json")

    def scalars_multirun_impl(self, ctx, tag, runs, experiment):
        """Result of the form `(body, mime_type)`."""
        all_scalars = self._data_provider.read_scalars(
            ctx,
            experiment_id=experiment,
            plugin_name=metadata.PLUGIN_NAME,
            downsample=self._downsample_to,
            run_tag_filter=provider.RunTagFilter(runs=runs, tags=[tag]),
        )
        body = {
            run: [(x.wall_time, x.step, x.value) for x in run_data[tag]]
            for (run, run_data) in all_scalars.items()
        }
        return (body, "application/json")

    @wrappers.Request.application
    def tags_route(self, request):
        ctx = plugin_util.context(request.environ)
        experiment = plugin_util.experiment_id(request.environ)
        index = self.index_impl(ctx, experiment=experiment)
        return http_util.Respond(request, index, "application/json")

    @wrappers.Request.application
    def scalars_route(self, request):
        """Given a tag and single run, return array of ScalarEvents.

        (Was a `METHOD_NAME` placeholder; named to match the route table in
        `get_plugin_apps` and the sibling `*_route` handlers.)
        """
        tag = request.args.get("tag")
        run = request.args.get("run")
        if tag is None or run is None:
            raise errors.InvalidArgumentError(
                "Both run and tag must be specified: tag=%r, run=%r"
                % (tag, run)
            )
        ctx = plugin_util.context(request.environ)
        experiment = plugin_util.experiment_id(request.environ)
        output_format = request.args.get("format")
        (body, mime_type) = self.scalars_impl(
            ctx, tag, run, experiment, output_format
        )
        return http_util.Respond(request, body, mime_type)

    @wrappers.Request.application
    def scalars_multirun_route(self, request):
        """Given a tag and list of runs, return dict of ScalarEvent arrays."""
        if request.method != "POST":
            raise werkzeug.exceptions.MethodNotAllowed(["POST"])
        tags = request.form.getlist("tag")
        runs = request.form.getlist("runs")
        if len(tags) != 1:
            raise errors.InvalidArgumentError(
                "tag must be specified exactly once"
            )
        tag = tags[0]
        ctx = plugin_util.context(request.environ)
        experiment = plugin_util.experiment_id(request.environ)
        (body, mime_type) = self.scalars_multirun_impl(
            ctx, tag, runs, experiment
        )
        return http_util.Respond(request, body, mime_type)