id int64 0 300k | label stringlengths 1 74 ⌀ | text stringlengths 4k 8k |
|---|---|---|
5,100 | test horizontal rois |
from caffe2.python import core, workspace
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
import copy
class RoIAlignRotatedOp(hu.HypothesisTestCase):
    """Hypothesis tests for the Caffe2 ``RoIAlignRotated`` operator.

    Both tests validate ``RoIAlignRotated`` against the plain ``RoIAlign``
    operator after analytically undoing the rotation, so results must match
    exactly (up to the reference-check tolerance).
    """

    def bbox_xywh_to_xyxy(self, boxes):
        """
        Convert from [center_x center_y w h] format to [x1 y1 x2 y2].

        NOTE: modifies ``boxes`` in place and also returns it.
        """
        w, h = boxes[:, 2], boxes[:, 3]
        boxes[:, 0] -= w / 2.0  # x1 = center_x - width/2
        boxes[:, 1] -= h / 2.0  # y1 = center_y - height/2
        boxes[:, 2] = boxes[:, 0] + w  # x2 = x1 + width
        boxes[:, 3] = boxes[:, 1] + h  # y2 = y1 + height
        return boxes

    @given(
        H=st.integers(min_value=50, max_value=100),
        W=st.integers(min_value=50, max_value=100),
        C=st.integers(min_value=1, max_value=3),
        num_rois=st.integers(min_value=0, max_value=10),
        pooled_size=st.sampled_from([7, 14]),
        **hu.gcs
    )
    def METHOD_NAME(self, H, W, C, num_rois, pooled_size, gc, dc):
        """
        Test that results match with RoIAlign when angle=0.
        """
        X = np.random.randn(1, C, H, W).astype(np.float32)
        # RoI rows are [batch_idx, center_x, center_y, w, h, angle].
        R = np.zeros((num_rois, 6)).astype(np.float32)
        angle = 0.0
        for i in range(num_rois):
            # Sample boxes whose extents stay inside the feature map.
            x = np.random.uniform(1, W - 1)
            y = np.random.uniform(1, H - 1)
            w = np.random.uniform(1, min(x, W - x))
            h = np.random.uniform(1, min(y, H - y))
            R[i] = [0, x, y, w, h, angle]
        op = core.CreateOperator(
            "RoIAlignRotated",
            ["X", "R"],
            ["Y"],
            pooled_h=pooled_size,
            pooled_w=pooled_size,
            sampling_ratio=0,
        )

        def roialign_ref(X, R):
            # Remove angle and convert from [center_x center_y w h]
            # to [x1 y1 x2 y2] format.
            R_ref = copy.deepcopy(R[:, 0:5])
            R_ref[:, 1:5] = self.bbox_xywh_to_xyxy(R_ref[:, 1:5])
            ref_op = core.CreateOperator(
                "RoIAlign",
                ["X_ref", "R_ref"],
                ["Y_ref"],
                pooled_h=pooled_size,
                pooled_w=pooled_size,
                sampling_ratio=0,
            )
            workspace.FeedBlob("X_ref", X)
            workspace.FeedBlob("R_ref", R_ref)
            workspace.RunOperatorOnce(ref_op)
            return [workspace.FetchBlob("Y_ref")]

        self.assertReferenceChecks(
            device_option=gc, op=op, inputs=[X, R], reference=roialign_ref
        )
        # Gradient is only implemented/checked for GPU device types here.
        if core.IsGPUDeviceType(gc.device_type):
            self.assertGradientChecks(gc, op, [X, R], 0, [0])

    @given(
        H=st.integers(min_value=50, max_value=100),
        W=st.integers(min_value=50, max_value=100),
        C=st.integers(min_value=1, max_value=3),
        num_rois=st.integers(min_value=0, max_value=10),
        pooled_size=st.sampled_from([7, 14]),
        angle=st.sampled_from([-270, -180, -90, 90, 180, 270]),
        **hu.gcs
    )
    def test_simple_rotations(
        self, H, W, C, num_rois, pooled_size, angle, gc, dc
    ):
        """
        Test with right-angled rotations that don't need interpolation.
        """
        X = np.random.randn(1, C, H, W).astype(np.float32)
        R = np.zeros((num_rois, 6)).astype(np.float32)
        for i in range(num_rois):
            # Sizes bounded by all four margins so the box stays inside the
            # map under any right-angle rotation.
            x = np.random.uniform(1, W - 1)
            y = np.random.uniform(1, H - 1)
            w = np.random.uniform(1, min(x, W - x, y, H - y))
            h = np.random.uniform(1, min(x, W - x, y, H - y))
            R[i] = [0, x, y, w, h, angle]
        op = core.CreateOperator(
            "RoIAlignRotated",
            ["X", "R"],
            ["Y"],
            pooled_h=pooled_size,
            pooled_w=pooled_size,
            sampling_ratio=0,
        )

        def roialign_rot90(m, k=1, axes=(0, 1)):
            """Local reimplementation of np.rot90 with the `axes` argument
            (not available before numpy 1.12.0)."""
            axes = tuple(axes)
            if len(axes) != 2:
                raise ValueError("len(axes) must be 2.")
            m = np.asanyarray(m)
            if axes[0] == axes[1] or np.absolute(axes[0] - axes[1]) == m.ndim:
                raise ValueError("Axes must be different.")
            if (axes[0] >= m.ndim or axes[0] < -m.ndim or
                    axes[1] >= m.ndim or axes[1] < -m.ndim):
                raise ValueError(
                    "Axes={} out of range for array of ndim={}.".format(axes, m.ndim))
            # k may arrive as a float multiple of 90-degree turns; %= keeps
            # it in [0, 4) and the equality tests below still work.
            k %= 4
            if k == 0:
                return m[:]
            if k == 2:
                return roialign_flip(roialign_flip(m, axes[0]), axes[1])
            axes_list = np.arange(0, m.ndim)
            (axes_list[axes[0]], axes_list[axes[1]]) = (axes_list[axes[1]],
                                                        axes_list[axes[0]])
            if k == 1:
                return np.transpose(roialign_flip(m, axes[1]), axes_list)
            else:
                # k == 3
                return roialign_flip(np.transpose(m, axes_list), axes[1])

        def roialign_flip(m, axis):
            """Local reimplementation of np.flip along a single axis."""
            if not hasattr(m, 'ndim'):
                m = np.asarray(m)
            indexer = [slice(None)] * m.ndim
            try:
                indexer[axis] = slice(None, None, -1)
            except IndexError as e:
                raise ValueError("axis=%i is invalid for the %i-dimensional input array"
                                 % (axis, m.ndim)) from e
            return m[tuple(indexer)]

        def roialign_ref(X, R):
            # `angle` denotes counter-clockwise rotation. Rotate the input
            # feature map in the opposite (clockwise) direction and perform
            # standard RoIAlign. We assume all RoIs have the same angle.
            #
            # Also note that we need to have our own version of np.rot90,
            # since axes isn't an argument until 1.12.0 and doesn't exist
            # on all tested platforms.
            norm_angle = (angle + 360) % 360
            X_ref = roialign_rot90(X, k=-norm_angle / 90, axes=(2, 3))
            # Rotate RoIs clockwise wrt the center of the input feature
            # map to make them horizontal and convert from
            # [center_x center_y w h] to [x1 y1 x2 y2] format.
            roi_x, roi_y = R[:, 1], R[:, 2]
            if norm_angle == 90:
                new_roi_x = H - roi_y - 1
                new_roi_y = roi_x
            elif norm_angle == 180:
                new_roi_x = W - roi_x - 1
                new_roi_y = H - roi_y - 1
            elif norm_angle == 270:
                new_roi_x = roi_y
                new_roi_y = W - roi_x - 1
            else:
                # angle=0 never reaches here (sampled_from excludes it).
                raise NotImplementedError
            R_ref = copy.deepcopy(R[:, 0:5])
            R_ref[:, 1], R_ref[:, 2] = new_roi_x, new_roi_y
            R_ref[:, 1:5] = self.bbox_xywh_to_xyxy(R_ref[:, 1:5])
            ref_op = core.CreateOperator(
                "RoIAlign",
                ["X_ref", "R_ref"],
                ["Y_ref"],
                pooled_h=pooled_size,
                pooled_w=pooled_size,
                sampling_ratio=0,
            )
            workspace.FeedBlob("X_ref", X_ref)
            workspace.FeedBlob("R_ref", R_ref)
            workspace.RunOperatorOnce(ref_op)
            return [workspace.FetchBlob("Y_ref")]

        self.assertReferenceChecks(
            device_option=gc, op=op, inputs=[X, R], reference=roialign_ref
        )
        if core.IsGPUDeviceType(gc.device_type):
            self.assertGradientChecks(gc, op, [X, R], 0, [0])
if __name__ == '__main__':
import unittest
unittest.main() |
5,101 | create from axis angle | # -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# Based on the quaternion class in the visvis project.
import numpy as np
class Quaternion(object):
    """Quaternion(w=1, x=0, y=0, z=0, normalize=True)

    A quaternion is a mathematically convenient way to
    describe rotations.
    """

    def __init__(self, w=1, x=0, y=0, z=0, normalize=True):
        self.w = float(w)
        self.x, self.y, self.z = float(x), float(y), float(z)
        if normalize:
            self._normalize()

    def __repr__(self):
        return "<Quaternion object %1.3g + %1.3gi + %1.3gj + %1.3gk>" % (
            self.w, self.x, self.y, self.z)

    def copy(self):
        """Create an exact copy of this quaternion."""
        return Quaternion(self.w, self.x, self.y, self.z, False)

    def norm(self):
        """Returns the norm of the quaternion

        norm = sqrt(w**2 + x**2 + y**2 + z**2)
        """
        # (Docstring fixed: the returned value is the square root of the
        # sum of squares, not the sum of squares itself.)
        tmp = self.w**2 + self.x**2 + self.y**2 + self.z**2
        return tmp**0.5

    def _normalize(self):
        """Make the quaternion unit length (in place).

        Raises ValueError for the zero quaternion, which has no direction.
        """
        # Get length
        L = self.norm()
        if not L:
            raise ValueError('Quaternion cannot have 0-length.')
        # Correct
        self.w /= L
        self.x /= L
        self.y /= L
        self.z /= L

    def normalize(self):
        """Returns a normalized (unit length) version of the quaternion."""
        new = self.copy()
        new._normalize()
        return new

    def conjugate(self):
        """Obtain the conjugate of the quaternion.

        This is simply the same quaternion but with the sign of the
        imaginary (vector) parts reversed.
        """
        new = self.copy()
        new.x *= -1
        new.y *= -1
        new.z *= -1
        return new

    def inverse(self):
        """Returns q.conjugate()/q.norm()**2

        So if the quaternion is unit length, it is the same
        as the conjugate.
        """
        new = self.conjugate()
        tmp = self.norm()**2
        new.w /= tmp
        new.x /= tmp
        new.y /= tmp
        new.z /= tmp
        return new

    def exp(self):
        """Returns the exponent of the quaternion.

        For q = (w, v) with vector part v = (x, y, z):
            exp(q) = exp(w) * (cos(|v|), sin(|v|) * v / |v|)

        BUGFIX: the previous implementation used the *squared* vector norm
        (x**2 + y**2 + z**2) where the magnitude |v| is required, and
        divided by zero for pure-real quaternions.
        """
        # |v|, the magnitude of the vector part.
        vecNorm = (self.x**2 + self.y**2 + self.z**2) ** 0.5
        wPart = np.exp(self.w)
        q = Quaternion()
        q.w = wPart * np.cos(vecNorm)
        if vecNorm > 0:
            scale = wPart * np.sin(vecNorm) / vecNorm
        else:
            # Pure-real quaternion: the vector part of exp(q) is zero.
            scale = 0.0
        q.x = self.x * scale
        q.y = self.y * scale
        q.z = self.z * scale
        return q

    def log(self):
        """Returns the natural logarithm of the quaternion.

        For q = (w, v) with vector part v = (x, y, z):
            log(q) = (log(|q|), arccos(w / |q|) * v / |v|)

        BUGFIX: the previous implementation used the *squared* vector norm
        instead of |v|, multiplied the vector part by a spurious
        log(|q|) factor, and divided by zero for pure-real quaternions.
        """
        # Init
        norm = self.norm()
        vecNorm = (self.x**2 + self.y**2 + self.z**2) ** 0.5
        # Clamp to the valid arccos domain against rounding noise.
        tmp = max(min(self.w / norm, 1.), -1.)
        q = Quaternion()
        # Calculate
        q.w = np.log(norm)
        if vecNorm > 0:
            scale = np.arccos(tmp) / vecNorm
        else:
            # Pure-real quaternion: the vector part of log(q) is zero.
            scale = 0.0
        q.x = self.x * scale
        q.y = self.y * scale
        q.z = self.z * scale
        return q

    def __add__(self, q):
        """Add quaternions."""
        new = self.copy()
        new.w += q.w
        new.x += q.x
        new.y += q.y
        new.z += q.z
        return new

    def __sub__(self, q):
        """Subtract quaternions."""
        new = self.copy()
        new.w -= q.w
        new.x -= q.x
        new.y -= q.y
        new.z -= q.z
        return new

    def __mul__(self, q2):
        """Multiply two quaternions (Hamilton product)."""
        new = Quaternion()
        q1 = self
        new.w = q1.w*q2.w - q1.x*q2.x - q1.y*q2.y - q1.z*q2.z
        new.x = q1.w*q2.x + q1.x*q2.w + q1.y*q2.z - q1.z*q2.y
        new.y = q1.w*q2.y + q1.y*q2.w + q1.z*q2.x - q1.x*q2.z
        new.z = q1.w*q2.z + q1.z*q2.w + q1.x*q2.y - q1.y*q2.x
        return new

    def rotate_point(self, p):
        """Rotate a Point instance (anything indexable as p[0..2])
        using this quaternion; returns an (x, y, z) tuple."""
        # Prepare: embed the point as a pure quaternion.
        p = Quaternion(0, p[0], p[1], p[2], False)  # Do not normalize!
        q1 = self.normalize()
        q2 = self.inverse()
        # Apply rotation: q * p * q^-1
        r = (q1*p)*q2
        # Make point and return
        return r.x, r.y, r.z

    def get_matrix(self):
        """Create a 4x4 homography matrix that represents the rotation
        of the quaternion (column-major rotation block, last row/column
        form the homogeneous part).
        """
        # Init matrix (remember, a matrix, not an array)
        a = np.zeros((4, 4), dtype=np.float32)
        w, x, y, z = self.w, self.x, self.y, self.z
        # First row
        a[0, 0] = - 2.0 * (y * y + z * z) + 1.0
        a[1, 0] = + 2.0 * (x * y + z * w)
        a[2, 0] = + 2.0 * (x * z - y * w)
        a[3, 0] = 0.0
        # Second row
        a[0, 1] = + 2.0 * (x * y - z * w)
        a[1, 1] = - 2.0 * (x * x + z * z) + 1.0
        a[2, 1] = + 2.0 * (z * y + x * w)
        a[3, 1] = 0.0
        # Third row
        a[0, 2] = + 2.0 * (x * z + y * w)
        a[1, 2] = + 2.0 * (y * z - x * w)
        a[2, 2] = - 2.0 * (x * x + y * y) + 1.0
        a[3, 2] = 0.0
        # Fourth row
        a[0, 3] = 0.0
        a[1, 3] = 0.0
        a[2, 3] = 0.0
        a[3, 3] = 1.0
        return a

    def get_axis_angle(self):
        """Get the axis-angle representation of the quaternion.
        (The angle is in radians)

        Returns (angle, ax, ay, az) with a unit axis; an arbitrary axis
        (1, 0, 0) is returned for the identity rotation.
        """
        # Init; clamp w against rounding noise outside [-1, 1].
        angle = 2 * np.arccos(max(min(self.w, 1.), -1.))
        scale = (self.x**2 + self.y**2 + self.z**2)**0.5
        # Calc axis
        if scale:
            ax = self.x / scale
            ay = self.y / scale
            az = self.z / scale
        else:
            # No rotation, so arbitrary axis
            ax, ay, az = 1, 0, 0
        # Return
        return angle, ax, ay, az

    @classmethod
    def METHOD_NAME(cls, angle, ax, ay, az, degrees=False):
        """Classmethod to create a quaternion from an axis-angle representation.
        (angle should be in radians, unless ``degrees=True``).

        The axis need not be unit length; the constructor normalizes
        the resulting quaternion.
        """
        if degrees:
            angle = np.radians(angle)
        # Map negative angles into [0, 2*pi).
        while angle < 0:
            angle += np.pi*2
        angle2 = angle/2.0
        sinang2 = np.sin(angle2)
        return Quaternion(np.cos(angle2), ax*sinang2, ay*sinang2, az*sinang2)

    @classmethod
    def create_from_euler_angles(cls, rx, ry, rz, degrees=False):
        """Classmethod to create a quaternion given the euler angles."""
        if degrees:
            rx, ry, rz = np.radians([rx, ry, rz])
        # Obtain quaternions
        # NOTE(review): `qx` places its sine in the z component and `qz` in
        # the x component, i.e. the axis naming looks swapped. Behavior is
        # kept as-is for compatibility — confirm against callers before
        # changing.
        qx = Quaternion(np.cos(rx/2), 0, 0, np.sin(rx/2))
        qy = Quaternion(np.cos(ry/2), 0, np.sin(ry/2), 0)
        qz = Quaternion(np.cos(rz/2), np.sin(rz/2), 0, 0)
        # Almost done
        return qx*qy*qz
5,102 | transform | # Author: Denis Engemann <denis.engemann@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Jean-Remi King <jeanremi.king@gmail.com>
#
# License: BSD-3-Clause
from collections import Counter
import numpy as np
from .mixin import TransformerMixin, EstimatorMixin
from .base import _set_cv
from .._fiff.pick import _picks_to_idx, pick_types, pick_info
from ..parallel import parallel_func
from ..utils import logger, verbose
class EMS(TransformerMixin, EstimatorMixin):
    """Transformer to compute event-matched spatial filters.

    This version of EMS :footcite:`SchurgerEtAl2013` operates on the entire
    time course. No time
    window needs to be specified. The result is a spatial filter at each
    time point and a corresponding time course. Intuitively, the result
    gives the similarity between the filter at each time point and the
    data vector (sensors) at that time point.

    .. note:: EMS only works for binary classification.

    Attributes
    ----------
    filters_ : ndarray, shape (n_channels, n_times)
        The set of spatial filters.
    classes_ : ndarray, shape (n_classes,)
        The target classes.

    References
    ----------
    .. footbibliography::
    """

    def __repr__(self):  # noqa: D105
        # filters_ only exists after a successful fit().
        if hasattr(self, "filters_"):
            return "<EMS: fitted with %i filters on %i classes.>" % (
                len(self.filters_),
                len(self.classes_),
            )
        else:
            return "<EMS: not fitted.>"

    def fit(self, X, y):
        """Fit the spatial filters.

        .. note : EMS is fitted on data normalized by channel type before the
        fitting of the spatial filters.

        Parameters
        ----------
        X : array, shape (n_epochs, n_channels, n_times)
            The training data.
        y : array of int, shape (n_epochs)
            The target classes.

        Returns
        -------
        self : instance of EMS
            Returns self.

        Raises
        ------
        ValueError
            If ``y`` contains a number of classes other than two.
        """
        classes = np.unique(y)
        if len(classes) != 2:
            raise ValueError("EMS only works for binary classification.")
        self.classes_ = classes
        # Filter = difference of the two class means, then L2-normalized
        # across channels independently at each time point.
        filters = X[y == classes[0]].mean(0) - X[y == classes[1]].mean(0)
        filters /= np.linalg.norm(filters, axis=0)[None, :]
        self.filters_ = filters
        return self

    def METHOD_NAME(self, X):
        """Transform the data by the spatial filters.

        Parameters
        ----------
        X : array, shape (n_epochs, n_channels, n_times)
            The input data.

        Returns
        -------
        X : array, shape (n_epochs, n_times)
            The input data transformed by the spatial filters.
        """
        # Project each epoch onto the filters: sum over the channel axis.
        Xt = np.sum(X * self.filters_, axis=1)
        return Xt
@verbose
def compute_ems(
    epochs, conditions=None, picks=None, n_jobs=None, cv=None, verbose=None
):
    """Compute event-matched spatial filter on epochs.

    This version of EMS :footcite:`SchurgerEtAl2013` operates on the entire
    time course. No time
    window needs to be specified. The result is a spatial filter at each
    time point and a corresponding time course. Intuitively, the result
    gives the similarity between the filter at each time point and the
    data vector (sensors) at that time point.

    .. note : EMS only works for binary classification.

    .. note : The present function applies a leave-one-out cross-validation,
              following Schurger et al's paper. However, we recommend using
              a stratified k-fold cross-validation. Indeed, leave-one-out tends
              to overfit and cannot be used to estimate the variance of the
              prediction within a given fold.

    .. note : Because of the leave-one-out, this function needs an equal
              number of epochs in each of the two conditions.

    Parameters
    ----------
    epochs : instance of mne.Epochs
        The epochs.
    conditions : list of str | None, default None
        If a list of strings, strings must match the epochs.event_id's key as
        well as the number of conditions supported by the objective_function.
        If None keys in epochs.event_id are used.
    %(picks_good_data)s
    %(n_jobs)s
    cv : cross-validation object | str | None, default LeaveOneOut
        The cross-validation scheme.
    %(verbose)s

    Returns
    -------
    surrogate_trials : ndarray, shape (n_trials // 2, n_times)
        The trial surrogates.
    mean_spatial_filter : ndarray, shape (n_channels, n_times)
        The set of spatial filters.
    conditions : ndarray, shape (n_classes,)
        The conditions used. Values correspond to original event ids.

    References
    ----------
    .. footbibliography::
    """
    logger.info("...computing surrogate time series. This can take some time")

    # Default to leave-one-out cv
    cv = "LeaveOneOut" if cv is None else cv
    picks = _picks_to_idx(epochs.info, picks)

    # Leave-one-out requires balanced classes, see the note above.
    if not len(set(Counter(epochs.events[:, 2]).values())) == 1:
        raise ValueError(
            "The same number of epochs is required by "
            "this function. Please consider "
            "`epochs.equalize_event_counts`"
        )

    if conditions is None:
        conditions = epochs.event_id.keys()
        epochs = epochs.copy()
    else:
        # Sub-select the requested conditions before filtering.
        epochs = epochs[conditions]
    epochs.drop_bad()

    if len(conditions) != 2:
        raise ValueError(
            "Currently this function expects exactly 2 "
            "conditions but you gave me %i" % len(conditions)
        )

    ev = epochs.events[:, 2]
    # Special care to avoid path dependent mappings and orders
    conditions = list(sorted(conditions))
    cond_idx = [np.where(ev == epochs.event_id[k])[0] for k in conditions]

    info = pick_info(epochs.info, picks)
    data = epochs.get_data(picks=picks)

    # Scale (z-score) the data by channel type
    # XXX the z-scoring is applied outside the CV, which is not standard.
    for ch_type in ["mag", "grad", "eeg"]:
        if ch_type in epochs:
            # FIXME should be applied to all sort of data channels
            if ch_type == "eeg":
                this_picks = pick_types(info, meg=False, eeg=True)
            else:
                this_picks = pick_types(info, meg=ch_type, eeg=False)
            data[:, this_picks] /= np.std(data[:, this_picks])

    # Setup cross-validation. Need to use _set_cv to deal with sklearn
    # deprecation of cv objects.
    y = epochs.events[:, 2]
    _, cv_splits = _set_cv(cv, "classifier", X=y, y=y)

    parallel, p_func, n_jobs = parallel_func(_run_ems, n_jobs=n_jobs)
    # FIXME this parallelization should be removed.
    #   1) it's numpy computation so it's already efficient,
    #   2) it duplicates the data in RAM,
    #   3) the computation is already super fast.
    out = parallel(
        p_func(_ems_diff, data, cond_idx, train, test) for train, test in cv_splits
    )

    surrogate_trials, spatial_filter = zip(*out)
    surrogate_trials = np.array(surrogate_trials)
    # Average the per-fold filters into a single mean spatial filter.
    spatial_filter = np.mean(spatial_filter, axis=0)

    return surrogate_trials, spatial_filter, epochs.events[:, 2]
def _ems_diff(data0, data1):
"""Compute the default diff objective function."""
return np.mean(data0, axis=0) - np.mean(data1, axis=0)
def _run_ems(objective_function, data, cond_idx, train, test):
"""Run EMS."""
d = objective_function(*(data[np.intersect1d(c, train)] for c in cond_idx))
d /= np.sqrt(np.sum(d**2, axis=0))[None, :]
# compute surrogates
return np.sum(data[test[0]] * d, axis=0), d |
5,103 | construct extra parameters | # -*- coding: utf-8 -*-
#
# 2017-07-13 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# Add period to key uri for TOTP token
# 2016-05-21 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# urlencode token isuuer.
# 2015-07-01 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# Add SHA Algorithms to QR Code
# 2014-12-01 Cornelius Kölbel <cornelius@privacyidea.org>
# Migrate to flask
#
# * May 08, 2014 Cornelius Kölbel
# * 2014-09-12 added Motp URL. Cornelius Kölbel
#
# License: AGPLv3
# contact: http://www.privacyidea.org
#
# Copyright (C) 2010 - 2014 LSE Leading Security Experts GmbH
# License: AGPLv3
# contact: http://www.linotp.org
# http://www.lsexperts.de
# linotp@lsexperts.de
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
It generates the URL for smartphone apps like
google authenticator
oath token
This only depends on the ConfigPolicy.
"""
import binascii
import logging
from urllib.parse import quote
from privacyidea.lib.log import log_with
from privacyidea.lib.user import User
from privacyidea.lib.utils import to_byte_string, b32encode_and_unicode, parse_time_sec_int
log = logging.getLogger(__name__)
MAX_QRCODE_LEN = 180
def METHOD_NAME(extra_data):
    """
    Given a dictionary of extra key-value pairs (all str),
    return a string that may be appended to a google authenticator / oathtoken URL.
    Values that are non-strings will be converted to str.
    Keys and values are converted to UTF-8 and urlquoted.

    :return: a string (may be empty if ``extra_data`` is empty)
    """
    # URL-quote every key/value pair after converting it to bytes.
    pairs = [
        '{key}={value}'.format(key=quote(to_byte_string(k)),
                               value=quote(to_byte_string(v)))
        for k, v in extra_data.items()
    ]
    if not pairs:
        return ''
    # Leading '&' so the result can be appended directly to a query string.
    return '&' + '&'.join(pairs)
@log_with(log)
def create_motp_url(key, user=None, realm=None, serial=""):
    """
    This creates the motp url as described at
    http://huseynov.com/index.php?post=motp-vs-google-authenticator-and-a-new-otp-app

    The format is:
    motp://SecureSite:alice@wonder.land?secret=JBSWY3DPEHPK3PXP
    """
    # For Token2 the OTPKEY is hexencoded, not base32!
    otpkey = key
    # TODO: Migration: Policy — the label should eventually come from
    # Policy.get_tokenlabel(user, realm, serial).
    label = "mylabel"
    allowed_label_len = 20
    # Truncate and URL-quote the label in one step.
    url_label = quote(label[:allowed_label_len])
    return "motp://privacyidea:{0!s}?secret={1!s}".format(url_label, otpkey)
@log_with(log)
def create_google_authenticator_url(key=None, user=None,
                                    realm=None, tokentype="hotp", period=30,
                                    serial="mylabel", tokenlabel="<s>",
                                    hash_algo="SHA1", digits="6",
                                    issuer="privacyIDEA", user_obj=None,
                                    extra_data=None):
    """
    This creates the google authenticator URL.
    This url may only be 119 characters long.
    If the URL would be longer, we shorten the username

    We expect the key to be hexlified!

    :param key: hexlified OTP seed
    :param tokentype: "hotp", "totp" or "daypassword"
    :param period: time step for time-based token types
    :param tokenlabel: label template; supports deprecated <s>/<u>/<r>
        placeholders as well as {serial}/{user}/{realm}/{givenname}/{surname}
    :param hash_algo: hash algorithm; omitted from the URL if SHA1
    :param extra_data: dict of additional query parameters
    :return: an otpauth:// URL as string
    """
    extra_data = extra_data or {}
    # policy depends on some lib.util
    user_obj = user_obj or User()
    if tokentype.lower() == "hotp":
        # HOTP tokens carry an initial counter in the URL.
        tokentype = "hotp"
        counter = "counter=1&"
    else:
        counter = ""
    # We need realm und user to be a string
    realm = realm or ""
    user = user or ""
    key_bin = binascii.unhexlify(key)
    # also strip the padding =, as it will get problems with the google app.
    otpkey = b32encode_and_unicode(key_bin).strip('=')
    # Budget for the label: whatever of MAX_QRCODE_LEN is left after the
    # fixed URL skeleton.
    base_len = len("otpauth://{0!s}/?secret={1!s}&counter=1".format(tokentype, otpkey))
    allowed_label_len = MAX_QRCODE_LEN - base_len
    log.debug("we have got {0!s} characters left for the token label".format(
        str(allowed_label_len)))
    # Deprecated placeholder syntax (<s>/<u>/<r>), kept for compatibility.
    label = tokenlabel.replace("<s>",
                               serial).replace("<u>",
                                               user).replace("<r>", realm)
    # Modern str.format placeholders for label and issuer.
    label = label.format(serial=serial, user=user, realm=realm,
                         givenname=user_obj.info.get("givenname", ""),
                         surname=user_obj.info.get("surname", ""))
    issuer = issuer.format(serial=serial, user=user, realm=realm,
                           givenname=user_obj.info.get("givenname", ""),
                           surname=user_obj.info.get("surname", ""))
    label = label[0:allowed_label_len]
    url_label = quote(label.encode("utf-8"))
    url_issuer = quote(issuer.encode("utf-8"))
    if hash_algo.lower() != "sha1":
        hash_algo = "algorithm={0!s}&".format(hash_algo)
    else:
        # If the hash_algo is SHA1, we do not add it to the QR code to keep
        # the QR code simpler
        hash_algo = ""
    if tokentype.lower() == "totp":
        period = "period={0!s}&".format(period)
    elif tokentype.lower() == "daypassword":
        # daypassword periods may be given as time strings; convert to seconds.
        period = "period={0!s}&".format(parse_time_sec_int(period))
    else:
        period = ""
    return ("otpauth://{tokentype!s}/{label!s}?secret={secret!s}&"
            "{counter!s}{hash!s}{period!s}"
            "digits={digits!s}&"
            "issuer={issuer!s}{extra}".format(tokentype=tokentype,
                                              label=url_label, secret=otpkey,
                                              hash=hash_algo, period=period,
                                              digits=digits, issuer=url_issuer,
                                              counter=counter,
                                              extra=METHOD_NAME(extra_data)))
@log_with(log)
def create_oathtoken_url(otpkey=None, user=None, realm=None,
                         type="hotp", serial="mylabel", tokenlabel="<s>",
                         extra_data=None):
    """Create an oathtoken:/// enrollment URL for the oathtoken app."""
    # TOTP tokens get the additional timeBased flag in the URL.
    timebased = "&timeBased=true" if "totp" == type.lower() else ""
    # We need realm und user to be a string
    user = user or ""
    realm = realm or ""
    extra_data = extra_data or {}
    # Fill the deprecated <s>/<u>/<r> placeholders in the label template.
    label = tokenlabel.replace("<s>",
                               serial).replace("<u>",
                                               user).replace("<r>", realm)
    return "oathtoken:///addToken?name={0!s}&lockdown=true&key={1!s}{2!s}{3!s}".format(
        quote(label),
        otpkey,
        timebased,
        METHOD_NAME(extra_data)
    )
|
5,104 | create test sa synchronize | import rebound
import unittest
import warnings
class TestSimulationArchiveMatrix(unittest.TestCase):
    # Intentionally empty: the test_* methods are generated for every
    # parameter combination and attached dynamically at module import time.
    pass
def runSimulation(test, tmax=40., restart=False, keep_unsynchronized=1, interval=None, safe_mode=True, integrator="ias15", G=1., testparticle=0, simulationarchive_version=3):
    """Run a small three-body test simulation, optionally restarting it
    from the SimulationArchive written by a previous call.

    :param test: the TestCase instance, used for warning assertions
    :param tmax: integrate up to this time
    :param restart: if True, load state from "test.bin" instead of
        building a fresh simulation
    :param keep_unsynchronized: on restart, whether to keep the
        unsynchronized (pre-synchronize) state from the archive
    :param interval: if set, write a SimulationArchive every `interval`
    :param safe_mode: if False, disable WHFast/Mercurius safe mode
    :param testparticle: 0 = none, 1 = passive, 2 = semi-active
    :return: the integrated rebound.Simulation
    """
    if restart:
        # Restore from the archive produced by an earlier run.
        if keep_unsynchronized == 1:
            sim = rebound.Simulation("test.bin")
        else:
            sa = rebound.SimulationArchive("test.bin")
            sim = sa.getSimulation(sa.tmax, keep_unsynchronized=0)
    else:
        sim = rebound.Simulation()
        sim.G = G
        sim.add(m=1)
        sim.add(m=1e-3, a=1, e=0.1, omega=0.1, M=0.1, inc=0.1, Omega=0.1)
        # Hyperbolic orbit (a < 0, e > 1) to exercise unbound particles.
        sim.add(m=1e-3, a=-2, e=1.1, omega=0.1, M=0.1, inc=0.1, Omega=0.1)
        sim.integrator = integrator
        sim.dt = 0.1313
        if simulationarchive_version == 1:
            sim.simulationarchive_version = 1
        if not safe_mode:
            # BUGFIX: safe_mode=False is meant to *disable* safe mode, but
            # the previous code set the flags to 1 (enabled) — which is the
            # default anyway, so the parameter silently had no effect.
            sim.ri_whfast.safe_mode = 0
            sim.ri_mercurius.safe_mode = 0
        if testparticle > 0:
            if testparticle == 1:
                sim.testparticle_type = 0  # passive test particle
            if testparticle == 2:
                sim.testparticle_type = 1  # semi-active test particle
            sim.add(m=1e-4, a=1.2, e=0.04, omega=0.21, M=1.41, inc=0.21, Omega=1.1)
            sim.N_active = sim.N - 1  # one test particle
    if interval:
        sim.automateSimulationArchive("test.bin", interval, deletefile=True)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        sim.integrate(tmax, exact_finish_time=0)
    # A massive (type 0) test particle is expected to raise exactly one
    # warning during integration; all other setups must stay silent.
    if testparticle == 1:
        test.assertEqual(1, len(w))
    else:
        test.assertEqual(0, len(w))
    return sim
def compareSim(test, sim1, sim2):
    """Assert that two simulations are bitwise identical: same global state
    and identical particle-by-particle properties."""
    # Global simulation state, in the same order as before.
    for attr in ("N", "N_active", "N_var", "t", "G"):
        test.assertEqual(getattr(sim1, attr), getattr(sim2, attr))
    # Per-particle physical state.
    for i in range(sim1.N):
        p1 = sim1.particles[i]
        p2 = sim2.particles[i]
        for attr in ("r", "m", "x", "y", "z", "vx", "vy", "vz"):
            test.assertEqual(getattr(p1, attr), getattr(p2, attr))
def create_test_sa_restart(params):
    """Build a test method verifying that continuing a run from its
    SimulationArchive reproduces an uninterrupted run exactly."""
    def doTest(self):
        # First run to t=40 while writing the archive.
        runSimulation(self, 40., restart=False, interval=10., **params)
        # Continue from the archive to t=80, and compare against a fresh
        # uninterrupted run to the same time.
        continued = runSimulation(self, 80., restart=True, **params)
        fresh = runSimulation(self, 80., restart=False, **params)
        compareSim(self, continued, fresh)
    return doTest
def METHOD_NAME(params):
    """Build a test method checking that a simulation restored from the
    archive matches the live one — both immediately and after integrating
    both for a further 12 time units."""
    def doTest2(self):
        live = runSimulation(self, 40., restart=False, interval=10., **params)
        # Restore the final archive snapshot the same way the matrix
        # parameter dictates.
        if params['keep_unsynchronized'] == 1:
            restored = rebound.Simulation("test.bin")
        else:
            archive = rebound.SimulationArchive("test.bin")
            restored = archive.getSimulation(archive.tmax, keep_unsynchronized=0)
        compareSim(self, live, restored)
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            live.integrate(live.t + 12.)
            restored.integrate(restored.t + 12.)
            # One warning per simulation for massive passive test particles.
            expected = 2 if params["testparticle"] == 1 else 0
            self.assertEqual(expected, len(w))
            compareSim(self, live, restored)
    return doTest2
# Build the full parameter matrix and attach one restart test and one
# synchronize test per combination to TestSimulationArchiveMatrix.
for integrator in ["ias15","whfast","leapfrog","janus","mercurius","saba","sabacl4", "saba(10,6,4)"]:
    for safe_mode in [True,False]:
        for G in [1.,0.9]:
            for testparticle in [0,1,2]: # no test particle, passive, semi-active
                for keep_unsynchronized in [1,0]:
                    for simulationarchive_version in [1,3]:
                        if simulationarchive_version==1:
                            # Skip integrators that are not supported by the
                            # old version-1 archive format.
                            if integrator=="leapfrog" or integrator=="saba" or integrator=="mercurius" or integrator=="sabacl4" or integrator=="saba(10,6,4)":
                                continue
                        params = {'safe_mode':safe_mode,
                                  'integrator':integrator,
                                  'G':G,
                                  'testparticle':testparticle,
                                  'keep_unsynchronized':keep_unsynchronized,
                                  'simulationarchive_version':simulationarchive_version}
                        # Restart test: method name encodes every parameter.
                        test_method = create_test_sa_restart(params)
                        name = "test_sa_restart"
                        for key in params:
                            name += "_"+key+":"+str(params[key])
                        test_method.__name__ = name
                        setattr(TestSimulationArchiveMatrix, name,test_method)
                        # Synchronize test for the same parameter set.
                        test_method = METHOD_NAME(params)
                        name = "test_sa_synchronize"
                        for key in params:
                            name += "_"+key+":"+str(params[key])
                        test_method.__name__ = name
                        setattr(TestSimulationArchiveMatrix, name,test_method)
if __name__ == "__main__":
unittest.main() |
5,105 | test zero eps | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import copy
import unittest
import torch
from fairseq.criterions.cross_entropy import CrossEntropyCriterion
from fairseq.criterions.label_smoothed_cross_entropy import LabelSmoothedCrossEntropyCriterion
import tests.utils as test_utils
class TestLabelSmoothing(unittest.TestCase):
    """Tests relating LabelSmoothedCrossEntropyCriterion to plain
    CrossEntropyCriterion on a tiny dummy translation task."""

    def setUp(self):
        # build dictionary
        self.d = test_utils.dummy_dictionary(3)
        vocab = len(self.d)
        self.assertEqual(vocab, 4 + 3)  # 4 special + 3 tokens
        self.assertEqual(self.d.pad(), 1)
        self.assertEqual(self.d.eos(), 2)
        self.assertEqual(self.d.unk(), 3)
        pad, eos, unk, w1, w2, w3 = 1, 2, 3, 4, 5, 6  # noqa: F841
        # build dataset
        self.data = [
            # the first batch item has padding
            {'source': torch.LongTensor([w1, eos]), 'target': torch.LongTensor([w1, eos])},
            {'source': torch.LongTensor([w1, eos]), 'target': torch.LongTensor([w1, w1, eos])},
        ]
        self.sample = next(test_utils.dummy_dataloader(self.data))
        # build model
        self.args = argparse.Namespace()
        self.args.sentence_avg = False
        # Fixed output distribution of the dummy model: one row per target
        # position, 7 columns covering the whole vocabulary (the header
        # below lists 6 symbols for 7 columns — presumably column 0 is the
        # extra special symbol; TODO confirm against dummy_dictionary).
        self.args.probs = torch.FloatTensor([
            # pad eos unk w1 w2 w3
            [0.05, 0.05, 0.1, 0.05, 0.3, 0.4, 0.05],
            [0.05, 0.10, 0.2, 0.05, 0.2, 0.3, 0.10],
            [0.05, 0.15, 0.3, 0.05, 0.1, 0.2, 0.15],
        ]).unsqueeze(0).expand(2, 3, 7)  # add batch dimension
        self.task = test_utils.TestTranslationTask.setup_task(self.args, self.d, self.d)
        self.model = self.task.build_model(self.args)

    def test_nll_loss(self):
        """The smoothed criterion must report the exact NLL loss of the
        plain criterion in its logging output."""
        self.args.label_smoothing = 0.1
        nll_crit = CrossEntropyCriterion(self.args, self.task)
        smooth_crit = LabelSmoothedCrossEntropyCriterion(self.args, self.task)
        nll_loss, nll_sample_size, nll_logging_output = nll_crit(self.model, self.sample)
        smooth_loss, smooth_sample_size, smooth_logging_output = smooth_crit(self.model, self.sample)
        self.assertLess(abs(nll_loss - nll_logging_output['loss']), 1e-6)
        self.assertLess(abs(nll_loss - smooth_logging_output['nll_loss']), 1e-6)

    def test_padding(self):
        """Loss over a padded batch must equal the sum of the per-item
        losses computed without padding."""
        self.args.label_smoothing = 0.1
        crit = LabelSmoothedCrossEntropyCriterion(self.args, self.task)
        loss, _, logging_output = crit(self.model, self.sample)

        def get_one_no_padding(idx):
            # create a new sample with just a single batch item so that there's
            # no padding
            sample1 = next(test_utils.dummy_dataloader([self.data[idx]]))
            args1 = copy.copy(self.args)
            args1.probs = args1.probs[idx, :, :].unsqueeze(0)
            model1 = self.task.build_model(args1)
            loss1, _, _ = crit(model1, sample1)
            return loss1

        loss1 = get_one_no_padding(0)
        loss2 = get_one_no_padding(1)
        self.assertAlmostEqual(loss, loss1 + loss2)

    def test_reduction(self):
        """Reduced loss must equal the sum of the unreduced element losses."""
        self.args.label_smoothing = 0.1
        crit = LabelSmoothedCrossEntropyCriterion(self.args, self.task)
        loss, _, logging_output = crit(self.model, self.sample, reduce=True)
        unreduced_loss, _, _ = crit(self.model, self.sample, reduce=False)
        self.assertAlmostEqual(loss, unreduced_loss.sum())

    def METHOD_NAME(self):
        """With label_smoothing=0 the smoothed criterion must reduce to
        the plain cross-entropy criterion."""
        self.args.label_smoothing = 0.0
        nll_crit = CrossEntropyCriterion(self.args, self.task)
        smooth_crit = LabelSmoothedCrossEntropyCriterion(self.args, self.task)
        nll_loss, nll_sample_size, nll_logging_output = nll_crit(self.model, self.sample)
        smooth_loss, smooth_sample_size, smooth_logging_output = smooth_crit(self.model, self.sample)
        self.assertAlmostEqual(nll_loss, smooth_loss)

    def assertAlmostEqual(self, t1, t2):
        # Tensor-aware near-equality helper (shadows unittest's scalar one).
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertLess((t1 - t2).abs().max(), 1e-6)
if __name__ == '__main__':
unittest.main() |
5,106 | location | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetDataManagerResult',
'AwaitableGetDataManagerResult',
'get_data_manager',
'get_data_manager_output',
]
@pulumi.output_type
class GetDataManagerResult:
    """
    The DataManager resource.
    """
    # NOTE: pulumi-generated output type. ``METHOD_NAME`` corresponds to the
    # resource's "location" output property throughout this class.
    def __init__(__self__, etag=None, id=None, METHOD_NAME=None, name=None, sku=None, tags=None, type=None):
        # Each argument is type-checked before being registered with pulumi;
        # a mismatch indicates a malformed invoke response.
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", METHOD_NAME)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if sku and not isinstance(sku, dict):
            raise TypeError("Expected argument 'sku' to be a dict")
        pulumi.set(__self__, "sku", sku)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """
        Etag of the Resource.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The Resource Id.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def METHOD_NAME(self) -> str:
        """
        The location of the resource. This will be one of the supported and registered Azure Geo Regions (e.g. West US, East
        US, Southeast Asia, etc.). The geo region of a resource cannot be changed once it is created, but if an identical geo
        region is specified on update the request will succeed.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The Resource Name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def sku(self) -> Optional['outputs.SkuResponse']:
        """
        The sku type.
        """
        return pulumi.get(self, "sku")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        The list of key value pairs that describe the resource. These tags can be used in viewing and grouping this resource
        (across resource groups).
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The Resource type.
        """
        return pulumi.get(self, "type")
class AwaitableGetDataManagerResult(GetDataManagerResult):
    # Pulumi's sync-over-async adapter: ``__await__`` never actually suspends
    # (the ``if False: yield`` only makes this function a generator) and
    # immediately returns a plain GetDataManagerResult.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetDataManagerResult(
            etag=self.etag,
            id=self.id,
            METHOD_NAME=self.METHOD_NAME,
            name=self.name,
            sku=self.sku,
            tags=self.tags,
            type=self.type)
def get_data_manager(data_manager_name: Optional[str] = None,
                     resource_group_name: Optional[str] = None,
                     opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDataManagerResult:
    """
    Gets information about the specified data manager resource.
    Azure REST API version: 2019-06-01.

    :param str data_manager_name: The name of the DataManager Resource within the specified resource group. DataManager names must be between 3 and 24 characters in length and use any alphanumeric and underscore only
    :param str resource_group_name: The Resource Group Name
    """
    __args__ = dict()
    __args__['dataManagerName'] = data_manager_name
    __args__['resourceGroupName'] = resource_group_name
    # Merge caller-supplied invoke options with provider defaults, then invoke
    # the provider function synchronously.
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:hybriddata:getDataManager', __args__, opts=opts, typ=GetDataManagerResult).value
    return AwaitableGetDataManagerResult(
        etag=pulumi.get(__ret__, 'etag'),
        id=pulumi.get(__ret__, 'id'),
        METHOD_NAME=pulumi.get(__ret__, 'location'),
        name=pulumi.get(__ret__, 'name'),
        sku=pulumi.get(__ret__, 'sku'),
        tags=pulumi.get(__ret__, 'tags'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_data_manager)
def get_data_manager_output(data_manager_name: Optional[pulumi.Input[str]] = None,
                            resource_group_name: Optional[pulumi.Input[str]] = None,
                            opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetDataManagerResult]:
    """
    Gets information about the specified data manager resource.
    Azure REST API version: 2019-06-01.

    :param str data_manager_name: The name of the DataManager Resource within the specified resource group. DataManager names must be between 3 and 24 characters in length and use any alphanumeric and underscore only
    :param str resource_group_name: The Resource Group Name
    """
    # Body intentionally empty: ``lift_output_func`` wraps ``get_data_manager``
    # so it accepts pulumi Inputs and returns an Output.
    ...
#!/usr/bin/env python3
# Copyright 2005 David Abrahams
# Copyright 2008, 2012 Jurko Gospodnetic
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE.txt or copy at
# https://www.bfgroup.xyz/b2/LICENSE.txt)
# Tests the build step timing facilities.
# TODO: Missing tests:
# 1. 'time' target with a source target representing more than one virtual
# target. This happens in practice, e.g. when using the time rule on a msvc
# exe target whose generator actually constructs an EXE and a PDB target.
# When this is done - only the main virtual target's constructing action
# should be timed.
# 2. 'time' target with a source target representing a virtual target that
# actually gets built by multiple actions run in sequence. In that case a
# separate timing result should be reported for each of those actions. This
# happens in practice, e.g. when using the time rule on a msvc exe target
# which first gets created as a result of some link action and then its
# manifest gets embedded inside it as a resource using a separate action
# (assuming an appropriate property has been set for this target - see the
# msvc module for details).
import BoostBuild
import re
###############################################################################
#
# basic_jam_action_test()
# -----------------------
#
###############################################################################
def basic_jam_action_test():
    """Tests basic Jam action timing support."""
    t = BoostBuild.Tester(pass_toolset=0)
    # A jam fixture: ``time foo : bar`` registers __TIMING_RULE__ on bar so
    # that record_time receives the user/system/clock timings of bar's build.
    t.write("file.jam", """\
rule time
{
DEPENDS $(<) : $(>) ;
__TIMING_RULE__ on $(>) = record_time $(<) ;
DEPENDS all : $(<) ;
}
actions time
{
echo $(>) user: $(__USER_TIME__) system: $(__SYSTEM_TIME__) clock: $(__CLOCK_TIME__)
echo timed from $(>) >> $(<)
}
rule record_time ( target : source : start end user system clock )
{
__USER_TIME__ on $(target) = $(user) ;
__SYSTEM_TIME__ on $(target) = $(system) ;
__CLOCK_TIME__ on $(target) = $(clock) ;
}
rule make
{
DEPENDS $(<) : $(>) ;
}
actions make
{
echo made from $(>) >> $(<)
}
time foo : bar ;
make bar : baz ;
""")
    t.write("baz", "nothing")
    # ``expected_output`` is a regex matched with re.DOTALL against the whole
    # b2 stdout (hence the escaped dots and numeric timing patterns).
    expected_output = """\
\.\.\.found 4 targets\.\.\.
\.\.\.updating 2 targets\.\.\.
make bar
time foo
bar +user: [0-9\.]+ +system: +[0-9\.]+ +clock: +[0-9\.]+ *
\.\.\.updated 2 targets\.\.\.$
"""
    t.run_build_system(["-ffile.jam", "-d+1"], stdout=expected_output,
        match=lambda actual, expected: re.search(expected, actual, re.DOTALL))
    t.expect_addition("foo")
    t.expect_addition("bar")
    t.expect_nothing_more()
    t.cleanup()
###############################################################################
#
# boost_build_testing_support_timing_rule():
# ------------------------------------------
#
###############################################################################
def boost_build_testing_support_timing_rule():
    """
    Tests the target build timing rule provided by the Boost Build testing
    support system.
    """
    t = BoostBuild.Tester(use_test_config=False)
    t.write("aaa.cpp", "int main() {}\n")
    t.write("jamroot.jam", """\
import testing ;
exe my-exe : aaa.cpp ;
time my-time : my-exe ;
""")
    t.run_build_system()
    t.expect_addition("bin/$toolset/debug*/aaa.obj")
    t.expect_addition("bin/$toolset/debug*/my-exe.exe")
    t.expect_addition("bin/$toolset/debug*/my-time.time")
    # The generated .time file must report all three timing measurements.
    t.expect_content_lines("bin/$toolset/debug*/my-time.time",
        "user: *[0-9] seconds")
    t.expect_content_lines("bin/$toolset/debug*/my-time.time",
        "system: *[0-9] seconds")
    t.expect_content_lines("bin/$toolset/debug*/my-time.time",
        "clock: *[0-9] seconds")
    t.cleanup()
###############################################################################
#
# boost_build_testing_support_timing_rule_with_spaces_in_names()
# --------------------------------------------------------------
#
###############################################################################
def METHOD_NAME():
    """
    Tests the target build timing rule provided by the Boost Build testing
    support system when used with targets containing spaces in their names.
    """
    t = BoostBuild.Tester(use_test_config=False)
    t.write("aaa bbb.cpp", "int main() {}\n")
    t.write("jamroot.jam", """\
import testing ;
exe "my exe" : "aaa bbb.cpp" ;
time "my time" : "my exe" ;
""")
    t.run_build_system()
    t.expect_addition("bin/$toolset/debug*/aaa bbb.obj")
    t.expect_addition("bin/$toolset/debug*/my exe.exe")
    t.expect_addition("bin/$toolset/debug*/my time.time")
    # Verify all three timing measurements, mirroring the checks in
    # boost_build_testing_support_timing_rule() (the clock check was missing).
    t.expect_content_lines("bin/$toolset/debug*/my time.time", "user: *")
    t.expect_content_lines("bin/$toolset/debug*/my time.time", "system: *")
    t.expect_content_lines("bin/$toolset/debug*/my time.time", "clock: *")
    t.cleanup()
###############################################################################
#
# main()
# ------
#
###############################################################################
# Run all tests in this module. The final call was truncated in the source
# (missing its closing parenthesis), which is a SyntaxError; restored here.
basic_jam_action_test()
boost_build_testing_support_timing_rule()
METHOD_NAME()
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021-2023 CERN.
# Copyright (C) 2021 data-futures.
# Copyright (C) 2022 Universität Hamburg.
#
# Invenio-RDM-Records is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""IIIF Presentation API Schema for Invenio RDM Records."""
from flask import current_app
from flask_babel import lazy_gettext as _
from marshmallow import Schema, fields, missing, post_dump
class SelfList(fields.List):
    """Pass parent to the nested key.

    https://github.com/marshmallow-code/marshmallow/issues/940
    # TODO: move to marshmallow-utils
    """

    def get_value(self, obj, attr, accessor=None, default=missing):
        """Return the value for a given key from an object attribute."""
        # Ignore ``attr`` entirely: hand the parent object itself to the inner
        # field, wrapped in a one-element list so fields.List serializes it once.
        return [obj]
class SelfNested(fields.Nested):
    """Pass parent to the nested key.

    https://github.com/marshmallow-code/marshmallow/issues/940
    # TODO: move to marshmallow-utils
    """

    def get_value(self, obj, attr, accessor=None, default=missing):
        """Return the value for a given key from an object attribute."""
        # Ignore ``attr``: the nested schema serializes the parent object itself.
        return obj
class IIIFInfoV2Schema(Schema):
    """IIIF info response schema."""

    class Meta:
        """Marshmallow meta class."""

        # JSON-LD keys starting with "@" are not valid Python identifiers,
        # so they must be declared through ``Meta.include``.
        include = {
            "@context": fields.Constant("http://iiif.io/api/image/2/context.json"),
            "@id": fields.String(attribute="links.iiif_base"),
        }

    protocol = fields.Constant("http://iiif.io/api/image")
    profile = fields.Constant(["http://iiif.io/api/image/2/level2.json"])
    tiles = fields.Constant([{"width": 256, "scaleFactors": [1, 2, 4, 8, 16, 32, 64]}])
    width = fields.Integer(attribute="metadata.width")
    height = fields.Integer(attribute="metadata.height")
class IIIFImageServiceV2Schema(Schema):
    """IIIF image service."""

    class Meta:
        """Marshmallow meta class."""

        include = {
            "@context": fields.Constant("http://iiif.io/api/image/2/context.json"),
            "@id": fields.String(attribute="links.iiif_base"),
            "profile": fields.Constant("http://iiif.io/api/image/2/level1.json"),
        }
class IIIFImageResourceV2Schema(Schema):
    """IIIF image resource schema."""

    class Meta:
        """Marshmallow meta class."""

        include = {
            "@id": fields.String(attribute="links.iiif_api"),
            "@type": fields.Constant("dctypes:Image"),
        }

    format = fields.String(attribute="mimetype")
    width = fields.Integer(attribute="metadata.width")
    height = fields.Integer(attribute="metadata.height")
    # The image service description is built from the same file record.
    service = SelfNested(IIIFImageServiceV2Schema)
class IIIFImageV2Schema(Schema):
    """IIIF image resource schema."""

    class Meta:
        """Marshmallow meta class."""

        include = {
            "@context": fields.Constant(
                "http://iiif.io/api/presentation/2/context.json"
            ),
            "@id": fields.String(attribute="links.iiif_annotation"),
            "@type": fields.Constant("oa:Annotation"),
        }

    motivation = fields.Constant("sc:painting")
    resource = SelfNested(IIIFImageResourceV2Schema)
    # The canvas this annotation paints onto.
    on = fields.String(attribute="links.iiif_canvas")
class IIIFCanvasV2Schema(Schema):
    """IIIF canvas schema."""

    class Meta:
        """Marshmallow meta class."""

        include = {
            "@context": fields.Constant(
                "http://iiif.io/api/presentation/2/context.json"
            ),
            "@id": fields.String(attribute="links.iiif_canvas"),
            "@type": fields.Constant("sc:Canvas"),
        }

    # The canvas is labelled with the file key and sized to the image.
    label = fields.String(attribute="key")
    height = fields.Integer(attribute="metadata.height")
    width = fields.Integer(attribute="metadata.width")
    images = SelfList(SelfNested(IIIFImageV2Schema))
class ListIIIFFilesAttribute(fields.List):
    """Similar to ``NestedAttribute`` but for lists."""

    def get_value(self, obj, *args, **kwargs):
        """Return the value for a given key from an object attribute."""
        # Keep only the file entries whose extension is IIIF-compatible.
        supported_formats = current_app.config["IIIF_FORMATS"]
        entries = obj["files"].get("entries", {})
        return [
            entry
            for entry in entries.values()
            if entry["ext"] in supported_formats
        ]
class IIIFSequenceV2Schema(Schema):
    """IIIF sequence schema."""

    class Meta:
        """Marshmallow meta class."""

        include = {
            "@id": fields.String(attribute="links.self_iiif_sequence"),
            "@type": fields.Constant("sc:Sequence"),
        }

    label = fields.Constant(_("Current Page Order"))
    viewingDirection = fields.Constant("left-to-right")
    viewingHint = fields.Constant("paged")
    # One canvas per IIIF-compatible file entry of the record.
    canvases = ListIIIFFilesAttribute(
        fields.Nested(IIIFCanvasV2Schema), attribute="files.entries"
    )
class IIIFManifestV2Schema(Schema):
    """IIIF manifest schema."""

    class Meta:
        """Marshmallow meta class."""

        include = {
            "@context": fields.Constant(
                "http://iiif.io/api/presentation/2/context.json"
            ),
            "@type": fields.Constant("sc:Manifest"),
            "@id": fields.String(attribute="links.self_iiif_manifest"),
        }

    label = fields.String(attribute="metadata.title")
    metadata = fields.Method("get_metadata")
    description = fields.String(
        attribute="metadata.description",
        default=_("Manifest generated by InvenioRDM"),
    )
    license = fields.Method("get_license")
    sequences = SelfList(SelfNested(IIIFSequenceV2Schema))

    def get_license(self, obj):
        """Create the license.

        Returns the link of the record's first license, or ``missing`` if the
        record carries no usable license information.
        """
        # FIXME: only supports one license
        try:
            return obj["metadata"]["rights"][0]["link"]
        except (AttributeError, KeyError, IndexError, TypeError):
            # IndexError: ``rights`` exists but is an empty list (previously
            # uncaught, which crashed manifest serialization).
            # TypeError: a rights entry is not a mapping.
            return missing

    def METHOD_NAME(self, obj):
        """Generate metadata entries."""
        # TODO: add creator
        return [
            {
                "label": _("Publication Date"),
                "value": obj["metadata"]["publication_date"],
            }
        ]

    @post_dump
    def sortcanvases(self, manifest, many, **kwargs):
        """Sort files by key.

        TODO: should sorting be done elsewhere?
        """
        # manifest["sequences"][0]["canvases"].sort(key=lambda x: x["@id"])
        return manifest
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
import json
import logging
from abc import ABC
from typing import Any, Iterable, List, Mapping, Optional, Tuple, Union
import requests
from airbyte_cdk.models import (
AirbyteStream,
ConfiguredAirbyteCatalog,
ConfiguredAirbyteStream,
ConnectorSpecification,
DestinationSyncMode,
SyncMode,
)
from airbyte_cdk.sources import AbstractSource
from airbyte_cdk.sources.METHOD_NAME import Stream
from airbyte_cdk.sources.METHOD_NAME.http import HttpStream
from airbyte_cdk.sources.METHOD_NAME.http.requests_native_auth import Oauth2Authenticator
from requests.auth import AuthBase
class SourceTestFixture(AbstractSource):
    """
    This is a concrete implementation of a Source connector that provides implementations of all the methods needed to run sync
    operations. For simplicity, it also overrides functions that read from files in favor of returning the data directly avoiding
    the need to load static files (ex. spec.yaml, config.json, configured_catalog.json) into the unit-test package.
    """

    def __init__(self, METHOD_NAME: Optional[List[Stream]] = None, authenticator: Optional[AuthBase] = None):
        # Optional per-test overrides; note ``streams()`` below currently
        # ignores ``self._streams`` and always builds an HttpTestStream.
        self._streams = METHOD_NAME
        self._authenticator = authenticator

    def spec(self, logger: logging.Logger) -> ConnectorSpecification:
        # Inline connector spec (normally loaded from spec.yaml).
        return ConnectorSpecification(connectionSpecification={
            "$schema": "http://json-schema.org/draft-07/schema#",
            "title": "Test Fixture Spec",
            "type": "object",
            "required": ["api_token"],
            "properties": {
                "api_token": {
                    "type": "string",
                    "title": "API token",
                    "description": "The token used to authenticate requests to the API.",
                    "airbyte_secret": True
                }
            }
        })

    def read_config(self, config_path: str) -> Mapping[str, Any]:
        # ``config_path`` is deliberately ignored; a canned config is returned.
        return {
            "api_token": "just_some_token"
        }

    @classmethod
    def read_catalog(cls, catalog_path: str) -> ConfiguredAirbyteCatalog:
        # Inline configured catalog (normally loaded from configured_catalog.json).
        return ConfiguredAirbyteCatalog(METHOD_NAME=[
            ConfiguredAirbyteStream(
                stream=AirbyteStream(
                    name="http_test_stream",
                    json_schema={},
                    supported_sync_modes=[SyncMode.full_refresh, SyncMode.incremental],
                    default_cursor_field=["updated_at"],
                    source_defined_cursor=True,
                    source_defined_primary_key=[["id"]]
                ),
                sync_mode=SyncMode.full_refresh,
                destination_sync_mode=DestinationSyncMode.overwrite,
            )
        ])

    def check_connection(self, *args, **kwargs) -> Tuple[bool, Optional[Any]]:
        # The fixture is always "connected".
        return True, ""

    def METHOD_NAME(self, *args, **kwargs) -> List[Stream]:
        # Always expose a single HttpTestStream using the configured authenticator.
        return [HttpTestStream(authenticator=self._authenticator)]
class HttpTestStream(HttpStream, ABC):
    """Test stream against a fake REST endpoint; requests are intercepted by
    ``fixture_mock_send`` in tests, so no network traffic occurs."""

    url_base = "https://airbyte.com/api/v1/"

    def supports_incremental(self):
        return True

    @property
    def availability_strategy(self):
        # Disable availability checks for the fixture.
        return None

    def primary_key(self) -> Optional[Union[str, List[str], List[List[str]]]]:
        # NOTE(review): the CDK normally declares ``primary_key`` as a property;
        # as a plain method, attribute access yields a bound method — confirm
        # this is intentional for the fixture.
        return "id"

    def path(
        self,
        *,
        stream_state: Mapping[str, Any] = None,
        stream_slice: Mapping[str, Any] = None,
        next_page_token: Mapping[str, Any] = None,
    ) -> str:
        return "cast"

    def parse_response(
        self,
        response: requests.Response,
        *,
        stream_state: Mapping[str, Any],
        stream_slice: Mapping[str, Any] = None,
        next_page_token: Mapping[str, Any] = None,
    ) -> Iterable[Mapping]:
        # NOTE(review): raises KeyError if the payload lacks "records"; the
        # mocked responses from ``fixture_mock_send`` always include it.
        body = response.json() or {}
        return body["records"]

    def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
        # Single-page stream: never paginate.
        return None

    def get_json_schema(self) -> Mapping[str, Any]:
        return {}
def fixture_mock_send(self, request, **kwargs) -> requests.Response:
    """
    Helper method that can be used by a test to patch the Session.send() function and mock the outbound send operation to provide
    faster and more reliable responses compared to actual API requests
    """
    records = [
        {"id": 1, "name": "Celine Song", "position": "director"},
        {"id": 2, "name": "Shabier Kirchner", "position": "cinematographer"},
        {"id": 3, "name": "Christopher Bear", "position": "composer"},
        {"id": 4, "name": "Daniel Rossen", "position": "composer"}
    ]
    mock_response = requests.Response()
    mock_response.request = request
    mock_response.status_code = 200
    mock_response.headers = {"header": "value"}
    mock_response._content = json.dumps({"records": records}).encode("utf-8")
    return mock_response
class SourceFixtureOauthAuthenticator(Oauth2Authenticator):
    """
    Test OAuth authenticator that only overrides the request and response aspect of the authenticator flow
    """

    def refresh_access_token(self) -> Tuple[str, int]:
        # Hit the (mocked) token endpoint to exercise the request path, then
        # return canned credentials: (access_token, expires_in_seconds).
        response = requests.request(method="POST", url=self.get_token_refresh_endpoint(), params={})
        response.raise_for_status()
        return "some_access_token", 1800  # Mock oauth response values to be used during the data retrieval step
from typing import List
from astroid import nodes
from hypothesis import HealthCheck, given, settings
from pytest import skip
from python_ta.transforms.type_inference_visitor import NoType
from python_ta.typecheck.base import TypeFail, TypeFailFunction
from .. import custom_hypothesis_support as cs
settings.load_profile("pyta")
@given(cs.subscript_node(cs.simple_homogeneous_list_node(min_size=1)))
@settings(suppress_health_check=[HealthCheck.too_slow])
def METHOD_NAME(node):
    """Test whether visitor properly set the type constraint of Subscript node representing list-index access."""
    module, _ = cs._parse_text(node)
    for subscript_node in module.nodes_of_class(nodes.Subscript):
        list_node = subscript_node.value
        # Indexing a homogeneous list must yield the element type.
        assert subscript_node.inf_type.getValue() == list_node.elts[0].inf_type.getValue()
@given(cs.subscript_node(cs.simple_homogeneous_list_node(min_size=1), cs.slice_node()))
@settings(suppress_health_check=[HealthCheck.too_slow])
def test_subscript_homogeneous_list_slice(node):
    """Test visitor of Subscript node representing slicing of homogeneous list."""
    module, _ = cs._parse_text(node)
    for subscript_node in module.nodes_of_class(nodes.Subscript):
        list_node = subscript_node.value
        # Slicing preserves the container: List[element type], not the element.
        assert subscript_node.inf_type.getValue() == List[list_node.elts[0].inf_type.getValue()]
@given(cs.subscript_node(cs.simple_homogeneous_list_node(min_size=1), cs.slice_node()))
@settings(suppress_health_check=[HealthCheck.too_slow])
def test_subscript_load_ctx(node):
    """Test visitor of Subscript node when loaded in an (if) expression."""
    # Wrap the subscript in an ``if True:`` body so it is visited in Load context.
    load_node = nodes.If()
    load_node.postinit(nodes.Const(True), [node], [])
    module, _ = cs._parse_text(load_node)
    for subscript_node in module.nodes_of_class(nodes.Subscript):
        list_node = subscript_node.value
        assert subscript_node.inf_type.getValue() == List[list_node.elts[0].inf_type.getValue()]
def test_homogenous_list_store_ctx():
    """Test visitor of Subscript node within a homogenous list assignment."""
    program = """
[1,2,3][0] = 2
"""
    module, _ = cs._parse_text(program)
    # A well-typed store produces no type information on either node.
    for assignment in module.nodes_of_class(nodes.Assign):
        assert assignment.inf_type == NoType()
    for subscript in module.nodes_of_class(nodes.Subscript):
        assert subscript.inf_type == NoType()
def test_homogenous_list_invalid_store_ctx():
    """Test visitor of Subscript node within an invalid homogenous list assignment."""
    program = """
[1,2,3][0] = 'a'
"""
    module, _ = cs._parse_text(program)
    # Storing a str into an int list fails on the Assign, while the Subscript
    # itself still carries no type.
    for assignment in module.nodes_of_class(nodes.Assign):
        assert isinstance(assignment.inf_type, TypeFail)
    for subscript in module.nodes_of_class(nodes.Subscript):
        assert subscript.inf_type == NoType()
@given(cs.subscript_node(cs.list_node(min_size=1), cs.slice_node()))
@settings(suppress_health_check=[HealthCheck.too_slow])
def test_subscript_del_ctx(node):
    """Test visitor of Subscript node within a del statement."""
    del_node = nodes.Delete()
    del_node.postinit([node])
    module, _ = cs._parse_text(del_node)
    for subscript_node in module.nodes_of_class(nodes.Subscript):
        # ``del x[i]`` is a statement: its inferred type is NoneType.
        assert subscript_node.inf_type.getValue() == type(None)
@given(cs.simple_homogeneous_dict_node(min_size=1))
def test_inference_dict_subscript(node):
    """Note that this test only takes in a dictionary because the subscript index
    must be the same type as the dictionary's keys in order to type check.
    """
    # Subscript the generated dict with each of its own keys in turn.
    for key, _ in node.items:
        new_node = nodes.Subscript()
        new_node.postinit(node, key)
        module, _ = cs._parse_text(new_node)
        for subscript_node in module.nodes_of_class(nodes.Subscript):
            dict_node = subscript_node.value
            # A key lookup must yield the dict's value type.
            assert (
                subscript_node.inf_type.getValue()
                == list(dict_node.items)[0][1].inf_type.getValue()
            )
@given(cs.simple_homogeneous_list_node(min_size=1))
def test_inference_invalid_slice(node):
    # Slicing with a str upper bound (lst[0:'a']) must be a type failure.
    sub_node = nodes.Subscript()
    slice = nodes.Slice()
    slice.postinit(nodes.Const(0), nodes.Const("a"))
    sub_node.postinit(node, slice)
    module, _ = cs._parse_text(sub_node)
    for subscript_node in module.nodes_of_class(nodes.Subscript):
        assert isinstance(subscript_node.inf_type, TypeFail)
def test_inference_ext_slice():
    # Skipped until class-method lookup is supported by the type checker.
    skip("Lookup for class methods must be implemeneted before this test can pass")
    program = """
class Foo:
def __getitem__(self, tup):
return tup[0]
foo = Foo()
foo[1, 'a']
"""
    module, _ = cs._parse_text(program)
    # The second Subscript is ``foo[1, 'a']``; its type comes from __getitem__.
    subscript_node = list(module.nodes_of_class(nodes.Subscript))[1]
    assert subscript_node.inf_type.getValue() == int
def test_subscript_slice():
    """Subscripting the ``List`` name with a bare slice must be a type failure."""
    program = """
x = List[:]
"""
    module, _ = cs._parse_text(program)
    first_assign = next(module.nodes_of_class(nodes.Assign))
    assert isinstance(first_assign.inf_type, TypeFail)
# TODO: this test needs to be converted, but will also fail
# @given(cs.random_list(min_size=2), cs.random_slice_indices())
# def test_subscript_heterogeneous_list_slice(input_list, slice):
# """Test visitor of Subscript node representing slicing of heterogeneous list."""
# assume(not isinstance(input_list[0], type(input_list[1])))
# input_slice = ':'.join([str(index) if index else '' for index in slice])
# program = f'{input_list}[{input_slice}]'
# module, _ = cs._parse_text(program)
# subscript_node = list(module.nodes_of_class(nodes.Subscript))[0]
# assert subscript_node.inf_type.getValue() == List[Any] |
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import math
import os
import time

import numpy as np

from utils import set_seed, convert_ext_examples
def METHOD_NAME():
    """Convert doccano-exported annotations into UIE extraction examples.

    Reads ``args.input_file`` (one doccano JSON record per line), builds
    positive/negative extraction examples and writes ``train.txt`` (and
    optionally ``dev.txt``/``test.txt``) under ``args.save_dir`` according
    to ``args.splits``. Raises ValueError on invalid paths or splits.
    """
    set_seed(args.seed)
    tic_time = time.time()
    if not os.path.exists(args.input_file):
        raise ValueError("Please input the correct path of doccano file.")
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)
    if len(args.splits) != 0 and len(args.splits) != 3:
        raise ValueError("Only []/ len(splits)==3 accepted for splits.")
    # Use a tolerant float comparison: an exact ``sum(...) != 1`` rejects
    # valid ratios such as [0.7, 0.2, 0.1] whose float sum is not exactly 1.0.
    if args.splits and not math.isclose(sum(args.splits), 1.0):
        raise ValueError("Please set correct splits, sum of elements in splits should be equal to 1.")

    with open(args.input_file, "r", encoding="utf-8") as f:
        raw_examples = f.readlines()

    def _create_ext_examples(examples, negative_ratio=0, shuffle=False):
        # Build entity + relation examples; optionally shuffle their union.
        entities, relations = convert_ext_examples(examples, negative_ratio)
        examples = [e + r for e, r in zip(entities, relations)]
        if shuffle:
            indexes = np.random.permutation(len(examples))
            examples = [examples[i] for i in indexes]
        return examples

    def _save_examples(save_dir, file_name, examples):
        # Write one JSON object per line (UTF-8, non-ASCII preserved).
        count = 0
        save_path = os.path.join(save_dir, file_name)
        with open(save_path, "w", encoding="utf-8") as f:
            for example in examples:
                for x in example:
                    f.write(json.dumps(x, ensure_ascii=False) + "\n")
                    count += 1
        print("\nSave %d examples to %s." % (count, save_path))

    if len(args.splits) == 0:
        # No split requested: everything becomes training data.
        examples = _create_ext_examples(raw_examples, args.negative_ratio, args.is_shuffle)
        _save_examples(args.save_dir, "train.txt", examples)
    else:
        if args.is_shuffle:
            indexes = np.random.permutation(len(raw_examples))
            raw_examples = [raw_examples[i] for i in indexes]
        # Negative sampling is applied to the training portion only.
        i1, i2, _ = args.splits
        p1 = int(len(raw_examples) * i1)
        p2 = int(len(raw_examples) * (i1 + i2))
        train_examples = _create_ext_examples(raw_examples[:p1], args.negative_ratio, args.is_shuffle)
        dev_examples = _create_ext_examples(raw_examples[p1:p2])
        test_examples = _create_ext_examples(raw_examples[p2:])
        _save_examples(args.save_dir, "train.txt", train_examples)
        _save_examples(args.save_dir, "dev.txt", dev_examples)
        _save_examples(args.save_dir, "test.txt", test_examples)

    print("Finished! It takes %.2f seconds" % (time.time() - tic_time))
if __name__ == "__main__":
    # Command-line entry point: parse conversion options, then run the converter.
    # yapf: disable
    parser = argparse.ArgumentParser()
    parser.add_argument("--input_file", default="./data/data.json", type=str, help="The data file exported from doccano platform.")
    parser.add_argument("--save_dir", default="./data", type=str, help="The path to save processed data.")
    parser.add_argument("--negative_ratio", default=5, type=int, help="Used only for the classification task, the ratio of positive and negative samples, number of negtive samples = negative_ratio * number of positive samples")
    parser.add_argument("--splits", default=[0.8, 0.1, 0.1], type=float, nargs="*", help="The ratio of samples in datasets. [0.6, 0.2, 0.2] means 60% samples used for training, 20% for evaluation and 20% for test.")
    parser.add_argument("--is_shuffle", default=True, type=bool, help="Whether to shuffle the labeled dataset, defaults to True.")
    parser.add_argument("--seed", type=int, default=1000, help="random seed for initialization")
    args = parser.parse_args()
    # yapf: enable
    METHOD_NAME()
# -----------------------------------------------------------------------------
# BSD 3-Clause License
#
# Copyright (c) 2021-2022, Science and Technology Facilities Council.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
# Authors: A. R. Porter and S. Siso, STFC Daresbury Lab
# -----------------------------------------------------------------------------
''' This module contains the ScopingNode implementation.'''
from psyclone.psyir.nodes.node import Node
from psyclone.psyir.nodes.reference import Reference
from psyclone.psyir.symbols import SymbolTable
class ScopingNode(Node):
    ''' Abstract node that has an associated Symbol Table to keep track of
    symbols declared in its scope (symbols that can be accessed by this node
    and any of its descendants). If a pre-existing symbol table is provided,
    it will be attached to the node (raising an error if the symbol table
    is already attached to another scope), otherwise a new empty Symbol Table
    will be created.

    :param children: the PSyIR nodes that are children of this node.
    :type children: List[:py:class:`psyclone.psyir.nodes.Node`]
    :param parent: the parent node of this node in the PSyIR.
    :type parent: Optional[:py:class:`psyclone.psyir.nodes.Node`]
    :param symbol_table: attach the given symbol table to the new node.
    :type symbol_table: \
        Optional[:py:class:`psyclone.psyir.symbols.SymbolTable`]

    '''
    # Polymorphic parameter to initialize the Symbol Table of the ScopingNode
    _symbol_table_class = SymbolTable

    def __init__(self, children=None, parent=None, symbol_table=None):
        # Bug fix: the previous code passed ``self`` explicitly as well as
        # implicitly (``super(...).__init__(self, children=..., ...)``),
        # shifting it into the first positional parameter of Node.__init__.
        super(ScopingNode, self).__init__(children=children, parent=parent)
        self._symbol_table = None

        if symbol_table is not None:
            # Attach the provided symbol table to this scope
            symbol_table.attach(self)
        else:
            # Create a new symbol table attached to this scope
            self._symbol_table = self._symbol_table_class(self)

    def __eq__(self, other):
        '''
        Checks whether two nodes are equal. Scoping nodes are equal if their
        symbol tables are equal.

        :param object other: the object to check equality to.

        :returns: whether other is equal to self.
        :rtype: bool
        '''
        # ``and`` short-circuits, so other.symbol_table is only accessed when
        # the parent comparison (which includes a type check) succeeded.
        is_eq = super().__eq__(other)
        is_eq = is_eq and self.symbol_table == other.symbol_table

        return is_eq

    def METHOD_NAME(self, other):
        ''' Refine the object attributes when a shallow copy is not the most
        appropriate operation during a call to the copy() method.

        :param other: object we are copying from.
        :type other: :py:class:`psyclone.psyir.node.Node`

        '''
        super(ScopingNode, self).METHOD_NAME(other)
        # Deep-copy the table and re-home it on this node.
        self._symbol_table = other.symbol_table.deep_copy()
        # pylint: disable=protected-access
        self._symbol_table._node = self  # Associate to self

        # Update of children references to point to the equivalent symbols in
        # the new symbol table attached to self.
        # TODO #1377 Unfortunately Loop nodes currently store the associated
        # loop variable in a `_variable` property rather than as a child so we
        # must handle those separately. Also, in the LFRic API a Loop does not
        # initially have the `_variable` property set which means that calling
        # the `variable` getter causes an error (because it checks the
        # internal-consistency of the Loop node). We therefore have to check
        # the value of the 'private' `_variable` for now.
        # We have to import Loop here to avoid a circular dependency.
        # pylint: disable=import-outside-toplevel
        from psyclone.psyir.nodes.loop import Loop
        for node in self.walk((Reference, Loop)):
            if isinstance(node, Reference):
                if node.symbol in other.symbol_table.symbols:
                    node.symbol = self.symbol_table.lookup(node.symbol.name)
            if isinstance(node, Loop) and node._variable:
                if node.variable in other.symbol_table.symbols:
                    node.variable = self.symbol_table.lookup(
                        node.variable.name)

    @property
    def symbol_table(self):
        '''
        :returns: table containing symbol information for this scope.
        :rtype: :py:class:`psyclone.psyGen.SymbolTable`
        '''
        return self._symbol_table
# For AutoAPI documentation generation
__all__ = ['ScopingNode'] |
5,113 | create typed dict doc | # Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os.path
from robot.running import ArgInfo
from robot.errors import DataError
from .datatypes import EnumMember, TypedDictItem, TypeDoc
from .model import LibraryDoc, KeywordDoc
class JsonDocBuilder:
    """Build a :class:`LibraryDoc` from a Libdoc JSON spec file.

    Handles both the modern spec layout (RF >= 5, 'typedocs') and the
    legacy one (RF 4, 'dataTypes').
    """

    def build(self, path):
        """Read the JSON spec at *path* and return a LibraryDoc."""
        spec = self._parse_spec_json(path)
        return self.build_from_dict(spec)

    def build_from_dict(self, spec):
        """Construct a LibraryDoc from an already-parsed spec dict."""
        libdoc = LibraryDoc(name=spec['name'],
                            doc=spec['doc'],
                            version=spec['version'],
                            type=spec['type'],
                            scope=spec['scope'],
                            doc_format=spec['docFormat'],
                            source=spec['source'],
                            lineno=int(spec.get('lineno', -1)))
        libdoc.inits = [self._create_keyword(kw) for kw in spec['inits']]
        libdoc.keywords = [self._create_keyword(kw) for kw in spec['keywords']]
        # RF >= 5 have 'typedocs', RF >= 4 have 'dataTypes', older/custom may have neither.
        if 'typedocs' in spec:
            libdoc.type_docs = self._parse_type_docs(spec['typedocs'])
        elif 'dataTypes' in spec:
            libdoc.type_docs = self._parse_data_types(spec['dataTypes'])
        return libdoc

    def _parse_spec_json(self, path):
        """Load the spec file, raising DataError if it does not exist."""
        if not os.path.isfile(path):
            raise DataError(f"Spec file '{path}' does not exist.")
        with open(path) as json_source:
            libdoc_dict = json.load(json_source)
        return libdoc_dict

    def _create_keyword(self, data):
        """Build a KeywordDoc (including its argument spec) from a dict."""
        kw = KeywordDoc(name=data.get('name'),
                        doc=data['doc'],
                        shortdoc=data['shortdoc'],
                        tags=data['tags'],
                        private=data.get('private', False),
                        deprecated=data.get('deprecated', False),
                        source=data['source'],
                        lineno=int(data.get('lineno', -1)))
        self._create_arguments(data['args'], kw)
        return kw

    def _create_arguments(self, arguments, kw: KeywordDoc):
        """Populate kw.args (and kw.type_docs) from the serialized args.

        Each argument 'kind' maps to a setter that stores the name in the
        right slot of the argument spec; the *_MARKER kinds are no-ops.
        """
        spec = kw.args
        setters = {
            ArgInfo.POSITIONAL_ONLY: spec.positional_only.append,
            ArgInfo.POSITIONAL_ONLY_MARKER: lambda value: None,
            ArgInfo.POSITIONAL_OR_NAMED: spec.positional_or_named.append,
            ArgInfo.VAR_POSITIONAL: lambda value: setattr(spec, 'var_positional', value),
            ArgInfo.NAMED_ONLY_MARKER: lambda value: None,
            ArgInfo.NAMED_ONLY: spec.named_only.append,
            ArgInfo.VAR_NAMED: lambda value: setattr(spec, 'var_named', value),
        }
        for arg in arguments:
            name = arg['name']
            setters[arg['kind']](name)
            default = arg.get('defaultValue')
            if default is not None:
                spec.defaults[name] = default
            if arg.get('type'):
                # Modern (RF >= 6.1) nested type information.
                type_docs = {}
                type_info = self._parse_modern_type_info(arg['type'], type_docs)
            else:  # Compatibility with RF < 6.1.
                type_docs = arg.get('typedocs', {})
                type_info = tuple(arg['types'])
            if type_info:
                if not spec.types:
                    spec.types = {}
                spec.types[name] = type_info
            kw.type_docs[name] = type_docs

    def _parse_modern_type_info(self, data, type_docs):
        """Recursively convert nested type info, collecting typedoc names."""
        if data.get('typedoc'):
            type_docs[data['name']] = data['typedoc']
        return {'name': data['name'],
                'nested': [self._parse_modern_type_info(nested, type_docs)
                           for nested in data.get('nested', ())]}

    def _parse_type_docs(self, type_docs):
        """Yield TypeDoc objects from the modern 'typedocs' spec entries."""
        for data in type_docs:
            doc = TypeDoc(data['type'], data['name'], data['doc'], data['accepts'],
                          data['usages'])
            if doc.type == TypeDoc.ENUM:
                doc.members = [EnumMember(d['name'], d['value'])
                               for d in data['members']]
            if doc.type == TypeDoc.TYPED_DICT:
                doc.items = [TypedDictItem(d['key'], d['type'], d['required'])
                             for d in data['items']]
            yield doc

    # Code below used for parsing legacy 'dataTypes'.

    def _parse_data_types(self, data_types):
        """Yield TypeDoc objects from the legacy 'dataTypes' spec entry."""
        for obj in data_types['enums']:
            yield self._create_enum_doc(obj)
        for obj in data_types['typedDicts']:
            yield self.METHOD_NAME(obj)

    def _create_enum_doc(self, data):
        """Create an enum TypeDoc from a legacy 'enums' entry."""
        return TypeDoc(TypeDoc.ENUM, data['name'], data['doc'],
                       members=[EnumMember(member['name'], member['value'])
                                for member in data['members']])

    def METHOD_NAME(self, data):
        """Create a TypedDict TypeDoc from a legacy 'typedDicts' entry."""
        return TypeDoc(TypeDoc.TYPED_DICT, data['name'], data['doc'],
                       items=[TypedDictItem(item['key'], item['type'], item['required'])
                              for item in data['items']])
5,114 | register all modules | # Copyright (c) OpenMMLab. All rights reserved.
import datetime
import logging
import os
import platform
import warnings
import cv2
import torch.multiprocessing as mp
from mmengine import DefaultScope
from mmengine.logging import print_log
from mmengine.utils import digit_version
def setup_cache_size_limit_of_dynamo():
    """Optionally cap ``torch._dynamo``'s compilation cache size.

    Dynamic shapes in detection losses/post-processing force frequent
    recompilation, so a large ``torch._dynamo.config.cache_size_limit``
    can mean repeated compilation and slower train/test runs. When the
    ``DYNAMO_CACHE_SIZE_LIMIT`` environment variable is set (an empirical
    value is 4) and torch >= 2.0 is installed, apply it and log a warning.
    """
    import torch

    # Dynamo only exists from torch 2.0 onwards.
    if digit_version(torch.__version__) < digit_version('2.0.0'):
        return
    env_limit = os.environ.get('DYNAMO_CACHE_SIZE_LIMIT')
    if env_limit is None:
        return
    import torch._dynamo
    cache_size_limit = int(env_limit)
    torch._dynamo.config.cache_size_limit = cache_size_limit
    print_log(
        f'torch._dynamo.config.cache_size_limit is force '
        f'set to {cache_size_limit}.',
        logger='current',
        level=logging.WARNING)
def setup_multi_processes(cfg):
    """Configure multi-processing related environment from ``cfg``.

    Sets the multiprocessing start method, limits OpenCV threading, and
    provides conservative defaults for OMP/MKL thread counts when several
    dataloader workers are used, to avoid overloading the system.
    """
    # Use `fork` (where supported) as the start method to speed up training.
    if platform.system() != 'Windows':
        desired_method = cfg.get('mp_start_method', 'fork')
        current_method = mp.get_start_method(allow_none=True)
        if current_method is not None and current_method != desired_method:
            warnings.warn(
                f'Multi-processing start method `{desired_method}` is '
                f'different from the previous setting `{current_method}`.'
                f'It will be force set to `{desired_method}`. You can change '
                f'this behavior by changing `mp_start_method` in your config.')
        mp.set_start_method(desired_method, force=True)

    # Disable OpenCV's own multithreading to avoid oversubscription.
    cv2.setNumThreads(cfg.get('opencv_num_threads', 0))

    # Determine the effective dataloader worker count.
    # This code is referred from https://github.com/pytorch/pytorch/blob/master/torch/distributed/run.py # noqa
    num_workers = cfg.data.get('workers_per_gpu', 1)
    if 'train_dataloader' in cfg.data:
        num_workers = max(
            cfg.data.train_dataloader.get('workers_per_gpu', 1), num_workers)

    # Default OMP/MKL to a single thread per process when many workers run.
    for env_var in ('OMP_NUM_THREADS', 'MKL_NUM_THREADS'):
        if env_var not in os.environ and num_workers > 1:
            thread_count = 1
            warnings.warn(
                f'Setting {env_var} environment variable for each process '
                f'to be {thread_count} in default, to avoid your system being '
                f'overloaded, please further tune the variable for optimal '
                f'performance in your application as needed.')
            os.environ[env_var] = str(thread_count)
def METHOD_NAME(init_default_scope: bool = True) -> None:
    """Register all modules in mmdet into the registries.

    Args:
        init_default_scope (bool): Whether initialize the mmdet default scope.
            When `init_default_scope=True`, the global default scope will be
            set to `mmdet`, and all registries will build modules from mmdet's
            registry node. To understand more about the registry, please refer
            to https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/registry.md
            Defaults to True.
    """  # noqa
    # Importing the sub-packages triggers their registry registrations.
    import mmdet.datasets  # noqa: F401,F403
    import mmdet.engine  # noqa: F401,F403
    import mmdet.evaluation  # noqa: F401,F403
    import mmdet.models  # noqa: F401,F403
    import mmdet.visualization  # noqa: F401,F403

    if not init_default_scope:
        return
    if (DefaultScope.get_current_instance() is None
            or not DefaultScope.check_instance_created('mmdet')):
        # No scope exists yet: simply create the 'mmdet' one.
        DefaultScope.get_instance('mmdet', scope_name='mmdet')
        return
    current_scope = DefaultScope.get_current_instance()
    if current_scope.scope_name != 'mmdet':
        warnings.warn('The current default scope '
                      f'"{current_scope.scope_name}" is not "mmdet", '
                      '`register_all_modules` will force the current'
                      'default scope to be "mmdet". If this is not '
                      'expected, please set `init_default_scope=False`.')
        # avoid name conflict
        new_instance_name = f'mmdet-{datetime.datetime.now()}'
        DefaultScope.get_instance(new_instance_name, scope_name='mmdet')
5,115 | test offset transformer wrapper | # -*- coding: utf-8 -
"""Test the created constraints against approved constraints.
This file is part of project oemof (github.com/oemof/oemof). It's copyrighted
by the contributors recorded in the version control history of the file,
available from its original location oemof/tests/test_solph_network_classes.py
SPDX-License-Identifier: MIT
"""
import warnings
import pytest
from oemof.tools.debugging import SuspiciousUsageWarning
from oemof import solph
class TestConverterClass:
    """Tests for constructing solph.components.Converter objects."""

    @classmethod
    def setup_class(cls):
        """Create a shared bus and silence SuspiciousUsageWarning."""
        cls.bus = solph.buses.Bus()
        warnings.filterwarnings("ignore", category=SuspiciousUsageWarning)

    @classmethod
    def teardown_class(cls):
        """Re-enable SuspiciousUsageWarning for other tests."""
        warnings.filterwarnings("always", category=SuspiciousUsageWarning)

    def test_empty_converter(self):
        converter = solph.components.Converter()
        assert isinstance(converter.conversion_factors, dict)
        assert len(converter.conversion_factors.keys()) == 0

    def test_default_conversion_factor(self):
        converter = solph.components.Converter(
            inputs={self.bus: solph.flows.Flow()}
        )
        assert converter.conversion_factors[self.bus][2] == 1

    def test_sequence_conversion_factor_from_scalar(self):
        converter = solph.components.Converter(
            inputs={self.bus: solph.flows.Flow()},
            conversion_factors={self.bus: 2},
        )
        assert converter.conversion_factors[self.bus][6] == 2

    def test_sequence_conversion_factor_from_list_correct_length(self):
        converter = solph.components.Converter(
            inputs={self.bus: solph.flows.Flow()},
            conversion_factors={self.bus: [2]},
        )
        assert len(converter.conversion_factors[self.bus]) == 1

    def test_sequence_conversion_factor_from_list_wrong_length(self):
        converter = solph.components.Converter(
            inputs={self.bus: solph.flows.Flow()},
            conversion_factors={self.bus: [2]},
        )
        # A single-element sequence must not be indexable beyond its length.
        with pytest.raises(IndexError):
            self.a = converter.conversion_factors[self.bus][6]

    def test_converter_missing_output_create_empty_dict(self):
        converter = solph.components.Converter(inputs={})
        assert converter.outputs == {}

    def test_converter_missing_input_create_empty_dict(self):
        converter = solph.components.Converter(outputs={})
        assert converter.inputs == {}
def test_transformer_wrapper():
    """The deprecated Transformer alias still works but warns."""
    with pytest.warns(FutureWarning):
        solph.components.Transformer()
def METHOD_NAME():
    """The deprecated OffsetTransformer alias still works but warns."""
    with pytest.warns(FutureWarning):
        solph.components.OffsetTransformer(inputs={}, outputs={})
def test_wrong_combination_invest_and_nominal_value():
    """Passing both an Investment and a nominal_value must fail."""
    expected = "For backward compatibility, the option investment overwrites"
    with pytest.raises(AttributeError, match=expected):
        solph.flows.Flow(investment=solph.Investment(), nominal_value=4)
def test_fixed_costs_warning():
    """Using `fixed_costs` outside a multi-period model emits a warning."""
    expected = (
        "Be aware that the fixed costs attribute is only\n"
        "meant to be used for multi-period models.\n"
        "If you wish to set up a multi-period model, explicitly "
        "set the `periods` attribute of your energy system.\n"
        "It has been decided to remove the `fixed_costs` "
        "attribute with v0.2 for regular uses.\n"
        "If you specify `fixed_costs` for a regular model, "
        "it will simply be ignored."
    )
    with warnings.catch_warnings(record=True) as caught:
        solph.flows.Flow(fixed_costs=34)
    assert len(caught) != 0
    assert expected == str(caught[-1].message)
def test_flow_with_fix_and_min_max():
    """`min`/`max` must be rejected whenever `fix` is given."""
    msg = "It is not allowed to define `min`/`max` if `fix` is defined."
    # Each of the three forbidden combinations must raise the same error.
    for extra in ({"min": [0, 5]}, {"max": [0, 5]}, {"max": [0, 5], "min": [4, 9]}):
        with pytest.raises(AttributeError, match=msg):
            solph.flows.Flow(fix=[1, 3], **extra)
def test_infinite_values():
    """Infinite nominal_value or max values are rejected."""
    with pytest.raises(ValueError, match="nominal_value must be a finite value"):
        solph.flows.Flow(nominal_value=float("+inf"))
    with pytest.raises(ValueError, match="max must be a finite value"):
        solph.flows.Flow(nominal_value=1, max=float("+inf"))
def test_attributes_needing_nominal_value_get_it():
    """Attributes that require nominal_value raise without it."""
    cases = (
        ("fix", "If fix is set in a flow "),
        ("max", "If max is set in a flow "),
        ("min", "If min is set in a flow "),
        ("full_load_time_max", "If full_load_time_max is set in a flow "),
        ("full_load_time_min", "If full_load_time_min is set in a flow "),
    )
    for attribute, expected in cases:
        with pytest.raises(AttributeError, match=expected):
            solph.flows.Flow(**{attribute: 0.3})
def test_min_max_values_for_bidirectional_flow():
    """Bidirectional flows default to min=-1/max=1 and accept overrides."""
    default_flow = solph.flows.Flow(bidirectional=True)  # use default values
    custom_flow = solph.flows.Flow(
        bidirectional=True, nominal_value=1, min=-0.8, max=0.9
    )
    assert default_flow.bidirectional
    assert default_flow.max[0] == 1
    assert default_flow.min[0] == -1
    assert custom_flow.bidirectional
    assert custom_flow.max[0] == 0.9
    assert custom_flow.min[0] == -0.8
5,116 | check level |
from sage.all import (
cached_function, floor, Gamma1, dimension_new_cusp_forms,
dimension_eis, dimension_cusp_forms, dimension_modular_forms)
from lmfdb.lmfdb_database import db, SQL
from .mf import MfChecker, check_analytic_conductor
from .verification import overall, slow, fast, accumulate_failures
@cached_function
def sturm_bound1(level, weight):
    # Sturm bound for Gamma1(N): floor(k * [SL_2(Z) : Gamma1(N)] / 12).
    index = Gamma1(level).index()
    return floor(weight * index / 12)
class mf_gamma1(MfChecker):
    """Consistency checks for the LMFDB table ``mf_gamma1``, which stores
    data about spaces of modular forms for Gamma1(N) of weight k.

    Each decorated method implements one verification; the @overall, @slow
    and @fast decorators (from the verification module) control how and
    when it runs.  Rows are keyed by (level, weight).
    """
    table = db.mf_gamma1
    label = ['level', 'weight']
    uniqueness_constraints = [[table._label_col], label]

    @overall
    def check_box_count(self):
        """
        there should be a row present for every pair (N,k) satisfying a box constraint on N,k,Nk2
        """
        # TIME about 5s
        def make_query(box):
            # Translate a box record into a search query on this table.
            query = self._box_query(box, drop=['char_order', 'dim'])
            # Have to remove small N if there are restrictions on the character order
            if 'omin' in box:
                if box['omin'] == 2:
                    if 'level' not in query:
                        query['level'] = {}
                    if '$gte' not in query['level'] or query['level']['$gte'] < 3:
                        query['level']['$gte'] = 3
                else:
                    raise NotImplementedError
            if 'Dmin' in box:
                query['newspace_dims']['$maxgte'] = box['Dmin']
            if 'Dmax' in box:
                query['newspace_dims']['$anylte'] = box['Dmax']
            return query
        return accumulate_failures(self.check_count(box['Nk_count'], make_query(box))
                                   for box in db.mf_boxes.search())

    @overall
    def check_box_traces(self):
        """
        check that traces is set if space is in a box with traces set and no dimension/character constraint
        """
        return accumulate_failures(self.check_non_null(['traces'], self._box_query(box, drop=['char_order', 'dim']))
                                   for box in db.mf_boxes.search({'omin':None, 'omax':None, 'Dmin':None, 'Dmax':None, 'straces':True}))

    @overall
    def check_dim_wt1(self):
        """
        for k = 1 check that dim = dihedral_dim + a4_dim + a5_dim + s4_dim
        """
        return self.check_sum(['dim'], ['dihedral_dim', 'a4_dim', 'a5_dim', 's4_dim'], {'weight': 1})

    @overall
    def check_trace_display(self):
        """
        check that trace_display is set whenever traces is set and dim > 0
        """
        return self.check_non_null(['trace_display'], {'traces':{'$exists': True}, 'dim':{'$gt': 0}})

    @overall
    def check_traces_len(self):
        """
        if present, check that traces has length at least 1000
        """
        # TIME about 5s
        return self.check_array_len_gte_constant('traces', 1000, {'traces':{'$exists': True}})

    @overall
    def check_mf_dim(self):
        """
        check that eis_dim + cusp_dim = mf_dim
        """
        return self.check_sum(['eis_dim','cusp_dim'],['mf_dim'])

    @overall
    def check_dim(self):
        """
        check that eis_new_dim + dim = mf_new_dim
        """
        return self.check_sum(['eis_new_dim','dim'], ['mf_new_dim'])

    @overall
    def check_Nk2(self):
        """
        check that Nk2 = N*k*k
        """
        return self.check_product('Nk2', ['level', 'weight', 'weight'])

    @overall
    def weight_parity_even(self):
        """
        check weight_parity
        """
        return self.check_divisible('weight', 2, {'weight_parity':1})

    @overall
    def weight_parity_odd(self):
        """
        check weight_parity
        """
        return self.check_non_divisible('weight', 2, {'weight_parity':-1})

    @overall
    def check_newspaces_numforms(self):
        """
        if num_forms is set verify that it is equal to the sum of num_forms over newspaces with matching level and weight
        """
        # TIME about 2s
        return self.check_crosstable_sum('mf_newspaces', 'num_forms', ['level', 'weight'])

    @overall
    def check_newspaces_hecke_orbit_dims(self):
        """
        if hecke_orbit_dims is set, verify that it is equal to the (sorted) concatenation of dim over newspaces with matching level and weight
        """
        # TIME about 10s
        return self.check_crosstable_aggregate('mf_newforms', 'hecke_orbit_dims', ['level', 'weight'], 'dim', sort=['char_orbit_index', 'hecke_orbit'])

    @overall
    def check_newspaces_newspace_dims(self):
        """
        check that newspace_dims is equal to the (sorted) concatenation of dim over newspaces with this level and weight
        """
        # TIME about 5s
        return self.check_crosstable_aggregate('mf_newspaces', 'newspace_dims', ['level', 'weight'], 'dim', sort=['char_orbit_index'])

    @overall
    def check_newspaces_num_spaces(self):
        """
        check that num_spaces matches the number of records in mf_newspaces with this level and weight and positive dimension
        """
        # TIME about 2s
        # TODO: check that the number of char_orbits of level N and weight k is the same as the number of rows in mf_newspaces with this weight and level. The following doesn't work since num_spaces counts spaces with positive dimension
        return self._run_crosstable(SQL("COUNT(*)"), 'mf_newspaces', 'num_spaces', ['level', 'weight'], extra=SQL(" AND t2.dim > 0"))

    ### mf_gamma1_subspaces ###
    @overall
    def check_oldspace_decomposition_totaldim(self):
        """
        check that summing sub_dim * sub_mult over rows with a given label gives dim S_k(Gamma1(N))
        """
        # TIME about 1s
        return self.check_crosstable_dotprod('mf_gamma1_subspaces', 'cusp_dim', 'label', ['sub_mult', 'sub_dim'])

    ### mf_gamma1_portraits ###
    @overall
    def check_portraits_count(self):
        """
        check that there is a portrait present for every record in mf_gamma1 with `dim > 0` and `level <= 4000`
        """
        return self.check_crosstable_count('mf_gamma1_portraits', 1, 'label', constraint={'dim':{'$gt':0}, 'level':{'$lte':4000}})

    ### slow ###
    @slow(projection=['level', 'level_radical', 'level_primes', 'level_is_prime', 'level_is_prime_power', 'level_is_squarefree', 'level_is_square'])
    def METHOD_NAME(self, rec, verbose=False):
        """
        check level_* attributes
        """
        return self._check_level(rec, verbose=verbose)

    @slow(projection=['level', 'weight', 'analytic_conductor'])
    def check_analytic_conductor(self, rec, verbose=False):
        """
        check analytic_conductor
        """
        return check_analytic_conductor(rec['level'], rec['weight'], rec['analytic_conductor'], verbose=verbose)

    @slow(max_failures=2000, projection=['level', 'weight', 'sturm_bound'])
    def check_sturm_bound(self, rec, verbose=False):
        """
        check that sturm_bound is exactly floor(k*Index(Gamma1(N))/12)
        """
        return self._test_equality(rec['sturm_bound'], sturm_bound1(rec['level'], rec['weight']), verbose)

    @fast(constraint={'weight':{'$gt':1}}, projection=['level', 'weight', 'dim'])
    def check_Sk_dim_formula(self, rec, verbose=False):
        """
        check that dim = dim S_k^new(Gamma1(N))
        """
        # TIME about 60s
        return self._test_equality(rec['dim'], dimension_new_cusp_forms(Gamma1(rec['level']), rec['weight']), verbose)

    @fast(constraint={'weight':{'$gt':1}}, projection=['level', 'weight', 'eis_dim', 'cusp_dim', 'mf_dim'])
    def check_dims(self, rec, verbose=False):
        """
        for k > 1 check eis_dim, eis_new_dim, cusp_dim, mf_dim, mf_new_dim using Sage dimension formulas
        """
        # TIME about 30s
        G = Gamma1(rec['level'])
        k = rec['weight']
        # Compare each stored dimension against the Sage dimension formula.
        for func, key in [(dimension_eis, 'eis_dim'), (dimension_cusp_forms, 'cusp_dim'), (dimension_modular_forms, 'mf_dim')]:
            if not self._test_equality(rec[key], func(G, k), verbose):
                return False
        return True
5,117 | add attachment | from collections.abc import Generator, Iterator, Sequence
from email import _ParamsType, _ParamType
from email.charset import Charset
from email.contentmanager import ContentManager
from email.errors import MessageDefect
from email.policy import Policy
from typing import Any, TypeVar, overload
from typing_extensions import Self, TypeAlias
__all__ = ["Message", "EmailMessage"]

_T = TypeVar("_T")

# A message payload is either sub-messages (multipart) or raw body data.
_PayloadType: TypeAlias = list[Message] | str | bytes | bytearray
# Charsets may be given as Charset objects, names, or omitted entirely.
_CharsetType: TypeAlias = Charset | str | None
# Header values depend on the active policy, hence Any.
_HeaderType: TypeAlias = Any
class Message:
    # Type stub for email.message.Message: a header mapping plus a payload
    # that is either body data or a list of sub-Messages (multipart).
    policy: Policy  # undocumented
    preamble: str | None
    epilogue: str | None
    defects: list[MessageDefect]
    def is_multipart(self) -> bool: ...
    def set_unixfrom(self, unixfrom: str) -> None: ...
    def get_unixfrom(self) -> str | None: ...
    def attach(self, payload: Message) -> None: ...
    def get_payload(self, i: int | None = None, decode: bool = False) -> Any: ...  # returns _PayloadType | None
    def set_payload(self, payload: _PayloadType, charset: _CharsetType = None) -> None: ...
    def set_charset(self, charset: _CharsetType) -> None: ...
    def get_charset(self) -> _CharsetType: ...
    # Mapping-style access to headers (duplicates allowed; names case-insensitive).
    def __len__(self) -> int: ...
    def __contains__(self, name: str) -> bool: ...
    def __iter__(self) -> Iterator[str]: ...
    def __getitem__(self, name: str) -> _HeaderType: ...
    def __setitem__(self, name: str, val: _HeaderType) -> None: ...
    def __delitem__(self, name: str) -> None: ...
    def keys(self) -> list[str]: ...
    def values(self) -> list[_HeaderType]: ...
    def items(self) -> list[tuple[str, _HeaderType]]: ...
    @overload
    def get(self, name: str, failobj: None = None) -> _HeaderType | None: ...
    @overload
    def get(self, name: str, failobj: _T) -> _HeaderType | _T: ...
    @overload
    def get_all(self, name: str, failobj: None = None) -> list[_HeaderType] | None: ...
    @overload
    def get_all(self, name: str, failobj: _T) -> list[_HeaderType] | _T: ...
    def add_header(self, _name: str, _value: str, **_params: _ParamsType) -> None: ...
    def replace_header(self, _name: str, _value: _HeaderType) -> None: ...
    # Content-Type helpers.
    def get_content_type(self) -> str: ...
    def get_content_maintype(self) -> str: ...
    def get_content_subtype(self) -> str: ...
    def get_default_type(self) -> str: ...
    def set_default_type(self, ctype: str) -> None: ...
    @overload
    def get_params(
        self, failobj: None = None, header: str = "content-type", unquote: bool = True
    ) -> list[tuple[str, str]] | None: ...
    @overload
    def get_params(self, failobj: _T, header: str = "content-type", unquote: bool = True) -> list[tuple[str, str]] | _T: ...
    @overload
    def get_param(
        self, param: str, failobj: None = None, header: str = "content-type", unquote: bool = True
    ) -> _ParamType | None: ...
    @overload
    def get_param(self, param: str, failobj: _T, header: str = "content-type", unquote: bool = True) -> _ParamType | _T: ...
    def del_param(self, param: str, header: str = "content-type", requote: bool = True) -> None: ...
    def set_type(self, type: str, header: str = "Content-Type", requote: bool = True) -> None: ...
    @overload
    def get_filename(self, failobj: None = None) -> str | None: ...
    @overload
    def get_filename(self, failobj: _T) -> str | _T: ...
    @overload
    def get_boundary(self, failobj: None = None) -> str | None: ...
    @overload
    def get_boundary(self, failobj: _T) -> str | _T: ...
    def set_boundary(self, boundary: str) -> None: ...
    @overload
    def get_content_charset(self) -> str | None: ...
    @overload
    def get_content_charset(self, failobj: _T) -> str | _T: ...
    @overload
    def get_charsets(self, failobj: None = None) -> list[str | None]: ...
    @overload
    def get_charsets(self, failobj: _T) -> list[str | _T]: ...
    # Depth-first traversal over this message and all sub-parts.
    def walk(self) -> Generator[Self, None, None]: ...
    def get_content_disposition(self) -> str | None: ...
    # Serialization.
    def as_string(self, unixfrom: bool = False, maxheaderlen: int = 0, policy: Policy | None = None) -> str: ...
    def as_bytes(self, unixfrom: bool = False, policy: Policy | None = None) -> bytes: ...
    def __bytes__(self) -> bytes: ...
    def set_param(
        self,
        param: str,
        value: str,
        header: str = "Content-Type",
        requote: bool = True,
        charset: str | None = None,
        language: str = "",
        replace: bool = False,
    ) -> None: ...
    def __init__(self, policy: Policy = ...) -> None: ...
    # The following two methods are undocumented, but a source code comment states that they are public API
    def set_raw(self, name: str, value: _HeaderType) -> None: ...
    def raw_items(self) -> Iterator[tuple[str, _HeaderType]]: ...
class MIMEPart(Message):
    # Type stub for email.message.MIMEPart: the modern (EmailMessage) API,
    # adding content-manager based access and multipart restructuring.
    def __init__(self, policy: Policy | None = None) -> None: ...
    def get_body(self, preferencelist: Sequence[str] = ("related", "html", "plain")) -> Message | None: ...
    def iter_attachments(self) -> Iterator[Message]: ...
    def iter_parts(self) -> Iterator[Message]: ...
    def get_content(self, *args: Any, content_manager: ContentManager | None = None, **kw: Any) -> Any: ...
    def set_content(self, *args: Any, content_manager: ContentManager | None = None, **kw: Any) -> None: ...
    # Convert this part into the corresponding multipart/* container.
    def make_related(self, boundary: str | None = None) -> None: ...
    def make_alternative(self, boundary: str | None = None) -> None: ...
    def make_mixed(self, boundary: str | None = None) -> None: ...
    def add_related(self, *args: Any, content_manager: ContentManager | None = ..., **kw: Any) -> None: ...
    def add_alternative(self, *args: Any, content_manager: ContentManager | None = ..., **kw: Any) -> None: ...
    def METHOD_NAME(self, *args: Any, content_manager: ContentManager | None = ..., **kw: Any) -> None: ...
    def clear(self) -> None: ...
    def clear_content(self) -> None: ...
    def as_string(self, unixfrom: bool = False, maxheaderlen: int | None = None, policy: Policy | None = None) -> str: ...
    def is_attachment(self) -> bool: ...
class EmailMessage(MIMEPart): ... |
5,118 | test timers | import os
import time
from SpiffWorkflow.task import TaskState
from SpiffWorkflow.bpmn.PythonScriptEngine import PythonScriptEngine
from SpiffWorkflow.bpmn.PythonScriptEngineEnvironment import TaskDataEnvironment
from SpiffWorkflow.bpmn.serializer.migration.exceptions import VersionMigrationError
from .BaseTestCase import BaseTestCase
class Version_1_0_Test(BaseTestCase):
    """Migration tests for workflows serialized with version 1.0."""

    def test_convert_subprocess(self):
        # The serialization used here comes from NestedSubprocessTest saved at line 25 with version 1.0
        spec_file = os.path.join(self.DATA_DIR, 'serialization', 'v1.0.json')
        with open(spec_file) as fh:
            workflow = self.serializer.deserialize_json(fh.read())
        # We should be able to finish the workflow from this point
        ready = workflow.get_tasks(TaskState.READY)
        self.assertEqual('Action3', ready[0].task_spec.bpmn_name)
        ready[0].run()
        for _ in range(2):
            workflow.do_engine_steps()
            workflow.refresh_waiting_tasks()
        workflow.do_engine_steps()
        self.assertEqual(True, workflow.is_completed())
class Version_1_1_Test(BaseTestCase):
    """Migration tests for workflows serialized with version 1.1."""

    def _deserialize(self, filename):
        """Load a serialized workflow spec from the test data directory.

        Uses a context manager so the file handle is closed promptly; the
        previous code used ``open(fn).read()``, which leaked the handle
        (relying on garbage collection and raising ResourceWarning).
        """
        fn = os.path.join(self.DATA_DIR, 'serialization', filename)
        with open(fn) as fh:
            return self.serializer.deserialize_json(fh.read())

    def METHOD_NAME(self):
        """Timer events are migrated and the workflow still completes."""
        wf = self._deserialize('v1.1-timers.json')
        wf.script_engine = PythonScriptEngine(environment=TaskDataEnvironment({"time": time}))
        wf.refresh_waiting_tasks()
        wf.do_engine_steps()
        wf.refresh_waiting_tasks()
        wf.do_engine_steps()
        self.assertTrue(wf.is_completed())

    def test_convert_data_specs(self):
        """Data input/output specs are migrated and the workflow completes."""
        wf = self._deserialize('v1.1-data.json')
        wf.do_engine_steps()
        wf.refresh_waiting_tasks()
        wf.do_engine_steps()
        self.assertTrue(wf.is_completed())

    def test_convert_exclusive_gateway(self):
        """Exclusive gateway conditions survive the migration."""
        wf = self._deserialize('v1.1-gateways.json')
        wf.do_engine_steps()
        task = wf.get_tasks_from_spec_name('Gateway_askQuestion')[0]
        self.assertEqual(len(task.task_spec.cond_task_specs), 2)
        ready_task = wf.get_ready_user_tasks()[0]
        ready_task.data['NeedClarification'] = 'Yes'
        ready_task.run()
        wf.do_engine_steps()
        ready_task = wf.get_ready_user_tasks()[0]
        self.assertEqual(ready_task.task_spec.name, 'Activity_A2')

    def test_check_multiinstance(self):
        """MultiInstance tasks cannot be migrated and must raise."""
        with self.assertRaises(VersionMigrationError) as ctx:
            self._deserialize('v1.1-multi.json')
        self.assertEqual(ctx.exception.message, "This workflow cannot be migrated because it contains MultiInstance Tasks")

    def test_remove_loop_reset(self):
        """Loop-reset tasks are removed and the loop still terminates."""
        wf = self._deserialize('v1.1-loop-reset.json')
        # Allow 3 seconds max to allow this test to complete (there are 20 loops with a 0.1s timer)
        end = time.time() + 3
        while not wf.is_completed() and time.time() < end:
            wf.do_engine_steps()
            wf.refresh_waiting_tasks()
        self.assertTrue(wf.is_completed())
        self.assertEqual(wf.last_task.data['counter'], 20)

    def test_update_task_states(self):
        """Task states are mapped onto the current TaskState values."""
        wf = self._deserialize('v1.1-task-states.json')
        start = wf.get_tasks_from_spec_name('Start')[0]
        self.assertEqual(start.state, TaskState.COMPLETED)
        signal = wf.get_tasks_from_spec_name('signal')[0]
        self.assertEqual(signal.state, TaskState.CANCELLED)
        ready_tasks = wf.get_tasks(TaskState.READY)
        while len(ready_tasks) > 0:
            ready_tasks[0].run()
            ready_tasks = wf.get_tasks(TaskState.READY)
        self.assertTrue(wf.is_completed())
class Version_1_2_Test(BaseTestCase):
    """Migration tests for workflows serialized with version 1.2."""

    def _deserialize(self, filename):
        """Load a serialized workflow spec from the test data directory.

        Uses a context manager so the file handle is closed promptly; the
        previous code used ``open(fn).read()``, which leaked the handle
        (relying on garbage collection and raising ResourceWarning).
        """
        fn = os.path.join(self.DATA_DIR, 'serialization', filename)
        with open(fn) as fh:
            return self.serializer.deserialize_json(fh.read())

    def test_remove_boundary_events(self):
        """Boundary events are converted and cancelled correctly."""
        wf = self._deserialize('v1.2-boundary-events.json')
        ready_tasks = wf.get_tasks(TaskState.READY)
        ready_tasks[0].update_data({'value': 'asdf'})
        ready_tasks[0].run()
        wf.do_engine_steps()
        ready_tasks = wf.get_tasks(TaskState.READY)
        ready_tasks[0].update_data({'quantity': 2})
        ready_tasks[0].run()
        wf.do_engine_steps()
        self.assertIn('value', wf.last_task.data)
        # Check that workflow and next task completed
        subprocess = wf.get_tasks_from_spec_name('Subprocess')[0]
        self.assertEqual(subprocess.state, TaskState.COMPLETED)
        print_task = wf.get_tasks_from_spec_name("Activity_Print_Data")[0]
        self.assertEqual(print_task.state, TaskState.COMPLETED)
        # Check that the boundary events were cancelled
        cancel_task = wf.get_tasks_from_spec_name("Catch_Cancel_Event")[0]
        self.assertEqual(cancel_task.state, TaskState.CANCELLED)
        error_1_task = wf.get_tasks_from_spec_name("Catch_Error_1")[0]
        self.assertEqual(error_1_task.state, TaskState.CANCELLED)
        error_none_task = wf.get_tasks_from_spec_name("Catch_Error_None")[0]
        self.assertEqual(error_none_task.state, TaskState.CANCELLED)

    def test_remove_noninterrupting_boundary_events(self):
        """Non-interrupting boundary events keep their waiting tasks."""
        wf = self._deserialize('v1.2-boundary-events-noninterrupting.json')
        wf.get_tasks_from_spec_name('sid-D3365C47-2FAE-4D17-98F4-E68B345E18CE')[0].run()
        wf.do_engine_steps()
        self.assertEqual(1, len(wf.get_tasks(TaskState.READY)))
        self.assertEqual(3, len(wf.get_tasks(TaskState.WAITING)))
        wf.get_tasks_from_spec_name('sid-6FBBB56D-00CD-4C2B-9345-486986BB4992')[0].run()
        wf.do_engine_steps()
        self.assertTrue(wf.is_completed())
import os, sys, unittest, getopt, time
use_resources = []
import ctypes
ctypes_symbols = dir(ctypes)
def need_symbol(name):
    """Return a decorator that skips a test unless ctypes exposes *name*."""
    available = name in ctypes_symbols
    return unittest.skipUnless(available, '{!r} is required'.format(name))
class ResourceDenied(unittest.SkipTest):
    """Test skipped because it requested a disallowed resource.

    This is raised when a test calls requires() for a resource that
    has not been enabled.  Resources are defined by test modules.
    """
def is_resource_enabled(resource):
    """Return True when *resource* may be used by the current test run.

    Calls made from a ``__main__`` module are always allowed; otherwise the
    resource must appear in the global ``use_resources`` list (or "*" must).
    Denied resources are recorded in ``_unavail`` for the final report.
    """
    caller_globals = sys._getframe().f_back.f_globals
    if caller_globals.get("__name__") == "__main__":
        return True
    enabled = use_resources is not None and (
        resource in use_resources or "*" in use_resources
    )
    if not enabled:
        _unavail[resource] = None
    return enabled
_unavail = {}
def requires(resource, msg=None):
    """Raise ResourceDenied unless the specified resource is enabled.

    As with is_resource_enabled(), callers running as ``__main__`` are
    treated as if the resource had been enabled.
    """
    # Direct script execution grants implicit permission for any resource.
    if sys._getframe().f_back.f_globals.get("__name__") == "__main__":
        return
    if is_resource_enabled(resource):
        return
    if msg is None:
        msg = "Use of the `%s' resource not enabled" % resource
    raise ResourceDenied(msg)
def find_package_modules(package, mask):
    """Yield dotted module names inside *package* whose file name matches *mask*."""
    import fnmatch
    loader = getattr(package, "__loader__", None)
    if loader is not None and hasattr(loader, "_files"):
        # Archive-style loader: match against its internal file listing.
        prefix = package.__name__.replace(".", os.path.sep)
        archive_mask = os.path.join(prefix, mask)
        for fnm in loader._files.iterkeys():
            if fnmatch.fnmatchcase(fnm, archive_mask):
                yield os.path.splitext(fnm)[0].replace(os.path.sep, ".")
    else:
        # Plain package directory: match against the file names on disk.
        pkg_dir = package.__path__[0]
        for fnm in os.listdir(pkg_dir):
            if fnmatch.fnmatchcase(fnm, mask):
                yield "%s.%s" % (package.__name__, os.path.splitext(fnm)[0])
def METHOD_NAME(package, mask, verbosity, exclude=()):
    """Return a list of skipped test modules, and a list of test cases."""
    tests = []
    skipped = []
    for modname in find_package_modules(package, mask):
        # Honour the -x exclusion list (matched against the bare module name).
        if modname.split(".")[-1] in exclude:
            skipped.append(modname)
            if verbosity > 1:
                print >> sys.stderr, "Skipped %s: excluded" % modname
            continue
        try:
            mod = __import__(modname, globals(), locals(), ['*'])
        except (ResourceDenied, unittest.SkipTest) as detail:
            # Module-level requires() calls may veto importing the module.
            skipped.append(modname)
            if verbosity > 1:
                print >> sys.stderr, "Skipped %s: %s" % (modname, detail)
            continue
        # Collect every public TestCase subclass visible in the module.
        for name in dir(mod):
            if name.startswith("_"):
                continue
            o = getattr(mod, name)
            if type(o) is type(unittest.TestCase) and issubclass(o, unittest.TestCase):
                tests.append(o)
    return skipped, tests
def usage():
    """Print the module docstring as help text; 1 doubles as the exit status."""
    print __doc__
    return 1
def test_with_refcounts(runner, verbosity, testcase):
    """Run testcase several times, tracking reference counts.

    Requires a debug build: sys.gettotalrefcount() only exists there.
    """
    import gc
    import ctypes
    # Snapshot the ctypes caches so each run starts from the same state.
    # NOTE(review): _win_functype_cache presumably exists only on Windows
    # builds of ctypes -- confirm before running elsewhere.
    ptc = ctypes._pointer_type_cache.copy()
    cfc = ctypes._c_functype_cache.copy()
    wfc = ctypes._win_functype_cache.copy()
    # when searching for refcount leaks, we have to manually reset any
    # caches that ctypes has.
    def cleanup():
        ctypes._pointer_type_cache = ptc.copy()
        ctypes._c_functype_cache = cfc.copy()
        ctypes._win_functype_cache = wfc.copy()
        gc.collect()
    test = unittest.makeSuite(testcase)
    # Warm-up passes: let imports/caches settle so they are not counted
    # as leaks below (rc is intentionally unused here).
    for i in range(5):
        rc = sys.gettotalrefcount()
        runner.run(test)
        cleanup()
    COUNT = 5
    refcounts = [None] * COUNT
    # Measured passes: record the total-refcount delta of each run.
    for i in range(COUNT):
        rc = sys.gettotalrefcount()
        runner.run(test)
        cleanup()
        refcounts[i] = sys.gettotalrefcount() - rc
    # Any non-zero delta indicates a leak (Python 2: filter returns a list).
    if filter(None, refcounts):
        print "%s leaks:\n\t" % testcase, refcounts
    elif verbosity:
        print "%s: ok." % testcase
class TestRunner(unittest.TextTestRunner):
    """Text test runner that also reports skipped modules and resources."""

    def run(self, test, skipped):
        "Run the given test case or test suite."
        # Same as unittest.TextTestRunner.run, except that it reports
        # skipped tests.
        result = self._makeResult()
        startTime = time.time()
        test(result)
        stopTime = time.time()
        timeTaken = stopTime - startTime
        result.printErrors()
        self.stream.writeln(result.separator2)
        run = result.testsRun
        if _unavail: #skipped:
            # NOTE(review): relies on Python 2 where dict.keys() returns a
            # sortable list -- .sort() would fail on a py3 view object.
            requested = _unavail.keys()
            requested.sort()
            self.stream.writeln("Ran %d test%s in %.3fs (%s module%s skipped)" %
                                (run, run != 1 and "s" or "", timeTaken,
                                 len(skipped),
                                 len(skipped) != 1 and "s" or ""))
            self.stream.writeln("Unavailable resources: %s" % ", ".join(requested))
        else:
            self.stream.writeln("Ran %d test%s in %.3fs" %
                                (run, run != 1 and "s" or "", timeTaken))
        self.stream.writeln()
        if not result.wasSuccessful():
            self.stream.write("FAILED (")
            failed, errored = map(len, (result.failures, result.errors))
            if failed:
                self.stream.write("failures=%d" % failed)
            if errored:
                if failed: self.stream.write(", ")
                self.stream.write("errors=%d" % errored)
            self.stream.writeln(")")
        else:
            self.stream.writeln("OK")
        return result
def main(*packages):
    """Parse command-line flags and run the tests of each given package.

    Flags: -q/-v adjust verbosity, -r hunts refcount leaks (debug build
    only), -u enables resources, -x excludes modules; the first positional
    argument overrides the module filename mask.
    """
    try:
        opts, args = getopt.getopt(sys.argv[1:], "rqvu:x:")
    except getopt.error:
        return usage()
    verbosity = 1
    search_leaks = False
    exclude = []
    for flag, value in opts:
        if flag == "-q":
            verbosity -= 1
        elif flag == "-v":
            verbosity += 1
        elif flag == "-r":
            # Refcount tracking needs sys.gettotalrefcount (debug build).
            try:
                sys.gettotalrefcount
            except AttributeError:
                print >> sys.stderr, "-r flag requires Python debug build"
                return -1
            search_leaks = True
        elif flag == "-u":
            use_resources.extend(value.split(","))
        elif flag == "-x":
            exclude.extend(value.split(","))
    mask = "test_*.py"
    if args:
        mask = args[0]
    # NOTE(review): run_tests() returns whether errors occurred, but the
    # result is discarded here -- confirm whether an exit status was intended.
    for package in packages:
        run_tests(package, mask, verbosity, search_leaks, exclude)
def run_tests(package, mask, verbosity, search_leaks, exclude):
    """Collect and run the tests of *package*; return True if errors occurred."""
    skipped, testcases = METHOD_NAME(package, mask, verbosity, exclude)
    suite = unittest.TestSuite([unittest.makeSuite(case) for case in testcases])
    runner = TestRunner(verbosity=verbosity)
    result = runner.run(suite, skipped)
    if search_leaks:
        # hunt for refcount leaks: re-run every case under a plain runner
        # while watching the interpreter's total refcount.
        basic_runner = BasicTestRunner()
        for case in testcases:
            test_with_refcounts(basic_runner, verbosity, case)
    return bool(result.errors)
class BasicTestRunner:
    """Minimal test runner: collects results without printing anything."""

    def run(self, test):
        """Run *test* and return the populated TestResult."""
        result = unittest.TestResult()
        test(result)
        return result
# Copyright © 2012-2023 Forschungszentrum Jülich GmbH
# SPDX-License-Identifier: LGPL-3.0-or-later
from jupedsim_visualizer.config import ZLayers
from vtkmodules.vtkCommonCore import vtkCommand, vtkPoints
from vtkmodules.vtkCommonDataModel import (
vtkCellArray,
vtkPolyData,
vtkPolyLine,
)
from vtkmodules.vtkInteractionStyle import vtkInteractorStyleUser
from vtkmodules.vtkRenderingCore import vtkActor, vtkCamera, vtkPolyDataMapper
from jupedsim import RoutingEngine
class MoveController:
    """Mouse/keyboard camera controller for the visualizer.

    WASD pans the camera and q/e zooms it. While the left mouse button is
    held down, the controller computes a route (via the routing engine)
    from the press position to the current cursor position and draws it
    as a red polyline.
    """

    def __init__(
        self, interactor_style: vtkInteractorStyleUser, cam: vtkCamera
    ):
        self.route_from = None  # world position where LMB went down
        self.route_to = None  # current cursor world position while dragging
        self.lmb_pressed = False
        self.cam = cam
        self.navi = None  # RoutingEngine, supplied later via set_navi()
        self.actor = None  # actor of the currently drawn path, if any
        self.interactor_style = interactor_style
        interactor_style.AddObserver(vtkCommand.CharEvent, self._on_char)
        # Swallow key press/release so default vtk key bindings do not fire.
        interactor_style.AddObserver(
            vtkCommand.KeyPressEvent, self._ignore_evt
        )
        interactor_style.AddObserver(
            vtkCommand.KeyReleaseEvent, self._ignore_evt
        )
        interactor_style.AddObserver(
            vtkCommand.LeftButtonPressEvent, self._on_lmb_pressed
        )
        interactor_style.AddObserver(
            vtkCommand.LeftButtonReleaseEvent, self._on_lmb_released
        )
        interactor_style.AddObserver(
            vtkCommand.MouseMoveEvent, self.METHOD_NAME
        )

    def set_navi(self, navi: RoutingEngine | None):
        """Set (or clear) the routing engine used to compute paths."""
        self.navi = navi

    def _on_char(self, obj, evt):
        """Handle WASD panning and q/e zooming of the parallel camera."""
        char = chr(obj.GetChar())

        def offset():
            # Pan step scales with the zoom level so movement feels constant.
            return self.cam.GetParallelScale() * 0.035

        if char == "e":
            self.cam.SetParallelScale(self.cam.GetParallelScale() * 0.95)
        elif char == "q":
            self.cam.SetParallelScale(self.cam.GetParallelScale() * 1.05)
        elif char == "w":
            (x, y, z) = self.cam.GetPosition()
            y += offset()
            self.cam.SetPosition(x, y, z)
            self.cam.SetFocalPoint(x, y, 0)
        elif char == "s":
            (x, y, z) = self.cam.GetPosition()
            y -= offset()
            self.cam.SetPosition(x, y, z)
            self.cam.SetFocalPoint(x, y, 0)
        elif char == "a":
            (x, y, z) = self.cam.GetPosition()
            x -= offset()
            self.cam.SetPosition(x, y, z)
            self.cam.SetFocalPoint(x, y, 0)
        elif char == "d":
            (x, y, z) = self.cam.GetPosition()
            x += offset()
            self.cam.SetPosition(x, y, z)
            self.cam.SetFocalPoint(x, y, 0)
        obj.GetInteractor().Render()

    def _ignore_evt(self, obj, evt):
        """Intentionally ignore the event (blocks default vtk handling)."""
        pass

    def _on_lmb_pressed(self, obj, evt):
        """Record the world position where the left mouse button went down."""
        if not self.navi:
            return
        interactor = obj.GetInteractor()
        if not self.lmb_pressed:
            self.lmb_pressed = True
            display_pos = interactor.GetEventPosition()
            world_pos = self._to_world_coordinate_2d(display_pos)
            self.route_from = world_pos

    def METHOD_NAME(self, obj, evt):
        """While dragging, update the route target and redraw the path."""
        interactor = obj.GetInteractor()
        if not self.lmb_pressed:
            return
        if not self.navi:
            return
        # (redundant re-set of self.lmb_pressed removed: the guard above
        # already guarantees it is True here)
        display_pos = interactor.GetEventPosition()
        world_pos = self._to_world_coordinate_2d(display_pos)
        self.route_to = world_pos
        self._render_path()

    def _on_lmb_released(self, obj, evt):
        """End the drag and forget both route endpoints."""
        self.lmb_pressed = False
        self.route_from = None
        self.route_to = None

    def _render_path(self):
        """Compute waypoints between the endpoints and draw a red polyline."""
        if not self.navi:
            return
        interactor = self.interactor_style.GetInteractor()
        renderer = (
            interactor.GetRenderWindow().GetRenderers().GetFirstRenderer()
        )
        if (
            not self.route_to
            or not self.route_from
            or not self.navi.is_routable(self.route_from)
            or not self.navi.is_routable(self.route_to)
        ):
            # No valid route: remove any previously drawn path.
            if self.actor:
                renderer.RemoveActor(self.actor)
                self.actor = None
            return
        points = self.navi.compute_waypoints(self.route_from, self.route_to)
        vtk_points = vtkPoints()
        polyline = vtkPolyLine()
        polyline.GetPointIds().SetNumberOfIds(len(points))
        for idx, pt in enumerate(points):
            vtk_points.InsertNextPoint(pt[0], pt[1], ZLayers.nav_line)
            polyline.GetPointIds().SetId(idx, idx)
        poly_data = vtkPolyData()
        poly_data.SetPoints(vtk_points)
        cells = vtkCellArray()
        cells.InsertNextCell(polyline)
        poly_data.SetLines(cells)
        mapper = vtkPolyDataMapper()
        mapper.SetInputData(poly_data)
        # Replace the previous path actor with a freshly built one.
        if self.actor:
            renderer.RemoveActor(self.actor)
        self.actor = vtkActor()
        self.actor.SetMapper(mapper)
        self.actor.GetProperty().SetColor(1, 0, 0)
        self.actor.GetProperty().SetLineWidth(3)
        renderer.AddActor(self.actor)
        interactor.Render()

    def _to_world_coordinate_2d(
        self, display_pos: tuple[float, float]
    ) -> tuple[float, float]:
        """Convert a display (pixel) position into 2D world coordinates."""
        renderer = (
            self.interactor_style.GetInteractor()
            .GetRenderWindow()
            .GetRenderers()
            .GetFirstRenderer()
        )
        renderer.SetDisplayPoint(display_pos[0], display_pos[1], 0)
        renderer.DisplayToWorld()
        world = renderer.GetWorldPoint()
        # GetWorldPoint returns homogeneous coordinates: divide by w.
        return (world[0] / world[3], world[1] / world[3])
# -*- coding: utf-8 -*-
#
# This file is part of SENAITE.CORE.
#
# SENAITE.CORE is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright 2018-2023 by it's authors.
# Some rights reserved, see README and LICENSE.
from six import string_types
from itertools import chain
from bika.lims import api
from bika.lims import senaiteMessageFactory as _
from bika.lims.catalog import SETUP_CATALOG
from bika.lims.catalog import CATALOG_ANALYSIS_LISTING
from bika.lims.validators import ServiceKeywordValidator
from senaite.core.browser.form.adapters import EditFormAdapterBase
class EditForm(EditFormAdapterBase):
    """Edit form adapter for Analysis Service

    Drives the dynamic behaviour of the Analysis Service edit form:
    warns when no method is selected, locks the keyword once it is in
    use, and keeps the Method/Instrument/Calculation selectors in sync.
    """

    def initialized(self, data):
        """Handle initial form load: warnings and read-only protection."""
        form = data.get("form")
        # Check if method is set
        method = form.get("Method")
        methods = form.get("Methods:list")
        if not (method or methods):
            self.add_status_message(
                _("No Method selected for this Service"),
                level="warning")
        # Protect keyword field once it is referenced by active analyses
        keyword = form.get("Keyword")
        if keyword:
            writable = self.METHOD_NAME(keyword)
            if not writable:
                self.add_readonly_field(
                    "Keyword", _("Keyword is used in active analyses "
                                 "and can not be changed anymore"))
        return self.data

    def modified(self, data):
        """React to field changes: validate keyword, refilter selectors."""
        name = data.get("name")
        value = data.get("value")
        # Handle Keyword change
        if name == "Keyword":
            self.add_error_field("Keyword", self.validate_keyword(value))
        # Handle Methods Change
        elif name == "Methods":
            # Available Methods
            empty = [{"title": _("None"), "value": None}]
            # Get selected methods
            # NOTE(review): relies on Python 2 map() returning a list
            # (truth test and list concatenation below) -- confirm runtime.
            methods = map(api.get_object_by_uid, value)
            # Available instruments for the selected methods
            instruments = self.get_available_instruments_for(methods)
            # Available calculations for the selected methods
            calculations = self.get_available_calculations_for(methods)
            # Build select options
            m_opts = map(lambda o: dict(
                title=api.get_title(o), value=api.get_uid(o)), methods)
            i_opts = map(lambda o: dict(
                title=api.get_title(o), value=api.get_uid(o)), instruments)
            c_opts = map(lambda o: dict(
                title=api.get_title(o), value=api.get_uid(o)), calculations)
            # When methods are selected, we filter other fields accordingly
            if methods:
                # selected instruments
                i_sel = map(api.get_uid, instruments)
                # set update fields
                self.add_update_field("Method", {
                    "options": empty + m_opts})
                self.add_update_field("Instruments_options", {
                    "options": i_opts})
                self.add_update_field("Instruments:list", {
                    "options": i_opts, "selected": i_sel})
                self.add_update_field("Instrument", {
                    "options": empty + i_opts})
                self.add_update_field("Calculation", {
                    "options": empty + c_opts})
            else:
                self.add_update_field("Method", {
                    "options": empty})
                self.add_update_field("Instruments:list", {
                    "options": []})
                self.add_update_field("Instruments_options", {
                    "options": i_opts})
                self.add_update_field("Instrument", {
                    "options": empty})
                self.add_update_field("Calculation", {
                    "options": empty + c_opts})
        # Handle Instruments Change
        elif name == "Instruments":
            instruments = map(api.get_object_by_uid, value)
            empty = [{"title": _("None"), "value": None}]
            i_opts = map(lambda o: dict(
                title=api.get_title(o), value=api.get_uid(o)), instruments)
            self.add_update_field("Instrument", {
                "options": empty + i_opts})
        return self.data

    def get_available_instruments_for(self, methods):
        """Return all available instruments for the given methods

        If no methods are given, all active instruments are returned
        """
        if methods:
            return list(chain(*map(lambda m: m.getInstruments(), methods)))
        query = {
            "portal_type": "Instrument",
            "is_active": True,
            "sort_on": "sortable_title"
        }
        brains = self.setup_catalog(query)
        return map(api.get_object, brains)

    def get_available_calculations_for(self, methods):
        """Return all available calculations for the given methods

        If no methods are given, all active calculations are returned
        """
        if methods:
            return list(chain(*map(lambda m: m.getCalculations(), methods)))
        query = {
            "portal_type": "Calculation",
            "is_active": True,
            "sort_on": "sortable_title"
        }
        brains = self.setup_catalog(query)
        return map(api.get_object, brains)

    @property
    def setup_catalog(self):
        return api.get_tool(SETUP_CATALOG)

    @property
    def analysis_catalog(self):
        return api.get_tool(CATALOG_ANALYSIS_LISTING)

    def METHOD_NAME(self, keyword):
        """Check if the keyword can be changed

        Writable if no active analyses exist with the given keyword
        """
        query = {
            "portal_type": "Analysis",
            "is_active": True,
            "getKeyword": keyword}
        brains = self.analysis_catalog(query)
        return len(brains) == 0

    def validate_keyword(self, value):
        """Validate the service keyword

        Returns an error message string, or None when the value is valid
        or unchanged.
        """
        current_value = self.context.getKeyword()
        # Check if the values changed
        if current_value == value:
            # nothing changed
            return
        # Check if the new value is empty
        if not value:
            return _("Keyword required")
        # Check the current value with the validator
        validator = ServiceKeywordValidator()
        check = validator(value, instance=self.context)
        if isinstance(check, string_types):
            return _(check)
        return None
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetGeofenceCollectionResult',
'AwaitableGetGeofenceCollectionResult',
'get_geofence_collection',
'get_geofence_collection_output',
]
@pulumi.output_type
class GetGeofenceCollectionResult:
    """
    A collection of values returned by getGeofenceCollection.
    """
    # NOTE: generated code (tfgen) -- each constructor argument is
    # type-checked and stored via pulumi.set; the matching @property
    # getters below read the values back with pulumi.get.
    def __init__(__self__, collection_arn=None, collection_name=None, create_time=None, METHOD_NAME=None, id=None, kms_key_id=None, tags=None, update_time=None):
        if collection_arn and not isinstance(collection_arn, str):
            raise TypeError("Expected argument 'collection_arn' to be a str")
        pulumi.set(__self__, "collection_arn", collection_arn)
        if collection_name and not isinstance(collection_name, str):
            raise TypeError("Expected argument 'collection_name' to be a str")
        pulumi.set(__self__, "collection_name", collection_name)
        if create_time and not isinstance(create_time, str):
            raise TypeError("Expected argument 'create_time' to be a str")
        pulumi.set(__self__, "create_time", create_time)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'description' to be a str")
        pulumi.set(__self__, "description", METHOD_NAME)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if kms_key_id and not isinstance(kms_key_id, str):
            raise TypeError("Expected argument 'kms_key_id' to be a str")
        pulumi.set(__self__, "kms_key_id", kms_key_id)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if update_time and not isinstance(update_time, str):
            raise TypeError("Expected argument 'update_time' to be a str")
        pulumi.set(__self__, "update_time", update_time)

    @property
    @pulumi.getter(name="collectionArn")
    def collection_arn(self) -> str:
        """
        ARN for the geofence collection resource. Used when you need to specify a resource across all AWS.
        """
        return pulumi.get(self, "collection_arn")

    @property
    @pulumi.getter(name="collectionName")
    def collection_name(self) -> str:
        return pulumi.get(self, "collection_name")

    @property
    @pulumi.getter(name="createTime")
    def create_time(self) -> str:
        """
        Timestamp for when the geofence collection resource was created in ISO 8601 format.
        """
        return pulumi.get(self, "create_time")

    @property
    @pulumi.getter
    def METHOD_NAME(self) -> str:
        """
        Optional description of the geofence collection resource.
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="kmsKeyId")
    def kms_key_id(self) -> str:
        """
        Key identifier for an AWS KMS customer managed key assigned to the Amazon Location resource.
        """
        return pulumi.get(self, "kms_key_id")

    @property
    @pulumi.getter
    def tags(self) -> Mapping[str, str]:
        """
        Key-value map of resource tags for the geofence collection.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter(name="updateTime")
    def update_time(self) -> str:
        """
        Timestamp for when the geofence collection resource was last updated in ISO 8601 format.
        """
        return pulumi.get(self, "update_time")
class AwaitableGetGeofenceCollectionResult(GetGeofenceCollectionResult):
    # Awaitable wrapper so the result can be used with `await`; __await__
    # yields nothing and immediately returns a plain result copy.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetGeofenceCollectionResult(
            collection_arn=self.collection_arn,
            collection_name=self.collection_name,
            create_time=self.create_time,
            METHOD_NAME=self.METHOD_NAME,
            id=self.id,
            kms_key_id=self.kms_key_id,
            tags=self.tags,
            update_time=self.update_time)
def get_geofence_collection(collection_name: Optional[str] = None,
                            kms_key_id: Optional[str] = None,
                            tags: Optional[Mapping[str, str]] = None,
                            opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetGeofenceCollectionResult:
    """
    Retrieve information about a Location Service Geofence Collection.
    ## Example Usage
    ### Basic Usage
    ```python
    import pulumi
    import pulumi_aws as aws
    example = aws.location.get_geofence_collection(collection_name="example")
    ```
    :param str collection_name: Name of the geofence collection.
    :param str kms_key_id: Key identifier for an AWS KMS customer managed key assigned to the Amazon Location resource.
    :param Mapping[str, str] tags: Key-value map of resource tags for the geofence collection.
    """
    # Marshal the arguments and invoke the provider data source.
    __args__ = dict()
    __args__['collectionName'] = collection_name
    __args__['kmsKeyId'] = kms_key_id
    __args__['tags'] = tags
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('aws:location/getGeofenceCollection:getGeofenceCollection', __args__, opts=opts, typ=GetGeofenceCollectionResult).value
    # Re-wrap the raw invoke result in the awaitable result type.
    return AwaitableGetGeofenceCollectionResult(
        collection_arn=pulumi.get(__ret__, 'collection_arn'),
        collection_name=pulumi.get(__ret__, 'collection_name'),
        create_time=pulumi.get(__ret__, 'create_time'),
        METHOD_NAME=pulumi.get(__ret__, 'description'),
        id=pulumi.get(__ret__, 'id'),
        kms_key_id=pulumi.get(__ret__, 'kms_key_id'),
        tags=pulumi.get(__ret__, 'tags'),
        update_time=pulumi.get(__ret__, 'update_time'))
@_utilities.lift_output_func(get_geofence_collection)
def get_geofence_collection_output(collection_name: Optional[pulumi.Input[str]] = None,
                                   kms_key_id: Optional[pulumi.Input[Optional[str]]] = None,
                                   tags: Optional[pulumi.Input[Optional[Mapping[str, str]]]] = None,
                                   opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetGeofenceCollectionResult]:
    """
    Retrieve information about a Location Service Geofence Collection.
    ## Example Usage
    ### Basic Usage
    ```python
    import pulumi
    import pulumi_aws as aws
    example = aws.location.get_geofence_collection(collection_name="example")
    ```
    :param str collection_name: Name of the geofence collection.
    :param str kms_key_id: Key identifier for an AWS KMS customer managed key assigned to the Amazon Location resource.
    :param Mapping[str, str] tags: Key-value map of resource tags for the geofence collection.
    """
    # Body intentionally empty: lift_output_func supplies the implementation.
    ...
import numpy as np
from pathlib import Path
import warnings
from pynwb import NWBHDF5IO, validate, TimeSeries
from pynwb.image import ImageSeries
from pynwb.testing import TestCase
class TestReadOldVersions(TestCase):
    """Read/validate NWB files written by older pynwb versions."""

    # Warnings expected when reading specific legacy files.
    # (A stray '' literal that was implicitly concatenated onto the first
    # key has been removed; the key value is unchanged.)
    expected_warnings = {
        '2.1.0_imageseries_non_external_format.nwb': [(
            "ImageSeries 'test_imageseries': Format must be 'external' when external_file is specified."
        )],
        '2.1.0_imageseries_nonmatch_starting_frame.nwb': [(
            "ImageSeries 'test_imageseries': The number of frame indices in 'starting_frame' should have the same "
            "length as 'external_file'."
        )],
    }
    # Validation errors expected for specific legacy files.
    expected_errors = {
        '1.0.2_str_experimenter.nwb': [("root/general/experimenter (general/experimenter): incorrect shape - expected "
                                        "an array of shape '[None]', got non-array data 'one experimenter'")],
        '1.0.3_str_experimenter.nwb': [("root/general/experimenter (general/experimenter): incorrect shape - expected "
                                        "an array of shape '[None]', got non-array data 'one experimenter'")],
        '1.0.2_str_pub.nwb': [("root/general/related_publications (general/related_publications): incorrect shape "
                               "- expected an array of shape '[None]', got non-array data 'one publication'")],
        '1.0.3_str_pub.nwb': [("root/general/related_publications (general/related_publications): incorrect shape "
                               "- expected an array of shape '[None]', got non-array data 'one publication'")],
    }

    def test_read(self):
        """Test reading and validating all NWB files in the same folder as this file.

        This folder contains NWB files generated by previous versions of NWB using the script
        src/pynwb/testing/make_test_files.py
        """
        dir_path = Path(__file__).parent
        nwb_files = dir_path.glob('*.nwb')
        for f in nwb_files:
            with self.subTest(file=f.name):
                with warnings.catch_warnings(record=True) as warnings_on_read:
                    warnings.simplefilter("always")
                    with NWBHDF5IO(str(f), 'r', load_namespaces=True) as io:
                        errors = validate(io)
                        io.read()
                        for w in warnings_on_read:
                            if f.name in self.expected_warnings:
                                if str(w.message) not in self.expected_warnings[f.name]:
                                    pass
                                    # will replace above with below after the test file is updated
                                    # raise Exception("Unexpected warning: %s: %s" % (f.name, str(w.message)))
                            else:
                                pass
                                # will replace above with below after the test file is updated
                                # raise Exception("Unexpected warning: %s: %s" % (f.name, str(w.message)))
                        if errors:
                            for e in errors:
                                if f.name in self.expected_errors:
                                    if str(e) not in self.expected_errors[f.name]:
                                        warnings.warn('%s: %s' % (f.name, e))
                                else:
                                    raise Exception("Unexpected validation error: %s: %s" % (f.name, e))
                            # TODO uncomment below when validation errors have been fixed
                            # raise Exception('%d validation error(s). See warnings.' % len(errors))

    def test_read_timeseries_no_data(self):
        """Test that a TimeSeries written without data is read with data set to the default value."""
        f = Path(__file__).parent / '1.5.1_timeseries_no_data.nwb'
        with NWBHDF5IO(str(f), 'r') as io:
            read_nwbfile = io.read()
            np.testing.assert_array_equal(read_nwbfile.acquisition['test_timeseries'].data, TimeSeries.DEFAULT_DATA)

    def test_read_timeseries_no_unit(self):
        """Test that an ImageSeries written without unit is read with unit set to the default value."""
        f = Path(__file__).parent / '1.5.1_timeseries_no_unit.nwb'
        with NWBHDF5IO(str(f), 'r') as io:
            read_nwbfile = io.read()
            self.assertEqual(read_nwbfile.acquisition['test_timeseries'].unit, TimeSeries.DEFAULT_UNIT)

    def test_read_imageseries_no_data(self):
        """Test that an ImageSeries written without data is read with data set to the default value."""
        f = Path(__file__).parent / '1.5.1_imageseries_no_data.nwb'
        with NWBHDF5IO(str(f), 'r') as io:
            read_nwbfile = io.read()
            np.testing.assert_array_equal(read_nwbfile.acquisition['test_imageseries'].data, ImageSeries.DEFAULT_DATA)

    def test_read_imageseries_no_unit(self):
        """Test that an ImageSeries written without unit is read with unit set to the default value."""
        f = Path(__file__).parent / '1.5.1_imageseries_no_unit.nwb'
        with NWBHDF5IO(str(f), 'r') as io:
            read_nwbfile = io.read()
            self.assertEqual(read_nwbfile.acquisition['test_imageseries'].unit, ImageSeries.DEFAULT_UNIT)

    def METHOD_NAME(self):
        """Test that reading an ImageSeries with an inconsistent format does not change the value."""
        fbase = "2.1.0_imageseries_non_external_format.nwb"
        f = Path(__file__).parent / fbase
        expected_warning = self.expected_warnings[fbase][0]
        with self.assertWarnsWith(UserWarning, expected_warning):
            with NWBHDF5IO(str(f), 'r') as io:
                read_nwbfile = io.read()
                self.assertEqual(read_nwbfile.acquisition['test_imageseries'].format, "tiff")

    def test_read_imageseries_nonmatch_starting_frame(self):
        """Test that reading an ImageSeries with an inconsistent starting_frame does not change the value."""
        fbase = "2.1.0_imageseries_nonmatch_starting_frame.nwb"
        f = Path(__file__).parent / fbase
        expected_warning = self.expected_warnings[fbase][0]
        with self.assertWarnsWith(UserWarning, expected_warning):
            with NWBHDF5IO(str(f), 'r') as io:
                read_nwbfile = io.read()
                np.testing.assert_array_equal(read_nwbfile.acquisition['test_imageseries'].starting_frame, [1, 2, 3])

    def test_read_subject_no_age__reference(self):
        """Test that reading a Subject without an age__reference set with NWB schema 2.5.0 sets the value to None"""
        f = Path(__file__).parent / '2.2.0_subject_no_age__reference.nwb'
        with NWBHDF5IO(str(f), 'r') as io:
            read_nwbfile = io.read()
            self.assertIsNone(read_nwbfile.subject.age__reference)
"""`Dataset` class(es) for reading data from SQLite databases."""
from typing import Any, List, Optional, Tuple, Union
import pandas as pd
import sqlite3
from graphnet.data.dataset.dataset import Dataset, ColumnMissingException
class SQLiteDataset(Dataset):
    """Pytorch dataset for reading data from SQLite databases."""

    # Implementing abstract method(s)
    def _init(self) -> None:
        """Validate the configured path(s) and prepare connection state."""
        # Check(s): either a list of databases or a single ".db" file.
        self._database_list: Optional[List[str]]
        if isinstance(self._path, list):
            self._database_list = self._path
            self._all_connections_established = False
            self._all_connections: List[sqlite3.Connection] = []
        else:
            self._database_list = None
            assert isinstance(self._path, str)
            assert self._path.endswith(
                ".db"
            ), f"Format of input file `{self._path}` is not supported."
        if self._database_list is not None:
            self._current_database: Optional[int] = None
        # Set custom member variable(s): pre-joined column lists for SQL.
        self._features_string = ", ".join(self._features)
        self._truth_string = ", ".join(self._truth)
        if self._node_truth:
            self._node_truth_string = ", ".join(self._node_truth)
        self._conn: Optional[sqlite3.Connection] = None

    def _post_init(self) -> None:
        # Ensure no connection is carried over into DataLoader workers.
        self._close_connection()

    def query_table(
        self,
        table: str,
        columns: Union[List[str], str],
        sequential_index: Optional[int] = None,
        selection: Optional[str] = None,
    ) -> List[Tuple[Any, ...]]:
        """Query table at a specific index, optionally with some selection."""
        # Check(s)
        if isinstance(columns, list):
            columns = ", ".join(columns)
        if not selection:  # I.e., `None` or `""`
            selection = "1=1"  # Identically true, to select all
        index = self._get_event_index(sequential_index)
        # Query table
        assert index is not None
        self.METHOD_NAME(index)
        try:
            assert self._conn
            if sequential_index is None:
                combined_selections = selection
            else:
                combined_selections = (
                    f"{self._index_column} = {index} and {selection}"
                )
            result = self._conn.execute(
                f"SELECT {columns} FROM {table} WHERE "
                f"{combined_selections}"
            ).fetchall()
        except sqlite3.OperationalError as e:
            # Surface missing columns as a dedicated exception type.
            if "no such column" in str(e):
                raise ColumnMissingException(str(e))
            else:
                raise e
        return result

    def _get_all_indices(self) -> List[int]:
        """Read every event index from the truth table."""
        self.METHOD_NAME(0)
        indices = pd.read_sql_query(
            f"SELECT {self._index_column} FROM {self._truth_table}", self._conn
        )
        self._close_connection()
        return indices.values.ravel().tolist()

    def _get_event_index(
        self, sequential_index: Optional[int]
    ) -> Optional[int]:
        """Map a sequential dataset index to the stored event index."""
        index: int = 0
        if sequential_index is not None:
            index_ = self._indices[sequential_index]
            if self._database_list is None:
                assert isinstance(index_, int)
                index = index_
            else:
                # Multi-database case: entries are [event_index, db_index].
                assert isinstance(index_, list)
                index = index_[0]
        return index

    # Custom, internal method(s)
    # @TODO: Is it necessary to return anything here?
    def METHOD_NAME(self, i: int) -> "SQLiteDataset":
        """Make sure that a sqlite3 connection is open."""
        if self._database_list is None:
            assert isinstance(self._path, str)
            if self._conn is None:
                self._conn = sqlite3.connect(self._path)
        else:
            indices = self._indices[i]
            assert isinstance(indices, list)
            if self._conn is None:
                # Lazily open one connection per database on first use.
                if self._all_connections_established is False:
                    self._all_connections = []
                    for database in self._database_list:
                        con = sqlite3.connect(database)
                        self._all_connections.append(con)
                    self._all_connections_established = True
                self._conn = self._all_connections[indices[1]]
            # Switch connection when the requested event lives in a
            # different database than the current one.
            if indices[1] != self._current_database:
                self._conn = self._all_connections[indices[1]]
                self._current_database = indices[1]
        return self

    # @TODO: Is it necessary to return anything here?
    def _close_connection(self) -> "SQLiteDataset":
        """Make sure that no sqlite3 connection is open.

        This is necessary to calls this before passing to
        `torch.DataLoader` such that the dataset replica on each worker
        is required to create its own connection (thereby avoiding
        `sqlite3.DatabaseError: database disk image is malformed` errors
        due to inability to use sqlite3 connection accross processes.
        """
        if self._conn is not None:
            self._conn.close()
            del self._conn
            self._conn = None
        if self._database_list is not None:
            if self._all_connections_established:
                for con in self._all_connections:
                    con.close()
                del self._all_connections
                self._all_connections_established = False
                self._conn = None
        return self
#!/usr/bin/python3
#
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2019-2021 Xilinx, Inc
#
import sys
import time
import math
# Following found in PYTHONPATH setup by XRT
import pyxrt
# utils_binding.py
sys.path.append('../')
from utils_binding import *
def current_micro_time():
    """Return the current wall-clock time in integer microseconds.

    Was previously a lambda assignment (PEP 8 E731); a `def` keeps the
    same module-level callable name with a proper name in tracebacks.
    """
    return int(round(time.time() * 1000000))


DATASIZE = int(1024 * 1024 * 16)  # Per-buffer transfer size: 16 MB
def getThreshold(devhdl):
    """Pick the expected bandwidth threshold (MB/s) for this device.

    Mirrors the original cascade of `if` checks: rules are applied in
    order and the last matching rule wins; 40000 is the default.
    """
    rules = [
        (("qdma", "qep"), 30000),
        (("gen3x4", "_u26z_"), 20000),
        (("u2x4", "U2x4", "u2_gen3x4"), 10000),
        # Underscore-delimited so this doesn't match u250.
        (("_u25_", "_u30_"), 9000),
    ]
    name = devhdl.get_info(pyxrt.xrt_info_device.name)
    threshold = 40000
    for needles, value in rules:
        if any(needle in name for needle in needles):
            threshold = value
    return threshold
def METHOD_NAME(devhdl, krnlhdl, argno, isInput):
    """Allocate a DATASIZE-byte device buffer for kernel argument `argno`.

    Input buffers are filled with a repeating 0..255 byte pattern and
    output buffers are zeroed; the host contents are then synced to the
    device. Returns the (buffer object, host mapping) pair.
    """
    bo = pyxrt.bo(devhdl, DATASIZE, pyxrt.bo.normal, krnlhdl.group_id(argno))
    buf = bo.map()
    # Hoist the invariant isInput test out of the 16M-iteration fill loop
    # instead of re-evaluating the conditional on every byte.
    if isInput:
        for i in range(DATASIZE):
            buf[i] = i % 256
    else:
        for i in range(DATASIZE):
            buf[i] = 0
    bo.sync(pyxrt.xclBOSyncDirection.XCL_BO_SYNC_BO_TO_DEVICE, DATASIZE, 0)
    return bo, buf
def runKernel(opt):
    """Run the two bandwidth kernels concurrently at increasing burst
    lengths, verify data integrity, and check peak throughput against the
    device-specific threshold.

    Raises RuntimeError on a data mismatch or insufficient throughput.
    """
    d = pyxrt.device(opt.index)
    xbin = pyxrt.xclbin(opt.bitstreamFile)
    uuid = d.load_xclbin(xbin)
    khandle1 = pyxrt.kernel(d, uuid, "bandwidth1", pyxrt.kernel.shared)
    khandle2 = pyxrt.kernel(d, uuid, "bandwidth2", pyxrt.kernel.shared)
    # One output and one input buffer per kernel (kernel args 0 and 1).
    output_bo1, output_buf1 = METHOD_NAME(d, khandle1, 0, False)
    output_bo2, output_buf2 = METHOD_NAME(d, khandle2, 0, False)
    input_bo1, input_buf1 = METHOD_NAME(d, khandle1, 1, True)
    input_bo2, input_buf2 = METHOD_NAME(d, khandle2, 1, True)
    TYPESIZE = 512  # bits per beat; TYPESIZE / 8 = bytes per beat
    threshold = getThreshold(d)
    beats = 16
    # lists of per-test measurements
    dnsduration = []
    dsduration = []
    dbytes = []
    dmbytes = []
    bpersec = []
    mbpersec = []
    # run tests with burst length 1 beat to DATASIZE
    # double burst length each test
    test = 0
    throughput = []
    failed = False
    while beats <= 1024 and not failed:
        print("LOOP PIPELINE %d beats" % beats)
        usduration = 0
        fiveseconds = 5 * 1000000
        reps = 64
        # Keep doubling reps until one timed run takes at least five
        # seconds, so the throughput figure is measured over a long run.
        while usduration < fiveseconds:
            start = current_micro_time()
            rhandle1 = khandle1(output_bo1, input_bo1, beats, reps)
            rhandle2 = khandle2(output_bo2, input_bo2, beats, reps)
            rhandle1.wait()
            rhandle2.wait()
            end = current_micro_time()
            usduration = end - start
            limit = beats * int(TYPESIZE / 8)  # bytes in one burst
            output_bo1.sync(pyxrt.xclBOSyncDirection.XCL_BO_SYNC_BO_FROM_DEVICE, limit, 0)
            output_bo2.sync(pyxrt.xclBOSyncDirection.XCL_BO_SYNC_BO_FROM_DEVICE, limit, 0)
            # Verify both kernels copied the input pattern correctly.
            failed = (input_buf1[:limit] != output_buf1[:limit])
            if (failed):
                break
            failed = (input_buf2[:limit] != output_buf2[:limit])
            if (failed):
                break
            # print("Reps = %d, Beats = %d, Duration = %lf us" %(reps, beats, usduration)) # for debug
            if usduration < fiveseconds:
                reps = reps * 2
        # Record throughput for this burst length. NOTE(review): the 2.0
        # factor presumably accounts for the two concurrent kernels —
        # confirm against the kernel design.
        dnsduration.append(usduration)
        dsduration.append(dnsduration[test] / 1000000.0)
        dbytes.append(reps * beats * int(TYPESIZE / 8))
        dmbytes.append(dbytes[test] / (1024 * 1024))
        bpersec.append(2.0 * dbytes[test] / dsduration[test])
        mbpersec.append(2.0 * bpersec[test] / (1024 * 1024))
        throughput.append(mbpersec[test])
        print("Test %d, Throughput: %d MB/s" % (test, throughput[test]))
        beats = beats * 4
        test += 1
    if failed:
        raise RuntimeError("ERROR: Failed to copy entries")
    print("TTTT: %d" % throughput[0])
    print("Maximum throughput: %d MB/s" % max(throughput))
    if max(throughput) < threshold:
        raise RuntimeError("ERROR: Throughput is less than expected value of %d GB/sec" % (threshold / 1000))
def main(args):
    """Parse command-line options, run the bandwidth test, and return a
    process exit code (0 on success, negative on failure)."""
    opt = Options()
    Options.getOptions(opt, args, "bandwidth.xclbin")
    try:
        runKernel(opt)
        print("PASSED TEST")
        return 0
    except OSError as err:
        # Propagate the OS error number (negated) as the exit status.
        print(err)
        print("FAILED TEST")
        return -err.errno
    except AssertionError as err:
        print(err)
        print("FAILED TEST")
        return -1
    except Exception as err:
        print(err)
        print("FAILED TEST")
        return -1
# Script entry point: run the bandwidth test and propagate its exit code.
if __name__ == "__main__":
    result = main(sys.argv)
    sys.exit(result)
5,126 | get estimate for fitperiod with data | import datetime
import pandas as pd
from syscore.genutils import str2Bool
from syscore.constants import arg_not_supplied
from sysquant.fitting_dates import fitDates
from sysquant.estimators.correlations import (
correlationEstimate,
create_boring_corr_matrix,
modify_correlation,
)
from sysquant.estimators.generic_estimator import exponentialEstimator
class exponentialCorrelation(exponentialEstimator):
    """Exponentially weighted correlation estimator.

    Wraps `exponentialCorrelationResults` (the rolling EWM pairwise
    correlations) and applies optional cleaning, flooring, clipping and
    shrinkage before returning a matrix for a given fit period.
    """

    def __init__(
        self,
        data_for_correlation,
        ew_lookback: int = 250,
        min_periods: int = 20,
        cleaning: bool = True,
        floor_at_zero: bool = True,
        length_adjustment: int = 1,
        shrinkage_parameter: float = 0.0,
        offdiag: float = 0.99,
        **_ignored_kwargs,
    ):
        # All keyword options are stored by the base class (exposed via
        # `self.other_kwargs`) and read back through the properties below.
        super().__init__(
            data_for_correlation,
            ew_lookback=ew_lookback,
            min_periods=min_periods,
            cleaning=cleaning,
            floor_at_zero=floor_at_zero,
            length_adjustment=length_adjustment,
            shrinkage_parameter=shrinkage_parameter,
            offdiag=offdiag,
            **_ignored_kwargs,
        )

    def perform_calculations(
        self,
        data_for_correlation: pd.DataFrame,
        adjusted_lookback=500,
        adjusted_min_periods=20,
        **other_kwargs,
    ):
        # Build the rolling EWM correlation panel once; per-fit-period
        # snapshots are extracted from it later.
        correlation_calculations = exponentialCorrelationResults(
            data_for_correlation,
            ew_lookback=adjusted_lookback,
            min_periods=adjusted_min_periods,
        )
        return correlation_calculations

    @property
    def offdiag(self) -> float:
        # Off-diagonal value used when cleaning missing correlations.
        return self.other_kwargs["offdiag"]

    @property
    def cleaning(self) -> bool:
        cleaning = str2Bool(self.other_kwargs["cleaning"])
        return cleaning

    @property
    def shrinkage_parameter(self) -> float:
        shrinkage_parameter = float(self.other_kwargs["shrinkage_parameter"])
        return shrinkage_parameter

    @property
    def floor_at_zero(self) -> bool:
        floor_at_zero = str2Bool(self.other_kwargs["floor_at_zero"])
        return floor_at_zero

    @property
    def clip(self) -> float:
        # Optional absolute cap on correlations; arg_not_supplied disables it.
        clip = self.other_kwargs.get("clip", arg_not_supplied)
        return clip

    def missing_data(self):
        # Fallback when no estimate is available: a "boring" default matrix
        # over the same asset names.
        asset_names = list(self.data.columns)
        return create_boring_corr_matrix(len(asset_names), columns=asset_names)

    def METHOD_NAME(
        self, fit_period: fitDates = arg_not_supplied
    ) -> correlationEstimate:
        """Return the (optionally cleaned and modified) correlation matrix
        estimated over `fit_period`; defaults to the full data span."""
        if fit_period is arg_not_supplied:
            fit_period = self._get_default_fit_period_cover_all_data()
        raw_corr_matrix = self._get_raw_corr_for_fitperiod(fit_period)
        cleaning = self.cleaning
        if cleaning:
            # Replace unusable entries using the data available in the
            # fit period, with `offdiag` as the fill value.
            data_for_correlation = self.data
            offdiag = self.offdiag
            corr_matrix = raw_corr_matrix.clean_corr_matrix_given_data(
                fit_period, data_for_correlation, offdiag=offdiag
            )
        else:
            corr_matrix = raw_corr_matrix
        # Post-process: floor at zero, shrink towards the prior, clip.
        floor_at_zero = self.floor_at_zero
        clip = self.clip
        shrinkage = self.shrinkage_parameter
        corr_matrix = modify_correlation(
            corr_matrix, floor_at_zero=floor_at_zero, shrinkage=shrinkage, clip=clip
        )
        return corr_matrix

    def _get_default_fit_period_cover_all_data(self) -> fitDates:
        # Fit over the whole available data history.
        last_date_in_fit_period = self.data.last_valid_index()
        first_date_in_fit_period = self.data.first_valid_index()
        fit_period = fitDates(
            period_start=first_date_in_fit_period,
            period_end=last_date_in_fit_period,
            fit_start=first_date_in_fit_period,
            fit_end=last_date_in_fit_period,
        )
        return fit_period

    def _get_raw_corr_for_fitperiod(self, fit_period: fitDates) -> correlationEstimate:
        last_date_in_fit_period = fit_period.fit_end
        return self._get_raw_corr_period_ends_at_date(last_date_in_fit_period)

    def _get_raw_corr_period_ends_at_date(
        self, last_date_in_fit_period: datetime.datetime
    ) -> correlationEstimate:
        # Snapshot of the rolling correlations just before the fit end date.
        correlation_calculations = self.calculations
        raw_corr_matrix = correlation_calculations.last_valid_cor_matrix_for_date(
            last_date_in_fit_period
        )
        return raw_corr_matrix
class exponentialCorrelationResults(object):
    """Holds rolling exponentially-weighted pairwise correlations for a
    set of series, and serves point-in-time matrix snapshots."""

    def __init__(
        self,
        data_for_correlation,
        ew_lookback: int = 250,
        min_periods: int = 20,
        **_ignored_kwargs,
    ):
        self._columns = data_for_correlation.columns
        # Pairwise EWM correlation: a MultiIndex frame with one block of
        # len(columns) rows per timestamp.
        self._raw_correlations = data_for_correlation.ewm(
            span=ew_lookback, min_periods=min_periods, ignore_na=True
        ).corr(pairwise=True, ignore_na=True)

    @property
    def raw_correlations(self):
        return self._raw_correlations

    def last_valid_cor_matrix_for_date(
        self, date_point: datetime.datetime
    ) -> correlationEstimate:
        # Delegate to the module-level helper of the same name.
        return last_valid_cor_matrix_for_date(
            raw_correlations=self.raw_correlations,
            date_point=date_point,
            columns=self.columns,
        )

    @property
    def size_of_matrix(self) -> int:
        return len(self._columns)

    @property
    def columns(self) -> list:
        return self._columns
def last_valid_cor_matrix_for_date(
    raw_correlations: pd.DataFrame, columns: list, date_point: datetime.datetime
) -> correlationEstimate:
    """Extract the most recent complete correlation matrix from a rolling
    pairwise panel, using only rows strictly before `date_point`."""
    n_assets = len(columns)
    timestamps = raw_correlations.index.get_level_values(0)
    history = raw_correlations[timestamps < date_point]
    # The final n_assets rows form the last full correlation block.
    corr_matrix_values = history.tail(n_assets).values
    return correlationEstimate(values=corr_matrix_values, columns=columns)
5,127 | test use service supported args | # -*- coding: utf-8 -*-
import unittest
from unittest.mock import Mock
from django import test
from tcms.issuetracker import services
from tcms.issuetracker.services import IssueTrackerService
from tests import BaseCaseRun
from tests import factories as f
class TestFindService(test.TestCase):
    """Tests for the ``find_service`` factory method."""

    @classmethod
    def setUpTestData(cls):
        # Two independent trackers: one to break, one to resolve.
        cls.issue_tracker_1 = f.IssueTrackerFactory()
        cls.issue_tracker_2 = f.IssueTrackerFactory()

    def test_class_path_must_be_set(self):
        # An empty class_path must be rejected with a descriptive error.
        self.issue_tracker_1.class_path = ""
        with self.assertRaisesRegex(ValueError, "class_path must be set"):
            services.find_service(self.issue_tracker_1)

    def test_find_the_service(self):
        found = services.find_service(self.issue_tracker_2)
        self.assertIsInstance(found, services.IssueTrackerService)
        self.assertEqual(self.issue_tracker_2, found.tracker_model)
class TestBaseIssueTrackerService(BaseCaseRun):
    """Test default issue tracker behaviors"""

    @classmethod
    def setUpTestData(cls):
        super().setUpTestData()
        cls.tracker_product = f.IssueTrackerProductFactory()

    def test_get_issue_report_url(self):
        # With no issue_report_params, the URL is service_url + endpoint.
        tracker = f.IssueTrackerFactory(
            service_url="http://localhost/",
            issue_report_endpoint="/enter_bug.cgi",
            tracker_product=self.tracker_product,
        )
        s = IssueTrackerService(tracker)
        url = s.make_issue_report_url(self.case_run_1)
        self.assertEqual("http://localhost/enter_bug.cgi", url)

    def test_subclass_could_provide_extra_issue_report_url_args(self):
        # Extra args from a subclass hook are merged into the query string.
        class CoolService(IssueTrackerService):
            def get_extra_issue_report_url_args(self, case_run):
                return {"body": "content"}

        fake_tracker = f.IssueTrackerFactory(
            service_url="http://localhost/",
            tracker_product=self.tracker_product,
            issue_report_endpoint="/new_issue",
            issue_report_params="subject: hello",
        )
        service = CoolService(fake_tracker)
        url = service.make_issue_report_url(self.case_run_1)
        expected_url = "http://localhost/new_issue?subject=hello&body=content"
        self.assert_url(expected_url, url)

    def test_extra_arg_is_overwritten_by_predefined_service_supported_arg(self):
        class CoolService(IssueTrackerService):
            def get_extra_issue_report_url_args(self, case_run):
                return {"body": "content"}

        fake_tracker = f.IssueTrackerFactory(
            service_url="http://localhost/",
            tracker_product=self.tracker_product,
            issue_report_endpoint="/new_issue",
            # body listed here will overwrite that body above
            issue_report_params="subject: hello\nbody: write content here",
        )
        service = CoolService(fake_tracker)
        url = service.make_issue_report_url(self.case_run_1)
        expected_url = "http://localhost/new_issue?subject=hello&body=write%20content%20here"
        self.assert_url(expected_url, url)

    def METHOD_NAME(self):
        """
        Ensure supported args listed in issue_report_params are filled with
        correct value.
        """

        class CoolService(IssueTrackerService):
            def get_stock_issue_report_args(self, case_run):
                return {
                    "case_summary": "test case 1",
                    "verbose": True,
                }

        fake_tracker = f.IssueTrackerFactory(
            service_url="http://localhost/",
            tracker_product=self.tracker_product,
            issue_report_endpoint="/new_issue",
            # case_summary should be in the final URL with supported value.
            issue_report_params="subject: hello\ncase_summary:",
        )
        service = CoolService(fake_tracker)
        url = service.make_issue_report_url(self.case_run_1)
        expected_url = "http://localhost/new_issue?subject=hello&case_summary=test%20case%201"
        self.assert_url(expected_url, url)
class TestMakeIssueReportURLForBugzilla(BaseCaseRun):
    """Test the default behavior of Bugzilla to make issue report URL"""

    @classmethod
    def setUpTestData(cls):
        super().setUpTestData()
        # case_1 gets two components so the generated URL carries both.
        cls.cp_db = f.ComponentFactory(name="db")
        cls.cp_docs = f.ComponentFactory(name="docs")
        f.TestCaseComponentFactory(case=cls.case_1, component=cls.cp_db)
        f.TestCaseComponentFactory(case=cls.case_1, component=cls.cp_docs)
        cls.tracker = f.IssueTrackerProductFactory(name="myissuetracker")
        cls.issue_tracker_bz = f.IssueTrackerFactory(
            service_url="http://bugs.example.com",
            tracker_product=cls.tracker,
            issue_report_endpoint="/enter_bug.cgi",
            issue_report_params="product:\ncomponent:\n",
            issue_report_templ="content:",
        )
        PITRF = f.ProductIssueTrackerRelationshipFactory
        cls.rel_bz_product = PITRF(product=cls.product, issue_tracker=cls.issue_tracker_bz)

    def setUp(self):
        # Undo per-test mutations of the relationship (alias/namespace).
        self.rel_bz_product.refresh_from_db()

    def test_use_default_values(self):
        # Neither alias nor namespace set: product name and per-case
        # components go into the URL.
        srv = services.Bugzilla(self.issue_tracker_bz)
        url = srv.make_issue_report_url(self.case_run_1)
        expected_url = (
            "http://bugs.example.com/enter_bug.cgi?"
            "product={}&comment=content:&component=db&component=docs".format(self.product.name)
        )
        self.assert_url(expected_url, url)

    def test_alias_is_set(self):
        # Alias replaces the product name in the URL.
        self.rel_bz_product.alias = "alternative-name"
        self.rel_bz_product.save(update_fields=["alias"])
        srv = services.Bugzilla(self.issue_tracker_bz)
        url = srv.make_issue_report_url(self.case_run_1)
        expected_url = (
            "http://bugs.example.com/enter_bug.cgi?"
            "product=alternative-name&comment=content:&"
            "component=db&component=docs"
        )
        self.assert_url(expected_url, url)

    def test_namespace_is_set(self):
        # Namespace becomes the product, and the product name becomes the
        # component.
        self.rel_bz_product.namespace = "upstream"
        self.rel_bz_product.save(update_fields=["namespace"])
        srv = services.Bugzilla(self.issue_tracker_bz)
        url = srv.make_issue_report_url(self.case_run_1)
        expected_url = (
            "http://bugs.example.com/enter_bug.cgi?"
            "product=upstream&comment=content:&component={}".format(self.product.name)
        )
        self.assert_url(expected_url, url)

    def test_use_alias_and_namespace_if_both_are_set(self):
        # With both set: namespace is the product, alias is the component.
        self.rel_bz_product.alias = "alternative-name"
        self.rel_bz_product.namespace = "upstream"
        self.rel_bz_product.save(update_fields=["alias", "namespace"])
        srv = services.Bugzilla(self.issue_tracker_bz)
        url = srv.make_issue_report_url(self.case_run_1)
        expected_url = (
            "http://bugs.example.com/enter_bug.cgi?"
            "product=upstream&comment=content:&component=alternative-name"
        )
        self.assert_url(expected_url, url)
class TestFormatIssuesDisplayURL(unittest.TestCase):
    """Tests for ``IssueTrackerService.make_issues_display_url``."""

    def test_format_url(self):
        # The tracker's format string receives the comma-joined issue keys.
        fake_tracker = Mock(
            issues_display_url_fmt="http://bugs.example.com/?ids={issue_keys}"
        )
        service = services.IssueTrackerService(fake_tracker)
        url = service.make_issues_display_url([1, 2, 3, 4])
        self.assertEqual("http://bugs.example.com/?ids=1,2,3,4", url)
5,128 | jacobian | #
# HES1 Michaelis-Menten model of regulatory dynamics.
#
# This file is part of PINTS (https://github.com/pints-team/pints/) which is
# released under the BSD 3-clause license. See accompanying LICENSE.md for
# copyright notice and full license details.
#
import numpy as np
import pints
import scipy
from . import ToyODEModel
class Hes1Model(ToyODEModel, pints.ForwardModelS1):
    """
    HES1 Michaelis-Menten model of regulatory dynamics [1]_.

    This model describes the expression level of the transcription factor
    Hes1.

    .. math::
        \\frac{dm}{dt} &= -k_{deg}m + \\frac{1}{1 + (p_2/P_0)^h} \\\\
        \\frac{dp_1}{dt} &= -k_{deg} p_1 + \\nu m - k_1 p_1 \\\\
        \\frac{dp_2}{dt} &= -k_{deg} p_2 + k_1 p_1

    The system is determined by 3 state variables :math:`m`, :math:`p_1`, and
    :math:`p_2`. It is assumed that only :math:`m` can be observed, that is
    only :math:`m` is an observable. The initial condition of the other two
    state variables and :math:`k_{deg}` are treated as implicit parameters of
    the system. The input order of parameters of interest is
    :math:`\\{ P_0, \\nu, k_1, h \\}`.

    Extends :class:`pints.ForwardModel`, :class:`pints.toy.ToyModel`.

    Parameters
    ----------
    m0 : float
        The initial condition of the observable ``m``. Requires ``m0 >= 0``.
    fixed_parameters
        The fixed parameters of the model which are not inferred, given as a
        vector ``[p1_0, p2_0, k_deg]`` with ``p1_0, p2_0, k_deg >= 0``.

    References
    ----------
    .. [1] Silk, D., el al. 2011. Designing attractive models via automated
           identification of chaotic and oscillatory dynamical regimes. Nature
           communications, 2, p.489.
           https://doi.org/10.1038/ncomms1496
    """

    def __init__(self, m0=None, fixed_parameters=None):
        # Defaults: p1_0=5, p2_0=3, k_deg=0.03 and m0=2.
        # Fixed parameters must be set first: set_m0 reads self._p0.
        if fixed_parameters is None:
            self.set_fixed_parameters([5., 3., 0.03])
        else:
            self.set_fixed_parameters(fixed_parameters)
        if m0 is None:
            self.set_m0(2)
        else:
            self.set_m0(m0)

    def _dfdp(self, state, time, parameters):
        """ See :meth:`pints.ToyModel.jacobian()`. """
        # Partial derivatives of the RHS with respect to the four inferred
        # parameters (P0, v, k1, h), evaluated at the given state.
        m, p1, p2 = state
        P0, v, k1, h = parameters
        p2_over_p0 = p2 / P0
        p2_over_p0_h = p2_over_p0**h
        one_plus_p2_expression_sq = (1 + p2_over_p0_h)**2
        ret = np.empty((self.n_states(), self.n_parameters()))
        ret[0, 0] = h * p2 * p2_over_p0**(h - 1) / (
            P0**2 * one_plus_p2_expression_sq)
        ret[0, 1] = 0
        ret[0, 2] = 0
        ret[0, 3] = - (p2_over_p0_h * np.log(p2_over_p0)) / (
            one_plus_p2_expression_sq
        )
        ret[1, 0] = 0
        ret[1, 1] = m
        ret[1, 2] = -p1
        ret[1, 3] = 0
        ret[2, 0] = 0
        ret[2, 1] = 0
        ret[2, 2] = p1
        ret[2, 3] = 0
        return ret

    def m0(self):
        """
        Returns the initial conditions of the ``m`` variable.
        """
        return self._y0[0]

    def fixed_parameters(self):
        """
        Returns the fixed parameters of the model which are not inferred, given
        as a vector ``[p1_0, p2_0, k_deg]``.
        """
        return [self._p0[0], self._p0[1], self._kdeg]

    def METHOD_NAME(self, state, time, parameters):
        """ See :meth:`pints.ToyModel.jacobian()`. """
        # Jacobian of the RHS with respect to the state (m, p1, p2).
        m, p1, p2 = state
        P0, v, k1, h = parameters
        k_deg = self._kdeg
        p2_over_p0 = p2 / P0
        p2_over_p0_h = p2_over_p0**h
        one_plus_p2_expression_sq = (1 + p2_over_p0_h)**2
        ret = np.zeros((self.n_states(), self.n_states()))
        ret[0, 0] = -k_deg
        ret[0, 1] = 0
        ret[0, 2] = -h * p2_over_p0**(h - 1) / (P0 * one_plus_p2_expression_sq)
        ret[1, 0] = v
        ret[1, 1] = -k1 - k_deg
        ret[1, 2] = 0
        ret[2, 0] = 0
        ret[2, 1] = k1
        ret[2, 2] = -k_deg
        return ret

    def n_states(self):
        """ See :meth:`pints.ToyODEModel.n_states()`. """
        return 3

    def n_outputs(self):
        """ See :meth:`pints.ForwardModel.n_outputs()`. """
        return 1

    def n_parameters(self):
        """ See :meth:`pints.ForwardModel.n_parameters()`. """
        return 4

    def _rhs(self, state, time, parameters):
        """
        Right-hand side equation of the ode to solve.
        """
        m, p1, p2 = state
        P0, v, k1, h = parameters
        output = np.array([
            - self._kdeg * m + 1. / (1. + (p2 / P0)**h),
            - self._kdeg * p1 + v * m - k1 * p1,
            - self._kdeg * p2 + k1 * p1])
        return output

    def set_m0(self, m0):
        """
        Sets the initial conditions of the ``m`` variable.
        """
        if m0 < 0:
            raise ValueError('Initial condition cannot be negative.')
        y0 = [m0, self._p0[0], self._p0[1]]
        super(Hes1Model, self).set_initial_conditions(y0)

    def set_fixed_parameters(self, k):
        """
        Changes the implicit parameters for this model.
        """
        a, b, c = k
        if a < 0 or b < 0 or c < 0:
            raise ValueError('Implicit parameters cannot be negative.')
        self._p0 = [a, b]
        self._kdeg = c

    def simulate_all_states(self, parameters, times):
        """
        Returns the solution for all three state variables
        (``m``, ``p1``, ``p2``), not just the observable ``m``.
        """
        solved_states = scipy.integrate.odeint(
            self._rhs, self._y0, times, args=(parameters,))
        # Return all states
        return solved_states

    def suggested_parameters(self):
        """ See :meth:`pints.toy.ToyModel.suggested_parameters()`. """
        return np.array([2.4, 0.025, 0.11, 6.9])

    def suggested_times(self):
        """ See :meth:`pints.toy.ToyModel.suggested_times()`. """
        return np.arange(0, 270, 30)

    def suggested_values(self):
        """
        Returns a suggested set of values that matches
        :meth:`suggested_times()`.
        """
        return np.array([2, 1.20, 5.90, 4.58, 2.64, 5.38, 6.42, 5.60, 4.48])
5,129 | run | #!/usr/bin/env python
"""
Horizontal align demo with HSplit.
"""
from prompt_toolkit.application import Application
from prompt_toolkit.formatted_text import HTML
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.layout.containers import (
HorizontalAlign,
HSplit,
VerticalAlign,
VSplit,
Window,
WindowAlign,
)
from prompt_toolkit.layout.controls import FormattedTextControl
from prompt_toolkit.layout.layout import Layout
from prompt_toolkit.widgets import Frame
# Banner text shown at the top of the screen (rendered inside a Frame).
TITLE = HTML(
    """ <u>HSplit HorizontalAlign</u> example.
Press <b>'q'</b> to quit."""
)

# Filler paragraph used for every demo pane.
LIPSUM = """\
Lorem ipsum dolor
sit amet, consectetur
adipiscing elit.
Maecenas quis
interdum enim."""
# 1. The layout
def _demo_section(label, align, pane_height=4):
    """Build one labeled demo row: a red label window followed by three
    LIPSUM panes laid out with the given HorizontalAlign.

    ``pane_height=None`` leaves the pane height unset (used by the
    JUSTIFY row), matching Window's default.
    """
    label_window = Window(
        FormattedTextControl(HTML(f"<u>{label}</u>")),
        width=10,
        ignore_content_width=True,
        style="bg:#ff3333 ansiblack",
        align=WindowAlign.CENTER,
    )
    panes = [
        Window(FormattedTextControl(LIPSUM), height=pane_height, style="bg:#444488")
        for _ in range(3)
    ]
    return VSplit(
        [
            label_window,
            VSplit(
                panes,
                padding=1,
                padding_style="bg:#888888",
                align=align,
                height=5,
                padding_char="|",
            ),
        ]
    )


# Title frame on top, then one row per HorizontalAlign mode. The four rows
# were previously written out long-hand; _demo_section removes the
# duplication without changing the resulting layout tree.
body = HSplit(
    [
        Frame(
            Window(FormattedTextControl(TITLE), height=2), style="bg:#88ff88 #000000"
        ),
        HSplit(
            [
                _demo_section("LEFT", HorizontalAlign.LEFT),
                _demo_section("CENTER", HorizontalAlign.CENTER),
                _demo_section("RIGHT", HorizontalAlign.RIGHT),
                _demo_section("JUSTIFY", HorizontalAlign.JUSTIFY, pane_height=None),
            ],
            padding=1,
            padding_style="bg:#ff3333 #ffffff",
            padding_char=".",
            align=VerticalAlign.TOP,
        ),
    ]
)
# 2. Key bindings
kb = KeyBindings()


@kb.add("q")
def _(event):
    # Pressing "q" anywhere exits the application's event loop.
    "Quit application."
    event.app.exit()
# 3. The `Application`
# Full-screen application wiring the layout and key bindings together.
application = Application(layout=Layout(body), key_bindings=kb, full_screen=True)


def METHOD_NAME():
    # Start the event loop; returns when the user presses "q".
    application.METHOD_NAME()


if __name__ == "__main__":
    METHOD_NAME()
5,130 | test 2019 | # python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <dr.prodigy.github@gmail.com> (c) 2017-2023
# ryanss <ryanssdev@icloud.com> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from holidays.countries.luxembourg import Luxembourg, LU, LUX
from tests.common import TestCase
class TestLuxembourg(TestCase):
    """Holiday checks for Luxembourg: fixed years and localizations."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass(Luxembourg)

    def test_country_aliases(self):
        # LU and LUX must behave as aliases of the Luxembourg class.
        self.assertCountryAliases(Luxembourg, LU, LUX)

    def test_2018(self):
        self.assertHolidays(
            ("2018-01-01", "Neijoerschdag"),
            ("2018-04-02", "Ouschterméindeg"),
            ("2018-05-01", "Dag vun der Aarbecht"),
            ("2018-05-10", "Christi Himmelfaart"),
            ("2018-05-21", "Péngschtméindeg"),
            ("2018-06-23", "Nationalfeierdag"),
            ("2018-08-15", "Léiffrawëschdag"),
            ("2018-11-01", "Allerhellgen"),
            ("2018-12-25", "Chrëschtdag"),
            ("2018-12-26", "Stiefesdag"),
        )

    def METHOD_NAME(self):
        # 2019 adds Europe Day (May 9), absent from the 2018 list above.
        self.assertHolidays(
            ("2019-01-01", "Neijoerschdag"),
            ("2019-04-22", "Ouschterméindeg"),
            ("2019-05-01", "Dag vun der Aarbecht"),
            ("2019-05-09", "Europadag"),
            ("2019-05-30", "Christi Himmelfaart"),
            ("2019-06-10", "Péngschtméindeg"),
            ("2019-06-23", "Nationalfeierdag"),
            ("2019-08-15", "Léiffrawëschdag"),
            ("2019-11-01", "Allerhellgen"),
            ("2019-12-25", "Chrëschtdag"),
            ("2019-12-26", "Stiefesdag"),
        )

    def test_2020(self):
        self.assertHolidays(
            ("2020-01-01", "Neijoerschdag"),
            ("2020-04-13", "Ouschterméindeg"),
            ("2020-05-01", "Dag vun der Aarbecht"),
            ("2020-05-09", "Europadag"),
            ("2020-05-21", "Christi Himmelfaart"),
            ("2020-06-01", "Péngschtméindeg"),
            ("2020-06-23", "Nationalfeierdag"),
            ("2020-08-15", "Léiffrawëschdag"),
            ("2020-11-01", "Allerhellgen"),
            ("2020-12-25", "Chrëschtdag"),
            ("2020-12-26", "Stiefesdag"),
        )

    def test_l10n_default(self):
        # Default locale: Luxembourgish names.
        self.assertLocalizedHolidays(
            ("2022-01-01", "Neijoerschdag"),
            ("2022-04-18", "Ouschterméindeg"),
            ("2022-05-01", "Dag vun der Aarbecht"),
            ("2022-05-09", "Europadag"),
            ("2022-05-26", "Christi Himmelfaart"),
            ("2022-06-06", "Péngschtméindeg"),
            ("2022-06-23", "Nationalfeierdag"),
            ("2022-08-15", "Léiffrawëschdag"),
            ("2022-11-01", "Allerhellgen"),
            ("2022-12-25", "Chrëschtdag"),
            ("2022-12-26", "Stiefesdag"),
        )

    def test_l10n_de(self):
        self.assertLocalizedHolidays(
            "de",
            ("2022-01-01", "Neujahr"),
            ("2022-04-18", "Ostermontag"),
            ("2022-05-01", "Tag der Arbeit"),
            ("2022-05-09", "Europatag"),
            ("2022-05-26", "Christi Himmelfahrt"),
            ("2022-06-06", "Pfingstmontag"),
            ("2022-06-23", "Nationalfeiertag"),
            ("2022-08-15", "Maria Himmelfahrt"),
            ("2022-11-01", "Allerheiligen"),
            ("2022-12-25", "Weihnachten"),
            ("2022-12-26", "Zweiter Weihnachtsfeiertag"),
        )

    def test_l10n_en_us(self):
        self.assertLocalizedHolidays(
            "en_US",
            ("2022-01-01", "New Year's Day"),
            ("2022-04-18", "Easter Monday"),
            ("2022-05-01", "Labor Day"),
            ("2022-05-09", "Europe Day"),
            ("2022-05-26", "Ascension Day"),
            ("2022-06-06", "Whit Monday"),
            ("2022-06-23", "National Day"),
            ("2022-08-15", "Assumption Day"),
            ("2022-11-01", "All Saints' Day"),
            ("2022-12-25", "Christmas Day"),
            ("2022-12-26", "St. Stephen's Day"),
        )

    def test_l10n_fr(self):
        self.assertLocalizedHolidays(
            "fr",
            ("2022-01-01", "Jour de l'an"),
            ("2022-04-18", "Lundi de Pâques"),
            ("2022-05-01", "Fête du Travail"),
            ("2022-05-09", "Journée de l'Europe"),
            ("2022-05-26", "Ascension"),
            ("2022-06-06", "Lundi de Pentecôte"),
            ("2022-06-23", "Fête nationale"),
            ("2022-08-15", "Assomption"),
            ("2022-11-01", "Toussaint"),
            ("2022-12-25", "Noël"),
            ("2022-12-26", "St. Etienne"),
        )

    def test_l10n_uk(self):
        self.assertLocalizedHolidays(
            "uk",
            ("2022-01-01", "Новий рік"),
            ("2022-04-18", "Великодній понеділок"),
            ("2022-05-01", "День праці"),
            ("2022-05-09", "День Європи"),
            ("2022-05-26", "Вознесіння Господнє"),
            ("2022-06-06", "День Святого Духа"),
            ("2022-06-23", "Національне свято"),
            ("2022-08-15", "Внебовзяття Пресвятої Діви Марії"),
            ("2022-11-01", "День усіх святих"),
            ("2022-12-25", "Різдво Христове"),
            ("2022-12-26", "День Святого Стефана"),
        )
5,131 | test post call with path setup with | import pytest
import requests
from parameterized import parameterized_class
from tests.integration.local.start_api.start_api_integ_base import StartApiIntegBaseClass
@parameterized_class(
    ("template_path",),
    [
        ("/testdata/start_api/cdk/template-rest-api.yaml",),
        ("/testdata/start_api/cdk/template-open-api.yaml",),
        ("/testdata/start_api/cdk/template-http-api.yaml",),
    ],
)
class TestStartAPICDKTemplateRestAPI(StartApiIntegBaseClass):
    """Exercise every HTTP verb against APIs defined in CDK templates."""

    def setUp(self):
        self.url = f"http://127.0.0.1:{self.port}"

    @pytest.mark.flaky(reruns=3)
    @pytest.mark.timeout(timeout=600, method="thread")
    def test_calling_proxy_endpoint(self):
        # The greedy proxy path must route to the handler.
        resp = requests.get(self.url + "/proxypath/this/is/some/path", timeout=300)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.json(), {"hello": "world"})

    @pytest.mark.flaky(reruns=3)
    @pytest.mark.timeout(timeout=600, method="thread")
    def test_get_call_with_path_setup_with_any_implicit_api(self):
        resp = requests.get(self.url + "/anyandall", timeout=300)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.json(), {"hello": "world"})

    @pytest.mark.flaky(reruns=3)
    @pytest.mark.timeout(timeout=600, method="thread")
    def METHOD_NAME(self):
        resp = requests.post(self.url + "/anyandall", json={}, timeout=300)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.json(), {"hello": "world"})

    @pytest.mark.flaky(reruns=3)
    @pytest.mark.timeout(timeout=600, method="thread")
    def test_put_call_with_path_setup_with_any_implicit_api(self):
        resp = requests.put(self.url + "/anyandall", json={}, timeout=300)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.json(), {"hello": "world"})

    @pytest.mark.flaky(reruns=3)
    @pytest.mark.timeout(timeout=600, method="thread")
    def test_head_call_with_path_setup_with_any_implicit_api(self):
        # HEAD responses carry no body, so only the status is checked.
        resp = requests.head(self.url + "/anyandall", timeout=300)
        self.assertEqual(resp.status_code, 200)

    @pytest.mark.flaky(reruns=3)
    @pytest.mark.timeout(timeout=600, method="thread")
    def test_delete_call_with_path_setup_with_any_implicit_api(self):
        resp = requests.delete(self.url + "/anyandall", timeout=300)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.json(), {"hello": "world"})

    @pytest.mark.flaky(reruns=3)
    @pytest.mark.timeout(timeout=600, method="thread")
    def test_options_call_with_path_setup_with_any_implicit_api(self):
        # OPTIONS responses carry no body, so only the status is checked.
        resp = requests.options(self.url + "/anyandall", timeout=300)
        self.assertEqual(resp.status_code, 200)

    @pytest.mark.flaky(reruns=3)
    @pytest.mark.timeout(timeout=600, method="thread")
    def test_patch_call_with_path_setup_with_any_implicit_api(self):
        resp = requests.patch(self.url + "/anyandall", timeout=300)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.json(), {"hello": "world"})
class TestServiceCorsRequests(StartApiIntegBaseClass):
    """
    Verify that the expected CORS headers are attached to responses.
    """

    template_path = "/testdata/start_api/cdk/template-cors-configs.yaml"

    def setUp(self):
        self.url = f"http://127.0.0.1:{self.port}"

    @pytest.mark.flaky(reruns=3)
    @pytest.mark.timeout(timeout=600, method="thread")
    def test_cors_swagger_options(self):
        """
        OPTIONS requests must carry the CORS headers configured in the
        swagger template.
        """
        resp = requests.options(self.url + "/", timeout=300)
        self.assertEqual(resp.status_code, 200)
        expected_headers = {
            "Access-Control-Allow-Origin": "*",
            "Access-Control-Allow-Headers": "Content-Type,X-Amz-Date,Authorization",
            "Access-Control-Allow-Methods": "GET,OPTIONS",
            "Access-Control-Allow-Credentials": "true",
            "Access-Control-Max-Age": "510",
        }
        for header, value in expected_headers.items():
            self.assertEqual(resp.headers.get(header), value)
5,132 | get record | """
Match script, useful for public matches. Features verbose announcements
on IRC and a custom timer.
Commands
^^^^^^^^
* ``/timer`` starts timer *admin only*
* ``/stoptimer`` stops timer *admin only*
* ``/startrecord`` starts a fresh records *admin only*
* ``/stoprecord`` clears the current record *admin only*
* ``/saverecord`` save record to a file *admin only*
.. codeauthor: mat^2
"""
import os
from twisted.internet import reactor
from twisted.internet.task import LoopingCall
import json
from piqueserver.commands import command, admin
from piqueserver.config import config
config_dir = config.config_dir
@command('timer', admin_only=True)
def start_timer(connection, end):
    # `end` arrives as a chat-argument string of minutes; the protocol
    # timer works in seconds.
    duration_seconds = int(end) * 60
    return connection.protocol.start_timer(duration_seconds)
@command('stoptimer', admin_only=True)
def stop_timer(connection, end=None):
    # `end` appears copy-pasted from start_timer and is never used; a
    # required positional made `/stoptimer` demand a spurious argument.
    # Defaulting it keeps backward compatibility while allowing the bare
    # `/stoptimer` form documented in the module docstring.
    return connection.protocol.stop_timer()
@command('startrecord', admin_only=True)
def start_record(connection):
    # Begin a fresh match record; the reply is echoed back in chat.
    protocol = connection.protocol
    protocol.start_record()
    return 'Recording started.'
@command('stoprecord', admin_only=True)
def stop_record(connection):
    # Discard the current match record; the reply is echoed back in chat.
    protocol = connection.protocol
    protocol.stop_record()
    return 'Recording stopped.'
@command('saverecord', admin_only=True)
def save_record(connection, name):
    # protocol.save_record reports falsy when there is nothing to save.
    saved = connection.protocol.save_record(name)
    return 'Record saved.' if saved else 'No record file available.'
def apply_script(protocol, connection, config):
class MatchConnection(connection):
def on_flag_take(self):
self.add_message(
"%s took %s's flag!" %
(self.printable_name, self.team.other.name.lower()))
return connection.on_flag_take(self)
def on_flag_drop(self):
self.add_message(
"%s dropped %s's flag!" %
(self.printable_name, self.team.other.name.lower()))
return connection.on_flag_drop(self)
def on_flag_capture(self):
self.add_message(
"%s captured %s's flag!" %
(self.printable_name, self.team.other.name.lower()))
return connection.on_flag_capture(self)
def on_kill(self, killer, type, grenade):
if killer is None:
killer = self
self.add_message("%s was killed by %s!" %
(self.printable_name, killer.printable_name))
self.protocol.add_kill(self, killer)
return connection.on_kill(self, killer, type, grenade)
def add_message(self, value):
self.protocol.messages.append(value)
class MatchProtocol(protocol):
timer_left = None
timer_call = None
timer_end = None
record = None
def __init__(self, *arg, **kw):
protocol.__init__(self, *arg, **kw)
self.messages = []
self.send_message_loop = LoopingCall(self.display_messages)
self.send_message_loop.start(3)
def start_timer(self, end):
if self.timer_end is not None:
return 'Timer is running already.'
self.timer_end = reactor.seconds() + end
self.broadcast_chat('Timer started, ending in %s minutes'
% (end / 60), irc=True)
self.display_timer(True)
def stop_timer(self):
if self.timer_call is not None:
self.timer_call.cancel()
self.broadcast_chat('Timer stopped.')
self.timer_call = None
else:
return 'No timer in progress.'
def display_timer(self, silent=False):
time_left = self.timer_end - reactor.seconds()
minutes_left = time_left / 60.0
next_call = 60
if not silent:
if time_left <= 0:
self.broadcast_chat('Timer ended!', irc=True)
self.timer_end = None
return
elif minutes_left <= 1:
self.broadcast_chat('%s seconds left' % int(time_left),
irc=True)
next_call = max(1, int(time_left / 2.0))
else:
self.broadcast_chat('%s minutes left' % int(minutes_left),
irc=True)
self.timer_call = reactor.callLater(next_call, self.display_timer)
def display_messages(self):
if not self.messages:
return
message = self.messages.pop(0)
self.irc_say(message)
# recording
def add_kill(self, player, killing_player):
if self.record is None:
return
self.METHOD_NAME(player.name)['deaths'] += 1
self.METHOD_NAME(killing_player.name)['kills'] += 1
def METHOD_NAME(self, name):
try:
return self.record[name]
except KeyError:
record = {'deaths': 0, 'kills': 0}
self.record[name] = record
return record
def start_record(self):
self.record = {}
def stop_record(self):
self.record = None
def save_record(self, name):
if self.record is None:
return False
path = os.path.join(config_dir, "record_{}.json".format(name))
with open(path, "w") as f:
json.dump(self.record, f)
return True
return MatchProtocol, MatchConnection |
5,133 | way | #-*- coding: utf-8 -*-
###########################################################################
## ##
## Copyrights Frédéric Rodrigo 2018 ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###########################################################################
from modules.OsmoseTranslation import T_
from plugins.Plugin import Plugin
class Name_Quotation(Plugin):
def init(self, logger):
Plugin.init(self, logger)
self.errors[50704] = self.def_class(item = 5070, level = 2, tags = ['name', 'fix:chair'],
title = T_('Unbalanced quotation mark or bracket in name'),
resource = 'https://en.wikipedia.org/wiki/Bracket#Encoding_in_digital_media')
self.quotes = [
# https://en.wikipedia.org/wiki/Quotation_mark#Unicode_code_point_table
u"«»", u"‹›", u"“”‟„", u"〝〞〟",
# https://en.wikipedia.org/wiki/Bracket#Encoding_in_digital_media
u"()", u"[]", u"{}", u"«»", u"‹›", u"⌈⌉", u"⌊⌋", u"⌜⌝", u"⌞⌟", u"⁽⁾", u"₍₎", u"⸢⸣", u"⸤⸥", u"﴾﴿", u"⸜⸝", u"᚛᚜", u"༼༡༽", u"〔〕", u"〖〗", u"〘〙", u"〚〛", u"〝〞", u"〈〉", u"「」", u"〈〉", u"《》", u"「」", u"『』", u"【】", u"()", u"[]", u"<>", u"{}", u"⦅⦆",
]
self.quotes_j = u"".join(self.quotes)
def node(self, data, tags):
if 'name' not in tags:
return
stack = []
for c in tags["name"]:
if c in self.quotes_j:
if len(stack) == 0 or stack[-1][0] == c or c not in stack[-1][1]:
group = next(q for q in self.quotes if c in q)
stack.append([c, group])
else:
p, group = stack.pop()
if c not in group:
return [{"class": 50704, "subclass": 0, "text": T_("Unbalanced {0} with {1}", p, c)}]
if len(stack) > 0:
return [{"class": 50704, "subclass": 1, "text": T_("Unbalanced {0}", "".join(map(lambda q: q[0], stack)))}]
def METHOD_NAME(self, data, tags, nds):
return self.node(data, tags)
def relation(self, data, tags, members):
return self.node(data, tags)
###########################################################################
from plugins.Plugin import TestPluginCommon
class Test(TestPluginCommon):
def test(self):
TestPluginCommon.setUp(self)
self.p = Name_Quotation(None)
self.p.init(None)
assert not self.p.node(None, {"foo": "bar"})
assert self.p.node(None, {"name": "("})
assert self.p.node(None, {"name": "(]"})
assert self.p.node(None, {"name": "(("})
assert not self.p.node(None, {"name": "{[]}"})
assert self.p.node(None, {"name": "{[}]"})
assert not self.p.node(None, {"name": "קריית מוצקין (תפעולית)"})
assert self.p.node(None, {"name": "קריית מוצקין (תפעולית"})
assert self.p.node(None, {"name": "120 - (ביס אסיף (התפתחות הילד "}) # Twice '(' but not writer in the same direction |
5,134 | close | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.mgmt.core import ARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Optional
from azure.core.credentials import TokenCredential
from ._configuration import TimeSeriesInsightsClientConfiguration
from .operations import Operations
from .operations import EnvironmentsOperations
from .operations import EventSourcesOperations
from .operations import ReferenceDataSetsOperations
from .operations import AccessPoliciesOperations
from . import models
class TimeSeriesInsightsClient(object):
"""Time Series Insights client.
:ivar operations: Operations operations
:vartype operations: time_series_insights_client.operations.Operations
:ivar environments: EnvironmentsOperations operations
:vartype environments: time_series_insights_client.operations.EnvironmentsOperations
:ivar event_sources: EventSourcesOperations operations
:vartype event_sources: time_series_insights_client.operations.EventSourcesOperations
:ivar reference_data_sets: ReferenceDataSetsOperations operations
:vartype reference_data_sets: time_series_insights_client.operations.ReferenceDataSetsOperations
:ivar access_policies: AccessPoliciesOperations operations
:vartype access_policies: time_series_insights_client.operations.AccessPoliciesOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: Azure Subscription ID.
:type subscription_id: str
:param str base_url: Service URL
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
def __init__(
self,
credential, # type: "TokenCredential"
subscription_id, # type: str
base_url=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
if not base_url:
base_url = 'https://management.azure.com'
self._config = TimeSeriesInsightsClientConfiguration(credential, subscription_id, **kwargs)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.operations = Operations(
self._client, self._config, self._serialize, self._deserialize)
self.environments = EnvironmentsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.event_sources = EventSourcesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.reference_data_sets = ReferenceDataSetsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.access_policies = AccessPoliciesOperations(
self._client, self._config, self._serialize, self._deserialize)
def METHOD_NAME(self):
# type: () -> None
self._client.METHOD_NAME()
def __enter__(self):
# type: () -> TimeSeriesInsightsClient
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details) |
5,135 | test load 1 | ##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Gregory Bowman
# Contributors: Robert McGibbon
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import os
import tempfile
import unittest
import numpy as np
import tables
from mdtraj import io
from mdtraj.testing import eq
fd, temp = tempfile.mkstemp(suffix='.h5')
os.close(fd)
def teardown_module(module):
"""remove the temporary file created by tests in this file
this gets automatically called by pytest"""
os.unlink(temp)
def test_overwrite_1():
fid, fn = tempfile.mkstemp()
try:
a = np.arange(10)
b = a + 1
io.saveh(fn, a=a)
io.saveh(fn, b=b)
eq(io.loadh(fn, 'a'), a)
eq(io.loadh(fn, 'b'), b)
except:
raise
finally:
if os.path.exists(fn):
os.close(fid)
os.unlink(fn)
def test_overwrite_2():
fid, fn = tempfile.mkstemp()
try:
a = np.arange(10)
b = a + 1
io.saveh(fn, a=a)
io.saveh(fn, a=b)
eq(io.loadh(fn, 'a'), b)
except:
raise
finally:
if os.path.exists(fn):
os.close(fid)
os.unlink(fn)
class test_io(unittest.TestCase):
def setUp(self):
# setup() is called before very test and just creates a
# temporary work space for reading/writing files.
fid, self.filename1 = tempfile.mkstemp()
fid, self.filename2 = tempfile.mkstemp()
self.data = np.arange(10000, dtype=np.float32)
# Write Data to an HDF5 file as a compressed CArray.
hdf_file = tables.open_file(self.filename1, 'a')
hdf_file.create_carray("/", "arr_0", tables.Float32Atom(),
self.data.shape, filters=io.COMPRESSION)
hdf_file.root.arr_0[:] = self.data[:]
hdf_file.flush()
hdf_file.close()
def METHOD_NAME(self):
# Load by specifying array name
TestData = io.loadh(self.filename1, 'arr_0')
eq(TestData, self.data)
def test_load_2(self):
# load using deferred=False
TestData = io.loadh(self.filename1, deferred=False)['arr_0']
eq(TestData, self.data)
def test_load_3(self):
# load using deferred=True
deferred = io.loadh(self.filename1, deferred=True)
eq(deferred['arr_0'], self.data)
deferred.close()
def test_save(self):
# Save HDF5 to disk and load it back up
io.saveh(self.filename2, self.data)
TestData = io.loadh(self.filename2, 'arr_0')
eq(TestData, self.data)
def teardown(self):
os.remove(self.filename1)
os.remove(self.filename2)
class test_io_int(test_io):
"Run the same test as the class above, but using int64 data"
def setUp(self):
# setup() is called before very test and just creates
# a temporary work space for reading/writing files.
fid, self.filename1 = tempfile.mkstemp()
fid, self.filename2 = tempfile.mkstemp()
self.data = np.arange(10000, dtype=np.int64)
# Write Data to an HDF5 file as a compressed CArray.
hdf_file = tables.open_file(self.filename1, 'a')
hdf_file.create_carray("/", "arr_0", tables.Int64Atom(),
self.data.shape, filters=io.COMPRESSION)
hdf_file.root.arr_0[:] = self.data[:]
hdf_file.flush()
hdf_file.close()
def test_groups():
# Test to ensure that files are loaded correctly even if they contain
# nested groups and stuff
x = np.random.randn(10)
y = np.random.randn(11)
f = tables.open_file(temp, 'w')
f.create_group(where='/', name='mygroup')
f.create_array(where='/mygroup', name='myarray', obj=x)
f.create_array(where='/', name='mya2', obj=y)
f.close()
assert eq(io.loadh(temp)['mygroup/myarray'], x)
assert eq(io.loadh(temp)['mya2'], y)
assert eq(io.loadh(temp, deferred=False)['mygroup/myarray'], x)
assert eq(io.loadh(temp, deferred=False)['mya2'], y)
assert eq(io.loadh(temp, 'mygroup/myarray'), x)
assert eq(io.loadh(temp, 'mya2'), y) |
5,136 | test force token predictions | import pytest
import flair
from flair.embeddings import FlairEmbeddings, WordEmbeddings
from flair.models import SequenceTagger
from flair.trainers import ModelTrainer
from tests.model_test_utils import BaseModelTest
class TestSequenceTagger(BaseModelTest):
model_cls = SequenceTagger
pretrained_model = "ner-fast"
train_label_type = "ner"
training_args = {
"max_epochs": 2,
"learning_rate": 0.1,
"mini_batch_size": 2,
}
model_args = {
"hidden_size": 64,
"use_crf": False,
}
def has_embedding(self, sentence):
return all(token.get_embedding().cpu().numpy().size != 0 for token in sentence)
def build_model(self, embeddings, label_dict, **kwargs):
model_args = dict(self.model_args)
for k in kwargs:
if k in model_args:
del model_args[k]
return self.model_cls(
embeddings=embeddings,
tag_dictionary=label_dict,
tag_type=self.train_label_type,
**model_args,
**kwargs,
)
@pytest.fixture()
def embeddings(self):
return WordEmbeddings("turian")
@pytest.fixture()
def corpus(self, tasks_base_path):
return flair.datasets.ColumnCorpus(data_folder=tasks_base_path / "fashion", column_format={0: "text", 3: "ner"})
@pytest.mark.integration()
def test_all_tag_proba_embedding(self, example_sentence, loaded_pretrained_model):
loaded_pretrained_model.predict(example_sentence, return_probabilities_for_all_classes=True)
for token in example_sentence:
assert len(token.get_tags_proba_dist(loaded_pretrained_model.label_type)) == len(
loaded_pretrained_model.label_dictionary
)
score_sum = 0.0
for label in token.get_tags_proba_dist(loaded_pretrained_model.label_type):
assert label.data_point == token
score_sum += label.score
assert abs(score_sum - 1.0) < 1.0e-5
@pytest.mark.integration()
def METHOD_NAME(self, example_sentence, loaded_pretrained_model):
loaded_pretrained_model.predict(example_sentence, force_token_predictions=True)
assert example_sentence.get_token(3).text == "Berlin"
assert example_sentence.get_token(3).tag == "S-LOC"
@pytest.mark.integration()
def test_train_load_use_tagger_flair_embeddings(self, results_base_path, corpus, example_sentence):
tag_dictionary = corpus.make_label_dictionary("ner", add_unk=False)
model = self.build_model(FlairEmbeddings("news-forward-fast"), tag_dictionary)
trainer = ModelTrainer(model, corpus)
trainer.train(results_base_path, shuffle=False, **self.training_args)
del trainer, model, tag_dictionary, corpus
loaded_model = self.model_cls.load(results_base_path / "final-model.pt")
loaded_model.predict(example_sentence)
loaded_model.predict([example_sentence, self.empty_sentence])
loaded_model.predict([self.empty_sentence])
del loaded_model
@pytest.mark.integration()
def test_train_load_use_tagger_with_trainable_hidden_state(
self, embeddings, results_base_path, corpus, example_sentence
):
tag_dictionary = corpus.make_label_dictionary("ner", add_unk=False)
model = self.build_model(embeddings, tag_dictionary, train_initial_hidden_state=True)
trainer = ModelTrainer(model, corpus)
trainer.train(results_base_path, shuffle=False, **self.training_args)
del trainer, model, tag_dictionary, corpus
loaded_model = self.model_cls.load(results_base_path / "final-model.pt")
loaded_model.predict(example_sentence)
loaded_model.predict([example_sentence, self.empty_sentence])
loaded_model.predict([self.empty_sentence])
del loaded_model
@pytest.mark.integration()
def test_train_load_use_tagger_disjunct_tags(
self, results_base_path, tasks_base_path, embeddings, example_sentence
):
corpus = flair.datasets.ColumnCorpus(
data_folder=tasks_base_path / "fashion_disjunct",
column_format={0: "text", 3: "ner"},
)
tag_dictionary = corpus.make_label_dictionary("ner", add_unk=True)
model = self.build_model(embeddings, tag_dictionary, allow_unk_predictions=True)
trainer = ModelTrainer(model, corpus)
trainer.train(results_base_path, shuffle=False, **self.training_args)
del trainer, model, tag_dictionary, corpus
loaded_model = self.model_cls.load(results_base_path / "final-model.pt")
loaded_model.predict(example_sentence)
loaded_model.predict([example_sentence, self.empty_sentence])
loaded_model.predict([self.empty_sentence])
del loaded_model |
5,137 | setup repository modules | # Copyright (c) 2015 SUSE Linux GmbH. All rights reserved.
#
# This file is part of kiwi.
#
# kiwi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# kiwi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with kiwi. If not, see <http://www.gnu.org/licenses/>
#
from typing import (
List, Dict
)
from kiwi.api_helper import decommissioned
from kiwi.command import command_call_type
from kiwi.system.root_bind import RootBind
from kiwi.repository.base import RepositoryBase
class PackageManagerBase:
"""
**Implements base class for Package Management**
:param object repository: instance of :class:`Repository`
:param str root_dir: root directory path name
:param list package_requests: list of packages to install or delete
:param list collection_requests: list of collections to install
:param list product_requests: list of products to install
"""
def __init__(
self, repository: RepositoryBase, custom_args: List = [],
release_version: str = ''
) -> None:
self.repository = repository
self.root_dir = repository.root_dir
self.package_requests: List[str] = []
self.collection_requests: List[str] = []
self.product_requests: List[str] = []
self.exclude_requests: List[str] = []
self.release_version = release_version or '0'
self.post_init(custom_args or [])
def post_init(self, custom_args: List = []) -> None:
"""
Post initialization method
Implementation in specialized package manager class
:param list custom_args: unused
"""
pass
def request_package(self, name: str) -> None:
"""
Queue a package request
Implementation in specialized package manager class
:param str name: unused
"""
raise NotImplementedError
def request_collection(self, name: str) -> None:
"""
Queue a package collection
Implementation in specialized package manager class
:param str name: unused
"""
raise NotImplementedError
def request_product(self, name: str) -> None:
"""
Queue a product request
Implementation in specialized package manager class
:param str name: unused
"""
raise NotImplementedError
@decommissioned
def request_package_lock(self, name: str) -> None:
pass # pragma: no cover
def request_package_exclusion(self, name: str) -> None:
"""
Queue a package exclusion(skip) request
Implementation in specialized package manager class
:param str name: unused
"""
raise NotImplementedError
def METHOD_NAME(
self, collection_modules: Dict[str, List[str]]
) -> None:
"""
Setup repository modules and streams
Implementation in specialized package manager class
:param dict collection_modules: unused
"""
raise NotImplementedError
def process_install_requests_bootstrap(
self, root_bind: RootBind = None, bootstrap_package: str = None
) -> command_call_type:
"""
Process package install requests for bootstrap phase (no chroot)
Implementation in specialized package manager class
"""
raise NotImplementedError
def process_install_requests(self) -> command_call_type:
"""
Process package install requests for image phase (chroot)
Implementation in specialized package manager class
"""
raise NotImplementedError
def process_delete_requests(self, force: bool = False) -> command_call_type:
"""
Process package delete requests (chroot)
Implementation in specialized package manager class
:param bool force: unused
"""
raise NotImplementedError
def update(self) -> command_call_type:
"""
Process package update requests (chroot)
Implementation in specialized package manager class
"""
raise NotImplementedError
def process_only_required(self) -> None:
"""
Setup package processing only for required packages
Implementation in specialized package manager class
"""
raise NotImplementedError
def process_plus_recommended(self) -> None:
"""
Setup package processing to also include recommended dependencies
Implementation in specialized package manager class
"""
raise NotImplementedError
def match_package_installed(
self, package_name: str, package_manager_output: str
) -> bool:
"""
Match expression to indicate a package has been installed
Implementation in specialized package manager class
:param str package_name: unused
:param str package_manager_output: unused
:return: True|False
:rtype: bool
"""
raise NotImplementedError
def match_package_deleted(
self, package_name: str, package_manager_output: str
) -> bool:
"""
Match expression to indicate a package has been deleted
Implementation in specialized package manager class
:param str package_name: unused
:param str package_manager_output: unused
:return: True|False
:rtype: bool
"""
raise NotImplementedError
@decommissioned
def database_consistent(self) -> None:
pass # pragma: no cover
@decommissioned
def dump_reload_package_database(self, version: int = 45) -> None:
pass # pragma: no cover
def post_process_install_requests_bootstrap(
self, root_bind: RootBind = None, delta_root: bool = False
) -> None:
"""
Process extra code required after bootstrapping
Implementation in specialized package manager class
"""
pass
def post_process_delete_requests(
self, root_bind: RootBind = None
) -> None:
"""
Process extra code required after deleting packages
Implementation in specialized package manager class
"""
pass
@staticmethod
def has_failed(returncode: int) -> bool:
"""
Evaluate given result return code
Any returncode != 0 is considered an error unless
overwritten in specialized package manager class
:param int returncode: return code number
:return: True|False
:rtype: boolean
"""
return True if returncode != 0 else False
def get_error_details(self) -> str:
"""
Provide further error details
In case the package manager call failed this
method will return package manager specific error
information if there is any
:return: further error data as str or empty str
:rtype: str
"""
return ''
def clean_leftovers(self) -> None:
"""
Cleans package manager related data not needed in the
resulting image such as custom macros
Implementation in specialized package manager class
"""
pass
def cleanup_requests(self) -> None:
"""
Cleanup request queues
"""
del self.package_requests[:]
del self.collection_requests[:]
del self.product_requests[:]
del self.exclude_requests[:] |
5,138 | ignore registration fields | """
Forms for events
"""
import operator
from django.forms import (
BooleanField,
ChoiceField,
DateField,
DateInput,
Form,
ModelForm,
ValidationError,
)
from django.forms.models import fields_for_model
from django.forms.widgets import RadioSelect
from .models import Event
class EventForm(ModelForm):
"""
Form to validate creation and editing of Event-instances in the admin interface
"""
has_queue = BooleanField(
initial=True,
required=False,
label="Har venteliste",
help_text=(
"Hvis arrangementet har venteliste, "
"går det ann å melde seg på selv etter at det er fullt. "
"Man havner da på venteliste, og blir automatisk meldt på om det blir ledig."
),
)
# Fields required when registration_required is set
required_registration_fields = (
"places",
"registration_deadline",
"has_queue",
"registration_start",
)
registration_fields = required_registration_fields + ("deregistration_deadline",)
# Restrict order of the DateTimeFields
datetime_restrictions = (
# ("field_to_validate", "before/after", "field_to_compare_with"),
("event_end", "after", "event_start"),
("registration_start", "before", "event_start"),
("registration_deadline", "before", "event_start"),
("registration_deadline", "after", "registration_start"),
("deregistration_deadline", "before", "event_start"),
("deregistration_deadline", "after", "registration_start"),
)
class Meta:
model = Event
fields = fields_for_model(Event)
def clean(self):
self._validate_datetime_order()
registration_required = self.cleaned_data.get("registration_required")
if registration_required:
self._assert_required_registration_fields_supplied()
else:
self.METHOD_NAME()
return self.cleaned_data
def _validate_datetime_order(self):
"""Check if the datetime fields have the correct order."""
comparison_dict = {
"after": (
operator.gt,
'"%(field1)s" må ikke være tidligere enn "%(field2)s".',
),
"before": (
operator.lt,
'"%(field1)s" må ikke være senere enn "%(field2)s".',
),
}
for field1, comparison, field2 in self.datetime_restrictions:
date1 = self.cleaned_data.get(field1)
date2 = self.cleaned_data.get(field2)
if not (date1 and date2):
continue
op, msg = comparison_dict.get(comparison)
if not op(date1, date2):
error = ValidationError(
msg,
params={
"field1": self.fields[field1].label,
"field2": self.fields[field2].label,
},
)
self.add_error(field1, error)
def _assert_required_registration_fields_supplied(self):
for field in self.required_registration_fields:
if self.cleaned_data.get(field) is None and (field not in self._errors):
error = ValidationError(
'Feltet "%(field)s" er påkrevd når %(field2)s er valgt.',
params={
"field": self.fields[field].label,
"field2": self.fields["registration_required"].label,
},
)
self.add_error(field, error)
def METHOD_NAME(self):
for name in self.registration_fields:
self.cleaned_data[name] = None
# Ignorer feil relatert til feltet.
if name in self._errors:
del self._errors[name]
class HTML5DateInput(DateInput):
"""Use type="date" in input tag,
because HTML5 is nice"""
input_type = "date"
class FilterEventsForm(Form):
"""Form to filter and sort events in EventMainPage"""
type = ChoiceField(
choices=[("", "Alle"), ("event", "Arrangement"), ("bedpres", "Bedpres")],
widget=RadioSelect(),
required=False,
)
type.widget.attrs.update({"class": "filter__option"})
type.widget.option_template_name = "events/radio_option.html"
start_time = DateField(widget=HTML5DateInput, required=False, label="Etter dato") |
5,139 | write raw | """
A mixin module for USB Human Interface Device instruments
"""
import os
import struct
import time
from typing import Any, Optional
try:
import pywinusb.hid as hid # pyright: ignore[reportMissingModuleSource]
imported_hid = True
except ImportError:
# We will raise a proper error when we attempt to instantiate a driver.
# Raising an exception here will cause CI to fail under Linux
imported_hid = False
from qcodes.instrument.base import Instrument
class USBHIDMixin(Instrument):
"""
Args:
instance_id: The id of the instrument we want to connect to. If
there is only one instrument, then this argument is optional.
If more than one instrument happen to be connected, use
`enumerate_devices` method to query their IDs
timeout: Specify a timeout for this instrument in seconds
"""
# The following class attributes should be set by subclasses
vendor_id = 0x0000
product_id = 0x0000
@staticmethod
def _check_hid_import() -> None:
if os.name != 'nt':
raise ImportError("This driver only works on Windows.")
if imported_hid is False:
raise ImportError(
"pywinusb is not installed. Please install it by typing "
"'pip install pywinusb' in a qcodes environment terminal"
)
def __init__(self, name: str, instance_id: Optional[str] = None,
timeout: float = 2,
**kwargs: Any):
self._check_hid_import()
devs = hid.HidDeviceFilter(
product_id=self.product_id,
vendor_id=self.vendor_id,
instance_id=instance_id
).get_devices()
if len(devs) == 0:
raise RuntimeError("No instruments found!")
elif len(devs) > 1:
raise RuntimeError("Multiple HID devices detected! Please supply "
"a instance id")
self._device = devs[0]
self._device.open()
self._data_buffer: Optional[bytes] = None
self._device.set_raw_data_handler(self._handler)
self._timeout = timeout
self._tries_per_second = 5
super().__init__(name, **kwargs)
def _handler(self, data: bytes) -> None:
self._data_buffer = data
def _get_data_buffer(self) -> Optional[bytes]:
data = self._data_buffer
self._data_buffer = None
return data
def _pack_string(self, cmd: str) -> bytes:
raise NotImplementedError("Please subclass")
def _unpack_string(self, response: bytes) -> str:
raise NotImplementedError("Please subclass")
def METHOD_NAME(self, cmd: str) -> None:
"""
Send a string command to the human interface device
The given command is processed by `_pack_string` method to return a
byte sequence that is going to be actually sent to the device.
Subclasses must implement `_pack_string` method.
Args:
cmd: a command to send in a form of a string
"""
data = self._pack_string(cmd)
result = self._device.send_output_report(data)
if not result:
raise RuntimeError(f"Communication with device failed for command "
f"{cmd}")
def ask_raw(self, cmd: str) -> str:
"""
Send a string command to the human interface device and wait for a reply
The given command is processed by `_pack_string` method to return a
byte sequence that is going to be actually sent to the device.
Subclasses must implement `_pack_string` method.
The byte sequence of the reply is processed by `_unpack_string`
method, and the resulting string is returned. Subclasses must
implement `_unpack_string` method.
Args:
cmd: a command to send in a form of a string
"""
self.METHOD_NAME(cmd)
number_of_tries = int(self._tries_per_second * self._timeout)
response = None
for _ in range(number_of_tries):
time.sleep(1 / self._tries_per_second)
response = self._get_data_buffer()
if response is not None:
break
if response is None:
raise TimeoutError(f"Timed out for command {cmd}")
return self._unpack_string(response)
def close(self) -> None:
self._device.close()
@classmethod
def enumerate_devices(cls) -> list[str]:
"""
This method returns the 'instance_id's of all connected devices for
with the given product and vendor IDs.
"""
cls._check_hid_import()
devs = hid.HidDeviceFilter(
porduct_id=cls.product_id,
vendor_id=cls.vendor_id
).get_devices()
return [dev.instance_id for dev in devs]
class MiniCircuitsHIDMixin(USBHIDMixin):
    """
    MiniCircuits-specific implementation of the USB HID transport.
    Enables the instrument's `write`/`ask` methods to carry SCPI commands
    to MiniCircuits instruments over a USB HID connection.
    Args:
        name: instrument name
        instance_id: The id of the instrument we want to connect. If there is
            only one instrument then this is an optional argument. If we have
            more than one instrument, use the class method
            `enumerate_devices` to query their IDs
        timeout: Specify a timeout for this instrument in seconds
    """
    def __init__(self, name: str, instance_id: Optional[str] = None,
                 timeout: float = 2,
                 **kwargs: Any):
        # USB interrupt code that tells the device a SCPI command follows
        self._sending_scpi_cmds_code = 1
        self._usb_endpoint = 0
        self._end_of_message = b"\x00"
        self.packet_size = 64
        super().__init__(name, instance_id, timeout, **kwargs)

    def _pack_string(self, cmd: str) -> bytes:
        """
        Pack a string into the fixed-size binary packet sent to the HID.
        Args:
            cmd: a SCPI command to send
        """
        payload = cmd.encode("ascii")
        # One byte of the packet is taken by the USB interrupt code, hence
        # the "-1" when computing the required zero padding.
        padding = self.packet_size - len(payload) - 1
        if padding < 0:
            raise ValueError(f"Length of data exceeds {self.packet_size} B")
        return struct.pack(
            f"BB{len(payload)}s{padding}x",
            self._usb_endpoint,
            self._sending_scpi_cmds_code,
            payload
        )

    def _unpack_string(self, response: bytes) -> str:
        """
        Unpack a raw byte packet received from the instrument into a string.
        Note that this method is not specific to SCPI-only responses.
        Args:
            response: a raw byte sequence response from the instrument
        """
        # Skip the endpoint and interrupt-code bytes; keep the payload.
        _, _, payload = struct.unpack(
            f"BB{self.packet_size - 1}s",
            bytes(response)
        )
        terminator = payload.find(self._end_of_message)
        return payload[:terminator].decode("ascii")
5,140 | test cvm marked op stop not filter | # Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import time
import pytest
from tc_common import BaseTest
from test_tc_cvm import assert_instance_states
def get_tags(policy, resource):
    """Fetch tags for *resource* via the policy's source and return them as a dict."""
    resource_copy = dict(resource)
    policy.resource_manager.source.get_resource_tag([resource_copy])
    tags = resource_copy.get('Tags', ())
    return {tag['Key']: tag['Value'] for tag in tags}
class TestCvmTagAction(BaseTest):
    """VCR-backed tests for tag and mark-for-op actions on Tencent Cloud CVM."""

    @pytest.mark.vcr
    def test_cvm_mark_op_stop(self):
        # Marking an untagged instance for a delayed "stop" adds the default
        # "maid_status" marker tag.
        policy = self.load_policy(
            {
                "name": "cvm-mark-for-op-stop",
                "resource": "tencentcloud.cvm",
                "query": [{
                    "InstanceIds": ["ins-00lycyy6"]
                }],
                "filters": [{"tag:maid_status": "absent"}],
                "actions": [
                    {
                        "type": "mark-for-op",
                        "op": "stop",
                        "days": 14
                    }
                ]
            },
        )
        resources = policy.run()
        assert resources
        if self.recording:
            time.sleep(3)
        assert 'maid_status' in get_tags(policy, resources.pop())

    @pytest.mark.vcr
    def METHOD_NAME(self):
        # Contradictory filters (marked-for-op AND "not" marked-for-op) must
        # match nothing, so the stop action is never applied.
        policy = self.load_policy(
            {
                "name": "cvm-marked-for-op-stop",
                "resource": "tencentcloud.cvm",
                "query": [{
                    "InstanceIds": ["ins-00lycyy6"]
                }],
                "filters": [
                    {
                        "type": "marked-for-op",
                        "op": "stop",
                        "skew": 14
                    }, {
                        "not": [{
                            "type": "marked-for-op",
                            "op": "stop",
                            "skew": 14
                        }]
                    }
                ], "actions": [
                    {
                        "type": "stop"
                    }
                ]
            },
        )
        resources = policy.run()
        assert not resources
        if self.recording:
            time.sleep(10)

    @pytest.mark.vcr
    def test_cvm_marked_op_stop(self):
        # An instance previously marked for "stop" is matched and stopped.
        policy = self.load_policy(
            {
                "name": "cvm-marked-for-op-stop",
                "resource": "tencentcloud.cvm",
                "query": [{
                    "InstanceIds": ["ins-00lycyy6"]
                }],
                "filters": [
                    {
                        "type": "marked-for-op",
                        "op": "stop",
                        "skew": 14
                    }
                ], "actions": [
                    {
                        "type": "stop"
                    }
                ]
            },
        )
        resources = policy.run()
        assert resources[0]["InstanceState"] == "RUNNING"
        if self.recording:
            time.sleep(10)
        assert_instance_states(policy, resources.pop(), ("STOPPING", "STOPPED"))

    @pytest.mark.vcr
    def test_cvm_mark_op_terminate_and_stop(self):
        # Marking for "terminate" records the marker under "maid_status" too.
        policy = self.load_policy(
            {
                "name": "cvm-mark-for-op-terminate",
                "resource": "tencentcloud.cvm",
                "query": [{
                    "InstanceIds": ["ins-nhhm5ppo"]
                }],
                "actions": [
                    {
                        "type": "mark-for-op",
                        "op": "terminate",
                        "days": 7
                    },
                ]
            },
        )
        resources = policy.run()
        tags = get_tags(policy, resources.pop())
        assert 'maid_status' in tags

    @pytest.mark.vcr
    def test_add_tag(self):
        # The "tag" action adds a key/value pair to all matched instances.
        policy = self.load_policy(
            {
                "name": "cvm-test-tag",
                "resource": "tencentcloud.cvm",
                "query": [{
                    "InstanceIds": ["ins-00lycyy6", "ins-nhhm5ppo"]
                }],
                "filters": [{"tag:tag_add_test_key_for_test": "absent"}],
                "actions": [
                    {
                        "type": "tag",
                        "key": "tag_add_test_key_for_test",
                        "value": "tag_add_test_value_for_test"
                    }
                ]
            },
        )
        resources = policy.run()
        assert len(resources) == 2
        if self.recording:
            time.sleep(3)
        rtags = get_tags(policy, resources[-1])
        assert rtags.get('tag_add_test_key_for_test') == 'tag_add_test_value_for_test'

    @pytest.mark.vcr
    def test_modify_tag(self):
        # "rename-tag" moves a tag value from the old key to the new key.
        policy = self.load_policy(
            {
                "name": "cvm-test-rename-tag",
                "resource": "tencentcloud.cvm",
                "query": [{
                    "InstanceIds": ["ins-00lycyy6", "ins-nhhm5ppo"]
                }],
                "filters": [{"tag:tag_add_test_key_for_test": "present"}],
                "actions": [
                    {
                        "type": "rename-tag",
                        "old_key": "tag_add_test_key_for_test",
                        "new_key": "tag_add_test_key_for_test_rename"
                    }
                ]
            },
        )
        resources = policy.run()
        assert len(resources) == 2
        if self.recording:
            time.sleep(10)
        rtags = get_tags(policy, resources[0])
        assert 'tag_add_test_key_for_test' not in rtags
        assert 'tag_add_test_key_for_test_rename' in rtags

    @pytest.mark.vcr
    def test_remove_tag(self):
        # "remove-tag" deletes the listed tag keys from matched instances.
        policy = self.load_policy(
            {
                "name": "cvm-test-remove-tag",
                "resource": "tencentcloud.cvm",
                "query": [{
                    "InstanceIds": ["ins-00lycyy6"]
                }],
                "filters": [{
                    "tag:tag_add_test_key_for_test_rename": "present"}],
                "actions": [
                    {
                        "type": "remove-tag",
                        "tags": ["tag_add_test_key_for_test_rename"]
                    }
                ]
            },
        )
        resources = policy.run()
        assert resources
        if self.recording:
            time.sleep(3)
        assert 'tag_add_test_key_for_test_rename' not in get_tags(policy, resources.pop())
5,141 | exact laplacian kernel | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility methods related to kernelized layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
def _to_matrix(u):
  """If input tensor is a vector (i.e., has rank 1), converts it to matrix."""
  rank = len(u.shape)
  if rank == 1:
    return array_ops.expand_dims(u, 0)
  if rank == 2:
    return u
  raise ValueError('The input tensor should have rank 1 or 2. Given rank: {}'
                   .format(rank))
def _align_matrices(x, y):
  """Aligns x and y tensors to allow computations over pairs of their rows."""
  x_mat = _to_matrix(x)
  y_mat = _to_matrix(y)
  if y_mat.shape[1] != x_mat.shape[1]:  # feature dimensions must agree
    raise ValueError(
        'The outermost dimensions of the input tensors should match. Given: {} '
        'vs {}.'.format(y_mat.shape[1], x_mat.shape[1]))
  # Broadcast x rows along axis 1 and y rows along axis 0 so that every
  # (row of x, row of y) pair lines up in the first two dimensions.
  x_tile = array_ops.tile(
      array_ops.expand_dims(x_mat, 1), [1, y_mat.shape[0], 1])
  y_tile = array_ops.tile(
      array_ops.expand_dims(y_mat, 0), [x_mat.shape[0], 1, 1])
  return x_tile, y_tile
def inner_product(u, v):
  """Returns the matrix of pairwise inner products of rows of u and v."""
  return math_ops.matmul(_to_matrix(u), _to_matrix(v), transpose_b=True)
def exact_gaussian_kernel(x, y, stddev):
  r"""Computes exact Gaussian kernel value(s) for tensors x and y and stddev.
  For vectors u, v the Gaussian kernel is defined as
       K(u, v) = exp(-||u-v||^2 / (2* stddev^2))
  where the norm is the l2-norm. x, y can be either vectors or matrices. If
  vectors, they must have the same dimension; if matrices, the same number of
  columns, in which case a matrix of K(u, v) values for all row pairs (u, v)
  of x and y is returned.
  Args:
    x: a tensor of rank 1 or 2. It's shape should be either [dim] or [m, dim].
    y: a tensor of rank 1 or 2. It's shape should be either [dim] or [n, dim].
    stddev: The width of the Gaussian kernel.
  Returns:
    A single value (scalar) with shape (1, 1) (if x, y are vectors) or a matrix
    of shape (m, n) with entries K(u, v) for all (u,v) row pairs of x and y.
  Raises:
    ValueError: if the shapes of x, y are not compatible.
  """
  x_aligned, y_aligned = _align_matrices(x, y)
  sq_l2_dist = math_ops.reduce_sum(
      math_ops.squared_difference(x_aligned, y_aligned), 2)
  return math_ops.exp(-sq_l2_dist / (2 * stddev * stddev))
def METHOD_NAME(x, y, stddev):
  r"""Computes exact Laplacian kernel value(s) for tensors x and y using stddev.
  The Laplacian kernel for vectors u, v is defined as follows:
       K(u, v) = exp(-||u-v|| / stddev)
  where the norm is the l1-norm. x, y can be either vectors or matrices. If they
  are vectors, they must have the same dimension. If they are matrices, they
  must have the same number of columns. In the latter case, the method returns
  (as a matrix) K(u, v) values for all pairs (u, v) where u is a row from x and
  v is a row from y.
  Args:
    x: a tensor of rank 1 or 2. It's shape should be either [dim] or [m, dim].
    y: a tensor of rank 1 or 2. It's shape should be either [dim] or [n, dim].
    stddev: The width of the Laplacian kernel.
  Returns:
    A single value (scalar) with shape (1, 1) if x, y are vectors or a matrix
    of shape (m, n) with entries K(u, v) (where K is the Laplacian kernel) for
    all (u,v) pairs where u, v are rows from x and y respectively.
  Raises:
    ValueError: if the shapes of x, y are not compatible.
  """
  x_aligned, y_aligned = _align_matrices(x, y)
  diff_l1_norm = math_ops.reduce_sum(
      math_ops.abs(math_ops.subtract(x_aligned, y_aligned)), 2)
  return math_ops.exp(-diff_l1_norm / stddev)
5,142 | check policy exists | from core.providers.aws.boto3 import prepare_aws_client_with_given_cred
from core.providers.aws.boto3 import prepare_aws_resource_with_given_cred
import boto3
def get_iam_client(aws_auth_cred):
    """
    Returns the client object for AWS IAM
    Args:
        aws_auth_cred (dict): AWS Auth details with region information
    Returns:
        obj: AWS IAM Object
    """
    return prepare_aws_client_with_given_cred('iam', aws_auth_cred)
def get_iam_resource(aws_auth_cred):
    """
    Returns the Resource client object for AWS IAM
    Args:
        aws_auth_cred (dict): AWS Auth details with region information
    Returns:
        obj: AWS IAM Resource Object
    """
    return prepare_aws_resource_with_given_cred('iam', aws_auth_cred)
def get_user_name(aws_auth_cred):
    """
    Returns the username for the given user credentials.
    Args:
        aws_auth_cred (dict): AWS Auth details with region information
    Returns:
        str: AWS IAM user name
    """
    return get_iam_resource(aws_auth_cred).CurrentUser().user_name
def get_current_user(aws_auth_cred):
    """
    Returns the user details for the given user credentials.
    Args:
        aws_auth_cred (dict): AWS Auth details with region information
    Returns:
        obj: AWS IAM CurrentUser resource
    """
    return get_iam_resource(aws_auth_cred).CurrentUser()
def get_aws_account_user(aws_auth_cred):
    """
    Returns the user details of the current user.
    Args:
        aws_auth_cred (dict): AWS Auth details with region information
    Returns:
        obj: AWS IAM CurrentUser resource
    """
    # Bug fix: previously this called `get_iam_resource.CurrentUser()` --
    # an attribute access on the *function object* itself, which always
    # raised AttributeError. The resource must be built first.
    return get_iam_resource(aws_auth_cred).CurrentUser()
def get_iam_user_policy_names(user_name, aws_auth_cred):
    """
    Returns the names of all policies the given user has, both attached
    (managed) and inline.
    Args:
        user_name (str): AWS user name
        aws_auth_cred (dict): AWS Auth details with region information
    Returns:
        list: policy names the user has
    """
    client = get_iam_client(aws_auth_cred)
    managed = [
        policy['PolicyName']
        for policy in client.list_attached_user_policies(
            UserName=user_name)['AttachedPolicies']
    ]
    inline = client.list_user_policies(UserName=user_name)['PolicyNames']
    return managed + inline
def get_group_managed_policy_names(iam_client, groups):
    """
    Returns the managed policy names attached to the given groups.
    Args:
        iam_client (obj): IAM client obj
        groups (list): User groups
    Returns:
        list: managed policy names attached to the groups
    """
    names = []
    for group in groups:
        response = iam_client.list_attached_group_policies(
            GroupName=group['GroupName'])
        names.extend(policy['PolicyName']
                     for policy in response['AttachedPolicies'])
    return names
def get_group_policy_names(iam_client, groups):
    """
    Returns the inline policy names of the given groups.
    Args:
        iam_client (obj): IAM client obj
        groups (list): User groups
    Returns:
        list: inline policy names defined on the groups
    """
    names = []
    for group in groups:
        names.extend(
            iam_client.list_group_policies(
                GroupName=group['GroupName'])['PolicyNames'])
    return names
def get_user_group_policy_names(user_name, aws_auth_cred):
    """
    Returns all policy names a user has through group membership.
    Args:
        user_name (str): AWS user name
        aws_auth_cred (dict): AWS Auth details with region information
    Returns:
        list: all group policy names (managed + inline) the user has
    """
    client = get_iam_client(aws_auth_cred)
    groups = client.list_groups_for_user(UserName=user_name)['Groups']
    return (get_group_managed_policy_names(client, groups)
            + get_group_policy_names(client, groups))
def get_all_policy_names(aws_auth_cred):
    """
    Returns all group and user policy names of the current user.
    Args:
        aws_auth_cred (dict): AWS Auth details with region information
    Returns:
        list: user policy names plus group policy names the current user has
    """
    # Consistency: reuse get_user_name() instead of duplicating the
    # CurrentUser lookup inline.
    user_name = get_user_name(aws_auth_cred)
    user_policy_names = get_iam_user_policy_names(user_name, aws_auth_cred)
    user_group_policy_names = get_user_group_policy_names(user_name, aws_auth_cred)
    return user_policy_names + user_group_policy_names
def get_role_policy_names(role_name, aws_auth_cred):
    """
    Returns names of all managed policies attached to the given IAM role.
    Args:
        role_name (str): Role name
        aws_auth_cred (dict): AWS Auth details with region information
    Returns:
        list: policy names attached to the role
    """
    client = get_iam_client(aws_auth_cred)
    response = client.list_attached_role_policies(RoleName=role_name)
    return [policy['PolicyName'] for policy in response['AttachedPolicies']]
def create_iam_service_linked_role(service_name, desc, aws_auth_cred):
    """
    Create an AWS service-linked role for the given service.
    Args:
        service_name (str): Service name
        desc (str): Description
        aws_auth_cred (dict): AWS Auth details with region information
    Returns:
        tuple: (True, None) if created, else (False, error message)
    """
    # Removed an unused, misleading local (`role_name`) that was never
    # passed to the API call.
    iam_client = get_iam_client(aws_auth_cred)
    try:
        iam_client.create_service_linked_role(
            AWSServiceName=service_name,
            Description=desc
        )
        return True, None
    except Exception as e:
        return False, str(e)
def check_role_exists(role_name, aws_auth_cred):
    """
    Check whether the given IAM role already exists in the AWS Account.
    Args:
        role_name (str): Role name
        aws_auth_cred (dict): AWS Auth details with region information
    Returns:
        bool: True if the role exists else False
    """
    iam_client = get_iam_client(aws_auth_cred)
    try:
        # get_role raises (e.g. NoSuchEntityException) when the role is
        # absent; any failure is treated as "does not exist".
        role = iam_client.get_role(RoleName=role_name)
        return bool(role)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        return False
def METHOD_NAME(policy_name, account_id, aws_auth_cred):
    """
    Check whether the given IAM policy already exists in the AWS Account.
    Args:
        policy_name (str): Policy name
        account_id (str|int): AWS account id used to build the policy ARN
        aws_auth_cred (dict): AWS Auth details with region information
    Returns:
        bool: True if the policy exists else False
    """
    iam_client = get_iam_client(aws_auth_cred)
    policy_arn = "arn:aws:iam::%s:policy/%s" % (str(account_id), policy_name)
    try:
        # get_policy raises when the policy is absent; any failure is
        # treated as "does not exist".
        policy = iam_client.get_policy(PolicyArn=policy_arn)
        return bool(policy)
    except Exception:
        # Narrowed from a bare `except:`.
        return False
def check_instance_profile_exists(instance_profile_name, aws_auth_cred):
    """
    Check whether the given IAM instance profile already exists in the AWS
    Account.
    Args:
        instance_profile_name (str): Instance profile name
        aws_auth_cred (dict): AWS Auth details with region information
    Returns:
        bool: True if the instance profile exists else False
    """
    iam_client = get_iam_client(aws_auth_cred)
    try:
        profile = iam_client.get_instance_profile(
            InstanceProfileName=instance_profile_name)
        return bool(profile)
    except Exception:
        # Narrowed from a bare `except:`.
        return False
5,143 | test c1 projections | # coding: utf-8
import numpy as np
import pytest
from mpi4py import MPI
from sympde.topology.analytical_mapping import PolarMapping
from psydac.polar.c1_projections import C1Projector
from psydac.mapping.discrete import SplineMapping
from psydac.linalg.stencil import StencilVector, StencilMatrix
from psydac.fem.splines import SplineSpace
from psydac.fem.tensor import TensorFemSpace
from psydac.ddm.cart import DomainDecomposition
#==============================================================================
@pytest.mark.parametrize('degrees', [(2, 2), (2, 3), (3,2), (3, 3)])
@pytest.mark.parametrize('ncells' , [(9, 11), (10, 12), (12, 14)])
def METHOD_NAME(degrees, ncells, verbose=False):
    """Check consistency of the C1 projector's basis-change operations.

    Verifies that (E^T A E) b' equals E^T (A (E b')) and that the projected
    matrix-vector product is reproducible (required by Krylov solvers).
    """
    if verbose:
        np.set_printoptions(precision=2, linewidth=200)
    #--------------------------------------------
    # Setup
    #--------------------------------------------
    # Geometry: logical domain and mapping
    lims_1 = (0, 1)
    lims_2 = (0, 2*np.pi)
    period_1 = False
    period_2 = True
    map_analytic = PolarMapping('M', dim=2, rmin=0, rmax=1, c1=0.0, c2=0.0)
    # Discretization: number of elements and polynomial degree
    ne1, ne2 = ncells
    p1 , p2 = degrees
    #--------------------------------------------
    # Spline space and C1 projector
    #--------------------------------------------
    # Uniform grid in logical space
    grid_1 = np.linspace(*lims_1, num=ne1+1)
    grid_2 = np.linspace(*lims_2, num=ne2+1)
    # 1D finite element spaces
    V1 = SplineSpace(p1, grid=grid_1, periodic=period_1)
    V2 = SplineSpace(p2, grid=grid_2, periodic=period_2)
    domain_decomposition = DomainDecomposition(ncells, periods=[period_1, period_2], comm=MPI.COMM_WORLD)
    # 2D tensor-product space
    V = TensorFemSpace(domain_decomposition, V1, V2)
    # Spline mapping
    map_discrete = SplineMapping.from_mapping(V, map_analytic.get_callable_mapping())
    # C1 projector
    proj = C1Projector(map_discrete)
    #--------------------------------------------
    # Linear algebra objects
    #--------------------------------------------
    # Matrix and vector in tensor-product basis
    A = StencilMatrix(V.vector_space, V.vector_space)
    b = StencilVector(V.vector_space)
    # Set values of matrix (Laplacian-like 5-point stencil)
    A[:, :, 0, 0] = 4
    A[:, :, 0,-1] = -1
    A[:, :, 0,+1] = -1
    A[:, :,-1, 0] = -2
    A[:, :,+1, 0] = -2
    # Add (symmetric) random perturbation to matrix
    s1, s2 = V.vector_space.starts
    e1, e2 = V.vector_space.ends
    n1, n2 = A.domain.npts
    perturbation = 0.1 * np.random.random((e1-s1+1, e2-s2+1, p1, p2))
    for i1 in range(s1, e1+1):
        for i2 in range(s2, e2+1):
            for k1 in range(1, p1):
                for k2 in range(1, p2):
                    # Mirror each perturbation entry so A stays symmetric
                    # (indices wrap for the periodic direction).
                    j1 = (i1+k1) % n1
                    j2 = (i2+k2) % n2
                    eps = perturbation[i1-s1, i2-s2, k1, k2]
                    A[i1,i2, k1, k2] += eps
                    A[j1,j2,-k1,-k2] += eps
    A.remove_spurious_entries()
    if verbose:
        print("A:")
        print(A.toarray())
        print()
    # Set values of vector
    s1, s2 = b.starts
    e1, e2 = b.ends
    b[s1:e1+1, s2:e2+1] = np.random.random((e1-s1+1, e2-s2+1))
    b.update_ghost_regions()
    if verbose:
        print("b:")
        print(b.toarray().reshape(b.space.npts))
        print()
    #--------------------------------------------
    # Test all methods of C1 projector
    #--------------------------------------------
    # Compute A' = E^T A E
    # Compute b' = E b
    Ap = proj.change_matrix_basis(A)
    bp = proj.change_rhs_basis (b)
    # Compute (E^T A E) b' = A' b'
    Ap_bp = Ap.dot(bp)
    # Compute E^T (A (E b')) = A' b'
    E_bp = proj.convert_to_tensor_basis(bp)
    A_E_bp = A.dot(E_bp)
    Et_A_E_bp = proj.change_rhs_basis(A_E_bp)
    if verbose:
        print("(E^T A E) b' :")
        print(Ap_bp[0].toarray())
        print(Ap_bp[1].toarray().reshape(Ap_bp[1].space.npts))
        print()
        print("E^T (A (E b')) :" )
        print(Et_A_E_bp[0].toarray() )
        print(Et_A_E_bp[1].toarray().reshape(Et_A_E_bp[1].space.npts))
        print()
    # Verify that the two results are identical
    kwargs = {'rtol': 1e-10, 'atol': 1e-10}
    assert np.allclose(Ap_bp[0].toarray(), Et_A_E_bp[0].toarray(), **kwargs)
    assert np.allclose(Ap_bp[1].toarray(), Et_A_E_bp[1].toarray(), **kwargs)
    # Verify that matrix-vector product can be computed correctly twice
    # (necessary for Krylov solvers)
    Ap_bp_array = Ap.toarray() @ bp.toarray()
    assert np.allclose(Ap_bp_array, Ap_bp.toarray(), **kwargs)
    assert np.allclose(Ap_bp_array, Ap.dot(bp).toarray(), **kwargs)
    if verbose:
        print("PASSED")
    # Expose all locals for interactive inspection when run as a script.
    return locals()
#==============================================================================
if __name__ == "__main__":
    # Run a single small case with verbose output when executed directly.
    namespace = METHOD_NAME(degrees=(2, 2), ncells=(4, 5), verbose=True)
5,144 | test spelling | import gc
import os
import tempfile
from clang.cindex import CursorKind
from clang.cindex import Cursor
from clang.cindex import File
from clang.cindex import Index
from clang.cindex import SourceLocation
from clang.cindex import SourceRange
from clang.cindex import TranslationUnitSaveError
from clang.cindex import TranslationUnitLoadError
from clang.cindex import TranslationUnit
from .util import get_cursor
from .util import get_tu
# Directory holding the C/C++ input source files used by these tests.
kInputsDir = os.path.join(os.path.dirname(__file__), 'INPUTS')
def METHOD_NAME():
    """The translation unit's spelling is the path it was parsed from."""
    path = os.path.join(kInputsDir, 'hello.cpp')
    tu = TranslationUnit.from_source(path)
    assert tu.spelling == path
def test_cursor():
    """The root cursor of a TU is a TRANSLATION_UNIT cursor."""
    path = os.path.join(kInputsDir, 'hello.cpp')
    tu = get_tu(path)
    c = tu.cursor
    assert isinstance(c, Cursor)
    assert c.kind is CursorKind.TRANSLATION_UNIT
def test_parse_arguments():
    """-D macro definitions passed to from_source affect parsed spellings."""
    path = os.path.join(kInputsDir, 'parse_arguments.c')
    tu = TranslationUnit.from_source(path, ['-DDECL_ONE=hello', '-DDECL_TWO=hi'])
    spellings = [c.spelling for c in tu.cursor.get_children()]
    assert spellings[-2] == 'hello'
    assert spellings[-1] == 'hi'
def test_reparse_arguments():
    """reparse() preserves the command-line arguments of the original parse."""
    path = os.path.join(kInputsDir, 'parse_arguments.c')
    tu = TranslationUnit.from_source(path, ['-DDECL_ONE=hello', '-DDECL_TWO=hi'])
    tu.reparse()
    spellings = [c.spelling for c in tu.cursor.get_children()]
    assert spellings[-2] == 'hello'
    assert spellings[-1] == 'hi'
def test_unsaved_files():
    """In-memory (unsaved) file contents are used for both source and header."""
    tu = TranslationUnit.from_source('fake.c', ['-I./'], unsaved_files = [
            ('fake.c', """
#include "fake.h"
int x;
int SOME_DEFINE;
"""),
            ('./fake.h', """
#define SOME_DEFINE y
""")
        ])
    spellings = [c.spelling for c in tu.cursor.get_children()]
    assert spellings[-2] == 'x'
    assert spellings[-1] == 'y'
def test_unsaved_files_2():
    """Unsaved file contents may be supplied as a file-like object."""
    # NOTE(review): `import StringIO` is Python 2 only; under Python 3 this
    # would need `io.StringIO`. Presumably this suite targets Python 2 --
    # confirm before porting.
    import StringIO
    tu = TranslationUnit.from_source('fake.c', unsaved_files = [
            ('fake.c', StringIO.StringIO('int x;'))])
    spellings = [c.spelling for c in tu.cursor.get_children()]
    assert spellings[-1] == 'x'
def normpaths_equal(path1, path2):
    """Return True when the two paths are equal after os.path.normpath."""
    return os.path.normpath(path1) == os.path.normpath(path2)
def test_includes():
    """get_includes() yields the expected (source, include) pairs in order."""
    def eq(expected, actual):
        if not actual.is_input_file:
            return normpaths_equal(expected[0], actual.source.name) and \
                    normpaths_equal(expected[1], actual.include.name)
        else:
            return normpaths_equal(expected[1], actual.include.name)
    src = os.path.join(kInputsDir, 'include.cpp')
    h1 = os.path.join(kInputsDir, "header1.h")
    h2 = os.path.join(kInputsDir, "header2.h")
    h3 = os.path.join(kInputsDir, "header3.h")
    # Expected inclusion chain: src -> h1 -> h3 and src -> h2 -> h3.
    inc = [(src, h1), (h1, h3), (src, h2), (h2, h3)]
    tu = TranslationUnit.from_source(src)
    for i in zip(inc, tu.get_includes()):
        assert eq(i[0], i[1])
def save_tu(tu):
    """Convenience API to save a TranslationUnit to a file.
    Returns the filename it was saved to.
    """
    fd, path = tempfile.mkstemp()
    # Bug fix: mkstemp returns an *open* file descriptor which was
    # previously discarded and leaked; close it before tu.save reopens
    # the file by path.
    os.close(fd)
    tu.save(path)
    return path
def test_save():
    """Ensure TranslationUnit.save() works."""
    tu = get_tu('int foo();')
    path = save_tu(tu)
    # The saved AST file must exist and be non-empty.
    assert os.path.exists(path)
    assert os.path.getsize(path) > 0
    os.unlink(path)
def test_save_translation_errors():
    """Ensure that saving to an invalid directory raises."""
    tu = get_tu('int foo();')
    path = '/does/not/exist/llvm-test.ast'
    assert not os.path.exists(os.path.dirname(path))
    try:
        tu.save(path)
        # save() must not succeed for a nonexistent directory.
        assert False
    except TranslationUnitSaveError as ex:
        expected = TranslationUnitSaveError.ERROR_UNKNOWN
        assert ex.save_error == expected
def test_load():
    """Ensure TranslationUnits can be constructed from saved files."""
    tu = get_tu('int foo();')
    assert len(tu.diagnostics) == 0
    path = save_tu(tu)
    assert os.path.exists(path)
    assert os.path.getsize(path) > 0
    # Round-trip: loading the saved AST yields an equivalent, clean TU.
    tu2 = TranslationUnit.from_ast_file(filename=path)
    assert len(tu2.diagnostics) == 0
    foo = get_cursor(tu2, 'foo')
    assert foo is not None
    # Just in case there is an open file descriptor somewhere.
    del tu2
    os.unlink(path)
def test_index_parse():
    """Index.parse() returns a TranslationUnit."""
    path = os.path.join(kInputsDir, 'hello.cpp')
    index = Index.create()
    tu = index.parse(path)
    assert isinstance(tu, TranslationUnit)
def test_get_file():
    """Ensure tu.get_file() works appropriately."""
    tu = get_tu('int foo();')
    f = tu.get_file('t.c')
    assert isinstance(f, File)
    assert f.name == 't.c'
    # Requesting a file that is not part of the TU must raise.
    try:
        f = tu.get_file('foobar.cpp')
    except:
        pass
    else:
        assert False
def test_get_source_location():
    """Ensure tu.get_location() works with both offset and (line, column)."""
    tu = get_tu('int foo();')
    # An integer position is interpreted as a byte offset.
    location = tu.get_location('t.c', 2)
    assert isinstance(location, SourceLocation)
    assert location.offset == 2
    assert location.file.name == 't.c'
    # A tuple position is interpreted as (line, column).
    location = tu.get_location('t.c', (1, 3))
    assert isinstance(location, SourceLocation)
    assert location.line == 1
    assert location.column == 3
    assert location.file.name == 't.c'
def test_get_source_range():
    """Ensure tu.get_extent() accepts offsets, (line, col) pairs, and locations."""
    tu = get_tu('int foo();')
    # Integer pair: byte offsets.
    r = tu.get_extent('t.c', (1,4))
    assert isinstance(r, SourceRange)
    assert r.start.offset == 1
    assert r.end.offset == 4
    assert r.start.file.name == 't.c'
    assert r.end.file.name == 't.c'
    # Nested tuples: (line, column) pairs.
    r = tu.get_extent('t.c', ((1,2), (1,3)))
    assert isinstance(r, SourceRange)
    assert r.start.line == 1
    assert r.start.column == 2
    assert r.end.line == 1
    assert r.end.column == 3
    assert r.start.file.name == 't.c'
    assert r.end.file.name == 't.c'
    # SourceLocation objects.
    start = tu.get_location('t.c', 0)
    end = tu.get_location('t.c', 5)
    r = tu.get_extent('t.c', (start, end))
    assert isinstance(r, SourceRange)
    assert r.start.offset == 0
    assert r.end.offset == 5
    assert r.start.file.name == 't.c'
    assert r.end.file.name == 't.c'
def test_get_tokens_gc():
    """Ensures get_tokens() works properly with garbage collection."""
    tu = get_tu('int foo();')
    r = tu.get_extent('t.c', (0, 10))
    tokens = list(tu.get_tokens(extent=r))
    assert tokens[0].spelling == 'int'
    gc.collect()
    # Tokens must stay valid after collection cycles and partial deletion.
    assert tokens[0].spelling == 'int'
    del tokens[1]
    gc.collect()
    assert tokens[0].spelling == 'int'
    # May trigger segfault if we don't do our job properly.
    del tokens
    gc.collect()
    gc.collect() # Just in case.
def test_fail_from_source():
    """from_source() on a missing file raises TranslationUnitLoadError."""
    path = os.path.join(kInputsDir, 'non-existent.cpp')
    try:
        tu = TranslationUnit.from_source(path)
    except TranslationUnitLoadError:
        tu = None
    # Idiom fix: compare to None with `is`, not `==`.
    assert tu is None
def test_fail_from_ast_file():
    """from_ast_file() on a missing file raises TranslationUnitLoadError."""
    path = os.path.join(kInputsDir, 'non-existent.ast')
    try:
        tu = TranslationUnit.from_ast_file(path)
    except TranslationUnitLoadError:
        tu = None
    # Idiom fix: compare to None with `is`, not `==`.
    assert tu is None
5,145 | drop table from database | from __future__ import annotations
import logging
from pathlib import Path
from typing import TYPE_CHECKING
logger = logging.getLogger(__name__)
# https://stackoverflow.com/questions/39740632/python-type-hinting-without-cyclic-imports
if TYPE_CHECKING:
from .linker import Linker
class SplinkDataFrame:
    """Abstraction over dataframe to handle basic operations like retrieving data and
    retrieving column names, which need different implementations depending on whether
    it's a spark dataframe, sqlite table etc.
    Uses methods like `as_pandas_dataframe()` and `as_record_dict()` to retrieve data
    """

    def __init__(self, templated_name: str, physical_name: str, linker: Linker):
        self.templated_name = templated_name
        self.physical_name = physical_name
        self.linker = linker
        self._target_schema = "splink"
        # Whether Splink itself created the backing table (guards drops).
        self.created_by_splink = False
        self.sql_used_to_create = None

    @property
    def columns(self):
        # Abstract: backend subclasses return the column objects.
        pass

    @property
    def columns_escaped(self):
        cols = self.columns
        return [c.name() for c in cols]

    def validate(self):
        # Bug fix: this was declared as `def validate():` without `self`,
        # so calling it on an instance raised TypeError. Subclasses may
        # override with real validation.
        pass

    @property
    def physical_and_template_names_equal(self):
        return self.templated_name == self.physical_name

    def _check_drop_table_created_by_splink(self, force_non_splink_table=False):
        # Refuse to drop tables Splink did not create unless forced.
        if not self.created_by_splink:
            if not force_non_splink_table:
                raise ValueError(
                    f"You've asked to drop table {self.physical_name} from your "
                    "database which is not a table created by Splink. If you really "
                    "want to drop this table, you can do so by setting "
                    "force_non_splink_table=True"
                )
        logger.debug(
            f"Dropping table with templated name {self.templated_name} and "
            f"physical name {self.physical_name}"
        )

    def METHOD_NAME(self, force_non_splink_table=False):
        raise NotImplementedError(
            "_drop_table_from_database from database not " "implemented for this linker"
        )

    def drop_table_from_database_and_remove_from_cache(
        self, force_non_splink_table=False
    ):
        """Drops the table from the underlying database, and removes it
        from the (linker) cache.
        By default this will fail if the table is not one created by Splink,
        but this check can be overridden
        Examples:
            ```py
            df_predict = linker.predict()
            df_predict.drop_table_from_database_and_remove_from_cache()
            # predictions table no longer in the database / cache
            ```
        Args:
            force_non_splink_table (bool, optional): If True, skip check if the
                table was created by Splink and always drop regardless. If False,
                only drop if table was created by Splink. Defaults to False.
        """
        self.METHOD_NAME(force_non_splink_table=force_non_splink_table)
        self.linker._remove_splinkdataframe_from_cache(self)

    def as_record_dict(self, limit=None):
        """Return the dataframe as a list of record dictionaries.
        This can be computationally expensive if the dataframe is large.
        Examples:
            ```py
            df_predict = linker.predict()
            ten_edges = df_predict.as_record_dict(10)
            ```
        Args:
            limit (int, optional): If provided, return this number of rows (equivalent
                to a limit statement in SQL). Defaults to None, meaning return all rows
        Returns:
            list: a list of records, each of which is a dictionary
        """
        raise NotImplementedError("as_record_dict not implemented for this linker")

    def as_pandas_dataframe(self, limit=None):
        """Return the dataframe as a pandas dataframe.
        This can be computationally expensive if the dataframe is large.
        Args:
            limit (int, optional): If provided, return this number of rows (equivalent
                to a limit statement in SQL). Defaults to None, meaning return all rows
        Examples:
            ```py
            df_predict = linker.predict()
            df_ten_edges = df_predict.as_pandas_dataframe(10)
            ```
        Returns:
            pandas.DataFrame: pandas Dataframe
        """
        import pandas as pd

        return pd.DataFrame(self.as_record_dict(limit=limit))

    def _repr_pretty_(self, p, cycle):
        msg = (
            f"Table name in database: `{self.physical_name}`\n"
            "\nTo retrieve records, you can call the following methods on this object:"
            "\n`.as_record_dict(limit=5)` or "
            "`.as_pandas_dataframe(limit=5)`.\n"
            "\nYou may omit the `limit` argument to return all records."
            "\n\nThis table represents the following splink entity: "
            f"{self.templated_name}"
        )
        p.text(msg)

    def to_parquet(self, filepath, overwrite=False):
        """Save the dataframe in parquet format.
        Examples:
            ```py
            df_predict = linker.predict()
            df_predict.to_parquet("model_predictions.parquet", overwrite=True)
            ```
        Args:
            filepath (str): Filepath where the parquet file will be saved.
            overwrite (bool, optional): If True, overwrites file if it already exists.
                Default is False.
        """
        raise NotImplementedError("`to_parquet` not implemented for this linker")

    def to_csv(self, filepath, overwrite=False):
        """Save the dataframe in csv format.
        Examples:
            ```py
            df_predict = linker.predict()
            df_predict.to_csv("model_predictions.csv", overwrite=True)
            ```
        Args:
            filepath (str): Filepath where csv will be saved.
            overwrite (bool, optional): If True, overwrites file if it already exists.
                Default is False.
        """
        raise NotImplementedError("`to_csv` not implemented for this linker")

    def check_file_exists(self, filepath):
        p = Path(filepath)
        if p.exists():
            raise FileExistsError(
                "The filepath you've supplied already exists. Please use "
                "either `overwrite = True` or manually move or delete the "
                "existing file."
            )
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# @starlark-rust: allow_string_literals_in_type_expr
"""
WARNING: you probably don't actually want this
extract.bzl exists for very stripped down environments (for example, building
an initrd) that need a binary (most likely from an RPM) and its library
dependencies. In almost every case _other_ than building an initrd, you
either want `feature.rpms_install` or `feature.install_buck_runnable`
If you're still here, `extract.extract` works by parsing the ELF information
in the given binaries.
It then clones the binaries and any .so's they depend on from the source
layer into the destination layer. The actual clone is very unergonomic at
this point, and it is recommended to batch all binaries to be extracted into
a single call to `extract.extract`.
This new-and-improved version of extract is capable of extracting buck-built
binaries without first installing them into a layer.
"""
load("//antlir/antlir2/bzl:debuginfo.bzl", "SplitBinaryInfo", "split_binary_anon")
load("//antlir/antlir2/bzl:macro_dep.bzl", "antlir2_dep")
load("//antlir/antlir2/bzl:types.bzl", "LayerInfo")
load("//antlir/buck2/bzl:ensure_single_output.bzl", "ensure_single_output")
load("//antlir/bzl:constants.bzl", "REPO_CFG")
load(":dependency_layer_info.bzl", "layer_dep", "layer_dep_analyze")
load(":feature_info.bzl", "FeatureAnalysis", "ParseTimeDependency", "ParseTimeFeature")
def extract_from_layer(
        layer: str | Select,
        binaries: list[str | Select] | Select) -> ParseTimeFeature.type:
    """
    Extract binaries that are installed into `layer`, most commonly by RPMs.

    This copies the binary as well as any `.so` dependencies that `ld.so --list`
    reports. All the dependencies are copied from within `layer`. Any conflicts
    (same path, different file hash) caused by the extractor will result in a
    build error.
    """
    return ParseTimeFeature(
        feature_type = "extract",
        impl = antlir2_dep("features:extract"),
        # Declared as a dep so the source layer's LayerInfo provider is
        # available to extract_analyze at analysis time.
        deps = {
            "layer": ParseTimeDependency(dep = layer, providers = [LayerInfo]),
        },
        # "source" discriminates the two extract modes in extract_analyze.
        kwargs = {
            "binaries": binaries,
            "source": "layer",
        },
        analyze_uses_context = True,
    )
def METHOD_NAME(
        src: str | Select,
        dst: str | Select,
        strip: bool | Select = True) -> ParseTimeFeature.type:
    """
    Extract a binary built by buck into the target layer.

    The `.so` dependencies in this case will be copied from the host filesystem,
    but the same conflict detection method as `extract_from_layer` is employed.
    """
    return ParseTimeFeature(
        feature_type = "extract",
        impl = antlir2_dep("features:extract"),
        # include in deps so we can look at the providers
        deps = {"src": ParseTimeDependency(dep = src, providers = [RunInfo])},
        kwargs = {
            "dst": dst,
            "source": "buck",
            # When True (and artifacts are standalone), debug info is
            # split out of the binary before installation.
            "strip": strip,
        },
        analyze_uses_context = True,
    )
# Analysis-time payload for a binary extracted from a buck-built target.
extract_buck_record = record(
    src = Artifact,
    dst = str,
)

# Analysis-time payload for binaries extracted out of an existing layer.
extract_layer_record = record(
    layer = layer_dep.type,
    binaries = list[str],
)

# Tagged union: exactly one of `buck` / `layer` is non-None, mirroring
# the "source" kwarg chosen at parse time.
extract_record = record(
    buck = [extract_buck_record.type, None],
    layer = [extract_layer_record.type, None],
)
def extract_analyze(
        ctx: "AnalyzeFeatureContext",
        source: str,
        deps: dict[str, Dependency],
        binaries: list[str] | None = None,
        src: str | None = None,
        dst: str | None = None,
        strip: bool | None = None) -> FeatureAnalysis.type:
    """Analysis for the `extract` feature.

    `source` selects which parse-time constructor produced the kwargs:
    "layer" (extract_from_layer) or "buck" (the buck-binary variant).
    """
    if source == "layer":
        layer = deps["layer"]
        return FeatureAnalysis(
            feature_type = "extract",
            data = extract_record(
                layer = extract_layer_record(
                    layer = layer_dep_analyze(layer),
                    binaries = binaries,
                ),
                buck = None,
            ),
            required_layers = [layer[LayerInfo]],
        )
    elif source == "buck":
        src = deps["src"]
        if RunInfo not in src:
            fail("'{}' does not appear to be a binary".format(src))
        src_runinfo = src[RunInfo]

        # Only strip if both strip=True and we're in opt mode (standalone binaries)
        if strip and not REPO_CFG.artifacts_require_repo:
            split_anon_target = split_binary_anon(
                ctx = ctx,
                src = src,
                objcopy = ctx.tools.objcopy,
            )
            # Promise resolves to the stripped artifact of the anon target.
            src = ctx.actions.artifact_promise(split_anon_target.map(lambda x: x[SplitBinaryInfo].stripped))
        else:
            src = ensure_single_output(src)
        return FeatureAnalysis(
            feature_type = "extract",
            data = extract_record(
                buck = extract_buck_record(
                    src = src,
                    dst = dst,
                ),
                layer = None,
            ),
            required_artifacts = [src],
            required_run_infos = [src_runinfo],
        )
    else:
        fail("invalid extract source '{}'".format(source))
5,147 | get subparser | """
The DSL is used for RPC and resource definition. The syntax of the DSL is the
same as UNIX shell syntax.
>>> req = parse("search jaychou -s=xx")
>>> req.cmd
'search'
>>> req.cmd_args
['jaychou']
"""
import argparse
import shlex
import itertools
from typing import Optional
from feeluown.argparser import (
create_fmt_parser,
add_common_cmds,
add_server_cmds,
)
from feeluown.server.data_structure import Request
from feeluown.server.excs import FuoSyntaxError
def tokenize(source):
    """Split *source* into shell-style tokens.

    Raises FuoSyntaxError when the source is malformed or spans more
    than a single line.
    """
    lexer = shlex.shlex(source, punctuation_chars=True, posix=True)
    lexer.whitespace_split = True
    try:
        tokens = [token for token in lexer]
    except ValueError as e:
        raise FuoSyntaxError(str(e)) from None
    if lexer.lineno >= 2:
        raise FuoSyntaxError("source must be only one line")
    return tokens
class ArgumentParserNoExitAndPrint(argparse.ArgumentParser):
    """An ArgumentParser that never terminates the process and never prints.

    Two scenarios are handled:

    1. On a parsing error the interpreter must keep running, so ``error``
       raises ``FuoSyntaxError`` instead of exiting.
    2. When the ``help`` action fires, nothing should be written to
       stdout/stderr and the process must not exit, so ``_print_message``
       and ``exit`` are turned into no-ops.
    """

    def _print_message(self, message, file=None):  # noqa
        # Swallow all output.
        pass

    def exit(self, status=0, message=None):  # noqa
        # Never terminate the process.
        pass

    def error(self, message):
        # Surface parse failures as DSL syntax errors.
        raise FuoSyntaxError(message)
def create_dsl_parser():
    """Build the argument parser that recognises every DSL command."""
    # pylint: disable=protected-access
    root = ArgumentParserNoExitAndPrint(add_help=False)
    cmd_subparsers = root.add_subparsers(dest='cmd')
    add_common_cmds(cmd_subparsers)
    add_server_cmds(cmd_subparsers)
    return root
class Parser:
    """Parses one line of DSL source into a :class:`Request`."""

    def __init__(self, source):
        self._source = source

    def parse(self) -> Request:
        """Parse the source to a Request object.

        argparse have little public methods, so some protected methods are used.
        """
        # pylint: disable=too-many-locals,protected-access,too-many-branches
        parser: ArgumentParserNoExitAndPrint = create_dsl_parser()
        tokens = tokenize(self._source)

        # Handle io_here token.
        # Scan for a heredoc marker ('<<'): record the following word and
        # strip both tokens so argparse never sees them.
        has_heredoc, heredoc_word = False, None
        for i, token in enumerate(tokens.copy()):
            if token == '<<':  # Heredoc document.
                has_heredoc = True
                try:
                    heredoc_word = tokens.pop(i+1)
                except IndexError:
                    raise FuoSyntaxError('no heredoc word') from None
                else:
                    tokens.pop(i)  # Pop token '<<' from tokens.
            elif token in ('<', '<<<'):
                # Other shell redirection operators are not supported.
                raise FuoSyntaxError('unknown token')

        # Parse the tokens.
        args, remain = parser.parse_known_args(tokens)
        if remain:
            raise FuoSyntaxError(f'unknown tokens {tokens}')

        # Get cmdname from the parse result.
        cmdname = getattr(args, 'cmd')
        subparser = METHOD_NAME(parser, cmdname)
        assert subparser is not None, f'parser for cmd:{cmdname} not found'

        # Get cmd args from the parse result.
        # Positionals of the sub-parser become the request's cmd_args.
        cmd_args = []
        for action in subparser._positionals._group_actions:
            cmd_args.append(getattr(args, action.dest))

        # Get req options from the parse result.
        # Options declared by the shared fmt parser belong to the request,
        # not to the individual command.
        req_options = {}
        option_names_req = []
        for parser_ in [create_fmt_parser()]:
            for action in parser_._actions:
                name = action.dest
                option_names_req.append(name)
                value = getattr(args, name)
                req_options[name] = value

        # Get cmd options from the parse result.
        # Remaining optionals (minus 'help' and request-level ones) are
        # command-specific options.
        cmd_options = {}
        for action in subparser._optionals._group_actions:
            option_name = action.dest
            if option_name == 'help':  # Ignore help action.
                continue
            if option_name not in option_names_req:
                cmd_options[option_name] = getattr(args, option_name)

        return Request(cmdname,
                       cmd_args,
                       cmd_options,
                       req_options,
                       has_heredoc=has_heredoc, heredoc_word=heredoc_word)
def METHOD_NAME(parser, cmdname) -> Optional[argparse.ArgumentParser]:
    """Return the sub-parser registered for ``cmdname``, or None.

    Finds the subparsers action (``dest='cmd'``) on *parser* and looks up
    the parser mapped to ``cmdname``.

    Args:
        parser: the root parser built by ``create_dsl_parser``.
        cmdname: name of the command whose sub-parser is wanted.

    Returns:
        The matching sub-parser, or None when no subparsers action exists
        or ``cmdname`` is not a registered command.
    """
    # pylint: disable=protected-access
    root_dest = 'cmd'
    subparser = None
    for action in parser._actions:
        if action.dest == root_dest:
            # Use .get() so an unknown command yields None (matching the
            # Optional return type that callers such as unparse() check)
            # instead of raising KeyError.
            subparser = action._name_parser_map.get(cmdname)  # type: ignore
            break
    return subparser
def parse(source):
    """Parse one line of DSL *source* and return the resulting Request."""
    dsl_parser = Parser(source)
    return dsl_parser.parse()
def unparse(request: Request):
    """Generate source code for the request object"""
    # pylint: disable=protected-access,too-many-branches
    parser = create_dsl_parser()
    subparser = METHOD_NAME(parser, request.cmd)
    if subparser is None:
        raise ValueError(f'{request.cmd}: no such cmd')

    cmdline = [request.cmd]
    # Unparse cmd args.
    # With a heredoc the args go in the document body, not on the line.
    if request.has_heredoc:
        cmdline.append(f'<<{request.heredoc_word}')
    else:
        cmdline.extend([shlex.quote(each) for each in request.cmd_args])

    # Unparse cmd options.
    # Match each option back to its argparse action to decide how it is
    # rendered (flag, repeated option, or key=value).
    for key, value in itertools.chain(
            request.cmd_options.items(), request.options.items()):
        for action in subparser._actions:
            if action.dest == key:
                # The option has a default value.
                if value is None:
                    break
                if isinstance(action, argparse._StoreTrueAction):
                    if value is True:
                        cmdline.append(f'--{key}')
                elif isinstance(action, argparse._AppendAction):
                    for each in value or []:
                        cmdline.append(f'--{key}={shlex.quote(str(each))}')
                else:
                    cmdline.append(f'--{key}={shlex.quote(str(value))}')
                break
        else:
            # No action matched `key` for this command.
            raise ValueError(f'{key}: no such option')

    cmdtext = ' '.join(cmdline)
    if request.has_heredoc:
        # Append the heredoc body followed by its terminating word.
        cmdtext += '\n'
        cmdtext += request.cmd_args[0]
        cmdtext += '\n'
        cmdtext += request.heredoc_word
    return cmdtext
from meerk40t.core.units import MM_PER_INCH, UNITS_PER_INCH, Length
from meerk40t.svgelements import Matrix
class View:
    """An affine mapping from a width x height source rectangle onto a
    destination quadrilateral.

    The destination corners are stored in (top_left, top_right,
    bottom_right, bottom_left) order; `matrix` lazily derives the affine
    transform that maps the source corners onto them. The scale / origin /
    flip / swap helpers all mutate the destination corners and invalidate
    the cached matrix.
    """

    def __init__(
        self, width, height, dpi=float(UNITS_PER_INCH), dpi_x=None, dpi_y=None
    ):
        """
        This should init the simple width and height dimensions.
        The default coordinate system is (0,0), (width,0), (width,height), (0,height), In top_left, top_right,
        bottom_right, bottom_left ordering.
        @param width:
        @param height:
        """
        # Per-axis DPI defaults to the shared dpi value.
        if dpi_x is None:
            dpi_x = dpi
        if dpi_y is None:
            dpi_y = dpi
        self.width = width
        self.height = height
        self.dpi_x = dpi_x
        self.dpi_y = dpi_y
        # Overall dpi is the mean of the two axes.
        self.dpi = (dpi_x + dpi_y) / 2.0
        self._source = None
        self._destination = None
        self._matrix = None  # built lazily by the `matrix` property

    def __str__(self):
        return f"View('{self.width}', '{self.height}', @{self.dpi})"

    @property
    def METHOD_NAME(self):
        # NOTE(review): dpi * MM_PER_INCH — a dots-per-mm value would
        # normally be dpi / MM_PER_INCH; confirm the intended units.
        return self.dpi * MM_PER_INCH

    def set_dims(self, width, height):
        # Replace the dimensions and rebuild source/destination corners.
        self.width = width
        self.height = height
        self.reset()

    def reset(self):
        # Re-derive both corner sets from width/height; destination starts
        # identical to source (identity mapping).
        width = float(Length(self.width))
        height = float(Length(self.height))
        top_left = 0, 0
        top_right = width, 0
        bottom_right = width, height
        bottom_left = 0, height
        self._source = top_left, top_right, bottom_right, bottom_left
        self._destination = top_left, top_right, bottom_right, bottom_left

    def contains(self, x, y):
        """
        This solves the AABB of the container, not the strict solution. If a view is rotated by a non-tau/4 multiple
        amount, we could generate false positives.
        @param x:
        @param y:
        @return:
        """
        # This solves the AABB of the container, not the strict solution
        x0, y0, x1, y1 = self.bbox()
        return x0 < x < x1 and y0 < y < y1

    def bbox(self):
        # Axis-aligned bounding box of the destination quadrilateral.
        return (
            min(
                self._destination[0][0],
                self._destination[1][0],
                self._destination[2][0],
                self._destination[3][0],
            ),
            min(
                self._destination[0][1],
                self._destination[1][1],
                self._destination[2][1],
                self._destination[3][1],
            ),
            max(
                self._destination[0][0],
                self._destination[1][0],
                self._destination[2][0],
                self._destination[3][0],
            ),
            max(
                self._destination[0][1],
                self._destination[1][1],
                self._destination[2][1],
                self._destination[3][1],
            ),
        )

    def scale(self, scale_x, scale_y):
        # Scale every destination corner about the origin.
        # NOTE(review): the local width/height computed below are never
        # used after the in-place multiply — candidates for removal.
        width = float(Length(self.width))
        height = float(Length(self.height))
        width *= scale_x
        height *= scale_y
        top_left, top_right, bottom_right, bottom_left = self._destination
        top_left, top_right, bottom_right, bottom_left = (
            (top_left[0] * scale_x, top_left[1] * scale_y),
            (top_right[0] * scale_x, top_right[1] * scale_y),
            (bottom_right[0] * scale_x, bottom_right[1] * scale_y),
            (bottom_left[0] * scale_x, bottom_left[1] * scale_y),
        )
        self._destination = top_left, top_right, bottom_right, bottom_left
        self._matrix = None  # invalidate cached transform

    def origin(self, origin_x, origin_y):
        # Translate the destination so the given fractional position of the
        # view (e.g. 0.5, 0.5 for the center) becomes the coordinate origin.
        width = float(Length(self.width))
        height = float(Length(self.height))
        dx = -width * origin_x
        dy = -height * origin_y
        top_left, top_right, bottom_right, bottom_left = self._destination
        self._destination = (
            (top_left[0] + dx, top_left[1] + dy),
            (top_right[0] + dx, top_right[1] + dy),
            (bottom_right[0] + dx, bottom_right[1] + dy),
            (bottom_left[0] + dx, bottom_left[1] + dy),
        )
        self._matrix = None  # invalidate cached transform

    def flip_x(self):
        # Mirror horizontally by swapping left/right corner roles.
        top_left, top_right, bottom_right, bottom_left = self._destination
        self._destination = (
            top_right,
            top_left,
            bottom_left,
            bottom_right,
        )
        self._matrix = None

    def flip_y(self):
        # Mirror vertically by swapping top/bottom corner roles.
        top_left, top_right, bottom_right, bottom_left = self._destination
        self._destination = (
            bottom_left,
            bottom_right,
            top_right,
            top_left,
        )
        self._matrix = None

    def swap_xy(self):
        # Exchange the x and y coordinate of every corner (transpose axes).
        top_left, top_right, bottom_right, bottom_left = self._destination
        self._destination = (
            (top_left[1], top_left[0]),
            (top_right[1], top_right[0]),
            (bottom_right[1], bottom_right[0]),
            (bottom_left[1], bottom_left[0]),
        )
        self._matrix = None

    def transform(
        self,
        origin_x=0.0,
        origin_y=0.0,
        user_scale_x=1.0,
        user_scale_y=1.0,
        flip_x=False,
        flip_y=False,
        swap_xy=False,
    ):
        # Rebuild the destination from scratch, applying the standard
        # device adjustments in a fixed order: scale, flips, origin, swap.
        self.reset()
        # Scale uses the reciprocal of the user scale factors.
        self.scale(1.0 / user_scale_x, 1.0 / user_scale_y)
        if flip_x:
            self.flip_x()
        if flip_y:
            self.flip_y()
        if origin_x != 0 or origin_y != 0:
            self.origin(origin_x, origin_y)
        if swap_xy:
            self.swap_xy()

    def position(self, x, y, vector=False):
        # Map a source position (numbers or Length strings relative to the
        # view size) into destination space. With vector=True, translation
        # is ignored.
        if not isinstance(x, (int, float)):
            x = Length(x, relative_length=self.width, unitless=1).units
        if not isinstance(y, (int, float)):
            y = Length(y, relative_length=self.height, unitless=1).units
        unit_x, unit_y = x, y
        if vector:
            return self.matrix.transform_vector([unit_x, unit_y])
        return self.matrix.point_in_matrix_space([unit_x, unit_y])

    def iposition(self, x, y, vector=False):
        # Inverse of position(): map a destination position back into
        # source space using the inverted matrix.
        if not isinstance(x, (int, float)):
            x = Length(x, relative_length=self.width, unitless=1).units
        if not isinstance(y, (int, float)):
            y = Length(y, relative_length=self.height, unitless=1).units
        unit_x, unit_y = x, y
        matrix = ~self.matrix
        if vector:
            return matrix.transform_vector([unit_x, unit_y])
        return matrix.point_in_matrix_space([unit_x, unit_y])

    @property
    def matrix(self):
        # Lazily compute (and cache) the affine map from source corners to
        # destination corners.
        if self._matrix is None:
            self._matrix = Matrix.map(*self._source, *self._destination)
        return self._matrix

    def dpi_to_steps(self, dpi):
        """
        Converts a DPI to a given step amount within the device length values. So M2 Nano will have 1 step per mil,
        the DPI of 500 therefore is step_x 2, step_y 2. A Galvo laser with a 200mm lens will have steps equal to
        200mm/65536 ~= 0.12 mils. So a DPI of 500 needs a step size of ~16.65 for x and y. Since 500 DPI is one dot
        per 2 mils.

        Note, steps size can be negative if our driver is x or y flipped.

        @param dpi:
        @return:
        """
        # We require vectors so any positional offsets are non-contributing.
        unit_x = self.dpi_x
        unit_y = self.dpi_y
        matrix = self.matrix
        # Length of one inch (in device steps) along each axis after the
        # view transform, taken as vector magnitudes.
        oneinch_x = abs(complex(*matrix.transform_vector([unit_x, 0])))
        oneinch_y = abs(complex(*matrix.transform_vector([0, unit_y])))
        step_x = float(oneinch_x / dpi)
        step_y = float(oneinch_y / dpi)
        return step_x, step_y
import pytest
from funcy import first
from dvc.exceptions import InvalidArgumentError
from dvc.repo.experiments.exceptions import UnresolvedExpNamesError
from dvc.repo.experiments.utils import exp_refs_by_rev
def test_remove_experiments_by_ref(tmp_dir, scm, dvc, exp_stage, caplog):
    """Removing experiments by ref name deletes exactly the named refs.

    A list containing an unknown name must fail without removing anything.
    """
    queue_length = 3
    ref_info_list = []
    ref_name_list = []
    for i in range(queue_length):
        results = dvc.experiments.run(exp_stage.addressing, params=[f"foo={i}"])
        ref_info = first(exp_refs_by_rev(scm, first(results)))
        ref_info_list.append(ref_info)
        ref_name_list.append(str(ref_info))

    with pytest.raises(InvalidArgumentError):
        dvc.experiments.remove(ref_name_list[:2] + ["non-exist"])
    # The failed call must not have removed any of the valid refs.
    assert scm.get_ref(ref_name_list[0]) is not None
    assert scm.get_ref(ref_name_list[1]) is not None
    assert scm.get_ref(ref_name_list[2]) is not None

    assert set(dvc.experiments.remove(ref_name_list[:2])) == set(ref_name_list[:2])
    assert scm.get_ref(ref_name_list[0]) is None
    assert scm.get_ref(ref_name_list[1]) is None
    assert scm.get_ref(ref_name_list[2]) is not None
def test_remove_all_queued_experiments(tmp_dir, scm, dvc, exp_stage):
    """remove(queue=True) clears queued experiments but keeps completed ones."""
    queue_length = 3
    for i in range(queue_length):
        dvc.experiments.run(exp_stage.addressing, params=[f"foo={i}"], queue=True)
    # One experiment is actually executed, producing a real exp ref.
    results = dvc.experiments.run(exp_stage.addressing, params=[f"foo={queue_length}"])
    ref_info = first(exp_refs_by_rev(scm, first(results)))
    assert len(dvc.experiments.stash_revs) == queue_length
    assert len(dvc.experiments.remove(queue=True)) == queue_length
    assert len(dvc.experiments.stash_revs) == 0
    # The completed experiment's ref must survive the queue purge.
    assert scm.get_ref(str(ref_info)) is not None
def test_remove_special_queued_experiments(tmp_dir, scm, dvc, exp_stage):
    """Queued and completed experiments can be removed in one call, addressed
    by queue name, abbreviated stash rev, or full exp ref."""
    dvc.experiments.run(
        exp_stage.addressing, params=["foo=1"], queue=True, name="queue1"
    )
    dvc.experiments.run(
        exp_stage.addressing, params=["foo=2"], queue=True, name="queue2"
    )
    dvc.experiments.run(
        exp_stage.addressing, params=["foo=3"], queue=True, name="queue3"
    )
    queue_revs = {
        entry.name: entry.stash_rev
        for entry in dvc.experiments.celery_queue.iter_queued()
    }
    assert len(queue_revs) == 3
    results = dvc.experiments.run(exp_stage.addressing, params=["foo=4"])
    ref_info1 = first(exp_refs_by_rev(scm, first(results)))
    results = dvc.experiments.run(exp_stage.addressing, params=["foo=5"])
    ref_info2 = first(exp_refs_by_rev(scm, first(results)))
    assert scm.get_ref(str(ref_info1)) is not None
    assert scm.get_ref(str(ref_info2)) is not None

    # Mix the three addressing forms in a single remove() call.
    rev2 = queue_revs["queue2"]
    assert set(dvc.experiments.remove(["queue1", rev2[:5], str(ref_info1)])) == {
        "queue1",
        rev2[:5],
        str(ref_info1),
    }
    assert len(list(dvc.experiments.celery_queue.iter_queued())) == 1
    assert scm.get_ref(str(ref_info1)) is None
    assert scm.get_ref(str(ref_info2)) is not None
def METHOD_NAME(tmp_dir, scm, dvc, exp_stage):
    """remove(all_commits=True) deletes completed exp refs from every
    baseline commit while leaving queued (stashed) experiments intact."""
    results = dvc.experiments.run(exp_stage.addressing, params=["foo=1"])
    ref_info1 = first(exp_refs_by_rev(scm, first(results)))
    dvc.experiments.run(exp_stage.addressing, params=["foo=2"], queue=True)
    # Create a second baseline commit with its own experiments.
    scm.add(["dvc.yaml", "dvc.lock", "copy.py", "params.yaml", "metrics.yaml"])
    scm.commit("update baseline")
    results = dvc.experiments.run(exp_stage.addressing, params=["foo=3"])
    ref_info2 = first(exp_refs_by_rev(scm, first(results)))
    dvc.experiments.run(exp_stage.addressing, params=["foo=4"], queue=True)

    assert set(dvc.experiments.remove(all_commits=True)) == {
        ref_info1.name,
        ref_info2.name,
    }
    # Queued experiments are untouched by all_commits removal.
    assert len(dvc.experiments.stash_revs) == 2
    assert scm.get_ref(str(ref_info2)) is None
    assert scm.get_ref(str(ref_info1)) is None
@pytest.mark.parametrize("use_url", [True, False])
def test_remove_remote(tmp_dir, scm, dvc, exp_stage, git_upstream, use_url):
remote = git_upstream.url if use_url else git_upstream.remote
ref_info_list = []
exp_list = []
for i in range(3):
results = dvc.experiments.run(exp_stage.addressing, params=[f"foo={i}"])
exp = first(results)
exp_list.append(exp)
ref_info = first(exp_refs_by_rev(scm, exp))
ref_info_list.append(ref_info)
dvc.experiments.push(remote, [ref_info.name])
assert git_upstream.tmp_dir.scm.get_ref(str(ref_info)) == exp
dvc.experiments.remove(
git_remote=remote,
exp_names=[str(ref_info_list[0]), ref_info_list[1].name],
)
assert git_upstream.tmp_dir.scm.get_ref(str(ref_info_list[0])) is None
assert git_upstream.tmp_dir.scm.get_ref(str(ref_info_list[1])) is None
assert git_upstream.tmp_dir.scm.get_ref(str(ref_info_list[2])) == exp_list[2]
with pytest.raises(
UnresolvedExpNamesError, match=f"Experiment 'foo' does not exist in '{remote}'"
):
dvc.experiments.remove(git_remote=remote, exp_names=["foo"])
def test_remove_experiments_by_rev(tmp_dir, scm, dvc, exp_stage):
    """remove(rev=baseline) only deletes completed experiments derived from
    that baseline; queued entries and other baselines are untouched."""
    baseline = scm.get_rev()
    results = dvc.experiments.run(exp_stage.addressing, params=["foo=1"])
    baseline_exp_ref = first(exp_refs_by_rev(scm, first(results)))
    dvc.experiments.run(
        exp_stage.addressing, params=["foo=2"], queue=True, name="queue2"
    )
    # Second baseline with its own completed and queued experiments.
    scm.commit("new_baseline")
    results = dvc.experiments.run(exp_stage.addressing, params=["foo=3"])
    ref_info = first(exp_refs_by_rev(scm, first(results)))
    new_exp_ref = str(ref_info)
    dvc.experiments.run(
        exp_stage.addressing, params=["foo=4"], queue=True, name="queue4"
    )
    assert dvc.experiments.remove(rev=baseline) == [baseline_exp_ref.name]
    queue_revs = {
        entry.name: entry.stash_rev
        for entry in dvc.experiments.celery_queue.iter_queued()
    }
    assert scm.get_ref(str(baseline_exp_ref)) is None
    assert "queue2" in queue_revs
    assert scm.get_ref(new_exp_ref) is not None
    assert "queue4" in queue_revs
def test_remove_multi_rev(tmp_dir, scm, dvc, exp_stage):
    """remove(rev=[...]) accepts multiple baselines and removes the
    experiments of each."""
    baseline = scm.get_rev()
    results = dvc.experiments.run(exp_stage.addressing, params=["foo=1"])
    baseline_exp_ref = first(exp_refs_by_rev(scm, first(results)))
    dvc.experiments.run(
        exp_stage.addressing, params=["foo=2"], queue=True, name="queue2"
    )
    scm.commit("new_baseline")
    results = dvc.experiments.run(exp_stage.addressing, params=["foo=3"])
    new_exp_ref = first(exp_refs_by_rev(scm, first(results)))
    assert set(dvc.experiments.remove(rev=[baseline, scm.get_rev()])) == {
        baseline_exp_ref.name,
        new_exp_ref.name,
    }
    assert scm.get_ref(str(baseline_exp_ref)) is None
    assert scm.get_ref(str(new_exp_ref)) is None
from decimal import Decimal
import django
from django.core.cache import cache
from django.conf import settings as d_settings
from tendenci.apps.site_settings.models import Setting
from tendenci.apps.site_settings.cache import SETTING_PRE_KEY
def get_setting_key(items=None):
    """
    Generate a setting key string from a list of string items.
    Spaces are also removed as memcached doesn't allow space characters.

    Returns None when no items are supplied.
    """
    # Default is None rather than a mutable [] default argument; both are
    # falsy, so callers relying on the old default see identical behavior.
    if not items:
        return None
    # list(items) also accepts tuples/other sequences of strings.
    key = ('.'.join([d_settings.CACHE_PRE_KEY, SETTING_PRE_KEY] + list(items))).replace(' ', '')
    return key
def delete_all_settings_cache():
    """Evict the cache entry that stores all settings."""
    cache.delete(get_setting_key(['all']))
def cache_setting(scope, scope_category, name, value):
    """
    Caches a single setting within a scope
    and scope category
    """
    setting_key = get_setting_key([scope, scope_category, name])
    cache.set(setting_key, value)
def cache_settings(scope, scope_category):
    """
    Caches all settings within a scope and scope category
    """
    settings = Setting.objects.filter(scope=scope, scope_category=scope_category)
    # Iterating an empty queryset is a no-op, so the former `if settings:`
    # truthiness guard (which forced an extra evaluation) is unnecessary.
    for setting in settings:
        key = get_setting_key([setting.scope, setting.scope_category, setting.name])
        cache.set(key, setting.get_value())
def delete_setting_cache(scope, scope_category, name):
    """
    Deletes a single setting within a
    scope and scope category
    """
    setting_key = get_setting_key([scope, scope_category, name])
    cache.delete(setting_key)
def METHOD_NAME(scope, scope_category):
    """
    Deletes all settings within a scope
    and scope category
    """
    scoped_settings = Setting.objects.filter(
        scope=scope, scope_category=scope_category
    )
    for setting in scoped_settings:
        cache.delete(
            get_setting_key([setting.scope, setting.scope_category, setting.name])
        )
def get_setting(scope, scope_category, name):
    """
    Gets a single setting value from within a scope
    and scope category.
    Returns the value of the setting if it exists
    otherwise it returns an empty string
    """
    key = get_setting_key([scope, scope_category, name])
    # Only touch the cache once Django's app registry is ready.
    if django.apps.apps.models_ready:
        setting = cache.get(key)
    else:
        setting = None
    if setting is None:
        # setting is not in the cache
        try:
            # try to get the setting and cache it
            filters = {
                'scope': scope,
                'scope_category': scope_category,
                'name': name
            }
            setting = Setting.objects.get(**filters)
            cache_setting(setting.scope, setting.scope_category, setting.name, setting)
        except Exception:
            setting = None
    # check if the setting has been set and evaluate the value
    if setting is not None:
        try:
            # test is get_value will work
            value = setting.get_value().strip()
        except AttributeError:
            return u''
        # convert data types
        if setting.data_type == 'boolean':
            # value[:1] instead of value[0]: an empty stored value used to
            # raise IndexError here; it now evaluates to False.
            value = value[:1].lower() == 't'
        if setting.data_type == 'decimal':
            if value:
                value = Decimal(value)
            else:
                value = 0
        if setting.data_type == 'int':
            if value.strip():
                value = int(value.strip())
            else:
                value = 0  # default to 0
        if setting.data_type == 'file':
            from tendenci.apps.files.models import File as TFile
            try:
                tfile = TFile.objects.get(pk=value)
            except TFile.DoesNotExist:
                tfile = None
            value = tfile
        return value
    return u''
def get_global_setting(name):
    # Convenience wrapper: look up a site-wide ('site'/'global') setting.
    return get_setting('site', 'global', name)
def get_module_setting(scope_category, name):
    # Convenience wrapper: look up a module-scoped setting.
    return get_setting('module', scope_category, name)
def check_setting(scope, scope_category, name):
    """Return True if the setting exists, consulting the cache before the db.

    A negative db result is memoised under a separate "<key>.missing" cache
    entry so repeated checks for absent settings skip the db.
    """
    # check cache first
    key = get_setting_key([scope, scope_category, name])
    setting = cache.get(key)
    if setting is not None:
        return True
    missing_key = get_setting_key([scope, scope_category, name, "missing"])
    missing = cache.get(missing_key)
    if missing is None:
        # check the db if it is not in the cache
        exists = Setting.objects.filter(scope=scope,
            scope_category=scope_category, name=name).exists()
        # cache that it does not exist
        if not exists:
            # set to True to signify that it is missing so we do not
            # come back into this if statement and query db again
            # NOTE(review): this missing-flag is never invalidated, so a
            # setting created later may still read as absent — confirm.
            cache.set(missing_key, True)
        return exists
    return False
def get_form_list(user):
    """
    Generate a list of 2-tuples of form id and form title
    This will be used as a special select
    """
    from tendenci.apps.forms_builder.forms.models import Form
    from tendenci.apps.perms.utils import get_query_filters
    filters = get_query_filters(user, 'forms.view_form')
    forms = Form.objects.filter(filters)
    # Build the (pk, title) pairs in a single comprehension rather than a
    # manual append loop; the queryset is still iterated only once.
    return [(form.pk, form.title) for form in forms]
def get_box_list(user):
    """
    Generate a list of 2-tuples of box id and box title
    This will be used as a special select
    """
    from tendenci.apps.boxes.models import Box
    from tendenci.apps.perms.utils import get_query_filters
    filters = get_query_filters(user, 'boxes.view_box')
    boxes = Box.objects.filter(filters)
    # Leading ('', 'None') entry allows "no box" to be selected; the rest
    # is built in one comprehension instead of a manual append loop.
    return [('', 'None')] + [(box.pk, box.title) for box in boxes]
def get_group_list(user):
    """
    Generate a list of 2-tuples of group id and group name
    This will be used as a special select

    Returns (choices, initial_group_id); the initial group is picked by
    preference: site-entity group matching the site display name, then any
    group on entity 1, then the first group.
    """
    from tendenci.apps.user_groups.models import Group
    groups = Group.objects.filter(status=True,
                        status_detail='active'
                        ).exclude(type='system_generated'
                        ).order_by('name')
    if not groups.exists():
        # no groups - create one
        groups = [Group.objects.get_or_create_default(user)]
        initial_group = groups[0]
    else:
        # Prefer the group tied to entity 1 whose entity name matches the
        # configured site display name.
        [initial_group] = groups.filter(
                        entity__id=1,
                        entity__entity_name__iexact=get_global_setting(
                                                    'sitedisplayname')
                        )[:1] or [None]
        if not initial_group:
            [initial_group] = groups.filter(
                        entity__id=1)[:1] or [None]
        if not initial_group:
            initial_group = groups[0]
    choices = []
    for group in groups:
        choices.append((group.pk, group.name))

    return choices, initial_group.id
import copy
import math
from unittest import TestCase
from matplotlib import pyplot as plt
from skimage.transform import rotate
from pylinac.cheese import TomoCheese, TomoCheeseResult
from tests_basic.utils import (
CloudFileMixin,
FromDemoImageTesterMixin,
FromURLTesterMixin,
FromZipTesterMixin,
InitTesterMixin,
save_file,
)
TEST_DIR = "Tomo"
class TestInstantiation(
    TestCase,
    InitTesterMixin,
    FromDemoImageTesterMixin,
    FromURLTesterMixin,
    FromZipTesterMixin,
):
    """Exercise every supported TomoCheese constructor via the shared
    instantiation-test mixins (init path, demo images, URL, zip)."""

    klass = TomoCheese
    init_file = [TEST_DIR, "TomoTherapy Cheese Phantom"]
    demo_load_method = "from_demo_images"
    url = "TomoCheese.zip"
    zip = [TEST_DIR, "TomoCheese.zip"]
    is_folder = True  # init_file points at a folder of DICOM slices
class TestResults(TestCase):
    """Verify the results()/results_data() reporting API of TomoCheese."""

    @classmethod
    def setUpClass(cls) -> None:
        # Analyze once and share across all result-format tests.
        cls.cheese = TomoCheese.from_demo_images()
        cls.cheese.analyze()

    def test_results_as_str(self):
        assert isinstance(self.cheese.results(), str)

    def test_results_as_list(self):
        r = self.cheese.results(as_list=True)
        assert isinstance(r, list)

    def test_results_data(self):
        # Structured result object mirrors the analyzed ROI values.
        r = self.cheese.results_data()
        assert isinstance(r, TomoCheeseResult)
        assert self.cheese.module.rois["5"].pixel_value == r.roi_5["median"]
        r = self.cheese.results_data(as_dict=True)
        assert isinstance(r, dict)
        assert self.cheese.module.rois["9"].std == r["roi_9"]["std"]
class TestGeneral(TestCase):
    """Smoke tests for module-level behavior."""

    def test_demo(self):
        # The bundled demo should run end-to-end without raising.
        TomoCheese.run_demo()
class TestAnalysis(TestCase):
    """Robustness of analyze() against cropped, shifted, and rotated input."""

    def test_cropping_before_analysis(self):
        cheese = TomoCheese.from_demo_images()
        for img in cheese.dicom_stack:
            img.crop(pixels=20, edges=("bottom",))
        # shouldn't raise
        cheese.analyze()

    def METHOD_NAME(self):
        """Rolling (shifting) the phantom by a nominal amount shouldn't affect analysis"""
        cheese = TomoCheese.from_demo_images()
        cheese.analyze()
        original_roi_1 = copy.copy(cheese.module.rois["1"].pixel_value)
        for img in cheese.dicom_stack:
            img.roll(direction="x", amount=20)
        cheese.analyze()
        new_roi_1 = cheese.module.rois["1"].pixel_value
        assert math.isclose(original_roi_1, new_roi_1, abs_tol=3)

    def test_rotating_phantom(self):
        """Check that a roll is corrected"""
        cheese = TomoCheese.from_demo_images()
        cheese.analyze()
        assert math.isclose(cheese.catphan_roll, -0.25, abs_tol=0.05)
        # Rotate every slice by 3 degrees; detected roll should follow.
        for img in cheese.dicom_stack:
            img.array = rotate(img.array, angle=3, mode="edge")
        cheese.analyze()
        assert math.isclose(cheese.catphan_roll, -3.25, abs_tol=0.05)

    def test_too_much_rotation_resets(self):
        # too much roll will reset to 0 however
        cheese = TomoCheese.from_demo_images()
        for img in cheese.dicom_stack:
            img.array = rotate(img.array, angle=13, mode="edge")
        cheese.analyze()
        assert cheese.catphan_roll == 0

    def test_roi_config(self):
        # A custom ROI/density config is stored on the instance.
        cheese = TomoCheese.from_demo_images()
        config = {"3": {"density": 4.12}}
        cheese.analyze(roi_config=config)
        self.assertEqual(cheese.roi_config, config)
class TestPlottingSaving(TestCase):
    """Plotting and file-output paths (PDF, images, density curves)."""

    @classmethod
    def setUpClass(cls):
        cls.cheese = TomoCheese.from_demo_images()
        cls.cheese.analyze()

    @classmethod
    def tearDownClass(cls):
        # Release all matplotlib figures created during the tests.
        plt.close("all")

    def test_save_pdf(self):
        # shouldn't raise
        save_file(self.cheese.publish_pdf, "temp")

    def test_set_figure_size(self):
        self.cheese.plot_analyzed_image(figsize=(8, 13))
        fig = plt.gcf()
        self.assertEqual(fig.bbox_inches.height, 13)
        self.assertEqual(fig.bbox_inches.width, 8)

    def test_save_image(self):
        save_file(self.cheese.save_analyzed_image)

    def test_save_subimage_fails(self):
        """There is no sub-images for the tomo"""
        with self.assertRaises(NotImplementedError):
            self.cheese.save_analyzed_subimage()

    def test_plotting_without_density_fails(self):
        cheese = TomoCheese.from_demo_images()
        cheese.analyze()  # no roi config
        with self.assertRaises(ValueError):
            cheese.plot_density_curve()

    def test_plotting_density(self):
        # Providing a density config enables the density curve plot.
        cheese = TomoCheese.from_demo_images()
        cheese.analyze(roi_config={"1": {"density": 1.2}})
        cheese.plot_density_curve()
class TomoCheeseMixin(CloudFileMixin):
    """Shared test scaffold for TomoCheese datasets stored in cloud files.

    Subclasses set `hu_values`, `origin_slice`, and `expected_roll` for
    their particular dataset; the mixin provides the common assertions.
    """

    model = TomoCheese
    origin_slice = 0  # expected slice index of the phantom center
    dir_path = ["Tomo"]
    hu_values = {}  # expected median HU per ROI name, set by subclasses
    expected_roll = -0.2  # expected detected phantom roll in degrees

    @classmethod
    def setUpClass(cls):
        filename = cls.get_filename()
        cls.cheese = cls.model.from_zip(filename)

    @classmethod
    def tearDownClass(cls):
        plt.close("all")

    def test_slice_locations(self):
        """Test the locations of the slices of interest."""
        self.assertAlmostEqual(self.cheese.origin_slice, self.origin_slice, delta=1)

    def test_phantom_roll(self):
        """Test the roll of the phantom."""
        self.assertAlmostEqual(self.cheese.catphan_roll, self.expected_roll, delta=0.3)

    def test_HU_values(self):
        """Test HU values."""
        for name, roi in self.cheese.module.rois.items():
            exp_val = self.hu_values[name]
            meas_val = roi.pixel_value
            self.assertAlmostEqual(exp_val, meas_val, delta=5)

    def test_pdf(self):
        save_file(self.cheese.publish_pdf, "temp")
class TomoCheeseDemo(TomoCheeseMixin, TestCase):
    """Regression expectations for the bundled TomoCheese demo dataset."""

    origin_slice = 24
    expected_roll = -0.23
    # ROI name -> expected HU value measured on the demo images.
    hu_values = {
        "1": 16,
        "2": 20,
        "3": 23,
        "4": 2,
        "5": 16,
        "6": -669,
        "7": 15,
        "8": 25,
        "9": 653,
        "10": 25,
        "11": 24,
        "12": 102,
        "13": 7,
        "14": -930,
        "15": 23,
        "16": 14,
        "17": -516,
        "18": 447,
        "19": 269,
        "20": 14,
    }

    @classmethod
    def setUpClass(cls):
        # Demo images ship with the package, so no cloud download is needed.
        cls.cheese = TomoCheese.from_demo_images()
        cls.cheese.analyze()
5,152 | test string vectorised | # Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""Test function :func:`iris.coord_categorisation.add_categorised_coord`."""
# import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests # isort:skip
from unittest import mock
from cf_units import CALENDARS as calendars
from cf_units import Unit
import numpy as np
from iris.coord_categorisation import add_categorised_coord, add_day_of_year
from iris.coords import DimCoord
from iris.cube import Cube
class Test_add_categorised_coord(tests.IrisTest):
    """Unit tests for add_categorised_coord using mocked cube/coord objects."""

    def setUp(self):
        # Factor out common variables and objects.
        self.cube = mock.Mock(name="cube", coords=mock.Mock(return_value=[]))
        # 2-D points array: exercises multi-dimensional coordinate support.
        self.coord = mock.Mock(
            name="coord", points=np.arange(12).reshape(3, 4)
        )
        self.units = "units"
        # Stand-in for the callable numpy.vectorize would return.
        self.vectorised = mock.Mock(name="vectorized_result")

    def test_vectorise_call(self):
        # Check that the function being passed through gets called with
        # numpy.vectorize, before being applied to the points array.
        # The reason we use numpy.vectorize is to support multi-dimensional
        # coordinate points.
        def fn(coord, v):
            return v**2

        with mock.patch(
            "numpy.vectorize", return_value=self.vectorised
        ) as vectorise_patch:
            with mock.patch("iris.coords.AuxCoord") as aux_coord_constructor:
                add_categorised_coord(
                    self.cube, "foobar", self.coord, fn, units=self.units
                )

        # Check the constructor of AuxCoord gets called with the
        # appropriate arguments.
        # Start with the vectorised function.
        vectorise_patch.assert_called_once_with(fn)
        # Check the vectorize wrapper gets called with the appropriate args.
        self.vectorised.assert_called_once_with(self.coord, self.coord.points)
        # Check the AuxCoord constructor itself.
        aux_coord_constructor.assert_called_once_with(
            self.vectorised(self.coord, self.coord.points),
            units=self.units,
            attributes=self.coord.attributes.copy(),
        )
        # And check adding the aux coord to the cube mock.
        self.cube.add_aux_coord.assert_called_once_with(
            aux_coord_constructor(), self.cube.coord_dims(self.coord)
        )

    def METHOD_NAME(self):
        # Check that special case handling of a vectorized string returning
        # function is taking place.
        def fn(coord, v):
            return "0123456789"[:v]

        with mock.patch(
            "numpy.vectorize", return_value=self.vectorised
        ) as vectorise_patch:
            with mock.patch("iris.coords.AuxCoord") as aux_coord_constructor:
                add_categorised_coord(
                    self.cube, "foobar", self.coord, fn, units=self.units
                )
        # String results should be coerced to a fixed-width bytes dtype
        # (vectorize with otypes=[object], then astype "|S64").
        self.assertEqual(
            aux_coord_constructor.call_args[0][0],
            vectorise_patch(fn, otypes=[object])(
                self.coord, self.coord.points
            ).astype("|S64"),
        )
class Test_add_day_of_year(tests.IrisTest):
    """Check add_day_of_year across all cf_units calendars."""

    def setUp(self):
        # Expected day-of-year sequences for 10 days starting 1980-12-25;
        # year length (and hence the wrap point) differs by calendar.
        self.expected = {
            "standard": np.array(list(range(360, 367)) + list(range(1, 4))),
            "gregorian": np.array(list(range(360, 367)) + list(range(1, 4))),
            "proleptic_gregorian": np.array(
                list(range(360, 367)) + list(range(1, 4))
            ),
            "noleap": np.array(list(range(359, 366)) + list(range(1, 4))),
            "julian": np.array(list(range(360, 367)) + list(range(1, 4))),
            "all_leap": np.array(list(range(360, 367)) + list(range(1, 4))),
            "365_day": np.array(list(range(359, 366)) + list(range(1, 4))),
            "366_day": np.array(list(range(360, 367)) + list(range(1, 4))),
            "360_day": np.array(list(range(355, 361)) + list(range(1, 5))),
        }

    def make_cube(self, calendar):
        """Build a 10-point cube with a time coord in the given calendar."""
        n_times = 10
        cube = Cube(np.arange(n_times))
        time_coord = DimCoord(
            np.arange(n_times),
            standard_name="time",
            units=Unit("days since 1980-12-25", calendar=calendar),
        )
        cube.add_dim_coord(time_coord, 0)
        return cube

    def test_calendars(self):
        """day_of_year points must match the per-calendar expectations."""
        for calendar in calendars:
            # Skip the Julian calendar due to
            # https://github.com/Unidata/netcdftime/issues/13
            # Remove this if block once the issue is resolved.
            if calendar == "julian":
                continue
            cube = self.make_cube(calendar)
            add_day_of_year(cube, "time")
            points = cube.coord("day_of_year").points
            expected_points = self.expected[calendar]
            msg = "Test failed for the following calendar: {}."
            self.assertArrayEqual(
                points, expected_points, err_msg=msg.format(calendar)
            )
if __name__ == "__main__":
tests.main() |
5,153 | expand rates | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import numpy as np
import torch
import torch.nn as nn
from mmengine.logging import print_log
from mmengine.model import BaseModule
from torch import Tensor
from .utils import METHOD_NAME, get_single_padding
class BaseConvRFSearchOp(BaseModule):
    """Base class for receptive-field-search convolution operators.

    Args:
        op_layer (nn.Module): pytorch module, e,g, Conv2d
        global_config (dict): config dict.
    """

    def __init__(self, op_layer: nn.Module, global_config: dict):
        super().__init__()
        self.op_layer = op_layer
        self.global_config = global_config

    def normlize(self, weights: nn.Parameter) -> nn.Parameter:
        """Normalize weights.

        Args:
            weights (nn.Parameter): Weights to be normalized.

        Returns:
            nn.Parameters: Normalized weights.
        """
        # Scale absolute magnitudes so they sum to one.
        magnitudes = torch.abs(weights)
        return magnitudes / torch.sum(magnitudes)
class Conv2dRFSearchOp(BaseConvRFSearchOp):
    """Enable Conv2d with receptive field searching ability.

    Args:
        op_layer (nn.Module): pytorch module, e,g, Conv2d
        global_config (dict): config dict. Defaults to None.
            By default this must include:

            - "init_alphas": The value for initializing weights of each branch.
            - "num_branches": The controller of the size of
              search space (the number of branches).
            - "exp_rate": The controller of the sparsity of search space.
            - "mmin": The minimum dilation rate.
            - "mmax": The maximum dilation rate.

            Extra keys may exist, but are used by RFSearchHook, e.g., "step",
            "max_step", "search_interval", and "skip_layer".
        verbose (bool): Determines whether to print rf-next
            related logging messages.
            Defaults to True.
    """

    def __init__(self,
                 op_layer: nn.Module,
                 global_config: dict,
                 verbose: bool = True):
        super().__init__(op_layer, global_config)
        assert global_config is not None, 'global_config is None'
        # Number of parallel dilation branches searched over (2 or 3).
        self.num_branches = global_config['num_branches']
        assert self.num_branches in [2, 3]
        self.verbose = verbose
        init_dilation = op_layer.dilation
        # Candidate (h, w) dilation rates expanded around the initial dilation.
        self.dilation_rates = METHOD_NAME(init_dilation, global_config)
        # For 1xN or even kernels, dilation along that axis has no effect on
        # the receptive field center, so pin it to the layer's original value.
        if self.op_layer.kernel_size[
                0] == 1 or self.op_layer.kernel_size[0] % 2 == 0:
            self.dilation_rates = [(op_layer.dilation[0], r[1])
                                   for r in self.dilation_rates]
        if self.op_layer.kernel_size[
                1] == 1 or self.op_layer.kernel_size[1] % 2 == 0:
            self.dilation_rates = [(r[0], op_layer.dilation[1])
                                   for r in self.dilation_rates]

        # Learnable per-branch mixing weights (alphas).
        self.branch_weights = nn.Parameter(torch.Tensor(self.num_branches))
        if self.verbose:
            print_log(f'Expand as {self.dilation_rates}', 'current')
        nn.init.constant_(self.branch_weights, global_config['init_alphas'])

    def forward(self, input: Tensor) -> Tensor:
        """Run the conv once per candidate dilation and sum the outputs
        weighted by the normalized branch weights."""
        norm_w = self.normlize(self.branch_weights[:len(self.dilation_rates)])
        if len(self.dilation_rates) == 1:
            # Single candidate: no weighting needed.
            outputs = [
                nn.functional.conv2d(
                    input,
                    weight=self.op_layer.weight,
                    bias=self.op_layer.bias,
                    stride=self.op_layer.stride,
                    padding=self.get_padding(self.dilation_rates[0]),
                    dilation=self.dilation_rates[0],
                    groups=self.op_layer.groups,
                )
            ]
        else:
            # Padding is recomputed per dilation so all branch outputs share
            # the same spatial size and can be summed.
            outputs = [
                nn.functional.conv2d(
                    input,
                    weight=self.op_layer.weight,
                    bias=self.op_layer.bias,
                    stride=self.op_layer.stride,
                    padding=self.get_padding(r),
                    dilation=r,
                    groups=self.op_layer.groups,
                ) * norm_w[i] for i, r in enumerate(self.dilation_rates)
            ]
        output = outputs[0]
        for i in range(1, len(self.dilation_rates)):
            output += outputs[i]
        return output

    def estimate_rates(self) -> None:
        """Estimate new dilation rate based on trained branch_weights."""
        norm_w = self.normlize(self.branch_weights[:len(self.dilation_rates)])
        if self.verbose:
            print_log(
                'Estimate dilation {} with weight {}.'.format(
                    self.dilation_rates,
                    norm_w.detach().cpu().numpy().tolist()), 'current')

        # Weighted average of candidate rates, rounded and clipped to
        # [mmin, mmax]; this collapses the search back to a single rate.
        sum0, sum1, w_sum = 0, 0, 0
        for i in range(len(self.dilation_rates)):
            sum0 += norm_w[i].item() * self.dilation_rates[i][0]
            sum1 += norm_w[i].item() * self.dilation_rates[i][1]
            w_sum += norm_w[i].item()
        estimated = [
            np.clip(
                int(round(sum0 / w_sum)), self.global_config['mmin'],
                self.global_config['mmax']).item(),
            np.clip(
                int(round(sum1 / w_sum)), self.global_config['mmin'],
                self.global_config['mmax']).item()
        ]
        self.op_layer.dilation = tuple(estimated)
        self.op_layer.padding = self.get_padding(self.op_layer.dilation)
        self.dilation_rates = [tuple(estimated)]
        if self.verbose:
            print_log(f'Estimate as {tuple(estimated)}', 'current')

    def METHOD_NAME(self) -> None:
        """Expand dilation rate."""
        # Re-open the search space around the currently estimated dilation
        # and reset the branch weights to their initial value.
        dilation = self.op_layer.dilation
        dilation_rates = METHOD_NAME(dilation, self.global_config)
        # Same even/1xN axis pinning as in __init__.
        if self.op_layer.kernel_size[
                0] == 1 or self.op_layer.kernel_size[0] % 2 == 0:
            dilation_rates = [(dilation[0], r[1]) for r in dilation_rates]
        if self.op_layer.kernel_size[
                1] == 1 or self.op_layer.kernel_size[1] % 2 == 0:
            dilation_rates = [(r[0], dilation[1]) for r in dilation_rates]

        self.dilation_rates = copy.deepcopy(dilation_rates)
        if self.verbose:
            print_log(f'Expand as {self.dilation_rates}', 'current')
        nn.init.constant_(self.branch_weights,
                          self.global_config['init_alphas'])

    def get_padding(self, dilation) -> tuple:
        """Return the (h, w) padding that keeps output size constant for the
        given dilation."""
        padding = (get_single_padding(self.op_layer.kernel_size[0],
                                      self.op_layer.stride[0], dilation[0]),
                   get_single_padding(self.op_layer.kernel_size[1],
                                      self.op_layer.stride[1], dilation[1]))
        return padding
5,154 | determine pydantic field type | import copy
import string
from random import choices
from typing import (
Any,
Callable,
Dict,
List,
Optional,
Set,
TYPE_CHECKING,
Type,
Union,
cast,
)
import pydantic
from pydantic.fields import ModelField
from ormar.models.mixins.relation_mixin import RelationMixin # noqa: I100, I202
from ormar.queryset.utils import translate_list_to_dict
class PydanticMixin(RelationMixin):
    """Mixin that can generate a plain pydantic model from an ormar model,
    recursively converting related ormar models as well."""

    # Cache of generated models keyed by "<class>_<include>_<exclude>".
    __cache__: Dict[str, Type[pydantic.BaseModel]] = {}

    if TYPE_CHECKING:  # pragma: no cover
        __fields__: Dict[str, ModelField]
        _skip_ellipsis: Callable
        _get_not_excluded_fields: Callable

    @classmethod
    def get_pydantic(
        cls, *, include: Union[Set, Dict] = None, exclude: Union[Set, Dict] = None
    ) -> Type[pydantic.BaseModel]:
        """
        Returns a pydantic model out of ormar model.

        Converts also nested ormar models into pydantic models.

        Can be used to fully exclude certain fields in fastapi response and requests.

        :param include: fields of own and nested models to include
        :type include: Union[Set, Dict, None]
        :param exclude: fields of own and nested models to exclude
        :type exclude: Union[Set, Dict, None]
        """
        # relation_map guards against infinite recursion on cyclic relations.
        relation_map = translate_list_to_dict(cls._iterate_related_models())
        return cls._convert_ormar_to_pydantic(
            include=include, exclude=exclude, relation_map=relation_map
        )

    @classmethod
    def _convert_ormar_to_pydantic(
        cls,
        relation_map: Dict[str, Any],
        include: Union[Set, Dict] = None,
        exclude: Union[Set, Dict] = None,
    ) -> Type[pydantic.BaseModel]:
        """Build (or fetch from cache) the pydantic model for this ormar model."""
        # Normalize set-style include/exclude into nested dict form.
        if include and isinstance(include, Set):
            include = translate_list_to_dict(include)
        if exclude and isinstance(exclude, Set):
            exclude = translate_list_to_dict(exclude)
        fields_dict: Dict[str, Any] = dict()
        defaults: Dict[str, Any] = dict()
        fields_to_process = cls._get_not_excluded_fields(
            fields={*cls.Meta.model_fields.keys()}, include=include, exclude=exclude
        )
        # Keep the original model field order.
        fields_to_process.sort(
            key=lambda x: list(cls.Meta.model_fields.keys()).index(x)
        )

        # NOTE(review): the cache lookup happens after the field filtering
        # above — it could be hoisted to the top of the method; confirm
        # nothing relies on the side effects before changing.
        cache_key = f"{cls.__name__}_{str(include)}_{str(exclude)}"
        if cache_key in cls.__cache__:
            return cls.__cache__[cache_key]

        for name in fields_to_process:
            field = cls.METHOD_NAME(
                name=name,
                defaults=defaults,
                include=include,
                exclude=exclude,
                relation_map=relation_map,
            )
            if field is not None:
                fields_dict[name] = field
        # Random suffix avoids clashing generated class names.
        model = type(
            f"{cls.__name__}_{''.join(choices(string.ascii_uppercase, k=3))}",
            (pydantic.BaseModel,),
            {"__annotations__": fields_dict, **defaults},
        )
        model = cast(Type[pydantic.BaseModel], model)
        cls._copy_field_validators(model=model)
        cls.__cache__[cache_key] = model
        return model

    @classmethod
    def METHOD_NAME(
        cls,
        name: str,
        defaults: Dict,
        include: Union[Set, Dict, None],
        exclude: Union[Set, Dict, None],
        relation_map: Dict[str, Any],
    ) -> Any:
        """Return the pydantic annotation for a single ormar field (or None
        to drop it), filling `defaults` with the field's FieldInfo."""
        field = cls.Meta.model_fields[name]
        target: Any = None
        if field.is_relation and name in relation_map:  # type: ignore
            # Recurse into the related model with narrowed include/exclude.
            target = field.to._convert_ormar_to_pydantic(
                include=cls._skip_ellipsis(include, name),
                exclude=cls._skip_ellipsis(exclude, name),
                relation_map=cls._skip_ellipsis(
                    relation_map, name, default_return=dict()
                ),
            )
            if field.is_multi or field.virtual:
                target = List[target]  # type: ignore
        elif not field.is_relation:
            defaults[name] = cls.__fields__[name].field_info
            target = field.__type__
        if target is not None and field.nullable:
            target = Optional[target]
        return target

    @classmethod
    def _copy_field_validators(cls, model: Type[pydantic.BaseModel]) -> None:
        """
        Copy field validators from ormar model to generated pydantic model.
        """
        for field_name, field in model.__fields__.items():
            if (
                field_name not in cls.__fields__
                or cls.Meta.model_fields[field_name].is_relation
            ):
                continue
            validators = cls.__fields__[field_name].validators

            # Deduplicate by the wrapped function so validators are not
            # attached twice.
            already_attached = [
                validator.__wrapped__ for validator in field.validators  # type: ignore
            ]
            validators_to_copy = [
                validator
                for validator in validators
                if validator.__wrapped__ not in already_attached  # type: ignore
            ]
            field.validators.extend(copy.deepcopy(validators_to_copy))
            class_validators = cls.__fields__[field_name].class_validators
            field.class_validators.update(copy.deepcopy(class_validators))
            field.pre_validators = copy.deepcopy(
                cls.__fields__[field_name].pre_validators
            )
            field.post_validators = copy.deepcopy(
                cls.__fields__[field_name].post_validators
            )
5,155 | visit bool op | # -*- coding: utf-8 -*-
"""
eve.io.mongo.parser
~~~~~~~~~~~~~~~~~~~
This module implements a Python-to-Mongo syntax parser. Allows the MongoDB
data-layer to seamlessly respond to a Python-like query.
:copyright: (c) 2017 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
import ast
import sys
from datetime import datetime # noqa
from bson import ObjectId # noqa
def parse(expression):
    """Given a python-like conditional statement, returns the equivalent
    mongo-like query expression. Conditional and boolean operators (==, <=, >=,
    !=, >, <) along with a couple function calls (ObjectId(), datetime()) are
    supported.
    """
    visitor = MongoVisitor()
    try:
        visitor.visit(ast.parse(expression))
    except SyntaxError as exc:
        # Re-raise as ParseError, preserving the original traceback.
        err = ParseError(exc)
        err.__traceback__ = sys.exc_info()[2]
        raise err
    return visitor.mongo_query
class ParseError(ValueError):
    """Raised when an expression cannot be parsed into a Mongo query
    (wraps the original SyntaxError raised by ast.parse)."""

    pass
class MongoVisitor(ast.NodeVisitor):
    """Implements the python-to-mongo parser. Only Python conditional
    statements are supported, however nested, combined with most common compare
    and boolean operators (And and Or).

    Supported compare operators: ==, >, <, !=, >=, <=
    Supported boolean operators: And, Or
    """

    # Maps python AST operator classes to their mongo equivalent.
    # ast.Eq maps to "" because mongo equality is plain {field: value}.
    op_mapper = {
        ast.Eq: "",
        ast.Gt: "$gt",
        ast.GtE: "$gte",
        ast.Lt: "$lt",
        ast.LtE: "$lte",
        ast.NotEq: "$ne",
        ast.Or: "$or",
        ast.And: "$and",
    }

    def visit_Module(self, node):
        """Module handler, our entry point."""
        self.mongo_query = {}
        # Stack of operand lists, one entry per nested boolean operator.
        self.ops = []
        self.current_value = None

        # perform the magic.
        self.generic_visit(node)

        # if we didn't obtain a query, it is likely that an unsupported
        # python expression has been passed.
        if not self.mongo_query:
            raise ParseError(
                "Only conditional statements with boolean "
                "(and, or) and comparison operators are "
                "supported."
            )

    def visit_Expr(self, node):
        """Make sure that we are parsing compare or boolean operators"""
        if not (
            isinstance(node.value, ast.Compare) or isinstance(node.value, ast.BoolOp)
        ):
            raise ParseError("Will only parse conditional statements")
        self.generic_visit(node)

    def visit_Compare(self, node):
        """Compare operator handler."""
        self.visit(node.left)
        left = self.current_value
        operator = self.op_mapper[node.ops[0].__class__] if node.ops else None

        if node.comparators:
            comparator = node.comparators[0]
            self.visit(comparator)
            if operator != "":
                value = {operator: self.current_value}
            else:
                # Equality: {field: value} with no explicit operator.
                value = self.current_value

            if self.ops:
                self.ops[-1].append({left: value})
            else:
                self.mongo_query[left] = value

    def METHOD_NAME(self, node):
        """Boolean operator handler."""
        op = self.op_mapper[node.op.__class__]
        self.ops.append([])
        for value in node.values:
            self.visit(value)
        c = self.ops.pop()
        if self.ops:
            # Nested boolean: fold into the enclosing operator's operands.
            self.ops[-1].append({op: c})
        else:
            self.mongo_query[op] = c

    def visit_Call(self, node):
        """A couple function calls are supported: bson's ObjectId() and
        datetime().
        """
        if isinstance(node.func, ast.Name):
            if node.func.id == "ObjectId":
                try:
                    self.current_value = ObjectId(node.args[0].value)
                except Exception:
                    # Invalid/missing argument: leave current_value untouched.
                    pass
            elif node.func.id == "datetime":
                values = []
                for arg in node.args:
                    values.append(arg.value)
                try:
                    self.current_value = datetime(*values)
                except Exception:
                    pass

    def visit_Attribute(self, node):
        """Attribute handler ('Contact.Id')."""
        self.visit(node.value)
        self.current_value += "." + node.attr

    def visit_Name(self, node):
        """Names handler."""
        self.current_value = node.id

    def visit_Constant(self, node):
        """Constants handler (numbers, strings, booleans, None).

        ast.parse emits ast.Constant for all literals since Python 3.8;
        this supersedes the deprecated visit_Num/visit_Str fallbacks.
        """
        self.current_value = node.value

    def visit_Num(self, node):
        """Numbers handler (legacy ASTs built with deprecated ast.Num)."""
        self.current_value = node.n

    def visit_Str(self, node):
        """Strings handler (legacy ASTs built with deprecated ast.Str)."""
        self.current_value = node.s
5,156 | download video file | from googleapiclient.errors import HttpError
import cv2
import logging
from dataclasses import dataclass
from typing import Any
from google_api.sdk import youtube_videos_insert, youtube_videos_set_thumbnail
from users.client import get_users_data_by_ids_async
from temporalio import activity
from schedule.models import ScheduleItem
from django.core.files.storage import FileSystemStorage
from django.core.files.storage import storages
logger = logging.getLogger(__name__)
local_storage = FileSystemStorage()
local_storage.base_location = "/tmp/"
class DailyThumbnailLimitException(Exception):
    """Raised when the YouTube API rejects a thumbnail upload with HTTP 429
    (daily thumbnail quota exhausted)."""

    pass
@dataclass
class ScheduleItemData:
    """Flattened schedule-item payload exchanged between workflow activities."""

    id: int
    slug: str
    title: str
    type: str
    description: str
    keynote_description: str
    abstract: str
    elevator_pitch: str
    video_uploaded_path: str
    tags: list[str]
    speakers_ids: list[int]
    conference_name: str
    conference_youtube_video_bottom_text: str
    has_submission: bool

    @property
    def clean_tags(self) -> list[str]:
        """Tags lowercased with all spaces and hyphens removed."""
        strip_table = str.maketrans("", "", " -")
        return [tag.translate(strip_table).lower() for tag in self.tags]

    @property
    def hashtags(self) -> list[str]:
        """clean_tags with a '#' prefix, ready for video descriptions."""
        return ["#" + tag for tag in self.clean_tags]
@activity.defn
async def fetch_schedule_item(schedule_item_id: int) -> ScheduleItemData:
    """Load a ScheduleItem with its relations and flatten it into a
    ScheduleItemData payload, localizing text fields to the item's language."""
    schedule_item = await ScheduleItem.objects.prefetch_related(
        "submission",
        "submission__tags",
        "conference",
        "language",
        "additional_speakers",
        "keynote__speakers",
    ).aget(id=schedule_item_id)
    speakers_ids = schedule_item.speakers
    language_code = schedule_item.language.code
    # Submission/keynote-derived fields fall back to "" (or []) when the
    # item has no submission/keynote attached.
    return ScheduleItemData(
        id=schedule_item.id,
        slug=schedule_item.slug,
        type=schedule_item.type,
        title=schedule_item.title.strip(),
        description=schedule_item.description.strip(),
        abstract=schedule_item.submission.abstract.localize(language_code).strip()
        if schedule_item.submission_id
        else "",
        keynote_description=schedule_item.keynote.description.localize(
            language_code
        ).strip()
        if schedule_item.keynote_id
        else "",
        elevator_pitch=schedule_item.submission.elevator_pitch.localize(
            language_code
        ).strip()
        if schedule_item.submission_id
        else "",
        tags=[tag.name for tag in schedule_item.submission.tags.all()]
        if schedule_item.submission_id
        else [],
        video_uploaded_path=schedule_item.video_uploaded_path,
        speakers_ids=speakers_ids,
        conference_name=schedule_item.conference.name.localize(language_code),
        conference_youtube_video_bottom_text=schedule_item.conference.youtube_video_bottom_text,
        has_submission=schedule_item.submission_id is not None,
    )
@activity.defn
async def fetch_speakers_data(
    speakers_ids: list[int],
) -> dict[str, dict[str, Any]]:
    """Fetch user profile data for the given speaker ids from the users service."""
    return await get_users_data_by_ids_async(speakers_ids)
@dataclass
class AddYouTubeIDToScheduleItemInput:
    """Input for add_youtube_id_to_schedule_item."""

    schedule_item_id: int
    youtube_id: str
@activity.defn
async def add_youtube_id_to_schedule_item(
    input: AddYouTubeIDToScheduleItemInput,
) -> None:
    """Persist the uploaded video's YouTube id on the schedule item."""
    schedule_item = await ScheduleItem.objects.aget(id=input.schedule_item_id)
    schedule_item.youtube_video_id = input.youtube_id
    await schedule_item.asave(update_fields=["youtube_video_id"])
@dataclass
class DownloadVideoFileInput:
    """Input for the download-video activity: remote path + schedule item id."""

    path: str
    id: int
@activity.defn
async def METHOD_NAME(input: DownloadVideoFileInput) -> str:
    """Download the video from remote conference-video storage into local
    /tmp storage (skipping if already present) and return the local path.

    :param input: remote storage path and schedule item id.
    :returns: absolute local filesystem path of the downloaded file.
    """
    # Lazy %-style args instead of an f-string so the message is only
    # formatted when this log level is actually emitted.
    logger.warning("downloading %s", input.path)
    storage = storages["conferencevideos"]
    filename = f"yt_upload_{input.id}"
    if not local_storage.exists(filename):
        # Idempotent: re-running the activity reuses the existing download.
        local_storage.save(filename, storage.open(input.path))
    return local_storage.path(filename)
@dataclass
class UploadVideoToYouTubeInput:
    """Input for upload_video_to_youtube."""

    title: str
    description: str
    file_path: str
    tags: list[str]

    @property
    def tags_as_str(self) -> str:
        # Fixed return annotation: join() yields a single comma-separated
        # string, not a list[str] as previously annotated.
        return ",".join(self.tags)
@activity.defn
async def upload_video_to_youtube(input: UploadVideoToYouTubeInput):
    """Stream-upload the local video file to YouTube, heartbeating the
    Temporal activity after each uploaded chunk; returns the final API
    response."""
    async for response in youtube_videos_insert(
        title=input.title,
        description=input.description,
        tags=input.tags_as_str,
        file_path=input.file_path,
    ):
        # Heartbeat keeps long uploads from being timed out by Temporal.
        activity.heartbeat("video uploading")
    return response
@dataclass
class ExtractVideoThumbnailInput:
    """Input for extract_video_thumbnail: local video path + schedule item id."""

    file_path: str
    schedule_item_id: int
@activity.defn
async def extract_video_thumbnail(input: ExtractVideoThumbnailInput):
    """Grab the first frame of the video as a JPEG thumbnail in local
    storage and return its path; reuses an existing thumbnail if present.

    :raises ValueError: if no frame can be read from the video.
    """
    thumbnail_file_name = f"{input.schedule_item_id}-thumbnail.jpg"
    file_path = local_storage.path(thumbnail_file_name)
    if local_storage.exists(thumbnail_file_name):
        return file_path

    video_capture = cv2.VideoCapture(input.file_path)
    try:
        success, image = video_capture.read()
    finally:
        # Always release the capture handle; the original leaked it.
        video_capture.release()
    if not success:
        raise ValueError("Unable to extract frame")
    cv2.imwrite(file_path, image)
    return file_path
@dataclass
class SetThumbnailToYouTubeVideoInput:
    """Input for set_thumbnail_to_youtube_video."""

    youtube_id: str
    thumbnail_path: str
@activity.defn
async def set_thumbnail_to_youtube_video(input: SetThumbnailToYouTubeVideoInput):
    """Set the custom thumbnail on the uploaded YouTube video.

    :raises DailyThumbnailLimitException: on HTTP 429 (daily quota reached),
        so the workflow can retry later instead of failing permanently.
    """
    try:
        return await youtube_videos_set_thumbnail(
            video_id=input.youtube_id,
            thumbnail_path=input.thumbnail_path,
        )
    except HttpError as e:
        if e.status_code == 429:
            # we reached the daily thumbnail limit
            raise DailyThumbnailLimitException()
        raise
@dataclass
class CleanupLocalVideoFilesInput:
    """Input for cleanup_local_video_files."""

    schedule_item_id: int
    # When False the thumbnail is kept (e.g. to retry setting it later).
    delete_thumbnail: bool
@activity.defn
async def cleanup_local_video_files(input: CleanupLocalVideoFilesInput):
    """Remove the temporary video (and optionally thumbnail) files from
    local /tmp storage once the upload workflow is done with them."""
    if input.delete_thumbnail:
        thumbnail_name = f"{input.schedule_item_id}-thumbnail.jpg"
        if local_storage.exists(thumbnail_name):
            local_storage.delete(thumbnail_name)

    video_name = f"yt_upload_{input.schedule_item_id}"
    if local_storage.exists(video_name):
        local_storage.delete(video_name)
5,157 | name | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetAccessConnectorResult',
'AwaitableGetAccessConnectorResult',
'get_access_connector',
'get_access_connector_output',
]
@pulumi.output_type
class GetAccessConnectorResult:
    """
    Information about azure databricks accessConnector.
    """
    # Pulumi-generated output type: __init__ validates each raw value's type
    # and stores it via pulumi.set; properties read back via pulumi.get.
    def __init__(__self__, id=None, identity=None, location=None, METHOD_NAME=None, properties=None, system_data=None, tags=None, type=None):
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if identity and not isinstance(identity, dict):
            raise TypeError("Expected argument 'identity' to be a dict")
        pulumi.set(__self__, "identity", identity)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", METHOD_NAME)
        if properties and not isinstance(properties, dict):
            raise TypeError("Expected argument 'properties' to be a dict")
        pulumi.set(__self__, "properties", properties)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def identity(self) -> Optional['outputs.ManagedServiceIdentityResponse']:
        """
        Managed service identity (system assigned and/or user assigned identities)
        """
        return pulumi.get(self, "identity")

    @property
    @pulumi.getter
    def location(self) -> str:
        """
        The geo-location where the resource lives
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def METHOD_NAME(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def properties(self) -> 'outputs.AccessConnectorPropertiesResponse':
        """
        Azure Databricks accessConnector properties
        """
        return pulumi.get(self, "properties")

    @property
    @pulumi.getter(METHOD_NAME="systemData")
    def system_data(self) -> 'outputs.SystemDataResponse':
        """
        The system metadata relating to this resource
        """
        return pulumi.get(self, "system_data")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
        """
        return pulumi.get(self, "type")
class AwaitableGetAccessConnectorResult(GetAccessConnectorResult):
    """Awaitable wrapper so the result can be used with `await` in pulumi's
    async runtime; __await__ yields nothing and returns a plain copy."""
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            # Never executed; present only to make this a generator function.
            yield self
        return GetAccessConnectorResult(
            id=self.id,
            identity=self.identity,
            location=self.location,
            METHOD_NAME=self.METHOD_NAME,
            properties=self.properties,
            system_data=self.system_data,
            tags=self.tags,
            type=self.type)
def get_access_connector(connector_name: Optional[str] = None,
                         resource_group_name: Optional[str] = None,
                         opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAccessConnectorResult:
    """
    Gets an azure databricks accessConnector.
    Azure REST API version: 2023-05-01.


    :param str connector_name: The name of the azure databricks accessConnector.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param opts: Invoke options merged with the provider defaults.
    """
    __args__ = dict()
    __args__['connectorName'] = connector_name
    __args__['resourceGroupName'] = resource_group_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # Synchronous invoke against the azure-native provider.
    __ret__ = pulumi.runtime.invoke('azure-native:databricks:getAccessConnector', __args__, opts=opts, typ=GetAccessConnectorResult).value

    return AwaitableGetAccessConnectorResult(
        id=pulumi.get(__ret__, 'id'),
        identity=pulumi.get(__ret__, 'identity'),
        location=pulumi.get(__ret__, 'location'),
        METHOD_NAME=pulumi.get(__ret__, 'name'),
        properties=pulumi.get(__ret__, 'properties'),
        system_data=pulumi.get(__ret__, 'system_data'),
        tags=pulumi.get(__ret__, 'tags'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_access_connector)
def get_access_connector_output(connector_name: Optional[pulumi.Input[str]] = None,
                                resource_group_name: Optional[pulumi.Input[str]] = None,
                                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetAccessConnectorResult]:
    """
    Gets an azure databricks accessConnector.
    Azure REST API version: 2023-05-01.

    Output-typed variant of get_access_connector: the lift_output_func
    decorator supplies the implementation, so the body is just `...`.

    :param str connector_name: The name of the azure databricks accessConnector.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    ...
5,158 | set status | # Copyright (c) 2022, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from pypika import CustomFunction
import frappe
from frappe import _
from frappe.query_builder.functions import Avg
from frappe.utils import cint, flt
from frappe.utils.nestedset import NestedSet
from hrms.hr.doctype.appraisal_cycle.appraisal_cycle import validate_active_appraisal_cycle
from hrms.hr.utils import validate_active_employee
class Goal(NestedSet):
    """Employee goal document arranged as a tree (NestedSet); group goals
    aggregate the progress of their children."""

    # Field that links a goal to its parent in the nested-set tree.
    nsm_parent_field = "parent_goal"

    def before_insert(self):
        # Group goals start at 0%; their progress is derived from children.
        if cint(self.is_group):
            self.progress = 0

    def validate(self):
        if self.appraisal_cycle:
            validate_active_appraisal_cycle(self.appraisal_cycle)
        validate_active_employee(self.employee)
        self.validate_parent_fields()
        self.validate_from_to_dates(self.start_date, self.end_date)
        self.validate_progress()
        self.METHOD_NAME()

    def on_update(self):
        NestedSet.on_update(self)
        doc_before_save = self.get_doc_before_save()
        if doc_before_save:
            self.update_kra_in_child_goals(doc_before_save)
            if doc_before_save.parent_goal != self.parent_goal:
                # parent goal changed, update progress of old parent
                self.update_parent_progress(doc_before_save.parent_goal)
        self.update_parent_progress()
        self.update_goal_progress_in_appraisal()

    def on_trash(self):
        NestedSet.on_trash(self, allow_root_deletion=True)

    def after_delete(self):
        # Recompute parent and appraisal aggregates after removal.
        self.update_parent_progress()
        self.update_goal_progress_in_appraisal()

    def validate_parent_fields(self):
        """Ensure employee/KRA/cycle match the parent goal's values."""
        if not self.parent_goal:
            return

        parent_details = frappe.db.get_value(
            "Goal", self.parent_goal, ["employee", "kra", "appraisal_cycle"], as_dict=True
        )
        if not parent_details:
            return

        if self.employee != parent_details.employee:
            frappe.throw(
                _("Goal should be owned by the same employee as its parent goal."), title=_("Not Allowed")
            )
        if self.kra != parent_details.kra:
            frappe.throw(
                _("Goal should be aligned with the same KRA as its parent goal."), title=_("Not Allowed")
            )
        if self.appraisal_cycle != parent_details.appraisal_cycle:
            frappe.throw(
                _("Goal should belong to the same Appraisal Cycle as its parent goal."),
                title=_("Not Allowed"),
            )

    def validate_progress(self):
        if flt(self.progress) > 100:
            frappe.throw(_("Goal progress percentage cannot be more than 100."))

    def METHOD_NAME(self, status=None):
        """Derive status from progress; archived goals keep their status.

        NOTE(review): the `status` parameter is accepted but never used —
        confirm callers before removing it.
        """
        if self.status != "Archived":
            if flt(self.progress) == 0:
                self.status = "Pending"
            elif flt(self.progress) == 100:
                self.status = "Completed"
            elif flt(self.progress) < 100:
                self.status = "In Progress"

    def update_kra_in_child_goals(self, doc_before_save):
        """Aligns children's KRA to parent goal's KRA if parent goal's KRA is changed"""
        if doc_before_save.kra != self.kra and self.is_group:
            Goal = frappe.qb.DocType("Goal")

            (frappe.qb.update(Goal).set(Goal.kra, self.kra).where((Goal.parent_goal == self.name))).run()
            frappe.msgprint(_("KRA updated for all child goals."), alert=True, indicator="green")

    def update_parent_progress(self, old_parent=None):
        """Recompute the parent goal's progress as the average of its
        non-archived children (optionally for the previous parent)."""
        parent_goal = old_parent or self.parent_goal
        if not parent_goal:
            return

        Goal = frappe.qb.DocType("Goal")
        avg_goal_completion = (
            frappe.qb.from_(Goal)
            .select(Avg(Goal.progress).as_("avg_goal_completion"))
            .where(
                (Goal.parent_goal == parent_goal)
                & (Goal.employee == self.employee)
                # archived goals should not contribute to progress
                & (Goal.status != "Archived")
            )
        ).run()[0][0]

        parent_goal_doc = frappe.get_doc("Goal", parent_goal)
        parent_goal_doc.progress = flt(avg_goal_completion, parent_goal_doc.precision("progress"))
        # System-driven update: bypass permissions and mandatory checks.
        parent_goal_doc.ignore_permissions = True
        parent_goal_doc.ignore_mandatory = True
        parent_goal_doc.save()

    def update_goal_progress_in_appraisal(self):
        """Refresh the goal score on the linked appraisal, if one exists."""
        if not self.appraisal_cycle:
            return

        appraisal = frappe.db.get_value(
            "Appraisal", {"employee": self.employee, "appraisal_cycle": self.appraisal_cycle}
        )
        if appraisal:
            appraisal = frappe.get_doc("Appraisal", appraisal)
            appraisal.set_goal_score(update=True)
@frappe.whitelist()
def get_children(doctype: str, parent: str, is_root: bool = False, **filters) -> list[dict]:
    """Return one level of the goal tree for the desk tree view, filtered by
    employee/cycle/parent/date range; archived goals are always excluded."""
    Goal = frappe.qb.DocType(doctype)
    query = (
        frappe.qb.from_(Goal)
        .select(
            Goal.name.as_("value"),
            Goal.goal_name.as_("title"),
            Goal.is_group.as_("expandable"),
            Goal.status,
            Goal.employee,
            Goal.employee_name,
            Goal.appraisal_cycle,
            Goal.progress,
            Goal.kra,
        )
        .where(Goal.status != "Archived")
    )

    if filters.get("employee"):
        query = query.where(Goal.employee == filters.get("employee"))

    if filters.get("appraisal_cycle"):
        query = query.where(Goal.appraisal_cycle == filters.get("appraisal_cycle"))

    if filters.get("goal"):
        query = query.where(Goal.parent_goal == filters.get("goal"))
    elif parent and not is_root:
        # via expand child
        query = query.where(Goal.parent_goal == parent)
    else:
        # Root level: goals with no parent (NULL or empty string).
        ifnull = CustomFunction("IFNULL", ["value", "default"])
        query = query.where(ifnull(Goal.parent_goal, "") == "")

    if filters.get("date_range"):
        date_range = frappe.parse_json(filters.get("date_range"))
        # Goal must start within the range; open-ended goals (no end_date)
        # are included as long as they start in range.
        query = query.where(
            (Goal.start_date.between(date_range[0], date_range[1]))
            & ((Goal.end_date.isnull()) | (Goal.end_date.between(date_range[0], date_range[1])))
        )

    goals = query.orderby(Goal.employee, Goal.kra).run(as_dict=True)
    _update_goal_completion_status(goals)

    return goals
def _update_goal_completion_status(goals: list[dict]) -> list[dict]:
    """Annotate each group node with an "<n> of <m> Completed" label (in place)."""
    for goal in goals:
        if not goal.expandable:
            continue  # leaf goals carry their own progress
        total = frappe.db.count("Goal", dict(parent_goal=goal.value))
        if not total:
            continue
        done = frappe.db.count("Goal", {"parent_goal": goal.value, "status": "Completed"}) or 0
        goal["completion_count"] = _("{0} of {1} Completed").format(done, total)
    return goals
@frappe.whitelist()
def update_progress(progress: float, goal: str):
    """Set the progress of a goal and save it.

    Fixes two defects of the original: the ``-> None`` annotation contradicted
    the ``return goal`` statement, and the ``goal`` name was rebound from a
    document name (str) to a Document object mid-function.

    Args:
        progress: new progress percentage (validated to <= 100 on save).
        goal: name of the Goal document to update.

    Returns:
        The saved Goal document.
    """
    goal_doc = frappe.get_doc("Goal", goal)
    goal_doc.progress = progress
    # progress can be updated from the tree view before mandatory fields exist
    goal_doc.flags.ignore_mandatory = True
    goal_doc.save()
    return goal_doc
@frappe.whitelist()
def add_tree_node():
    """Create a Goal document from the tree view's submitted form data."""
    from frappe.desk.treeview import make_tree_args

    args = make_tree_args(**frappe.form_dict)
    # root-level nodes come through with the pseudo parent "All Goals"
    if args.parent_goal == "All Goals" or not frappe.db.exists("Goal", args.parent_goal):
        args.parent_goal = None
    frappe.get_doc(args).insert()
5,159 | validate files | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright European Organization for Nuclear Research (CERN) since 2012
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Rucio Cache client.
"""
import argparse
import json
import os
import random
import ssl
import sys
import stomp
from jsonschema import validate
from rucio.client.didclient import DIDClient
from rucio.client.rseclient import RSEClient
from rucio.common.config import config_get, config_get_int
from rucio.common.exception import DataIdentifierNotFound
from rucio.common.schema import get_schema_value
# Process exit codes used by the command-line entry point.
SUCCESS = 0
FAILURE = 1
def METHOD_NAME(files):
    """Validate each file's size and checksum against the Rucio catalog.

    Raises Exception when a file is missing from the catalog or its
    bytes/adler32 disagree with the catalog metadata.
    """
    client = DIDClient()
    for file in files:
        try:
            metadata = client.get_metadata(file["scope"], file["name"])
        except DataIdentifierNotFound:
            err = "%s:%s not found in rucio catalog" % (file["scope"], file["name"])
            raise Exception(err)
        size_differs = int(metadata["bytes"]) != int(file["bytes"])
        checksum_differs = metadata["adler32"] != file["adler32"]
        if size_differs or checksum_differs:
            err = "%s:%s(bytes:%s, adler32:%s) has different size or checksum with metadata(bytes:%s, adler32:%s)" % (file["scope"], file["name"], file["bytes"], file["adler32"], metadata["bytes"], metadata["adler32"])
            raise Exception(err)
def validate_rse(rse):
    """Validate that *rse* is a volatile RSE.

    Rucio Cache must only update volatile RSEs, so a non-volatile RSE is
    rejected.

    Raises:
        Exception: if the RSE lookup fails or the RSE is not volatile.
    """
    client = RSEClient()
    try:
        rse_attributes = client.get_rse(rse)
    except Exception as error:
        # keep the caller-facing generic Exception, but chain the original so
        # the root cause stays in the traceback (the bare re-wrap dropped it)
        raise Exception(error) from error
    if not rse_attributes["volatile"]:
        err = "%s volatile is not True, Rucio Cache should not update it." % (rse)
        raise Exception(err)
def cache_operation(args):
    """Validate a cache add/delete message and publish it to the broker."""
    payload = json.loads(args.message)
    # generic envelope check first, then the operation-specific schema
    validate(payload, get_schema_value('MESSAGE_OPERATION'))
    validate_rse(payload["rse"])
    if payload["operation"] == "add_replicas":
        validate(payload, get_schema_value('CACHE_ADD_REPLICAS'))
        METHOD_NAME(payload["files"])
    else:
        validate(payload, get_schema_value('CACHE_DELETE_REPLICAS'))
    conn = stomp.Connection([(args.broker, args.port)])
    # NOTE(review): PROTOCOL_TLSv1 is deprecated and insecure — confirm whether
    # the broker can negotiate a newer TLS version before changing it
    conn.set_ssl(key_file=args.ssl_key_file, cert_file=args.ssl_cert_file, ssl_version=ssl.PROTOCOL_TLSv1)
    conn.connect()
    # random id is only used to loosely correlate messages, not for security
    message = {'id': int(random.random() * 1000), 'payload': payload}
    # NOTE(review): the 'vo' header is hard-coded to 'atlas' — confirm intended
    conn.send(destination=args.destination, body=json.dumps(message), id='rucio-cache-messaging', ack='auto', headers={'vo': 'atlas'})
    conn.disconnect()
def get_parser():
    """
    Returns the argparse parser.

    All defaults are read from the [messaging-cache] section of the Rucio
    configuration, so only -m/--message is normally required.
    """
    # runtime help text shown for --message; format documented for operators
    message_help = """
    Add replicas message:
    {'files': [{'scope': scope, 'name': name, 'bytes': 1L, 'adler32': ''},
               {'scope': scope, 'name': name, 'bytes': 1L, 'adler32': ''}, ...],
     'rse': rse_cache_name,
     'lifetime': seconds,
     'operation': 'add_replicas'
    }
    Delete replicas message:
    {'files': [{'scope': scope, 'name': name}, {'scope': scope, 'name': name}, ...],
     'rse': rse_cache_name,
     'operation': 'delete_replicas'
    }
    """
    oparser = argparse.ArgumentParser(description="This daemons is used to populate information of replicas on volatile storage.",
                                      prog=os.path.basename(sys.argv[0]), add_help=True)
    # Main arguments
    oparser.add_argument('-b', '--broker', dest='broker', default=config_get('messaging-cache', 'brokers').split(',')[0], help='Message broker name')
    oparser.add_argument('-p', '--port', dest='port', default=config_get_int('messaging-cache', 'port'), help='Message broker port')
    oparser.add_argument('-c', '--certificate', dest='ssl_cert_file', default=config_get('messaging-cache', 'ssl_cert_file'), help='Certificate file')
    oparser.add_argument('-k', '--certificate-key', dest='ssl_key_file', default=config_get('messaging-cache', 'ssl_key_file'), help='Certificate key file')
    oparser.add_argument('-d', '--destination', dest='destination', default=config_get('messaging-cache', 'destination'), help="Message broker topic")
    oparser.add_argument('-m', '--message', dest='message', default=None, help=message_help)
    return oparser
if __name__ == '__main__':
    oparser = get_parser()
    # no arguments: show usage and exit with failure
    if len(sys.argv) == 1:
        oparser.print_help()
        sys.exit(FAILURE)
    args = oparser.parse_args(sys.argv[1:])
    try:
        # NOTE(review): cache_operation returns None, so the success path exits
        # with status 0 via sys.exit(None) — confirm that is intended
        result = cache_operation(args)
        sys.exit(result)
    except (RuntimeError, NotImplementedError) as e:
        # other exception types (validation errors, broker failures) propagate
        print("ERROR: ", e, file=sys.stderr)
        sys.exit(FAILURE)
5,160 | get children | import ast as python_ast
from typing import Any, Optional, Sequence, Type, Union
from .natspec import parse_natspec as parse_natspec
from .utils import ast_to_dict as ast_to_dict
from .utils import parse_to_ast as parse_to_ast
from .utils import parse_to_ast_with_settings as parse_to_ast_with_settings
# Attribute-name collections used when building/serialising AST nodes.
NODE_BASE_ATTRIBUTES: Any
NODE_SRC_ATTRIBUTES: Any
DICT_AST_SKIPLIST: Any

# Build a Vyper AST node from a dict or a Python AST node.
def get_node(
    ast_struct: Union[dict, python_ast.AST], parent: Optional[VyperNode] = ...
) -> VyperNode: ...

# Structural comparison of two nodes.
def compare_nodes(left_node: VyperNode, right_node: VyperNode) -> bool: ...
class VyperNode:
    """Stub for the base class of all Vyper AST nodes."""
    full_source_code: str = ...
    node_source_code: str = ...
    _metadata: dict = ...
    def __init__(self, parent: Optional[VyperNode] = ..., **kwargs: Any) -> None: ...
    def __hash__(self) -> Any: ...
    def __eq__(self, other: Any) -> Any: ...
    @property
    def description(self): ...
    @classmethod
    def get_fields(cls: Any) -> set: ...
    def evaluate(self) -> VyperNode: ...
    @classmethod
    def from_node(cls, node: VyperNode, **kwargs: Any) -> Any: ...
    def to_dict(self) -> dict: ...
    # Direct children matching an optional node type and field filter.
    def METHOD_NAME(
        self,
        node_type: Union[Type[VyperNode], Sequence[Type[VyperNode]], None] = ...,
        filters: Optional[dict] = ...,
        reverse: bool = ...,
    ) -> Sequence: ...
    # Like METHOD_NAME but recursive, optionally including this node.
    def get_descendants(
        self,
        node_type: Union[Type[VyperNode], Sequence[Type[VyperNode]], None] = ...,
        filters: Optional[dict] = ...,
        include_self: bool = ...,
        reverse: bool = ...,
    ) -> Sequence: ...
    def get_ancestor(
        self, node_type: Union[Type[VyperNode], Sequence[Type[VyperNode]], None] = ...
    ) -> VyperNode: ...
    def get(self, field_str: str) -> Any: ...
class TopLevel(VyperNode):
    """Common base for nodes holding a statement body (Module, FunctionDef)."""
    doc_string: Str = ...
    body: list = ...
    name: str = ...
    def __init__(self, *args: Any, **kwargs: Any) -> None: ...
    def __getitem__(self, key: Any) -> Any: ...
    def __iter__(self) -> Any: ...
    def __len__(self) -> int: ...
    def __contains__(self, obj: Any) -> bool: ...
class Module(TopLevel):
    """Top-level module node with in-place body mutation helpers."""
    def replace_in_tree(self, old_node: VyperNode, new_node: VyperNode) -> None: ...
    def add_to_body(self, node: VyperNode) -> None: ...
    def remove_from_body(self, node: VyperNode) -> None: ...
    def namespace(self) -> Any: ...  # context manager
class FunctionDef(TopLevel):
    """Function definition node."""
    args: arguments = ...
    decorator_list: list = ...
    returns: VyperNode = ...

# Formal parameter list of a FunctionDef.
class arguments(VyperNode):
    args: list = ...
    defaults: list = ...

# A single formal parameter.
class arg(VyperNode): ...
# Simple statement nodes.
class Return(VyperNode): ...
class Log(VyperNode):
    value: VyperNode = ...

# Named top-level declaration nodes (each has a body and a name).
class EnumDef(VyperNode):
    body: list = ...
    name: str = ...
class EventDef(VyperNode):
    body: list = ...
    name: str = ...
class InterfaceDef(VyperNode):
    body: list = ...
    name: str = ...
class StructDef(VyperNode):
    body: list = ...
    name: str = ...
# Expression and literal nodes.
class ExprNode(VyperNode): ...
class Constant(VyperNode):
    value: Any = ...
class Num(Constant):
    @property
    def n(self): ...
class Int(Num):
    value: int = ...
class Decimal(Num): ...
class Hex(Num):
    @property
    def n_bytes(self): ...
class Str(Constant):
    @property
    def s(self): ...
class Bytes(Constant):
    @property
    def s(self): ...
# Container literals.
class List(VyperNode):
    elements: list = ...
class Tuple(VyperNode):
    elements: list = ...
class Dict(VyperNode):
    keys: list = ...
    values: list = ...
class NameConstant(Constant): ...
class Name(VyperNode):
    id: str = ...
    _type: str = ...
class Expr(VyperNode):
    value: VyperNode = ...
# Unary, binary, boolean and comparison operator nodes.
class UnaryOp(ExprNode):
    op: VyperNode = ...
class USub(VyperNode): ...
class Not(VyperNode): ...
class BinOp(ExprNode):
    left: VyperNode = ...
    op: VyperNode = ...
    right: VyperNode = ...
class Add(VyperNode): ...
class Sub(VyperNode): ...
class Mult(VyperNode): ...
class Div(VyperNode): ...
class Mod(VyperNode): ...
class Pow(VyperNode): ...
class LShift(VyperNode): ...
class RShift(VyperNode): ...
class BitAnd(VyperNode): ...
class BitOr(VyperNode): ...
class BitXor(VyperNode): ...
class BoolOp(ExprNode):
    op: VyperNode = ...
class And(VyperNode): ...
class Or(VyperNode): ...
class Compare(ExprNode):
    op: VyperNode = ...
class Eq(VyperNode): ...
class NotEq(VyperNode): ...
class Lt(VyperNode): ...
class LtE(VyperNode): ...
class Gt(VyperNode): ...
class GtE(VyperNode): ...
class In(VyperNode): ...
# Call, attribute access and subscription nodes.
class Call(ExprNode):
    args: list = ...
    keywords: list = ...
    func: Name = ...
class keyword(VyperNode): ...
class Attribute(VyperNode):
    attr: str = ...
    value: VyperNode = ...
class Subscript(VyperNode):
    slice: Index = ...
    value: VyperNode = ...
class Index(VyperNode):
    value: Constant = ...
# Assignment and declaration nodes.
class Assign(VyperNode): ...
class AnnAssign(VyperNode):
    target: Name = ...
    value: VyperNode = ...
    annotation: VyperNode = ...
class VariableDecl(VyperNode):
    target: Name = ...
    value: VyperNode = ...
    annotation: VyperNode = ...
    is_constant: bool = ...
    is_public: bool = ...
    is_immutable: bool = ...
class AugAssign(VyperNode):
    op: VyperNode = ...
    target: VyperNode = ...
    value: VyperNode = ...
# Simple statements.
class Raise(VyperNode): ...
class Assert(VyperNode): ...
class Pass(VyperNode): ...
# Import nodes.
class Import(VyperNode):
    alias: str = ...
    name: str = ...
class ImportFrom(VyperNode):
    alias: str = ...
    level: int = ...
    module: str = ...
    name: str = ...
class ImplementsDecl(VyperNode):
    target: Name = ...
    annotation: Name = ...
# Control-flow nodes.
class If(VyperNode):
    body: list = ...
    orelse: list = ...
class IfExp(ExprNode):
    test: ExprNode = ...
    body: ExprNode = ...
    orelse: ExprNode = ...
class For(VyperNode): ...
class Break(VyperNode): ...
class Continue(VyperNode): ...
5,161 | modify existing color table | # ----------------------------------------------------------------------------
# CLASSES: nightly
#
# Test Case: colortables.py
#
# Programmer: Kathleen Biagas
# Date: March 30, 2022
#
# Modificatons:
# Justin Privitera, Wed Aug 3 19:46:13 PDT 2022
# Made changes to reflect the fact that built-in tables cannot be edited.
#
# Mark C. Miller, Mon Dec 12 19:02:35 PST 2022
# Add introspecting block
# ----------------------------------------------------------------------------
# NOTE: Sections of this test file are 'literalinclude'd in quickrecipes.rst.
# After modifying this file, ensure the proper content is still displayed in the doc.
# comments of the form '# sometext {' and '# sometext }' bracket the sections
# that are 'literalinclude'd in quickrecipes.rst.
def introspectingColorTable():
    """Show how to read a color table's control points via GetColorTable.

    The bracketed comments are literalinclude markers for quickrecipes.rst;
    do not move or rename them.
    """
    # introspectingColorTable {
    hotCT = GetColorTable("hot")
    print(hotCT)
    # results of print
    # GetControlPoints(0).colors = (0, 0, 255, 255)
    # GetControlPoints(0).position = 0
    # GetControlPoints(1).colors = (0, 255, 255, 255)
    # GetControlPoints(1).position = 0.25
    # GetControlPoints(2).colors = (0, 255, 0, 255)
    # GetControlPoints(2).position = 0.5
    # GetControlPoints(3).colors = (255, 255, 0, 255)
    # GetControlPoints(3).position = 0.75
    # GetControlPoints(4).colors = (255, 0, 0, 255)
    # GetControlPoints(4).position = 1
    # smoothing = Linear # NONE, Linear, CubicSpline
    # equalSpacingFlag = 0
    # discreteFlag = 0
    # introspectingColorTable }
def METHOD_NAME():
    """Modify an existing (built-in) color table by editing a copy of it.

    Built-in tables cannot be overwritten, so edits are saved under new names
    (hot_edited, hot2, hot3) which are removed again at the end. The bracketed
    comments are literalinclude markers for quickrecipes.rst; do not move them.
    """
    # modifyTable1 {
    OpenDatabase(silo_data_path("rect2d.silo"))
    AddPlot("Pseudocolor", "d")
    pc = PseudocolorAttributes()
    pc.centering=pc.Nodal
    # set color table name
    pc.colorTableName = "hot"
    SetPlotOptions(pc)
    DrawPlots()
    # put the plot in full-frame mode
    v = GetView2D()
    v.fullFrameActivationMode= v.On
    SetView2D(v)
    # modifyTable1 }
    Test("standard_hot_table")
    hotCTorig = GetColorTable("hot")
    # modifyTable2 {
    hotCT = GetColorTable("hot")
    # Remove a couple of control points
    hotCT.RemoveControlPoints(4)
    hotCT.RemoveControlPoints(3)
    # We must use a different name, as VisIt will not allow overwriting of built-in color tables
    SetColorTable("hot_edited", hotCT)
    # set color table name so changes to it will be reflected in plot
    pc.colorTableName = "hot_edited"
    SetPlotOptions(pc)
    # modifyTable2 }
    Test("modified_hot_table_1")
    # modifyTable3 {
    # Change colors
    hotCT.GetControlPoints(0).colors = (255,0,0,255)
    hotCT.GetControlPoints(1).colors = (255, 0, 255, 255)
    SetColorTable("hot_edited", hotCT)
    # modifyTable3 }
    Test("modified_hot_table_2")
    # modifyTable4 {
    # Turn on equal spacing
    hotCT.equalSpacingFlag = 1
    # Create a new color table by providing a different name
    SetColorTable("hot2", hotCT)
    # tell the Pseudocolor plot to use the new color table
    pc.colorTableName = "hot2"
    SetPlotOptions(pc)
    # modifyTable4 }
    Test("hot2")
    # modifyTable5 {
    # Change positions so that the first and last are at the endpoints
    hotCT.equalSpacingFlag=0
    hotCT.GetControlPoints(0).position = 0
    hotCT.GetControlPoints(1).position =0.5
    hotCT.GetControlPoints(2).position = 1
    SetColorTable("hot3", hotCT)
    pc.colorTableName = "hot3"
    SetPlotOptions(pc)
    # modifyTable5 }
    Test("hot3")
    # remove the added color tables
    RemoveColorTable("hot_edited")
    RemoveColorTable("hot2")
    RemoveColorTable("hot3")
    DeleteAllPlots()
def createContinuous():
    """Create a continuous color table from a list of RGB control points.

    The bracketed comments are literalinclude markers for quickrecipes.rst.
    """
    # based on http://visitusers.org/index.php?title=Creating_a_color_table
    # continuous1 {
    # create control points (red, green, blue, position).
    ct = ((1,0,0,0.), (1,0.8,0.,0.166), (1,1,0,0.333), (0,1,0,0.5),
          (0,1,1,0.666), (0,0,1,0.8333), (0.8,0.1,1,1))
    ccpl = ColorControlPointList()
    # add the control points to the list
    for pt in ct:
        p = ColorControlPoint()
        # colors is RGBA and must be in range 0...255
        p.colors = (pt[0] * 255, pt[1] * 255, pt[2] * 255, 255)
        p.position = pt[3]
        ccpl.AddControlPoints(p)
    AddColorTable("myrainbow", ccpl)
    OpenDatabase(silo_data_path("globe.silo"))
    AddPlot("Pseudocolor", "speed")
    # Make the plot use the new color table
    pc = PseudocolorAttributes(1)
    pc.colorTableName = "myrainbow"
    SetPlotOptions(pc)
    DrawPlots()
    v = GetView3D()
    v.viewNormal = (-0.693476, 0.212776, 0.688344)
    v. viewUp = (0.161927, 0.976983, -0.138864)
    SetView3D(v)
    # continuous1 }
    Test("rainbow_continuous")
    RemoveColorTable("myrainbow")
    DeleteAllPlots()
def createDiscreteUsingVTKNamedColors():
    """Create a discrete color table from vtkNamedColors entries.

    Silently skipped when the vtk module is unavailable. The bracketed
    comments are literalinclude markers for quickrecipes.rst.
    """
    # discrete1 {
    try:
        import vtk # for vtk.vtkNamedColors
    except:
        return
    # to see list of all color names available:
    # print(vtk.vtkNamedColors.GetColorNames())
    # choose some colors from vtk.vtkNamedColors
    colorNames = ["tomato", "turquoise", "van_dyke_brown", "carrot",
                  "royalblue", "naples_yellow_deep", "cerulean", "warm_grey",
                  "venetian_red", "seagreen", "sky_blue", "pink"]
    # Create a color control point list
    ccpl = ColorControlPointList()
    # Make it discrete
    ccpl.discreteFlag=1
    # Add color control points corresponding to color names
    for name in colorNames:
        p = ColorControlPoint()
        p.colors=vtk.vtkNamedColors().GetColor4ub(name)
        ccpl.AddControlPoints(p)
    # add a color table based on the color control points
    AddColorTable("mylevels", ccpl)
    OpenDatabase(silo_data_path("multi_rect2d.silo"))
    AddPlot("Subset", "domains")
    s = SubsetAttributes()
    s.colorType = s.ColorByColorTable
    s.colorTableName = "mylevels"
    SetPlotOptions(s)
    DrawPlots()
    # discrete1 }
    Test("discrete_using_vtk")
    # remove the added color tables
    RemoveColorTable("mylevels")
    DeleteAllPlots()
def main():
    """Run every color-table example in order."""
    introspectingColorTable()
    METHOD_NAME()
    createContinuous()
    createDiscreteUsingVTKNamedColors()

# script entry point: run the examples and exit the VisIt CLI
main()
Exit()
5,162 | score partial | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified from espnet(https://github.com/espnet/espnet)
"""Scorer interface module."""
import warnings
from typing import Any
from typing import List
from typing import Tuple
import paddle
class ScorerInterface:
    """Scorer interface for beam search.

    The scorer performs scoring of the all tokens in vocabulary.

    Examples:
        * Search heuristics
            * :class:`scorers.length_bonus.LengthBonus`
        * Decoder networks of the sequence-to-sequence models
            * :class:`transformer.decoder.Decoder`
            * :class:`rnn.decoders.Decoder`
        * Neural language models
            * :class:`lm.transformer.TransformerLM`
            * :class:`lm.default.DefaultRNNLM`
            * :class:`lm.seq_rnn.SequentialRNNLM`
    """

    def init_state(self, x: paddle.Tensor) -> Any:
        """Get an initial state for decoding (optional).

        Args:
            x (paddle.Tensor): The encoded feature tensor

        Returns: initial state
        """
        # stateless scorers simply decode with state=None
        return None

    def select_state(self, state: Any, i: int, new_id: int=None) -> Any:
        """Select state with relative ids in the main beam search.

        Args:
            state: Decoder state for prefix tokens
            i (int): Index to select a state in the main beam search
            new_id (int): New label index to select a state if necessary

        Returns:
            state: pruned state
        """
        # default: state is indexable per hypothesis; None passes through
        return None if state is None else state[i]

    def score(self, y: paddle.Tensor, state: Any,
              x: paddle.Tensor) -> Tuple[paddle.Tensor, Any]:
        """Score new token (required).

        Args:
            y (paddle.Tensor): 1D paddle.int64 prefix tokens.
            state: Scorer state for prefix tokens
            x (paddle.Tensor): The encoder feature that generates ys.

        Returns:
            tuple[paddle.Tensor, Any]: Tuple of
                scores for next token that has a shape of `(n_vocab)`
                and next state for ys
        """
        raise NotImplementedError

    def final_score(self, state: Any) -> float:
        """Score eos (optional).

        Args:
            state: Scorer state for prefix tokens

        Returns:
            float: final score
        """
        return 0.0
class BatchScorerInterface(ScorerInterface):
    """Batch scorer interface."""

    def batch_init_state(self, x: paddle.Tensor) -> Any:
        """Get an initial state for decoding (optional).

        Args:
            x (paddle.Tensor): The encoded feature tensor

        Returns: initial state
        """
        # default: batch decoding shares the non-batch initial state
        return self.init_state(x)

    def batch_score(self,
                    ys: paddle.Tensor,
                    states: List[Any],
                    xs: paddle.Tensor) -> Tuple[paddle.Tensor, List[Any]]:
        """Score new token batch (required).

        Args:
            ys (paddle.Tensor): paddle.int64 prefix tokens (n_batch, ylen).
            states (List[Any]): Scorer states for prefix tokens.
            xs (paddle.Tensor):
                The encoder feature that generates ys (n_batch, xlen, n_feat).

        Returns:
            tuple[paddle.Tensor, List[Any]]: Tuple of
                batchfied scores for next token with shape of `(n_batch, n_vocab)`
                and next state list for ys.
        """
        # fallback implementation: score each hypothesis one by one
        warnings.warn(
            "{} batch score is implemented through for loop not parallelized".
            format(self.__class__.__name__))
        scores = list()
        outstates = list()
        for i, (y, state, x) in enumerate(zip(ys, states, xs)):
            score, outstate = self.score(y, state, x)
            outstates.append(outstate)
            scores.append(score)
        # NOTE(review): paddle's public API uses concat/reshape — `cat` and
        # `view` here rely on paddlespeech's paddle compatibility patches;
        # confirm those patches are applied before this runs
        scores = paddle.cat(scores, 0).view(ys.shape[0], -1)
        return scores, outstates
class PartialScorerInterface(ScorerInterface):
    """Partial scorer interface for beam search.

    The partial scorer performs scoring when non-partial scorer finished scoring,
    and receives pre-pruned next tokens to score because it is too heavy to score
    all the tokens.

    Score sub-set of tokens, not all.

    Examples:
         * Prefix search for connectionist-temporal-classification models
             * :class:`decoders.scorers.ctc.CTCPrefixScorer`
    """

    def METHOD_NAME(self,
                    y: paddle.Tensor,
                    next_tokens: paddle.Tensor,
                    state: Any,
                    x: paddle.Tensor) -> Tuple[paddle.Tensor, Any]:
        """Score new token (required).

        Args:
            y (paddle.Tensor): 1D prefix token
            next_tokens (paddle.Tensor): paddle.int64 next token to score
            state: decoder state for prefix tokens
            x (paddle.Tensor): The encoder feature that generates ys

        Returns:
            tuple[paddle.Tensor, Any]:
                Tuple of a score tensor for y that has a shape `(len(next_tokens),)`
                and next state for ys
        """
        # abstract: subclasses must implement partial scoring
        raise NotImplementedError
class BatchPartialScorerInterface(BatchScorerInterface, PartialScorerInterface):
    """Batch partial scorer interface for beam search."""

    def batch_score_partial(
            self,
            ys: paddle.Tensor,
            next_tokens: paddle.Tensor,
            states: List[Any],
            xs: paddle.Tensor, ) -> Tuple[paddle.Tensor, Any]:
        """Score new token (required).

        Args:
            ys (paddle.Tensor): paddle.int64 prefix tokens (n_batch, ylen).
            next_tokens (paddle.Tensor): paddle.int64 tokens to score (n_batch, n_token).
            states (List[Any]): Scorer states for prefix tokens.
            xs (paddle.Tensor):
                The encoder feature that generates ys (n_batch, xlen, n_feat).

        Returns:
            tuple[paddle.Tensor, Any]:
                Tuple of a score tensor for ys that has a shape `(n_batch, n_vocab)`
                and next states for ys
        """
        # abstract: subclasses must implement batched partial scoring
        raise NotImplementedError
5,163 | test cash bad truncation | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_allclose
from gammapy import stats
@pytest.fixture
def test_data():
    """Test data for fit statistics tests"""
    # ten independent (mu_sig, n_on, n_off, alpha) samples
    test_data = dict(
        mu_sig=[
            0.59752422,
            9.13666449,
            12.98288095,
            5.56974565,
            13.52509804,
            11.81725635,
            0.47963765,
            11.17708176,
            5.18504894,
            8.30202394,
        ],
        n_on=[0, 13, 7, 5, 11, 16, 0, 9, 3, 12],
        n_off=[0, 7, 4, 0, 18, 7, 1, 5, 12, 25],
        alpha=[
            0.83746243,
            0.17003354,
            0.26034507,
            0.69197751,
            0.89557033,
            0.34068848,
            0.0646732,
            0.86411967,
            0.29087245,
            0.74108241,
        ],
    )
    # Poisson standard error derived from the on counts
    test_data["staterror"] = np.sqrt(test_data["n_on"])
    return test_data
@pytest.fixture
def reference_values():
    """Reference values for fit statistics test.

    Produced using sherpa stats module in dev/sherpa/stats/compare_wstat.py
    """
    # one entry per sample in the test_data fixture, same order
    return dict(
        wstat=[
            1.19504844,
            0.625311794002,
            4.25810886127,
            0.0603765381044,
            11.7285002468,
            0.206014834301,
            1.084611,
            2.72972381792,
            4.60602990838,
            7.51658734973,
        ],
        cash=[
            1.19504844,
            -39.24635098872072,
            -9.925081055136996,
            -6.034002586236575,
            -30.249839537105466,
            -55.39143500383233,
            0.9592753,
            -21.095413867175516,
            0.49542219758430406,
            -34.19193611846045,
        ],
        cstat=[
            1.19504844,
            1.4423323052792387,
            3.3176610316373925,
            0.06037653810442922,
            0.5038564644586838,
            1.3314041078406706,
            0.9592753,
            0.4546285248764317,
            1.0870959295929628,
            1.4458234764515652,
        ],
    )
def test_wstat(test_data, reference_values):
    """The W statistic with extra terms must match the sherpa reference values."""
    kwargs = {key: test_data[key] for key in ("n_on", "mu_sig", "n_off", "alpha")}
    computed = stats.wstat(extra_terms=True, **kwargs)
    assert_allclose(computed, reference_values["wstat"])
def test_cash(test_data, reference_values):
    # Cash statistic against sherpa reference values.
    statsvec = stats.cash(n_on=test_data["n_on"], mu_on=test_data["mu_sig"])
    assert_allclose(statsvec, reference_values["cash"])


def test_cstat(test_data, reference_values):
    # C statistic against sherpa reference values.
    statsvec = stats.cstat(n_on=test_data["n_on"], mu_on=test_data["mu_sig"])
    assert_allclose(statsvec, reference_values["cstat"])


def test_cash_sum_cython(test_data):
    # The cython summed implementation must agree with the python cash().
    counts = np.array(test_data["n_on"], dtype=float)
    npred = np.array(test_data["mu_sig"], dtype=float)
    stat = stats.cash_sum_cython(counts=counts, npred=npred)
    ref = stats.cash(counts, npred).sum()
    assert_allclose(stat, ref)


def METHOD_NAME():
    # cash must reject a zero truncation value.
    with pytest.raises(ValueError):
        stats.cash(10, 10, 0.0)


def test_cstat_bad_truncation():
    # cstat must reject a zero truncation value.
    with pytest.raises(ValueError):
        stats.cstat(10, 10, 0.0)
def test_wstat_corner_cases():
    """test WSTAT formulae for corner cases"""
    # n_on = 0: closed-form statistic and background estimate
    n_on = 0
    n_off = 5
    mu_sig = 2.3
    alpha = 0.5
    actual = stats.wstat(n_on=n_on, mu_sig=mu_sig, n_off=n_off, alpha=alpha)
    desired = 2 * (mu_sig + n_off * np.log(1 + alpha))
    assert_allclose(actual, desired)
    actual = stats.get_wstat_mu_bkg(n_on=n_on, mu_sig=mu_sig, n_off=n_off, alpha=alpha)
    desired = n_off / (alpha + 1)
    assert_allclose(actual, desired)
    # n_off = 0 and mu_sig < n_on * (alpha / alpha + 1)
    n_on = 9
    n_off = 0
    mu_sig = 2.3
    alpha = 0.5
    actual = stats.wstat(n_on=n_on, mu_sig=mu_sig, n_off=n_off, alpha=alpha)
    desired = -2 * (mu_sig * (1.0 / alpha) + n_on * np.log(alpha / (1 + alpha)))
    assert_allclose(actual, desired)
    actual = stats.get_wstat_mu_bkg(n_on=n_on, mu_sig=mu_sig, n_off=n_off, alpha=alpha)
    desired = n_on / (1 + alpha) - (mu_sig / alpha)
    assert_allclose(actual, desired)
    # n_off = 0 and mu_sig > n_on * (alpha / alpha + 1)
    n_on = 5
    n_off = 0
    mu_sig = 5.3
    alpha = 0.5
    actual = stats.wstat(n_on=n_on, mu_sig=mu_sig, n_off=n_off, alpha=alpha)
    desired = 2 * (mu_sig + n_on * (np.log(n_on) - np.log(mu_sig) - 1))
    assert_allclose(actual, desired)
    actual = stats.get_wstat_mu_bkg(n_on=n_on, mu_sig=mu_sig, n_off=n_off, alpha=alpha)
    # background estimate collapses to zero in this regime
    assert_allclose(actual, 0)
5,164 | package | from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
from conan.tools.apple import fix_apple_shared_install_name
from conan.tools.env import VirtualBuildEnv
from conan.tools.files import copy, get, rmdir
from conan.tools.build import check_min_cppstd
from conan.tools.layout import basic_layout
from conan.tools.meson import Meson, MesonToolchain
from conan.tools.microsoft import is_msvc
from conan.tools.scm import Version
import os
required_conan_version = ">=1.53.0"
class InihConan(ConanFile):
    """Conan recipe for the inih .INI parser (C core + optional C++ INIReader)."""
    name = "inih"
    description = "Simple .INI file parser in C, good for embedded systems "
    license = "BSD-3-Clause"
    topics = ("ini", "configuration", "parser")
    homepage = "https://github.com/benhoyt/inih"
    url = "https://github.com/conan-io/conan-center-index"
    package_type = "library"
    settings = "os", "arch", "compiler", "build_type"
    options = {
        "shared": [True, False],
        "fPIC": [True, False],
        "with_inireader": [True, False],
    }
    default_options = {
        "shared": False,
        "fPIC": True,
        "with_inireader": True,
    }

    @property
    def _min_cppstd(self):
        # minimum C++ standard required by INIReader since inih 57
        return 11

    def config_options(self):
        if self.settings.os == "Windows":
            del self.options.fPIC

    def configure(self):
        if self.options.shared:
            self.options.rm_safe("fPIC")
        # INIReader is written in C++
        if not self.options.with_inireader:
            self.settings.rm_safe("compiler.libcxx")
            self.settings.rm_safe("compiler.cppstd")

    def layout(self):
        basic_layout(self, src_folder="src")

    def validate(self):
        # since 57, INIReader requires C++11
        if Version(self.version) >= "57" and self.options.with_inireader and \
            self.settings.compiler.get_safe("cppstd"):
            check_min_cppstd(self, self._min_cppstd)
        if self.options.shared and is_msvc(self):
            raise ConanInvalidConfiguration("Shared inih is not supported with msvc")

    def build_requirements(self):
        self.tool_requires("meson/1.1.1")

    def source(self):
        get(self, **self.conan_data["sources"][self.version], strip_root=True)

    def generate(self):
        env = VirtualBuildEnv(self)
        env.generate()
        tc = MesonToolchain(self)
        tc.project_options["distro_install"] = True
        tc.project_options["with_INIReader"] = self.options.with_inireader
        # since 57, INIReader requires C++11
        if Version(self.version) >= "57" and not is_msvc(self):
            tc.cpp_args.append("-std=c++11")
        tc.generate()

    def build(self):
        meson = Meson(self)
        meson.configure()
        meson.build()

    def METHOD_NAME(self):
        # install via meson, then normalize library names/install names
        copy(self, "LICENSE.txt", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
        meson = Meson(self)
        meson.install()
        rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig"))
        fix_apple_shared_install_name(self)
        fix_msvc_libname(self)

    def package_info(self):
        self.cpp_info.set_property("pkg_config_name", "INIReader")
        self.cpp_info.components["libinih"].set_property("pkg_config_name", "inih")
        self.cpp_info.components["libinih"].libs = ["inih"]
        if self.options.with_inireader:
            self.cpp_info.components["inireader"].set_property("pkg_config_name", "INIReader")
            self.cpp_info.components["inireader"].libs = ["INIReader"]
            self.cpp_info.components["inireader"].requires = ["libinih"]
def fix_msvc_libname(conanfile, remove_lib_prefix=True):
    """remove lib prefix & change extension to .lib in case of cl like compiler"""
    from conan.tools.files import rename
    import glob
    # only MSVC-runtime compilers need the rename
    if not conanfile.settings.get_safe("compiler.runtime"):
        return
    # NOTE(review): upstream recipes read conanfile.cpp.package here; confirm
    # METHOD_NAME resolves to the package layout attribute
    libdirs = getattr(conanfile.cpp.METHOD_NAME, "libdirs")
    for libdir in libdirs:
        for ext in [".dll.a", ".dll.lib", ".a"]:
            full_folder = os.path.join(conanfile.package_folder, libdir)
            for filepath in glob.glob(os.path.join(full_folder, f"*{ext}")):
                libname = os.path.basename(filepath)[0:-len(ext)]
                if remove_lib_prefix and libname[0:3] == "lib":
                    libname = libname[3:]
                rename(conanfile, filepath, os.path.join(os.path.dirname(filepath), f"{libname}.lib"))
5,165 | test adjust no op | # Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from absl.testing import parameterized
from keras_cv import core
from keras_cv.layers import preprocessing
from keras_cv.tests.test_case import TestCase
class RandomHueTest(TestCase):
def test_preserves_output_shape(self):
    """RandomHue must keep the image shape while altering pixel values."""
    images = tf.random.uniform(shape=(4, 8, 8, 3)) * 255.0
    layer = preprocessing.RandomHue(factor=(0.3, 0.8), value_range=(0, 255))
    result = layer(images)
    self.assertEqual(images.shape, result.shape)
    self.assertNotAllClose(images, result)
def METHOD_NAME(self):
    """A factor of (0, 0) must leave the image unchanged."""
    images = tf.random.uniform(shape=(4, 8, 8, 3)) * 255.0
    no_op_layer = preprocessing.RandomHue(factor=(0.0, 0.0), value_range=(0, 255))
    result = no_op_layer(images)
    self.assertAllClose(images, result, atol=1e-5, rtol=1e-5)
def test_adjust_full_opposite_hue(self):
image_shape = (4, 8, 8, 3)
image = tf.random.uniform(shape=image_shape) * 255.0
layer = preprocessing.RandomHue(factor=(1.0, 1.0), value_range=(0, 255))
output = layer(image)
channel_max = tf.math.reduce_max(output, axis=-1)
channel_min = tf.math.reduce_min(output, axis=-1)
# Make sure the max and min channel are the same between input and
# output. In the meantime, and channel will swap between each other.
self.assertAllClose(
channel_max,
tf.math.reduce_max(image, axis=-1),
atol=1e-5,
rtol=1e-5,
)
self.assertAllClose(
channel_min,
tf.math.reduce_min(image, axis=-1),
atol=1e-5,
rtol=1e-5,
)
@parameterized.named_parameters(
("025", 0.25), ("05", 0.5), ("075", 0.75), ("100", 1.0)
)
def test_adjusts_all_values_for_factor(self, factor):
image_shape = (4, 8, 8, 3)
# Value range (0, 100)
image = tf.random.uniform(shape=image_shape) * 100.0
layer = preprocessing.RandomHue(
factor=(factor, factor), value_range=(0, 255)
)
output = layer(image)
self.assertNotAllClose(image, output, atol=1e-5, rtol=1e-5)
def test_adjustment_for_non_rgb_value_range(self):
image_shape = (4, 8, 8, 3)
# Value range (0, 100)
image = tf.random.uniform(shape=image_shape) * 100.0
layer = preprocessing.RandomHue(factor=(0.0, 0.0), value_range=(0, 255))
output = layer(image)
self.assertAllClose(image, output, atol=1e-5, rtol=1e-5)
layer = preprocessing.RandomHue(factor=(0.3, 0.8), value_range=(0, 255))
output = layer(image)
self.assertNotAllClose(image, output)
def test_with_uint8(self):
image_shape = (4, 8, 8, 3)
image = tf.cast(
tf.random.uniform(shape=image_shape) * 255.0, dtype=tf.uint8
)
layer = preprocessing.RandomHue(factor=(0.0, 0.0), value_range=(0, 255))
output = layer(image)
self.assertAllClose(image, output, atol=1e-5, rtol=1e-5)
layer = preprocessing.RandomHue(factor=(0.3, 0.8), value_range=(0, 255))
output = layer(image)
self.assertNotAllClose(image, output)
def test_config(self):
layer = preprocessing.RandomHue(factor=(0.3, 0.8), value_range=(0, 255))
config = layer.get_config()
self.assertTrue(isinstance(config["factor"], core.UniformFactorSampler))
self.assertEqual(config["factor"].get_config()["lower"], 0.3)
self.assertEqual(config["factor"].get_config()["upper"], 0.8)
self.assertEqual(config["value_range"], (0, 255)) |
# This file is part of Checkbox.
#
# Copyright 2014 Canonical Ltd.
# Written by:
# Zygmunt Krynicki <zygmunt.krynicki@canonical.com>
#
# Checkbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3,
# as published by the Free Software Foundation.
#
# Checkbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Checkbox. If not, see <http://www.gnu.org/licenses/>.
"""
plainbox.impl.test_validation
=============================
Test definitions for plainbox.impl.validation module
"""
from unittest import TestCase
from plainbox.impl.validation import ValidationError
from plainbox.impl.validation import Issue
from plainbox.vendor import mock
class ValidationErrorTests(TestCase):
    """Smoke tests for str()/repr() of ValidationError."""

    def test_smoke__no_hint(self):
        err = ValidationError('field', 'problem')
        self.assertEqual(str(err), "Problem with field field: problem")
        self.assertEqual(repr(err), (
            "ValidationError("
            "field='field', problem='problem', hint=None, origin=None)"))

    def test_smoke__hint(self):
        err = ValidationError('field', 'problem', 'hint')
        self.assertEqual(str(err), "Problem with field field: problem")
        self.assertEqual(repr(err), (
            "ValidationError("
            "field='field', problem='problem', hint='hint', origin=None)"))

    def test_smoke__origin(self):
        # Renamed from a placeholder so the runner collects it; follows the
        # naming of the two sibling smoke tests above.
        err = ValidationError('field', 'problem', origin='origin')
        self.assertEqual(str(err), "Problem with field field: problem")
        self.assertEqual(repr(err), (
            "ValidationError("
            "field='field', problem='problem', hint=None, origin='origin')"))
class IssueTests(TestCase):
    """Tests for the Issue class: construction, str/repr, and relative_to()."""
    def setUp(self):
        # All collaborators are MagicMocks; only their identity and
        # str/repr behavior is exercised by these tests.
        self.message = mock.MagicMock(name='message')
        self.severity = mock.MagicMock(name='severity')
        self.kind = mock.MagicMock(name='kind')
        self.origin = mock.MagicMock(name='origin')
        self.issue = Issue(self.message, self.severity, self.kind, self.origin)
    def test_init(self):
        # The constructor must store each argument unmodified (identity check).
        self.assertIs(self.issue.message, self.message)
        self.assertIs(self.issue.severity, self.severity)
        self.assertIs(self.issue.kind, self.kind)
        self.assertIs(self.issue.origin, self.origin)
    def test_str__with_origin(self):
        # With an origin present, str() is "<severity>: <origin>: <message>".
        self.message.__str__.return_value = '<message>'
        self.origin.__str__.return_value = '<origin>'
        self.kind.__str__.return_value = '<kind>'
        self.severity.__str__.return_value = '<severity>'
        self.assertEqual(str(self.issue), "<severity>: <origin>: <message>")
    def test_str__without_origin(self):
        # Without an origin, the origin segment is omitted from str().
        self.issue.origin = None
        self.message.__str__.return_value = '<message>'
        self.kind.__str__.return_value = '<kind>'
        self.severity.__str__.return_value = '<severity>'
        self.assertEqual(str(self.issue), "<severity>: <message>")
    def test_repr__with_origin(self):
        # repr() must include all four fields in keyword form.
        self.message.__repr__ = lambda mock: '(message)'
        self.origin.__repr__ = lambda mock: '(origin)'
        self.kind.__repr__ = lambda mock: '(kind)'
        self.severity.__repr__ = lambda mock: '(severity)'
        self.assertEqual(
            repr(self.issue), (
                'Issue(message=(message), severity=(severity),'
                ' kind=(kind), origin=(origin))'))
    def test_relative_to__with_origin(self):
        # relative_to(path) delegates to origin.relative_to(path) and keeps
        # the remaining fields identical.
        path = 'path'
        issue2 = self.issue.relative_to(path)
        self.issue.origin.relative_to.assert_called_with(path)
        self.assertIs(self.issue.message, issue2.message)
        self.assertIs(self.issue.severity, issue2.severity)
        self.assertIs(self.issue.kind, issue2.kind)
        self.assertIs(self.issue.origin.relative_to(path), issue2.origin)
    def test_relative_to__without_origin(self):
        # With no origin, relative_to() carries None through unchanged.
        path = 'path'
        self.issue.origin = None
        issue2 = self.issue.relative_to(path)
        self.assertIs(issue2.message, self.issue.message)
        self.assertIs(issue2.severity, self.issue.severity)
        self.assertIs(issue2.kind, self.issue.kind)
        self.assertIs(issue2.origin, None)
from __future__ import annotations
import abc
from localstack.aws.api.stepfunctions import (
HistoryEventExecutionDataDetails,
HistoryEventType,
TaskScheduledEventDetails,
TaskStartedEventDetails,
TaskSucceededEventDetails,
TaskTimedOutEventDetails,
)
from localstack.services.stepfunctions.asl.component.common.error_name.failure_event import (
FailureEvent,
)
from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name import (
StatesErrorName,
)
from localstack.services.stepfunctions.asl.component.common.error_name.states_error_name_type import (
StatesErrorNameType,
)
from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.resource import (
ServiceResource,
)
from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.state_task import (
StateTask,
)
from localstack.services.stepfunctions.asl.eval.environment import Environment
from localstack.services.stepfunctions.asl.eval.event.event_detail import EventDetails
from localstack.services.stepfunctions.asl.utils.encoding import to_json_str
# TODO: improve on factory constructor (don't use SubtypeManager)
class StateTaskService(StateTask, abc.ABC):
resource: ServiceResource
def _get_sfn_resource(self) -> str:
return self.resource.api_action
def _get_sfn_resource_type(self) -> str:
return self.resource.service_name
def METHOD_NAME(self) -> FailureEvent:
return FailureEvent(
error_name=StatesErrorName(typ=StatesErrorNameType.StatesTimeout),
event_type=HistoryEventType.TaskTimedOut,
event_details=EventDetails(
taskTimedOutEventDetails=TaskTimedOutEventDetails(
resourceType=self._get_sfn_resource_type(),
resource=self._get_sfn_resource(),
error=StatesErrorNameType.StatesTimeout.to_name(),
)
),
)
@abc.abstractmethod
def _eval_service_task(self, env: Environment, parameters: dict):
...
def _before_eval_execution(self, env: Environment, parameters: dict) -> None:
parameters_str = to_json_str(parameters)
scheduled_event_details = TaskScheduledEventDetails(
resource=self._get_sfn_resource(),
resourceType=self._get_sfn_resource_type(),
region=self.resource.region,
parameters=parameters_str,
)
if not self.timeout.is_default_value():
self.timeout.eval(env=env)
timeout_seconds = env.stack.pop()
scheduled_event_details["timeoutInSeconds"] = timeout_seconds
if self.heartbeat is not None:
self.heartbeat.eval(env=env)
heartbeat_seconds = env.stack.pop()
scheduled_event_details["heartbeatInSeconds"] = heartbeat_seconds
env.event_history.add_event(
hist_type_event=HistoryEventType.TaskScheduled,
event_detail=EventDetails(taskScheduledEventDetails=scheduled_event_details),
)
env.event_history.add_event(
hist_type_event=HistoryEventType.TaskStarted,
event_detail=EventDetails(
taskStartedEventDetails=TaskStartedEventDetails(
resource=self._get_sfn_resource(), resourceType=self._get_sfn_resource_type()
)
),
)
def _after_eval_execution(self, env: Environment) -> None:
output = env.stack[-1]
env.event_history.add_event(
hist_type_event=HistoryEventType.TaskSucceeded,
event_detail=EventDetails(
taskSucceededEventDetails=TaskSucceededEventDetails(
resource=self._get_sfn_resource(),
resourceType=self._get_sfn_resource_type(),
output=to_json_str(output),
outputDetails=HistoryEventExecutionDataDetails(truncated=False),
)
),
)
def _eval_execution(self, env: Environment) -> None:
parameters = self._eval_parameters(env=env)
self._before_eval_execution(env=env, parameters=parameters)
normalised_parameters = self._normalised_parameters_bindings(parameters)
self._eval_service_task(env=env, parameters=normalised_parameters)
self._after_eval_execution(env=env)
@classmethod
def for_service(cls, service_name: str) -> StateTaskService:
match service_name:
case "aws-sdk":
from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.state_task_service_aws_sdk import (
StateTaskServiceAwsSdk,
)
return StateTaskServiceAwsSdk()
case "lambda":
from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.state_task_service_lambda import (
StateTaskServiceLambda,
)
return StateTaskServiceLambda()
case "sqs":
from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.state_task_service_sqs import (
StateTaskServiceSqs,
)
return StateTaskServiceSqs()
case "states":
from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.state_task_service_sfn import (
StateTaskServiceSfn,
)
return StateTaskServiceSfn()
case "dynamodb":
from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.state_task_service_dynamodb import (
StateTaskServiceDynamoDB,
)
return StateTaskServiceDynamoDB()
case "apigateway":
from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.state_task_service_api_gateway import (
StateTaskServiceApiGateway,
)
return StateTaskServiceApiGateway()
case "sns":
from localstack.services.stepfunctions.asl.component.state.state_execution.state_task.service.state_task_service_sns import (
StateTaskServiceSns,
)
return StateTaskServiceSns()
case unknown:
raise NotImplementedError(f"Unsupported service: '{unknown}'.") # noqa |
# Copyright (c) Facebook, Inc. and its affiliates.
"""
An awesome colormap for really neat visualizations.
Copied from Detectron, and removed gray colors.
"""
import numpy as np
import random
__all__ = ["colormap", "random_color", "random_colors"]
# fmt: off
# RGB:
_COLORS = np.array(
[
0.000, 0.447, 0.741,
0.850, 0.325, 0.098,
0.929, 0.694, 0.125,
0.494, 0.184, 0.556,
0.466, 0.674, 0.188,
0.301, 0.745, 0.933,
0.635, 0.078, 0.184,
0.300, 0.300, 0.300,
0.600, 0.600, 0.600,
1.000, 0.000, 0.000,
1.000, 0.500, 0.000,
0.749, 0.749, 0.000,
0.000, 1.000, 0.000,
0.000, 0.000, 1.000,
0.667, 0.000, 1.000,
0.333, 0.333, 0.000,
0.333, 0.667, 0.000,
0.333, 1.000, 0.000,
0.667, 0.333, 0.000,
0.667, 0.667, 0.000,
0.667, 1.000, 0.000,
1.000, 0.333, 0.000,
1.000, 0.667, 0.000,
1.000, 1.000, 0.000,
0.000, 0.333, 0.500,
0.000, 0.667, 0.500,
0.000, 1.000, 0.500,
0.333, 0.000, 0.500,
0.333, 0.333, 0.500,
0.333, 0.667, 0.500,
0.333, 1.000, 0.500,
0.667, 0.000, 0.500,
0.667, 0.333, 0.500,
0.667, 0.667, 0.500,
0.667, 1.000, 0.500,
1.000, 0.000, 0.500,
1.000, 0.333, 0.500,
1.000, 0.667, 0.500,
1.000, 1.000, 0.500,
0.000, 0.333, 1.000,
0.000, 0.667, 1.000,
0.000, 1.000, 1.000,
0.333, 0.000, 1.000,
0.333, 0.333, 1.000,
0.333, 0.667, 1.000,
0.333, 1.000, 1.000,
0.667, 0.000, 1.000,
0.667, 0.333, 1.000,
0.667, 0.667, 1.000,
0.667, 1.000, 1.000,
1.000, 0.000, 1.000,
1.000, 0.333, 1.000,
1.000, 0.667, 1.000,
0.333, 0.000, 0.000,
0.500, 0.000, 0.000,
0.667, 0.000, 0.000,
0.833, 0.000, 0.000,
1.000, 0.000, 0.000,
0.000, 0.167, 0.000,
0.000, 0.333, 0.000,
0.000, 0.500, 0.000,
0.000, 0.667, 0.000,
0.000, 0.833, 0.000,
0.000, 1.000, 0.000,
0.000, 0.000, 0.167,
0.000, 0.000, 0.333,
0.000, 0.000, 0.500,
0.000, 0.000, 0.667,
0.000, 0.000, 0.833,
0.000, 0.000, 1.000,
0.000, 0.000, 0.000,
0.143, 0.143, 0.143,
0.857, 0.857, 0.857,
1.000, 1.000, 1.000
]
).astype(np.float32).reshape(-1, 3)
# fmt: on
def colormap(rgb=False, maximum=255):
    """
    Return the full color table.

    Named ``colormap`` to match the public API declared in ``__all__``.

    Args:
        rgb (bool): whether to return RGB colors or BGR colors.
        maximum (int): either 255 or 1

    Returns:
        ndarray: a float32 array of Nx3 colors, in range [0, 255] or [0, 1]
    """
    assert maximum in [255, 1], maximum
    c = _COLORS * maximum
    if not rgb:
        # RGB -> BGR by reversing the channel axis.
        c = c[:, ::-1]
    return c
def random_color(rgb=False, maximum=255):
    """
    Pick one color uniformly at random from the color table.

    Args:
        rgb (bool): whether to return RGB colors or BGR colors.
        maximum (int): either 255 or 1

    Returns:
        ndarray: a vector of 3 numbers
    """
    choice = _COLORS[np.random.randint(0, len(_COLORS))] * maximum
    return choice if rgb else choice[::-1]
def random_colors(N, rgb=False, maximum=255):
    """
    Pick N distinct colors at random from the color table.

    Args:
        N (int): number of unique colors needed
        rgb (bool): whether to return RGB colors or BGR colors.
        maximum (int): either 255 or 1

    Returns:
        ndarray: a list of random_color
    """
    picks = random.sample(range(len(_COLORS)), N)
    colors = [_COLORS[i] * maximum for i in picks]
    return colors if rgb else [c[::-1] for c in colors]
if __name__ == "__main__":
    # Visual smoke test: tile the color table onto a canvas and display it.
    import cv2

    size = 100  # pixel size of each color swatch
    H, W = 10, 10  # grid of swatches
    canvas = np.random.rand(H * size, W * size, 3).astype("float32")
    for h in range(H):
        for w in range(W):
            idx = h * W + w
            if idx >= len(_COLORS):
                break
            canvas[h * size : (h + 1) * size, w * size : (w + 1) * size] = _COLORS[idx]
    cv2.imshow("a", canvas)
    cv2.waitKey(0)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, unused-argument
"""1D convolution operators."""
from .conv2d import conv
def conv1d(
    data,
    kernel,
    strides=1,
    padding="VALID",
    dilation=1,
    data_layout="NCW",
    kernel_layout="",
    out_dtype=None,
):
    """1D convolution forward operator.

    Parameters
    ----------
    data : tvm.te.Tensor
        3-D input: [batch, in_channel, in_width] when data_layout == 'NCW',
        [batch, in_width, in_channel] when data_layout == 'NWC'.
    kernel : tvm.te.Tensor
        3-D kernel: [num_filter, in_channel, filter_size] when kernel_layout == 'OIW',
        [filter_size, in_channel, num_filter] when kernel_layout == 'WIO'.
    strides : int or tuple
        The spatial stride along width.
    padding : int or str
        Padding size, or ['VALID', 'SAME'].
    dilation : int or tuple
        Dilation rate if convolution should be dilated.
    data_layout : str
        How input data is laid out, one of ['NCW', 'NWC'].
    kernel_layout : Optional[str]
        Kernel layout; if empty, the default for data_layout is used
        ("OIW" for "NCW", "WIO" for "NWC").
    out_dtype : str
        The output data type. If None then output is same type as input.
    """
    # A plain 1D convolution is a grouped convolution with a single group.
    return conv(
        data, kernel, strides, padding, dilation, 1, data_layout, kernel_layout, out_dtype
    )
def conv1d_nwc(data, kernel, strides=1, padding="VALID", dilation=1, out_dtype=None):
    """Single-group 1D convolution in NWC layout.

    See :py:func:`conv` for details on parameters.
    """
    return conv(
        data, kernel, strides, padding, dilation, 1, "NWC", "WIO", out_dtype=out_dtype
    )
def conv1d_ncw(data, kernel, strides=1, padding="VALID", dilation=1, out_dtype=None):
    """Single-group 1D convolution in NCW layout.

    See :py:func:`conv` for details on parameters.
    """
    return conv(
        data, kernel, strides, padding, dilation, 1, "NCW", "OIW", out_dtype=out_dtype
    )
def group_conv1d_nwc(
    data, kernel, strides=1, padding="VALID", dilation=1, groups=1, out_dtype=None
):
    """Grouped 1D convolution forward operator for NWC layout.

    Parameters
    ----------
    data : tvm.te.Tensor
        3-D with shape [batch, in_width, in_channel].
    kernel : tvm.te.Tensor
        3-D with shape [filter_size, in_channel, num_filter].
    strides : int or tuple
        The spatial stride along width.
    padding : int, tuple, or str
        An integer for equal padding, a (left, right) tuple,
        or one of ['VALID', 'SAME'].
    dilation : int or tuple
        Dilation rate if convolution should be dilated.
    groups : int
        Number of groups.
    out_dtype : str
        The output data type. If None then output is same type as input.
    """
    return conv(
        data, kernel, strides, padding, dilation, groups, "NWC", "WIO", out_dtype=out_dtype
    )
def group_conv1d_ncw(
    data, kernel, strides=1, padding="VALID", dilation=1, groups=1, out_dtype=None
):
    """1D convolution forward operator for NCW layout.

    Named ``group_conv1d_ncw`` to mirror the sibling ``group_conv1d_nwc``.

    Parameters
    ----------
    data : tvm.te.Tensor
        3-D with shape [batch, in_channel, in_width]
    kernel : tvm.te.Tensor
        3-D with shape [num_filter, in_channel, filter_size]
    strides : int or tuple
        The spatial stride along width
    padding : int, tuple, or str
        Padding size can be an integer for equal padding,
        a tuple of (left, right) or a string in ['VALID', 'SAME'].
    dilation : int or tuple
        Dilation rate if convolution should be dilated.
    groups : int
        Number of groups
    out_dtype : str
        The output data type. If None then output is same type as input.
    """
    return conv(data, kernel, strides, padding, dilation, groups, "NCW", "OIW", out_dtype=out_dtype)
"""Handles incoming cloudtrail requests, invokes methods, returns responses."""
import json
from typing import Any, Dict
from moto.core.responses import BaseResponse
from .models import cloudtrail_backends, CloudTrailBackend
from .exceptions import InvalidParameterCombinationException
class CloudTrailResponse(BaseResponse):
    """Handler for CloudTrail requests and responses.

    Each public method handles the CloudTrail API action of the same name
    (snake_case of the action), delegating to the region-specific backend.
    """

    def __init__(self) -> None:
        super().__init__(service_name="cloudtrail")

    @property
    def cloudtrail_backend(self) -> CloudTrailBackend:
        """Return backend instance specific for this region."""
        return cloudtrail_backends[self.current_account][self.region]

    def create_trail(self) -> str:
        name = self._get_param("Name")
        bucket_name = self._get_param("S3BucketName")
        is_global = self._get_bool_param("IncludeGlobalServiceEvents", True)
        is_multi_region = self._get_bool_param("IsMultiRegionTrail", False)
        if not is_global and is_multi_region:
            raise InvalidParameterCombinationException(
                "Multi-Region trail must include global service events."
            )
        s3_key_prefix = self._get_param("S3KeyPrefix")
        sns_topic_name = self._get_param("SnsTopicName")
        log_validation = self._get_bool_param("EnableLogFileValidation", False)
        is_org_trail = self._get_bool_param("IsOrganizationTrail", False)
        cw_log_group_arn = self._get_param("CloudWatchLogsLogGroupArn")
        cw_role_arn = self._get_param("CloudWatchLogsRoleArn")
        kms_key_id = self._get_param("KmsKeyId")
        tags_list = self._get_param("TagsList", [])
        trail = self.cloudtrail_backend.create_trail(
            name,
            bucket_name,
            s3_key_prefix,
            sns_topic_name,
            is_global,
            is_multi_region,
            log_validation,
            is_org_trail,
            cw_log_group_arn,
            cw_role_arn,
            kms_key_id,
            tags_list,
        )
        return json.dumps(trail.description())

    def get_trail(self) -> str:
        name = self._get_param("Name")
        trail = self.cloudtrail_backend.get_trail(name)
        return json.dumps({"Trail": trail.description()})

    def get_trail_status(self) -> str:
        name = self._get_param("Name")
        status = self.cloudtrail_backend.get_trail_status(name)
        return json.dumps(status.description())

    def describe_trails(self) -> str:
        include_shadow_trails = self._get_bool_param("includeShadowTrails", True)
        trails = self.cloudtrail_backend.describe_trails(include_shadow_trails)
        return json.dumps(
            {"trailList": [t.description(include_region=True) for t in trails]}
        )

    def list_trails(self) -> str:
        all_trails = self.cloudtrail_backend.list_trails()
        return json.dumps({"Trails": [t.short() for t in all_trails]})

    def start_logging(self) -> str:
        name = self._get_param("Name")
        self.cloudtrail_backend.start_logging(name)
        return json.dumps({})

    def stop_logging(self) -> str:
        name = self._get_param("Name")
        self.cloudtrail_backend.stop_logging(name)
        return json.dumps({})

    def delete_trail(self) -> str:
        name = self._get_param("Name")
        self.cloudtrail_backend.delete_trail(name)
        return json.dumps({})

    def update_trail(self) -> str:
        name = self._get_param("Name")
        s3_bucket_name = self._get_param("S3BucketName")
        s3_key_prefix = self._get_param("S3KeyPrefix")
        sns_topic_name = self._get_param("SnsTopicName")
        include_global_service_events = self._get_param("IncludeGlobalServiceEvents")
        is_multi_region_trail = self._get_param("IsMultiRegionTrail")
        enable_log_file_validation = self._get_param("EnableLogFileValidation")
        is_organization_trail = self._get_param("IsOrganizationTrail")
        cw_log_group_arn = self._get_param("CloudWatchLogsLogGroupArn")
        cw_role_arn = self._get_param("CloudWatchLogsRoleArn")
        kms_key_id = self._get_param("KmsKeyId")
        trail = self.cloudtrail_backend.update_trail(
            name=name,
            s3_bucket_name=s3_bucket_name,
            s3_key_prefix=s3_key_prefix,
            sns_topic_name=sns_topic_name,
            include_global_service_events=include_global_service_events,
            is_multi_region_trail=is_multi_region_trail,
            enable_log_file_validation=enable_log_file_validation,
            is_organization_trail=is_organization_trail,
            cw_log_group_arn=cw_log_group_arn,
            cw_role_arn=cw_role_arn,
            kms_key_id=kms_key_id,
        )
        return json.dumps(trail.description())

    def put_event_selectors(self) -> str:
        params = json.loads(self.body)
        trail_name = params.get("TrailName")
        event_selectors = params.get("EventSelectors")
        advanced_event_selectors = params.get("AdvancedEventSelectors")
        (
            trail_arn,
            event_selectors,
            advanced_event_selectors,
        ) = self.cloudtrail_backend.put_event_selectors(
            trail_name=trail_name,
            event_selectors=event_selectors,
            advanced_event_selectors=advanced_event_selectors,
        )
        return json.dumps(
            dict(
                TrailARN=trail_arn,
                EventSelectors=event_selectors,
                AdvancedEventSelectors=advanced_event_selectors,
            )
        )

    def get_event_selectors(self) -> str:
        params = json.loads(self.body)
        trail_name = params.get("TrailName")
        (
            trail_arn,
            event_selectors,
            advanced_event_selectors,
        ) = self.cloudtrail_backend.get_event_selectors(trail_name=trail_name)
        return json.dumps(
            dict(
                TrailARN=trail_arn,
                EventSelectors=event_selectors,
                AdvancedEventSelectors=advanced_event_selectors,
            )
        )

    def add_tags(self) -> str:
        params = json.loads(self.body)
        resource_id = params.get("ResourceId")
        tags_list = params.get("TagsList")
        self.cloudtrail_backend.add_tags(resource_id=resource_id, tags_list=tags_list)
        return json.dumps(dict())

    def remove_tags(self) -> str:
        resource_id = self._get_param("ResourceId")
        tags_list = self._get_param("TagsList")
        self.cloudtrail_backend.remove_tags(
            resource_id=resource_id, tags_list=tags_list
        )
        return json.dumps(dict())

    def list_tags(self) -> str:
        """ListTags: return the tags attached to each requested resource ARN."""
        # Renamed from a placeholder to match the snake_case action naming
        # used by every other handler. NOTE(review): the backend method is
        # assumed to carry the same name — confirm against the backend class.
        params = json.loads(self.body)
        resource_id_list = params.get("ResourceIdList")
        resource_tag_list = self.cloudtrail_backend.list_tags(
            resource_id_list=resource_id_list
        )
        return json.dumps(dict(ResourceTagList=resource_tag_list))

    def put_insight_selectors(self) -> str:
        trail_name = self._get_param("TrailName")
        insight_selectors = self._get_param("InsightSelectors")
        trail_arn, insight_selectors = self.cloudtrail_backend.put_insight_selectors(
            trail_name=trail_name, insight_selectors=insight_selectors
        )
        return json.dumps(dict(TrailARN=trail_arn, InsightSelectors=insight_selectors))

    def get_insight_selectors(self) -> str:
        trail_name = self._get_param("TrailName")
        trail_arn, insight_selectors = self.cloudtrail_backend.get_insight_selectors(
            trail_name=trail_name
        )
        resp: Dict[str, Any] = {"TrailARN": trail_arn}
        if insight_selectors:
            # InsightSelectors is omitted entirely when none are configured.
            resp["InsightSelectors"] = insight_selectors
        return json.dumps(resp)
import json
import logging
import uuid
from typing import Dict, List
from localstack.aws.connect import connect_to
from localstack.utils.aws import arns
from localstack.utils.aws.aws_models import LambdaFunction
from localstack.utils.strings import convert_to_printable_chars, first_char_to_upper
LOG = logging.getLogger(__name__)
def sns_error_to_dead_letter_queue(
    sns_subscriber: dict,
    message: str,
    error: str,
    **kwargs,
):
    """Forward a failed SNS delivery to the subscription's redrive DLQ, if configured.

    :param sns_subscriber: subscription attributes; ``RedrivePolicy`` (JSON string)
        may name a ``deadLetterTargetArn``
    :param message: the original message body
    :param error: error description forwarded as a message attribute
    :param kwargs: only MessageAttributes, MessageGroupId and
        MessageDeduplicationId are accepted; anything else aborts the publish
    :return: the DLQ ARN on success, or None if no DLQ is configured
    """
    policy = json.loads(sns_subscriber.get("RedrivePolicy") or "{}")
    target_arn = policy.get("deadLetterTargetArn")
    if not target_arn:
        # No DLQ configured for this subscription — nothing to do.
        return
    if not_supported := (
        set(kwargs) - {"MessageAttributes", "MessageGroupId", "MessageDeduplicationId"}
    ):
        # Unknown extras are rejected outright rather than silently dropped.
        LOG.warning(
            "Not publishing to the DLQ - invalid arguments passed to the DLQ '%s'", not_supported
        )
        return
    event = {
        "message": message,
        **kwargs,
    }
    return _send_to_dead_letter_queue(sns_subscriber["SubscriptionArn"], target_arn, event, error)
def lambda_error_to_dead_letter_queue(func_details: LambdaFunction, event: Dict, error):
    """Forward a failed Lambda invocation to the function's configured DLQ, if any.

    Renamed from a placeholder to mirror ``sns_error_to_dead_letter_queue``.
    """
    dlq_arn = (func_details.dead_letter_config or {}).get("TargetArn")
    source_arn = func_details.id
    _send_to_dead_letter_queue(source_arn, dlq_arn, event, error)
def _send_to_dead_letter_queue(source_arn: str, dlq_arn: str, event: Dict, error, role: str = None):
    """Deliver a failure event to an SQS or SNS dead-letter target.

    :param source_arn: ARN of the failing resource (SQS/SNS/Lambda), used for
        message formatting and request metadata
    :param dlq_arn: target queue/topic ARN; no-op if falsy
    :param event: payload turned into DLQ messages by ``_prepare_messages_to_dlq``
    :param error: error object/description folded into the message attributes
    :param role: optional IAM role ARN to assume for the delivery
    :return: the DLQ ARN that was targeted (also for unsupported targets)
    :raises Exception: if the SQS batch send fails or returns a >=400 status
    """
    if not dlq_arn:
        return
    LOG.info("Sending failed execution %s to dead letter queue %s", source_arn, dlq_arn)
    messages = _prepare_messages_to_dlq(source_arn, event, error)
    source_service = arns.extract_service_from_arn(source_arn)
    region = arns.extract_region_from_arn(dlq_arn)
    if role:
        clients = connect_to.with_assumed_role(
            role_arn=role, service_principal=source_service, region_name=region
        )
    else:
        clients = connect_to(region_name=region)
    if ":sqs:" in dlq_arn:
        queue_url = arns.get_sqs_queue_url(dlq_arn)
        sqs_client = clients.sqs.request_metadata(
            source_arn=source_arn, service_principal=source_service
        )
        # NOTE: `error` is deliberately rebound here to capture a send failure;
        # the original argument has already been folded into `messages`.
        error = None
        result_code = None
        try:
            result = sqs_client.send_message_batch(QueueUrl=queue_url, Entries=messages)
            result_code = result.get("ResponseMetadata", {}).get("HTTPStatusCode")
        except Exception as e:
            error = e
        if error or not result_code or result_code >= 400:
            msg = "Unable to send message to dead letter queue %s (code %s): %s" % (
                queue_url,
                result_code,
                error,
            )
            if "InvalidMessageContents" in str(error):
                # Include the payload to aid debugging of content errors.
                msg += f" - messages: {messages}"
            LOG.info(msg)
            raise Exception(msg)
    elif ":sns:" in dlq_arn:
        sns_client = clients.sns.request_metadata(
            source_arn=source_arn, service_principal=source_service
        )
        for message in messages:
            sns_client.publish(
                TopicArn=dlq_arn,
                Message=message["MessageBody"],
                MessageAttributes=message["MessageAttributes"],
            )
    else:
        LOG.warning("Unsupported dead letter queue type: %s", dlq_arn)
    return dlq_arn
def _prepare_messages_to_dlq(source_arn: str, event: Dict, error) -> List[Dict]:
    """Build the list of DLQ message entries for a failed SQS/SNS/Lambda event.

    The source service is inferred from *source_arn*; each branch shapes the
    message(s) accordingly and attaches RequestID/ErrorCode/ErrorMessage
    attributes.
    """
    messages = []
    custom_attrs = {
        "RequestID": {"DataType": "String", "StringValue": str(uuid.uuid4())},
        "ErrorCode": {"DataType": "String", "StringValue": "200"},
        "ErrorMessage": {"DataType": "String", "StringValue": str(error)},
    }
    if ":sqs:" in source_arn:
        # assumes `error` carries a `.result` payload here — TODO confirm caller contract
        custom_attrs["ErrorMessage"]["StringValue"] = str(error.result)
        for record in event.get("Records", []):
            msg_attrs = message_attributes_to_upper(record.get("messageAttributes"))
            message_attrs = {**msg_attrs, **custom_attrs}
            messages.append(
                {
                    "Id": record.get("messageId"),
                    "MessageBody": record.get("body"),
                    "MessageAttributes": message_attrs,
                }
            )
    elif ":sns:" in source_arn:
        # event can also contain: MessageAttributes, MessageGroupId, MessageDeduplicationId
        message = {
            "Id": str(uuid.uuid4()),
            "MessageBody": event.pop("message"),
            **event,
        }
        messages.append(message)
    elif ":lambda:" in source_arn:
        custom_attrs["ErrorCode"]["DataType"] = "Number"
        # not sure about what type of error can come here
        try:
            error_message = json.loads(error.result)["errorMessage"]
            custom_attrs["ErrorMessage"]["StringValue"] = error_message
        except (ValueError, KeyError):
            # using old behaviour
            custom_attrs["ErrorMessage"]["StringValue"] = str(error)
        messages.append(
            {
                "Id": str(uuid.uuid4()),
                "MessageBody": json.dumps(event),
                "MessageAttributes": custom_attrs,
            }
        )
    # make sure we only have printable strings in the message attributes
    for message in messages:
        if message.get("MessageAttributes"):
            message["MessageAttributes"] = convert_to_printable_chars(message["MessageAttributes"])
    return messages
def message_attributes_to_upper(message_attrs: Dict) -> Dict:
    """Upper-case the first character of each attribute-detail key in place
    (e.g. ``stringValue`` -> ``StringValue``); returns the same mapping."""
    message_attrs = message_attrs or {}
    for attr in message_attrs.values():
        if not isinstance(attr, dict):
            continue
        for key in list(attr.keys()):
            attr[first_char_to_upper(key)] = attr.pop(key)
    return message_attrs
'''
Basic tests for timeseries
'''
from datetime import datetime
import numpy as np
from pytest import raises
from gnome.basic_types import datetime_value_2d
from gnome.utilities.timeseries import Timeseries, TimeseriesError
from ..conftest import testdata
wind_file = testdata['timeseries']['wind_ts']
def test_str():
    """The string form of a Timeseries should at least name the class."""
    rendered = str(Timeseries())
    assert 'Timeseries' in rendered
def test_filename():
    """A Timeseries built without a file reports a filename of None."""
    assert Timeseries().filename is None
def test_exceptions(invalid_rq):
    """
    Test TimeseriesError exception thrown if improper input arguments
    Test TypeError thrown if units are not given - so they are None
    """
    # valid timeseries for testing
    dtv = np.zeros((4, ), dtype=datetime_value_2d).view(dtype=np.recarray)
    dtv.time = [datetime(2012, 11, 0o6, 20, 10 + i, 30,) for i in range(4)]
    dtv.value = (1, 0)
    # Following also raises ValueError. This gives invalid (r,theta) inputs
    # which are rejected by the transforms.r_theta_to_uv_wind method.
    # It tests the inner exception is correct
    with raises(TimeseriesError):
        invalid_dtv_rq = np.zeros((len(invalid_rq['rq']), ),
                                  dtype=datetime_value_2d)
        invalid_dtv_rq['value'] = invalid_rq['rq']
        Timeseries(timeseries=invalid_dtv_rq, coord_sys='r-theta')
    # exception raised if datetime values are not in ascending order
    # or not unique
    with raises(TimeseriesError):
        # not unique datetime values
        dtv_rq = np.zeros((2, ),
                          dtype=datetime_value_2d).view(dtype=np.recarray)
        (dtv_rq.value[0])[:] = (1, 0)
        (dtv_rq.value[1])[:] = (1, 10)
        Timeseries(timeseries=dtv_rq)
    with raises(TimeseriesError):
        # not in ascending order
        dtv_rq = np.zeros((4, ),
                          dtype=datetime_value_2d).view(dtype=np.recarray)
        dtv_rq.value = (1, 0)
        dtv_rq.time[:len(dtv_rq) - 1] = [datetime(2012, 11, 0o6, 20, 10 + i, 30)
                                         for i in range(len(dtv_rq) - 1)]
        Timeseries(timeseries=dtv_rq)
def test_init(wind_timeseries):
    """Constructing from r-theta data should round-trip both coordinate systems."""
    rq = wind_timeseries['rq']
    uv = wind_timeseries['uv']
    series = Timeseries(rq, coord_sys='r-theta')
    assert np.all(series.get_timeseries()['time'] == rq['time'])
    assert np.allclose(series.get_timeseries(coord_sys='r-theta')['value'],
                       rq['value'],
                       atol=1e-10)
    assert np.allclose(series.get_timeseries()['value'],
                       uv['value'],
                       atol=1e-10)
def test_get_timeseries(wind_timeseries):
    """Every returned record is a (datetime64, two-component value) pair."""
    series = Timeseries(wind_timeseries['uv'], coord_sys='uv')
    records = series.get_timeseries()
    assert len(records) == 6
    for timestamp, components in records:
        assert type(timestamp) is np.datetime64
        assert len(components) == 2
def test_get_timeseries_multiple(wind_timeseries):
    """Requesting explicit datetimes yields one interpolated record apiece."""
    series = Timeseries(wind_timeseries['uv'], coord_sys='uv')
    wanted = [datetime(2012, 11, 6, 20, 12),
              datetime(2012, 11, 6, 20, 14)]
    records = series.get_timeseries(wanted)
    assert len(records) == 2
    # first requested time -> (-1, 0); second -> (0, 1)
    assert (records[0][1][0], records[0][1][1]) == (-1.0, 0.0)
    assert (records[1][1][0], records[1][1][1]) == (0.0, 1.0)
def test_empty():
    """
    can you create one with no data
    FixMe: why would you want to do this??
    """
    records = Timeseries().get_timeseries()
    # a default series holds exactly one all-zero record
    assert len(records) == 1
    value = records[0][1]
    assert value[0] == 0.0
    assert value[1] == 0.0
def test_set_timeseries_prop():
    '''
    set_timeseries requires a numpy array; a single (time, value) tuple
    is a 0-D input and must be promoted to a 1-D array internally.
    '''
    series = Timeseries(filename=wind_file)
    entry = (datetime.now().replace(microsecond=0, second=0), (4, 5))
    series.set_timeseries(entry)
    assert series.get_timeseries()['time'] == entry[0]
    assert np.allclose(series.get_timeseries()['value'], entry[1], atol=1e-6)
def test__eq():
    ''' only checks timeseries values match '''
    first = Timeseries(filename=wind_file)
    # a second series built from the first one's data compares equal
    second = Timeseries(timeseries=first.get_timeseries())
    assert first == second
def test_ne():
    ''' change timeseries '''
    first = Timeseries(filename=wind_file)
    data = first.get_timeseries()
    # perturb one record so the comparison must fail
    data[0]['value'] += (1, 1)
    assert first != Timeseries(timeseries=data)
def METHOD_NAME():
    """A (datetime, value) tuple passes the check; a string timestamp fails."""
    checker = Timeseries()  # need an instance to reach the check method
    assert checker._check_timeseries((datetime.now(), (1.0, 2.0)))
    with raises(TimeseriesError):
        checker._check_timeseries(('2007-03-01T13:00:00', (1.0, 2.0)))
def test__check_timeseries_single_value():
    """A 1-tuple of (datetime, value) records is accepted/rejected the same way."""
    checker = Timeseries()  # need an instance to reach the check method
    assert checker._check_timeseries(((datetime.now(), (1.0, 2.0)),))
    with raises(TimeseriesError):
        checker._check_timeseries((('2007-03-01T13:00:00', (1.0, 2.0)),))
def test__check_timeseries_wrong_tuple():
    """Values must have exactly two components."""
    checker = Timeseries()  # need an instance to reach the check method
    with raises(TimeseriesError):
        # three components: too many
        checker._check_timeseries(((datetime.now(), (1.0, 2.0, 3.0)),))
    with raises(TimeseriesError):
        # empty value: too few
        checker._check_timeseries(((datetime.now(), ()),))
5,173 | event loop task | """Contains the EventManager class. See :mod:`.EventManagerGlobal` for the
global eventMgr instance."""
__all__ = ['EventManager']
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.task.TaskManagerGlobal import taskMgr
from direct.showbase.MessengerGlobal import messenger
from panda3d.core import PStatCollector, EventQueue, EventHandler
from panda3d.core import ConfigVariableBool
class EventManager:
    """Polls the C++ event queue and redistributes each event to the Python
    messenger and (optionally) the C++ event handler.  The global instance
    lives in EventManagerGlobal.
    """

    # Shared DirectNotify category; created lazily by the first instance.
    notify = None

    def __init__(self, eventQueue = None):
        """
        Create a C++ event queue and handler

        eventQueue: optional EventQueue to poll.  When None, restart()
        adopts the global event queue (and the global event handler).
        """
        # Make a notify category for this class (unless there already is one)
        if EventManager.notify is None:
            EventManager.notify = directNotify.newCategory("EventManager")

        self.eventQueue = eventQueue
        # Resolved in restart() to match the queue actually in use.
        self.eventHandler = None
        # When true, each processed event is timed under a PStats collector.
        self._wantPstats = ConfigVariableBool('pstats-eventmanager', False)

    def doEvents(self):
        """
        Process all the events on the C++ event queue
        """
        # use different methods for handling events with and without pstats tracking
        # for efficiency
        if self._wantPstats:
            processFunc = self.processEventPstats
        else:
            processFunc = self.processEvent
        # Bind bound methods to locals once; this loop runs every frame.
        isEmptyFunc = self.eventQueue.isQueueEmpty
        dequeueFunc = self.eventQueue.dequeueEvent
        while not isEmptyFunc():
            processFunc(dequeueFunc())

    def METHOD_NAME(self, task):
        """
        Process all the events on the C++ event queue

        Task function scheduled by restart(); always returns task.cont so
        the task keeps running every frame.
        """
        self.doEvents()
        # Announce that one full pass over the queue has completed.
        messenger.send("event-loop-done")
        return task.cont

    def parseEventParameter(self, eventParameter):
        """
        Extract the actual data from the eventParameter

        Returns the payload as a native Python value (int, float, str, ...)
        or None for an empty parameter.
        """
        if eventParameter.isInt():
            return eventParameter.getIntValue()
        elif eventParameter.isDouble():
            return eventParameter.getDoubleValue()
        elif eventParameter.isString():
            return eventParameter.getStringValue()
        elif eventParameter.isWstring():
            return eventParameter.getWstringValue()
        elif eventParameter.isTypedRefCount():
            return eventParameter.getTypedRefCountValue()
        elif eventParameter.isEmpty():
            return None
        else:
            # Must be some user defined type, return the ptr
            # which will be downcast to that type.
            return eventParameter.getPtr()

    def processEvent(self, event):
        """
        Process a C++ event
        Duplicate any changes in processEventPstats
        """
        # **************************************************************
        # ******** Duplicate any changes in processEventPstats *********
        # **************************************************************

        # Get the event name
        eventName = event.name
        if eventName:
            # Convert every C++ parameter to its Python equivalent.
            paramList = []
            for eventParameter in event.parameters:
                eventParameterData = self.parseEventParameter(eventParameter)
                paramList.append(eventParameterData)

            # Do not print the new frame debug, it is too noisy!
            if EventManager.notify.getDebug() and eventName != 'NewFrame':
                EventManager.notify.debug('received C++ event named: ' + eventName +
                                          ' parameters: ' + repr(paramList))
            # **************************************************************
            # ******** Duplicate any changes in processEventPstats *********
            # **************************************************************

            # Send the event, we used to send it with the event
            # name as a parameter, but now you can use extraArgs for that
            messenger.send(eventName, paramList)
            # Also send the event down into C++ land
            handler = self.eventHandler
            if handler:
                handler.dispatchEvent(event)
        else:
            # An unnamed event from C++ is probably a bad thing
            EventManager.notify.warning('unnamed event in processEvent')

    def processEventPstats(self, event):
        """
        Process a C++ event with pstats tracking
        Duplicate any changes in processEvent
        """
        # ********************************************************
        # ******** Duplicate any changes in processEvent *********
        # ********************************************************

        # Get the event name
        eventName = event.name
        if eventName:
            paramList = []
            for eventParameter in event.parameters:
                eventParameterData = self.parseEventParameter(eventParameter)
                paramList.append(eventParameterData)

            # Do not print the new frame debug, it is too noisy!
            if EventManager.notify.getDebug() and eventName != 'NewFrame':
                EventManager.notify.debug('received C++ event named: ' + eventName +
                                          ' parameters: ' + repr(paramList))
            # Send the event, we used to send it with the event
            # name as a parameter, but now you can use extraArgs for that
            # ********************************************************
            # ******** Duplicate any changes in processEvent *********
            # ********************************************************

            # Truncate the collector name at the first hyphen so related
            # events (e.g. "mouse1-up"/"mouse1-down") share one collector.
            name = eventName
            hyphen = name.find('-')
            if hyphen >= 0:
                name = name[0:hyphen]
            pstatCollector = PStatCollector('App:Tasks:eventManager:' + name)
            pstatCollector.start()
            if self.eventHandler:
                # Separate child collector for time spent in C++ dispatch.
                cppPstatCollector = PStatCollector(
                    'App:Tasks:eventManager:' + name + ':C++')

            messenger.send(eventName, paramList)
            # Also send the event down into C++ land
            handler = self.eventHandler
            if handler:
                cppPstatCollector.start()
                handler.dispatchEvent(event)
                cppPstatCollector.stop()

            pstatCollector.stop()
        else:
            # An unnamed event from C++ is probably a bad thing
            EventManager.notify.warning('unnamed event in processEvent')

    def restart(self):
        # Bind to a queue/handler (defaulting to the globals) and schedule
        # the per-frame polling task.
        if self.eventQueue is None:
            self.eventQueue = EventQueue.getGlobalEventQueue()

        if self.eventHandler is None:
            if self.eventQueue == EventQueue.getGlobalEventQueue():
                # If we are using the global event queue, then we also
                # want to use the global event handler.
                self.eventHandler = EventHandler.getGlobalEventHandler()
            else:
                # Otherwise, we need our own event handler.
                self.eventHandler = EventHandler(self.eventQueue)

        taskMgr.add(self.METHOD_NAME, 'eventManager')

    def shutdown(self):
        taskMgr.remove('eventManager')

        # Flush the event queue. We do this after removing the task
        # since the task removal itself might also fire off an event.
        if self.eventQueue is not None:
            self.eventQueue.clear()

    # snake_case aliases for the camelCase API.
    do_events = doEvents
    process_event = processEvent
5,174 | reint | # Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
""" Tests for reshaping and reinterpretation of existing arrays. """
import dace
import numpy as np
import pytest
N = dace.symbol('N')
def test_reshape():
    """ Array->View->Tasklet """

    @dace.program
    def reshp(A: dace.float64[2, 3, 4], B: dace.float64[8, 3]):
        C = np.reshape(A, [8, 3])
        B[:] += C

    src = np.random.rand(2, 3, 4)
    dst = np.random.rand(8, 3)
    expected = dst + src.reshape(8, 3)
    reshp(src, dst)
    assert np.allclose(expected, dst)
def test_reshape_dst():
    """ Tasklet->View->Array """

    @dace.program
    def reshpdst(A: dace.float64[2, 3, 4], B: dace.float64[8, 3]):
        C = np.reshape(B, [2, 3, 4])
        C[:] = A

    src = np.random.rand(2, 3, 4)
    dst = np.random.rand(8, 3)
    reshpdst(src, dst)
    # writing through the view must be visible in the flat array
    assert np.allclose(src, dst.reshape(2, 3, 4))
def test_reshape_dst_explicit():
    """ Tasklet->View->Array """
    # Build the SDFG by hand: a 2x3x4 map computes A+1 and writes it
    # through the view Bv, which reinterprets the flat 8x3 array B.
    sdfg = dace.SDFG('reshapedst')
    sdfg.add_array('A', [2, 3, 4], dace.float64)
    sdfg.add_view('Bv', [2, 3, 4], dace.float64)
    sdfg.add_array('B', [8, 3], dace.float64)
    state = sdfg.add_state()
    me, mx = state.add_map('compute', dict(i='0:2', j='0:3', k='0:4'))
    t = state.add_tasklet('add', {'a'}, {'b'}, 'b = a + 1')
    state.add_memlet_path(state.add_read('A'), me, t, dst_conn='a', memlet=dace.Memlet('A[i,j,k]'))
    v = state.add_access('Bv')
    state.add_memlet_path(t, mx, v, src_conn='b', memlet=dace.Memlet('Bv[i,j,k]'))
    # The view node connects to B directly; this edge realizes the reshape.
    state.add_nedge(v, state.add_write('B'), dace.Memlet('B'))
    sdfg.validate()

    A = np.random.rand(2, 3, 4)
    B = np.random.rand(8, 3)
    sdfg(A=A, B=B)
    assert np.allclose(A + 1, np.reshape(B, [2, 3, 4]))
@pytest.mark.parametrize('memlet_dst', (False, True))
def test_reshape_copy(memlet_dst):
    """
    Symmetric case of Array->View->Array. Should be translated to a reference
    and a copy.
    """
    sdfg = dace.SDFG('reshpcpy')
    sdfg.add_array('A', [2, 3], dace.float64)
    sdfg.add_array('B', [6], dace.float64)
    sdfg.add_view('Av', [6], dace.float64)
    state = sdfg.add_state()
    r = state.add_read('A')
    v = state.add_access('Av')
    w = state.add_write('B')
    # The 'views' connector marks Av as a reinterpretation of A (no copy).
    state.add_edge(r, None, v, 'views', dace.Memlet(data='A'))
    # The copy edge's memlet may name either side; both cases are exercised
    # via the parametrized flag.
    state.add_nedge(v, w, dace.Memlet(data='B' if memlet_dst else 'Av'))
    sdfg.validate()

    A = np.random.rand(2, 3)
    B = np.random.rand(6)
    sdfg(A=A, B=B)
    assert np.allclose(A.reshape([6]), B)
def test_reshape_copy_scoped():
    """ Array->View->Array where one array is located within a map scope. """
    sdfg = dace.SDFG('reshpcpy')
    sdfg.add_array('A', [2, 3], dace.float64)
    sdfg.add_array('B', [6], dace.float64)
    sdfg.add_view('Av', [6], dace.float64)
    sdfg.add_transient('tmp', [1], dace.float64)
    state = sdfg.add_state()
    r = state.add_read('A')
    me, mx = state.add_map('reverse', dict(i='0:6'))
    v = state.add_access('Av')
    t = state.add_access('tmp')
    w = state.add_write('B')
    # The view lives inside the map scope; all of A flows in through the
    # map entry node.
    state.add_edge_pair(me, v, r, dace.Memlet('A[0:2, 0:3]'), dace.Memlet('A[0:2, 0:3]'))
    state.add_nedge(v, t, dace.Memlet('Av[i]'))
    # Element i is written to the mirrored position, reversing B.
    state.add_memlet_path(t, mx, w, memlet=dace.Memlet('B[6 - i - 1]'))
    sdfg.validate()

    A = np.random.rand(2, 3)
    B = np.random.rand(6)
    sdfg(A=A, B=B)
    assert np.allclose(A.reshape([6])[::-1], B)
def test_reshape_subset():
    """ Tests reshapes on subsets of arrays. """

    @dace.program
    def reshp(A: dace.float64[2, 3, 4], B: dace.float64[12]):
        C = np.reshape(A[1, :, :], [12])
        B[:] += C

    src = np.random.rand(2, 3, 4)
    dst = np.random.rand(12)
    expected = dst + src[1].reshape(12)
    reshp(src, dst)
    assert np.allclose(expected, dst)
def test_reshape_subset_explicit():
    """ Tests reshapes on subsets of arrays. """
    # Hand-built SDFG: a 12-wide map adds view elements into B, and the
    # view is attached to the second 3x4 slice of A.
    sdfg = dace.SDFG('reshp')
    sdfg.add_array('A', [2, 3, 4], dace.float64)
    sdfg.add_array('B', [12], dace.float64)
    sdfg.add_view('Av', [12], dace.float64)
    state = sdfg.add_state()
    state.add_mapped_tasklet('compute',
                             dict(i='0:12'),
                             dict(a=dace.Memlet('Av[i]'), b=dace.Memlet('B[i]')),
                             'out = a + b',
                             dict(out=dace.Memlet('B[i]')),
                             external_edges=True)
    # Locate the auto-created view node and connect it to the A subset.
    v = next(n for n in state.source_nodes() if n.data == 'Av')
    state.add_nedge(state.add_read('A'), v, dace.Memlet('A[1, 0:3, 0:4]'))

    A = np.random.rand(2, 3, 4)
    B = np.random.rand(12)
    expected = np.reshape(A[1, :, :], [12]) + B
    sdfg(A=A, B=B)
    assert np.allclose(expected, B)
def test_reinterpret():
    """Reinterpreting int32 data as int16 and incrementing each half-word."""

    @dace.program
    def METHOD_NAME(A: dace.int32[N]):
        C = A.view(dace.int16)
        C[:] += 1

    data = np.random.randint(0, 262144, size=[10], dtype=np.int32)
    # compute the reference result with numpy's own reinterpreting view
    expected = np.copy(data)
    expected.view(np.int16)[:] += 1
    METHOD_NAME(data)
    assert np.allclose(expected, data)
def test_reinterpret_invalid():
    """Reinterpreting float32[5] (20 bytes) as float64 is not size-aligned
    and must be rejected with a ValueError.

    Uses ``pytest.raises`` instead of the previous manual
    try/except/raise-AssertionError pattern; pytest is already imported at
    module level and the context manager fails the test automatically if
    no ValueError is raised.
    """

    @dace.program
    def reint_invalid(A: dace.float32[5]):
        C = A.view(dace.float64)
        C[:] += 1

    A = np.random.rand(5).astype(np.float32)
    with pytest.raises(ValueError):
        reint_invalid(A)
if __name__ == "__main__":
    # Allow running this test module directly, without the pytest runner.
    test_reshape()
    test_reshape_dst()
    test_reshape_dst_explicit()
    test_reshape_copy(False)
    test_reshape_copy(True)
    test_reshape_copy_scoped()
    test_reshape_subset()
    test_reshape_subset_explicit()
    test_reinterpret()
    test_reinterpret_invalid()
5,175 | test specifications combo box enabled if item | ######################################################################################################################
# Copyright (C) 2017-2022 Spine project consortium
# This file is part of Spine Toolbox.
# Spine Toolbox is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option)
# any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General
# Public License for more details. You should have received a copy of the GNU Lesser General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
######################################################################################################################
"""
Unit tests for AddProjectItemWidget.
"""
from tempfile import TemporaryDirectory
import unittest
from unittest.mock import MagicMock, patch
from PySide6.QtWidgets import QApplication, QWidget
from PySide6.QtGui import QColor
from spinetoolbox.project_item.project_item import ProjectItem
from spinetoolbox.project_item.project_item_factory import ProjectItemFactory
from spinetoolbox.widgets.add_project_item_widget import AddProjectItemWidget
from tests.mock_helpers import create_toolboxui_with_project, clean_up_toolbox
class TestAddProjectItemWidget(unittest.TestCase):
    """Tests AddProjectItemWidget behavior for a plain project item type."""

    @classmethod
    def setUpClass(cls):
        # A QApplication must exist before any Qt widgets are constructed.
        if not QApplication.instance():
            QApplication()

    def setUp(self):
        """Set up toolbox."""
        self._temp_dir = TemporaryDirectory()
        # Patch plugin loading so only the TestProjectItem type is registered.
        with patch("spinetoolbox.ui_main.JumpPropertiesWidget") as mock_jump_props_widget, patch(
            "spinetoolbox.ui_main.load_project_items"
        ) as mock_load_project_items:
            mock_jump_props_widget.return_value = QWidget()
            mock_load_project_items.return_value = (
                {TestProjectItem.item_type(): TestProjectItem.item_category()},
                {TestProjectItem.item_type(): TestItemFactory},
            )
            self._toolbox = create_toolboxui_with_project(self._temp_dir.name)

    def tearDown(self):
        """Clean up."""
        clean_up_toolbox(self._toolbox)
        self._temp_dir.cleanup()

    def test_name_field_initially_selected(self):
        # The suggested name should be pre-selected so the user can overtype it.
        widget = AddProjectItemWidget(self._toolbox, 0.0, 0.0, class_=TestProjectItem)
        self.assertEqual(widget.ui.lineEdit_name.selectedText(), "TestItemType")

    def test_find_item_is_used_to_create_prefix(self):
        # The item type doubles as the default name prefix.
        widget = AddProjectItemWidget(self._toolbox, 0.0, 0.0, class_=TestProjectItem)
        self.assertEqual(widget.ui.lineEdit_name.text(), "TestItemType")
class TestAddProjectItemWidgetWithSpecifications(unittest.TestCase):
    """Tests AddProjectItemWidget when the item type has a specification factory."""

    @classmethod
    def setUpClass(cls):
        # A QApplication must exist before any Qt widgets are constructed.
        if not QApplication.instance():
            QApplication()

    def setUp(self):
        """Set up toolbox."""
        self._temp_dir = TemporaryDirectory()
        # In addition to the item factory, register a specification factory
        # so the widget's specification combo box becomes relevant.
        with patch("spinetoolbox.ui_main.JumpPropertiesWidget") as mock_jump_props_widget, patch(
            "spinetoolbox.ui_main.load_project_items"
        ) as mock_load_project_items, patch(
            "spinetoolbox.ui_main.load_item_specification_factories"
        ) as mock_load_specification_factories:
            mock_jump_props_widget.return_value = QWidget()
            mock_load_project_items.return_value = (
                {TestProjectItem.item_type(): TestProjectItem.item_category()},
                {TestProjectItem.item_type(): TestItemFactory},
            )
            mock_load_specification_factories.return_value = {TestProjectItem.item_type(): TestSpecificationFactory}
            self._toolbox = create_toolboxui_with_project(self._temp_dir.name)

    def tearDown(self):
        """Clean up."""
        clean_up_toolbox(self._toolbox)
        self._temp_dir.cleanup()

    def METHOD_NAME(self):
        # With a specification factory registered, the combo box is enabled.
        widget = AddProjectItemWidget(self._toolbox, 0.0, 0.0, class_=TestProjectItem)
        self.assertTrue(widget.ui.comboBox_specification.isEnabled())
class TestProjectItem(ProjectItem):
    """Minimal concrete ProjectItem used as the item class under test."""

    def __init__(self, project):
        super().__init__("item name", "item description", 0.0, 0.0, project)

    @staticmethod
    def item_type():
        # Also used by the widget as the default item name/prefix.
        return "TestItemType"

    @staticmethod
    def item_category():
        return "TestCategory"

    @property
    def executable_class(self):
        # Not needed by these tests.
        raise NotImplementedError()

    @staticmethod
    def from_dict(name, item_dict, toolbox, project):
        # Deserialization stub; the dict contents are ignored.
        return TestProjectItem(project)

    def update_name_label(self):
        # No UI to update in the test item.
        return
class TestItemFactory(ProjectItemFactory):
    """Stub factory whose widgets/menus are MagicMocks; only item_class()
    and make_item() return real objects."""

    @staticmethod
    def item_class():
        return TestProjectItem

    @staticmethod
    def icon():
        # No icon resource needed for these tests.
        return ""

    @staticmethod
    def icon_color():
        return QColor()

    @staticmethod
    def make_add_item_widget(toolbox, x, y, specification):
        return MagicMock()

    @staticmethod
    def make_icon(toolbox):
        return MagicMock()

    @staticmethod
    def make_item(name, item_dict, toolbox, project):
        return TestProjectItem(project)

    @staticmethod
    def make_properties_widget(toolbox):
        """
        Creates the item's properties tab widget.
        Returns:
            QWidget: item's properties tab widget
        """
        return MagicMock()

    @staticmethod
    def make_specification_menu(parent, index):
        return MagicMock()

    @staticmethod
    def show_specification_widget(toolbox, specification=None, **kwargs):
        return MagicMock()
class TestSpecificationFactory:
    # Empty stand-in; the tests only need a factory object to register.
    pass
if __name__ == "__main__":
unittest.main() |
5,176 | test person birth date should be in | from pytest import mark
import re
def test_person_headshot_should_be_an_image_link(ia):
    """Keanu Reeves' headshot should be an http(s) URL ending in .jpg."""
    keanu = ia.get_person('0000206', info=['biography'])
    assert re.match(r'^https?://.*\.jpg$', keanu['headshot'])
def test_person_full_size_headshot_should_be_an_image_link(ia):
    """The full-size headshot should likewise be a .jpg URL."""
    keanu = ia.get_person('0000206', info=['biography'])
    assert re.match(r'^https?://.*\.jpg$', keanu['full-size headshot'])
def test_person_headshot_if_none_should_be_excluded(ia):
    """Deni Gordon has no photo, so the 'headshot' key must be absent."""
    deni = ia.get_person('0330139', info=['biography'])
    assert 'headshot' not in deni
def test_person_bio_is_present(ia):
    """Keanu Reeves' biography data should include a mini biography."""
    keanu = ia.get_person('0000206', info=['biography'])
    assert 'mini biography' in keanu
def METHOD_NAME(ia):
    """Fred Astaire's complete birth date is formatted as YYYY-MM-DD."""
    astaire = ia.get_person('0000001', info=['biography'])
    assert astaire.get('birth date') == '1899-05-10'
def test_person_birth_date_without_month_and_date_should_be_in_y00_format(ia):
    """Unknown month and day are zero-filled (YYYY-00-00)."""
    mcclory = ia.get_person('0565883', info=['biography'])
    assert mcclory.get('birth date') == '1968-00-00'
def test_person_birth_date_without_itemprop_should_be_in_ymd_format(ia):
    """Humphrey Bogart's birth date parses even without an itemprop tag."""
    bogart = ia.get_person('0000007', info=['biography'])
    assert bogart.get('birth date') == '1899-12-25'
def test_person_birth_notes_should_contain_birth_place(ia):
    """Birth notes carry the place of birth."""
    astaire = ia.get_person('0000001', info=['biography'])
    assert astaire.get('birth notes') == 'Omaha, Nebraska, USA'
def test_person_death_date_should_be_in_ymd_format(ia):
    """Fred Astaire's death date is formatted as YYYY-MM-DD."""
    astaire = ia.get_person('0000001', info=['biography'])
    assert astaire.get('death date') == '1987-06-22'
def test_person_death_date_without_itemprop_should_be_in_ymd_format(ia):
    """Humphrey Bogart's death date parses even without an itemprop tag."""
    bogart = ia.get_person('0000007', info=['biography'])
    assert bogart.get('death date') == '1957-01-14'
def test_person_death_date_if_none_should_be_excluded(ia):
    """Julia Roberts is alive, so there must be no 'death date' key."""
    roberts = ia.get_person('0000210', info=['biography'])
    assert 'death date' not in roberts
def test_person_death_notes_should_contain_death_place_and_reason(ia):
    """Death notes include both the place and the cause of death."""
    astaire = ia.get_person('0000001', info=['biography'])
    assert astaire['death notes'] == 'in Los Angeles, California, USA (pneumonia)'
def test_person_death_notes_if_none_should_be_excluded(ia):
    """Julia Roberts is alive, so there must be no 'death notes' key."""
    roberts = ia.get_person('0000210', info=['biography'])
    assert 'death notes' not in roberts
def test_person_birth_name_should_be_normalized(ia):
    """The birth name comes back in normal (given, family) name order."""
    roberts = ia.get_person('0000210', info=['biography'])
    assert roberts.get('birth name') == 'Julia Fiona Roberts'
def test_person_nicknames_if_single_should_be_a_list_of_names(ia):
    """A lone nickname is still wrapped in a list."""
    roberts = ia.get_person('0000210', info=['biography'])
    assert roberts.get('nick names') == ['Jules']
def test_person_nicknames_if_multiple_should_be_a_list_of_names(ia):
    """Multiple nicknames come back as a list in page order."""
    keanu = ia.get_person('0000206', info=['biography'])
    assert keanu.get('nick names') == ['The Wall', 'The One']
def test_person_height_should_be_in_inches_and_meters(ia):
    """Height is reported in both imperial and metric units."""
    roberts = ia.get_person('0000210', info=['biography'])
    assert roberts.get('height') == '5\' 8" (1.73 m)'
def test_person_height_if_none_should_be_excluded(ia):
    """Georges Melies has no listed height, so the key must be absent."""
    melies = ia.get_person('0617588', info=['biography'])
    assert 'height' not in melies
@mark.skip("FIXME: biography page change: from 'Spouses' it's now 'Family > Spouse")
def test_person_spouse_should_be_a_list(ia):
person = ia.get_person('0000210', info=['biography']) # Julia Roberts
spouses = person.get('spouse', [])
assert len(spouses) == 2
def test_person_trade_mark_should_be_a_list(ia):
    """Trade marks come back as a list (three for Julia Roberts)."""
    roberts = ia.get_person('0000210', info=['biography'])
    assert len(roberts.get('trade mark', [])) == 3
def test_person_trivia_should_be_a_list(ia):
    """Trivia is a long list (more than 90 entries for Julia Roberts)."""
    roberts = ia.get_person('0000210', info=['biography'])
    assert len(roberts.get('trivia', [])) > 90
def test_person_quotes_should_be_a_list(ia):
    """Quotes is a long list (more than 30 entries for Julia Roberts)."""
    roberts = ia.get_person('0000210', info=['biography'])
    assert len(roberts.get('quotes', [])) > 30
def test_person_salary_history_should_be_a_list(ia):
    """Salary history is a long list (more than 25 entries)."""
    roberts = ia.get_person('0000210', info=['biography'])
    assert len(roberts.get('salary history', [])) > 25
5,177 | zeros state | # Copyright 2020 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import numbers
from typing import (
AbstractSet,
Mapping,
Union,
Iterable,
Dict,
Optional,
TYPE_CHECKING,
Tuple,
FrozenSet,
)
import sympy
from cirq import ops, value, protocols
if TYPE_CHECKING:
import cirq
from cirq.value.product_state import _NamedOneQubitState
@dataclasses.dataclass(frozen=True)
class InitObsSetting:
    """A pair of initial state and observable.
    Usually, given a circuit you want to iterate through many
    InitObsSettings to vary the initial state preparation and output
    observable.
    """

    # Product state used to prepare the qubits before the circuit runs.
    init_state: value.ProductState
    # Pauli observable measured at the end; may act only on prepared qubits.
    observable: ops.PauliString

    def __post_init__(self):
        # Special validation for this dataclass.
        init_qs = self.init_state.qubits
        obs_qs = self.observable.qubits
        # Every qubit the observable touches must have a defined init state.
        if set(obs_qs) > set(init_qs):
            raise ValueError(
                "`observable`'s qubits should be a subset of those "
                "found in `init_state`. "
                f"observable qubits: {obs_qs}. init_state qubits: {init_qs}"
            )

    def __str__(self):
        return f'{self.init_state} → {self.observable}'

    def __repr__(self):
        return (
            f'cirq.work.InitObsSetting('
            f'init_state={self.init_state!r}, '
            f'observable={self.observable!r})'
        )

    def _json_dict_(self):
        # Hook for cirq's JSON serialization protocol.
        return protocols.dataclass_json_dict(self)
def _max_weight_observable(observables: Iterable[ops.PauliString]) -> Optional[ops.PauliString]:
    """Combine observables into one compatible with all of them.

    The result takes, at each qubit position, the (unique) non-identity
    single-qubit Pauli appearing among the inputs; coefficients are
    dropped and the result need not appear in the inputs.  Returns None
    when two inputs demand different Paulis on the same qubit, i.e. the
    observables do not share a tensor product basis.  For example,
    ["XI", "IZ"] combine to "XZ", while ["XI", "ZI"] give None.
    """
    pauli_by_qubit: Dict[ops.Qid, ops.Pauli] = {}
    for obs in observables:
        for qubit, pauli in obs.items():
            # setdefault records the first Pauli seen for this qubit and
            # returns the already-recorded one on later visits.
            if pauli_by_qubit.setdefault(qubit, pauli) != pauli:
                return None
    return ops.PauliString(pauli_by_qubit)
def _max_weight_state(states: Iterable[value.ProductState]) -> Optional[value.ProductState]:
    """Combine product states into one compatible with all of them.

    The result takes the single-qubit state at each qubit position.
    Returns None when two inputs assign different one-qubit states to
    the same qubit.  For example, [+X(0), -Z(1)] combine to
    "+X(0) * -Z(1)", while [+X(0), +Z(0)] give None.
    """
    state_by_qubit: Dict[ops.Qid, _NamedOneQubitState] = {}
    for product_state in states:
        for qubit, one_qubit_state in product_state:
            # First assignment wins; later conflicting assignments abort.
            if state_by_qubit.setdefault(qubit, one_qubit_state) != one_qubit_state:
                return None
    return value.ProductState(state_by_qubit)
def METHOD_NAME(qubits: Iterable['cirq.Qid']):
    """Return the ProductState that is |00..00> on all qubits."""
    # dict.fromkeys preserves iteration order and maps each qubit to |0>.
    return value.ProductState(dict.fromkeys(qubits, value.KET_ZERO))
def observables_to_settings(
    observables: Iterable['cirq.PauliString'], qubits: Iterable['cirq.Qid']
) -> Iterable[InitObsSetting]:
    """Transform an observable to an InitObsSetting initialized in the
    all-zeros state.

    Args:
        observables: Observables to wrap, one setting yielded per input.
        qubits: Qubits over which the all-zeros initial state is defined.
    """
    # Build the shared all-zeros state once. The previous version called
    # METHOD_NAME(qubits) inside the loop, repeating the work per observable
    # and silently producing empty states from the second observable on
    # whenever `qubits` was a one-shot iterator.
    init_state = METHOD_NAME(qubits)
    for observable in observables:
        yield InitObsSetting(init_state=init_state, observable=observable)
def _fix_precision(val: Union[value.Scalar, sympy.Expr], precision) -> Union[int, Tuple[int, int]]:
    """Convert floating point or complex numbers to (implicitly) fixed point
    integers. Complex numbers will return fixed-point (real, imag) tuples.
    Circuit parameters can be complex but we also need to use them as
    dictionary keys. We secretly use these fixed-precision integers.
    """
    # Unresolved symbolic expressions have no numeric value to fix.
    # NOTE: this check must come before the numbers.Complex check, since
    # sympy's numeric types can satisfy both.
    if isinstance(val, sympy.Expr):
        raise ValueError(f'Cannot convert {val} to fixed precision in observable settings')
    if isinstance(val, (complex, numbers.Complex)):
        # Keep real and imaginary parts separately so the pair is hashable.
        return int(val.real * precision), int(val.imag * precision)
    return int(val * precision)
def _hashable_param(
    param_tuples: AbstractSet[Tuple[Union[str, sympy.Expr], Union[value.Scalar, sympy.Expr]]],
    precision=1e7,
) -> FrozenSet[Tuple[str, Union[int, Tuple[int, int]]]]:
    """Hash circuit parameters using fixed precision.
    Circuit parameters can be complex but we also need to use them as
    dictionary keys. We secretly use these fixed-precision integers.
    """
    hashable_items = []
    for param_name, param_value in param_tuples:
        # Only string-named parameters participate in the hashable key.
        if isinstance(param_name, str):
            hashable_items.append((param_name, _fix_precision(param_value, precision)))
    return frozenset(hashable_items)
@dataclasses.dataclass(frozen=True)
class _MeasurementSpec:
    """An encapsulation of all the specifications for one run of a
    quantum processor.
    This includes the maximal input-output setting (which may result in many
    observables being measured if they are consistent with `max_setting`) and
    a set of circuit parameters if the circuit is parameterized.
    """

    # The maximal init-state/observable pair covering the run.
    max_setting: InitObsSetting
    # Parameter resolutions for a parameterized circuit (may be empty).
    circuit_params: Mapping[Union[str, sympy.Expr], Union[value.Scalar, sympy.Expr]]

    def __hash__(self):
        # Params are folded in via fixed-precision ints so float/complex
        # values hash deterministically.
        return hash((self.max_setting, _hashable_param(self.circuit_params.items())))

    def __repr__(self):
        return (
            f'cirq.work._MeasurementSpec(max_setting={self.max_setting!r}, '
            f'circuit_params={self.circuit_params!r})'
        )

    def _json_dict_(self):
        # Hook for cirq's JSON serialization protocol.
        return protocols.dataclass_json_dict(self)
5,178 | errorhandler method | import unittest
from unittest import mock
import octoprint.plugin
class BlueprintPluginTest(unittest.TestCase):
def setUp(self):
self.basefolder = "/some/funny/basefolder"
self.plugin = octoprint.plugin.BlueprintPlugin()
self.plugin._basefolder = self.basefolder
class MyAssetPlugin(
octoprint.plugin.BlueprintPlugin, octoprint.plugin.AssetPlugin
):
def get_asset_folder(self):
return "/some/asset/folder"
class MyTemplatePlugin(
octoprint.plugin.BlueprintPlugin, octoprint.plugin.TemplatePlugin
):
def get_template_folder(self):
return "/some/template/folder"
self.assetplugin = MyAssetPlugin()
self.assetplugin._basefolder = self.basefolder
self.templateplugin = MyTemplatePlugin()
self.templateplugin._basefolder = self.basefolder
def test_route(self):
def test_method():
pass
octoprint.plugin.BlueprintPlugin.route("/test/method", methods=["GET"])(
test_method
)
octoprint.plugin.BlueprintPlugin.route("/test/method/{foo}", methods=["PUT"])(
test_method
)
self.assertTrue(hasattr(test_method, "_blueprint_rules"))
self.assertTrue("test_method" in test_method._blueprint_rules)
self.assertTrue(len(test_method._blueprint_rules["test_method"]) == 2)
self.assertListEqual(
test_method._blueprint_rules["test_method"],
[
("/test/method", {"methods": ["GET"]}),
("/test/method/{foo}", {"methods": ["PUT"]}),
],
)
def test_errorhandler(self):
def test_method():
pass
octoprint.plugin.BlueprintPlugin.errorhandler(404)(test_method)
self.assertTrue(hasattr(test_method, "_blueprint_error_handler"))
self.assertTrue("test_method" in test_method._blueprint_error_handler)
self.assertTrue(len(test_method._blueprint_error_handler["test_method"]) == 1)
self.assertListEqual(test_method._blueprint_error_handler["test_method"], [404])
def test_get_blueprint_kwargs(self):
import os
expected = {
"static_folder": os.path.join(self.basefolder, "static"),
"template_folder": os.path.join(self.basefolder, "templates"),
}
result = self.plugin.get_blueprint_kwargs()
self.assertEqual(result, expected)
def test_get_blueprint_kwargs_assetplugin(self):
import os
expected = {
"static_folder": self.assetplugin.get_asset_folder(),
"template_folder": os.path.join(self.basefolder, "templates"),
}
result = self.assetplugin.get_blueprint_kwargs()
self.assertEqual(result, expected)
def test_get_blueprint_kwargs_templateplugin(self):
import os
expected = {
"static_folder": os.path.join(self.basefolder, "static"),
"template_folder": self.templateplugin.get_template_folder(),
}
result = self.templateplugin.get_blueprint_kwargs()
self.assertEqual(result, expected)
def test_get_blueprint(self):
import os
expected_kwargs = {
"static_folder": os.path.join(self.basefolder, "static"),
"template_folder": os.path.join(self.basefolder, "templates"),
}
class MyPlugin(octoprint.plugin.BlueprintPlugin):
@octoprint.plugin.BlueprintPlugin.route("/some/path", methods=["GET"])
def route_method(self):
pass
@octoprint.plugin.BlueprintPlugin.errorhandler(404)
def METHOD_NAME(self):
pass
@octoprint.plugin.BlueprintPlugin.route("/hidden/path", methods=["GET"])
def _hidden_method(self):
pass
plugin = MyPlugin()
plugin._basefolder = self.basefolder
plugin._identifier = "myplugin"
with mock.patch("flask.Blueprint") as MockBlueprint:
blueprint = mock.MagicMock()
MockBlueprint.return_value = blueprint
errorhandler = mock.MagicMock()
blueprint.errorhandler.return_value = errorhandler
result = plugin.get_blueprint()
self.assertEqual(result, blueprint)
MockBlueprint.assert_called_once_with("myplugin", "myplugin", **expected_kwargs)
blueprint.add_url_rule.assert_called_once_with(
"/some/path", "route_method", view_func=plugin.route_method, methods=["GET"]
)
blueprint.errorhandler.assert_called_once_with(404)
errorhandler.assert_called_once_with(plugin.METHOD_NAME)
def test_get_blueprint_cached(self):
    """A blueprint already stored on the plugin is returned without rebuilding."""
    cached = mock.MagicMock()
    self.plugin._blueprint = cached

    self.assertEqual(cached, self.plugin.get_blueprint())
5,179 | test set max and skip runs | #!/usr/bin/env python
"""
_Mask_
Unittest for the WMCore.DataStructs.Mask class
"""
# This code written as essentially a blank for future
# Mask development
# -mnorman
import unittest
from WMCore.DataStructs.Mask import Mask
from WMCore.DataStructs.Run import Run
class MaskTest(unittest.TestCase):
    """
    _MaskTest_

    Unit tests for WMCore.DataStructs.Mask: event/lumi/run windows,
    run+lumi bookkeeping, and mask-based filtering of Run sets.
    """

    def testSetMaxAndSkipEvents(self):
        """
        test class for setMaxAndSkipEvents in Mask.py
        """
        testMask = Mask()
        maxEvents = 100
        skipEvents = 10
        testMask.setMaxAndSkipEvents(maxEvents, skipEvents)
        # FirstEvent is the skip offset; LastEvent includes that offset.
        self.assertEqual(testMask['FirstEvent'], skipEvents)
        self.assertEqual(testMask['LastEvent'], maxEvents + skipEvents)
        return

    def testSetMaxAndSkipLumis(self):
        """
        test class for setMaxAndSkipLumis in Mask.py
        """
        testMask = Mask()
        maxLumis = 10
        skipLumis = 2
        testMask.setMaxAndSkipLumis(maxLumis, skipLumis)
        self.assertEqual(testMask['FirstLumi'], skipLumis)
        self.assertEqual(testMask['LastLumi'], maxLumis + skipLumis)
        return

    def METHOD_NAME(self):
        """
        test class for setMaxAndSkipRuns in Mask.py
        """
        testMask = Mask()
        maxRuns = 1000
        skipRuns = 200
        testMask.setMaxAndSkipRuns(maxRuns, skipRuns)
        self.assertEqual(testMask['FirstRun'], skipRuns)
        self.assertEqual(testMask['LastRun'], maxRuns + skipRuns)
        return

    def testGetMaxEvents(self):
        """
        test class for getMaxEvents in Mask.py
        """
        # The way I've decided to implement this depends on SetMaxAndSkipEvents()
        # Therefore a failure in one will result in a failure in the second
        # I'm not sure if this is the best way, but it's the one users will use
        # The problem is that it's called in reverse order by unittest so you have to
        # remember that.
        # -mnorman
        testMask = Mask()
        maxEvents = 100
        skipEvents = 1
        # Unset mask reports None rather than raising.
        self.assertEqual(testMask.getMaxEvents(), None)
        testMask.setMaxAndSkipEvents(maxEvents, skipEvents)
        self.assertEqual(testMask.getMaxEvents(), maxEvents + skipEvents)

    def testGetMax(self):
        """
        test class for the getMax() routine added to Mask.py
        """
        testMask = Mask()
        maxRuns = 999
        skipRuns = 201
        testMask.setMaxAndSkipRuns(maxRuns, skipRuns)
        # Only the 'Run' window was set; all other types (and unknown
        # types like 'junk') report None.
        self.assertEqual(testMask.getMax('Event'), None)
        self.assertEqual(testMask.getMax('Lumi'), None)
        self.assertEqual(testMask.getMax('junk'), None)
        # NOTE(review): 999 + 201 = 1200, yet 1000 is expected - getMax
        # evidently does not return LastRun; confirm against Mask.getMax.
        self.assertEqual(testMask.getMax('Run'), 1000)

    def testRunsAndLumis(self):
        """
        Test several different ways of creating the same list
        of runs and lumis
        """
        runMask = Mask()
        rangesMask = Mask()
        runAndLumisMask = Mask()
        # Same run/lumi content expressed three ways: explicit lumi lists,
        # inclusive lumi ranges, and repeated addRunAndLumis calls.
        runMask.addRun(Run(100, 1, 2, 3, 4, 5, 6, 8, 9, 10))
        runMask.addRun(Run(200, 6, 7, 8))
        runMask.addRun(Run(300, 12))
        rangesMask.addRunWithLumiRanges(run=100, lumiList=[[1, 6], [8, 10]])
        rangesMask.addRunWithLumiRanges(run=200, lumiList=[[6, 8]])
        rangesMask.addRunWithLumiRanges(run=300, lumiList=[[12, 12]])
        runAndLumisMask.addRunAndLumis(run=100, lumis=[1, 6])
        runAndLumisMask.addRunAndLumis(run=100, lumis=[8, 10])
        runAndLumisMask.addRunAndLumis(run=200, lumis=[6, 8])
        runAndLumisMask.addRunAndLumis(run=300, lumis=[12, 12])
        self.assertEqual(runMask.getRunAndLumis(), rangesMask.getRunAndLumis())
        # Note, this may break if the TODO in Mask.addRunAndLumis() is addressed
        self.assertEqual(runMask.getRunAndLumis(), runAndLumisMask.getRunAndLumis())

    def testFilter(self):
        """
        Test filtering of a set(run) object
        """
        mask = Mask()
        mask.addRunWithLumiRanges(run=1, lumiList=[[1, 9], [12, 12], [31, 31], [38, 39], [49, 49]])
        # No lumi of this run falls inside the mask -> filtered away entirely.
        runs = set()
        runs.add(Run(1, 148, 166, 185, 195, 203, 212))
        newRuns = mask.filterRunLumisByMask(runs=runs)
        self.assertEqual(len(newRuns), 0)
        # Run 2 is not in the mask at all; run 1 survives via lumi 2.
        runs = set()
        runs.add(Run(1, 2, 148, 166, 185, 195, 203, 212))
        runs.add(Run(2, 148, 166, 185, 195, 203, 212))
        newRuns = mask.filterRunLumisByMask(runs=runs)
        self.assertEqual(len(newRuns), 1)
        # Surviving run keeps only the lumis inside the mask (2 and 9).
        runs = set()
        runs.add(Run(1, 2, 9, 148, 166, 185, 195, 203, 212))
        newRuns = mask.filterRunLumisByMask(runs=runs)
        self.assertEqual(len(newRuns), 1)
        run = newRuns.pop()
        self.assertEqual(run.run, 1)
        self.assertEqual(run.lumis, [2, 9])

    def testFilterRealCase(self):
        """
        Test filtering of a set(run) object based on real cases from production
        """
        mask = Mask()
        mask.addRunWithLumiRanges(run=1, lumiList=[[9, 9], [8, 8], [3, 4], [7, 7]])
        mask.setMaxAndSkipLumis(0, 7)
        mask.setMaxAndSkipRuns(0, 1)
        # Three Run objects for the same run number with (lumi, events) pairs;
        # the mask must merge and sort the surviving lumis.
        runs = set()
        runs.add(Run(1, *[(9, 500), (10, 500)]))
        runs.add(Run(1, *[(3, 500), (4, 500)]))
        runs.add(Run(1, *[(7, 500), (8, 500)]))
        newRuns = mask.filterRunLumisByMask(runs=runs)
        self.assertEqual(len(newRuns), 1)
        run = newRuns.pop()
        self.assertEqual(run.run, 1)
        self.assertEqual(run.lumis, [3, 4, 7, 8, 9])
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
5,180 | test read open sink | import pipes
import os
import string
import unittest
from test.test_support import TESTFN, run_unittest, unlink, reap_children
if os.name != 'posix':
raise unittest.SkipTest('pipes module only works on posix')
TESTFN2 = TESTFN + "2"
# tr a-z A-Z is not portable, so make the ranges explicit
s_command = 'tr %s %s' % (string.ascii_lowercase, string.ascii_uppercase)
class SimplePipeTests(unittest.TestCase):
    """Tests for the (POSIX-only) pipes.Template shell-pipeline builder."""

    def tearDown(self):
        # Remove the scratch files each test may have created.
        for f in (TESTFN, TESTFN2):
            unlink(f)

    def testSimplePipe1(self):
        # Write through a stdin->stdout step: file contents get upper-cased.
        t = pipes.Template()
        t.append(s_command, pipes.STDIN_STDOUT)
        f = t.open(TESTFN, 'w')
        f.write('hello world #1')
        f.close()
        with open(TESTFN) as f:
            self.assertEqual(f.read(), 'HELLO WORLD #1')

    def testSimplePipe2(self):
        # copy() through a file-in/file-out step using $IN/$OUT placeholders.
        with open(TESTFN, 'w') as f:
            f.write('hello world #2')
        t = pipes.Template()
        t.append(s_command + ' < $IN > $OUT', pipes.FILEIN_FILEOUT)
        t.copy(TESTFN, TESTFN2)
        with open(TESTFN2) as f:
            self.assertEqual(f.read(), 'HELLO WORLD #2')

    def testSimplePipe3(self):
        # Read through a file-in/stdout step.
        with open(TESTFN, 'w') as f:
            f.write('hello world #2')
        t = pipes.Template()
        t.append(s_command + ' < $IN', pipes.FILEIN_STDOUT)
        with t.open(TESTFN, 'r') as f:
            self.assertEqual(f.read(), 'HELLO WORLD #2')

    def testEmptyPipeline1(self):
        # copy through empty pipe
        d = 'empty pipeline test COPY'
        with open(TESTFN, 'w') as f:
            f.write(d)
        with open(TESTFN2, 'w') as f:
            f.write('')
        t = pipes.Template()
        t.copy(TESTFN, TESTFN2)
        with open(TESTFN2) as f:
            self.assertEqual(f.read(), d)

    def testEmptyPipeline2(self):
        # read through empty pipe
        d = 'empty pipeline test READ'
        with open(TESTFN, 'w') as f:
            f.write(d)
        t = pipes.Template()
        with t.open(TESTFN, 'r') as f:
            self.assertEqual(f.read(), d)

    def testEmptyPipeline3(self):
        # write through empty pipe
        d = 'empty pipeline test WRITE'
        t = pipes.Template()
        with t.open(TESTFN, 'w') as f:
            f.write(d)
        with open(TESTFN) as f:
            self.assertEqual(f.read(), d)

    def testQuoting(self):
        # pipes.quote must leave safe strings alone and single-quote the rest,
        # escaping embedded single quotes with the '"'"' idiom.
        safeunquoted = string.ascii_letters + string.digits + '@%_-+=:,./'
        unsafe = '"`$\\!'
        self.assertEqual(pipes.quote(''), "''")
        self.assertEqual(pipes.quote(safeunquoted), safeunquoted)
        self.assertEqual(pipes.quote('test file name'), "'test file name'")
        for u in unsafe:
            self.assertEqual(pipes.quote('test%sname' % u),
                             "'test%sname'" % u)
        for u in unsafe:
            self.assertEqual(pipes.quote("test%s'name'" % u),
                             "'test%s'\"'\"'name'\"'\"''" % u)

    def testRepr(self):
        t = pipes.Template()
        self.assertEqual(repr(t), "<Template instance, steps=[]>")
        t.append('tr a-z A-Z', pipes.STDIN_STDOUT)
        self.assertEqual(repr(t),
                         "<Template instance, steps=[('tr a-z A-Z', '--')]>")

    def testSetDebug(self):
        t = pipes.Template()
        t.debug(False)
        self.assertEqual(t.debugging, False)
        t.debug(True)
        self.assertEqual(t.debugging, True)

    def METHOD_NAME(self):
        # check calling open('r') on a pipe ending with
        # a sink raises ValueError
        t = pipes.Template()
        t.append('boguscmd', pipes.SINK)
        self.assertRaises(ValueError, t.open, 'bogusfile', 'r')

    def testWriteOpenSource(self):
        # check calling open('w') on a pipe ending with
        # a source raises ValueError
        t = pipes.Template()
        t.prepend('boguscmd', pipes.SOURCE)
        self.assertRaises(ValueError, t.open, 'bogusfile', 'w')

    def testBadAppendOptions(self):
        t = pipes.Template()
        # try a non-string command
        self.assertRaises(TypeError, t.append, 7, pipes.STDIN_STDOUT)
        # try a type that isn't recognized
        self.assertRaises(ValueError, t.append, 'boguscmd', 'xx')
        # shouldn't be able to append a source
        self.assertRaises(ValueError, t.append, 'boguscmd', pipes.SOURCE)
        # check appending two sinks
        t = pipes.Template()
        t.append('boguscmd', pipes.SINK)
        self.assertRaises(ValueError, t.append, 'boguscmd', pipes.SINK)
        # command needing file input but with no $IN
        t = pipes.Template()
        self.assertRaises(ValueError, t.append, 'boguscmd $OUT',
                          pipes.FILEIN_FILEOUT)
        t = pipes.Template()
        self.assertRaises(ValueError, t.append, 'boguscmd',
                          pipes.FILEIN_STDOUT)
        # command needing file output but with no $OUT
        t = pipes.Template()
        self.assertRaises(ValueError, t.append, 'boguscmd $IN',
                          pipes.FILEIN_FILEOUT)
        t = pipes.Template()
        self.assertRaises(ValueError, t.append, 'boguscmd',
                          pipes.STDIN_FILEOUT)

    def testBadPrependOptions(self):
        t = pipes.Template()
        # try a non-string command
        self.assertRaises(TypeError, t.prepend, 7, pipes.STDIN_STDOUT)
        # try a type that isn't recognized
        self.assertRaises(ValueError, t.prepend, 'tr a-z A-Z', 'xx')
        # shouldn't be able to prepend a sink
        self.assertRaises(ValueError, t.prepend, 'boguscmd', pipes.SINK)
        # check prepending two sources
        t = pipes.Template()
        t.prepend('boguscmd', pipes.SOURCE)
        self.assertRaises(ValueError, t.prepend, 'boguscmd', pipes.SOURCE)
        # command needing file input but with no $IN
        t = pipes.Template()
        self.assertRaises(ValueError, t.prepend, 'boguscmd $OUT',
                          pipes.FILEIN_FILEOUT)
        t = pipes.Template()
        self.assertRaises(ValueError, t.prepend, 'boguscmd',
                          pipes.FILEIN_STDOUT)
        # command needing file output but with no $OUT
        t = pipes.Template()
        self.assertRaises(ValueError, t.prepend, 'boguscmd $IN',
                          pipes.FILEIN_FILEOUT)
        t = pipes.Template()
        self.assertRaises(ValueError, t.prepend, 'boguscmd',
                          pipes.STDIN_FILEOUT)

    def testBadOpenMode(self):
        # Only 'r' and 'w' are valid open modes.
        t = pipes.Template()
        self.assertRaises(ValueError, t.open, 'bogusfile', 'x')

    def testClone(self):
        # clone() deep-copies the step list but shares no state.
        t = pipes.Template()
        t.append('tr a-z A-Z', pipes.STDIN_STDOUT)
        u = t.clone()
        self.assertNotEqual(id(t), id(u))
        self.assertEqual(t.steps, u.steps)
        self.assertNotEqual(id(t.steps), id(u.steps))
        self.assertEqual(t.debugging, u.debugging)
def test_main():
    # Entry point used by CPython's regrtest harness.
    run_unittest(SimplePipeTests)
    reap_children()  # avoid leaking children spawned by the shell pipelines


if __name__ == "__main__":
    test_main()
5,181 | source subfolder | from conans import CMake, ConanFile, tools
import os
required_conan_version = ">=1.43.0"
class LibCheckConan(ConanFile):
    """Conan (1.x API) recipe for libcheck, a unit testing framework for C."""
    name = "libcheck"
    description = "A unit testing framework for C"
    topics = ("libcheck", "unit", "testing", "framework", "C")
    license = "LGPL-2.1-or-later"
    homepage = "https://github.com/libcheck/check"
    url = "https://github.com/conan-io/conan-center-index"
    settings = "os", "arch", "compiler", "build_type"
    options = {
        "shared": [True, False],
        "fPIC": [True, False],
        "with_subunit": [True, False],
    }
    default_options = {
        "shared": False,
        "fPIC": True,
        "with_subunit": True,
    }
    generators = "cmake", "cmake_find_package"
    _cmake = None  # memoized CMake helper, built lazily by _configure_cmake()

    @property
    def METHOD_NAME(self):
        # Folder the upstream sources are extracted into.
        return "source_subfolder"

    @property
    def _build_subfolder(self):
        return "build_subfolder"

    @property
    def _is_msvc(self):
        # Covers both the legacy "Visual Studio" and the newer "msvc" setting.
        return str(self.settings.compiler) in ["Visual Studio", "msvc"]

    def export_sources(self):
        self.copy("CMakeLists.txt")
        for patch in self.conan_data.get("patches", {}).get(self.version, []):
            self.copy(patch["patch_file"])

    def config_options(self):
        if self.settings.os == "Windows":
            del self.options.fPIC

    def configure(self):
        # fPIC is meaningless for shared builds; plain C needs no C++ settings.
        if self.options.shared:
            del self.options.fPIC
        del self.settings.compiler.libcxx
        del self.settings.compiler.cppstd

    def requirements(self):
        if self.options.with_subunit:
            self.requires("subunit/1.4.0")

    def source(self):
        tools.get(**self.conan_data["sources"][self.version],
                  destination=self.METHOD_NAME, strip_root=True)

    def _configure_cmake(self):
        # Configure once and cache; later calls reuse the same CMake object.
        if self._cmake:
            return self._cmake
        self._cmake = CMake(self)
        self._cmake.definitions["CHECK_ENABLE_TESTS"] = False
        self._cmake.definitions["ENABLE_MEMORY_LEAKING_TESTS"] = False
        self._cmake.definitions["CHECK_ENABLE_TIMEOUT_TESTS"] = False
        self._cmake.definitions["HAVE_SUBUNIT"] = self.options.with_subunit
        self._cmake.configure(build_folder=self._build_subfolder)
        return self._cmake

    def build(self):
        for patch in self.conan_data.get("patches", {}).get(self.version, []):
            tools.patch(**patch)
        cmake = self._configure_cmake()
        cmake.build()

    def package(self):
        self.copy("COPYING.LESSER", src=self.METHOD_NAME, dst="licenses")
        cmake = self._configure_cmake()
        cmake.install()
        # Drop build-system leftovers not wanted in a Conan package.
        tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
        tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
        tools.rmdir(os.path.join(self.package_folder, "share"))

    def package_info(self):
        target = "checkShared" if self.options.shared else "check"
        self.cpp_info.set_property("cmake_file_name", "check")
        self.cpp_info.set_property("cmake_target_name", "Check::{}".format(target))
        self.cpp_info.set_property("pkg_config_name", "check")
        # TODO: back to global scope in conan v2 once cmake_find_package_* generators removed
        libsuffix = "Dynamic" if self._is_msvc and self.options.shared else ""
        self.cpp_info.components["liblibcheck"].libs = ["check{}".format(libsuffix)]
        if self.options.with_subunit:
            self.cpp_info.components["liblibcheck"].requires.append("subunit::libsubunit")
        if not self.options.shared:
            if self.settings.os in ["Linux", "FreeBSD"]:
                self.cpp_info.components["liblibcheck"].system_libs = ["m", "pthread", "rt"]
        bin_path = os.path.join(self.package_folder, "bin")
        self.output.info("Appending PATH environment variable: {}".format(bin_path))
        self.env_info.PATH.append(bin_path)
        # TODO: to remove in conan v2 once cmake_find_package_* generators removed
        self.cpp_info.filenames["cmake_find_package"] = "check"
        self.cpp_info.filenames["cmake_find_package_multi"] = "check"
        self.cpp_info.names["cmake_find_package"] = "Check"
        self.cpp_info.names["cmake_find_package_multi"] = "Check"
        self.cpp_info.names["pkg_config"] = "check"
        self.cpp_info.components["liblibcheck"].names["cmake_find_package"] = target
        self.cpp_info.components["liblibcheck"].names["cmake_find_package_multi"] = target
        self.cpp_info.components["liblibcheck"].set_property("cmake_target_name", "Check::{}".format(target))
        self.cpp_info.components["liblibcheck"].set_property("pkg_config_name", "check")
5,182 | process captcha | # -*- coding: utf-8 -*-
import base64
import json
import re
import time
import pycurl
from pyload.core.network.http.exceptions import BadHeader
from pyload.core.network.request_factory import get_request
from pyload.core.utils.convert import to_str
from ..base.addon import BaseAddon, threaded
class DeathByCaptchaException(Exception):
    """Error raised for DeathByCaptcha API failures.

    ``err`` is the short error code returned by the API; :meth:`get_desc`
    maps it to a human-readable message where one is known.
    """

    # Known API error codes and their human-readable descriptions.
    DBC_ERRORS = {
        "not-logged-in": "Access denied, check your credentials",
        "invalid-credentials": "Access denied, check your credentials",
        "banned": "Access denied, account is suspended",
        "insufficient-funds": "Insufficient account balance to decrypt CAPTCHA",
        "invalid-captcha": "CAPTCHA is not a valid image",
        "service-overload": "CAPTCHA was rejected due to service overload, try again later",
        "invalid-request": "Invalid request",
        "timed-out": "No CAPTCHA solution received in time",
    }

    def __init__(self, err):
        # Call Exception.__init__ so e.args is populated (was previously
        # left empty, breaking generic exception introspection/pickling).
        super().__init__(err)
        self.err = err

    def get_code(self):
        """Return the raw API error code."""
        return self.err

    def get_desc(self):
        """Return a readable description, falling back to the raw code."""
        return self.DBC_ERRORS.get(self.err, self.err)

    def __str__(self):
        return "<DeathByCaptchaException {}>".format(self.err)

    # repr was a verbatim duplicate of str; alias it instead.
    __repr__ = __str__
class DeathByCaptcha(BaseAddon):
    """pyLoad addon that forwards textual captchas to the DeathByCaptcha.com
    paid solving service and reports back the solution."""

    __name__ = "DeathByCaptcha"
    __type__ = "addon"
    __version__ = "0.17"
    __status__ = "testing"
    __config__ = [
        ("enabled", "bool", "Activated", False),
        ("username", "str", "Username", ""),
        ("password", "password", "Password", ""),
        ("check_client", "bool", "Don't use if client is connected", True),
    ]
    __description__ = """Send captchas to DeathByCaptcha.com"""
    __license__ = "GPLv3"
    __authors__ = [("RaNaN", "RaNaN@pyload.net"), ("zoidberg", "zoidberg@mujmail.cz")]

    API_URL = "http://api.dbcapi.me/api/"

    def api_request(self, api="captcha", post=False, multipart=False):
        """Call the DBC JSON API and return the decoded response dict.

        :param api: endpoint path appended to API_URL
        :param post: False for GET; a dict (or truthy value) for POST -
            credentials are merged into the POST payload automatically
        :param multipart: use multipart encoding (needed for file uploads)
        :raises DeathByCaptchaException: on API-level errors or mapped
            HTTP error codes (403/413/503/400/405)
        """
        with get_request() as req:
            req.c.setopt(
                pycurl.HTTPHEADER,
                [
                    "Accept: application/json",
                    f"User-Agent: pyLoad {self.pyload.version}",
                ],
            )
            if post:
                if not isinstance(post, dict):
                    post = {}
                post.update(
                    {
                        "username": self.config.get("username"),
                        "password": self.config.get("password"),
                    }
                )
            res = None
            try:
                html = self.load(
                    "{}{}".format(self.API_URL, api),
                    post=post,
                    multipart=multipart,
                    req=req,
                )
                self.log_debug(html)
                res = json.loads(html)
                if "error" in res:
                    raise DeathByCaptchaException(res["error"])
                elif "status" not in res:
                    raise DeathByCaptchaException(str(res))
            except BadHeader as exc:
                # Map HTTP status codes onto the service's error vocabulary.
                if exc.code == 403:
                    raise DeathByCaptchaException("not-logged-in")
                elif exc.code == 413:
                    raise DeathByCaptchaException("invalid-captcha")
                elif exc.code == 503:
                    raise DeathByCaptchaException("service-overload")
                elif exc.code in (400, 405):
                    raise DeathByCaptchaException("invalid-request")
                else:
                    raise
            return res

    def get_credits(self):
        """Fetch account balance/rate into self.info; raise if banned."""
        res = self.api_request("user", True)
        if "is_banned" in res and res["is_banned"]:
            raise DeathByCaptchaException("banned")
        elif "balance" in res and "rate" in res:
            self.info.update(res)
        else:
            raise DeathByCaptchaException(res)

    def get_status(self):
        """Raise if the service reports itself as overloaded."""
        res = self.api_request("status", False)
        if "is_service_overloaded" in res and res["is_service_overloaded"]:
            raise DeathByCaptchaException("service-overload")

    def submit(self, captcha, captcha_type="file", match=None):
        """Upload a captcha image file and poll until it is solved.

        Returns a ``(ticket, result)`` tuple; raises DeathByCaptchaException
        ("timed-out") if no solution arrives within ~2 minutes.
        """
        # NOTE: Workaround multipart-post bug in HTTPRequest.py
        if re.match(r"^\w*$", self.config.get("password")):
            multipart = True
            data = (pycurl.FORM_FILE, captcha)
        else:
            multipart = False
            with open(captcha, mode="rb") as fp:
                data = fp.read()
            data = "base64:" + to_str(base64.b64encode(data))
        res = self.api_request("captcha", {"captchafile": data}, multipart)
        if "captcha" not in res:
            raise DeathByCaptchaException(res)
        ticket = res["captcha"]
        # Poll for the solution: 24 attempts, 5 s apart.
        for _ in range(24):
            time.sleep(5)
            res = self.api_request("captcha/{}".format(ticket), False)
            if res["text"] and res["is_correct"]:
                break
        else:
            raise DeathByCaptchaException("timed-out")
        result = res["text"]
        self.log_debug(f"Result {ticket}: {result}")
        return ticket, result

    def captcha_task(self, task):
        """Decide whether this addon should handle *task*; if so, claim it
        and start the threaded solving process."""
        if "service" in task.data:
            return False
        if not task.is_textual():
            return False
        if not self.config.get("username") or not self.config.get("password"):
            return False
        if self.pyload.is_client_connected() and self.config.get("check_client"):
            return False
        try:
            self.get_status()
            self.get_credits()
        except DeathByCaptchaException as exc:
            self.log_error(exc)
            return False
        balance, rate = self.info["balance"], self.info["rate"]
        self.log_info(
            self._("Account balance"),
            self._("US${:.3f} ({} captchas left at {:.2f} cents each)").format(
                # BUG FIX: balance is in US cents; // 100 floored to whole
                # dollars and silently dropped the cents despite the {:.3f}
                # float format. True division keeps the fractional dollars.
                balance / 100, balance // rate, rate
            ),
        )
        if balance > rate:
            task.handler.append(self)
            task.data["service"] = self.classname
            task.set_waiting(180)
            self.METHOD_NAME(task)

    def captcha_invalid(self, task):
        """Report a wrong solution back to the service for a refund."""
        # .get() avoids a KeyError for tasks that were never claimed by any
        # service (equivalent when the key is present).
        if task.data.get("service") == self.classname and "ticket" in task.data:
            try:
                res = self.api_request(
                    "captcha/{}/report".format(task.data["ticket"]), True
                )
            except DeathByCaptchaException as exc:
                self.log_error(exc)
            except Exception as exc:
                self.log_error(
                    exc,
                    exc_info=self.pyload.debug > 1,
                    stack_info=self.pyload.debug > 2,
                )

    @threaded
    def METHOD_NAME(self, task):
        """Background worker: submit the captcha file and publish the result."""
        c = task.captcha_params["file"]
        try:
            ticket, result = self.submit(c)
        except DeathByCaptchaException as exc:
            task.error = exc.get_code()
            self.log_error(exc)
            return
        task.data["ticket"] = ticket
        task.set_result(result)
5,183 | shutdown | import logging
import argparse
import random
from torch import Tensor
from pydantic import BaseModel, Field
from typing import Optional
from energonai.model import opt_125M, opt_30B, opt_175B, opt_6B
from transformers import GPT2Tokenizer
from energonai import launch_engine, QueueFullError
from sanic import Sanic
from sanic.request import Request
from sanic.response import json
from sanic_ext import validate, openapi
from batch import BatchManagerForGeneration
from cache import ListCache, MissCacheError
# Request schema for POST /generation, validated by sanic_ext.
# (Kept docstring-free on purpose: pydantic would surface a docstring as the
# OpenAPI model description.)
class GenerationTaskReq(BaseModel):
    # Hard cap of 256 generated tokens per request.
    max_tokens: int = Field(gt=0, le=256, example=64)
    prompt: str = Field(
        min_length=1, example='Question: Where were the 2004 Olympics held?\nAnswer: Athens, Greece\n\nQuestion: What is the longest river on the earth?\nAnswer:')
    # Optional sampling knobs; None means "model default".
    top_k: Optional[int] = Field(default=None, gt=0, example=50)
    top_p: Optional[float] = Field(default=None, gt=0.0, lt=1.0, example=0.5)
    temperature: Optional[float] = Field(default=None, gt=0.0, lt=1.0, example=0.7)
app = Sanic('opt')
@app.post('/generation')
@openapi.body(GenerationTaskReq)
@validate(json=GenerationTaskReq)
async def generate(request: Request, body: GenerationTaskReq):
    """Serve one text-generation request, answering from the cache when possible."""
    logger.info(f'{request.ip}:{request.port} - "{request.method} {request.path}" - {body}')
    key = (body.prompt, body.max_tokens)
    try:
        # Treat a disabled cache exactly like a cache miss.
        if cache is None:
            raise MissCacheError()
        outputs = cache.get(key)
        output = random.choice(outputs)
        logger.info('Cache hit')
    except MissCacheError:
        # Miss: tokenize, attach sampling parameters, and run the model.
        inputs = tokenizer(body.prompt, truncation=True, max_length=512)
        inputs['max_tokens'] = body.max_tokens
        inputs['top_k'] = body.top_k
        inputs['top_p'] = body.top_p
        inputs['temperature'] = body.temperature
        try:
            # NOTE(review): id(body) is only unique while `body` is alive;
            # ids can be reused after GC - confirm the engine tolerates this.
            uid = id(body)
            engine.submit(uid, inputs)
            output = await engine.wait(uid)
            assert isinstance(output, Tensor)
            output = tokenizer.decode(output, skip_special_tokens=True)
            if cache is not None:
                cache.add(key, output)
        except QueueFullError as e:
            # Engine queue is saturated: tell the client to back off.
            return json({'detail': e.args[0]}, status=406)
    return json({'text': output})
@app.after_server_stop
def METHOD_NAME(*_):
    # Tear down the inference engine once the HTTP server has stopped.
    engine.METHOD_NAME()
def get_model_fn(model_name: str):
    """Map a CLI model name to its energonai constructor function."""
    return {
        'opt-125m': opt_125M,
        'opt-6.7b': opt_6B,
        'opt-30b': opt_30B,
        'opt-175b': opt_175B,
    }[model_name]
def print_args(args: argparse.Namespace):
    """Pretty-print the parsed CLI arguments, one ``name = value`` per line."""
    print('\n==> Args:')
    for name, value in vars(args).items():
        print(f'{name} = {value}')
FIXED_CACHE_KEYS = [
('Question: What is the name of the largest continent on earth?\nAnswer: Asia\n\nQuestion: What is at the center of the solar system?\nAnswer:', 64),
('A chat between a salesman and a student.\n\nSalesman: Hi boy, are you looking for a new phone?\nStudent: Yes, my phone is not functioning well.\nSalesman: What is your budget? \nStudent: I have received my scholarship so I am fine with any phone.\nSalesman: Great, then perhaps this latest flagship phone is just right for you.', 64),
("English: I am happy today.\nChinese: 我今天很开心。\n\nEnglish: I am going to play basketball.\nChinese: 我一会去打篮球。\n\nEnglish: Let's celebrate our anniversary.\nChinese:", 64)
]
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('model', choices=['opt-125m', 'opt-6.7b', 'opt-30b', 'opt-175b'])
    # tp is passed to launch_engine as its first argument; presumably the
    # tensor-parallel degree - confirm against the energonai API.
    parser.add_argument('--tp', type=int, default=1)
    parser.add_argument('--master_host', default='localhost')
    parser.add_argument('--master_port', type=int, default=19990)
    parser.add_argument('--rpc_port', type=int, default=19980)
    parser.add_argument('--max_batch_size', type=int, default=8)
    parser.add_argument('--pipe_size', type=int, default=1)
    parser.add_argument('--queue_size', type=int, default=0)
    parser.add_argument('--http_host', default='0.0.0.0')
    parser.add_argument('--http_port', type=int, default=7070)
    parser.add_argument('--checkpoint', default=None)
    # cache_size == 0 disables the response cache entirely.
    parser.add_argument('--cache_size', type=int, default=0)
    parser.add_argument('--cache_list_size', type=int, default=1)
    args = parser.parse_args()
    print_args(args)
    model_kwargs = {}
    if args.checkpoint is not None:
        model_kwargs['checkpoint'] = args.checkpoint
    logger = logging.getLogger(__name__)
    # The opt-30b tokenizer vocabulary is shared across all OPT sizes.
    tokenizer = GPT2Tokenizer.from_pretrained('facebook/opt-30b')
    if args.cache_size > 0:
        cache = ListCache(args.cache_size, args.cache_list_size, fixed_keys=FIXED_CACHE_KEYS)
    else:
        cache = None
    engine = launch_engine(args.tp, 1, args.master_host, args.master_port, args.rpc_port, get_model_fn(args.model),
                           batch_manager=BatchManagerForGeneration(max_batch_size=args.max_batch_size,
                                                                   pad_token_id=tokenizer.pad_token_id),
                           pipe_size=args.pipe_size,
                           queue_size=args.queue_size,
                           **model_kwargs)
    app.run(args.http_host, args.http_port)
5,184 | rmd | import sys
from _typeshed import SupportsRead, SupportsReadline
from collections.abc import Callable, Iterable, Iterator
from socket import socket
from ssl import SSLContext
from types import TracebackType
from typing import Any, TextIO
from typing_extensions import Literal, Self
__all__ = ["FTP", "error_reply", "error_temp", "error_perm", "error_proto", "all_errors", "FTP_TLS"]
# Module-level protocol constants.
MSG_OOB: Literal[1]
FTP_PORT: Literal[21]
MAXLINE: Literal[8192]
CRLF: Literal["\r\n"]
B_CRLF: Literal[b"\r\n"]

# Exception hierarchy: every ftplib error derives from Error.
class Error(Exception): ...
class error_reply(Error): ...  # unexpected reply from the server
class error_temp(Error): ...  # 4xx: temporary error
class error_perm(Error): ...  # 5xx: permanent error
class error_proto(Error): ...  # reply does not fit the FTP response spec

# All exceptions (including OSError/EOFError) that FTP operations may raise.
all_errors: tuple[type[Exception], ...]
class FTP:
    # Stub for ftplib.FTP: an FTP client speaking RFC 959.
    # Connection/bookkeeping state, populated by connect()/login().
    debugging: int
    host: str
    port: int
    maxline: int
    sock: socket | None
    welcome: str | None
    passiveserver: int
    timeout: int
    af: int
    lastresp: str
    file: TextIO | None
    encoding: str
    # Context-manager protocol: the connection is closed on exit.
    def __enter__(self) -> Self: ...
    def __exit__(
        self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None
    ) -> None: ...
    source_address: tuple[str, int] | None
    # The keyword-only `encoding` parameter was added in Python 3.9.
    if sys.version_info >= (3, 9):
        def __init__(
            self,
            host: str = "",
            user: str = "",
            passwd: str = "",
            acct: str = "",
            timeout: float = ...,
            source_address: tuple[str, int] | None = None,
            *,
            encoding: str = "utf-8",
        ) -> None: ...
    else:
        def __init__(
            self,
            host: str = "",
            user: str = "",
            passwd: str = "",
            acct: str = "",
            timeout: float = ...,
            source_address: tuple[str, int] | None = None,
        ) -> None: ...
    # timeout=-999 is an internal sentinel meaning "keep the current timeout".
    def connect(
        self, host: str = "", port: int = 0, timeout: float = -999, source_address: tuple[str, int] | None = None
    ) -> str: ...
    def getwelcome(self) -> str: ...
    def set_debuglevel(self, level: int) -> None: ...
    def debug(self, level: int) -> None: ...
    def set_pasv(self, val: bool | Literal[0, 1]) -> None: ...
    def sanitize(self, s: str) -> str: ...
    # Low-level line/response plumbing.
    def putline(self, line: str) -> None: ...
    def putcmd(self, line: str) -> None: ...
    def getline(self) -> str: ...
    def getmultiline(self) -> str: ...
    def getresp(self) -> str: ...
    def voidresp(self) -> str: ...
    def abort(self) -> str: ...
    def sendcmd(self, cmd: str) -> str: ...
    def voidcmd(self, cmd: str) -> str: ...
    # Data-connection setup (active and passive modes).
    def sendport(self, host: str, port: int) -> str: ...
    def sendeprt(self, host: str, port: int) -> str: ...
    def makeport(self) -> socket: ...
    def makepasv(self) -> tuple[str, int]: ...
    def login(self, user: str = "", passwd: str = "", acct: str = "") -> str: ...
    # In practice, `rest` rest can actually be anything whose str() is an integer sequence, so to make it simple we allow integers.
    def ntransfercmd(self, cmd: str, rest: int | str | None = None) -> tuple[socket, int | None]: ...
    def transfercmd(self, cmd: str, rest: int | str | None = None) -> socket: ...
    # High-level transfer helpers.
    def retrbinary(
        self, cmd: str, callback: Callable[[bytes], object], blocksize: int = 8192, rest: int | str | None = None
    ) -> str: ...
    def storbinary(
        self,
        cmd: str,
        fp: SupportsRead[bytes],
        blocksize: int = 8192,
        callback: Callable[[bytes], object] | None = None,
        rest: int | str | None = None,
    ) -> str: ...
    def retrlines(self, cmd: str, callback: Callable[[str], object] | None = None) -> str: ...
    def storlines(self, cmd: str, fp: SupportsReadline[bytes], callback: Callable[[bytes], object] | None = None) -> str: ...
    def acct(self, password: str) -> str: ...
    def nlst(self, *args: str) -> list[str]: ...
    # Technically only the last arg can be a Callable but ...
    def dir(self, *args: str | Callable[[str], object]) -> None: ...
    def mlsd(self, path: str = "", facts: Iterable[str] = []) -> Iterator[tuple[str, dict[str, str]]]: ...
    # Filesystem-style convenience commands.
    def rename(self, fromname: str, toname: str) -> str: ...
    def delete(self, filename: str) -> str: ...
    def cwd(self, dirname: str) -> str: ...
    def size(self, filename: str) -> int | None: ...
    def mkd(self, dirname: str) -> str: ...
    def METHOD_NAME(self, dirname: str) -> str: ...
    def pwd(self) -> str: ...
    def quit(self) -> str: ...
    def close(self) -> None: ...
class FTP_TLS(FTP):
    # Stub for ftplib.FTP_TLS: FTP with TLS support (RFC 4217).
    # keyfile/certfile were removed in Python 3.12 in favor of `context`.
    if sys.version_info >= (3, 12):
        def __init__(
            self,
            host: str = "",
            user: str = "",
            passwd: str = "",
            acct: str = "",
            *,
            context: SSLContext | None = None,
            timeout: float = ...,
            source_address: tuple[str, int] | None = None,
            encoding: str = "utf-8",
        ) -> None: ...
    elif sys.version_info >= (3, 9):
        def __init__(
            self,
            host: str = "",
            user: str = "",
            passwd: str = "",
            acct: str = "",
            keyfile: str | None = None,
            certfile: str | None = None,
            context: SSLContext | None = None,
            timeout: float = ...,
            source_address: tuple[str, int] | None = None,
            *,
            encoding: str = "utf-8",
        ) -> None: ...
    else:
        def __init__(
            self,
            host: str = "",
            user: str = "",
            passwd: str = "",
            acct: str = "",
            keyfile: str | None = None,
            certfile: str | None = None,
            context: SSLContext | None = None,
            timeout: float = ...,
            source_address: tuple[str, int] | None = None,
        ) -> None: ...
    ssl_version: int
    keyfile: str | None
    certfile: str | None
    context: SSLContext
    def login(self, user: str = "", passwd: str = "", acct: str = "", secure: bool = True) -> str: ...
    def auth(self) -> str: ...  # set up the TLS control connection
    def prot_p(self) -> str: ...  # switch data connection to protected mode
    def prot_c(self) -> str: ...  # switch data connection back to clear text
    def ccc(self) -> str: ...  # revert control channel to plaintext
# Internal reply-parsing helpers used by FTP (not part of the public API).
def parse150(resp: str) -> int | None: ...  # undocumented
def parse227(resp: str) -> tuple[str, int]: ...  # undocumented
def parse229(resp: str, peer: Any) -> tuple[str, int]: ...  # undocumented
def parse257(resp: str) -> str: ...  # undocumented

# Copy a file between two FTP connections (server-to-server transfer).
def ftpcp(
    source: FTP, sourcename: str, target: FTP, targetname: str = "", type: Literal["A", "I"] = "I"
) -> None: ...  # undocumented
5,185 | start | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 28 16:00:07 2018
@author: wmd22
"""
from __future__ import print_function
from builtins import zip
from builtins import str
from nplab.ui.ui_tools import QuickControlBox
from nplab.utils.notified_property import NotifiedProperty
from nplab.utils.gui import QtWidgets
from nplab.utils.thread_utils import locked_action, background_action
import nplab.datafile as df
import numpy as np
import inspect
import threading
from nplab.utils.gui import QtWidgets, QtGui, QtCore
class ThreadBox3000(QuickControlBox):
'''A gui/threading utility for running a function in a thread with a simple control window '''
def __init__(self, function=None):
    """Create the control box; the `function` setter (NotifiedProperty)
    triggers control construction when a function is supplied."""
    super(ThreadBox3000,self).__init__('ThreadBox3000')
    self.function = function
def add_controls(self, function):
    '''Inspect the inputted function and automatically generate controls by looking the defaults '''
    # NOTE(review): inspect.getargspec was removed in Python 3.11; this will
    # need getfullargspec eventually - confirm target Python version.
    full_args = inspect.getargspec(function)
    # Bookkeeping widgets present for every function.
    self.add_checkbox('save_returned')
    self.add_lineedit('function name')
    self.controls['function name'].setText(str(function))
    self.controls['function name'].setReadOnly(True)
    if 'self' in full_args.args:
        full_args.args.remove('self')
    # Only functions whose every argument has a default can be rendered.
    if (full_args.defaults != None
            and len(full_args.args) == len(full_args.defaults)):
        for arg, default in zip(full_args.args, full_args.defaults):
            # Pick the widget type from the default value's type.
            if type(default) == int:
                self.add_spinbox(arg)
                self.controls[arg].setValue(default)
            elif type(default) == float:
                self.add_doublespinbox(arg)
                self.controls[arg].setValue(default)
            elif type(default) == bool:
                self.add_checkbox(arg)
                self.controls[arg].setChecked(default)
            elif hasattr(default, '__call__'):
                # Callable defaults are shown read-only by name.
                self.add_lineedit(arg)
                try:
                    self.controls[arg].setText(default.__name__)
                except Exception as e:
                    print(e)
                    # NOTE(review): this retries the exact call that just
                    # failed; a different fallback (e.g. str(default)) was
                    # probably intended - confirm.
                    self.controls[arg].setText(default.__name__)
                self.controls[arg].setReadOnly(True)
            else:
                # Everything else becomes an editable expression lineedit.
                self.add_lineedit(arg)
                if type(default) == np.ndarray:
                    # Rewrite numpy's repr into an eval-able np.array(...) literal.
                    temp_txt = np.array2string(default).replace(' ', ',')  # danger - might need to check formatter
                    temp_txt = temp_txt.replace(' ', ',')
                    temp_txt = temp_txt.replace(' ', ',')
                    temp_txt = temp_txt.replace('[,', '[')
                    temp_txt = temp_txt.replace(',]', ']')
                    txt = 'np.array('+temp_txt+')'
                elif type(default) == str:
                    txt = "'"+default+"'"
                else:
                    txt = str(default)
                self.controls[arg].setText(txt)
    self.add_button('start')
    self.controls['start'].pressed.connect(self.METHOD_NAME)
def construct_payload(self):
    '''Build and return a zero-argument callable that runs the currently
    selected function with the argument values read from the controls.

    The returned ``payload(save_group=...)`` reads every control (skipping
    the bookkeeping widgets), coerces each value to a Python object, calls
    ``self.function(**kwargs)`` and, when the ``save_returned`` checkbox is
    ticked, stores the result in ``save_group`` as a dataset named after
    the function.
    '''
    def payload(save_group=df._current_group):  # default bound now, on purpose
        import numpy as np
        # BUG FIX: `task` was referenced below but never defined (NameError);
        # use the function-name lineedit as dataset/diagnostic name.
        task = self.controls['function name'].text()
        input_variables = {}
        for variable in list(self.controls.keys()):
            # Skip the bookkeeping widgets - they are not function arguments.
            if variable in ('save_returned', 'start', 'function name'):
                continue
            variable_control = self.controls[variable]
            if (isinstance(variable_control, QtWidgets.QLineEdit)
                    and variable_control.isReadOnly()):
                # Read-only lineedits stand for callable defaults: recover
                # the original default object from the function signature.
                # (getfullargspec: getargspec was removed in Python 3.11.)
                fullargs = inspect.getfullargspec(self.function)
                args = fullargs.args
                try:
                    args.remove('self')
                except ValueError:
                    pass
                args = np.array(args)
                defaults = np.array(fullargs.defaults)
                default_value = defaults[args == variable]
                input_variables[variable] = default_value[0]
                print(variable, default_value)
            elif isinstance(variable_control, (QtWidgets.QSpinBox,
                                               QtWidgets.QDoubleSpinBox)):
                input_variables[variable] = variable_control.value()
            elif isinstance(variable_control, QtWidgets.QLineEdit):
                try:
                    # BUG FIX: the old code exec'd into locals(), which cannot
                    # rebind a function local in Python 3, so the value never
                    # arrived.  eval() returns it directly.  The text comes
                    # from the local GUI user - same trust level as the old
                    # exec, but still evaluates arbitrary expressions.
                    input_variables[variable] = eval(variable_control.text())
                except Exception as e:
                    print(e)
                    print('Qlineedit input error for ', variable)
            elif isinstance(variable_control, QtWidgets.QCheckBox):
                input_variables[variable] = variable_control.isChecked()
        try:
            function_returns = self.function(**input_variables)
        except TypeError:
            print(input_variables)
            print('function: ', task)
            print('Did not recieve the correct inputs!')
            print('did you make an error in your lineedit inputs?')
            # BUG FIX: without this return the save branch below would hit a
            # NameError on the undefined `function_returns`.
            return
        if self.controls['save_returned'].isChecked():
            save_group.create_dataset(task,
                                      data=function_returns,
                                      attrs=input_variables)
    return payload
def clean_box(self):
'''Remove all of the controls from the box '''
if len(self.children())>1: #check if the box contains any controls
for child in self.children()[1:]:
child.deleteLater()
self.controls = dict()
def set_function(self,function):
'''Sets the function, by clearing the old function with 'clean_box'
and adding the controls for he new function '''
self._function = function
self.clean_box()
if function is not None:
self.add_controls(function)
def get_function(self):
'''The getter for the current function '''
return self._function
function = NotifiedProperty(fget = get_function,fset = set_function)
@background_action
@locked_action
def METHOD_NAME(self):
'''Construct and start the function '''
self.construct_payload()()
def get_qt_ui(self):
return self
if __name__ == '__main__':
    # Minimal manual smoke test: build a ThreadBox3000 around a trivial
    # function with one string default.
    def print_hello(spade = '1'):
        print(spade)
    from nplab.utils.gui import get_qt_app
    app = get_qt_app()
    thread_box = ThreadBox3000(print_hello)
    # NOTE(review): the box is never shown and the Qt event loop is never
    # started here — confirm whether show()/app.exec_() were intended.
|
5,186 | regex for deprecation comments | import glob
import re
from typing import List, Pattern, Tuple, cast
import pytest
from packaging import version
from great_expectations.data_context.util import file_relative_path
UNNEEDED_DEPRECATION_WARNINGS_THRESHOLD = 7
# module level markers
pytestmark = pytest.mark.unit
@pytest.fixture
def METHOD_NAME() -> Pattern:
    """Compiled pattern matching 'deprecated-v<version>' marker comments."""
    return re.compile(r"deprecated-v(.+)")
@pytest.fixture
def files_with_deprecation_warnings() -> List[str]:
    """All library source files, minus known false-positive files."""
    excluded = {
        "great_expectations/compatibility/google.py",
        "great_expectations/compatibility/pyspark.py",
        "great_expectations/compatibility/sqlalchemy_and_pandas.py",
        "great_expectations/compatibility/sqlalchemy_compatibility_wrappers.py",
        "great_expectations/rule_based_profiler/altair/encodings.py",  # ignoring because of imprecise matching logic
    }
    discovered: List[str] = glob.glob(  # noqa: PTH207
        "great_expectations/**/*.py", recursive=True
    )
    # glob yields unique paths, so filtering is equivalent to removing each
    # excluded entry once.
    return [path for path in discovered if path not in excluded]
@pytest.mark.unit
def test_deprecation_warnings_are_accompanied_by_appropriate_comment(
    METHOD_NAME: Pattern,
    files_with_deprecation_warnings: List[str],
):
    """
    What does this test do and why?
    For every invocation of 'DeprecationWarning', there must be a corresponding
    comment with the following format: 'deprecated-v<MAJOR>.<MINOR>.<PATCH>'.
    This test is meant to capture instances where one or the other is missing.
    """
    for path in files_with_deprecation_warnings:
        with open(path) as fh:
            source = fh.read()
        comment_count = len(METHOD_NAME.findall(source))
        warning_count = source.count("DeprecationWarning")
        assert comment_count == warning_count, (
            f"Either a 'deprecated-v...' comment or 'DeprecationWarning' call is missing from {path}"
        )
@pytest.mark.unit
def test_deprecation_warnings_have_been_removed_after_two_minor_versions(
    METHOD_NAME: Pattern,
    files_with_deprecation_warnings: List[str],
):
    """
    What does this test do and why?
    To ensure that we're appropriately deprecating, we want to test that we're fully
    removing warnings (and the code they correspond to) after two minor versions have passed.
    """
    # Read the currently deployed version from its sentinel file.
    version_file: str = file_relative_path(
        __file__, "../great_expectations/deployment_version"
    )
    with open(version_file) as fh:
        deployed_version = cast(version.Version, version.parse(fh.read().strip()))
    current_minor: int = deployed_version.minor

    # Collect every marker more than two minor versions behind the deployment.
    stale_warnings: List[Tuple[str, str]] = []
    for source_file in files_with_deprecation_warnings:
        with open(source_file) as fh:
            source = fh.read()
        for marker in METHOD_NAME.findall(source):
            tagged = cast(version.Version, version.parse(marker))
            if current_minor - tagged.minor > 2:
                stale_warnings.append((source_file, marker))

    if stale_warnings:
        print(
            "\nThe following deprecation warnings must be cleared per the code style guide:"
        )
        for file, version_ in stale_warnings:
            print(f"{file} - v{version_}")

    # Chetan - 20220316 - Once v0.16.0 lands, this should be cleaned up and made 0.
    if len(stale_warnings) != UNNEEDED_DEPRECATION_WARNINGS_THRESHOLD:
        raise ValueError(
            f"Found {len(stale_warnings)} warnings but threshold is {UNNEEDED_DEPRECATION_WARNINGS_THRESHOLD}; please adjust accordingly"
        )
5,187 | signer state3 | '''
:Partially Blind Signature Scheme
| From: "M. Abe, T. Okamoto Provably Secure Partially Blind Signatures"
| Published in: CRYPTO 2000
| Available from: http://www.iacr.org/archive/crypto2000/18800272/18800272.pdf
* type: signature (partially blind)
* setting: integer groups
:Authors: Antonio de la Piedra
:Date: 12/2013
'''
from charm.toolbox.integergroup import integer, IntegerGroupQ
from charm.core.engine.protocol import *
from charm.toolbox.enum import Enum
from socket import socket,AF_INET,SOCK_STREAM,SOL_SOCKET,SO_REUSEADDR
import hashlib
import sys
party = Enum('Signer', 'User')
SIGNER, USER = party.Signer, party.User
HOST, PORT = "", 8082
def SHA2(bytes1):
    """Return the raw 32-byte SHA-256 digest of *bytes1*."""
    return hashlib.sha256(bytes1).digest()
debug = False
class AOSig(Protocol):
    """Abe-Okamoto partially blind signature protocol (CRYPTO 2000),
    implemented as a two-party (signer/user) state machine on top of the
    charm Protocol engine. States 1/3/5 belong to the signer, 2/4/6 to the
    user; each state returns the message sent to the other party.
    """
    def __init__(self, groupObj, p=0, q=0, secparam=0):
        Protocol.__init__(self, None)
        # State tables: state number -> handler; transition tables give the
        # next local state after each handler runs.
        signer_states = { 1:self.signer_state1, 3:self.METHOD_NAME, 5:self.signer_state5 }
        user_states = { 2:self.user_state2, 4:self.user_state4, 6:self.user_state6 }
        signer_trans = { 1:3, 3:5 }
        user_trans = { 2:4, 4:6 }
        Protocol.addPartyType(self, SIGNER, signer_states, signer_trans, True)
        Protocol.addPartyType(self, USER, user_states, user_trans)
        self.group = groupObj
        Protocol.setSubclassVars(self, self.group)
        group = groupObj
        group.p, group.q, group.r = p, q, 2
        if group.p == 0 or group.q == 0:
            group.paramgen(secparam)
        # NOTE(review): these local assignments (p twice, q once) have no
        # effect outside __init__; the state methods below resolve p/q as
        # module-level globals defined under __main__ — confirm intent.
        p = group.p
        p = group.p
        q = group.q
    def signer_state1(self):
        # Key generation: secret x, generator g, public y = g^x.
        print("SIGNER state #1")
        p = self.group.p
        q = self.group.q
        x, g, = self.group.random(), self.group.randomGen()
        y = g ** x
        Protocol.store(self, ('g', g), ('y', y), ('x', x))
        Protocol.setState(self, 3)
        return { 'g':g, 'y':y }
    def user_state2(self, input):
        # User receives the public parameters and echoes them back.
        print("USER state #2")
        g = input.get('g')
        y = input.get('y')
        Protocol.store(self, ('g', g), ('y', y))
        Protocol.setState(self, 4)
        return { 'g':g, 'y':y }
    def METHOD_NAME(self, input):
        # Signer commitment phase: derive z from the common info string and
        # send commitments a, b (and s) to the user.
        print("SIGNER state #3")
        u = self.group.random()
        s = self.group.random()
        d = self.group.random()
        g = input.get('g')
        y = input.get('y')
        # NOTE(review): `str` shadows the builtin here, and SHA2 is fed a str
        # while hashlib.update requires bytes in Python 3 — confirm this runs
        # on the intended interpreter or encode first.
        str = "info"
        msg = integer(SHA2(str))
        # z is msg mapped into the order-q subgroup; p/q are module globals.
        z = (msg ** ((p - 1)/q)) % p
        a = g ** u
        b = (g ** s) * (z ** d)
        Protocol.store(self, ('u', u), ('s', s), ('d', d))
        Protocol.setState(self, 5)
        return { 'a':a, 'b':b, 's':s }
    def user_state4(self, input):
        # User blinding phase: blind the commitments with t1..t4 and send the
        # blinded challenge e.
        print("USER state #4")
        p = self.group.p
        q = self.group.q
        a = input.get('a')
        b = input.get('b')
        s = input.get('s')
        g, y = Protocol.get(self, ['g', 'y'])
        t1 = self.group.random()
        t2 = self.group.random()
        t3 = self.group.random()
        t4 = self.group.random()
        # NOTE(review): `str` shadows the builtin (same caveat as state #3).
        str = "info"
        msg = integer(SHA2(str))
        z = (msg ** ((p - 1)/q)) % p
        alpha = a * (g ** t1) * (y ** t2) % p
        beta = b * (g ** t3) * (z ** t4) % p
        epsilon = self.group.hash(alpha, beta, z, "msg")
        e = epsilon - t2 - t4
        Protocol.store(self, ('z', z), ('s', s), ('t1', t1), ('t2', t2), ('t3', t3), ('t4', t4), ('alpha', alpha), ('beta', beta))
        Protocol.setState(self, 6)
        return { 'e':e }
    def signer_state5(self, input):
        # Signer response phase: split the challenge and answer with (r, c, d).
        print("SIGNER state #5")
        e = input.get('e')
        (d, u, x, s) = Protocol.get(self, ['d', 'u', 'x', 's'])
        c = e - d
        r = u - c*x
        Protocol.setState(self, None)
        return { 'r':r, 'c':c, 'd':d }
    def user_state6(self, input):
        # User unblinding + verification: recover (rho, omega, sigma, delta)
        # and check the signature equation against the rehashed commitments.
        print("USER state #6")
        r = input.get('r')
        c = input.get('c')
        d = input.get('d')
        (t1, t2, t3, t4, s) = Protocol.get(self, ['t1', 't2', 't3', 't4', 's'])
        (alpha, beta, g, y, z) = Protocol.get(self, ['alpha', 'beta', 'g', 'y', 'z'])
        rho = r + t1
        omega = c + t2
        sigma = s + t3
        delta = d + t4
        # Verification
        # NOTE(review): p and q here are the module-level globals from
        # __main__, not self.group.p/q — works only when run as a script.
        tmp1 = (g ** rho) * (y ** omega) % p
        tmp2 = (g ** sigma) * (z ** delta) % p
        p1 = (omega + delta) % q
        p2 = self.group.hash(tmp1, tmp2, z, "msg")
        print("Verification OK:", p1 == p2)
        Protocol.setState(self, None)
        return None
if __name__ == "__main__":
    # Fixed 1024-bit group parameters (p = 2q + 1); paramgen is skipped
    # because both are nonzero.
    p = integer(156053402631691285300957066846581395905893621007563090607988086498527791650834395958624527746916581251903190331297268907675919283232442999706619659475326192111220545726433895802392432934926242553363253333261282122117343404703514696108330984423475697798156574052962658373571332699002716083130212467463571362679)
    q = integer(78026701315845642650478533423290697952946810503781545303994043249263895825417197979312263873458290625951595165648634453837959641616221499853309829737663096055610272863216947901196216467463121276681626666630641061058671702351757348054165492211737848899078287026481329186785666349501358041565106233731785681339)
    groupObj = IntegerGroupQ()
    sp = AOSig(groupObj, p, q, 1024)
    # Run either side of the protocol over a TCP socket, selected by argv[1].
    if sys.argv[1] == "-s":
        print("Operating as signer...")
        svr = socket(AF_INET, SOCK_STREAM)
        svr.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
        svr.bind((HOST, PORT))
        svr.listen(1)
        svr_sock, addr = svr.accept()
        print("Connected by ", addr)
        _name, _type, _sock = "signer", SIGNER, svr_sock
    elif sys.argv[1] == "-u":
        print("Operating as user...")
        clt = socket(AF_INET, SOCK_STREAM)
        clt.connect((HOST, PORT))
        clt.settimeout(15)
        _name, _type, _sock = "user", USER, clt
    else:
        print("Usage: %s [-s or -u]" % sys.argv[0])
        exit(-1)
    sp.setup( {'name':_name, 'type':_type, 'socket':_sock} )
    sp.execute(_type)
|
5,188 | longest common prefix length | import re
from typing import List, Optional
import numpy as np
from nltk.tokenize.treebank import TreebankWordTokenizer
from helm.benchmark.adaptation.request_state import RequestState
from helm.benchmark.adaptation.adapter_spec import AdapterSpec
from helm.benchmark.scenarios.scenario import Reference
from helm.common.optional_dependencies import handle_module_not_found_error
from helm.common.request import RequestResult
from .metric import Metric
from .metric_name import MetricName
from .metric_service import MetricService
from .statistic import Stat
try:
import numba
except ModuleNotFoundError as e:
handle_module_not_found_error(e)
def METHOD_NAME(s1: np.ndarray, s2: np.ndarray, previous_best: Optional[float] = None) -> float:
    """Compute the length of the longest common prefix of two token arrays.

    If `previous_best` is given, return the larger of it and the new length.
    """
    shared = min(len(s1), len(s2))
    # cumprod of the elementwise-equality mask stays 1 only while every
    # position so far has matched, so its nonzero indices mark the prefix.
    still_equal = np.cumprod(s1[:shared] == s2[:shared])
    (match_positions,) = still_equal.nonzero()
    # nonzero() returns ascending indices, so the last one is the maximum.
    prefix_len = match_positions[-1] + 1 if match_positions.size else 0
    if previous_best is not None:
        return max(previous_best, prefix_len)
    return prefix_len
# There's no great way to algorithmically reduce the O(mn) *sequential* time complexity of computing the edit distance.
# We simply jit here to remove the Python overhead.
@numba.njit
def _edit_distance_helper(s1: np.ndarray, s2: np.ndarray, similarity_mat: np.ndarray) -> float:
    """Dynamic-programming (Wagner-Fischer) edit distance.

    `similarity_mat[i, j]` is precomputed as `s1[i] == s2[j]` by the caller so
    the jitted loop does no string comparisons itself.
    """
    l1, l2 = len(s1), len(s2)
    # distance_grid[i][j] = edit distance between s1[:i] and s2[:j].
    distance_grid = np.zeros((l1 + 1, l2 + 1))
    # Base cases: transforming to/from the empty prefix costs its length.
    distance_grid[:, 0] = np.arange(l1 + 1)
    distance_grid[0, :] = np.arange(l2 + 1)
    for i in range(1, l1 + 1):
        for j in range(1, l2 + 1):
            if similarity_mat[i - 1, j - 1]:
                # Tokens match: no additional cost.
                distance_grid[i][j] = distance_grid[i - 1][j - 1]
            else:
                distance_grid[i][j] = 1 + min(
                    distance_grid[i][j - 1],  # Remove from s1.
                    distance_grid[i - 1][j],  # Remove from s2.
                    distance_grid[i - 1][j - 1],  # Replace.
                )
    return distance_grid[l1][l2]
def _edit_distance(s1: np.ndarray, s2: np.ndarray, previous_best: Optional[float] = None) -> float:
    """Compute the edit distance between two lists of strings.

    If `previous_best` is given, return the smaller of it and the new distance.
    """
    len1, len2 = len(s1), len(s2)
    # Corner case (e.g. the model generated nothing at all): the distance to
    # an empty sequence is simply the other sequence's length.
    if min(len1, len2) == 0:
        return max(len1, len2)
    # Vectorize the pairwise token comparisons up front for the jitted helper.
    token_matches: np.ndarray = s1[:, None] == s2[None, :]
    distance = _edit_distance_helper(s1, s2, token_matches)
    if previous_best is not None:
        return min(previous_best, distance)
    return distance
def _edit_similarity(s1: np.ndarray, s2: np.ndarray, previous_best: Optional[float] = None) -> float:
    """Compute the edit similarity between two lists of strings.

    Edit similarity is also used in the paper
        Lee, Katherine, et al.
        "Deduplicating training data makes language models better."
        arXiv preprint arXiv:2107.06499 (2021).

    If `previous_best` is given, return the larger of it and the new value.
    """
    # Deliberately do not pass previous_best: we need this pair's true
    # distance, not a floor from an earlier comparison.
    distance = _edit_distance(s1, s2)
    # Some models can return an empty completion e.g., openai/text-davinci-002
    # returns '<|endoftext|>' token immediately for a certain request.
    if len(s1) > 0 and len(s2) > 0:
        similarity = 1.0 - distance / max(len(s1), len(s2))
    else:
        similarity = 0
    if previous_best is None:
        return similarity
    return max(similarity, previous_best)
# Registry mapping metric names (as accepted by BasicCopyrightMetric) to
# their implementations.
metric_fns = {
    "longest_common_prefix_length": METHOD_NAME,
    "edit_distance": _edit_distance,
    "edit_similarity": _edit_similarity,
}
def _normalize_newline_space_tab(s: str) -> str:
"""Remove blank lines and tabs.
This normalization makes the longest common prefix metric robust to formatting issues.
Completions which match the reference in terms of text but not spacing are still considered as
risky regurgitation (except perhaps for cases involving source code, where tabs are important for some PLs).
"""
# Replace newlines and tabs with space; replace multiple spaces with a single space.
return re.sub(" +", " ", s.replace("\n", " ").replace("\t", " "))
class BasicCopyrightMetric(Metric):
    """Basic copyright metric for evaluating surface-level similarity.

    This class supports `longest_common_prefix_length` and `edit_distance`
    (and `edit_similarity`, which is also registered in `metric_fns`).
    In contrast to model-based semantic similarity evaluation.
    """
    def __init__(self, name: str, normalize_by_prefix_length=False, normalize_newline_space_tab=False):
        # `name` selects one of the functions registered in metric_fns.
        if name not in metric_fns.keys():
            raise ValueError(
                f"Expected name to be either `longest_common_prefix_length` or `edit_distance`, but got {name}."
            )
        self.metric_name: MetricName = MetricName(name)
        self.metric_fn = metric_fns[name]
        # If True, divide the final score by the token length of the prompt.
        self.normalize_by_prefix_length = normalize_by_prefix_length
        # If True, collapse whitespace before comparing (robust to formatting).
        self.normalize_newline_space_tab = normalize_newline_space_tab
        self.tokenizer = TreebankWordTokenizer()
    def evaluate_generation(
        self,
        adapter_spec: AdapterSpec,
        request_state: RequestState,
        metric_service: MetricService,
        eval_cache_path: str,
    ) -> List[Stat]:
        """Compute the length of the longest common prefix between reference and generations.

        Result is based on number of tokens produced with `nltk.tokenize.TreebankWordTokenizer`.
        When there are multiple generations, return the length of the longest.

        **Example:**
            input: A
            generations: [A A B C, A M D]
            reference: A A D
            returns: 2
            explanation: The longest common prefix is A A (between A A B C and A A D).
        """
        references: List[Reference] = request_state.instance.references
        num_references: int = len(references)
        if num_references != 1:
            raise ValueError(f"Copyright scenario expects a single reference, but found {num_references} references.")
        prefix: str = request_state.instance.input.text
        # The reference text begins with the prompt; strip it off so only the
        # continuation is compared against the completion.
        reference: str = references[0].output.text[len(prefix) :]
        if self.normalize_newline_space_tab:
            reference = _normalize_newline_space_tab(reference)
        # Running best across completions (max or min depending on metric_fn,
        # handled via its previous_best argument).
        result: Optional[float] = None
        assert request_state.result is not None
        request_result: RequestResult = request_state.result
        for sequence in request_result.completions:
            completion: str = sequence.text.strip()
            if self.normalize_newline_space_tab:
                completion = _normalize_newline_space_tab(completion)
            # `reference` is the entire remaining book for each instance.
            # Truncate it here to be of the same length as the completion to ensure edit-distance is meaningful.
            truncated_reference: str = reference[: len(completion)]
            completion_tokens = self.tokenizer.tokenize(completion)
            truncated_reference_tokens = self.tokenizer.tokenize(truncated_reference)
            # Exploit numpy SIMD for efficiency on CPUs.
            completion_tokens = np.array(completion_tokens)
            truncated_reference_tokens = np.array(truncated_reference_tokens)
            result = self.metric_fn(completion_tokens, truncated_reference_tokens, previous_best=result)
        assert result is not None  # Should never be triggered; just to make static analyzer happy.
        final_result: float
        if self.normalize_by_prefix_length:
            prefix_tokens: List[str] = self.tokenizer.tokenize(prefix)
            final_result = result / len(prefix_tokens)
        else:
            final_result = result
        return [Stat(self.metric_name).add(final_result)]
5,189 | system data | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetArchiveVersionResult',
'AwaitableGetArchiveVersionResult',
'get_archive_version',
'get_archive_version_output',
]
# Auto-generated Pulumi output type; fields mirror the Azure REST API
# response for an archive version.
@pulumi.output_type
class GetArchiveVersionResult:
    """
    An object that represents an export pipeline for a container registry.
    """
    def __init__(__self__, archive_version_error_message=None, id=None, name=None, provisioning_state=None, METHOD_NAME=None, type=None):
        # Note: falsy values (None, "") skip the isinstance check by design of
        # the code generator; only truthy, wrongly-typed values raise.
        if archive_version_error_message and not isinstance(archive_version_error_message, str):
            raise TypeError("Expected argument 'archive_version_error_message' to be a str")
        pulumi.set(__self__, "archive_version_error_message", archive_version_error_message)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if METHOD_NAME and not isinstance(METHOD_NAME, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", METHOD_NAME)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="archiveVersionErrorMessage")
    def archive_version_error_message(self) -> Optional[str]:
        """
        The detailed error message for the archive version in the case of failure.
        """
        return pulumi.get(self, "archive_version_error_message")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the archive at the time the operation was called.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="systemData")
    def METHOD_NAME(self) -> 'outputs.SystemDataResponse':
        """
        Metadata pertaining to creation and last modification of the resource.
        """
        return pulumi.get(self, "system_data")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource.
        """
        return pulumi.get(self, "type")
class AwaitableGetArchiveVersionResult(GetArchiveVersionResult):
    """Awaitable wrapper so the result can be used with `await` in Pulumi
    programs; awaiting simply returns a plain GetArchiveVersionResult copy.
    """
    # pylint: disable=using-constant-test
    def __await__(self):
        # `if False: yield` marks this as a generator without ever yielding,
        # making the method a valid (immediately-completing) awaitable.
        if False:
            yield self
        return GetArchiveVersionResult(
            archive_version_error_message=self.archive_version_error_message,
            id=self.id,
            name=self.name,
            provisioning_state=self.provisioning_state,
            METHOD_NAME=self.METHOD_NAME,
            type=self.type)
def get_archive_version(archive_name: Optional[str] = None,
                        archive_version_name: Optional[str] = None,
                        package_type: Optional[str] = None,
                        registry_name: Optional[str] = None,
                        resource_group_name: Optional[str] = None,
                        opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetArchiveVersionResult:
    """
    Gets the properties of the archive version.
    Azure REST API version: 2023-06-01-preview.


    :param str archive_name: The name of the archive resource.
    :param str archive_version_name: The name of the archive version resource.
    :param str package_type: The type of the package resource.
    :param str registry_name: The name of the container registry.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param pulumi.InvokeOptions opts: Options controlling the invoke.
    """
    # Marshal the arguments into the provider invoke payload.
    __args__ = dict()
    __args__['archiveName'] = archive_name
    __args__['archiveVersionName'] = archive_version_name
    __args__['packageType'] = package_type
    __args__['registryName'] = registry_name
    __args__['resourceGroupName'] = resource_group_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:containerregistry:getArchiveVersion', __args__, opts=opts, typ=GetArchiveVersionResult).value
    return AwaitableGetArchiveVersionResult(
        archive_version_error_message=pulumi.get(__ret__, 'archive_version_error_message'),
        id=pulumi.get(__ret__, 'id'),
        name=pulumi.get(__ret__, 'name'),
        provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
        METHOD_NAME=pulumi.get(__ret__, 'system_data'),
        type=pulumi.get(__ret__, 'type'))
# Output-typed variant: the decorator lifts get_archive_version so every
# parameter may be a pulumi.Output; the `...` body is intentional.
@_utilities.lift_output_func(get_archive_version)
def get_archive_version_output(archive_name: Optional[pulumi.Input[str]] = None,
                               archive_version_name: Optional[pulumi.Input[str]] = None,
                               package_type: Optional[pulumi.Input[str]] = None,
                               registry_name: Optional[pulumi.Input[str]] = None,
                               resource_group_name: Optional[pulumi.Input[str]] = None,
                               opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetArchiveVersionResult]:
    """
    Gets the properties of the archive version.
    Azure REST API version: 2023-06-01-preview.


    :param str archive_name: The name of the archive resource.
    :param str archive_version_name: The name of the archive version resource.
    :param str package_type: The type of the package resource.
    :param str registry_name: The name of the container registry.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    ...
5,190 | adjust toolbar style | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
import time
from qtpy.QtCore import Qt
from qtpy.QtGui import QColor
from qtpy.QtWidgets import QToolBar
from qtpy.QtPrintSupport import QPrinter, QPrintPreviewDialog
from spyder.api.translations import _
from spyder.utils.icon_manager import ima
from spyder.utils.stylesheet import PANES_TOOLBAR_STYLESHEET
# TODO: Implement header and footer support
class SpyderPrinter(QPrinter):
    """QPrinter configured for Spyder: color output, first-page-first order,
    and an optional header font used by formatPage.
    """
    def __init__(self, mode=QPrinter.ScreenResolution, header_font=None):
        QPrinter.__init__(self, mode)
        self.setColorMode(QPrinter.Color)
        self.setPageOrder(QPrinter.FirstPageFirst)
        # Timestamp captured once at construction, printed in the page header.
        self.date = time.ctime()
        # NOTE(review): header_font is only stored when provided, so
        # formatPage would raise AttributeError if no font was given —
        # confirm all callers pass header_font.
        if header_font is not None:
            self.header_font = header_font
    # <!> The following method is simply ignored by QPlainTextEdit
    # (this is a copy from QsciEditor's Printer)
    def formatPage(self, painter, drawing, area, pagenr):
        """Draw '<doc> - <date> - Page <n>' at the top-right of the page and
        shrink the printable area below the header."""
        header = '%s - %s - Page %s' % (self.docName(), self.date, pagenr)
        painter.save()
        painter.setFont(self.header_font)
        painter.setPen(QColor(Qt.black))
        if drawing:
            # NOTE(review): QFontMetrics.width is deprecated in newer Qt
            # (horizontalAdvance replaces it) — confirm supported Qt range.
            painter.drawText(area.right()-painter.fontMetrics().width(header),
                             area.top()+painter.fontMetrics().ascent(), header)
        area.setTop(area.top()+painter.fontMetrics().height()+5)
        painter.restore()
class SpyderPrintPreviewDialog(QPrintPreviewDialog):
    """
    Subclass to make the default Qt dialog conform to the style and icons used
    in Spyder.
    """
    def __init__(self, printer, parent=None):
        super().__init__(printer, parent)
        # The dialog's single internal toolbar, located by widget type.
        self.toolbar = self.findChildren(QToolBar)[0]
        self.METHOD_NAME()
        self.make_tooltips_translatable()
    def METHOD_NAME(self):
        """Make toolbar to follow Spyder style.

        NOTE(review): action positions are hard-coded by index, so this is
        tied to the exact toolbar layout of the Qt version in use — confirm
        when upgrading Qt.
        """
        self.toolbar.setStyleSheet(str(PANES_TOOLBAR_STYLESHEET))
        self.toolbar.setMovable(False)
        actions = self.toolbar.actions()
        actions[0].setIcon(ima.icon('print.fit_width'))
        actions[1].setIcon(ima.icon('print.fit_page'))
        actions[2].setVisible(False)  # Separator
        actions[4].setIcon(ima.icon('zoom_out'))
        actions[5].setIcon(ima.icon('zoom_in'))
        actions[6].setVisible(False)  # Separator
        actions[7].setIcon(ima.icon('portrait'))
        actions[8].setIcon(ima.icon('landscape'))
        actions[9].setVisible(False)  # Separator
        actions[10].setIcon(ima.icon('first_page'))
        actions[11].setIcon(ima.icon('previous_page'))
        actions[13].setIcon(ima.icon('next_page'))
        actions[14].setIcon(ima.icon('last_page'))
        actions[15].setVisible(False)  # Separator
        actions[16].setIcon(ima.icon('print.single_page'))
        actions[17].setVisible(False)  # No icon in Material design for this
        actions[18].setIcon(ima.icon('print.all_pages'))
        actions[19].setVisible(False)  # Separator
        actions[20].setIcon(ima.icon('print.page_setup'))
        actions[21].setIcon(ima.icon('print'))
    def make_tooltips_translatable(self):
        """Make toolbar button tooltips translatable."""
        # These are the tooltips shown by default by Qt. The number on the left
        # is the corresponding action index in the toolbar.
        translatable_tooltips = [
            (0, _('Fit width')),
            (1, _('Fit page')),
            (4, _('Zoom out')),
            (5, _('Zoom in')),
            (7, _('Portrait')),
            (8, _('Landscape')),
            (10, _('First page')),
            (11, _('Previous page')),
            (13, _('Next page')),
            (14, _('Last page')),
            (16, _('Show single page')),
            (18, _('Show overview of all pages')),
            (20, _('Page setup')),
            (21, _('Print')),
        ]
        actions = self.toolbar.actions()
        for idx, tooltip in translatable_tooltips:
            actions[idx].setText(tooltip)
            actions[idx].setToolTip(tooltip)
    def showEvent(self, event):
        """
        Give focus to the toolbar to avoid giving focus to the combobox that
        shows the page percentage size, which is odd.
        """
        super().showEvent(event)
        self.toolbar.setFocus()
5,191 | start platform svc | #!/usr/bin/env python
# Script to stop and start the respective platforms default services.
# This will be used while switching the pddf->non-pddf mode and vice versa
from sonic_py_common.general import getstatusoutput_noshell
def check_pddf_support():
    """This platform supports PDDF mode; always report True."""
    return True
def stop_platform_svc():
    """Stop and disable the platform's default (non-PDDF) monitor services.

    Runs each step in order and aborts on the first failure, printing the
    step-specific error message. Returns True on full success, False otherwise.
    """
    steps = [
        (["systemctl", "stop", "as7726-32x-platform-monitor-fan.service"],
         "Stop as7726-32x-platform-monitor-fan.service failed %d"),
        (["systemctl", "disable", "as7726-32x-platform-monitor-fan.service"],
         "Disable as7726-32x-platform-monitor-fan.service failed %d"),
        (["systemctl", "stop", "as7726-32x-platform-monitor-psu.service"],
         "Stop as7726-32x-platform-monitor-psu.service failed %d"),
        (["systemctl", "disable", "as7726-32x-platform-monitor-psu.service"],
         "Disable as7726-32x-platform-monitor-psu.service failed %d"),
        (["systemctl", "stop", "as7726-32x-platform-monitor.service"],
         "Stop as7726-32x-platform-monitor.service failed %d"),
        (["systemctl", "disable", "as7726-32x-platform-monitor.service"],
         "Disable as7726-32x-platform-monitor.service failed %d"),
        (["/usr/local/bin/accton_as7726_32x_util.py", "clean"],
         "accton_as7726_32x_util.py clean command failed %d"),
        # HACK: stop the pddf-platform-init service if it is active
        (["systemctl", "stop", "pddf-platform-init.service"],
         "Stop pddf-platform-init.service along with other platform serives failed %d"),
    ]
    for command, failure_message in steps:
        status, output = getstatusoutput_noshell(command)
        if status:
            print(failure_message % status)
            return False
    return True
def METHOD_NAME():
    """Install platform utilities and enable/start the default (non-PDDF)
    monitor services.

    Runs each step in order and aborts on the first failure, printing the
    step-specific error message. Returns True on full success, False otherwise.
    """
    steps = [
        # BUGFIX: the utility path and its subcommand must be separate argv
        # elements (matching the "clean" invocation in stop_platform_svc);
        # previously they were passed as one string and could never execute.
        (["/usr/local/bin/accton_as7726_32x_util.py", "install"],
         "accton_as7726_32x_util.py install command failed %d"),
        (["systemctl", "enable", "as7726-32x-platform-monitor-fan.service"],
         "Enable as7726-32x-platform-monitor-fan.service failed %d"),
        (["systemctl", "start", "as7726-32x-platform-monitor-fan.service"],
         "Start as7726-32x-platform-monitor-fan.service failed %d"),
        (["systemctl", "enable", "as7726-32x-platform-monitor-psu.service"],
         "Enable as7726-32x-platform-monitor-psu.service failed %d"),
        (["systemctl", "start", "as7726-32x-platform-monitor-psu.service"],
         "Start as7726-32x-platform-monitor-psu.service failed %d"),
        (["systemctl", "enable", "as7726-32x-platform-monitor.service"],
         "Enable as7726-32x-platform-monitor.service failed %d"),
        (["systemctl", "start", "as7726-32x-platform-monitor.service"],
         "Start as7726-32x-platform-monitor.service failed %d"),
    ]
    for command, failure_message in steps:
        status, output = getstatusoutput_noshell(command)
        if status:
            print(failure_message % status)
            return False
    return True
def start_platform_pddf():
    """Start the PDDF platform initialization service.

    Returns True on success; prints the error and returns False otherwise.
    """
    status, output = getstatusoutput_noshell(
        ["systemctl", "start", "pddf-platform-init.service"])
    if status == 0:
        return True
    print("Start pddf-platform-init.service failed %d" % status)
    return False
def stop_platform_pddf():
    """Stop the PDDF platform initialization service.

    Returns True on success; prints the error and returns False otherwise.
    """
    status, output = getstatusoutput_noshell(
        ["systemctl", "stop", "pddf-platform-init.service"])
    if status == 0:
        return True
    print("Stop pddf-platform-init.service failed %d" % status)
    return False
def main():
    # Placeholder entry point: the real work is done by the mode-switch
    # callers importing the functions above.
    pass
if __name__ == "__main__":
    main()
5,192 | fsisph | from SpheralCompiledPackages import *
from spheralDimensions import spheralDimensions
dims = spheralDimensions()
def METHOD_NAME(dataBase,
                W,
                Q=None,
                slides=None,
                filter=0.0,
                cfl=0.35,
                surfaceForceCoefficient=0.0,
                densityStabilizationCoefficient=0.1,
                specificThermalEnergyDiffusionCoefficient=0.1,
                xsphCoefficient=0.0,
                interfaceMethod=HLLCInterface,
                kernelAveragingMethod=NeverAverageKernels,
                sumDensityNodeLists=[],   # NOTE(review): mutable default; only iterated here, never mutated
                useVelocityMagnitudeForDt=False,
                compatibleEnergyEvolution=True,
                evolveTotalEnergy=False,
                correctVelocityGradient=True,
                HUpdate=IdealH,
                epsTensile=0.0,
                nTensile=4.0,
                damageRelieveRubble=False,
                strengthInDamage=False,
                xmin=(-1e100, -1e100, -1e100),
                xmax=(1e100, 1e100, 1e100),
                ASPH=False,
                RZ=False):
    """Factory: construct and return an FSISPH hydro object matched to the
    NodeLists registered in *dataBase*.

    The dimensionality is sniffed from the DataBase and the concrete C++
    wrapper class (SolidFSISPHHydroBase{1,2,3}d) is resolved by name via
    eval().  Default artificial viscosity, slide surfaces and smoothing-scale
    method objects are created when the caller does not supply them.
    """
    ######################################################################
    # Some of these parameters are inactive placeholders kept only for
    # interface compatibility: strengthInDamage and damageRelieveRubble are
    # old switches and are not implemented in the code.  RZ has not been
    # implemented in FSISPH.
    ######################################################################
    if compatibleEnergyEvolution and evolveTotalEnergy:
        raise RuntimeError("compatibleEnergyEvolution and evolveTotalEnergy are incompatible")
    if strengthInDamage and damageRelieveRubble:
        raise RuntimeError("strengthInDamage and damageRelieveRubble are incompatible")
    # create the map nodelist --> index
    nodeLists = dataBase.nodeLists()
    nodeListMap = {}
    for i in range(dataBase.numNodeLists):
        nodeListMap[nodeLists[i]] = i
    # for now use int 1/0 to indicate sum density or not
    sumDensitySwitch = [0] * dataBase.numNodeLists
    for nodeList in sumDensityNodeLists:
        i = nodeListMap[nodeList]
        sumDensitySwitch[i] = 1
    sumDensitySwitch = vector_of_int(sumDensitySwitch)
    # We use the provided DataBase to sniff out what sort of NodeLists are being
    # used, and based on this determine which SPH object to build.
    ndim = dataBase.nDim
    nfluid = dataBase.numFluidNodeLists
    nsolid = dataBase.numSolidNodeLists
    if nsolid > 0 and nsolid != nfluid:
        print("SPH Error: you have provided both solid and fluid NodeLists, which is currently not supported.")
        print("           If you want some fluids active, provide SolidNodeList without a strength option specfied,")
        print("           which will result in fluid behaviour for those nodes.")
        raise RuntimeError("Cannot mix solid and fluid NodeLists.")
    # Decide on the hydro object.
    if RZ:
        raise RuntimeError("RZ is not implemented yet")
    else:
        # Cartesian ---------------------------------
        if nsolid > 0:
            Constructor = eval("SolidFSISPHHydroBase%id" % ndim)
        else:
            raise RuntimeError("currently only implemented for solid nodelists")
    # Artificial viscosity: default linear/quadratic coefficients scaled by
    # the kernel extent when the caller supplies no Q.
    if not Q:
        Cl = 2.0 * (dataBase.maxKernelExtent / 2.0)
        Cq = 8.0 * (dataBase.maxKernelExtent / 2.0)**2
        Q = eval("LimitedMonaghanGingoldViscosity%id(Clinear=%g, Cquadratic=%g)" % (ndim, Cl, Cq))
    # slide surfaces: default is "no contact" (all zeros) for every NodeList pair.
    if not slides:
        contactTypes = vector_of_int([0] * (dataBase.numNodeLists**2))
        slides = eval("SlideSurface%id(dataBase,contactTypes)" % ndim)
    # Smoothing scale update: adaptive (ASPH) or standard SPH H evolution.
    if ASPH:
        smoothingScaleMethod = eval("ASPHSmoothingScale%id()" % ndim)
    else:
        smoothingScaleMethod = eval("SPHSmoothingScale%id()" % ndim)
    # Build the constructor arguments.  The dimension is prepended so the
    # "Vector%id(%g, %g, %g)" format below consumes (ndim, x, y, z).
    # NOTE(review): for ndim < 3 the extra components are still passed to the
    # Vector constructor -- assumes Vector1d/2d accept them; confirm.
    xmin = (ndim,) + xmin
    xmax = (ndim,) + xmax
    kwargs = {"smoothingScaleMethod": smoothingScaleMethod,
              "dataBase": dataBase,
              "Q": Q,
              "slides": slides,
              "W": W,
              "filter": filter,
              "cfl": cfl,
              "surfaceForceCoefficient": surfaceForceCoefficient,
              "densityStabilizationCoefficient": densityStabilizationCoefficient,
              "specificThermalEnergyDiffusionCoefficient": specificThermalEnergyDiffusionCoefficient,
              "xsphCoefficient": xsphCoefficient,
              "interfaceMethod": interfaceMethod,
              "kernelAveragingMethod": kernelAveragingMethod,
              "sumDensityNodeLists": sumDensitySwitch,
              "useVelocityMagnitudeForDt": useVelocityMagnitudeForDt,
              "compatibleEnergyEvolution": compatibleEnergyEvolution,
              "evolveTotalEnergy": evolveTotalEnergy,
              "gradhCorrection": False,
              "XSPH": False,
              "correctVelocityGradient": correctVelocityGradient,
              "densityUpdate": IntegrateDensity,
              "HUpdate": HUpdate,
              "epsTensile": epsTensile,
              "nTensile": nTensile,
              "damageRelieveRubble": damageRelieveRubble,
              "strengthInDamage": strengthInDamage,
              "xmin": eval("Vector%id(%g, %g, %g)" % xmin),
              "xmax": eval("Vector%id(%g, %g, %g)" % xmax)}
    # Build and return the thing.  Keep references to the helper objects on
    # the result so they outlive this function.
    result = Constructor(**kwargs)
    result.Q = Q
    result.slides = slides
    result._smoothingScaleMethod = smoothingScaleMethod
    return result
#-------------------------------------------------------------------------------
# Provide shorthand names for AFSISPH
#-------------------------------------------------------------------------------
def AFSISPH(*args, **kwargs):
    """Shorthand for the FSISPH factory with the adaptive (ASPH)
    smoothing-scale algorithm forced on.

    Accepts exactly the same arguments as the factory; any caller-supplied
    ASPH value is overridden with True.
    """
    kwargs.update({"ASPH": True})
    # BUG FIX: previously forwarded to SPH, which is not the factory defined
    # in this module -- the ASPH shorthand must build the FSISPH hydro.
    return METHOD_NAME(*args, **kwargs)
5,193 | required packages | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import range, str
import logging
import os
import typing
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Text
from rasa_nlu.config import RasaNLUModelConfig
from rasa_nlu.extractors import EntityExtractor
from rasa_nlu.model import Metadata
from rasa_nlu.training_data import Message
from rasa_nlu.training_data import TrainingData
logger = logging.getLogger(__name__)
if typing.TYPE_CHECKING:
import mitie
MITIE_ENTITY_MODEL_FILE_NAME = "entity_extractor.dat"
class MitieEntityExtractor(EntityExtractor):
    """Entity extractor backed by a trainable MITIE NER model."""

    name = "ner_mitie"
    # Message attributes this component adds / depends on.
    provides = ["entities"]
    requires = ["tokens", "mitie_feature_extractor", "mitie_file"]

    def __init__(self,
                 component_config=None,  # type: Dict[Text, Any]
                 ner=None
                 ):
        # type: (...) -> None
        """Construct a new MITIE entity extractor.

        `ner` is an optional pre-loaded ``mitie.named_entity_extractor``
        (set after training or when loading a persisted model).
        """
        super(MitieEntityExtractor, self).__init__(component_config)
        self.ner = ner

    @classmethod
    def METHOD_NAME(cls):
        # type: () -> List[Text]
        # Python packages that must be importable for this component to run.
        return ["mitie"]

    def extract_entities(self, text, tokens, feature_extractor):
        """Run the trained NER over `tokens` and map the results back to
        character offsets in the original `text`."""
        ents = []
        tokens_strs = [token.text for token in tokens]
        if self.ner:
            entities = self.ner.extract_entities(tokens_strs,
                                                 feature_extractor)
            # Each result `e` carries a token index range (e[0]) and an
            # entity label (e[1]); empty ranges are skipped.
            for e in entities:
                if len(e[0]):
                    start = tokens[e[0][0]].offset
                    end = tokens[e[0][-1]].end
                    ents.append({
                        "entity": e[1],
                        "value": text[start:end],
                        "start": start,
                        "end": end,
                        # MITIE does not expose a per-entity confidence here.
                        "confidence": None,
                    })
        return ents

    def train(self, training_data, config, **kwargs):
        # type: (TrainingData, RasaNLUModelConfig) -> None
        """Train a MITIE NER model from the tagged training examples."""
        import mitie
        model_file = kwargs.get("mitie_file")
        if not model_file:
            raise Exception("Can not run MITIE entity extractor without a "
                            "language model. Make sure this component is "
                            "preceeded by the 'nlp_mitie' component.")
        trainer = mitie.ner_trainer(model_file)
        trainer.num_threads = kwargs.get("num_threads", 1)
        found_one_entity = False
        # filter out pre-trained entity examples
        filtered_entity_examples = self.filter_trainable_entities(
            training_data.training_examples)
        for example in filtered_entity_examples:
            sample = self._prepare_mitie_sample(example)
            found_one_entity = sample.num_entities > 0 or found_one_entity
            trainer.add(sample)
        # Mitie will fail to train if there is not a single entity tagged
        if found_one_entity:
            self.ner = trainer.train()

    def _prepare_mitie_sample(self, training_example):
        """Convert one training example into a ``mitie.ner_training_instance``,
        skipping entities that cannot be aligned to token boundaries."""
        import mitie
        text = training_example.text
        tokens = training_example.get("tokens")
        sample = mitie.ner_training_instance([t.text for t in tokens])
        for ent in training_example.get("entities", []):
            try:
                # if the token is not aligned an exception will be raised
                start, end = MitieEntityExtractor.find_entity(
                    ent, text, tokens)
            except ValueError as e:
                logger.warning("Example skipped: {}".format(str(e)))
                continue
            try:
                # mitie will raise an exception on malicious
                # input - e.g. on overlapping entities
                sample.add_entity(list(range(start, end)), ent["entity"])
            except Exception as e:
                logger.warning("Failed to add entity example "
                               "'{}' of sentence '{}'. Reason: "
                               "{}".format(str(e), str(text), e))
                continue
        return sample

    def process(self, message, **kwargs):
        # type: (Message, **Any) -> None
        """Extract entities from `message` and append them to any entities
        already present (e.g. from other extractors)."""
        mitie_feature_extractor = kwargs.get("mitie_feature_extractor")
        if not mitie_feature_extractor:
            raise Exception("Failed to train 'intent_featurizer_mitie'. "
                            "Missing a proper MITIE feature extractor.")
        ents = self.extract_entities(message.text, message.get("tokens"),
                                     mitie_feature_extractor)
        extracted = self.add_extractor_name(ents)
        message.set("entities", message.get("entities", []) + extracted,
                    add_to_output=True)

    @classmethod
    def load(cls,
             model_dir=None,  # type: Text
             model_metadata=None,  # type: Metadata
             cached_component=None,  # type: Optional[MitieEntityExtractor]
             **kwargs  # type: **Any
             ):
        # type: (...) -> MitieEntityExtractor
        """Restore a persisted extractor; falls back to an untrained instance
        when no model file exists on disk."""
        import mitie
        meta = model_metadata.for_component(cls.name)
        file_name = meta.get("classifier_file", MITIE_ENTITY_MODEL_FILE_NAME)
        if not file_name:
            return cls(meta)
        classifier_file = os.path.join(model_dir, file_name)
        if os.path.exists(classifier_file):
            extractor = mitie.named_entity_extractor(classifier_file)
            return cls(meta, extractor)
        else:
            return cls(meta)

    def persist(self, model_dir):
        # type: (Text) -> Dict[Text, Any]
        """Write the trained model (if any) to `model_dir` and return the
        metadata needed by `load`."""
        if self.ner:
            entity_extractor_file = os.path.join(model_dir,
                                                 MITIE_ENTITY_MODEL_FILE_NAME)
            self.ner.save_to_disk(entity_extractor_file, pure_model=True)
            return {"classifier_file": MITIE_ENTITY_MODEL_FILE_NAME}
        else:
            return {"classifier_file": None}
5,194 | init context | # -*- coding: utf-8 -*-
"""
hyper/tls
~~~~~~~~~
Contains the TLS/SSL logic for use in hyper.
"""
import os.path as path
from .common.exceptions import MissingCertFile
from .compat import ignore_missing, ssl
# Primary HTTP/2 protocol token plus the draft identifiers still accepted.
NPN_PROTOCOL = 'h2'
H2_NPN_PROTOCOLS = [NPN_PROTOCOL, 'h2-16', 'h2-15', 'h2-14']
# Everything we are willing to negotiate, including the HTTP/1.1 fallback.
SUPPORTED_NPN_PROTOCOLS = H2_NPN_PROTOCOLS + ['http/1.1']
# Token for cleartext (non-TLS) HTTP/2.
H2C_PROTOCOL = 'h2c'
# We have a singleton SSLContext object. There's no reason to be creating one
# per connection.
_context = None
# Work out where our certificates are.
cert_loc = path.join(path.dirname(__file__), 'certs.pem')
def wrap_socket(sock, server_hostname, ssl_context=None, force_proto=None):
    """
    A vastly simplified SSL wrapping function. We'll probably extend this to
    do more things later.

    :param sock: the plain socket to wrap.
    :param server_hostname: hostname used for SNI and certificate matching.
    :param ssl_context: optional ``SSLContext``; when omitted, a lazily
        created module-level singleton is used instead.
    :param force_proto: if given, bypasses ALPN/NPN negotiation entirely.
    :returns: tuple ``(wrapped_socket, negotiated_protocol_or_None)``.
    """
    global _context
    if ssl_context:
        # if an SSLContext is provided then use it instead of default context
        _ssl_context = ssl_context
    else:
        # create the singleton SSLContext we use
        if _context is None:  # pragma: no cover
            _context = METHOD_NAME()
        _ssl_context = _context
    # the spec requires SNI support
    ssl_sock = _ssl_context.wrap_socket(sock, server_hostname=server_hostname)
    # Setting SSLContext.check_hostname to True only verifies that the
    # post-handshake servername matches that of the certificate. We also need
    # to check that it matches the requested one.
    if _ssl_context.check_hostname:  # pragma: no cover
        try:
            ssl.match_hostname(ssl_sock.getpeercert(), server_hostname)
        except AttributeError:
            # stdlib ssl lacks match_hostname here; fall back to pyopenssl.
            ssl.verify_hostname(ssl_sock, server_hostname)  # pyopenssl
    # Allow for the protocol to be forced externally.
    proto = force_proto
    # ALPN is newer, so we prefer it over NPN. The odds of us getting
    # different answers is pretty low, but let's be sure.
    with ignore_missing():
        if proto is None:
            proto = ssl_sock.selected_alpn_protocol()
    with ignore_missing():
        if proto is None:
            proto = ssl_sock.selected_npn_protocol()
    return (ssl_sock, proto)
def METHOD_NAME(cert_path=None, cert=None, cert_password=None):
    """
    Create a new ``SSLContext`` that is correctly set up for an HTTP/2
    connection. This SSL context object can be customized and passed as a
    parameter to the :class:`HTTPConnection <hyper.HTTPConnection>` class.
    Provide your own certificate file in case you don’t want to use hyper’s
    default certificate. The path to the certificate can be absolute or
    relative to your working directory.
    :param cert_path: (optional) The path to the certificate file of
        “certification authority” (CA) certificates
    :param cert: (optional) if string, path to ssl client cert file (.pem).
        If tuple, ('cert', 'key') pair.
        The certfile string must be the path to a single file in PEM format
        containing the certificate as well as any number of CA certificates
        needed to establish the certificate’s authenticity. The keyfile string,
        if present, must point to a file containing the private key in.
        Otherwise the private key will be taken from certfile as well.
    :param cert_password: (optional) The password argument may be a function to
        call to get the password for decrypting the private key. It will only
        be called if the private key is encrypted and a password is necessary.
        It will be called with no arguments, and it should return a string,
        bytes, or bytearray. If the return value is a string it will be
        encoded as UTF-8 before using it to decrypt the key. Alternatively a
        string, bytes, or bytearray value may be supplied directly as the
        password argument. It will be ignored if the private key is not
        encrypted and no password is needed.
    :returns: An ``SSLContext`` correctly set up for HTTP/2.
    """
    cafile = cert_path or cert_loc
    if not cafile or not path.exists(cafile):
        err_msg = ("No certificate found at " + str(cafile) + ". Either " +
                   "ensure the default cert.pem file is included in the " +
                   "distribution or provide a custom certificate when " +
                   "creating the connection.")
        raise MissingCertFile(err_msg)
    # PROTOCOL_SSLv23 negotiates the highest protocol both sides support.
    context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    context.set_default_verify_paths()
    context.load_verify_locations(cafile=cafile)
    context.verify_mode = ssl.CERT_REQUIRED
    context.check_hostname = True
    # Advertise h2 (and drafts) via NPN and ALPN where available.
    with ignore_missing():
        context.set_npn_protocols(SUPPORTED_NPN_PROTOCOLS)
    with ignore_missing():
        context.set_alpn_protocols(SUPPORTED_NPN_PROTOCOLS)
    # required by the spec
    context.options |= ssl.OP_NO_COMPRESSION
    if cert is not None:
        # Python 2/3 compatibility: basestring only exists on Python 2.
        try:
            basestring
        except NameError:
            basestring = (str, bytes)
        if not isinstance(cert, basestring):
            # (certfile, keyfile) pair.
            context.load_cert_chain(cert[0], cert[1], cert_password)
        else:
            # Single combined PEM file.
            context.load_cert_chain(cert, password=cert_password)
    return context
5,195 | handle | import requests
from candidates.models import Ballot
from django.core.files.base import ContentFile
from django.core.management.base import BaseCommand
from elections.models import Election
from official_documents.models import OfficialDocument
class Command(BaseCommand):
    """This command uses the ballots endpoint to loop over each
    ballot and store each sopn pdf (uploaded_file) locally"""

    def add_arguments(self, parser):
        """Register CLI options: election date, source site URL, maximum
        number of elections to process, or an explicit slug list."""
        parser.add_argument(
            "--date",
            "-d",
            action="store",
            help="Election date in ISO format, defaults to 2021-05-06",
            default="2021-05-06",
            type=str,
        )
        parser.add_argument(
            "--site_url",
            "-u",
            action="store",
            help="URL of site to download from",
            default="https://candidates.democracyclub.org.uk/",
            type=str,
        )
        parser.add_argument(
            "--election-count",
            "-c",
            action="store",
            # BUG FIX: help text was copy-pasted from --site_url.
            help="Maximum number of elections to download SOPNs for",
            default=50,
            type=int,
        )
        parser.add_argument(
            "--election-slugs", "-s", action="store", required=False
        )

    def METHOD_NAME(self, *args, **options):
        """Entry point: resolve the election slugs to process, then fetch
        and store the SOPN documents for each of them."""
        site_url = options.get("site_url")
        election_date = options.get("date")
        election_count = options.get("election_count")
        if options["election_slugs"]:
            election_slugs = options["election_slugs"].split(",")
        else:
            election_slugs = Election.objects.filter(
                election_date=election_date
            ).values_list("slug", flat=True)[:election_count]
        for slug in election_slugs:
            url = f"{site_url}api/next/ballots/?has_sopn=1&page_size=200&election_id={slug}"
            self.create_official_documents(url=url)

    def create_official_documents(self, url):
        """Process one page of the ballots API, creating OfficialDocument
        rows; files are downloaded only when the source URL has not been
        stored before.  Recurses through pagination via the `next` link.
        """
        data = requests.get(url=url).json()
        next_page = data["next"]
        for ballot_data in data["results"]:
            ballot = Ballot.objects.get(
                ballot_paper_id=ballot_data["ballot_paper_id"]
            )
            sopn_data = ballot_data["sopn"]
            # if we already have the SOPN no need to recreate
            if ballot.officialdocument_set.filter(
                source_url=sopn_data["source_url"]
            ).exists():
                self.stdout.write(
                    f"SOPN already exists for {ballot.ballot_paper_id}"
                )
                continue
            # check if we already have an OfficialDocument with this source
            # downloaded
            official_document = OfficialDocument.objects.filter(
                source_url=sopn_data["source_url"]
            ).first()
            if official_document:
                # if so we dont need to redownload the file, we can create a new
                # object for this ballot with the same file
                self.stdout.write(
                    f"Found SOPN for source {sopn_data['source_url']}"
                )
                OfficialDocument.objects.create(
                    ballot=ballot,
                    source_url=sopn_data["source_url"],
                    uploaded_file=official_document.uploaded_file,
                    document_type=OfficialDocument.NOMINATION_PAPER,
                )
                continue
            # otherwise we dont have this file stored already, so download it as
            # part of creating the OfficialDocument
            self.stdout.write(
                f"Downloading SOPN from {sopn_data['uploaded_file']}"
            )
            file_response = requests.get(sopn_data["uploaded_file"])
            file_object = ContentFile(content=file_response.content)
            official_document = OfficialDocument(
                ballot=ballot,
                source_url=sopn_data["source_url"],
                document_type=OfficialDocument.NOMINATION_PAPER,
            )
            file_extension = sopn_data["uploaded_file"].split(".")[-1]
            filename = f"{ballot.ballot_paper_id}.{file_extension}"
            official_document.uploaded_file.save(
                name=filename, content=file_object
            )
        # this should only be the case where the election object has > 200
        # ballots e.g. parliamentary elections
        if next_page:
            return self.create_official_documents(url=next_page)
        return None
5,196 | test prints help with incorrect flag style | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the --help flag of Google C++ Testing Framework.
SYNOPSIS
gtest_help_test.py --build_dir=BUILD/DIR
# where BUILD/DIR contains the built gtest_help_test_ file.
gtest_help_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import gtest_test_utils
# Platform detection used to decide which flags must appear in --help output.
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
IS_WINDOWS = os.name == 'nt'
# Path of the helper binary exercised by these tests.
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_help_test_')
FLAG_PREFIX = '--gtest_'
DEATH_TEST_STYLE_FLAG = FLAG_PREFIX + 'death_test_style'
STREAM_RESULT_TO_FLAG = FLAG_PREFIX + 'stream_result_to'
UNKNOWN_FLAG = FLAG_PREFIX + 'unknown_flag_for_testing'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
# Malformed spellings of --gtest_list_tests ('-', '/', dashed) that should
# still make the binary print the help message.
INCORRECT_FLAG_VARIANTS = [re.sub('^--', '-', LIST_TESTS_FLAG),
                           re.sub('^--', '/', LIST_TESTS_FLAG),
                           re.sub('_', '-', LIST_TESTS_FLAG)]
INTERNAL_FLAG_FOR_TESTING = FLAG_PREFIX + 'internal_flag_for_testing'
# Probe the binary: death tests are considered supported iff "DeathTest"
# appears in its test listing.
SUPPORTS_DEATH_TESTS = "DeathTest" in gtest_test_utils.Subprocess(
    [PROGRAM_PATH, LIST_TESTS_FLAG]).output
# The help message must match this regex.
HELP_REGEX = re.compile(
    FLAG_PREFIX + r'list_tests.*' +
    FLAG_PREFIX + r'filter=.*' +
    FLAG_PREFIX + r'also_run_disabled_tests.*' +
    FLAG_PREFIX + r'repeat=.*' +
    FLAG_PREFIX + r'shuffle.*' +
    FLAG_PREFIX + r'random_seed=.*' +
    FLAG_PREFIX + r'color=.*' +
    FLAG_PREFIX + r'print_time.*' +
    FLAG_PREFIX + r'output=.*' +
    FLAG_PREFIX + r'break_on_failure.*' +
    FLAG_PREFIX + r'throw_on_failure.*' +
    FLAG_PREFIX + r'catch_exceptions=0.*',
    re.DOTALL)
def RunWithFlag(flag):
  """Runs gtest_help_test_ with the given flag.

  Args:
    flag: the command-line flag to pass to gtest_help_test_, or None.

  Returns:
    the exit code and the text output as a tuple.
  """
  command = [PROGRAM_PATH] if flag is None else [PROGRAM_PATH, flag]
  child = gtest_test_utils.Subprocess(command)
  return child.exit_code, child.output
class GTestHelpTest(gtest_test_utils.TestCase):
  """Tests the --help flag and its equivalent forms."""

  def TestHelpFlag(self, flag):
    """Verifies correct behavior when help flag is specified.

    The right message must be printed and the tests must
    skipped when the given flag is specified.

    Args:
      flag: A flag to pass to the binary or None.
    """
    exit_code, output = RunWithFlag(flag)
    self.assertEquals(0, exit_code)
    self.assert_(HELP_REGEX.search(output), output)
    # --gtest_stream_result_to is only available on Linux builds.
    if IS_LINUX:
      self.assert_(STREAM_RESULT_TO_FLAG in output, output)
    else:
      self.assert_(STREAM_RESULT_TO_FLAG not in output, output)
    # --gtest_death_test_style only appears when death tests are compiled in
    # (and never on Windows).
    if SUPPORTS_DEATH_TESTS and not IS_WINDOWS:
      self.assert_(DEATH_TEST_STYLE_FLAG in output, output)
    else:
      self.assert_(DEATH_TEST_STYLE_FLAG not in output, output)

  def TestNonHelpFlag(self, flag):
    """Verifies correct behavior when no help flag is specified.

    Verifies that when no help flag is specified, the tests are run
    and the help message is not printed.

    Args:
      flag: A flag to pass to the binary or None.
    """
    exit_code, output = RunWithFlag(flag)
    # The binary contains failing tests, so a non-help run exits non-zero.
    self.assert_(exit_code != 0)
    self.assert_(not HELP_REGEX.search(output), output)

  def testPrintsHelpWithFullFlag(self):
    self.TestHelpFlag('--help')

  def testPrintsHelpWithShortFlag(self):
    self.TestHelpFlag('-h')

  def testPrintsHelpWithQuestionFlag(self):
    self.TestHelpFlag('-?')

  def testPrintsHelpWithWindowsStyleQuestionFlag(self):
    self.TestHelpFlag('/?')

  def testPrintsHelpWithUnrecognizedGoogleTestFlag(self):
    self.TestHelpFlag(UNKNOWN_FLAG)

  def METHOD_NAME(self):
    # Each malformed spelling of --gtest_list_tests must trigger help.
    for incorrect_flag in INCORRECT_FLAG_VARIANTS:
      self.TestHelpFlag(incorrect_flag)

  def testRunsTestsWithoutHelpFlag(self):
    """Verifies that when no help flag is specified, the tests are run
    and the help message is not printed."""
    self.TestNonHelpFlag(None)

  def testRunsTestsWithGtestInternalFlag(self):
    """Verifies that the tests are run and no help message is printed when
    a flag starting with Google Test prefix and 'internal_' is supplied."""
    self.TestNonHelpFlag(INTERNAL_FLAG_FOR_TESTING)
# Standard Google Test Python harness entry point.
if __name__ == '__main__':
  gtest_test_utils.Main()
5,197 | test execute for vo bad config | """ Test class for PilotLoggingAgent Agent
"""
import os
import time
import tempfile
import pytest
from unittest.mock import MagicMock, patch
# DIRAC Components
import DIRAC.WorkloadManagementSystem.Agent.PilotLoggingAgent as plaModule
from DIRAC.WorkloadManagementSystem.Agent.PilotLoggingAgent import PilotLoggingAgent
from DIRAC import gLogger, gConfig, S_OK, S_ERROR
gLogger.setLevel("DEBUG")
# Mock Objects
mockReply = MagicMock()          # side_effect for Operations.getValue
mockReply1 = MagicMock()         # side_effect for Operations.getOptionsDict
mockOperations = MagicMock()
mockTornadoClient = MagicMock()
mockDataManager = MagicMock()
mockAM = MagicMock()
mockNone = MagicMock()
mockNone.return_value = None
# Canned proxy-user lookup result shared by the execute() tests.
upDict = {
    "OK": True,
    "Value": {"User": "proxyUser", "Group": "proxyGroup"},
}
@pytest.fixture
def plaBase(mocker):
    """Bare PilotLoggingAgent with AgentModule internals and CS lookups mocked."""
    mocker.patch("DIRAC.WorkloadManagementSystem.Agent.PilotLoggingAgent.AgentModule.__init__")
    # NOTE(review): patches the module-properties attribute via the
    # JobCleaningAgent import path -- presumably the same AgentModule class
    # object; confirm this is intentional.
    mocker.patch(
        "DIRAC.WorkloadManagementSystem.Agent.JobCleaningAgent.AgentModule._AgentModule__moduleProperties",
        side_effect=lambda x, y=None: y,
        create=True,
    )
    mocker.patch("DIRAC.WorkloadManagementSystem.Agent.PilotLoggingAgent.AgentModule.am_getOption", return_value=mockAM)
    # Two VOs are visible; tests drive which of them has remote logging on.
    mocker.patch(
        "DIRAC.WorkloadManagementSystem.Agent.PilotLoggingAgent.getVOs",
        return_value={"OK": True, "Value": ["gridpp", "lz"]},
    )
    mocker.patch("DIRAC.WorkloadManagementSystem.Agent.PilotLoggingAgent.gConfig.getValue", return_value="GridPP")
    mocker.patch("DIRAC.WorkloadManagementSystem.Agent.PilotLoggingAgent.Operations.getValue", side_effect=mockReply)
    mocker.patch(
        "DIRAC.WorkloadManagementSystem.Agent.PilotLoggingAgent.Operations.getOptionsDict", side_effect=mockReply1
    )
    pla = PilotLoggingAgent()
    pla.log = gLogger
    pla._AgentModule__configDefaults = mockAM
    return pla
@pytest.fixture
def pla_initialised(mocker, plaBase):
    """plaBase with executeForVO mocked out and initialize() already run."""
    mocker.patch("DIRAC.WorkloadManagementSystem.Agent.PilotLoggingAgent.PilotLoggingAgent.executeForVO")
    plaBase.initialize()
    return plaBase
@pytest.fixture
def pla(mocker, plaBase):
    """plaBase initialised with the Tornado client, Operations helper and
    DataManager all replaced by module-level mocks (for executeForVO tests)."""
    mocker.patch(
        "DIRAC.WorkloadManagementSystem.Agent.PilotLoggingAgent.TornadoPilotLoggingClient",
        side_effect=mockTornadoClient,
    )
    mocker.patch("DIRAC.WorkloadManagementSystem.Agent.PilotLoggingAgent.Operations", side_effect=mockOperations)
    mocker.patch(
        "DIRAC.WorkloadManagementSystem.Agent.PilotLoggingAgent.DataManager",
        side_effect=mockDataManager,
    )
    plaBase.initialize()
    return plaBase
def test_initialize(plaBase):
    """initialize() should succeed and pick up the VO list from getVOs()."""
    res = plaBase.initialize()
    assert plaBase.voList == plaModule.getVOs()["Value"]
    assert res == S_OK()
@pytest.mark.parametrize(
    "mockReplyInput, expected, expectedExecOut, expected2",
    [
        ("/Pilot/RemoteLogging", [True, False], S_OK(), upDict),
        ("/Pilot/RemoteLogging", [False, False], S_OK(), upDict),
        ("/Pilot/RemoteLogging", [True, False], S_ERROR("Execute for VO failed"), upDict),
    ],
)
def test_execute(pla_initialised, mockReplyInput, expected, expectedExecOut, expected2):
    """Testing a thin version of execute (executeForVO is mocked)"""
    assert pla_initialised.voList == plaModule.getVOs()["Value"]
    # One remote-logging flag per VO, consumed in order by Operations.getValue.
    mockReply.side_effect = expected
    mockReply1.return_value = expected2
    # remote pilot logging on (gridpp only) and off.
    pla_initialised.executeForVO.return_value = expectedExecOut
    res = pla_initialised.execute()
    if not any(expected):
        # Logging disabled for every VO => executeForVO must not be invoked.
        pla_initialised.executeForVO.assert_not_called()
    else:
        assert pla_initialised.executeForVO.called
        # Only gridpp has the flag on, with the proxy user/group from upDict.
        pla_initialised.executeForVO.assert_called_with(
            "gridpp",
            proxyUserName=upDict["Value"]["User"],
            proxyUserGroup=upDict["Value"]["Group"],
        )
    assert res["OK"] == expectedExecOut["OK"]
@pytest.mark.parametrize(
    "ppath, files, result",
    [
        ("pilot/log/path/", ["file1.log", "file2.log", "file3.log"], S_OK()),
        ("pilot/log/path/", [], S_OK()),
    ],
)
def test_executeForVO(pla, ppath, files, result):
    """executeForVO should upload every local log file via DataManager; with
    no files present nothing is uploaded but the call still returns S_OK()."""
    opsHelperValues = {"OK": True, "Value": {"UploadSE": "testUploadSE", "UploadPath": "/gridpp/uploadPath"}}
    # full local temporary path:
    filepath = os.path.join(tempfile.TemporaryDirectory().name, ppath)
    # this is what getMetadata returns:
    resDict = {"OK": True, "Value": {"LogPath": filepath}}
    mockTornadoClient.return_value.getMetadata.return_value = resDict
    mockDataManager.return_value.putAndRegister.return_value = result
    if files:
        os.makedirs(os.path.join(filepath, "gridpp"), exist_ok=True)
        for elem in files:
            # BUG FIX: close each handle immediately; the previous bare
            # open() leaked file descriptors until garbage collection.
            open(os.path.join(filepath, "gridpp", elem), "w").close()
    mockOperations.return_value.getOptionsDict.return_value = opsHelperValues
    pla.opsHelper = mockOperations.return_value
    # success route
    res = pla.executeForVO(vo="gridpp")
    mockTornadoClient.assert_called_with(useCertificates=True)
    assert mockTornadoClient.return_value.getMetadata.called
    # only called with a non-empty file list:
    if files:
        assert mockDataManager.return_value.putAndRegister.called
    assert res == S_OK()
def test_executeForVOMetaFails(pla):
    """executeForVO must propagate a failure from getMetadata as S_ERROR."""
    opsHelperValues = {"OK": True, "Value": {"UploadSE": "testUploadSE", "UploadPath": "/gridpp/uploadPath"}}
    mockOperations.return_value.getOptionsDict.return_value = opsHelperValues
    pla.opsHelper = mockOperations.return_value
    # getMetadata call fails.
    mockTornadoClient.return_value.getMetadata.return_value = {"OK": False, "Message": "Failed, sorry.."}
    res = pla.executeForVO(vo="anything")
    assert res["OK"] is False
@pytest.mark.parametrize(
    "opsHelperValues, expectedRes",
    [
        ({"OK": True, "Value": {"UploadPath": "/gridpp/uploadPath"}}, S_ERROR("Upload SE not defined")),
        ({"OK": True, "Value": {"UploadSE": "testUploadSE"}}, S_ERROR("Upload path on SE testUploadSE not defined")),
        # FIX: dropped a pointless f-prefix on a placeholder-free literal.
        ({"OK": False}, S_ERROR("No pilot section for gridpp vo")),
    ],
)
def METHOD_NAME(pla, opsHelperValues, expectedRes):
    """Testing an incomplete configuration"""
    mockOperations.return_value.getOptionsDict.return_value = opsHelperValues
    pla.opsHelper = mockOperations.return_value
    # BUG FIX: clear calls recorded by earlier tests *before* exercising the
    # code under test; previously the mock was reset immediately before
    # assert_not_called(), which made that assertion vacuous.
    mockTornadoClient.return_value.getMetadata.reset_mock()
    res = pla.executeForVO(vo="gridpp")
    assert res["OK"] is False
    assert res["Message"] == expectedRes["Message"]
    # A broken configuration must fail before any server contact is made.
    mockTornadoClient.return_value.getMetadata.assert_not_called()
@pytest.mark.parametrize(
    "filename, fileAge, ageLimit, expectedResult", [("survives.log", 10, 20, True), ("getsdeleted.log", 21, 20, False)]
)
def test_oldLogsCleaner(plaBase, filename, fileAge, ageLimit, expectedResult):
    """Testing old files removal"""
    plaBase.clearPilotsDelay = ageLimit
    # NOTE(review): the TemporaryDirectory object is not kept alive, so its
    # automatic cleanup may delete the directory when it is collected; the
    # makedirs below recreates it, but nothing removes it afterwards.
    filepath = tempfile.TemporaryDirectory().name
    os.makedirs(filepath, exist_ok=True)
    testfile = os.path.join(filepath, filename)
    fd = open(testfile, "w")
    fd.close()
    assert os.path.exists(testfile) is True
    # cannot patch os.stat globally because os.path.exists uses it !
    with patch("DIRAC.WorkloadManagementSystem.Agent.PilotLoggingAgent.os.stat") as mockOSStat:
        mockOSStat.return_value.st_mtime = time.time() - fileAge * 86400  # file older than fileAge in seconds
        plaBase.clearOldPilotLogs(filepath)
    assert os.path.exists(testfile) is expectedResult
5,198 | favicon | # This file is part of Indico.
# Copyright (C) 2002 - 2023 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import os
from flask import Response, current_app, redirect, request, send_from_directory
from werkzeug.exceptions import NotFound
import indico
from indico.core.config import config
from indico.core.plugins import plugin_engine
from indico.util.i18n import get_all_locales
from indico.web.assets.vars_js import generate_global_file, generate_i18n_file, generate_user_file
from indico.web.flask.util import send_file, url_for
from indico.web.flask.wrappers import IndicoBlueprint
assets_blueprint = IndicoBlueprint('assets', __name__, url_prefix='/assets')
# build_only rules exist solely so url_for() can generate these static URLs;
# no view function serves them here.
assets_blueprint.add_url_rule('!/css/<path:filename>', 'css', build_only=True)
assets_blueprint.add_url_rule('!/images/<path:filename>', 'image', build_only=True)
assets_blueprint.add_url_rule('!/fonts/<path:filename>', 'fonts', build_only=True)
assets_blueprint.add_url_rule('!/dist/<path:filename>', 'dist', build_only=True)
@assets_blueprint.route('!/<any(css,dist,images,fonts):folder>/<path:filename>.<fileext>')
@assets_blueprint.route('!/<any(images,fonts):folder>/<path:filename>__v<version>.<fileext>')
def folder_file(folder, filename, fileext, version=None):
    """Serve a static asset from Indico's bundled static directory."""
    static_root = os.path.join(current_app.root_path, 'web', 'static')
    relative_path = os.path.join(folder, f'{filename}.{fileext}')
    return send_from_directory(static_root, relative_path)
@assets_blueprint.route('!/static/plugins/<plugin>/<path:filename>__v<version>.<fileext>')
@assets_blueprint.route('!/static/plugins/<plugin>/<path:filename>.<fileext>')
def plugin_file(plugin, filename, fileext, version=None):
    """Serve a static file shipped by an active plugin; 404 if the plugin
    is unknown/inactive."""
    plugin_obj = plugin_engine.get_plugin(plugin)
    if not plugin_obj:
        raise NotFound
    plugin_static_dir = os.path.join(plugin_obj.root_path, 'static')
    return send_from_directory(plugin_static_dir, f'{filename}.{fileext}')
@assets_blueprint.route('!/<filename>')
def root(filename):
    """Serve a file located at the top level of the static directory."""
    return send_from_directory(os.path.join(current_app.root_path, 'web', 'static'), filename)
@assets_blueprint.route('/js-vars/global.js')
def js_vars_global():
    """Provide a JS file with global definitions (all users).

    Useful for server-wide config options, URLs, etc...
    """
    # Cache key embeds the Indico version and config hash so the file is
    # regenerated after upgrades or config changes; DEBUG always rebuilds.
    cache_file = os.path.join(config.CACHE_DIR, f'assets_global_{indico.__version__}_{config.hash}.js')
    if config.DEBUG or not os.path.exists(cache_file):
        data = generate_global_file()
        with open(cache_file, 'w') as f:
            f.write(data)
    return send_file('global.js', cache_file, mimetype='application/javascript', conditional=True)
@assets_blueprint.route('/js-vars/user.js')
def js_vars_user():
    """Provide a JS file with user-specific definitions.

    Useful for favorites, settings etc.
    """
    payload = generate_user_file()
    return Response(payload, mimetype='application/javascript')
@assets_blueprint.route('/i18n/<locale_name>.js')
def i18n_locale(locale_name):
    """Serve the translation bundle for the given locale."""
    return _get_i18n_locale(locale_name, react=False)
@assets_blueprint.route('/i18n/<locale_name>-react.js')
def i18n_locale_react(locale_name):
    """Serve the react-specific translation bundle for the given locale."""
    return _get_i18n_locale(locale_name, react=True)
def _get_i18n_locale(locale_name, react=False):
    """Retrieve a locale in a Jed-compatible format.

    :param locale_name: the locale identifier (e.g. ``en_GB``)
    :param react: whether to serve the react-specific bundle
    :return: a JS response defining ``TRANSLATIONS`` (or ``REACT_TRANSLATIONS``)
    :raises NotFound: if the locale name is unencodable or has no i18n data
    """
    # Ensure we have a valid locale. en_GB is our source locale and thus always considered
    # valid, even if it doesn't exist (dev setup where the user did not compile any locales)
    # since otherwise we'd have no valid locales at all and get a redirect loop
    all_locales = get_all_locales()
    if locale_name not in all_locales and locale_name != 'en_GB':
        fallback = config.DEFAULT_LOCALE if config.DEFAULT_LOCALE in all_locales else 'en_GB'
        return redirect(url_for(request.endpoint, locale_name=fallback))
    react_suffix = '-react' if react else ''
    try:
        # f-string for consistency with the rest of this module
        cache_file = os.path.join(config.CACHE_DIR,
                                  f'assets_i18n_{locale_name}{react_suffix}_{indico.__version__}_{config.hash}.js')
    except UnicodeEncodeError:
        # A locale name that cannot be encoded cannot correspond to a real locale.
        raise NotFound
    if config.DEBUG or not os.path.exists(cache_file):
        i18n_data = generate_i18n_file(locale_name, react=react)
        if i18n_data is None:
            raise NotFound
        varname = 'REACT_TRANSLATIONS' if react else 'TRANSLATIONS'
        with open(cache_file, 'w') as f:
            f.write(f'window.{varname} = {i18n_data};')
    return send_file(f'{locale_name}{react_suffix}.js', cache_file, mimetype='application/javascript',
                     conditional=True)
@assets_blueprint.route('!/static/custom/<any(css,js):folder>/<path:filename>', endpoint='custom')
@assets_blueprint.route('!/static/custom/files/<path:filename>', endpoint='custom', defaults={'folder': 'files'})
def static_custom(folder, filename):
    """Serve a file from the customization dir; 404 if none is configured."""
    base_dir = config.CUSTOMIZATION_DIR
    if not base_dir:
        raise NotFound
    return send_from_directory(os.path.join(base_dir, folder), filename)
@assets_blueprint.route('!/favicon.ico')
def METHOD_NAME():
    """Redirect the legacy ``/favicon.ico`` path to the bundled Indico icon."""
    target = url_for('.image', filename='indico.ico')
    return redirect(target)
@assets_blueprint.route('/avatar/<name>.svg')
@assets_blueprint.route('/avatar/blank.svg')
def avatar(name=None):
    """Serve the default SVG avatar for *name* (blank when no name given)."""
    # Local import; NOTE(review): presumably avoids an import cycle -- confirm.
    from indico.modules.users.util import send_default_avatar
    return send_default_avatar(name)
"""ProtocolEngine shared test fixtures."""
from __future__ import annotations
import pytest
from typing import TYPE_CHECKING
from decoy import Decoy
from opentrons_shared_data import load_shared_data
from opentrons_shared_data.deck import load as load_deck
from opentrons_shared_data.deck.dev_types import DeckDefinitionV3
from opentrons_shared_data.labware import load_definition
from opentrons_shared_data.pipette import pipette_definition
from opentrons.protocols.models import LabwareDefinition
from opentrons.protocols.api_support.deck_type import (
STANDARD_OT2_DECK,
SHORT_TRASH_DECK,
STANDARD_OT3_DECK,
)
from opentrons.protocol_engine.types import ModuleDefinition
from opentrons.hardware_control import HardwareControlAPI, OT2HardwareControlAPI
from opentrons.hardware_control.api import API
if TYPE_CHECKING:
from opentrons.hardware_control.ot3api import OT3API
@pytest.fixture
def hardware_api(decoy: Decoy) -> HardwareControlAPI:
    """Get a mocked out HardwareControlAPI of unspecified robot type."""
    mock_hw = decoy.mock(cls=OT2HardwareControlAPI)
    return mock_hw
@pytest.fixture
def ot2_hardware_api(decoy: Decoy) -> API:
    """Get a mocked out OT-2 hardware API."""
    mock_hw = decoy.mock(cls=API)
    return mock_hw
@pytest.mark.ot3_only
@pytest.fixture
def ot3_hardware_api(decoy: Decoy) -> OT3API:
    """Get a mocked out OT3API.

    Returns None when the OT-3 hardware API is not importable (OT-2-only
    installs); tests using this fixture are marked ``ot3_only``.
    """
    try:
        from opentrons.hardware_control.ot3api import OT3API
    except ImportError:
        # Keep the try body minimal: only the import should be able to raise
        # ImportError; a failure inside decoy.mock must not be swallowed.
        # TODO (tz, 9-23-22) Figure out a better way to use this fixture with OT-3 api only.
        return None  # type: ignore[return-value]
    return decoy.mock(cls=OT3API)
@pytest.fixture(scope="session")
def ot2_standard_deck_def() -> DeckDefinitionV3:
    """Get the OT-2 standard deck definition (schema v3)."""
    deck = load_deck(STANDARD_OT2_DECK, 3)
    return deck
@pytest.fixture(scope="session")
def ot2_short_trash_deck_def() -> DeckDefinitionV3:
    """Get the OT-2 short-trash deck definition (schema v3)."""
    deck = load_deck(SHORT_TRASH_DECK, 3)
    return deck
@pytest.fixture(scope="session")
def ot3_standard_deck_def() -> DeckDefinitionV3:
    """Get the OT-3 standard deck definition (schema v3)."""
    # Docstring previously said "OT-2" although this loads the OT-3 deck.
    return load_deck(STANDARD_OT3_DECK, 3)
@pytest.fixture(scope="session")
def ot2_fixed_trash_def() -> LabwareDefinition:
    """Get the definition of the OT-2 standard fixed trash."""
    raw = load_definition("opentrons_1_trash_1100ml_fixed", 1)
    return LabwareDefinition.parse_obj(raw)
@pytest.fixture(scope="session")
def ot2_short_fixed_trash_def() -> LabwareDefinition:
    """Get the definition of the OT-2 short fixed trash."""
    raw = load_definition("opentrons_1_trash_850ml_fixed", 1)
    return LabwareDefinition.parse_obj(raw)
@pytest.fixture(scope="session")
def ot3_fixed_trash_def() -> LabwareDefinition:
    """Get the definition of the OT-3 fixed trash."""
    raw = load_definition("opentrons_1_trash_3200ml_fixed", 1)
    return LabwareDefinition.parse_obj(raw)
@pytest.fixture(scope="session")
def well_plate_def() -> LabwareDefinition:
    """Get the definition of a 96 well plate."""
    raw = load_definition("corning_96_wellplate_360ul_flat", 2)
    return LabwareDefinition.parse_obj(raw)
@pytest.fixture(scope="session")
def METHOD_NAME() -> LabwareDefinition:
    """Get the definition of a Flex 50uL tiprack."""
    raw = load_definition("opentrons_flex_96_filtertiprack_50ul", 1)
    return LabwareDefinition.parse_obj(raw)
@pytest.fixture(scope="session")
def adapter_plate_def() -> LabwareDefinition:
    """Get the definition of a h/s adapter plate."""
    raw = load_definition("opentrons_universal_flat_adapter", 1)
    return LabwareDefinition.parse_obj(raw)
@pytest.fixture(scope="session")
def reservoir_def() -> LabwareDefinition:
    """Get the definition of single-row reservoir."""
    raw = load_definition("nest_12_reservoir_15ml", 1)
    return LabwareDefinition.parse_obj(raw)
@pytest.fixture(scope="session")
def tip_rack_def() -> LabwareDefinition:
    """Get the definition of Opentrons 300 uL tip rack."""
    raw = load_definition("opentrons_96_tiprack_300ul", 1)
    return LabwareDefinition.parse_obj(raw)
@pytest.fixture(scope="session")
def adapter_def() -> LabwareDefinition:
    """Get the definition of Opentrons 96 PCR adapter."""
    raw = load_definition("opentrons_96_pcr_adapter", 1)
    return LabwareDefinition.parse_obj(raw)
@pytest.fixture(scope="session")
def falcon_tuberack_def() -> LabwareDefinition:
    """Get the definition of the 6-well Falcon tuberack."""
    raw = load_definition("opentrons_6_tuberack_falcon_50ml_conical", 1)
    return LabwareDefinition.parse_obj(raw)
@pytest.fixture(scope="session")
def magdeck_well_plate_def() -> LabwareDefinition:
    """Get the definition of a well plate compatible with magdeck."""
    # Fixed docstring typo: "well place" -> "well plate".
    return LabwareDefinition.parse_obj(
        load_definition("nest_96_wellplate_100ul_pcr_full_skirt", 1)
    )
@pytest.fixture(scope="session")
def tempdeck_v1_def() -> ModuleDefinition:
    """Get the definition of a V1 tempdeck."""
    raw = load_shared_data("module/definitions/3/temperatureModuleV1.json")
    return ModuleDefinition.parse_raw(raw)
@pytest.fixture(scope="session")
def tempdeck_v2_def() -> ModuleDefinition:
    """Get the definition of a V2 tempdeck."""
    raw = load_shared_data("module/definitions/3/temperatureModuleV2.json")
    return ModuleDefinition.parse_raw(raw)
@pytest.fixture(scope="session")
def magdeck_v1_def() -> ModuleDefinition:
    """Get the definition of a V1 magdeck."""
    raw = load_shared_data("module/definitions/3/magneticModuleV1.json")
    return ModuleDefinition.parse_raw(raw)
@pytest.fixture(scope="session")
def magdeck_v2_def() -> ModuleDefinition:
    """Get the definition of a V2 magdeck."""
    raw = load_shared_data("module/definitions/3/magneticModuleV2.json")
    return ModuleDefinition.parse_raw(raw)
@pytest.fixture(scope="session")
def thermocycler_v1_def() -> ModuleDefinition:
    """Get the definition of a V1 thermocycler."""
    # Fixed copy-paste docstring: it said "V2" but this loads the V1 module.
    definition = load_shared_data("module/definitions/3/thermocyclerModuleV1.json")
    return ModuleDefinition.parse_raw(definition)
@pytest.fixture(scope="session")
def thermocycler_v2_def() -> ModuleDefinition:
    """Get the definition of a V2 thermocycler."""
    raw = load_shared_data("module/definitions/3/thermocyclerModuleV2.json")
    return ModuleDefinition.parse_raw(raw)
@pytest.fixture(scope="session")
def heater_shaker_v1_def() -> ModuleDefinition:
    """Get the definition of a V1 heater-shaker."""
    raw = load_shared_data("module/definitions/3/heaterShakerModuleV1.json")
    return ModuleDefinition.parse_raw(raw)
@pytest.fixture(scope="session")
def mag_block_v1_def() -> ModuleDefinition:
    """Get the definition of a V1 Mag Block."""
    raw = load_shared_data("module/definitions/3/magneticBlockV1.json")
    return ModuleDefinition.parse_raw(raw)
@pytest.fixture(scope="session")
def supported_tip_fixture() -> pipette_definition.SupportedTipsDefinition:
    """Get a mock supported tip definition."""
    def _flow_rate() -> pipette_definition.FlowRateDefinition:
        # All three flow rates share the same dummy default value.
        return pipette_definition.FlowRateDefinition(default=10, valuesByApiLevel={})

    return pipette_definition.SupportedTipsDefinition(
        defaultAspirateFlowRate=_flow_rate(),
        defaultDispenseFlowRate=_flow_rate(),
        defaultBlowOutFlowRate=_flow_rate(),
        defaultTipLength=40,
        defaultReturnTipHeight=0.5,
        aspirate=pipette_definition.ulPerMMDefinition(default={"1": [(0, 0, 0)]}),
        dispense=pipette_definition.ulPerMMDefinition(default={"1": [(0, 0, 0)]}),
        defaultBlowoutVolume=5,
    )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.