| id (int64, 0–300k) | label (string, 1–74 chars, nullable) | text (string, 4k–8k chars) |
|---|---|---|
298,700 | sentence mentions pairs | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import io
import json
import os
import pickle
from segtok.segmenter import split_multi
##### Reading helpers #####
def read_sentences_from_file(path_to_file, one_sentence_per_line=True):
lines = []
with io.open(path_to_file, mode="r", encoding="utf-8") as file:
for line in file:
line = line.strip()
if line != "":
lines.append(line.strip())
if one_sentence_per_line:
sentences = lines
else:
text = " ".join(lines)
sentences = list(split_multi(text))
sentences = [sentence for sentence in sentences if sentence != ""]
return sentences
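##### Example (illustrative) #####
# Editor's sketch, not part of the original module: how read_sentences_from_file
# might be called. The path "example.txt" is a hypothetical placeholder.
def _example_read_sentences(path="example.txt"):
    # One sentence per line (default): every non-empty, stripped line is kept as a sentence.
    per_line = read_sentences_from_file(path)
    # Free-flowing text: lines are joined and segmented with segtok's split_multi().
    segmented = read_sentences_from_file(path, one_sentence_per_line=False)
    return per_line, segmented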
##### Printing / writing helpers #####
def get_candidate_summary(candidate):
wikipedia_id = candidate["wikipedia_id"]
wikidata_id = candidate["wikidata_id"]
wikipedia_title = candidate["wikipedia_title"]
return "{}, {}, {}".format(wikipedia_id, wikidata_id, wikipedia_title)
def present_sentence_mentions(sentence, mentions, output_file):
if output_file is not None:
f = io.open(output_file, mode="a", encoding="utf-8")
output = lambda s: f.write("{}\n".format(s))
else:
output = lambda s: print(s)
output("Sentence: {}".format(sentence))
mention_entity_pairs = []
for mention in mentions:
candidates = mention["candidates"]
# prediction = mention.get('predicted_candidate_idx', 0)
prediction = mention["predicted_candidate_idx"]
if prediction < len(candidates):
# print(type(mention['prob_assigned_to_candidate']))
# print(mention['prob_assigned_to_candidate'])
mention_rep = "{} ({}, {}) - {} (conf. {:.5f})".format(
mention["text"],
mention["start_pos"],
mention["end_pos"],
get_candidate_summary(candidates[prediction]),
mention["prob_assigned_to_candidate"],
)
else:
mention_rep = "{} ({}, {}) - {}".format(
mention["text"],
mention["start_pos"],
mention["end_pos"],
"No candidate selected",
)
mention_entity_pairs.append(mention_rep)
if len(mention_entity_pairs) != 0:
output("Mention-Entity pairs: \n{}".format("\n".join(mention_entity_pairs)))
else:
output("No detected mentions")
output("")
def METHOD_NAME(sentences, mentions):
mentions_per_sent = {}
for m in mentions:
sent_idx = int(m["sent_idx"])
curr_ments = mentions_per_sent.get(sent_idx, [])
curr_ments.append(m)
mentions_per_sent[sent_idx] = curr_ments
pairs = []
for idx, sent in enumerate(sentences):
pairs.append((sent, mentions_per_sent.get(idx, [])))
return pairs
def present_annotated_sentences(sentences, mentions, output_file=None):
pairs = METHOD_NAME(sentences, mentions)
for sent, ments in pairs:
present_sentence_mentions(sent, ments, output_file)
def write_dicts_as_json_per_line(list_of_dicts, txt_file_path):
with io.open(txt_file_path, mode="w", encoding="utf-8") as file:
for idx, mention in enumerate(list_of_dicts):
json_string = json.dumps(mention)
file.write(json_string)
if idx != (len(list_of_dicts) - 1):
file.write("\n")
def get_mentions_txt_file_path(output_folder_path):
os.makedirs(output_folder_path, exist_ok=True)
file_name = "mentions.jsonl"
path_to_file = os.path.join(output_folder_path, file_name)
return path_to_file
def get_sentences_txt_file_path(output_folder_path):
os.makedirs(output_folder_path, exist_ok=True)
file_name = "sentences.jsonl"
path_to_file = os.path.join(output_folder_path, file_name)
return path_to_file
def get_end2end_pickle_output_file_path(output_folder_path):
os.makedirs(output_folder_path, exist_ok=True)
file_name = "mentions_and_sentences.pickle"
path_to_file = os.path.join(output_folder_path, file_name)
return path_to_file
def write_end2end_pickle_output(sentences, mentions, output_file_id):
obj = {"sentences": sentences, "mentions": mentions}
with open(get_end2end_pickle_output_file_path(output_file_id), "wb") as file:
pickle.dump(obj, file)
def get_end2end_pretty_output_file_path(output_folder_path):
os.makedirs(output_folder_path, exist_ok=True)
file_name = "pretty.txt"
path_to_file = os.path.join(output_folder_path, file_name)
return path_to_file |
298,701 | github constraints satisfied | #
# Copyright (c) nexB Inc. and others. All rights reserved.
# VulnerableCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/vulnerablecode for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
import operator
from typing import Union
class GenericVersion:
def __init__(self, version):
self.value = version.replace(" ", "").lstrip("v")
self.decomposed = tuple(
[int(com) if com.isnumeric() else com for com in self.value.split(".")]
)
def __str__(self):
return str(self.value)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self.value.__eq__(other.value)
def __lt__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
for i, j in zip(self.decomposed, other.decomposed):
if not isinstance(i, type(j)):
continue
if i.__gt__(j):
return False
return True
def __le__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self.__lt__(other) or self.__eq__(other)
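# Editor's sketch, not part of the original module: how GenericVersion normalises
# and compares version strings, based on the class defined above.
def _example_generic_version():
    v = GenericVersion("v1.2.3")
    assert v.value == "1.2.3"            # spaces removed, leading "v" stripped
    assert v.decomposed == (1, 2, 3)     # numeric components are converted to ints
    assert GenericVersion("1.2.3") < GenericVersion("1.2.10")
    assert GenericVersion("1.2.3") <= GenericVersion("1.2.3")
    return v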
def compare(version, package_comparator, package_version):
operator_comparator = {
"<": operator.lt,
">": operator.gt,
"=": operator.eq,
"<=": operator.le,
">=": operator.ge,
"==": operator.eq,
"!=": operator.ne,
")": operator.lt,
"]": operator.le,
"(": operator.gt,
"[": operator.ge,
}
compare = operator_comparator[package_comparator]
return compare(version, package_version)
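# Editor's note (illustrative): the bracket comparators encode interval endpoints,
# e.g. in "[3.0.0,3.1.25)" the token "[3.0.0" means ">= 3.0.0" and "3.1.25)" means
# "< 3.1.25". A minimal sketch of that mapping:
def _example_interval_compare():
    assert compare(GenericVersion("3.0.2"), ")", GenericVersion("3.1.25"))   # strictly below
    assert compare(GenericVersion("3.1.25"), "]", GenericVersion("3.1.25"))  # at or below
    return True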
def parse_constraint(constraint):
"""
Return operator and version from a constraint
For example:
>>> assert parse_constraint(">=7.0.0") == ('>=', '7.0.0')
>>> assert parse_constraint("=7.0.0") == ('=', '7.0.0')
>>> assert parse_constraint("[3.0.0") == ('[', '3.0.0')
>>> assert parse_constraint("3.1.25]") == (']', '3.1.25')
"""
if constraint.startswith(("<=", ">=", "==", "!=")):
return constraint[:2], constraint[2:]
if constraint.startswith(("<", ">", "=", "[", "(")):
return constraint[0], constraint[1:]
if constraint.endswith(("]", ")")):
return constraint[-1], constraint[:-1]
def METHOD_NAME(github_constrain, version):
"""
Return True or False depending on whether the given version satisfies the github constraint
For example:
>>> assert github_constraints_satisfied(">= 7.0.0, <= 7.6.57", "7.1.1") == True
>>> assert github_constraints_satisfied(">= 10.4.0, <= 10.4.1", "10.6.0") == False
"""
gh_constraints = github_constrain.strip().replace(" ", "")
constraints = gh_constraints.split(",")
for constraint in constraints:
gh_comparator, gh_version = parse_constraint(constraint)
if not gh_version:
continue
if not compare(GenericVersion(version), gh_comparator, GenericVersion(gh_version)):
return False
return True
def snky_constraints_satisfied(snyk_constrain, version):
"""
Return True or False depending on whether the given version satisfies the snyk constraint
For example:
>>> assert snky_constraints_satisfied(">=4.0.0, <4.0.10.16", "4.0.10.15") == True
>>> assert snky_constraints_satisfied(" >=4.1.0, <4.4.15.7", "4.0.10.15") == False
>>> assert snky_constraints_satisfied("[3.0.0,3.1.25)", "3.0.2") == True
"""
snyk_constraints = snyk_constrain.strip().replace(" ", "")
constraints = snyk_constraints.split(",")
for constraint in constraints:
snyk_comparator, snyk_version = parse_constraint(constraint)
if not snyk_version:
continue
if not compare(GenericVersion(version), snyk_comparator, GenericVersion(snyk_version)):
return False
return True
def gitlab_constraints_satisfied(gitlab_constrain, version):
"""
Return True or False depending on whether the given version satisfies the gitlab constraint
For example:
>>> assert gitlab_constraints_satisfied("[7.0.0,7.0.11),[7.2.0,7.2.4)", "7.2.1") == True
>>> assert gitlab_constraints_satisfied("[7.0.0,7.0.11),[7.2.0,7.2.4)", "8.2.1") == False
>>> assert gitlab_constraints_satisfied( ">=4.0,<4.3||>=5.0,<5.2", "5.4") == False
>>> assert gitlab_constraints_satisfied( ">=0.19.0 <0.30.0", "0.24") == True
>>> assert gitlab_constraints_satisfied( ">=1.5,<1.5.2", "2.2") == False
"""
gitlab_constraints = gitlab_constrain.strip()
if gitlab_constraints.startswith(("[", "(")):
# transform "[7.0.0,7.0.11),[7.2.0,7.2.4)" -> [ "[7.0.0,7.0.11)", "[7.2.0,7.2.4)" ]
splitted = gitlab_constraints.split(",")
constraints = [f"{a},{b}" for a, b in zip(splitted[::2], splitted[1::2])]
delimiter = ","
else:
# transform ">=4.0,<4.3||>=5.0,<5.2" -> [ ">=4.0,<4.3", ">=5.0,<5.2" ]
# transform ">=0.19.0 <0.30.0" -> [ ">=0.19.0 <0.30.0" ]
# transform ">=1.5,<1.5.2" -> [ ">=1.5,<1.5.2" ]
delimiter = "," if "," in gitlab_constraints else " "
constraints = gitlab_constraints.split("||")
for constraint in constraints:
is_constraint_satisfied = True
for subcontraint in constraint.strip().split(delimiter):
if not subcontraint:
continue
gitlab_comparator, gitlab_version = parse_constraint(subcontraint.strip())
if not gitlab_version:
continue
if not compare(
GenericVersion(version), gitlab_comparator, GenericVersion(gitlab_version)
):
is_constraint_satisfied = False
break
if is_constraint_satisfied:
return True
return False
def get_item(entity: Union[dict, list], *attributes):
"""
Return the `item` reached by traversing all the `attributes` in the given `dictionary/list`.
Do a DFS for the `item` in the `dictionary/list` by traversing the `attributes`,
and return None if the `attributes` cannot be traversed.
For example:
>>> get_item({'a': {'b': {'c': 'd'}}}, 'a', 'b', 'e')
Traceback (most recent call last):
...
KeyError: "Missing attribute e in {'c': 'd'}"
>>> assert get_item({'a': {'b': {'c': 'd'}}}, 'a', 'b', 'c') == 'd'
>>> assert get_item({'a': [{'b': {'c': 'd'}}]}, 'a', 0, 'b') == {'c': 'd'}
>>> assert get_item(['b', ['c', ['d']]], 1, 1, 0) == 'd'
"""
for attribute in attributes:
if not entity:
return
if not isinstance(entity, (dict, list)):
raise TypeError(f"Entity must be of type `dict` or `list` not {type(entity)}")
if isinstance(entity, dict) and attribute not in entity:
raise KeyError(f"Missing attribute {attribute} in {entity}")
if isinstance(entity, list) and not isinstance(attribute, int):
raise TypeError(f"List indices must be integers not {type(attribute)}")
if isinstance(entity, list) and len(entity) <= attribute:
raise IndexError(f"Index {attribute} out of range for {entity}")
entity = entity[attribute]
return entity |
298,702 | url parameters | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"eventhubs georecovery-alias delete",
)
class Delete(AAZCommand):
"""Delete an Alias(Disaster Recovery configuration)
"""
_aaz_info = {
"version": "2023-01-01-preview",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.eventhub/namespaces/{}/disasterrecoveryconfigs/{}", "2023-01-01-preview"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
self._execute_operations()
return None
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.alias = AAZStrArg(
options=["-a", "--alias"],
help="The Disaster Recovery configuration name",
required=True,
id_part="child_name_1",
fmt=AAZStrArgFormat(
max_length=50,
min_length=1,
),
)
_args_schema.namespace_name = AAZStrArg(
options=["--namespace-name"],
help="The Namespace name",
required=True,
id_part="name",
fmt=AAZStrArgFormat(
max_length=50,
min_length=6,
),
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.DisasterRecoveryConfigsDelete(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
class DisasterRecoveryConfigsDelete(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
if session.http_response.status_code in [204]:
return self.on_204(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/disasterRecoveryConfigs/{alias}",
**self.METHOD_NAME
)
@property
def method(self):
return "DELETE"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def METHOD_NAME(self):
parameters = {
**self.serialize_url_param(
"alias", self.ctx.args.alias,
required=True,
),
**self.serialize_url_param(
"namespaceName", self.ctx.args.namespace_name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
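        # Editor's note (illustrative): the values serialized above are substituted into
        # the `url` template via `self.client.format_url(...)`; `query_parameters` adds the
        # api-version, and `__call__` issues the DELETE request, mapping 200/204 responses
        # to `on_200`/`on_204`.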
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2023-01-01-preview",
required=True,
),
}
return parameters
def on_200(self, session):
pass
def on_204(self, session):
pass
class _DeleteHelper:
"""Helper class for Delete"""
__all__ = ["Delete"] |
298,703 | forward features | # Part of the implementation is borrowed and modified from ConvNext,
# publicly available at https://github.com/facebookresearch/ConvNeXt
import torch
import torch.nn as nn
import torch.nn.functional as F
from .timm_tinyc import DropPath
class Block(nn.Module):
r""" ConvNeXt Block. There are two equivalent implementations:
(1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W)
(2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back
We use (2) as we find it slightly faster in PyTorch
Args:
dim (int): Number of input channels.
drop_path (float): Stochastic depth rate. Default: 0.0
layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
"""
def __init__(self, dim, drop_path=0., layer_scale_init_value=1e-6):
super().__init__()
self.dwconv = nn.Conv2d(
dim, dim, kernel_size=7, padding=3, groups=dim) # depthwise conv
self.norm = LayerNorm(dim, eps=1e-6)
self.pwconv1 = nn.Linear(
dim,
4 * dim) # pointwise/1x1 convs, implemented with linear layers
self.act = nn.GELU()
self.pwconv2 = nn.Linear(4 * dim, dim)
self.gamma = nn.Parameter(
layer_scale_init_value * torch.ones((dim)),
requires_grad=True) if layer_scale_init_value > 0 else None
self.drop_path = DropPath(
drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
input = x
x = self.dwconv(x)
x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C)
x = self.norm(x)
x = self.pwconv1(x)
x = self.act(x)
x = self.pwconv2(x)
if self.gamma is not None:
x = self.gamma * x
x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W)
x = input + self.drop_path(x)
return x
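# Editor's sketch, not part of the original model code: a Block is a residual unit
# that preserves the (N, C, H, W) shape of its input; the sizes below are arbitrary
# example values.
def _example_block_shapes():
    block = Block(dim=96)
    x = torch.randn(2, 96, 8, 8)   # (N, C, H, W)
    y = block(x)
    assert y.shape == x.shape      # depthwise conv (kernel 7, padding 3) + residual keep the shape
    return y.shape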
class ConvNeXt(nn.Module):
r""" ConvNeXt
A PyTorch impl of : `A ConvNet for the 2020s` -
https://arxiv.org/pdf/2201.03545.pdf
Args:
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
depths (tuple(int)): Number of blocks at each stage. Default: [3, 3, 9, 3]
dims (int): Feature dimension at each stage. Default: [96, 192, 384, 768]
drop_path_rate (float): Stochastic depth rate. Default: 0.
layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.
"""
def __init__(
self,
in_chans=1,
num_classes=1000,
depths=[3, 3, 9, 3],
dims=[96, 192, 384, 768],
drop_path_rate=0.,
layer_scale_init_value=1e-6,
head_init_scale=1.,
):
super().__init__()
self.downsample_layers = nn.ModuleList(
) # stem and 3 intermediate downsampling conv layers
stem = nn.Sequential(
nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4),
LayerNorm(dims[0], eps=1e-6, data_format='channels_first'))
self.downsample_layers.append(stem)
for i in range(3):
downsample_layer = nn.Sequential(
LayerNorm(dims[i], eps=1e-6, data_format='channels_first'),
nn.Conv2d(
dims[i], dims[i + 1], kernel_size=(2, 1), stride=(2, 1)),
)
self.downsample_layers.append(downsample_layer)
self.stages = nn.ModuleList(
) # 4 feature resolution stages, each consisting of multiple residual blocks
dp_rates = [
x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))
]
cur = 0
for i in range(4):
stage = nn.Sequential(*[
Block(
dim=dims[i],
drop_path=dp_rates[cur + j],
layer_scale_init_value=layer_scale_init_value)
for j in range(depths[i])
])
self.stages.append(stage)
cur += depths[i]
def _init_weights(self, m):
if isinstance(m, (nn.Conv2d, nn.Linear)):
trunc_normal_(m.weight, std=.02)
nn.init.constant_(m.bias, 0)
def METHOD_NAME(self, x):
for i in range(4):
x = self.downsample_layers[i](x.contiguous())
x = self.stages[i](x.contiguous())
return x  # feature map of shape (N, C, H, W); no global average pooling is applied here
def forward(self, x):
x = self.METHOD_NAME(x.contiguous())
return x.contiguous()
class LayerNorm(nn.Module):
r""" LayerNorm that supports two data formats: channels_last (default) or channels_first.
The ordering of the dimensions in the inputs. channels_last corresponds to inputs with
shape (batch_size, height, width, channels) while channels_first corresponds to inputs
with shape (batch_size, channels, height, width).
"""
def __init__(self,
normalized_shape,
eps=1e-6,
data_format='channels_last'):
super().__init__()
self.weight = nn.Parameter(torch.ones(normalized_shape))
self.bias = nn.Parameter(torch.zeros(normalized_shape))
self.eps = eps
self.data_format = data_format
if self.data_format not in ['channels_last', 'channels_first']:
raise NotImplementedError
self.normalized_shape = (normalized_shape, )
def forward(self, x):
if self.data_format == 'channels_last':
return F.layer_norm(x, self.normalized_shape, self.weight,
self.bias, self.eps)
elif self.data_format == 'channels_first':
u = x.mean(1, keepdim=True)
s = (x - u).pow(2).mean(1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.eps)
x = self.weight[:, None, None] * x + self.bias[:, None, None]
return x
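# Editor's sketch, not part of the original model code: for channels_first input the
# manual normalisation above matches F.layer_norm applied over the channel dimension
# after a permute; the sizes below are arbitrary example values.
def _example_layernorm_equivalence():
    ln = LayerNorm(16, data_format='channels_first')
    x = torch.randn(2, 16, 4, 4)
    manual = ln(x)
    reference = F.layer_norm(x.permute(0, 2, 3, 1), (16,), ln.weight, ln.bias, ln.eps)
    assert torch.allclose(manual, reference.permute(0, 3, 1, 2), atol=1e-5)
    return manual.shape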
def convnext_tiny():
model = ConvNeXt(depths=[3, 3, 8, 3], dims=[96, 192, 256, 512])
return model |
298,704 | register | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
from math import sin, cos, pi, degrees, radians
import bpy
from bpy.props import BoolProperty, IntProperty, FloatProperty
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import (fullList, match_long_repeat, updateNode)
class SvCircleNode(SverchCustomTreeNode, bpy.types.Node):
''' Circle. [default]
Radius (float): [1.0]
Num verts (int), min 3: [24]
Degrees (float, range 0-360): [360]
Mode: [False - Segment], True - Sector
'''
bl_idname = 'SvCircleNode'
bl_label = 'Circle'
bl_icon = 'MESH_CIRCLE'
rad_: FloatProperty(name='Radius', description='Radius', default=1.0, update=updateNode)
vert_: IntProperty(name='num Verts', description='Vertices. Min 3.', default=24, min=3, update=updateNode)
mode_: BoolProperty(name='mode_', description='Mode. False - Segment, True - Sector', default=0, update=updateNode)
degr_: FloatProperty(name='Degrees', description='Degrees. Range: 0.0-360.0', default=360.0, min=0, max=360.0, update=updateNode)
def sv_init(self, context):
self.inputs.new('SvStringsSocket', "Radius").prop_name = 'rad_'
self.inputs.new('SvStringsSocket', "num Verts").prop_name = 'vert_'
self.inputs.new('SvStringsSocket', "Degrees").prop_name = 'degr_'
self.outputs.new('SvVerticesSocket', "Vertices")
self.outputs.new('SvStringsSocket', "Edges")
self.outputs.new('SvStringsSocket', "Polygons")
def draw_buttons(self, context, layout):
layout.prop(self, "mode_", text="Mode")
def make_verts(self, Angle, Vertices, Radius):
if Angle < 360:
theta = Angle/(Vertices-1)
else:
theta = Angle/Vertices
listVertX = []
listVertY = []
for i in range(Vertices):
listVertX.append(Radius*cos(radians(theta*i)))
listVertY.append(Radius*sin(radians(theta*i)))
if Angle < 360 and self.mode_ == 0:
sigma = radians(Angle)
listVertX[-1] = Radius*cos(sigma)
listVertY[-1] = Radius*sin(sigma)
elif Angle < 360 and self.mode_ == 1:
listVertX.append(0.0)
listVertY.append(0.0)
points = list((x,y,0) for x,y in zip(listVertX, listVertY) )
return points
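    # Editor's note (illustrative): for a full circle theta = Angle/Vertices, so the last
    # vertex does not duplicate the first; for an arc (Angle < 360) theta = Angle/(Vertices-1),
    # so the first and last vertices land on the arc endpoints. For example, up to
    # floating-point error, make_verts(360, 4, 1.0) yields
    # [(1, 0, 0), (0, 1, 0), (-1, 0, 0), (0, -1, 0)].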
def make_edges(self, Angle, Vertices):
listEdg = [(i, i+1) for i in range(Vertices-1)]
if Angle < 360 and self.mode_ == 1:
listEdg.append((0, Vertices))
listEdg.append((Vertices-1, Vertices))
else:
listEdg.append((Vertices-1, 0))
return listEdg
def make_faces(self, Angle, Vertices):
listPlg = list(range(Vertices))
if Angle < 360 and self.mode_ == 1:
listPlg.insert(0, Vertices)
return [listPlg]
def process(self):
# inputs
input_socket_names = ['Radius', 'num Verts', 'Degrees']
radius_input, n_vert_input, angle_input = [self.inputs[n] for n in input_socket_names]
radius = radius_input.sv_get(deepcopy=False)[0]
n_verts = [self.vert_]
if n_vert_input.is_linked:
n_verts = n_vert_input.sv_get(deepcopy=False)[0]
n_verts = list(map(lambda x: max(3, int(x)), n_verts))
angle = angle_input.sv_get(deepcopy=False)[0]
if angle_input.is_linked:
angle = list(map(lambda x: min(360, max(0, x)), angle))
parameters = match_long_repeat([angle, n_verts, radius])
# outputs
output_socket_names = ['Vertices', 'Edges', 'Polygons']
verts_output, edges_output, faces_output = [self.outputs[n] for n in output_socket_names]
if verts_output.is_linked:
points = [self.make_verts(a, v, r) for a, v, r in zip(*parameters)]
verts_output.sv_set(points)
if edges_output.is_linked:
edg = [self.make_edges(a, v) for a, v, r in zip(*parameters)]
edges_output.sv_set(edg)
if faces_output.is_linked:
plg = [self.make_faces(a, v) for a, v, r in zip(*parameters)]
faces_output.sv_set(plg)
def METHOD_NAME():
bpy.utils.register_class(SvCircleNode)
def unregister():
bpy.utils.unregister_class(SvCircleNode) |
298,705 | test init stores connection | from unittest import mock
import kombu
import pytest
from h_matchers import Any
from kombu.exceptions import LimitExceeded, OperationalError
from h import realtime
from h.exceptions import RealtimeMessageQueueError
from h.tasks import RETRY_POLICY_QUICK, RETRY_POLICY_VERY_QUICK
class TestConsumer:
def METHOD_NAME(self, consumer):
assert consumer.connection == mock.sentinel.connection
def test_init_stores_routing_key(self, consumer):
assert consumer.routing_key == "annotation"
def test_init_stores_handler(self, consumer, handler):
assert consumer.handler == handler
def test_get_consumers_creates_a_queue(self, Queue, consumer, generate_queue_name):
consumer_factory = mock.Mock(spec_set=[])
exchange = realtime.get_exchange()
consumer.get_consumers(consumer_factory, mock.Mock())
Queue.assert_called_once_with(
generate_queue_name.return_value,
exchange=exchange,
durable=False,
routing_key="annotation",
auto_delete=True,
)
def test_get_consumers_creates_a_consumer(self, Queue, consumer):
consumer_factory = mock.Mock(spec_set=[])
consumer.get_consumers(consumer_factory, channel=None)
consumer_factory.assert_called_once_with(
queues=[Queue.return_value], callbacks=[consumer.handle_message]
)
def test_get_consumers_returns_list_of_one_consumer(self, consumer):
consumer_factory = mock.Mock(spec_set=[])
consumers = consumer.get_consumers(consumer_factory, channel=None)
assert consumers == [consumer_factory.return_value]
def test_handle_message_acks_message(self, consumer):
message = mock.Mock()
consumer.handle_message({}, message)
message.ack.assert_called_once_with()
def test_handle_message_calls_the_handler(self, consumer, handler):
body = {"foo": "bar"}
consumer.handle_message(body, mock.Mock())
handler.assert_called_once_with(body)
def test_handle_message_doesnt_explode_if_timestamp_missing(self, handler):
consumer = realtime.Consumer(mock.sentinel.connection, "annotation", handler)
message = mock.Mock()
message.headers = {}
consumer.handle_message({}, message)
@pytest.fixture
def Queue(self, patch):
return patch("h.realtime.kombu.Queue")
@pytest.fixture
def consumer(self, handler):
return realtime.Consumer(mock.sentinel.connection, "annotation", handler)
@pytest.fixture
def handler(self):
return mock.Mock(spec_set=[])
@pytest.fixture
def generate_queue_name(self, patch):
return patch("h.realtime.Consumer.generate_queue_name")
class TestPublisher:
def test_publish_annotation(self, producer, publisher, exchange):
payload = {"action": "create", "annotation": {"id": "foobar"}}
publisher.publish_annotation(payload)
producer.publish.assert_called_once_with(
payload,
exchange=exchange,
declare=[exchange],
routing_key="annotation",
retry=True,
retry_policy=RETRY_POLICY_VERY_QUICK,
)
def test_publish_user(self, producer, publisher, exchange):
payload = {"action": "create", "user": {"id": "foobar"}}
publisher.publish_user(payload)
producer.publish.assert_called_once_with(
payload,
exchange=exchange,
declare=[exchange],
routing_key="user",
retry=True,
retry_policy=RETRY_POLICY_VERY_QUICK,
)
@pytest.mark.parametrize("exception", (OperationalError, LimitExceeded))
def test_it_raises_RealtimeMessageQueueError_on_errors(
self, publisher, producer, exception
):
producer.publish.side_effect = exception
with pytest.raises(RealtimeMessageQueueError):
publisher.publish_user({})
@pytest.fixture
def producer(self, patch):
producer_pool = patch("h.realtime.producer_pool")
with producer_pool["foobar"].acquire() as pool:
yield pool
@pytest.fixture
def publisher(self, pyramid_request):
return realtime.Publisher(pyramid_request)
@pytest.fixture
def exchange(self):
return realtime.get_exchange()
class TestGetExchange:
def test_returns_the_exchange(self):
exchange = realtime.get_exchange()
assert isinstance(exchange, kombu.Exchange)
def test_type(self):
exchange = realtime.get_exchange()
assert exchange.type == "direct"
def test_durable(self):
exchange = realtime.get_exchange()
assert not exchange.durable
def test_delivery_mode(self):
"""Test that delivery mode is 1 (transient)."""
exchange = realtime.get_exchange()
assert exchange.delivery_mode == 1
class TestGetConnection:
def test_defaults(self, Connection):
realtime.get_connection({})
Connection.assert_called_once_with("amqp://guest:guest@localhost:5672//")
def test_returns_the_connection(self, Connection):
connection = realtime.get_connection({})
assert connection == Connection.return_value
def test_allows_to_overwrite_broker_url(self, Connection):
broker_url = "amqp://alice:bob@rabbitmq.int:5673/prj"
realtime.get_connection({"broker_url": broker_url})
Connection.assert_called_once_with(broker_url)
def test_it_adds_timeout_options_for_failfast(self, Connection):
realtime.get_connection({}, fail_fast=True)
Connection.assert_called_once_with(
Any.string(), transport_options=RETRY_POLICY_QUICK
)
@pytest.fixture
def Connection(self, patch):
return patch("h.realtime.kombu.Connection") |
298,706 | test initialize | import unittest
from unittest.mock import Mock
from AnyQt.QtWidgets import QComboBox, QCheckBox, QSpinBox, QLineEdit
from orangewidget.tests.base import GuiTest
from orangewidget.utils.visual_settings_dlg import SettingsDialog, FontList
class TestSettingsDialog(GuiTest):
def setUp(self):
self.defaults = {
"Box": {"Items": {
"P1": (["Foo", "Bar", "Baz"], "Bar"),
"P2": (range(3, 10, 2), 5),
"P3": (None, True),
"P4": (None, "Foo Bar"),
"P5": (FontList([".Foo", ".Bar"]), ".Foo"),
}}
}
self.dlg = SettingsDialog(None, self.defaults)
@property
def dialog_controls(self):
return self.dlg._SettingsDialog__controls
def METHOD_NAME(self):
controls = self.dialog_controls
self.assertEqual(len(controls), len(self.defaults["Box"]["Items"]))
self.assertIsInstance(controls[("Box", "Items", "P1")][0], QComboBox)
self.assertIsInstance(controls[("Box", "Items", "P2")][0], QSpinBox)
self.assertIsInstance(controls[("Box", "Items", "P3")][0], QCheckBox)
self.assertIsInstance(controls[("Box", "Items", "P4")][0], QLineEdit)
self.assertIsInstance(controls[("Box", "Items", "P5")][0], QComboBox)
def test_changed_settings(self):
self.dialog_controls[("Box", "Items", "P1")][0].setCurrentText("Foo")
self.dialog_controls[("Box", "Items", "P2")][0].setValue(7)
self.dialog_controls[("Box", "Items", "P3")][0].setChecked(False)
self.dialog_controls[("Box", "Items", "P4")][0].setText("Foo Baz")
self.dialog_controls[("Box", "Items", "P5")][0].setCurrentIndex(1)
changed = {("Box", "Items", "P1"): "Foo",
("Box", "Items", "P2"): 7,
("Box", "Items", "P3"): False,
("Box", "Items", "P4"): "Foo Baz",
("Box", "Items", "P5"): ".Bar"}
self.assertDictEqual(self.dlg.changed_settings, changed)
def test_reset(self):
ctrls = self.dialog_controls
ctrls[("Box", "Items", "P1")][0].setCurrentText("Foo")
ctrls[("Box", "Items", "P2")][0].setValue(7)
ctrls[("Box", "Items", "P3")][0].setChecked(False)
ctrls[("Box", "Items", "P4")][0].setText("Foo Baz")
self.dialog_controls[("Box", "Items", "P5")][0].setCurrentIndex(1)
self.dlg._SettingsDialog__reset()
self.assertDictEqual(self.dlg.changed_settings, {})
self.assertEqual(ctrls[("Box", "Items", "P1")][0].currentText(), "Bar")
self.assertEqual(ctrls[("Box", "Items", "P2")][0].value(), 5)
self.assertTrue(ctrls[("Box", "Items", "P3")][0].isChecked())
self.assertEqual(ctrls[("Box", "Items", "P4")][0].text(), "Foo Bar")
self.assertEqual(ctrls[("Box", "Items", "P5")][0].currentText(), "Foo")
def test_setting_changed(self):
handler = Mock()
self.dlg.setting_changed.connect(handler)
self.dialog_controls[("Box", "Items", "P1")][0].setCurrentText("Foo")
handler.assert_called_with(('Box', 'Items', 'P1'), "Foo")
self.dialog_controls[("Box", "Items", "P2")][0].setValue(7)
handler.assert_called_with(('Box', 'Items', 'P2'), 7)
self.dialog_controls[("Box", "Items", "P3")][0].setChecked(False)
handler.assert_called_with(('Box', 'Items', 'P3'), False)
self.dialog_controls[("Box", "Items", "P4")][0].setText("Foo Baz")
handler.assert_called_with(('Box', 'Items', 'P4'), "Foo Baz")
self.dialog_controls[("Box", "Items", "P5")][0].setCurrentIndex(1)
handler.assert_called_with(('Box', 'Items', 'P5'), ".Bar")
def test_apply_settings(self):
changed = [(("Box", "Items", "P1"), "Foo"),
(("Box", "Items", "P2"), 7),
(("Box", "Items", "P3"), False),
(("Box", "Items", "P4"), "Foo Baz"),
(("Box", "Items", "P5"), ".Bar")]
self.dlg.apply_settings(changed)
ctrls = self.dialog_controls
self.assertEqual(ctrls[("Box", "Items", "P1")][0].currentText(), "Foo")
self.assertEqual(ctrls[("Box", "Items", "P2")][0].value(), 7)
self.assertFalse(ctrls[("Box", "Items", "P3")][0].isChecked())
self.assertEqual(ctrls[("Box", "Items", "P4")][0].text(), "Foo Baz")
self.assertEqual(ctrls[("Box", "Items", "P5")][0].currentText(), "Bar")
self.assertDictEqual(self.dlg.changed_settings,
{k: v for k, v in changed})
if __name__ == '__main__':
unittest.main() |
298,707 | get json result path | # SPDX-License-Identifier: GPL-3.0-or-later
# SPDX-FileCopyrightText: Copyright contributors to the OpenScanHub project.
import glob
import json
import logging
import os
import subprocess
import tempfile
RESULT_FILE_JSON = 'scan-results.js'
RESULT_FILE_ERR = 'scan-results.err'
RESULT_FILE_HTML = 'scan-results.html'
logger = logging.getLogger(__name__)
class ResultsExtractor:
def __init__(self, path, output_dir=None, unpack_in_temp=True):
"""
path is either a path to a results tarball or to a directory with results
"""
self.path = path
if output_dir:
self.output_dir = output_dir
elif unpack_in_temp:
self.output_dir = tempfile.mkdtemp(prefix='csmock-')
else:
self.output_dir = os.path.dirname(os.path.expanduser(path))
self._json_path = None
@property
def json_path(self):
if self._json_path is None:
self.process()
if not os.path.exists(self._json_path):
raise RuntimeError('json results do not exist: ' + self._json_path)
return self._json_path
def extract_tarball(self, exclude_patterns=None):
"""
"""
exclude_patterns = exclude_patterns or []
exclude_patterns.append("*debug") # do not unpack debug dir
# python 2 does not support lzma
command = [
'tar', '-xf', self.path,
'-C', self.output_dir,
'--wildcards',
'--wildcards-match-slash',
]
if exclude_patterns:
# do NOT quote pattern! it won't work
command += ['--exclude=' + p for p in exclude_patterns]
logger.debug('Running command %s', command)
subprocess.check_call(command)
def METHOD_NAME(self):
return self.json_path
def process(self):
""" untar results if needed """
if os.path.isdir(self.path):
self._json_path = os.path.join(self.path, RESULT_FILE_JSON)
else:
self.extract_tarball()
try:
self._json_path = glob.glob(os.path.join(self.output_dir, '*', RESULT_FILE_JSON))[0]
except IndexError:
logger.error("no results (%s) in dir %s", RESULT_FILE_JSON, self.output_dir)
self._json_path = ''
class CsmockAPI:
"""
Parser for the csmock JSON results:
{
"scan": {
"analyzer-version-clang": "15.0.7",
"analyzer-version-cppcheck": "2.4",
"analyzer-version-gcc": "12.3.1",
"analyzer-version-gcc-analyzer": "12.3.1",
"analyzer-version-shellcheck": "0.8.0",
"enabled-plugins": "clang, cppcheck, gcc, shellcheck",
"exit-code": 0,
"host": "osh-worker",
"mock-config": "fedora-37-x86_64",
"project-name": "None",
"store-results-to": "/tmp/tmp14o1xjr8/output.tar.xz",
"time-created": "2023-08-22 11:38:47",
"time-finished": "2023-08-22 11:39:08",
"tool": "csmock",
"tool-args": "'/usr/bin/csmock' '-t' 'gcc,clang,cppcheck,shellcheck' '-r' 'fedora-37-x86_64' '--no-scan' '--use-host-cppcheck' '--gcc-analyze' '-o' '/tmp/tmp14o1xjr8/output.tar.xz'",
"tool-version": "csmock-3.4.2-1.el8"
},
"defects": []
}
"""
def __init__(self, json_results_path):
"""
path -- path to results in JSON format
"""
self.json_results_path = json_results_path
self._json_result = None
@property
def json_result(self):
if self._json_result is None:
with open(self.json_results_path) as fp:
self._json_result = json.load(fp)
return self._json_result
def get_defects(self):
"""
return list of defects: csmock's output is used directly
"""
return self.json_result['defects']
def get_scan_metadata(self):
return self.json_result.get('scan', {})
def json(self):
"""
return result report from csmock as json
"""
return self.json_result
def get_analyzers(self):
"""
return analyzers used for scan, format:
[{
'name': 'analyzer1',
'version': '1.2.3'
},... ]
"""
scan = self.get_scan_metadata()
analyzers = []
for key, value in scan.items():
if key.startswith('analyzer-version-'):
# analyzer-version-[gcc]
analyzer = {'name': key[17:], 'version': value}
analyzers.append(analyzer)
return analyzers
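    # Editor's note (illustrative): with the sample "scan" metadata shown in the class
    # docstring, get_analyzers() would return entries such as
    # [{'name': 'clang', 'version': '15.0.7'}, {'name': 'cppcheck', 'version': '2.4'},
    #  {'name': 'gcc', 'version': '12.3.1'}, {'name': 'gcc-analyzer', 'version': '12.3.1'},
    #  {'name': 'shellcheck', 'version': '0.8.0'}].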
def unpack_and_return_api(tb_path, in_dir=""):
""" convenience shortcut """
in_dir = in_dir or os.path.dirname(tb_path)
rex = ResultsExtractor(tb_path, output_dir=in_dir, unpack_in_temp=False)
try:
return CsmockAPI(rex.json_path)
except RuntimeError as ex:
logger.error('Error while creating csmock api: %s', ex)
return None |
298,708 | test model info | import os
import json
import pandas as pd
from shutil import copyfile
from nose.tools import eq_, ok_
from . import folder, TestGramex
class TestModelHandler(TestGramex):
@classmethod
def setUpClass(cls):
# Create a model and copy iris.csv to the tests/ folder
original_iris_path = os.path.join(folder, '..', 'testlib', 'iris.csv')
copyfile(original_iris_path, os.path.join(folder, 'iris.csv'))
@classmethod
def tearDownClass(cls):
for file_ in {'iris.csv', 'iris.pkl', 'iris2.pkl'}:
if os.path.exists(os.path.join(folder, file_)):
os.unlink(os.path.join(folder, file_))
def METHOD_NAME(self):
# PUT should return 200
self.check(url='/model/iris2/', method='put')
r = self.check('/model/iris2/', method='get')
info = r.json() # Get Model Info
ok_('BernoulliNB' in info['model_class'])
eq_(info['trained'], False)
def test_train_model_check_info(self):
self.check(
'/model/iris/',
method='put',
data=json.dumps(
{'url': 'iris.csv', 'model_class': 'sklearn.linear_model.SGDClassifier'}
),
request_headers={'Model-Retrain': 'True'},
)
r = self.check('/model/iris/')
info = r.json()
eq_(info['trained'], True)
eq_(info['input'], ['sepal_length', 'sepal_width', 'petal_length', 'petal_width'])
eq_(info['output'], 'species')
def test_predict(self):
data = [
{'sepal_length': 5, 'sepal_width': 3, 'petal_length': 1.5, 'petal_width': 0},
{'sepal_length': 5, 'sepal_width': 2, 'petal_length': 5.0, 'petal_width': 1},
{'sepal_length': 6, 'sepal_width': 3, 'petal_length': 4.8, 'petal_width': 2},
]
# Test individual results
self.check(
'/model/iris/',
method='put',
data=json.dumps(
{'url': 'iris.csv', 'model_class': 'sklearn.linear_model.SGDClassifier'}
),
request_headers={'Model-Retrain': 'True'},
)
# In case model not trained, train it.
single = [self.check('/model/iris/', method='post', data=json.dumps(k)) for k in data]
single_responses = [response.json()[0] for response in single]
ok_(
all(
response['result'] in {'setosa', 'versicolor', 'virginica'}
for response in single_responses
)
)
data_df = pd.DataFrame(data)
# Test multiple results
multi = self.check(
'/model/iris/', method='post', data=json.dumps(data_df.to_dict(orient='list'))
)
eq_(multi.json(), single_responses)
# This currently does not work since the model is not deterministic
# Skip eq_([result['result'] for result in multi.json()],
# ['setosa', 'versicolor', 'virginica'])
def test_predict_incomplete(self):
self.check('/model/iris/', method='post', data=json.dumps({'sepal_length': 5}), code=500)
def test_change_params_without_training(self):
self.check(
'/model/iris/',
method='put',
data=json.dumps(
{'url': 'iris.csv', 'model_class': 'sklearn.linear_model.SGDClassifier'}
),
request_headers={'Model-Retrain': 'True'},
)
# Train a model
self.check(
'/model/iris/',
method='post',
data=json.dumps({'model_class': 'sklearn.ensemble.RandomForestClassifier'}),
)
# Change a parameter
r = self.check('/model/iris/').json()
eq_(r['model_class'], 'sklearn.ensemble.RandomForestClassifier')
eq_(r['trained'], False)
def test_delete_model(self):
self.check('/model/iris/', method='delete')
ok_('iris.pkl' not in os.listdir(folder))
def test_get_training_data(self):
self.check(
'/model/iris/',
method='put',
data=json.dumps({'url': 'iris.csv'}),
request_headers={'Model-Retrain': 'True'},
)
r = self.check('/model/iris/data?').json()
training_file = pd.read_csv(os.path.join(folder, 'iris.csv'), encoding='utf-8')
eq_(len(r), len(training_file))
def test_add_training_data(self):
# Gets interpreted as string which breaks other tests
# TODO: Fix Data.py update and insert types then re-add this test
...
def test_remove_training_data(self):
# TODO: Write after test_add_training_data()
...
def test_update_training_data(self):
# TODO: Write after test_add_training_data()
... |
298,709 | stream api v2 pk too many properties | #
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
import json
from unittest.mock import Mock
import pytest
from airbyte_cdk.models import ConfiguredAirbyteCatalog
from source_salesforce.api import Salesforce
from source_salesforce.source import SourceSalesforce
@pytest.fixture(autouse=True)
def time_sleep_mock(mocker):
time_mock = mocker.patch("time.sleep", lambda x: None)
yield time_mock
@pytest.fixture(scope="module")
def bulk_catalog():
with open("unit_tests/bulk_catalog.json") as f:
data = json.loads(f.read())
return ConfiguredAirbyteCatalog.parse_obj(data)
@pytest.fixture(scope="module")
def rest_catalog():
with open("unit_tests/rest_catalog.json") as f:
data = json.loads(f.read())
return ConfiguredAirbyteCatalog.parse_obj(data)
@pytest.fixture(scope="module")
def state():
state = {"Account": {"LastModifiedDate": "2021-10-01T21:18:20.000Z"}, "Asset": {"SystemModstamp": "2021-10-02T05:08:29.000Z"}}
return state
@pytest.fixture(scope="module")
def stream_config():
"""Generates streams settings for BULK logic"""
return {
"client_id": "fake_client_id",
"client_secret": "fake_client_secret",
"refresh_token": "fake_refresh_token",
"start_date": "2010-01-18T21:18:20Z",
"is_sandbox": False,
"wait_timeout": 15,
}
@pytest.fixture(scope="module")
def stream_config_date_format():
"""Generates streams settings with `start_date` in format YYYY-MM-DD"""
return {
"client_id": "fake_client_id",
"client_secret": "fake_client_secret",
"refresh_token": "fake_refresh_token",
"start_date": "2010-01-18",
"is_sandbox": False,
"wait_timeout": 15,
}
@pytest.fixture(scope="module")
def stream_config_without_start_date():
"""Generates streams settings for REST logic without start_date"""
return {
"client_id": "fake_client_id",
"client_secret": "fake_client_secret",
"refresh_token": "fake_refresh_token",
"is_sandbox": False,
"wait_timeout": 15,
}
def _stream_api(stream_config, describe_response_data=None):
sf_object = Salesforce(**stream_config)
sf_object.login = Mock()
sf_object.access_token = Mock()
sf_object.instance_url = "https://fase-account.salesforce.com"
response_data = {"fields": [{"name": "LastModifiedDate", "type": "string"}, {"name": "Id", "type": "string"}]}
if describe_response_data:
response_data = describe_response_data
sf_object.describe = Mock(return_value=response_data)
return sf_object
@pytest.fixture(scope="module")
def stream_api(stream_config):
return _stream_api(stream_config)
@pytest.fixture(scope="module")
def stream_api_v2(stream_config):
describe_response_data = {"fields": [{"name": "LastModifiedDate", "type": "string"}, {"name": "BillingAddress", "type": "address"}]}
return _stream_api(stream_config, describe_response_data=describe_response_data)
@pytest.fixture(scope="module")
def stream_api_pk(stream_config):
describe_response_data = {"fields": [{"name": "LastModifiedDate", "type": "string"}, {"name": "Id", "type": "string"}]}
return _stream_api(stream_config, describe_response_data=describe_response_data)
@pytest.fixture(scope="module")
def stream_api_v2_too_many_properties(stream_config):
describe_response_data = {
"fields": [{"name": f"Property{str(i)}", "type": "string"} for i in range(Salesforce.REQUEST_SIZE_LIMITS)]
}
describe_response_data["fields"].extend([{"name": "BillingAddress", "type": "address"}])
return _stream_api(stream_config, describe_response_data=describe_response_data)
@pytest.fixture(scope="module")
def METHOD_NAME(stream_config):
describe_response_data = {
"fields": [{"name": f"Property{str(i)}", "type": "string"} for i in range(Salesforce.REQUEST_SIZE_LIMITS)]
}
describe_response_data["fields"].extend([
{"name": "BillingAddress", "type": "address"}, {"name": "Id", "type": "string"}
])
return _stream_api(stream_config, describe_response_data=describe_response_data)
def generate_stream(stream_name, stream_config, stream_api):
return SourceSalesforce.generate_streams(stream_config, {stream_name: None}, stream_api)[0]
def encoding_symbols_parameters():
return [(x, {"Content-Type": "text/csv; charset=ISO-8859-1"}, b'"\xc4"\n,"4"\n\x00,"\xca \xfc"', [{"Ä": "4"}, {"Ä": "Ê ü"}]) for x in range(1, 11)] + [
(
x,
{"Content-Type": "text/csv; charset=utf-8"},
b'"\xd5\x80"\n "\xd5\xaf","\xd5\xaf"\n\x00,"\xe3\x82\x82 \xe3\x83\xa4 \xe3\x83\xa4 \xf0\x9d\x9c\xb5"',
[{"Հ": "կ"}, {"Հ": "も ヤ ヤ 𝜵"}],
)
for x in range(1, 11)
] + [
(
x,
{"Content-Type": "text/csv"},
b'"\xd5\x80"\n "\xd5\xaf","\xd5\xaf"\n\x00,"\xe3\x82\x82 \xe3\x83\xa4 \xe3\x83\xa4 \xf0\x9d\x9c\xb5"',
[{"Հ": "կ"}, {"Հ": "も ヤ ヤ 𝜵"}],
)
for x in range(1, 11)
] + [
(
x,
{},
b'"\xd5\x80"\n "\xd5\xaf","\xd5\xaf"\n\x00,"\xe3\x82\x82 \xe3\x83\xa4 \xe3\x83\xa4 \xf0\x9d\x9c\xb5"',
[{"Հ": "կ"}, {"Հ": "も ヤ ヤ 𝜵"}],
)
for x in range(1, 11)
] |
298,710 | u raðtala | """
Greynir: Natural language processing for Icelandic
Number parsing grammar.
Copyright (C) 2023 Miðeind ehf.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see http://www.gnu.org/licenses/.
Utility module
Exposes the nonterminal "URaðtala" for parsing ordinal
numbers written either in natural language or in digits.
It constructs the value of the ordinal number in result["numbers"]
and returns the ordinal number in result["ordinals"].
"""
# TODO: Support "einn fjórði" etc.
from tree import Result, ParamList, Node
from queries.util import read_utility_grammar_file
from queries.util.cardinals import (
_sum_children,
_multiply_children,
_lookup_function_generator,
)
# The context-free grammar for number utterances recognized by this utility module
GRAMMAR = read_utility_grammar_file("ordinals")
_ORDINAL_NUMBERS = {
"núllti": 0,
"fyrstur": 1,
"annar": 2,
"annars": 2,
"þriðji": 3,
"fjórði": 4,
"fimmti": 5,
"sjötti": 6,
"sjöundi": 7,
"áttundi": 8,
"níundi": 9,
"tíundi": 10,
"ellefti": 11,
"tólfti": 12,
"þrettándi": 13,
"fjórtándi": 14,
"fimmtándi": 15,
"sextándi": 16,
"sautjándi": 17,
"seytjándi": 17,
"átjándi": 18,
"nítjándi": 19,
"tuttugasti": 20,
"þrítugasti": 30,
"fertugasti": 40,
"fimmtugasti": 50,
"sextugasti": 60,
"sjötugasti": 70,
"átttugasti": 80,
"nítugasti": 90,
"hundraðasti": 100,
"hundruðasti": 100,
"þúsundasti": 1000,
"milljónasti": 10**6,
"miljarðasti": 10**9,
"milljarðasti": 10**9,
"billjónasti": 10**12,
"billjarðasti": 10**15,
"trilljónasti": 10**18,
"trilljarðasti": 10**21,
"kvaðrilljónasti": 10**24,
}
def METHOD_NAME(node: Node, params: ParamList, result: Result) -> None:
# Check if a number was specified in digits instead of written out
tala = node.first_child(lambda n: n.has_t_base("tala"))
if tala is not None and tala.contained_number is not None:
result["numbers"] = [int(tala.contained_number)]
result["ordinals"] = [result.numbers[0]]
# Plural named functions (e.g. "UTöluðRaðtalaMilljónir") take the product of the children nodes
(
UTöluðRaðtalaHundruð,
UTöluðRaðtala10Til19Hundruð,
UTöluðRaðtalaÞúsundir,
UTöluðRaðtalaMilljónir,
UTöluðRaðtalaMilljarðar,
UTöluðRaðtalaBilljónir,
UTöluðRaðtalaBilljarðar,
UTöluðRaðtalaTrilljónir,
UTöluðRaðtalaTrilljarðar,
UTöluðRaðtalaKvaðrilljónir,
UTöluðRaðtalaKvaðrilljarðar,
UTöluðRaðtalaKvintilljónir,
UTöluðRaðtalaSextilljónir,
UTöluðRaðtalaSeptilljónir,
UTöluðRaðtalaOktilljónir,
) = [_multiply_children] * 15
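# Editor's note (illustrative): the plural nonterminals above multiply their children
# (e.g. an ordinal in the two-hundreds contributes 2 * 100), while the summing
# nonterminals below add theirs, so "tuttugasti og fyrsti" ("twenty-first") evaluates
# to 20 + 1 = 21.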
# "UTöluðRaðtalaUndirX" functions take the sum of the children nodes,
# along with the root "UTöluðRaðtala"
(
UTöluðRaðtala,
UTöluðRaðtalaUndirHundrað,
UTöluðRaðtalaUndirÞúsund,
UTöluðRaðtalaUndirMilljón,
UTöluðRaðtalaUndirMilljarði,
UTöluðRaðtalaUndirBilljón,
UTöluðRaðtalaUndirBilljarði,
UTöluðRaðtalaUndirTrilljón,
UTöluðRaðtalaUndirTrilljarði,
UTöluðRaðtalaUndirKvaðrilljón,
UTöluðRaðtalaUndirKvaðrilljarði,
UTöluðRaðtalaUndirKvintilljón,
UTöluðRaðtalaUndirSextilljón,
UTöluðRaðtalaUndirSeptilljón,
UTöluðRaðtalaUndirOktilljón,
) = [_sum_children] * 15
# Singular named functions (e.g. "UTöluðTalaHundrað") find the corresponding numeric value of the word
(
UTöluðRaðtala0,
UTöluðRaðtala1,
UTöluðRaðtala2Til9,
UTöluðRaðtala10Til19,
UTöluðRaðtalaTugir,
UTöluðRaðtalaHundrað,
UTöluðRaðtalaÞúsund,
UTöluðRaðtalaMilljón,
UTöluðRaðtalaMilljarður,
UTöluðRaðtalaBilljón,
UTöluðRaðtalaBilljarður,
UTöluðRaðtalaTrilljón,
UTöluðRaðtalaTrilljarður,
UTöluðRaðtalaKvaðrilljón,
UTöluðRaðtalaKvaðrilljarður,
UTöluðRaðtalaKvintilljón,
UTöluðRaðtalaSextilljón,
UTöluðRaðtalaSeptilljón,
UTöluðRaðtalaOktilljón,
) = [_lookup_function_generator(_ORDINAL_NUMBERS)] * 19 |
298,711 | root dir | #!/usr/bin/env python3
# Copyright 2023 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import json, os, platform, subprocess, sys
def load_json_config(filename):
try:
with open(filename, 'r') as f:
return json.load(f)
except IOError:
return None
os_name = platform.system()
# Find Regent.
regent_exe = os.path.realpath(__file__)
regent_dir = os.path.dirname(regent_exe)
terra_dir = os.path.join(regent_dir, 'terra')
# Find Legion (in the environment, or relative to Regent).
if 'LG_RT_DIR' in os.environ:
runtime_dir = os.path.realpath(os.environ['LG_RT_DIR'])
else:
runtime_dir = os.path.join(os.path.dirname(regent_dir), 'runtime')
bindings_dir = os.path.join(os.path.dirname(runtime_dir), 'bindings', 'regent')
python_dir = os.path.join(os.path.dirname(runtime_dir), 'bindings', 'python')
# Find CUDA.
cuda_dir = os.environ.get('CUDA') or os.environ.get('CUDA_HOME') or os.environ.get('CUDATOOLKIT_HOME')
if cuda_dir:
cuda_dir = os.path.realpath(cuda_dir)
cuda_include_dir = os.path.join(cuda_dir, 'include') if cuda_dir is not None else None
# Find HIP.
rocm_dir = os.environ.get('ROCM_PATH')
hip_dir = os.environ.get('HIP_PATH') or (os.path.join(rocm_dir, 'hip') if rocm_dir is not None else None)
hip_cub_dir = os.path.join(rocm_dir, 'hipcub') if rocm_dir is not None else None
hip_include_dir = os.path.join(hip_dir, 'include') if hip_dir is not None else None
hip_cub_include_dir = os.path.join(hip_cub_dir, 'include') if hip_cub_dir is not None else None
# Thrust only needs to be manually located with HIP, where we need an older version to work around a bug.
thrust_dir = os.environ.get('THRUST_PATH')
# Find RDIR.
if 'USE_RDIR' in os.environ:
use_rdir = os.environ['USE_RDIR']
else:
rdir_config_filename = os.path.join(regent_dir, '.rdir.json')
rdir = load_json_config(rdir_config_filename)
use_rdir = '1' if rdir in ['auto', 'manual'] else '0'
# Detect use of CMake.
if 'USE_CMAKE' in os.environ:
cmake = os.environ['USE_CMAKE'] == '1'
else:
cmake_config_filename = os.path.join(regent_dir, '.cmake.json')
cmake = load_json_config(cmake_config_filename)
cmake_build_dir = os.path.join(regent_dir, 'build')
legion_install_prefix_filename = os.path.join(regent_dir, '.legion_install_prefix.json')
legion_install_prefix = None
if os.path.exists(legion_install_prefix_filename):
legion_install_prefix = load_json_config(legion_install_prefix_filename)
include_path = (
(os.environ['INCLUDE_PATH'].split(';')
if 'INCLUDE_PATH' in os.environ else []) +
[bindings_dir,
runtime_dir,
] +
([os.path.join(cmake_build_dir, 'runtime')] if cmake else []) +
([os.path.join(legion_install_prefix, 'include')] if legion_install_prefix is not None else []))
if cuda_include_dir is not None:
include_path.append(cuda_include_dir)
if hip_include_dir is not None:
include_path.append(hip_include_dir)
if hip_cub_include_dir is not None:
include_path.append(hip_cub_include_dir)
# per runtime/runtime.mk, has to go ahead of HIP_PATH
if thrust_dir is not None:
include_path.insert(0, thrust_dir)
LD_LIBRARY_PATH = 'LD_LIBRARY_PATH'
if os_name == 'Darwin':
LD_LIBRARY_PATH = 'DYLD_LIBRARY_PATH'
lib_path = (
(os.environ[LD_LIBRARY_PATH].split(':')
if LD_LIBRARY_PATH in os.environ else []) +
[os.path.join(terra_dir, 'build')])
if legion_install_prefix is not None:
lib_path += [os.path.join(legion_install_prefix, 'lib')]
elif cmake:
lib_path += [os.path.join(cmake_build_dir, 'lib')]
else:
lib_path += [bindings_dir]
def METHOD_NAME():
return os.path.dirname(runtime_dir)
def regent(args, env={}, cwd=None, **kwargs):
terra_exe = os.path.join(terra_dir, 'terra')
if not os.path.exists(terra_exe):
terra_exe = os.path.join(terra_dir, 'bin', 'terra')
if not os.path.exists(terra_exe):
terra_exe = os.path.join(terra_dir, 'release', 'bin', 'terra')
if 'TERRA_PATH' in os.environ:
terra_path = os.environ['TERRA_PATH'].split(';')
else:
terra_path = []
if 'PYTHONPATH' in os.environ:
python_path = os.environ['PYTHONPATH'].split(':')
else:
python_path = []
normal_args = [arg for arg in args if not arg.startswith('-')]
first_arg = None
if len(normal_args) >= 1:
first_arg = os.path.realpath(
os.path.join(cwd, normal_args[0]) if cwd is not None else normal_args[0])
terra_path += (
['?.t', '?.rg'] +
([os.path.join(os.path.dirname(first_arg), '?.t'),
os.path.join(os.path.dirname(first_arg), '?.rg')]
if first_arg is not None and os.path.exists(first_arg) else []) +
[os.path.join(regent_dir, 'src', '?.t'),
os.path.join(regent_dir, 'src', '?.rg'),
os.path.join(regent_dir, 'src', 'rdir', 'plugin', 'src', '?.t'),
os.path.join(terra_dir, 'tests', 'lib', '?.t'),
os.path.join(terra_dir, 'release', 'include', '?.t'),
os.path.join(bindings_dir, '?.t')])
if first_arg is not None:
python_path.append(os.path.dirname(first_arg))
python_path.append(python_dir)
terra_env = {
'REGENT': regent_exe,
'TERRA_PATH': ';'.join(terra_path),
LD_LIBRARY_PATH: ':'.join(lib_path),
'INCLUDE_PATH': ';'.join(include_path),
'PYTHONPATH': ':'.join(python_path),
'LG_RT_DIR': runtime_dir,
'USE_CMAKE': '1' if cmake else '0',
'CMAKE_BUILD_DIR': cmake_build_dir,
'USE_RDIR': use_rdir,
}
if legion_install_prefix is not None:
terra_env['LEGION_INSTALL_PREFIX'] = legion_install_prefix
if cuda_dir is not None:
terra_env['CUDA_HOME'] = cuda_dir
cmd = []
if 'LAUNCHER' in os.environ:
cmd = cmd + (os.environ['LAUNCHER'].split()
if 'LAUNCHER' in os.environ else [])
cmd = cmd + [terra_exe] + args
cmd_env = dict(os.environ.items())
cmd_env.update(terra_env)
cmd_env.update(env)
try:
return subprocess.Popen(
cmd, env=cmd_env, cwd=cwd, **kwargs)
except OSError:
print('Command failed: %s' % cmd, file=sys.stderr)
sys.stderr.flush()
raise
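# Editor's sketch, not part of the original launcher: how regent() might be driven
# programmatically; "hello.rg" is a hypothetical file name.
def _example_run_regent():
    proc = regent(['hello.rg'])
    return proc.wait()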
if __name__ == '__main__':
sys.exit(regent(sys.argv[1:]).wait()) |
298,712 | test create counterpart | ##########################################################################
#
# Copyright (c) 2023, Cinesite VFX Ltd. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import Gaffer
import GafferTest
class OptionalValuePlugTest( GafferTest.TestCase ) :
def testConstruction( self ) :
valuePlug = Gaffer.IntPlug()
plug = Gaffer.OptionalValuePlug( "name", valuePlug, True )
self.assertEqual( plug.getName(), "name" )
self.assertEqual( plug.direction(), Gaffer.Plug.Direction.In )
self.assertEqual( len( plug ), 2 )
self.assertIsInstance( plug["enabled"], Gaffer.BoolPlug )
self.assertEqual( plug["enabled"].defaultValue(), True )
self.assertEqual( plug["enabled"].direction(), Gaffer.Plug.Direction.In )
self.assertTrue( plug["enabled"].isSame( plug[0] ) )
self.assertTrue( plug["value"].isSame( valuePlug ) )
self.assertTrue( plug["value"].isSame( plug[1] ) )
def testAcceptsChild( self ) :
plug = Gaffer.OptionalValuePlug( "name", Gaffer.IntPlug() )
self.assertFalse( plug.acceptsChild( Gaffer.IntPlug() ) )
def METHOD_NAME( self ) :
plug = Gaffer.OptionalValuePlug( "name", Gaffer.IntPlug() )
plug["enabled"].setValue( True ) # Current values should be ignored by
plug["value"].setValue( 10 ) # `createCounterpart()`.
for direction in ( Gaffer.Plug.Direction.In, Gaffer.Plug.Direction.Out ) :
with self.subTest( direction = direction ) :
plug2 = plug.createCounterpart( "counter", direction )
self.assertEqual( plug2.direction(), direction )
self.assertTrue( plug2.isSetToDefault() )
self.assertEqual( plug2["enabled"].direction(), direction )
self.assertEqual( plug2["enabled"].defaultValue(), plug["enabled"].defaultValue() )
self.assertEqual( plug2["value"].direction(), direction )
self.assertIsInstance( plug2["value"], Gaffer.IntPlug )
self.assertEqual( plug2["value"].defaultValue(), plug["value"].defaultValue() )
def testSerialisation( self ) :
script = Gaffer.ScriptNode()
script["node"] = Gaffer.Node()
script["node"]["user"]["p"] = Gaffer.OptionalValuePlug( valuePlug = Gaffer.IntPlug(), enabledPlugDefaultValue = True, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
script["node"]["user"]["p"]["enabled"].setValue( False )
script["node"]["user"]["p"]["value"].setValue( 10 )
script2 = Gaffer.ScriptNode()
script2.execute( script.serialise() )
self.assertEqual( script2["node"]["user"]["p"]["enabled"].defaultValue(), script["node"]["user"]["p"]["enabled"].defaultValue() )
self.assertEqual( script2["node"]["user"]["p"]["enabled"].getValue(), script["node"]["user"]["p"]["enabled"].getValue() )
self.assertEqual( script2["node"]["user"]["p"]["value"].defaultValue(), script["node"]["user"]["p"]["value"].defaultValue() )
self.assertEqual( script2["node"]["user"]["p"]["value"].getValue(), script["node"]["user"]["p"]["value"].getValue() )
if __name__ == "__main__":
unittest.main() |
298,713 | tags | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetQueryPackResult',
'AwaitableGetQueryPackResult',
'get_query_pack',
'get_query_pack_output',
]
@pulumi.output_type
class GetQueryPackResult:
"""
A Log Analytics QueryPack definition.
"""
def __init__(__self__, id=None, location=None, name=None, provisioning_state=None, query_pack_id=None, METHOD_NAME=None, time_created=None, time_modified=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if query_pack_id and not isinstance(query_pack_id, str):
raise TypeError("Expected argument 'query_pack_id' to be a str")
pulumi.set(__self__, "query_pack_id", query_pack_id)
if METHOD_NAME and not isinstance(METHOD_NAME, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", METHOD_NAME)
if time_created and not isinstance(time_created, str):
raise TypeError("Expected argument 'time_created' to be a str")
pulumi.set(__self__, "time_created", time_created)
if time_modified and not isinstance(time_modified, str):
raise TypeError("Expected argument 'time_modified' to be a str")
pulumi.set(__self__, "time_modified", time_modified)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Azure resource Id
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> str:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Azure resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
Current state of this QueryPack: whether or not it has been provisioned within the resource group in which it is defined. Users cannot change this value but are able to read from it. Values will include Succeeded, Deploying, Canceled, and Failed.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="queryPackId")
def query_pack_id(self) -> str:
"""
The unique ID of your application. This field cannot be changed.
"""
return pulumi.get(self, "query_pack_id")
@property
@pulumi.getter
def METHOD_NAME(self) -> Optional[Mapping[str, str]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="timeCreated")
def time_created(self) -> str:
"""
Creation Date for the Log Analytics QueryPack, in ISO 8601 format.
"""
return pulumi.get(self, "time_created")
@property
@pulumi.getter(name="timeModified")
def time_modified(self) -> str:
"""
Last modified date of the Log Analytics QueryPack, in ISO 8601 format.
"""
return pulumi.get(self, "time_modified")
@property
@pulumi.getter
def type(self) -> str:
"""
Azure resource type
"""
return pulumi.get(self, "type")
class AwaitableGetQueryPackResult(GetQueryPackResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetQueryPackResult(
id=self.id,
location=self.location,
name=self.name,
provisioning_state=self.provisioning_state,
query_pack_id=self.query_pack_id,
METHOD_NAME=self.METHOD_NAME,
time_created=self.time_created,
time_modified=self.time_modified,
type=self.type)
def get_query_pack(query_pack_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetQueryPackResult:
"""
Returns a Log Analytics QueryPack.
:param str query_pack_name: The name of the Log Analytics QueryPack resource.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
__args__ = dict()
__args__['queryPackName'] = query_pack_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:operationalinsights/v20190901preview:getQueryPack', __args__, opts=opts, typ=GetQueryPackResult).value
return AwaitableGetQueryPackResult(
id=pulumi.get(__ret__, 'id'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
query_pack_id=pulumi.get(__ret__, 'query_pack_id'),
METHOD_NAME=pulumi.get(__ret__, 'tags'),
time_created=pulumi.get(__ret__, 'time_created'),
time_modified=pulumi.get(__ret__, 'time_modified'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_query_pack)
def get_query_pack_output(query_pack_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetQueryPackResult]:
"""
Returns a Log Analytics QueryPack.
:param str query_pack_name: The name of the Log Analytics QueryPack resource.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
... |
298,714 | configure underlying network | from lnst.Controller import HostReq, DeviceReq, RecipeParam
from lnst.Common.IpAddress import (
AF_INET,
Ip4Address,
Ip6Address,
interface_addresses,
)
from lnst.Common.Parameters import (
Param,
IPv4NetworkParam,
)
from lnst.Devices import GreDevice, MacvlanDevice
from lnst.RecipeCommon.Ping.PingEndpoints import PingEndpoints
from lnst.RecipeCommon.PacketAssert import PacketAssertConf
from lnst.Recipes.ENRT.BaseTunnelRecipe import BaseTunnelRecipe
from lnst.Recipes.ENRT.ConfigMixins.MTUHWConfigMixin import (
MTUHWConfigMixin,
)
from lnst.Recipes.ENRT.ConfigMixins.OffloadSubConfigMixin import (
OffloadSubConfigMixin,
)
from lnst.Recipes.ENRT.ConfigMixins.PauseFramesHWConfigMixin import (
PauseFramesHWConfigMixin,
)
class GreTunnelOverMacvlanRecipe(
MTUHWConfigMixin, PauseFramesHWConfigMixin, OffloadSubConfigMixin, BaseTunnelRecipe
):
"""
This class implements a recipe that configures a GRE tunnel between
two hosts that are connected through a macvlan device.
.. code-block:: none
.--------.
.------| switch |-----.
| '--------' |
| |
.-------|------. .-------|------.
| .--'-. | | .--'-. |
| |eth0| | | |eth0| |
| '----' | | '----' |
| | | | | |
| macvlan0 | | macvlan0 |
| | | | | | | |
| | | | | | | |
| ----' '--- | | ----' '--- |
| gre tunnel | | gre tunnel |
| ---------- | | ---------- |
| | | |
| host1 | | host2 |
'--------------' '--------------'
The actual test machinery is implemented in the :any:`BaseEnrtRecipe` class.
The test wide configuration is implemented in the :any:`BaseTunnelRecipe`
class.
"""
host1 = HostReq()
host1.eth0 = DeviceReq(label="net1", driver=RecipeParam("driver"))
host2 = HostReq()
host2.eth0 = DeviceReq(label="net1", driver=RecipeParam("driver"))
offload_combinations = Param(
default=(
dict(gro="on", gso="on", tso="on"),
dict(gro="off", gso="on", tso="on"),
dict(gro="on", gso="off", tso="off"),
dict(gro="on", gso="on", tso="off"),
)
)
net_ipv4 = IPv4NetworkParam(default="192.168.101.0/24")
def METHOD_NAME(self, configuration):
"""
The underlying network for the tunnel consists of two MACVLAN
devices configured on top of the Ethernet devices on the matched hosts.
"""
host1, host2 = self.matched.host1, self.matched.host2
host1.macvlan10 = MacvlanDevice(realdev=host1.eth0, hwaddr="0A:00:00:00:00:01")
host2.macvlan10 = MacvlanDevice(realdev=host2.eth0, hwaddr="0A:00:00:00:00:02")
ipv4_addr = interface_addresses(self.params.net_ipv4)
for device in [host1.macvlan10, host2.macvlan10]:
device.ip_add(next(ipv4_addr))
configuration.test_wide_devices.append(device)
for dev in [
host1.eth0,
host1.macvlan10,
host2.eth0,
host2.macvlan10,
]:
dev.up()
configuration.tunnel_endpoints = (host1.macvlan10, host2.macvlan10)
def create_tunnel(self, configuration):
"""
The GRE tunnel devices are configured with local and remote ip addresses
matching the MACVLAN device IP addresses.
The GRE tunnel devices are configured with IPv4 and IPv6 addresses
of individual networks. Routes are configured accordingly.
"""
endpoint1, endpoint2 = configuration.tunnel_endpoints
m1 = endpoint1.netns
m2 = endpoint2.netns
ip_filter = {"family": AF_INET}
endpoint1_ip = endpoint1.ips_filter(**ip_filter)[0]
endpoint2_ip = endpoint2.ips_filter(**ip_filter)[0]
a_ip4 = Ip4Address("192.168.6.2/24")
a_net4 = "192.168.6.0/24"
b_ip4 = Ip4Address("192.168.7.2/24")
b_net4 = "192.168.7.0/24"
a_ip6 = Ip6Address("6001:db8:ac10:fe01::2/64")
a_net6 = "6001:db8:ac10:fe01::0/64"
b_ip6 = Ip6Address("7001:db8:ac10:fe01::2/64")
b_net6 = "7001:db8:ac10:fe01::0/64"
m1.gre_tunnel = GreDevice(local=endpoint1_ip, remote=endpoint2_ip)
m2.gre_tunnel = GreDevice(local=endpoint2_ip, remote=endpoint1_ip)
# A
m1.gre_tunnel.up()
m1.gre_tunnel.ip_add(a_ip4)
m1.gre_tunnel.ip_add(a_ip6)
m1.run("ip -4 route add {} dev {}".format(b_net4, m1.gre_tunnel.name))
m1.run("ip -6 route add {} dev {}".format(b_net6, m1.gre_tunnel.name))
# B
m2.gre_tunnel.up()
m2.gre_tunnel.ip_add(b_ip4)
m2.gre_tunnel.ip_add(b_ip6)
m2.run("ip -4 route add {} dev {}".format(a_net4, m2.gre_tunnel.name))
m2.run("ip -6 route add {} dev {}".format(a_net6, m2.gre_tunnel.name))
configuration.tunnel_devices.extend([m1.gre_tunnel, m2.gre_tunnel])
self.wait_tentative_ips(configuration.tunnel_devices)
def generate_ping_endpoints(self, config):
"""
The ping endpoints for this recipe are simply the tunnel endpoints
Returned as::
[PingEndpoints(self.matched.host1.gre_tunnel, self.matched.host2.gre_tunnel)]
"""
return [
PingEndpoints(self.matched.host1.gre_tunnel, self.matched.host2.gre_tunnel)
]
def get_packet_assert_config(self, ping_config):
"""
The packet assert test configuration contains a filter for the GRE protocol
and grep patterns to match the ICMP or ICMP6 echo requests.
"""
ip_filter = {"family": AF_INET}
m1_carrier = self.matched.host1.macvlan10
m2_carrier = self.matched.host2.macvlan10
m1_carrier_ip = m1_carrier.ips_filter(**ip_filter)[0]
m2_carrier_ip = m2_carrier.ips_filter(**ip_filter)[0]
ip1 = ping_config.client_bind
ip2 = ping_config.destination_address
pa_kwargs = {}
pa_kwargs["p_filter"] = "proto gre"
if isinstance(ip2, Ip4Address):
pat1 = "{} > {}: GREv0, .* IP {} > {}: ICMP echo request".format(
m1_carrier_ip, m2_carrier_ip, ip1, ip2
)
pat2 = "{} > {}: GREv0 \| {} > {}: ICMP echo request".format(
m1_carrier_ip, m2_carrier_ip, ip1, ip2
)
grep_pattern = ["({})|({})".format(pat1, pat2)]
elif isinstance(ip2, Ip6Address):
pat1 = "{} > {}: GREv0, .* IP6 {} > {}: ICMP6, echo request".format(
m1_carrier_ip, m2_carrier_ip, ip1, ip2
)
pat2 = "{} > {}: GREv0 \| {} > {}: ICMP6, echo request".format(
m1_carrier_ip, m2_carrier_ip, ip1, ip2
)
grep_pattern = ["({})|({})".format(pat1, pat2)]
else:
raise Exception("The destination address is nor IPv4 or IPv6 address")
pa_kwargs["grep_for"] = grep_pattern
if ping_config.count:
pa_kwargs["p_min"] = ping_config.count
m2 = ping_config.destination
pa_config = PacketAssertConf(m2, m2_carrier, **pa_kwargs)
return pa_config
@property
def offload_nics(self):
return [self.matched.host1.eth0, self.matched.host2.eth0]
@property
def pause_frames_dev_list(self):
return [self.matched.host1.eth0, self.matched.host2.eth0]
@property
def mtu_hw_config_dev_list(self):
return [self.matched.host1.gre_tunnel, self.matched.host2.gre_tunnel] |
298,715 | component specs | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An Optional type for representing potentially missing values."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.data.util import structure
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.Optional")
@six.add_metaclass(abc.ABCMeta)
class Optional(composite_tensor.CompositeTensor):
"""Wraps a value that may/may not be present at runtime.
An `Optional` can represent the result of an operation that may fail as a
value, rather than raising an exception and halting execution. For example,
`tf.data.experimental.get_next_as_optional` returns an `Optional` that either
contains the next value from a `tf.compat.v1.data.Iterator` if one exists, or
a "none" value that indicates the end of the sequence has been reached.
`Optional` can only be used by values that are convertible to `Tensor` or
`CompositeTensor`.
"""
@abc.abstractmethod
def has_value(self, name=None):
"""Returns a tensor that evaluates to `True` if this optional has a value.
Args:
name: (Optional.) A name for the created operation.
Returns:
A scalar `tf.Tensor` of type `tf.bool`.
"""
raise NotImplementedError("Optional.has_value()")
@abc.abstractmethod
def get_value(self, name=None):
"""Returns the value wrapped by this optional.
If this optional does not have a value (i.e. `self.has_value()` evaluates
to `False`), this operation will raise `tf.errors.InvalidArgumentError`
at runtime.
Args:
name: (Optional.) A name for the created operation.
Returns:
The wrapped value.
"""
raise NotImplementedError("Optional.get_value()")
@abc.abstractproperty
def value_structure(self):
"""The structure of the components of this optional.
Returns:
A `Structure` object representing the structure of the components of this
optional.
"""
raise NotImplementedError("Optional.value_structure")
@staticmethod
def from_value(value):
"""Returns an `Optional` that wraps the given value.
Args:
value: A value to wrap. The value must be convertible to `Tensor` or
`CompositeTensor`.
Returns:
An `Optional` that wraps `value`.
"""
with ops.name_scope("optional") as scope:
with ops.name_scope("value"):
value_structure = structure.type_spec_from_value(value)
encoded_value = structure.to_tensor_list(value_structure, value)
return _OptionalImpl(
gen_dataset_ops.optional_from_value(encoded_value, name=scope),
value_structure)
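# Illustrative usage sketch (not part of the original module; assumes some
# tensor `t`): `Optional.from_value(t)` yields an optional whose
# `has_value()` evaluates to True and whose `get_value()` returns a value
# with the same structure as `t`, while
# `Optional.none_from_structure(structure.type_spec_from_value(t))` yields
# one whose `has_value()` evaluates to False.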
@staticmethod
def none_from_structure(value_structure):
"""Returns an `Optional` that has no value.
NOTE: This method takes an argument that defines the structure of the value
that would be contained in the returned `Optional` if it had a value.
Args:
value_structure: A `Structure` object representing the structure of the
components of this optional.
Returns:
An `Optional` that has no value.
"""
return _OptionalImpl(gen_dataset_ops.optional_none(), value_structure)
class _OptionalImpl(Optional):
"""Concrete implementation of `tf.data.experimental.Optional`.
NOTE(mrry): This implementation is kept private, to avoid defining
`Optional.__init__()` in the public API.
"""
def __init__(self, variant_tensor, value_structure):
self._variant_tensor = variant_tensor
self._value_structure = value_structure
def has_value(self, name=None):
return gen_dataset_ops.optional_has_value(self._variant_tensor, name=name)
def get_value(self, name=None):
# TODO(b/110122868): Consolidate the restructuring logic with similar logic
# in `Iterator.get_next()` and `StructuredFunctionWrapper`.
with ops.name_scope(name, "OptionalGetValue",
[self._variant_tensor]) as scope:
return structure.from_tensor_list(
self._value_structure,
gen_dataset_ops.optional_get_value(
self._variant_tensor,
name=scope,
output_types=structure.get_flat_tensor_types(
self._value_structure),
output_shapes=structure.get_flat_tensor_shapes(
self._value_structure)))
@property
def value_structure(self):
return self._value_structure
@property
def _type_spec(self):
return OptionalSpec.from_value(self)
@tf_export(
"OptionalSpec", v1=["OptionalSpec", "data.experimental.OptionalStructure"])
class OptionalSpec(type_spec.TypeSpec):
"""Represents an optional potentially containing a structured value."""
__slots__ = ["_value_structure"]
def __init__(self, value_structure):
self._value_structure = value_structure
@property
def value_type(self):
return _OptionalImpl
def _serialize(self):
return (self._value_structure,)
@property
def METHOD_NAME(self):
return [tensor_spec.TensorSpec((), dtypes.variant)]
def _to_components(self, value):
return [value._variant_tensor] # pylint: disable=protected-access
def _from_components(self, flat_value):
# pylint: disable=protected-access
return _OptionalImpl(flat_value[0], self._value_structure)
@staticmethod
def from_value(value):
return OptionalSpec(value.value_structure)
def _to_legacy_output_types(self):
return self
def _to_legacy_output_shapes(self):
return self
def _to_legacy_output_classes(self):
return self |
298,716 | run games and record payoffs | # Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for computing gradient information: run games and record payoffs.
"""
import itertools
from absl import logging # pylint:disable=unused-import
import numpy as np
def construct_game_queries(base_profile, num_checkpts):
"""Constructs a list of checkpoint selection tuples to query value function.
Each query tuple (key, query) where key = (pi, pj) and query is
(p1's selected checkpt, ..., p7's selected checkpt) fixes the players in the
game of diplomacy to be played. It may be necessary to play several games with
the same players to form an accurate estimate of the value or payoff for each
player as checkpts contain stochastic policies.
Args:
base_profile: list of selected checkpts for each player, i.e.,
a sample from the player strategy profile ([x_i ~ p(x_i)])
num_checkpts: list of ints, number of strats (or ckpts) per player
Returns:
Set of query tuples containing a selected checkpoint index for each player.
"""
new_queries = set([])
num_players = len(base_profile)
for pi, pj in itertools.combinations(range(num_players), 2):
new_profile = list(base_profile)
for ai in range(num_checkpts[pi]):
new_profile[pi] = ai
for aj in range(num_checkpts[pj]):
new_profile[pj] = aj
query = tuple(new_profile)
pair = (pi, pj)
new_queries.update([(pair, query)])
return new_queries
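# Illustrative sketch (hypothetical inputs, not from the original module):
# with base_profile=[0, 0, 0] and num_checkpts=[2, 2, 2], the returned set
# contains entries such as ((0, 1), (1, 0, 0)) and ((0, 1), (1, 1, 0)) --
# the pair (0, 1) is varied over all of its checkpoint combinations while
# player 2 keeps its base checkpoint -- and likewise for pairs (0, 2) and (1, 2).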
def construct_game_queries_for_exp(base_profile, num_checkpts):
"""Constructs a list of checkpoint selection tuples to query value function.
Each query tuple (key, query) where key = (pi,) and query is
(p1's selected checkpt, ..., p7's selected checkpt) fixes the players in the
game of diplomacy to be played. It may be necessary to play several games with
the same players to form an accurate estimate of the value or payoff for each
player as checkpts contain stochastic policies.
Args:
base_profile: list of selected checkpts for each player, i.e.,
a sample from the player strategy profile ([x_i ~ p(x_i)])
num_checkpts: list of ints, number of strats (or ckpts) per player
Returns:
Set of query tuples containing a selected checkpoint index for each player.
"""
new_queries = set([])
num_players = len(base_profile)
for pi in range(num_players):
new_profile = list(base_profile)
for ai in range(num_checkpts[pi]):
new_profile[pi] = ai
query = tuple(new_profile)
new_queries.update([(pi, query)])
return new_queries
def METHOD_NAME(game_queries, evaluate_game, ckpt_to_policy):
"""Simulate games according to game queries and return results.
Args:
game_queries: set of tuples containing indices specifying each players strat
key_query = (agent_tuple, profile_tuple) format
evaluate_game: callable function that takes a list of policies as argument
ckpt_to_policy: list of maps from strat (or checkpoint) to a policy, one
map for each player
Returns:
dictionary: key=key_query, value=np.array of payoffs (1 for each player)
"""
game_results = {}
for key_query in game_queries:
_, query = key_query
policies = [ckpt_to_policy[pi][ckpt_i] for pi, ckpt_i in enumerate(query)]
payoffs = evaluate_game(policies)
game_results.update({key_query: payoffs})
return game_results
def form_payoff_matrices(game_results, num_checkpts):
"""Packages dictionary of game results into a payoff tensor.
Args:
game_results: dictionary of payoffs for each game evaluated, keys are
(pair, profile) where pair is a tuple of the two agents played against
each other and profile indicates pure joint action played by all agents
num_checkpts: list of ints, number of strats (or ckpts) per player
Returns:
payoff_matrices: dict of np.arrays (2 x num_checkpts x num_checkpts) with
payoffs for two players. keys are pairs above with lowest index agent
first
"""
num_players = len(num_checkpts)
payoff_matrices = {}
for pi, pj in itertools.combinations(range(num_players), 2):
key = (pi, pj)
payoff_matrices[key] = np.zeros((2, num_checkpts[pi], num_checkpts[pj]))
for key_profile, payoffs in game_results.items():
key, profile = key_profile
i, j = key
ai = profile[i]
aj = profile[j]
payoff_matrices[key][0, ai, aj] = payoffs[i]
payoff_matrices[key][1, ai, aj] = payoffs[j]
return payoff_matrices |
298,717 | test global ndarray | import numpy as np
from numba import jit, njit, errors
from numba.extending import register_jitable
from numba.tests import usecases
import unittest
X = np.arange(10)
def global_ndarray_func(x):
y = x + X.shape[0]
return y
# Create complex array with real and imaginary parts of distinct value
cplx_X = np.arange(10, dtype=np.complex128)
tmp = np.arange(10, dtype=np.complex128)
cplx_X += (tmp+10)*1j
def global_cplx_arr_copy(a):
for i in range(len(a)):
a[i] = cplx_X[i]
# Create a recarray with fields of distinct value
x_dt = np.dtype([('a', np.int32), ('b', np.float32)])
rec_X = np.recarray(10, dtype=x_dt)
for i in range(len(rec_X)):
rec_X[i].a = i
rec_X[i].b = i + 0.5
def global_rec_arr_copy(a):
for i in range(len(a)):
a[i] = rec_X[i]
def global_rec_arr_extract_fields(a, b):
for i in range(len(a)):
a[i] = rec_X[i].a
b[i] = rec_X[i].b
# Create additional global recarray
y_dt = np.dtype([('c', np.int16), ('d', np.float64)])
rec_Y = np.recarray(10, dtype=y_dt)
for i in range(len(rec_Y)):
rec_Y[i].c = i + 10
rec_Y[i].d = i + 10.5
def global_two_rec_arrs(a, b, c, d):
for i in range(len(a)):
a[i] = rec_X[i].a
b[i] = rec_X[i].b
c[i] = rec_Y[i].c
d[i] = rec_Y[i].d
# Test a global record
record_only_X = np.recarray(1, dtype=x_dt)[0]
record_only_X.a = 1
record_only_X.b = 1.5
@jit(nopython=True)
def global_record_func(x):
return x.a == record_only_X.a
@jit(nopython=True)
def global_module_func(x, y):
return usecases.andornopython(x, y)
# Test a global tuple
tup_int = (1, 2)
tup_str = ('a', 'b')
tup_mixed = (1, 'a')
tup_float = (1.2, 3.5)
tup_npy_ints = (np.uint64(12), np.int8(3))
tup_tup_array = ((np.ones(5),),)
mixed_tup_tup_array = (('Z', np.ones(5),), 2j, 'A')
def global_int_tuple():
return tup_int[0] + tup_int[1]
def global_str_tuple():
return tup_str[0] + tup_str[1]
def global_mixed_tuple():
idx = tup_mixed[0]
field = tup_mixed[1]
return rec_X[idx][field]
def global_float_tuple():
return tup_float[0] + tup_float[1]
def global_npy_int_tuple():
return tup_npy_ints[0] + tup_npy_ints[1]
def global_write_to_arr_in_tuple():
tup_tup_array[0][0][0] = 10.
def global_write_to_arr_in_mixed_tuple():
mixed_tup_tup_array[0][1][0] = 10.
_glbl_np_bool_T = np.bool_(True)
_glbl_np_bool_F = np.bool_(False)
@register_jitable # consumer function
def _sink(*args):
pass
def global_npy_bool():
_sink(_glbl_np_bool_T, _glbl_np_bool_F)
return _glbl_np_bool_T, _glbl_np_bool_F
class TestGlobals(unittest.TestCase):
def check_global_ndarray(self, **jitargs):
# (see github issue #448)
ctestfunc = jit(**jitargs)(global_ndarray_func)
self.assertEqual(ctestfunc(1), 11)
def METHOD_NAME(self):
# This also checks we can access an unhashable global value
# (see issue #697)
self.check_global_ndarray(forceobj=True)
def test_global_ndarray_npm(self):
self.check_global_ndarray(nopython=True)
def check_global_complex_arr(self, **jitargs):
# (see github issue #897)
ctestfunc = jit(**jitargs)(global_cplx_arr_copy)
arr = np.zeros(len(cplx_X), dtype=np.complex128)
ctestfunc(arr)
np.testing.assert_equal(arr, cplx_X)
def test_global_complex_arr(self):
self.check_global_complex_arr(forceobj=True)
def test_global_complex_arr_npm(self):
self.check_global_complex_arr(nopython=True)
def check_global_rec_arr(self, **jitargs):
# (see github issue #897)
ctestfunc = jit(**jitargs)(global_rec_arr_copy)
arr = np.zeros(rec_X.shape, dtype=x_dt)
ctestfunc(arr)
np.testing.assert_equal(arr, rec_X)
def test_global_rec_arr(self):
self.check_global_rec_arr(forceobj=True)
def test_global_rec_arr_npm(self):
self.check_global_rec_arr(nopython=True)
def check_global_rec_arr_extract(self, **jitargs):
# (see github issue #897)
ctestfunc = jit(**jitargs)(global_rec_arr_extract_fields)
arr1 = np.zeros(rec_X.shape, dtype=np.int32)
arr2 = np.zeros(rec_X.shape, dtype=np.float32)
ctestfunc(arr1, arr2)
np.testing.assert_equal(arr1, rec_X.a)
np.testing.assert_equal(arr2, rec_X.b)
def test_global_rec_arr_extract(self):
self.check_global_rec_arr_extract(forceobj=True)
def test_global_rec_arr_extract_npm(self):
self.check_global_rec_arr_extract(nopython=True)
def check_two_global_rec_arrs(self, **jitargs):
# (see github issue #897)
ctestfunc = jit(**jitargs)(global_two_rec_arrs)
arr1 = np.zeros(rec_X.shape, dtype=np.int32)
arr2 = np.zeros(rec_X.shape, dtype=np.float32)
arr3 = np.zeros(rec_Y.shape, dtype=np.int16)
arr4 = np.zeros(rec_Y.shape, dtype=np.float64)
ctestfunc(arr1, arr2, arr3, arr4)
np.testing.assert_equal(arr1, rec_X.a)
np.testing.assert_equal(arr2, rec_X.b)
np.testing.assert_equal(arr3, rec_Y.c)
np.testing.assert_equal(arr4, rec_Y.d)
def test_two_global_rec_arrs(self):
self.check_two_global_rec_arrs(forceobj=True)
def test_two_global_rec_arrs_npm(self):
self.check_two_global_rec_arrs(nopython=True)
def test_global_module(self):
# (see github issue #1059)
res = global_module_func(5, 6)
self.assertEqual(True, res)
def test_global_record(self):
# (see github issue #1081)
x = np.recarray(1, dtype=x_dt)[0]
x.a = 1
res = global_record_func(x)
self.assertEqual(True, res)
x.a = 2
res = global_record_func(x)
self.assertEqual(False, res)
def test_global_int_tuple(self):
pyfunc = global_int_tuple
jitfunc = njit(pyfunc)
self.assertEqual(pyfunc(), jitfunc())
def test_global_str_tuple(self):
pyfunc = global_str_tuple
jitfunc = njit(pyfunc)
self.assertEqual(pyfunc(), jitfunc())
def test_global_mixed_tuple(self):
pyfunc = global_mixed_tuple
jitfunc = njit(pyfunc)
self.assertEqual(pyfunc(), jitfunc())
def test_global_float_tuple(self):
pyfunc = global_float_tuple
jitfunc = njit(pyfunc)
self.assertEqual(pyfunc(), jitfunc())
def test_global_npy_int_tuple(self):
pyfunc = global_npy_int_tuple
jitfunc = njit(pyfunc)
self.assertEqual(pyfunc(), jitfunc())
def test_global_write_to_arr_in_tuple(self):
# Test writing to an array in a global tuple
# See issue https://github.com/numba/numba/issues/7120
for func in (global_write_to_arr_in_tuple,
global_write_to_arr_in_mixed_tuple):
jitfunc = njit(func)
with self.assertRaises(errors.TypingError) as e:
jitfunc()
msg = "Cannot modify readonly array of type:"
self.assertIn(msg, str(e.exception))
def test_global_npy_bool(self):
# Test global NumPy bool
# See issue https://github.com/numba/numba/issues/6979
pyfunc = global_npy_bool
jitfunc = njit(pyfunc)
self.assertEqual(pyfunc(), jitfunc())
if __name__ == '__main__':
unittest.main() |
298,718 | test log clear | import logging
from click.testing import CliRunner
from maestral.main import Maestral
from maestral.cli import main
from maestral.autostart import AutoStart
from maestral.notify import level_number_to_name, level_name_to_number
from maestral.daemon import MaestralProxy, start_maestral_daemon_process, Start
from maestral.logging import scoped_logger
from maestral.utils.appdirs import get_log_path
TEST_TIMEOUT = 60
def test_help() -> None:
"""Test help output without args and with --help arg."""
runner = CliRunner()
result_no_arg = runner.invoke(main)
result_help_arg = runner.invoke(main, ["--help"])
assert result_no_arg.exit_code == 0, result_no_arg.output
assert result_no_arg.output.startswith("Usage: main [OPTIONS] COMMAND [ARGS]")
assert result_no_arg.output == result_help_arg.output
def test_invalid_config() -> None:
"""Test failure of commands that require an existing config file"""
for command in [
("stop",),
("pause",),
("resume",),
("auth", "status"),
("auth", "unlink"),
("sharelink", "create"),
("sharelink", "list"),
("sharelink", "revoke"),
("status",),
("filestatus",),
("activity",),
("history",),
("ls",),
("autostart",),
("excluded", "add"),
("excluded", "list"),
("excluded", "remove"),
("notify", "level"),
("notify", "snooze"),
("move-dir",),
("rebuild-index",),
("revs",),
("diff",),
("restore",),
("log", "level"),
("log", "clear"),
("log", "show"),
("config", "get", "path"),
("config", "set", "path"),
("config", "show"),
]:
runner = CliRunner()
result = runner.invoke(main, [*command, "-c", "non-existent-config"])
assert result.exit_code == 1, command
assert (
result.output == "! Configuration 'non-existent-config' does not exist. "
"Use 'maestral config-files' to list all configurations.\n"
)
def test_start_already_running(config_name: str) -> None:
res = start_maestral_daemon_process(config_name, timeout=TEST_TIMEOUT)
assert res is Start.Ok
runner = CliRunner()
result = runner.invoke(main, ["start", "-c", config_name])
assert result.exit_code == 0, result.output
assert "already running" in result.output
def test_stop(config_name: str) -> None:
res = start_maestral_daemon_process(config_name, timeout=TEST_TIMEOUT)
assert res is Start.Ok
runner = CliRunner()
result = runner.invoke(main, ["stop", "-c", config_name])
assert result.exit_code == 0, result.output
def test_filestatus(m: Maestral) -> None:
runner = CliRunner()
result = runner.invoke(main, ["filestatus", "/usr", "-c", m.config_name])
assert result.exit_code == 0, result.output
assert result.output == "unwatched\n"
result = runner.invoke(main, ["filestatus", "/invalid-dir", "-c", m.config_name])
# the exception will already be raised by click's argument check
assert result.exit_code == 2
assert isinstance(result.exception, SystemExit)
assert "'/invalid-dir' does not exist" in result.output
def test_autostart(m: Maestral) -> None:
autostart = AutoStart(m.config_name)
autostart.disable()
runner = CliRunner()
result = runner.invoke(main, ["autostart", "-c", m.config_name])
assert result.exit_code == 0, result.output
assert "disabled" in result.output
result = runner.invoke(main, ["autostart", "-Y", "-c", m.config_name])
if autostart.implementation:
if result.exit_code == 0:
assert "Enabled" in result.output
assert autostart.enabled
else:
# TODO: be more specific here
assert result.exception is not None
else:
assert "not supported" in result.output
assert not autostart.enabled
result = runner.invoke(main, ["autostart", "-N", "-c", m.config_name])
assert result.exit_code == 0, result.output
assert "Disabled" in result.output
assert not autostart.enabled
def test_excluded_list(m: Maestral) -> None:
runner = CliRunner()
result = runner.invoke(main, ["excluded", "list", "-c", m.config_name])
assert result.exit_code == 0, result.output
assert result.output == "No excluded files or folders.\n"
def test_notify_level(config_name: str) -> None:
start_maestral_daemon_process(config_name, timeout=TEST_TIMEOUT)
m = MaestralProxy(config_name)
runner = CliRunner()
result = runner.invoke(main, ["notify", "level", "-c", m.config_name])
level_name = level_number_to_name(m.notification_level)
assert result.exit_code == 0, result.output
assert level_name in result.output
level_name = "SYNCISSUE"
level_number = level_name_to_number(level_name)
result = runner.invoke(main, ["notify", "level", level_name, "-c", m.config_name])
assert result.exit_code == 0, result.output
assert level_name in result.output
assert m.notification_level == level_number
result = runner.invoke(main, ["notify", "level", "INVALID", "-c", m.config_name])
assert result.exit_code == 2
assert isinstance(result.exception, SystemExit)
def test_notify_snooze(config_name: str) -> None:
start_maestral_daemon_process(config_name, timeout=TEST_TIMEOUT)
m = MaestralProxy(config_name)
runner = CliRunner()
result = runner.invoke(main, ["notify", "snooze", "20", "-c", m.config_name])
assert result.exit_code == 0, result.output
assert 0 < m.notification_snooze <= 20
result = runner.invoke(main, ["notify", "snooze", "0", "-c", m.config_name])
assert result.exit_code == 0, result.output
assert m.notification_snooze == 0
def test_log_level(m: Maestral) -> None:
runner = CliRunner()
result = runner.invoke(main, ["log", "level", "-c", m.config_name])
level_name = logging.getLevelName(m.log_level)
assert result.exit_code == 0, result.output
assert level_name in result.output
result = runner.invoke(main, ["log", "level", "DEBUG", "-c", m.config_name])
assert result.exit_code == 0, result.output
assert "DEBUG" in result.output
result = runner.invoke(main, ["notify", "level", "INVALID", "-c", m.config_name])
assert result.exit_code == 2
assert isinstance(result.exception, SystemExit)
def test_log_show(m: Maestral) -> None:
# log a message
logger = scoped_logger("maestral", m.config_name)
logger.info("Hello from pytest!")
runner = CliRunner()
result = runner.invoke(main, ["log", "show", "-c", m.config_name])
assert result.exit_code == 0, result.output
assert "Hello from pytest!" in result.output
def METHOD_NAME(m: Maestral) -> None:
# log a message
logger = scoped_logger("maestral", m.config_name)
logger.info("Hello from pytest!")
runner = CliRunner()
result = runner.invoke(main, ["log", "show", "-c", m.config_name])
assert result.exit_code == 0, result.output
assert "Hello from pytest!" in result.output
# Stop connection helper to prevent spurious log messages.
m.manager._connection_helper_running = False
m.manager.connection_helper.join()
# clear the logs
result = runner.invoke(main, ["log", "clear", "-c", m.config_name])
assert result.exit_code == 0, result.output
logfile = get_log_path("maestral", f"{m.config_name}.log")
with open(logfile) as f:
log_content = f.read()
assert log_content == "" |
298,719 | is etal | """Produces search service query strings for authors."""
import re
from typing import List, Tuple, Union
from arxiv.util.tex2utf import tex2utf
from arxiv.util.authors import split_authors, PREFIX_MATCH
AuthorList = List[Union[str, Tuple[str, str]]]
"""Type alias for list of authors or strings that is used to display
the author list.
"""
def is_affiliation(item: str) -> bool:
"""Return true if a string contains an affiliation."""
return item.startswith('(')
def is_short(item: str) -> bool:
"""Return true if the length of string is less than 4 characters long."""
return len(item) < 4
def METHOD_NAME(item: str) -> bool:
"""Return true if the string contains et al."""
return re.match(r'et\.? al\.?$', item) is not None
def is_divider(item: str) -> bool:
"""Return true if the string contains a divider character."""
return re.match(r'^(,|:)', item) is not None
def split_long_author_list(
authors: AuthorList, size: int) -> Tuple[AuthorList, AuthorList, int]:
"""Return two lists: first is of size, second is the remaining authors.
The author list has strings which are not part of the author
names, but commas between them to preserve the formatting that the
submitter used.
This function is used to split the list base on name count, not
just list element count.
"""
front = []
back = []
count = 0
back_count = 0
for item in authors:
if count >= size:
back.append(item)
if isinstance(item, tuple):
back_count = back_count + 1
else:
front.append(item)
if isinstance(item, tuple):
count = count + 1
# handle case where back doesn't have much ARXIVNG-2083
authors_in_back = len(list(filter(lambda x: isinstance(x, tuple), back)))
if authors_in_back < 2:
front = front + back
back = []
back_count = 0
return front, back, back_count
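# Sketch of the intended behaviour (hypothetical data, not from the original
# module): only tuple entries count towards `size`; bare strings (commas,
# affiliations) ride along with the author they follow. With three authors
# and size=2, only one author would land in `back`, so the function folds
# everything back into `front`:
#
#   authors = [("A. One", "One, A"), ", ", ("B. Two", "Two, B"), ", ",
#              ("C. Three", "Three, C")]
#   front, back, back_count = split_long_author_list(authors, 2)
#   # front == authors, back == [], back_count == 0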
def queries_for_authors(authors: str) -> AuthorList:
"""Make search service query strings for authors.
The main challenge here is that the HTML output of this should match as
closely as possible the string input by the submitter.
Takes the authors string from document metadata or a .abs file, splits it,
and returns a structure of [ str|(name_text, author_search_query_str)...]
If the item in the list is just a string, it should just be placed in the
HTML output since it is something like whitespace, a comma or 'for the' or
a colon.
If a list item is a tuple, author_search_query_str will be something like
"Webb J E" which can be used to query the search service.
name_text will be the text to put inside the <a> tag, such as
"James E. Webb,"
DO resolve tex to UTF8 in both the link and text.
DON'T URL_encode, do that in template
DON'T do entities, do that in template
DON'T escape utf8 for HTML, just return utf8
"""
out: AuthorList = []
splits: List[str] = split_authors(authors)
for item in splits:
if is_divider(item):
out.append(item + ' ')
elif is_affiliation(item):
out.append(' ' + tex2utf(item))
elif is_short(item) or METHOD_NAME(item):
out.append(item)
else:
out = [*out, *_link_for_name_or_collab(item)]
return out
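# Illustrative sketch (hypothetical input, not from the original module):
# plain strings in the result are rendered as-is, while tuples pair the
# display text with a search query string, e.g. an entry roughly like
# ("James E. Webb", "Webb, J E") for a name and a bare ", " for the
# separator; the exact split depends on `split_authors`.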
def _link_for_name_or_collab(item: str) -> AuthorList:
out: List[Union[str, Tuple[str, str]]] = []
# deal with 'for the _whatever_' or 'for _whatever_' or 'the'
not_linked = re.match(r'\s*((for\s+the\s+)|(the\s+))(?P<rest>.*)',
item, flags=re.IGNORECASE)
if not_linked:
out.append(not_linked.group(1))
item = not_linked.group('rest')
item = tex2utf(item)
item = re.sub(r'\.(?!) ', '.', item)
item = re.sub(r'\\(,| )', ' ', item)
item = re.sub(r'([^\\])~', r'\1', item)
item = re.sub(r',\s*', ' ', item)
colab_m = re.match(r'^(.+)\s+(collaboration|group|team)(\s?.*)',
item, re.IGNORECASE)
if colab_m:
colab = f'{colab_m.group(1)} {colab_m.group(2)}'
out.append((item, colab))
return out
the_m = re.match('the (.*)', item, re.IGNORECASE)
if the_m:
out.append((item, the_m.group(1)))
return out
# else we'll treat it as a name
name_bits = item.split()
if len(name_bits) == 0:
query_str = item
else:
# Do not include SJ, Jr, Sr, III, IV, etc. in search
if re.match(r'SJ|Jr|Sr|[IV]{2,}$', name_bits[-1]) \
and len(name_bits) > 1:
name_bits.pop()
surname = ''
if len(name_bits) > 0:
surname = name_bits.pop()
name_bit_count = 0
surname_prefixes = []
initials = []
found_prefix = False
for name_bit in name_bits:
name_bit_count += 1
if (found_prefix or (name_bit_count > 1
and re.match('^(' + PREFIX_MATCH + ')$',
name_bit, re.IGNORECASE))):
surname_prefixes.append(name_bit)
found_prefix = True
else:
initials.append(name_bit[0:1])
sur_initials = surname + ', ' + \
' '.join(initials) if initials else surname
query_str = ' '.join([*surname_prefixes, sur_initials])
out.append((item, query_str))
return out |
298,720 | get manager | """
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# pylint: disable=protected-access
from unittest import TestCase
from lte.protos.enodebd_pb2 import SingleEnodebStatus
from magma.enodebd.devices.device_utils import EnodebDeviceName
from magma.enodebd.enodeb_status import (
get_all_enb_status,
get_enb_status,
get_service_status_old,
get_single_enb_status,
)
from magma.enodebd.state_machines.enb_acs_manager import StateMachineManager
from magma.enodebd.tests.test_utils.enb_acs_builder import (
EnodebAcsStateMachineBuilder,
)
from magma.enodebd.tests.test_utils.spyne_builder import (
get_spyne_context_with_ip,
)
from magma.enodebd.tests.test_utils.tr069_msg_builder import Tr069MessageBuilder
class EnodebStatusTests(TestCase):
def test_get_service_status_old(self):
manager = self.METHOD_NAME()
status = get_service_status_old(manager)
self.assertTrue(
status['enodeb_connected'] == '0',
'Should report no eNB connected',
)
##### Start session for the first IP #####
ctx1 = get_spyne_context_with_ip("192.168.60.145")
# Send an Inform message, wait for an InformResponse
inform_msg = Tr069MessageBuilder.get_inform(
'48BF74',
'BaiBS_RTS_3.1.6',
'120200002618AGP0001',
)
manager.handle_tr069_message(ctx1, inform_msg)
status = get_service_status_old(manager)
self.assertTrue(
status['enodeb_connected'] == '1',
'Should report an eNB as connected',
)
self.assertTrue(
status['enodeb_serial'] == '120200002618AGP0001',
'eNodeB serial should match the earlier Inform',
)
def test_get_enb_status(self):
acs_state_machine = \
EnodebAcsStateMachineBuilder\
.build_acs_state_machine(EnodebDeviceName.BAICELLS)
try:
get_enb_status(acs_state_machine)
except KeyError:
self.fail(
'Getting eNB status should succeed after constructor '
'runs.',
)
def test_get_single_enb_status(self):
manager = self.METHOD_NAME()
ctx1 = get_spyne_context_with_ip("192.168.60.145")
inform_msg = Tr069MessageBuilder.get_inform(
'48BF74',
'BaiBS_RTS_3.1.6',
'120200002618AGP0001',
)
manager.handle_tr069_message(ctx1, inform_msg)
status = get_single_enb_status('120200002618AGP0001', manager)
self.assertEqual(
status.connected,
SingleEnodebStatus.StatusProperty.Value('ON'),
'Status should be connected.',
)
self.assertEqual(
status.configured,
SingleEnodebStatus.StatusProperty.Value('OFF'),
'Status should not be configured.',
)
def test_get_enodeb_all_status(self):
manager = self.METHOD_NAME()
##### Test Empty #####
enb_status_by_serial = get_all_enb_status(manager)
self.assertTrue(enb_status_by_serial == {}, "No eNB connected")
##### Start session for the first IP #####
ctx1 = get_spyne_context_with_ip("192.168.60.145")
# Send an Inform message, wait for an InformResponse
inform_msg = Tr069MessageBuilder.get_inform(
'48BF74',
'BaiBS_RTS_3.1.6',
'120200002618AGP0001',
)
manager.handle_tr069_message(ctx1, inform_msg)
enb_status_by_serial = get_all_enb_status(manager)
enb_status = enb_status_by_serial.get('120200002618AGP0001')
self.assertEqual(
enb_status.enodeb_connected,
SingleEnodebStatus.StatusProperty.Value('ON'),
'Status should be connected.',
)
def METHOD_NAME(self) -> StateMachineManager:
service = EnodebAcsStateMachineBuilder.build_magma_service()
return StateMachineManager(service) |
298,721 | torch export call | # Copyright (c) 2023 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import copy
from functools import partial
from typing import Any, Tuple
import torch
from torch.onnx import OperatorExportTypes
from nncf.common.exporter import Exporter
from nncf.common.logging import nncf_logger
from nncf.telemetry import tracked_function
from nncf.telemetry.events import NNCF_PT_CATEGORY
from nncf.torch.dynamic_graph.graph_tracer import create_dummy_forward_fn
from nncf.torch.dynamic_graph.graph_tracer import create_mock_tensor
from nncf.torch.nested_objects_traversal import objwalk
from nncf.torch.utils import get_model_device
from nncf.torch.utils import is_tensor
def generate_input_names_list(num_inputs: int):
return [f"input.{idx}" for idx in range(0, num_inputs)]
def generate_output_names_list(num_outputs: int):
return [f"output.{idx}" for idx in range(0, num_outputs)]
def count_tensors(model_retval: Any) -> int:
count = 0
def counter_fn(x: torch.Tensor) -> torch.Tensor:
nonlocal count
count += 1
return x
objwalk(model_retval, is_tensor, counter_fn)
return count
class PTExportFormat:
ONNX = "onnx"
class PTExporter(Exporter):
"""
This class provides export of the compressed model to the ONNX format.
"""
_ONNX_DEFAULT_OPSET = 13
@staticmethod
def parse_format(save_format: str) -> Tuple[str, dict]:
"""
Parse saving format to a short form and additional arguments.
:param save_format: Saving format.
:return:
str: short form of the save_format
dict: additional arguments for exporter
"""
if save_format.startswith(PTExportFormat.ONNX):
split_format = save_format.split("_")
opset = None
if len(split_format) == 1:
opset = PTExporter._ONNX_DEFAULT_OPSET
elif len(split_format) == 2:
opset = int(split_format[1])
if opset is not None and opset <= 0:
raise ValueError("Incorrect save_format, expected 'onnx' or 'onnx_<opset_version>'.")
if opset != PTExporter._ONNX_DEFAULT_OPSET:
nncf_logger.warning(
f"Exporting to ONNX opset {opset}, which is not guaranteed to work with NNCF. "
f"Recommended opset export version is {PTExporter._ONNX_DEFAULT_OPSET}."
)
return PTExportFormat.ONNX, {"opset_version": opset}
return save_format, {}
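# Sketch of the expected parsing behaviour (values inferred from the code
# above, not an authoritative reference):
#
#   PTExporter.parse_format("onnx")     # -> ("onnx", {"opset_version": 13})
#   PTExporter.parse_format("onnx_11")  # -> ("onnx", {"opset_version": 11})
#   PTExporter.parse_format("other")    # -> ("other", {})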
@tracked_function(NNCF_PT_CATEGORY, ["save_format"])
def export_model(self, save_path: str, save_format: str = PTExportFormat.ONNX) -> None:
"""
Exports the compressed model to the specified format.
:param save_path: The path where the model will be saved.
:param save_format: Saving format.
One of the following:
- `onnx` for export to the ONNX format.
- `onnx_<opset_version>` for export to the ONNX format with specific opset version.
The ONNX format will be used if `save_format` is not specified.
"""
fn_args = {"save_path": save_path}
save_format, extra_args = PTExporter.parse_format(save_format)
fn_args.update(extra_args)
format_to_export_fn = {
PTExportFormat.ONNX: self._export_to_onnx,
}
export_fn = format_to_export_fn.get(save_format)
if export_fn is None:
available_formats = list(format_to_export_fn.keys())
raise ValueError(f"Unsupported saving format: '{save_format}'. Available formats: {available_formats}")
export_fn(**fn_args)
def _export_to_onnx(self, save_path: str, opset_version: int) -> None:
"""
Exports the compressed model to the ONNX format.
:param save_path: The path where the model will be saved.
"""
original_device = get_model_device(self._model)
model = self._model.eval().cpu()
input_tensor_list = []
for info in self._model.nncf.input_infos:
single_batch_info = copy(info)
input_shape = tuple([1] + list(info.shape)[1:])
single_batch_info.shape = input_shape
input_tensor_list.append(create_mock_tensor(single_batch_info, "cpu"))
full_arg_forward = model.nncf.get_original_forward()
args = self._model_args[:-1]
kwargs = self._model_args[-1]
partial_forward = partial(full_arg_forward, *args, **kwargs)
with model.nncf.temporary_bound_original_forward(partial_forward):
if self._input_names is not None:
input_names = self._input_names
else:
input_names = generate_input_names_list(len(input_tensor_list))
# pylint:disable=unexpected-keyword-arg
with torch.no_grad():
# Should call this, otherwise the operations executed during export will end up in the graph.
model.nncf.disable_dynamic_graph_building()
if self._output_names is not None:
output_names = self._output_names
else:
# Will have to run a dummy forward call in order to determine the number of outputs.
dummy_forward = create_dummy_forward_fn(self._model.nncf.input_infos)
retval = dummy_forward(self._model)
output_names = generate_output_names_list(count_tensors(retval))
self.METHOD_NAME(model, input_tensor_list, save_path, input_names, output_names, opset_version)
model.nncf.enable_dynamic_graph_building()
model.to(original_device)
def METHOD_NAME(self, model, input_tensor_list, save_path, input_names, output_names, opset_version):
"""
Call of torch.onnx.export function.
:param model: torch.nn.Module to be exported.
:param input_tensor_list: the list containing model inputs.
:param save_path: a string containing a path for saving onnx model.
:param input_names: Names to be assigned to the input tensors of the model.
:param output_names: Names to be assigned to the output tensors of the model.
:param opset_version: the version of the onnx opset.
"""
fn = partial(
torch.onnx.export,
model,
tuple(input_tensor_list),
save_path,
input_names=input_names,
output_names=output_names,
opset_version=opset_version,
training=torch.onnx.TrainingMode.EVAL,
)
try:
fn()
except torch.onnx.errors.SymbolicValueError:
# May have failed for reasons of missing and unspecifiable shape inference
# for quantizer ops in torch==1.13, try to export with a workaround.
nncf_logger.warning(
"Encountered shape inferencing failures during ONNX export. "
"The model was exported with a workaround - some of the operations may have been exported using "
"the `org.pytorch.aten` domain."
)
fn(operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK) |
298,722 | save | #!/usr/bin/env python
# datetime:2020/10/23 18:13
import datetime
import json
import time
from dongtai_common.models.iast_overpower_user import IastOverpowerUserAuth
from dongtai_common.models.iast_vul_overpower import IastVulOverpower
from dongtai_common.models.vulnerablity import IastVulnerabilityModel
from dongtai_common.utils import const
from dongtai_protocol.report.handler.report_handler_interface import IReportHandler
from dongtai_protocol.report.report_handler_factory import ReportHandler
@ReportHandler.register(const.REPORT_VULN_OVER_POWER)
class OverPowerHandler(IReportHandler):
def parse(self):
"""
{
'server_name': '127.0.0.1',
'http_uri': '/overpower/read-02',
'cookie': 'csrftoken=A00l4Ok1bkiWiG1OWbPgneUiM5uFGnzVyfH4qllr1hTvw3QCmqjG0VqCnwfga8PF; _jspxcms=1b40610d1eb840498b9826c7b2809418; __utma=96992031.1435321630.1598931302.1598931302.1598931302.1; __utmz=96992031.1598931302.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); Hm_lvt_bdff1c1dcce971c3d986f9be0921a0ee=1598346920,1598434566,1599821526; JSESSIONID=2F55FC8B6CBEC0E0F38018AD211AFDC8',
'http_protocol': 'HTTP/1.1',
'http_url': 'http://127.0.0.1:8080/overpower/read-02',
'sql': 'SELECT * FROM article WHERE id=1',
'app_name': '127.0.0.1',
'x-trace-id': 'tomcat-docbase.14532572676169694122.8080-sql-3eea90fc-367c-47ab-9bb3-efa0683f58a3',
'http_method': 'GET',
'server_port': 8080,
'http_scheme': 'http',
'http_query_string': 'id=1',
'http_header': 'host:127.0.0.1:8080\nconnection:keep-alive\nupgrade-insecure-requests:1\nuser-agent:Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36\naccept:text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\nsec-fetch-site:none\nsec-fetch-mode:navigate\nsec-fetch-user:?1\nsec-fetch-dest:document\naccept-encoding:gzip, deflate, br\naccept-language:zh-CN,zh-TW;q=0.9,zh;q=0.8,en-US;q=0.7,en;q=0.6\ncookie:csrftoken=A00l4Ok1bkiWiG1OWbPgneUiM5uFGnzVyfH4qllr1hTvw3QCmqjG0VqCnwfga8PF; _jspxcms=1b40610d1eb840498b9826c7b2809418; __utma=96992031.1435321630.1598931302.1598931302.1598931302.1; __utmz=96992031.1598931302.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); Hm_lvt_bdff1c1dcce971c3d986f9be0921a0ee=1598346920,1598434566,1599821526; JSESSIONID=2F55FC8B6CBEC0E0F38018AD211AFDC8\n'
}
:return:
"""
self.app_name = self.detail.get("app_name")
self.app_path = self.detail.get("app_path")
self.server_name = self.detail.get("server_name")
self.server_port = self.detail.get("server_port")
self.http_url = self.detail.get("http_url")
self.http_uri = self.detail.get("http_uri")
self.http_query_string = self.detail.get("http_query_string")
self.http_method = self.detail.get("http_method")
self.http_scheme = self.detail.get("http_scheme")
self.http_protocol = self.detail.get("http_protocol")
self.http_header = self.detail.get("http_header")
self.x_trace_id = self.detail.get("x-trace-id")
self.cookie = self.detail.get("cookie")
self.sql = self.detail.get("sql")
def METHOD_NAME(self):
# Check whether the trace_id already exists in the database
vul_model = IastVulOverpower.objects.filter(
app_name=self.app_name,
server_name=self.server_name,
server_port=self.server_port,
http_url=self.http_url,
http_query_string=self.http_query_string,
http_method=self.http_method,
x_trace_id=self.x_trace_id,
sql=self.sql,
)
if len(vul_model):
# Check for a broken access control (overpower) vulnerability
if vul_model[0].cookie != self.cookie:
detail_report = {
"trace-id": self.x_trace_id,
"server-name": self.server_name,
"server-port": self.server_port,
"http-method": self.http_method,
"http-url": self.http_url,
"http-query-string": self.http_query_string,
"http-original-auth": vul_model[0].cookie,
"http-original-user": self.get_user_from_auth(vul_model[0].cookie),
"http-current-auth": self.cookie,
"http-current-user": self.get_user_from_auth(self.cookie),
"http-sql": self.server_name,
}
# Look up the original user via the cookie
# Check whether this vulnerability already exists; if it does, ignore it, otherwise report it
vuls = IastVulnerabilityModel.objects.filter(
vul_url=self.http_url,
vul_type="越权漏洞",
vul_req_method=self.http_method,
protocol=self.http_protocol,
)
if len(vuls) > 0:
vuls[0].vul_count = vuls[0].vul_count + 1
vuls[0].vul_last_time = int(time.time())
vuls[0].METHOD_NAME()
else:
IastVulnerabilityModel(
type="越权漏洞",
vul_level="中危",
url=self.http_url,
uri=self.http_uri,
http_method=self.http_method,
http_scheme=self.http_scheme,
http_protocol=self.http_protocol,
req_header=self.http_header,
req_params=self.http_query_string,
req_data="", # fixme 请求体 数据保存
res_header="", # fixme 响应头,暂时没有,后续补充
res_body="", # fixme 响应体数据
full_stack=json.dumps(detail_report, ensure_ascii=False),
top_stack="",
bottom_stack="",
taint_value=None,
taint_position=None,
                        app_id=self.app_id,  # FIXME: the app id field is not persisted yet
                        app_name=self.app_name,
                        server_id=self.server_id,  # FIXME: the server id field is not persisted yet
server_name=self.server_name,
counts=1,
status="已上报",
first_time=int(time.time()),
latest_time=int(time.time()),
).METHOD_NAME()
else:
IastVulOverpower(
app_name=self.app_name,
server_name=self.server_name,
server_port=self.server_port,
http_url=self.http_url,
http_uri=self.http_uri,
http_query_string=self.http_query_string,
http_method=self.http_method,
http_scheme=self.http_scheme,
http_protocol=self.http_protocol,
http_header=self.http_header,
x_trace_id=self.x_trace_id,
cookie=self.cookie,
sql=self.sql,
user_id=self.user_id,
created_time=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
updated_time=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
).METHOD_NAME()
def get_user_from_auth(self, auth_value):
auths = IastOverpowerUserAuth.objects.filter(auth_value=auth_value)
if len(auths) > 0:
return auths[0].http_query_string
return None |
298,723 | build | #!/usr/bin/python3
import argparse
import datetime
import os
from builtins import NotADirectoryError
import shutil
import tempfile
boards = {
"XLITE_FCC": {
"PCB": "XLITE",
"MODULE_SIZE_STD": "NO",
"PPM": "NO",
"DSM2": "NO",
"SBUS": "NO",
},
"XLITE_LBT": {
"PCB": "XLITE",
"MODULE_PROTOCOL_D8": "NO",
"MODULE_SIZE_STD": "NO",
"PPM": "NO",
"DSM2": "NO",
"SBUS": "NO",
},
"XLITES": {
"PCB": "XLITES",
"AUTOUPDATE": "YES",
"PXX1": "YES",
"XJT": "NO",
"MODULE_SIZE_STD": "NO",
"PPM": "NO",
"DSM2": "NO",
"SBUS": "NO",
},
"X9LITE": {
"PCB": "X9LITE",
"AUTOUPDATE": "YES",
"PXX1": "YES",
"XJT": "NO",
"MODULE_SIZE_STD": "NO",
"PPM": "NO",
"DSM2": "NO",
"SBUS": "NO",
"DEFAULT_MODE": "2",
},
"X9LITES": {
"PCB": "X9LITES",
"AUTOUPDATE": "YES",
"PXX1": "YES",
"XJT": "NO",
"MODULE_SIZE_STD": "NO",
"PPM": "NO",
"DSM2": "NO",
"SBUS": "NO",
"DEFAULT_MODE": "2",
},
"X9D+2019": {
"PCB": "X9D+",
"PCBREV": "2019",
"AUTOUPDATE": "YES",
"PXX1": "YES",
"DEFAULT_MODE": "2",
},
"X9D+": {
"PCB": "X9D+",
"DEFAULT_MODE": "2",
},
"X9E": {
"PCB": "X9E",
"DEFAULT_MODE": "2",
},
"X7_FCC": {
"PCB": "X7",
"DEFAULT_MODE": "2",
},
"X7_LBT": {
"PCB": "X7",
"MODULE_PROTOCOL_D8": "NO",
"DEFAULT_MODE": "2",
},
"X7ACCESS": {
"PCB": "X7",
"PCBREV": "ACCESS",
"AUTOUPDATE": "YES",
"PXX1": "YES",
"DEFAULT_MODE": "2",
},
"X10S": {
"PCB": "X10",
"DEFAULT_MODE": "2",
},
"X10SExpress": {
"PCB": "X10",
"PCBREV": "EXPRESS",
"DEFAULT_MODE": "2",
},
"X12S": {
"PCB": "X12S",
"DEFAULT_MODE": "2",
},
}
translations = [
"EN",
"CZ"
]
common_options = {
"MULTIMODULE": "NO",
"CROSSFIRE": "NO",
"AFHDS3": "NO",
"GVARS": "YES",
"LUA": "NO_MODEL_SCRIPTS",
}
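# Illustrative sketch (not part of the build config): for the "X9E" board the
# cmake_options string assembled in the METHOD_NAME function below expands to
#   -DPCB=X9E -DDEFAULT_MODE=2 -DMULTIMODULE=NO -DCROSSFIRE=NO -DAFHDS3=NO -DGVARS=YES -DLUA=NO_MODEL_SCRIPTS
# i.e. the board-specific options followed by the shared common_options.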
def timestamp():
return datetime.datetime.now().strftime("%y%m%d")
def METHOD_NAME(board, translation, srcdir):
cmake_options = " ".join(["-D%s=%s" % (key, value) for key, value in list(boards[board].items()) + list(common_options.items())])
cwd = os.getcwd()
if not os.path.exists("output"):
os.mkdir("output")
path = tempfile.mkdtemp()
os.chdir(path)
command = "cmake %s -DTRANSLATIONS=%s -DFRSKY_RELEASE=YES -DDEFAULT_TEMPLATE_SETUP=17 %s" % (cmake_options, translation, srcdir)
print(command)
os.system(command)
os.system("make firmware -j6")
os.chdir(cwd)
index = 0
    while True:
suffix = "" if index == 0 else "_%d" % index
filename = "output/firmware_%s_%s_%s%s.bin" % (board.lower(), translation.lower(), timestamp(), suffix)
if not os.path.exists(filename):
shutil.copy("%s/firmware.bin" % path, filename)
break
index += 1
shutil.rmtree(path)
def dir_path(string):
if os.path.isdir(string):
return string
else:
raise NotADirectoryError(string)
def main():
parser = argparse.ArgumentParser(description="Build FrSky firmware")
parser.add_argument("-b", "--boards", action="append", help="Destination boards", required=True)
parser.add_argument("-t", "--translations", action="append", help="Translations", required=True)
parser.add_argument("srcdir", type=dir_path)
args = parser.parse_args()
for board in (boards.keys() if "ALL" in args.boards else args.boards):
for translation in (translations if "ALL" in args.translations else args.translations):
METHOD_NAME(board, translation, args.srcdir)
if __name__ == "__main__":
main() |
298,724 | french cursive | from manim import *
# French Cursive LaTeX font example from http://jf.burnol.free.fr/showcase.html
# Example 1 Manually creating a Template
TemplateForFrenchCursive = TexTemplate(
preamble=r"""
\usepackage[english]{babel}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage[T1]{fontenc}
\usepackage[default]{frcursive}
\usepackage[eulergreek,noplusnominus,noequal,nohbar,%
nolessnomore,noasterisk]{mathastext}
""",
)
def METHOD_NAME(*tex_strings, **kwargs):
return Tex(*tex_strings, tex_template=TemplateForFrenchCursive, **kwargs)
class TexFontTemplateManual(Scene):
"""An example scene that uses a manually defined TexTemplate() object to create
LaTeX output in French Cursive font"""
def construct(self):
self.add(Tex("Tex Font Example").to_edge(UL))
self.play(Create(METHOD_NAME("$f: A \\longrightarrow B$").shift(UP)))
self.play(Create(METHOD_NAME("Behold! We can write math in French Cursive")))
self.wait(1)
self.play(
Create(
Tex(
"See more font templates at \\\\ http://jf.burnol.free.fr/showcase.html",
).shift(2 * DOWN),
),
)
self.wait(2)
# Example 2, using a Template from the collection
class TexFontTemplateLibrary(Scene):
"""An example scene that uses TexTemplate objects from the TexFontTemplates collection
to create sample LaTeX output in every font that will compile on the local system.
Please Note:
    Many of the fonts in the TexFontTemplates collection require that specific
    fonts are installed on your local machine.
    For example, choosing the template TexFontTemplates.comic_sans will
    not compile if the Microsoft Comic Sans font is not installed.
    This scene will only render those templates that do not cause a TeX
    compilation error on your system. Furthermore, some of the ones that do
    render may still render incorrectly; this is beyond the scope of manim.
Feel free to experiment.
"""
def construct(self):
def write_one_line(template):
x = Tex(template.description, tex_template=template).shift(UP)
self.play(Create(x))
self.wait(1)
self.play(FadeOut(x))
examples = [
TexFontTemplates.american_typewriter, # "American Typewriter"
TexFontTemplates.antykwa, # "Antykwa Półtawskiego (TX Fonts for Greek and math symbols)"
TexFontTemplates.apple_chancery, # "Apple Chancery"
TexFontTemplates.auriocus_kalligraphicus, # "Auriocus Kalligraphicus (Symbol Greek)"
TexFontTemplates.baskervald_adf_fourier, # "Baskervald ADF with Fourier"
TexFontTemplates.baskerville_it, # "Baskerville (Italic)"
TexFontTemplates.biolinum, # "Biolinum"
TexFontTemplates.brushscriptx, # "BrushScriptX-Italic (PX math and Greek)"
TexFontTemplates.chalkboard_se, # "Chalkboard SE"
TexFontTemplates.chalkduster, # "Chalkduster"
TexFontTemplates.comfortaa, # "Comfortaa"
TexFontTemplates.comic_sans, # "Comic Sans MS"
TexFontTemplates.droid_sans, # "Droid Sans"
TexFontTemplates.droid_sans_it, # "Droid Sans (Italic)"
TexFontTemplates.droid_serif, # "Droid Serif"
TexFontTemplates.droid_serif_px_it, # "Droid Serif (PX math symbols) (Italic)"
TexFontTemplates.ecf_augie, # "ECF Augie (Euler Greek)"
TexFontTemplates.ecf_jd, # "ECF JD (with TX fonts)"
TexFontTemplates.ecf_skeetch, # "ECF Skeetch (CM Greek)"
TexFontTemplates.ecf_tall_paul, # "ECF Tall Paul (with Symbol font)"
TexFontTemplates.ecf_webster, # "ECF Webster (with TX fonts)"
TexFontTemplates.electrum_adf, # "Electrum ADF (CM Greek)"
TexFontTemplates.epigrafica, # Epigrafica
TexFontTemplates.fourier_utopia, # "Fourier Utopia (Fourier upright Greek)"
TexFontTemplates.french_cursive, # "French Cursive (Euler Greek)"
TexFontTemplates.gfs_bodoni, # "GFS Bodoni"
TexFontTemplates.gfs_didot, # "GFS Didot (Italic)"
TexFontTemplates.gfs_neoHellenic, # "GFS NeoHellenic"
TexFontTemplates.gnu_freesans_tx, # "GNU FreeSerif (and TX fonts symbols)"
TexFontTemplates.gnu_freeserif_freesans, # "GNU FreeSerif and FreeSans"
TexFontTemplates.helvetica_fourier_it, # "Helvetica with Fourier (Italic)"
TexFontTemplates.latin_modern_tw_it, # "Latin Modern Typewriter Proportional (CM Greek) (Italic)"
TexFontTemplates.latin_modern_tw, # "Latin Modern Typewriter Proportional"
TexFontTemplates.libertine, # "Libertine"
TexFontTemplates.libris_adf_fourier, # "Libris ADF with Fourier"
TexFontTemplates.minion_pro_myriad_pro, # "Minion Pro and Myriad Pro (and TX fonts symbols)"
TexFontTemplates.minion_pro_tx, # "Minion Pro (and TX fonts symbols)"
TexFontTemplates.new_century_schoolbook, # "New Century Schoolbook (Symbol Greek)"
TexFontTemplates.new_century_schoolbook_px, # "New Century Schoolbook (Symbol Greek, PX math symbols)"
TexFontTemplates.noteworthy_light, # "Noteworthy Light"
TexFontTemplates.palatino, # "Palatino (Symbol Greek)"
TexFontTemplates.papyrus, # "Papyrus"
TexFontTemplates.romande_adf_fourier_it, # "Romande ADF with Fourier (Italic)"
TexFontTemplates.slitex, # "SliTeX (Euler Greek)"
TexFontTemplates.times_fourier_it, # "Times with Fourier (Italic)"
TexFontTemplates.urw_avant_garde, # "URW Avant Garde (Symbol Greek)"
TexFontTemplates.urw_zapf_chancery, # "URW Zapf Chancery (CM Greek)"
TexFontTemplates.venturis_adf_fourier_it, # "Venturis ADF with Fourier (Italic)"
TexFontTemplates.verdana_it, # "Verdana (Italic)"
TexFontTemplates.vollkorn_fourier_it, # "Vollkorn with Fourier (Italic)"
TexFontTemplates.vollkorn, # "Vollkorn (TX fonts for Greek and math symbols)"
TexFontTemplates.zapf_chancery, # "Zapf Chancery"
]
self.add(Tex("Tex Font Template Example").to_edge(UL))
for font in examples:
try:
write_one_line(font)
except Exception:
print("FAILURE on ", font.description, " - skipping.")
self.play(
Create(
Tex(
"See more font templates at \\\\ http://jf.burnol.free.fr/showcase.html",
).shift(2 * DOWN),
),
)
self.wait(2) |
298,725 | load | # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""This module defines the data structures used to represent a grammar.
These are a bit arcane because they are derived from the data
structures used by Python's 'pgen' parser generator.
There's also a table here mapping operators to their names in the
token module; the Python tokenize module reports all operators as the
fallback token code OP, but the parser needs the actual token code.
"""
# Python imports
import collections
import pickle
# Local imports
from . import token, tokenize
class Grammar(object):
"""Pgen parsing tables conversion class.
Once initialized, this class supplies the grammar tables for the
parsing engine implemented by parse.py. The parsing engine
accesses the instance variables directly. The class here does not
provide initialization of the tables; several subclasses exist to
do this (see the conv and pgen modules).
The load() method reads the tables from a pickle file, which is
much faster than the other ways offered by subclasses. The pickle
file is written by calling dump() (after loading the grammar
tables using a subclass). The report() method prints a readable
representation of the tables to stdout, for debugging.
The instance variables are as follows:
symbol2number -- a dict mapping symbol names to numbers. Symbol
numbers are always 256 or higher, to distinguish
them from token numbers, which are between 0 and
255 (inclusive).
number2symbol -- a dict mapping numbers to symbol names;
these two are each other's inverse.
states -- a list of DFAs, where each DFA is a list of
states, each state is a list of arcs, and each
arc is a (i, j) pair where i is a label and j is
a state number. The DFA number is the index into
this list. (This name is slightly confusing.)
Final states are represented by a special arc of
the form (0, j) where j is its own state number.
dfas -- a dict mapping symbol numbers to (DFA, first)
pairs, where DFA is an item from the states list
above, and first is a set of tokens that can
begin this grammar rule (represented by a dict
whose values are always 1).
labels -- a list of (x, y) pairs where x is either a token
number or a symbol number, and y is either None
or a string; the strings are keywords. The label
number is the index in this list; label numbers
are used to mark state transitions (arcs) in the
DFAs.
start -- the number of the grammar's start symbol.
keywords -- a dict mapping keyword strings to arc labels.
tokens -- a dict mapping token numbers to arc labels.
"""
def __init__(self):
self.symbol2number = {}
self.number2symbol = {}
self.states = []
self.dfas = {}
self.labels = [(0, "EMPTY")]
self.keywords = {}
self.tokens = {}
self.symbol2label = {}
self.start = 256
def dump(self, filename):
"""Dump the grammar tables to a pickle file.
dump() recursively changes all dict to OrderedDict, so the pickled file
is not exactly the same as what was passed in to dump(). load() uses the
pickled file to create the tables, but only changes OrderedDict to dict
at the top level; it does not recursively change OrderedDict to dict.
So, the loaded tables are different from the original tables that were
        passed to dump() in that some of the OrderedDict (from the pickled file)
are not changed back to dict. For parsing, this has no effect on
performance because OrderedDict uses dict's __getitem__ with nothing in
between.
"""
with open(filename, "wb") as f:
d = _make_deterministic(self.__dict__)
pickle.dump(d, f, 2)
def METHOD_NAME(self, filename):
"""Load the grammar tables from a pickle file."""
        with open(filename, "rb") as f:
            d = pickle.METHOD_NAME(f)
self.__dict__.update(d)
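    # Hedged usage sketch (hypothetical filename): once a subclass has built
    # the tables, a round trip looks like
    #   g.dump("grammar.pickle")
    #   g2 = Grammar(); g2.METHOD_NAME("grammar.pickle")
    # which is much faster than regenerating the tables with pgen.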
def copy(self):
"""
Copy the grammar.
"""
new = self.__class__()
for dict_attr in ("symbol2number", "number2symbol", "dfas", "keywords",
"tokens", "symbol2label"):
setattr(new, dict_attr, getattr(self, dict_attr).copy())
new.labels = self.labels[:]
new.states = self.states[:]
new.start = self.start
return new
def report(self):
"""Dump the grammar tables to standard output, for debugging."""
from pprint import pprint
print "s2n"
pprint(self.symbol2number)
print "n2s"
pprint(self.number2symbol)
print "states"
pprint(self.states)
print "dfas"
pprint(self.dfas)
print "labels"
pprint(self.labels)
print "start", self.start
def _make_deterministic(top):
if isinstance(top, dict):
return collections.OrderedDict(
            sorted(((k, _make_deterministic(v)) for k, v in top.items())))
if isinstance(top, list):
return [_make_deterministic(e) for e in top]
if isinstance(top, tuple):
return tuple(_make_deterministic(e) for e in top)
return top
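# Illustrative example: _make_deterministic({'b': 1, 'a': {'d': 2, 'c': 3}})
# yields OrderedDict([('a', OrderedDict([('c', 3), ('d', 2)])), ('b', 1)]),
# making the pickled output stable regardless of dict iteration order.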
# Map from operator to number (since tokenize doesn't do this)
opmap_raw = """
( LPAR
) RPAR
[ LSQB
] RSQB
: COLON
, COMMA
; SEMI
+ PLUS
- MINUS
* STAR
/ SLASH
| VBAR
& AMPER
< LESS
> GREATER
= EQUAL
. DOT
% PERCENT
` BACKQUOTE
{ LBRACE
} RBRACE
@ AT
@= ATEQUAL
== EQEQUAL
!= NOTEQUAL
<> NOTEQUAL
<= LESSEQUAL
>= GREATEREQUAL
~ TILDE
^ CIRCUMFLEX
<< LEFTSHIFT
>> RIGHTSHIFT
** DOUBLESTAR
+= PLUSEQUAL
-= MINEQUAL
*= STAREQUAL
/= SLASHEQUAL
%= PERCENTEQUAL
&= AMPEREQUAL
|= VBAREQUAL
^= CIRCUMFLEXEQUAL
<<= LEFTSHIFTEQUAL
>>= RIGHTSHIFTEQUAL
**= DOUBLESTAREQUAL
// DOUBLESLASH
//= DOUBLESLASHEQUAL
-> RARROW
"""
opmap = {}
for line in opmap_raw.splitlines():
if line:
op, name = line.split()
opmap[op] = getattr(token, name) |
298,726 | test pipeline kernel columns ranked by shap | import pytest
import pandas as pd
import numpy as np
def test_pipeline_columns_ranked_by_shap(classifier_pipeline_explainer):
assert isinstance(classifier_pipeline_explainer.columns_ranked_by_shap(), list)
def test_pipeline_permutation_importances(classifier_pipeline_explainer):
assert isinstance(classifier_pipeline_explainer.get_permutation_importances_df(), pd.DataFrame)
def test_pipeline_metrics(classifier_pipeline_explainer):
assert isinstance(classifier_pipeline_explainer.metrics(), dict)
assert isinstance(classifier_pipeline_explainer.metrics_descriptions(), dict)
def test_pipeline_mean_abs_shap_df(classifier_pipeline_explainer):
assert isinstance(classifier_pipeline_explainer.get_mean_abs_shap_df(), pd.DataFrame)
def test_pipeline_contrib_df(classifier_pipeline_explainer):
assert isinstance(classifier_pipeline_explainer.get_contrib_df(0), pd.DataFrame)
assert isinstance(classifier_pipeline_explainer.get_contrib_df(X_row=classifier_pipeline_explainer.X.iloc[[0]]), pd.DataFrame)
def test_pipeline_shap_base_value(classifier_pipeline_explainer):
assert isinstance(classifier_pipeline_explainer.shap_base_value(), (np.floating, float))
def test_pipeline_shap_values_shape(classifier_pipeline_explainer):
assert (classifier_pipeline_explainer.get_shap_values_df().shape == (len(classifier_pipeline_explainer), len(classifier_pipeline_explainer.merged_cols)))
def test_pipeline_shap_values(classifier_pipeline_explainer):
assert isinstance(classifier_pipeline_explainer.get_shap_values_df(), pd.DataFrame)
def test_pipeline_pdp_df(classifier_pipeline_explainer):
assert isinstance(classifier_pipeline_explainer.pdp_df("num__age"), pd.DataFrame)
assert isinstance(classifier_pipeline_explainer.pdp_df("cat__sex"), pd.DataFrame)
assert isinstance(classifier_pipeline_explainer.pdp_df("num__age", index=0), pd.DataFrame)
assert isinstance(classifier_pipeline_explainer.pdp_df("cat__sex", index=0), pd.DataFrame)
def METHOD_NAME(classifier_pipeline_kernel_explainer):
assert isinstance(classifier_pipeline_kernel_explainer.columns_ranked_by_shap(), list)
def test_pipeline_kernel_permutation_importances(classifier_pipeline_kernel_explainer):
assert isinstance(classifier_pipeline_kernel_explainer.get_permutation_importances_df(), pd.DataFrame)
def test_pipeline_kernel_metrics(classifier_pipeline_kernel_explainer):
assert isinstance(classifier_pipeline_kernel_explainer.metrics(), dict)
assert isinstance(classifier_pipeline_kernel_explainer.metrics_descriptions(), dict)
def test_pipeline_kernel_mean_abs_shap_df(classifier_pipeline_kernel_explainer):
assert isinstance(classifier_pipeline_kernel_explainer.get_mean_abs_shap_df(), pd.DataFrame)
def test_pipeline_kernel_contrib_df(classifier_pipeline_kernel_explainer):
assert isinstance(classifier_pipeline_kernel_explainer.get_contrib_df(0), pd.DataFrame)
assert isinstance(classifier_pipeline_kernel_explainer.get_contrib_df(X_row=classifier_pipeline_kernel_explainer.X.iloc[[0]]), pd.DataFrame)
def test_pipeline_kernel_shap_base_value(classifier_pipeline_kernel_explainer):
assert isinstance(classifier_pipeline_kernel_explainer.shap_base_value(), (np.floating, float))
def test_pipeline_kernel_shap_values_shape(classifier_pipeline_kernel_explainer):
assert (classifier_pipeline_kernel_explainer.get_shap_values_df().shape == (len(classifier_pipeline_kernel_explainer), len(classifier_pipeline_kernel_explainer.merged_cols)))
def test_pipeline_kernel_shap_values(classifier_pipeline_kernel_explainer):
assert isinstance(classifier_pipeline_kernel_explainer.get_shap_values_df(), pd.DataFrame)
def test_pipeline_kernel_pdp_df(classifier_pipeline_kernel_explainer):
assert isinstance(classifier_pipeline_kernel_explainer.pdp_df("age"), pd.DataFrame)
assert isinstance(classifier_pipeline_kernel_explainer.pdp_df("sex"), pd.DataFrame)
assert isinstance(classifier_pipeline_kernel_explainer.pdp_df("age", index=0), pd.DataFrame)
assert isinstance(classifier_pipeline_kernel_explainer.pdp_df("sex", index=0), pd.DataFrame)
|
298,727 | driver capabilities | import os
import core.status
import core.exceptions as ex
from . import BASE_KEYWORDS
from core.capabilities import capabilities
from core.resource import Resource
from core.objects.svcdict import KEYS
DRIVER_GROUP = "share"
DRIVER_BASENAME = "nfs"
KEYS.register_driver(
DRIVER_GROUP,
DRIVER_BASENAME,
name=__name__,
keywords=BASE_KEYWORDS,
)
def METHOD_NAME(node=None):
from utilities.proc import which
from env import Env
if Env.sysname != "HP-UX":
return []
if which("share"):
return ["share.nfs"]
return []
class ShareNfs(Resource):
def __init__(self, path=None, opts=None, **kwargs):
Resource.__init__(self, type="share.nfs", **kwargs)
self.sharetab = "/etc/dfs/sharetab"
self.dfstab = "/etc/dfs/dfstab"
if "node.x.share" not in capabilities:
raise ex.InitError("share is not installed")
self.label = "nfs:%s" % path
self.path = path
try:
self.opts = self.parse_opts(opts)
except ex.Error as e:
raise ex.InitError(str(e))
def get_opts(self):
if not os.path.exists(self.sharetab):
return ""
with open(self.sharetab, 'r') as f:
buff = f.read()
for line in buff.split('\n'):
words = line.split()
if len(words) != 4:
continue
path = words[0]
if path != self.path:
continue
res = words[1]
fstype = words[2]
if fstype != "nfs":
continue
opts = words[3]
return self.parse_opts(opts)
return ""
def is_up(self):
self.issues = ""
opts = self.get_opts()
if len(opts) == 0:
return False
if opts != self.opts:
self.issues = "%s exported with unexpected options: %s, expected %s"%(self.path, opts, self.opts)
return False
return True
def start(self):
try:
up = self.is_up()
except ex.Error as e:
self.log.error("skip start because the share is in unknown state")
return
if up:
self.log.info("%s is already up" % self.path)
return
if "unexpected options" in self.issues:
self.log.info("reshare %s because unexpected options were detected"%self.path)
cmd = [ 'unshare', '-F', 'nfs', self.path ]
ret, out, err = self.vcall(cmd)
if ret != 0:
raise ex.Error(err)
self.can_rollback = True
cmd = [ 'share', '-F', 'nfs', '-o', self.opts, self.path ]
ret, out, err = self.vcall(cmd)
if ret != 0:
raise ex.Error(err)
    def stop(self):
        try:
            up = self.is_up()
        except ex.Error:
            self.log.error("continue with stop even if the share is in unknown state")
            up = True
if not up:
self.log.info("%s is already down" % self.path)
return 0
cmd = [ 'unshare', '-F', 'nfs', self.path ]
ret, out, err = self.vcall(cmd)
if ret != 0:
raise ex.Error
def _status(self, verbose=False):
try:
up = self.is_up()
except ex.Error as e:
self.status_log(str(e))
return core.status.WARN
if len(self.issues) > 0:
self.status_log(self.issues)
return core.status.WARN
if up:
return core.status.UP
else:
return core.status.DOWN
def parse_opts(self, opts):
o = sorted(opts.split(','))
out = []
for e in o:
if e.startswith('ro=') or e.startswith('rw=') or e.startswith('access='):
opt, clients = e.split('=')
clients = ':'.join(sorted(clients.split(':')))
if len(clients) == 0:
continue
out.append('='.join((opt, clients)))
else:
out.append(e)
return ','.join(out)
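    # Illustrative example: parse_opts("rw=nodeB:nodeA,root=nodeA") returns
    # "root=nodeA,rw=nodeA:nodeB" -- both the option list and each client list
    # are sorted, so logically identical exports compare equal in is_up().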
def post_provision_start(self):
pass |
298,728 | set | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# gpodder.minidb - A simple SQLite store for Python objects
# Thomas Perl, 2010-01-28
# based on: "ORM wie eine Kirchenmaus - a very poor ORM implementation
# by thp, 2009-11-29 (thp.io/about)"
# This module is also available separately at:
# http://thp.io/2010/minidb/
# For Python 2.5, we need to request the "with" statement
try:
import sqlite3.dbapi2 as sqlite
except ImportError:
try:
from pysqlite2 import dbapi2 as sqlite
except ImportError:
raise Exception('Please install SQLite3 support.')
import threading
class Store(object):
def __init__(self, filename=':memory:'):
self.db = sqlite.connect(filename, check_same_thread=False)
self.lock = threading.RLock()
def _schema(self, class_):
return class_.__name__, list(sorted(class_.__slots__))
def METHOD_NAME(self, o, slot, value):
# Set a slot on the given object to value, doing a cast if
# necessary. The value None is special-cased and never cast.
cls = o.__class__.__slots__[slot]
if value is not None:
if isinstance(value, bytes):
value = value.decode('utf-8')
value = cls(value)
setattr(o, slot, value)
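    # Illustrative example (hypothetical class): with __slots__ = {'id': int},
    # calling this with slot='id' and value='42' stores the int 42 on o,
    # while a value of None is stored unchanged.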
def commit(self):
with self.lock:
self.db.commit()
def close(self):
with self.lock:
self.db.isolation_level = None
self.db.execute('VACUUM')
self.db.isolation_level = ''
self.db.close()
def _register(self, class_):
with self.lock:
table, slots = self._schema(class_)
cur = self.db.execute('PRAGMA table_info(%s)' % table)
available = cur.fetchall()
if available:
available = [row[1] for row in available]
missing_slots = (s for s in slots if s not in available)
for slot in missing_slots:
self.db.execute('ALTER TABLE %s ADD COLUMN %s TEXT' % (table,
slot))
else:
self.db.execute('CREATE TABLE %s (%s)' % (table,
', '.join('%s TEXT' % s for s in slots)))
def convert(self, v):
if isinstance(v, str):
return v
        elif isinstance(v, bytes):
            return v.decode('utf-8')
else:
return str(v)
def update(self, o, **kwargs):
self.remove(o)
for k, v in list(kwargs.items()):
setattr(o, k, v)
self.save(o)
def save(self, o):
if hasattr(o, '__iter__'):
klass = None
for child in o:
if klass is None:
klass = child.__class__
self._register(klass)
table, slots = self._schema(klass)
if not isinstance(child, klass):
raise ValueError('Only one type of object allowed')
used = [s for s in slots if getattr(child, s, None) is not None]
values = [self.convert(getattr(child, slot)) for slot in used]
self.db.execute('INSERT INTO %s (%s) VALUES (%s)' % (table,
', '.join(used), ', '.join('?' * len(used))), values)
return
with self.lock:
self._register(o.__class__)
table, slots = self._schema(o.__class__)
values = [self.convert(getattr(o, slot)) for slot in slots]
self.db.execute('INSERT INTO %s (%s) VALUES (%s)' % (table,
', '.join(slots), ', '.join('?' * len(slots))), values)
def delete(self, class_, **kwargs):
with self.lock:
self._register(class_)
table, slots = self._schema(class_)
sql = 'DELETE FROM %s' % (table,)
if kwargs:
sql += ' WHERE %s' % (' AND '.join('%s=?' % k for k in kwargs))
try:
self.db.execute(sql, list(kwargs.values()))
return True
            except Exception:
return False
def remove(self, o):
if hasattr(o, '__iter__'):
for child in o:
self.remove(child)
return
with self.lock:
self._register(o.__class__)
table, slots = self._schema(o.__class__)
# Use "None" as wildcard selector in remove actions
slots = [s for s in slots if getattr(o, s, None) is not None]
values = [self.convert(getattr(o, slot)) for slot in slots]
self.db.execute('DELETE FROM %s WHERE %s' % (table,
' AND '.join('%s=?' % s for s in slots)), values)
def load(self, class_, **kwargs):
with self.lock:
self._register(class_)
table, slots = self._schema(class_)
sql = 'SELECT %s FROM %s' % (', '.join(slots), table)
if kwargs:
sql += ' WHERE %s' % (' AND '.join('%s=?' % k for k in kwargs))
            cur = self.db.execute(sql, list(kwargs.values()))
def apply(row):
o = class_.__new__(class_)
for attr, value in zip(slots, row):
try:
self.METHOD_NAME(o, attr, value)
                except ValueError:
return None
return o
return [x for x in [apply(row) for row in cur] if x is not None]
def get(self, class_, **kwargs):
result = self.load(class_, **kwargs)
if result:
return result[0]
else:
return None
if __name__ == '__main__':
class Person(object):
__slots__ = {'username': str, 'id': int}
def __init__(self, username, id):
self.username = username
self.id = id
def __repr__(self):
return '<Person "%s" (%d)>' % (self.username, self.id)
m = Store()
m.save(Person('User %d' % x, x * 20) for x in range(50))
p = m.get(Person, id=200)
print(p)
m.remove(p)
p = m.get(Person, id=200)
# Remove some persons again (deletion by value!)
m.remove(Person('User %d' % x, x * 20) for x in range(40))
class Person(object):
__slots__ = {'username': str, 'id': int, 'mail': str}
def __init__(self, username, id, mail):
self.username = username
self.id = id
self.mail = mail
def __repr__(self):
return '<Person "%s" (%s)>' % (self.username, self.mail)
# A schema update takes place here
m.save(Person('User %d' % x, x * 20, 'user@home.com') for x in range(50))
print(m.load(Person)) |
298,729 | test tensor op layer | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import tensorflow.compat.v2 as tf
import keras
from keras import backend
from keras.engine import base_layer_utils
from keras.testing_infra import test_combinations
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class TrackableWeightHandlerTest(test_combinations.TestCase):
def get_table_handler(self):
# Note: There is some repetition in these tests' setup. However,
# Tensorflow does not play nicely with a separate setUp() call (causing
# errors related to graph building), so we have to use a called setup
# instead of a setUp() call.
table = tf.lookup.experimental.MutableHashTable(
key_dtype=tf.string, value_dtype=tf.int32, default_value=0
)
return base_layer_utils.TrackableWeightHandler(table)
def test_get_num_tensors(self):
table_handler = self.get_table_handler()
self.assertEqual(2, table_handler.num_tensors)
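        # (a MutableHashTable is backed by two tensors -- its keys and its
        # values -- which is why the handler reports exactly two here)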
def test_get_and_set_weights(self):
table_handler = self.get_table_handler()
table_data = {b"a": 1, b"b": 2, b"c": 3}
table_handler.set_weights(
[list(table_data.keys()), list(table_data.values())]
)
weights = backend.batch_get_value(table_handler.get_tensors())
weight_data = {key: value for key, value in zip(weights[0], weights[1])}
self.assertDictEqual(table_data, weight_data)
def test_get_and_set_weights_does_not_add_ops(self):
table_handler = self.get_table_handler()
table_data = {b"a": 1, b"b": 2, b"c": 3}
table_handler.set_weights(
[list(table_data.keys()), list(table_data.values())]
)
_ = backend.batch_get_value(table_handler.get_tensors())
backend.get_session().graph.finalize()
table_handler.set_weights(
[list(table_data.keys()), list(table_data.values())]
)
_ = backend.batch_get_value(table_handler.get_tensors())
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
class OpLayerTest(test_combinations.TestCase):
def METHOD_NAME(self):
int_values = keras.Input(shape=(2,), dtype=tf.int32)
float_values = tf.cast(int_values, tf.float32)
model = keras.Model(int_values, float_values)
model.compile(loss="mse")
input_data = np.array([[1, 2], [3, 4]], dtype=np.int32)
expected = [[1.0, 2.0], [3.0, 4.0]]
output = model.predict(input_data)
self.assertAllClose(expected, output)
def test_ragged_op_layer_keras_tensors(self):
int_values = keras.Input(shape=(None,), dtype=tf.int32, ragged=True)
float_values = tf.cast(int_values, tf.float32)
model = keras.Model(int_values, float_values)
model.compile(loss="mse")
input_data = tf.ragged.constant([[1, 2], [3, 4]], dtype=np.int32)
expected = [[1.0, 2.0], [3.0, 4.0]]
output = model.predict(input_data)
self.assertIsInstance(output, tf.RaggedTensor)
self.assertAllClose(expected, output)
def test_sparse_op_layer_keras_tensors(self):
int_values = keras.Input(shape=(None,), dtype=tf.int32, sparse=True)
float_values = tf.cast(int_values, tf.float32)
_ = keras.Model(int_values, float_values)
model = keras.Model(int_values, float_values)
model.compile(loss="mse")
input_data = tf.sparse.from_dense(
np.array([[1, 2], [3, 4]], dtype=np.int32)
)
expected = [[1.0, 2.0], [3.0, 4.0]]
output = model.predict(input_data)
self.assertIsInstance(output, tf.SparseTensor)
self.assertAllClose(expected, tf.sparse.to_dense(output))
if __name__ == "__main__":
tf.test.main() |
298,730 | todo service | # ~~DOAJ:Service~~
class DOAJ(object):
"""
Primary entry point to the services which back up the DOAJ Business Logic Layer.
This is, in effect, a factory for generating the services for the various areas
of the DOAJ.
To use it, request the service or services for the area that you are working with, and then
call functions on the resulting service object. For example:
applicationService = DOAJ.applicationService()
applicationService.application_2_journal(....)
"""
@classmethod
def applicationService(cls):
"""
Obtain an instance of the application service ~~->Application:Service~~
:return: ApplicationService
"""
# Note the use of delayed imports to minimise code pre-loading, and to allow services loaded
# via this factory to also use the factory to load other services.
from portality.bll.services import application
return application.ApplicationService()
@classmethod
def journalService(cls):
"""
Obtain an instance of the journal service ~~->Journal:Service~~
:return: JournalService
"""
# Note the use of delayed imports to minimise code pre-loading, and to allow services loaded
# via this factory to also use the factory to load other services.
from portality.bll.services import journal
return journal.JournalService()
@classmethod
def authorisationService(cls):
"""
Obtain an instance of the authorisation service ~~->AuthNZ:Service~~
:return: AuthorisationService
"""
# Note the use of delayed imports to minimise code pre-loading, and to allow services loaded
# via this factory to also use the factory to load other services.
from portality.bll.services import authorisation
return authorisation.AuthorisationService()
@classmethod
def queryService(cls):
"""
Obtain an instance of the query service ~~->Query:Service~~
:return: QueryService
"""
# Note the use of delayed imports to minimise code pre-loading, and to allow services loaded
# via this factory to also use the factory to load other services.
from portality.bll.services import query
return query.QueryService()
@classmethod
def articleService(cls):
"""
Obtain an instance of the article service ~~->Article:Service~~
:return: ArticleService
"""
# Note the use of delayed imports to minimise code pre-loading, and to allow services loaded
# via this factory to also use the factory to load other services.
from portality.bll.services import article
return article.ArticleService()
@classmethod
def siteService(cls):
"""
Obtain an instance of the site service ~~->Site:Service~~
:return: SiteService
"""
from portality.bll.services import site
return site.SiteService()
@classmethod
def eventsService(cls):
"""
        Obtain an instance of the events service ~~->Events:Service~~
        :return: EventsService
"""
from portality.bll.services import events
return events.EventsService()
@classmethod
def notificationsService(cls):
"""
Obtain an instance of the notifications service ~~->Notifications:Service~~
:return: NotificationsService
"""
from portality.bll.services import notifications
return notifications.NotificationsService()
@classmethod
def METHOD_NAME(cls):
"""
Obtain an instance of the todo service ~~->Todo:Service~~
        :return: TodoService
"""
from portality.bll.services import todo
return todo.TodoService()
@classmethod
def backgroundTaskStatusService(cls):
"""
Obtain an instance of the background_task_status service
~~->BackgroundTask:Monitoring~~
:return: BackgroundTaskStatusService
"""
from portality.bll.services import background_task_status
return background_task_status.BackgroundTaskStatusService()
@classmethod
def tourService(cls):
"""
Obtain an instance of the tour service ~~->Tour:Service~~
        :return: TourService
"""
from portality.bll.services import tour
return tour.TourService() |
298,731 | test check averaging | # -*- coding: utf-8 -*-
from pmdarima.arima import ARIMA
from pmdarima.warnings import ModelFitWarning
from pmdarima.compat.pytest import pytest_error_str
from pmdarima.pipeline import Pipeline
from pmdarima.preprocessing import FourierFeaturizer
from pmdarima.model_selection._split import RollingForecastCV, \
SlidingWindowForecastCV
from pmdarima.model_selection._validation import cross_val_score, \
_check_scoring, cross_validate, cross_val_predict, _check_averaging
from pmdarima.datasets import load_airpassengers
import pytest
import numpy as np
from unittest import mock
y = load_airpassengers()
exogenous = np.random.RandomState(1).rand(y.shape[0], 2)
@pytest.mark.parametrize('cv', [
SlidingWindowForecastCV(window_size=100, step=24, h=1),
RollingForecastCV(initial=120, step=12, h=1),
])
@pytest.mark.parametrize(
'est', [
ARIMA(order=(2, 1, 1), maxiter=2, simple_differencing=True),
ARIMA(order=(1, 1, 2),
seasonal_order=(0, 1, 1, 12),
maxiter=2,
simple_differencing=True,
suppress_warnings=True),
Pipeline([
("fourier", FourierFeaturizer(m=12)),
("arima", ARIMA(order=(2, 1, 0),
maxiter=2,
simple_differencing=True))
])
]
)
@pytest.mark.parametrize('verbose', [0, 2, 4])
@pytest.mark.parametrize('X', [None, exogenous])
def test_cv_scores(cv, est, verbose, X):
scores = cross_val_score(
est, y, X=X, scoring='mean_squared_error',
cv=cv, verbose=verbose)
assert isinstance(scores, np.ndarray)
@pytest.mark.parametrize('cv', [
SlidingWindowForecastCV(window_size=100, step=12, h=12),
RollingForecastCV(initial=120, step=12, h=12),
])
@pytest.mark.parametrize(
'est', [
ARIMA(order=(2, 1, 1), simple_differencing=True),
ARIMA(order=(1, 1, 2),
seasonal_order=(0, 1, 1, 12),
simple_differencing=True,
suppress_warnings=True),
Pipeline([
("fourier", FourierFeaturizer(m=12)),
("arima", ARIMA(order=(2, 1, 0),
maxiter=2,
simple_differencing=True))
])
]
)
@pytest.mark.parametrize('avg', ["mean", "median"])
@pytest.mark.parametrize('return_raw_predictions', [True, False])
def test_cv_predictions(cv, est, avg, return_raw_predictions):
preds = cross_val_predict(
est, y, cv=cv, verbose=4, averaging=avg,
return_raw_predictions=return_raw_predictions)
assert isinstance(preds, np.ndarray)
if return_raw_predictions:
assert preds.shape[0] == len(y)
assert preds.shape[1] == cv.horizon
else:
assert preds.ndim == 1
def test_check_scoring():
# This will work since it's a callable
scorer = (lambda true, pred: np.nan)
assert _check_scoring(scorer) is scorer
# fails for bad metric
with pytest.raises(ValueError):
_check_scoring('bad metric')
# fails for anything else
with pytest.raises(TypeError):
_check_scoring(123)
def METHOD_NAME():
# This will work since it's a callable
avg = (lambda x, axis: x)
assert _check_averaging(avg) is avg
# fails for bad method
with pytest.raises(ValueError):
_check_averaging('bad method')
# fails for anything else
with pytest.raises(TypeError):
_check_averaging(123)
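# As the checks above imply, the built-in averaging options are the strings
# "mean" and "median"; anything else must be a callable taking (x, axis).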
def test_cross_val_predict_error():
cv = SlidingWindowForecastCV(step=24, h=1)
with pytest.raises(ValueError):
cross_val_predict(ARIMA(order=(2, 1, 0), maxiter=3), y, cv=cv)
def test_model_error_returns_nan():
with mock.patch('sklearn.base.clone', lambda x: x):
mock_model = mock.MagicMock()
def mock_fit(*args, **kwargs):
raise ValueError()
mock_model.fit = mock_fit
with pytest.warns(ModelFitWarning):
scores = cross_val_score(
mock_model, y, scoring='mean_squared_error',
cv=SlidingWindowForecastCV(window_size=100, step=24, h=1),
verbose=0)
assert np.isnan(scores).all()
# if the error_score is 'raise', we will raise
with pytest.raises(ValueError):
cross_val_score(
mock_model, y, scoring='mean_squared_error',
cv=SlidingWindowForecastCV(window_size=100, step=24, h=1),
verbose=0, error_score='raise')
def test_error_action_validation():
est = ARIMA(order=(1, 1, 2), seasonal_order=(0, 1, 1, 12))
with pytest.raises(ValueError) as ve:
cross_validate(
est, y, error_score=None, scoring='mean_squared_error',
cv=SlidingWindowForecastCV(window_size=100, step=24, h=1))
assert 'error_score should be' in pytest_error_str(ve) |
298,732 | search with sorting | import random
import uuid
class StoreApi:
context: None
def __init__(self, client, context):
self.context = context
self.client = client
self.currency_id = random.choice(self.context.sales_channel['currencies'])
self.language_id = random.choice(self.context.sales_channel['languages'])
self.token = str(uuid.uuid4()).replace('-', '')
self.switch_context({
'currencyId': self.currency_id,
'languageId': self.language_id
})
def home(self):
return self.request('/store-api/category/home', name='home')
def navigation(self, activeId = 'main-navigation'):
return self.request('/store-api/navigation/' + activeId + '/main-navigation', name = 'main-navigation')
def footer(self, activeId = 'footer-navigation'):
return self.request('/store-api/navigation/' + activeId + '/footer-navigation', name = 'footer-navigation')
def service(self, activeId = 'service-navigation'):
return self.request('/store-api/navigation/' + activeId + '/service-navigation', name = 'service-navigation')
def shipping_methods(self):
return self.request('/store-api/shipping-method', name='shipping-methods')
def payment_methods(self):
return self.request('/store-api/payment-method', name='payment-methods')
def languages(self):
return self.request('/store-api/language', name='languages')
def currencies(self):
return self.request('/store-api/currency', name='currencies')
def salutations(self):
return self.request('/store-api/salutation', name='salutations')
def countries(self):
return self.request('/store-api/country', name='countries')
def search(self):
return self.request('/store-api/search', name='search', parameters = {'search': random.choice(self.context.keywords)})
def METHOD_NAME(self):
field = random.choice(['name', 'price']);
order = random.choice(['asc', 'desc']);
return self.request('/store-api/search', name='search-sorting', parameters = {'search': random.choice(self.context.keywords), 'order': field + '-' + order})
def suggest(self):
return self.request('/store-api/search-suggest', name='suggest', parameters = {'search': random.choice(self.context.keywords)})
def cart(self):
return self.request('/store-api/checkout/cart', name='cart')
def product(self):
return self.request('/store-api/product/' + random.choice(self.context.product_ids), name='product')
def listing(self):
return self.request('/store-api/category/' + random.choice(self.context.category_ids), name='listing')
def add_product_to_cart(self):
id = random.choice(self.context.product_ids)
return self.request(
'/store-api/checkout/cart/line-item',
name='add-product-to-cart',
parameters = {
'items': [{'type': 'product', 'id': id, 'referencedId': id}]
}
)
def order(self):
return self.request('/store-api/checkout/order', name='order')
def register(self):
self.email = 'user-' + str(uuid.uuid4()).replace('-', '') + '@example.com'
response = self.request('/store-api/account/register', name='register', parameters={
'storefrontUrl': self.context.sales_channel['domain'],
'salutationId': self.context.sales_channel['salutationId'],
'firstName': 'Firstname',
'lastName': 'Lastname',
'email': self.email,
'password': 'shopware',
'acceptedDataProtection': True,
'billingAddress': {
'salutationId': self.context.sales_channel['salutationId'],
'street': 'Test street',
'zipcode': '11111',
'city': 'Test city',
'countryId': self.context.sales_channel['countryId']
}
})
self.token = response.headers['sw-context-token']
return response
def switch_context(self, parameters):
response = self.request('/store-api/context', name='context-switch', parameters=parameters, method='PATCH')
self.token = response.headers['sw-context-token']
return response
def get_headers(self):
return {
'Accept': 'application/json',
'Content-Type': 'application/json',
'sw-context-token': self.token,
'sw-access-key': self.context.sales_channel['access_key']
}
def request(self, url, name, parameters = {}, method = 'POST'):
headers = self.get_headers()
if method == 'POST':
response = self.client.post(self.context.url + url, json=parameters, headers=headers, name=name)
elif method == 'PATCH':
response = self.client.patch(self.context.url + url, json=parameters, headers=headers, name=name)
else:
response = self.client.get(self.context.url + url, headers=headers, name=name)
if response.status_code in [200, 204]:
return response
return response |
298,733 | run bind test | #!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test running Munt-daemon with the -rpcbind and -rpcallowip options."""
import sys
from test_framework.netutil import all_interfaces, addr_to_hex, get_bind_addrs, test_ipv6_local
from test_framework.test_framework import MuntTestFramework, SkipTest
from test_framework.util import assert_equal, assert_raises_rpc_error, get_rpc_proxy, rpc_port, rpc_url
class RPCBindTest(MuntTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.bind_to_localhost_only = False
self.num_nodes = 1
def setup_network(self):
self.add_nodes(self.num_nodes, None)
def add_options(self, parser):
parser.add_argument("--ipv4", action='store_true', dest="run_ipv4", help="Run ipv4 tests only", default=False)
parser.add_argument("--ipv6", action='store_true', dest="run_ipv6", help="Run ipv6 tests only", default=False)
parser.add_argument("--nonloopback", action='store_true', dest="run_nonloopback", help="Run non-loopback tests only", default=False)
def METHOD_NAME(self, allow_ips, connect_to, addresses, expected):
'''
Start a node with requested rpcallowip and rpcbind parameters,
then try to connect, and check if the set of bound addresses
matches the expected set.
'''
self.log.info("Bind test for %s" % str(addresses))
expected = [(addr_to_hex(addr), port) for (addr, port) in expected]
base_args = ['-disablewallet', '-nolisten']
if allow_ips:
base_args += ['-rpcallowip=' + x for x in allow_ips]
binds = ['-rpcbind='+addr for addr in addresses]
self.nodes[0].rpchost = connect_to
self.start_node(0, base_args + binds)
pid = self.nodes[0].process.pid
assert_equal(set(get_bind_addrs(pid)), set(expected))
self.stop_nodes()
def run_allowip_test(self, allow_ips, rpchost, rpcport):
'''
Start a node with rpcallow IP, and request getnetworkinfo
at a non-localhost IP.
'''
self.log.info("Allow IP test for %s:%d" % (rpchost, rpcport))
node_args = \
['-disablewallet', '-nolisten'] + \
['-rpcallowip='+x for x in allow_ips] + \
['-rpcbind='+addr for addr in ['127.0.0.1', "%s:%d" % (rpchost, rpcport)]] # Bind to localhost as well so start_nodes doesn't hang
self.nodes[0].rpchost = None
self.start_nodes([node_args])
# connect to node through non-loopback interface
node = get_rpc_proxy(rpc_url(self.nodes[0].datadir, 0, "%s:%d" % (rpchost, rpcport)), 0, coveragedir=self.options.coveragedir)
node.getnetworkinfo()
self.stop_nodes()
def run_test(self):
# due to OS-specific network stats queries, this test works only on Linux
if sum([self.options.run_ipv4, self.options.run_ipv6, self.options.run_nonloopback]) > 1:
raise AssertionError("Only one of --ipv4, --ipv6 and --nonloopback can be set")
self.log.info("Check for linux")
if not sys.platform.startswith('linux'):
raise SkipTest("This test can only be run on linux.")
self.log.info("Check for ipv6")
have_ipv6 = test_ipv6_local()
if not have_ipv6 and not (self.options.run_ipv4 or self.options.run_nonloopback):
raise SkipTest("This test requires ipv6 support.")
self.log.info("Check for non-loopback interface")
self.non_loopback_ip = None
for name,ip in all_interfaces():
if ip != '127.0.0.1':
self.non_loopback_ip = ip
break
if self.non_loopback_ip is None and self.options.run_nonloopback:
raise SkipTest("This test requires a non-loopback ip address.")
self.defaultport = rpc_port(0)
if not self.options.run_nonloopback:
self._run_loopback_tests()
if not self.options.run_ipv4 and not self.options.run_ipv6:
self._run_nonloopback_tests()
def _run_loopback_tests(self):
if self.options.run_ipv4:
# check only IPv4 localhost (explicit)
self.METHOD_NAME(['127.0.0.1'], '127.0.0.1', ['127.0.0.1'],
[('127.0.0.1', self.defaultport)])
# check only IPv4 localhost (explicit) with alternative port
self.METHOD_NAME(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'],
[('127.0.0.1', 32171)])
# check only IPv4 localhost (explicit) with multiple alternative ports on same host
self.METHOD_NAME(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171', '127.0.0.1:32172'],
[('127.0.0.1', 32171), ('127.0.0.1', 32172)])
else:
# check default without rpcallowip (IPv4 and IPv6 localhost)
self.METHOD_NAME(None, '127.0.0.1', [],
[('127.0.0.1', self.defaultport), ('::1', self.defaultport)])
# check default with rpcallowip (IPv4 and IPv6 localhost)
self.METHOD_NAME(['127.0.0.1'], '127.0.0.1', [],
[('127.0.0.1', self.defaultport), ('::1', self.defaultport)])
# check only IPv6 localhost (explicit)
self.METHOD_NAME(['[::1]'], '[::1]', ['[::1]'],
[('::1', self.defaultport)])
# check both IPv4 and IPv6 localhost (explicit)
self.METHOD_NAME(['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'],
[('127.0.0.1', self.defaultport), ('::1', self.defaultport)])
def _run_nonloopback_tests(self):
self.log.info("Using interface %s for testing" % self.non_loopback_ip)
# check only non-loopback interface
self.METHOD_NAME([self.non_loopback_ip], self.non_loopback_ip, [self.non_loopback_ip],
[(self.non_loopback_ip, self.defaultport)])
# Check that with invalid rpcallowip, we are denied
self.run_allowip_test([self.non_loopback_ip], self.non_loopback_ip, self.defaultport)
assert_raises_rpc_error(-342, "non-JSON HTTP response with '403 Forbidden' from server", self.run_allowip_test, ['1.1.1.1'], self.non_loopback_ip, self.defaultport)
if __name__ == '__main__':
RPCBindTest().main() |
298,734 | filter args | # Stubs for sublime_plugin (Python 3.5)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
import sublime
from typing import Any, Optional
api_ready = ... # type: bool
application_command_classes = ... # type: Any
window_command_classes = ... # type: Any
text_command_classes = ... # type: Any
view_event_listener_classes = ... # type: Any
view_event_listeners = ... # type: Any
all_command_classes = ... # type: Any
all_callbacks = ... # type: Any
profile = ... # type: Any
def unload_module(module): ...
def unload_plugin(modulename): ...
def reload_plugin(modulename): ...
def create_application_commands(): ...
def create_window_commands(window_id): ...
def create_text_commands(view_id): ...
def on_api_ready(): ...
def is_view_event_listener_applicable(cls, view): ...
def create_view_event_listeners(classes, view): ...
def check_view_event_listeners(view): ...
def attach_view(view): ...
check_all_view_event_listeners_scheduled = ... # type: bool
def check_all_view_event_listeners(): ...
def detach_view(view): ...
def event_listeners_for_view(view): ...
def find_view_event_listener(view, cls): ...
def on_new(view_id): ...
def on_new_async(view_id): ...
def on_clone(view_id): ...
def on_clone_async(view_id): ...
class Summary:
max = ... # type: float
sum = ... # type: float
count = ... # type: int
def __init__(self) -> None: ...
def record(self, x): ...
def run_callback(event, callback, expr): ...
def run_view_listener_callback(view, name): ...
def run_async_view_listener_callback(view, name): ...
def on_load(view_id): ...
def on_load_async(view_id): ...
def on_pre_close(view_id): ...
def on_close(view_id): ...
def on_pre_save(view_id): ...
def on_pre_save_async(view_id): ...
def on_post_save(view_id): ...
def on_post_save_async(view_id): ...
def on_modified(view_id): ...
def on_modified_async(view_id): ...
def on_selection_modified(view_id): ...
def on_selection_modified_async(view_id): ...
def on_activated(view_id): ...
def on_activated_async(view_id): ...
def on_deactivated(view_id): ...
def on_deactivated_async(view_id): ...
def on_query_context(view_id, key, operator, operand, match_all): ...
def normalise_completion(c): ...
def on_query_completions(view_id, prefix, locations): ...
def on_hover(view_id, point, hover_zone): ...
def on_text_command(view_id, name, args): ...
def on_window_command(window_id, name, args): ...
def on_post_text_command(view_id, name, args): ...
def on_post_window_command(window_id, name, args): ...
class Command:
def name(self): ...
def is_enabled_(self, args): ...
def is_enabled(self) -> bool: ...
def is_visible_(self, args): ...
def is_visible(self): ...
def is_checked_(self, args): ...
def is_checked(self): ...
def description_(self, args): ...
def description(self): ...
def METHOD_NAME(self, args): ...
def want_event(self): ...
class ApplicationCommand(Command):
def run_(self, edit_token, args): ...
# def run(self): ...
class WindowCommand(Command):
window = ... # type: sublime.Window
def __init__(self, window) -> None: ...
def run_(self, edit_token, args): ...
def is_enabled(self) -> bool: ...
# def run(self, ...): ...
class TextCommand(Command):
view = ... # type: sublime.View
def __init__(self, view) -> None: ...
def run_(self, edit_token, args): ...
# def run(self, edit: sublime.Edit, ...) -> None: ...
class EventListener:
def on_activated_async(self, view: sublime.View): ...
def on_load_async(self, view: sublime.View): ...
class ViewEventListener:
@classmethod
def is_applicable(cls, settings): ...
@classmethod
def applies_to_primary_view_only(cls): ...
view = ... # type: Any
def __init__(self, view) -> None: ...
class MultizipImporter:
loaders = ... # type: Any
file_loaders = ... # type: Any
def __init__(self) -> None: ...
def find_module(self, fullname, path: Optional[Any] = ...): ...
class ZipLoader:
zippath = ... # type: Any
name = ... # type: Any
def __init__(self, zippath) -> None: ...
def has(self, fullname): ...
def load_module(self, fullname): ...
override_path = ... # type: Any
multi_importer = ... # type: Any
def update_compressed_packages(pkgs): ...
def set_override_path(path): ... |
298,735 | get date | """
Store backfill data.
Author: Jingjing Tang
Created: 2022-08-03
"""
import os
import glob
from datetime import datetime
# third party
import pandas as pd
from delphi_utils import GeoMapper
from .config import Config
gmpr = GeoMapper()
def store_backfill_file(claims_filepath, _end_date, backfill_dir):
"""
Store county level backfill data into backfill_dir.
Parameter:
claims_filepath: str
path to the aggregated claims data
_end_date: datetime
The most recent date when the raw data is received
backfill_dir: str
specified path to store backfill files.
"""
backfilldata = pd.read_csv(
claims_filepath,
usecols=Config.CLAIMS_DTYPES.keys(),
dtype=Config.CLAIMS_DTYPES,
parse_dates=[Config.CLAIMS_DATE_COL],
)
backfilldata.rename({"ServiceDate": "time_value",
"PatCountyFIPS": "fips",
"Denominator": "den",
"Covid_like": "num"},
axis=1, inplace=True)
backfilldata = gmpr.add_geocode(backfilldata, from_code="fips", new_code="state_id",
from_col="fips", new_col="state_id")
#Store one year's backfill data
_start_date = _end_date.replace(year=_end_date.year-1)
selected_columns = ['time_value', 'fips', 'state_id',
'den', 'num']
backfilldata = backfilldata.loc[(backfilldata["time_value"] >= _start_date)
& (~backfilldata["fips"].isnull()),
selected_columns]
backfilldata["lag"] = [(_end_date - x).days for x in backfilldata["time_value"]]
backfilldata["time_value"] = backfilldata.time_value.dt.strftime("%Y-%m-%d")
backfilldata["issue_date"] = datetime.strftime(_end_date, "%Y-%m-%d")
backfilldata = backfilldata.astype({
"time_value": "string",
"issue_date": "string",
"fips": "string",
"state_id": "string"
})
path = backfill_dir + \
"/claims_hosp_as_of_%s.parquet"%datetime.strftime(_end_date, "%Y%m%d")
# Store intermediate file into the backfill folder
backfilldata.to_parquet(path, index=False)
def merge_backfill_file(backfill_dir, backfill_merge_day, today,
test_mode=False, check_nd=25):
"""
Merge ~4 weeks' backfill data into one file.
Usually this function should merge 28 days' data into a new file so as to
save the reading time when running the backfill pipelines. We set a softer
threshold to allow flexibility in data delivery.
Parameters
----------
today : datetime
The most recent date when the raw data is received
backfill_dir : str
specified path to store backfill files.
backfill_merge_day: int
The day of a week that we used to merge the backfill files. e.g. 0
is Monday.
    test_mode: bool
        If True, keep the daily files after merging instead of deleting them.
    check_nd: int
        The threshold on the number of unmerged files. Ideally this would be
        28, but a looser criterion is used for practical reasons.
"""
new_files = glob.glob(backfill_dir + "/claims_hosp_as_of_*")
    if len(new_files) == 0:  # no daily files have been stored yet
return
def METHOD_NAME(file_link):
# Keep the function here consistent with the backfill path in
# function `store_backfill_file`
fn = file_link.split("/")[-1].split(".parquet")[0].split("_")[-1]
return datetime.strptime(fn, "%Y%m%d")
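    # e.g. METHOD_NAME(".../claims_hosp_as_of_20220803.parquet") -> datetime(2022, 8, 3)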
date_list = list(map(METHOD_NAME, new_files))
earliest_date = min(date_list)
latest_date = max(date_list)
# Check whether to merge
# Check the number of files that are not merged
if today.weekday() != backfill_merge_day or (today-earliest_date).days <= check_nd:
return
# Start to merge files
pdList = []
for fn in new_files:
df = pd.read_parquet(fn, engine='pyarrow')
pdList.append(df)
merged_file = pd.concat(pdList).sort_values(["time_value", "fips"])
path = backfill_dir + "/claims_hosp_from_%s_to_%s.parquet"%(
datetime.strftime(earliest_date, "%Y%m%d"),
datetime.strftime(latest_date, "%Y%m%d"))
merged_file.to_parquet(path, index=False)
# Delete daily files once we have the merged one.
if not test_mode:
for fn in new_files:
os.remove(fn)
return |
298,736 | get entities get timeline | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
__all__ = [
'GetEntitiesGetTimelineResult',
'AwaitableGetEntitiesGetTimelineResult',
'get_entities_get_timeline',
'get_entities_get_timeline_output',
]
@pulumi.output_type
class GetEntitiesGetTimelineResult:
"""
The entity timeline result operation response.
"""
def __init__(__self__, meta_data=None, value=None):
if meta_data and not isinstance(meta_data, dict):
raise TypeError("Expected argument 'meta_data' to be a dict")
pulumi.set(__self__, "meta_data", meta_data)
if value and not isinstance(value, list):
raise TypeError("Expected argument 'value' to be a list")
pulumi.set(__self__, "value", value)
@property
@pulumi.getter(name="metaData")
def meta_data(self) -> Optional['outputs.TimelineResultsMetadataResponse']:
"""
The metadata from the timeline operation results.
"""
return pulumi.get(self, "meta_data")
@property
@pulumi.getter
def value(self) -> Optional[Sequence[Any]]:
"""
The timeline result values.
"""
return pulumi.get(self, "value")
class AwaitableGetEntitiesGetTimelineResult(GetEntitiesGetTimelineResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetEntitiesGetTimelineResult(
meta_data=self.meta_data,
value=self.value)
def METHOD_NAME(end_time: Optional[str] = None,
entity_id: Optional[str] = None,
kinds: Optional[Sequence[Union[str, 'EntityTimelineKind']]] = None,
number_of_bucket: Optional[int] = None,
resource_group_name: Optional[str] = None,
start_time: Optional[str] = None,
workspace_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetEntitiesGetTimelineResult:
"""
Timeline for an entity.
:param str end_time: The end timeline date, so the results returned are before this date.
:param str entity_id: entity ID
:param Sequence[Union[str, 'EntityTimelineKind']] kinds: Array of timeline Item kinds.
    :param int number_of_bucket: The number of buckets for timeline query aggregation.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str start_time: The start timeline date, so the results returned are after this date.
:param str workspace_name: The name of the workspace.
"""
__args__ = dict()
__args__['endTime'] = end_time
__args__['entityId'] = entity_id
__args__['kinds'] = kinds
__args__['numberOfBucket'] = number_of_bucket
__args__['resourceGroupName'] = resource_group_name
__args__['startTime'] = start_time
__args__['workspaceName'] = workspace_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:securityinsights/v20230201preview:getEntitiesGetTimeline', __args__, opts=opts, typ=GetEntitiesGetTimelineResult).value
return AwaitableGetEntitiesGetTimelineResult(
meta_data=pulumi.get(__ret__, 'meta_data'),
value=pulumi.get(__ret__, 'value'))
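# Hypothetical usage sketch (the IDs and names below are illustrative assumptions):
#     result = METHOD_NAME(entity_id="11111111-2222-3333-4444-555555555555",
#                          resource_group_name="my-rg",
#                          workspace_name="my-workspace",
#                          start_time="2023-01-01T00:00:00Z",
#                          end_time="2023-01-08T00:00:00Z")
#     result.value holds the timeline items; result.meta_data the aggregation metadata.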
@_utilities.lift_output_func(METHOD_NAME)
def get_entities_get_timeline_output(end_time: Optional[pulumi.Input[str]] = None,
entity_id: Optional[pulumi.Input[str]] = None,
kinds: Optional[pulumi.Input[Optional[Sequence[Union[str, 'EntityTimelineKind']]]]] = None,
number_of_bucket: Optional[pulumi.Input[Optional[int]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
start_time: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetEntitiesGetTimelineResult]:
"""
Timeline for an entity.
:param str end_time: The end timeline date, so the results returned are before this date.
:param str entity_id: entity ID
:param Sequence[Union[str, 'EntityTimelineKind']] kinds: Array of timeline Item kinds.
    :param int number_of_bucket: The number of buckets for timeline query aggregation.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str start_time: The start timeline date, so the results returned are after this date.
:param str workspace_name: The name of the workspace.
"""
... |
298,737 | test c si sdr half cpu | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
import pytest
import torch
from scipy.io import wavfile
from torchmetrics.audio import ComplexScaleInvariantSignalNoiseRatio
from torchmetrics.functional.audio import complex_scale_invariant_signal_noise_ratio
from unittests import BATCH_SIZE, NUM_BATCHES
from unittests.audio import _SAMPLE_AUDIO_SPEECH, _SAMPLE_AUDIO_SPEECH_BAB_DB
from unittests.helpers import seed_all
from unittests.helpers.testers import MetricTester
seed_all(42)
Input = namedtuple("Input", ["preds", "target"])
inputs = Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE, 129, 20, 2),
target=torch.rand(NUM_BATCHES, BATCH_SIZE, 129, 20, 2),
)
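# Shape convention for the random inputs above: (NUM_BATCHES, BATCH_SIZE, frequency, time, 2),
# where the trailing dimension of size 2 holds the real and imaginary STFT parts.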
@pytest.mark.parametrize(
"preds, target, ref_metric, zero_mean",
[
(inputs.preds, inputs.target, None, True),
(inputs.preds, inputs.target, None, False),
],
)
class TestComplexSISNR(MetricTester):
"""Test class for `ComplexScaleInvariantSignalNoiseRatio` metric."""
atol = 1e-2
def test_c_si_snr_differentiability(self, preds, target, ref_metric, zero_mean):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=ComplexScaleInvariantSignalNoiseRatio,
metric_functional=complex_scale_invariant_signal_noise_ratio,
metric_args={"zero_mean": zero_mean},
)
def METHOD_NAME(self, preds, target, ref_metric, zero_mean):
"""Test dtype support of the metric on CPU."""
pytest.xfail("C-SI-SDR metric does not support cpu + half precision")
def test_c_si_sdr_half_gpu(self, preds, target, ref_metric, zero_mean):
"""Test dtype support of the metric on GPU."""
pytest.xfail("C-SI-SDR metric does not support gpu + half precision")
def test_on_real_audio():
"""Test that metric works as expected on real audio signals."""
rate, ref = wavfile.read(_SAMPLE_AUDIO_SPEECH)
rate, deg = wavfile.read(_SAMPLE_AUDIO_SPEECH_BAB_DB)
ref = torch.tensor(ref, dtype=torch.float32)
deg = torch.tensor(deg, dtype=torch.float32)
ref_stft = torch.stft(ref, n_fft=256, hop_length=128, return_complex=True)
deg_stft = torch.stft(deg, n_fft=256, hop_length=128, return_complex=True)
v = complex_scale_invariant_signal_noise_ratio(deg_stft, ref_stft, zero_mean=False)
assert torch.allclose(v, torch.tensor(0.03019072115421295, dtype=v.dtype), atol=1e-4), v
v = complex_scale_invariant_signal_noise_ratio(deg_stft, ref_stft, zero_mean=True)
assert torch.allclose(v, torch.tensor(0.030391741544008255, dtype=v.dtype), atol=1e-4), v
def test_error_on_incorrect_shape(metric_class=ComplexScaleInvariantSignalNoiseRatio):
"""Test that error is raised on incorrect shapes of input."""
metric = metric_class()
with pytest.raises(
RuntimeError,
match="Predictions and targets are expected to have the shape (..., frequency, time, 2)*",
):
metric(torch.randn(100), torch.randn(50))
def test_error_on_different_shape(metric_class=ComplexScaleInvariantSignalNoiseRatio):
"""Test that error is raised on different shapes of input."""
metric = metric_class()
with pytest.raises(RuntimeError, match="Predictions and targets are expected to have the same shape*"):
metric(torch.randn(129, 100, 2), torch.randn(129, 101, 2)) |
298,738 | copy | import unittest
import shelve
import glob
from test import support
from collections.abc import MutableMapping
from test.test_dbm import dbm_iterator
def L1(s):
return s.decode("latin-1")
class byteskeydict(MutableMapping):
"Mapping that supports bytes keys"
def __init__(self):
self.d = {}
def __getitem__(self, key):
return self.d[L1(key)]
def __setitem__(self, key, value):
self.d[L1(key)] = value
def __delitem__(self, key):
del self.d[L1(key)]
def __len__(self):
return len(self.d)
def iterkeys(self):
for k in self.d.keys():
yield k.encode("latin-1")
__iter__ = iterkeys
def keys(self):
return list(self.iterkeys())
def METHOD_NAME(self):
return byteskeydict(self.d)
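# Hypothetical usage sketch: byteskeydict lets a Shelf store its encoded (bytes) keys
# in a plain dict, e.g.
#     d = byteskeydict()
#     shelve.Shelf(d)['spam'] = [1]
#     list(d.keys()) == [b'spam']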
class TestCase(unittest.TestCase):
fn = "shelftemp.db"
def tearDown(self):
for f in glob.glob(self.fn+"*"):
support.unlink(f)
def test_close(self):
d1 = {}
s = shelve.Shelf(d1, protocol=2, writeback=False)
s['key1'] = [1,2,3,4]
self.assertEqual(s['key1'], [1,2,3,4])
self.assertEqual(len(s), 1)
s.close()
self.assertRaises(ValueError, len, s)
try:
s['key1']
except ValueError:
pass
else:
self.fail('Closed shelf should not find a key')
def test_ascii_file_shelf(self):
s = shelve.open(self.fn, protocol=0)
try:
s['key1'] = (1,2,3,4)
self.assertEqual(s['key1'], (1,2,3,4))
finally:
s.close()
def test_binary_file_shelf(self):
s = shelve.open(self.fn, protocol=1)
try:
s['key1'] = (1,2,3,4)
self.assertEqual(s['key1'], (1,2,3,4))
finally:
s.close()
def test_proto2_file_shelf(self):
s = shelve.open(self.fn, protocol=2)
try:
s['key1'] = (1,2,3,4)
self.assertEqual(s['key1'], (1,2,3,4))
finally:
s.close()
def test_in_memory_shelf(self):
d1 = byteskeydict()
s = shelve.Shelf(d1, protocol=0)
s['key1'] = (1,2,3,4)
self.assertEqual(s['key1'], (1,2,3,4))
s.close()
d2 = byteskeydict()
s = shelve.Shelf(d2, protocol=1)
s['key1'] = (1,2,3,4)
self.assertEqual(s['key1'], (1,2,3,4))
s.close()
self.assertEqual(len(d1), 1)
self.assertEqual(len(d2), 1)
self.assertNotEqual(d1.items(), d2.items())
def test_mutable_entry(self):
d1 = byteskeydict()
s = shelve.Shelf(d1, protocol=2, writeback=False)
s['key1'] = [1,2,3,4]
self.assertEqual(s['key1'], [1,2,3,4])
s['key1'].append(5)
self.assertEqual(s['key1'], [1,2,3,4])
s.close()
d2 = byteskeydict()
s = shelve.Shelf(d2, protocol=2, writeback=True)
s['key1'] = [1,2,3,4]
self.assertEqual(s['key1'], [1,2,3,4])
s['key1'].append(5)
self.assertEqual(s['key1'], [1,2,3,4,5])
s.close()
self.assertEqual(len(d1), 1)
self.assertEqual(len(d2), 1)
def test_keyencoding(self):
d = {}
key = 'Pöp'
# the default keyencoding is utf-8
shelve.Shelf(d)[key] = [1]
self.assertIn(key.encode('utf-8'), d)
# but a different one can be given
shelve.Shelf(d, keyencoding='latin-1')[key] = [1]
self.assertIn(key.encode('latin-1'), d)
# with all consequences
s = shelve.Shelf(d, keyencoding='ascii')
self.assertRaises(UnicodeEncodeError, s.__setitem__, key, [1])
def test_writeback_also_writes_immediately(self):
# Issue 5754
d = {}
key = 'key'
encodedkey = key.encode('utf-8')
s = shelve.Shelf(d, writeback=True)
s[key] = [1]
p1 = d[encodedkey] # Will give a KeyError if backing store not updated
s['key'].append(2)
s.close()
p2 = d[encodedkey]
self.assertNotEqual(p1, p2) # Write creates new object in store
def test_with(self):
d1 = {}
with shelve.Shelf(d1, protocol=2, writeback=False) as s:
s['key1'] = [1,2,3,4]
self.assertEqual(s['key1'], [1,2,3,4])
self.assertEqual(len(s), 1)
self.assertRaises(ValueError, len, s)
try:
s['key1']
except ValueError:
pass
else:
self.fail('Closed shelf should not find a key')
from test import mapping_tests
class TestShelveBase(mapping_tests.BasicTestMappingProtocol):
fn = "shelftemp.db"
counter = 0
def __init__(self, *args, **kw):
self._db = []
mapping_tests.BasicTestMappingProtocol.__init__(self, *args, **kw)
type2test = shelve.Shelf
def _reference(self):
return {"key1":"value1", "key2":2, "key3":(1,2,3)}
def _empty_mapping(self):
if self._in_mem:
x= shelve.Shelf(byteskeydict(), **self._args)
else:
self.counter+=1
x= shelve.open(self.fn+str(self.counter), **self._args)
self._db.append(x)
return x
def tearDown(self):
for db in self._db:
db.close()
self._db = []
if not self._in_mem:
for f in glob.glob(self.fn+"*"):
support.unlink(f)
class TestAsciiFileShelve(TestShelveBase):
_args={'protocol':0}
_in_mem = False
class TestBinaryFileShelve(TestShelveBase):
_args={'protocol':1}
_in_mem = False
class TestProto2FileShelve(TestShelveBase):
_args={'protocol':2}
_in_mem = False
class TestAsciiMemShelve(TestShelveBase):
_args={'protocol':0}
_in_mem = True
class TestBinaryMemShelve(TestShelveBase):
_args={'protocol':1}
_in_mem = True
class TestProto2MemShelve(TestShelveBase):
_args={'protocol':2}
_in_mem = True
def test_main():
for module in dbm_iterator():
support.run_unittest(
TestAsciiFileShelve,
TestBinaryFileShelve,
TestProto2FileShelve,
TestAsciiMemShelve,
TestBinaryMemShelve,
TestProto2MemShelve,
TestCase
)
if __name__ == "__main__":
test_main() |
298,739 | get beta | #!/usr/bin/env python
##################################################
# Gnuradio Python Flow Graph
# Title: Radio Impairments Model
# Author: mettus
# Generated: Thu Aug 1 12:46:10 2013
##################################################
import math
from gnuradio import analog
from gnuradio import blocks
from gnuradio import gr
from gnuradio.filter import firdes
# Import locally
from .phase_noise_gen import phase_noise_gen
from .iqbal_gen import iqbal_gen
from .distortion_2_gen import distortion_2_gen
from .distortion_3_gen import distortion_3_gen
class impairments(gr.hier_block2):
def __init__(self,
phase_noise_mag=0,
magbal=0,
phasebal=0,
q_ofs=0,
i_ofs=0,
freq_offset=0,
gamma=0,
beta=0):
gr.hier_block2.__init__(
self, "Radio Impairments Model",
gr.io_signature(1, 1, gr.sizeof_gr_complex * 1),
gr.io_signature(1, 1, gr.sizeof_gr_complex * 1),
)
##################################################
# Parameters
##################################################
self.phase_noise_mag = phase_noise_mag
self.magbal = magbal
self.phasebal = phasebal
self.q_ofs = q_ofs
self.i_ofs = i_ofs
self.freq_offset = freq_offset
self.gamma = gamma
self.beta = beta
##################################################
# Blocks
##################################################
self.phase_noise = phase_noise_gen(10.0**(phase_noise_mag / 20.0), .01)
self.iq_imbalance = iqbal_gen(magbal, phasebal)
self.channels_distortion_3_gen_0 = distortion_3_gen(beta)
self.channels_distortion_2_gen_0 = distortion_2_gen(gamma)
self.freq_modulator = blocks.multiply_cc()
self.freq_offset_gen = analog.sig_source_c(
1.0, analog.GR_COS_WAVE, freq_offset, 1, 0)
self.freq_modulator_dcoffs = blocks.multiply_cc()
self.freq_offset_conj = blocks.conjugate_cc()
self.dc_offset = blocks.add_const_vcc((i_ofs + q_ofs * 1j, ))
##################################################
# Frequency offset
self.connect((self, 0), (self.freq_modulator, 1))
self.connect((self.freq_offset_gen, 0), (self.freq_offset_conj, 0))
self.connect((self.freq_offset_conj, 0), (self.freq_modulator, 0))
# Most distortions can be strung in a row
self.connect(
(self.freq_modulator, 0),
(self.phase_noise, 0),
(self.channels_distortion_3_gen_0, 0),
(self.channels_distortion_2_gen_0, 0),
(self.iq_imbalance, 0),
(self.dc_offset, 0),
)
# Frequency offset again
self.connect((self.freq_offset_gen, 0),
(self.freq_modulator_dcoffs, 0))
self.connect((self.dc_offset, 0), (self.freq_modulator_dcoffs, 1))
self.connect((self.freq_modulator_dcoffs, 0), (self, 0))
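    # Hypothetical usage sketch (parameter values are illustrative assumptions):
    #     chain = impairments(phase_noise_mag=-60, magbal=0.1, freq_offset=0.01)
    # The input stream passes through frequency offset, phase noise, third- and
    # second-order distortion, IQ imbalance and DC offset before reaching the output.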
def get_phase_noise_mag(self):
return self.phase_noise_mag
def set_phase_noise_mag(self, phase_noise_mag):
self.phase_noise_mag = phase_noise_mag
self.phase_noise.set_noise_mag(10**(self.phase_noise_mag / 20.0))
def get_magbal(self):
return self.magbal
def set_magbal(self, magbal):
self.magbal = magbal
self.iq_imbalance.set_magnitude(self.magbal)
def get_phasebal(self):
return self.phasebal
def set_phasebal(self, phasebal):
self.phasebal = phasebal
self.iq_imbalance.set_phase(self.phasebal)
def get_q_ofs(self):
return self.q_ofs
def set_q_ofs(self, q_ofs):
self.q_ofs = q_ofs
self.dc_offset.set_k((self.i_ofs + self.q_ofs * 1j, ))
def get_i_ofs(self):
return self.i_ofs
def set_i_ofs(self, i_ofs):
"""Set inphase part of DC offset"""
self.i_ofs = i_ofs
self.dc_offset.set_k((self.i_ofs + self.q_ofs * 1j, ))
def get_freq_offset(self):
"""Return frequency offset (normalized to 1.0)"""
return self.freq_offset
def set_freq_offset(self, freq_offset):
"""Set frequency offset (normalized to 1.0)"""
self.freq_offset = freq_offset
self.freq_offset_gen.set_frequency(self.freq_offset)
def get_gamma(self):
return self.gamma
def set_gamma(self, gamma):
self.gamma = gamma
self.channels_distortion_2_gen_0.set_beta(self.gamma)
def METHOD_NAME(self):
return self.beta
def set_beta(self, beta):
self.beta = beta
self.channels_distortion_3_gen_0.set_beta(self.beta) |
298,740 | wrap it | # Copyright (c) 2022 Ultimaker B.V.
# Uranium is released under the terms of the LGPLv3 or higher.
import time
import math
import os
import threading
from contextlib import contextmanager
import functools
from typing import List, Callable, Any
from PyQt6.QtCore import pyqtSlot as PyQt6PyqtSlot
from UM.Logger import Logger
# A simple profiler which produces data suitable for viewing as a flame graph
# when using the Big Flame Graph plugin.
#
# An example of code which uses this profiling data is this Cura plugin:
# https://github.com/sedwards2009/cura-big-flame-graph
#
# Set the environment variable URANIUM_FLAME_PROFILER to something before
# starting the application to make the profiling code available.
def enabled() -> bool:
return "URANIUM_FLAME_PROFILER" in os.environ
record_profile = False # Flag to keep track of whether we are recording data.
# Profiling data is build up of a tree of these kinds of nodes. Each node
# has a name, start time, end time, and a list of children nodes which are
# other functions/methods which were called by this function.
class _ProfileCallNode:
def __init__(self, name, line_number, start_time, end_time, children):
self.__name = name
self.__line_number = line_number
self.__start_time = start_time
self.__end_time = end_time
self.__children = children if children is not None else [] # type: List[_ProfileCallNode]
def getStartTime(self):
return self.__start_time
def getEndTime(self):
return self.__end_time
def getDuration(self):
return self.__end_time - self.__start_time
def toJSON(self, root=False):
if root:
return """
{
"c": {
"callStats": """ + self._plainToJSON() + """,
"sampleIterval": 1,
"objectName": "Cura",
"runTime": """ + str(self.getDuration()) + """,
"totalSamples": """ + str(self.getDuration()) + """
},
"version": "0.34"
}
"""
else:
return self._plainToJSON()
def _plainToJSON(self):
return '''{
"stack": [
"''' + self.__name + '''",
"Code: ''' + self.__name + '''",
''' + str(self.__line_number) + ''',
''' + str(self.getDuration()) + '''
],
"sampleCount": '''+ str(self.getDuration()) + ''',
"children": [
''' + ",\n".join( [kid.toJSON() for kid in self.__children]) + '''
]
}
'''
child_accu_stack = [ [] ] # type: List[List[_ProfileCallNode]]
clear_profile_requested = False
record_profile_requested = False
stop_record_profile_requested = False
def getProfileData():
"""Fetch the accumulated profile data.
:return: :type{ProfileCallNode} or None if there is no data.
"""
raw_profile_calls = child_accu_stack[0]
if len(raw_profile_calls) == 0:
return None
start_time = raw_profile_calls[0].getStartTime()
end_time = raw_profile_calls[-1].getEndTime()
fill_children = _fillInProfileSpaces(start_time, end_time, raw_profile_calls)
return _ProfileCallNode("", 0, start_time, end_time, fill_children)
def clearProfileData():
"""Erase any profile data."""
global clear_profile_requested
clear_profile_requested = True
def startRecordingProfileData():
"""Start recording profile data."""
global record_profile_requested
global stop_record_profile_requested
stop_record_profile_requested = False
record_profile_requested = True
def stopRecordingProfileData():
"""Stop recording profile data."""
global stop_record_profile_requested
stop_record_profile_requested = True
def _fillInProfileSpaces(start_time, end_time, profile_call_list):
result = []
time_counter = start_time
for profile_call in profile_call_list:
if secondsToMS(profile_call.getStartTime()) != secondsToMS(time_counter):
result.append(_ProfileCallNode("", 0, time_counter, profile_call.getStartTime(), []))
result.append(profile_call)
time_counter = profile_call.getEndTime()
if secondsToMS(time_counter) != secondsToMS(end_time):
result.append(_ProfileCallNode("", 0, time_counter, end_time, []))
return result
def secondsToMS(value):
return math.floor(value *1000)
@contextmanager
def profileCall(name):
"""Profile a block of code.
Use this context manager to wrap and profile a block of code.
:param name: :type{str} The name to use to identify this code in the profile report.
"""
if enabled():
start_time = time.perf_counter()
child_accu_stack.append([])
yield
end_time = time.perf_counter()
child_values = child_accu_stack.pop()
if (end_time - start_time) > 0.001: # Filter out small durations (< 1ms)
call_stat = _ProfileCallNode(name, 0, start_time, end_time, _fillInProfileSpaces(start_time, end_time,
child_values))
child_accu_stack[-1].append(call_stat)
else:
yield
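# Hypothetical usage sketch (the block label and helper are illustrative assumptions):
#     with profileCall("load_mesh"):
#         mesh = read_mesh_from_disk()
# Any block wrapped this way shows up as a node in the resulting flame graph.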
def isRecordingProfile() -> bool:
"""Return whether we are recording profiling information.
:return: :type{bool} True if we are recording.
"""
global record_profile
return record_profile and threading.main_thread() is threading.current_thread()
def updateProfileConfig():
global child_accu_stack
global record_profile
# We can only update the active profiling config when we are not deeply nested inside profiled calls.
if len(child_accu_stack) <= 1:
global clear_profile_requested
if clear_profile_requested:
clear_profile_requested = False
child_accu_stack = [[]]
global record_profile_requested
if record_profile_requested:
record_profile_requested = False
record_profile = True
Logger.log('d', 'Starting record record_profile_requested')
global stop_record_profile_requested
if stop_record_profile_requested:
stop_record_profile_requested = False
record_profile = False
Logger.log('d', 'Stopping record stop_record_profile_requested')
def profile(function):
"""Decorator which can be manually applied to methods to record profiling information."""
if enabled():
@functools.wraps(function)
def runIt(*args, ** kwargs):
if isRecordingProfile():
with profileCall(function.__qualname__):
return function(*args, ** kwargs)
else:
return function(*args, **kwargs)
return runIt
else:
return function
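# Hypothetical usage sketch (the class and method below are illustrative assumptions):
#     class MeshHandler:
#         @profile
#         def rebuild(self):
#             ...
# rebuild() is then timed whenever profiling is enabled and actively recording.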
def pyqtSlot(*args, **kwargs) -> Callable[..., Any]:
"""Drop in replacement for PyQt6's pyqtSlot decorator which records profiling information.
See the PyQt6 documentation for information about pyqtSlot.
"""
if enabled():
def METHOD_NAME(function):
@functools.wraps(function)
def wrapped(*args2, **kwargs2):
if isRecordingProfile():
with profileCall("[SLOT] "+ function.__qualname__):
return function(*args2, **kwargs2)
else:
return function(*args2, **kwargs2)
return PyQt6PyqtSlot(*args, **kwargs)(wrapped)
return METHOD_NAME
else:
def dontWrapIt(function):
return PyQt6PyqtSlot(*args, **kwargs)(function)
return dontWrapIt |
298,741 | raw websocket | import asyncio
import logging
import time
from collections import deque
from contextlib import asynccontextmanager
from typing import Any, AsyncIterator, Deque, Dict, List, Optional, Type, Union
from uuid import uuid4
from fastapi import WebSocketDisconnect
from websockets.exceptions import ConnectionClosed
from freqtrade.rpc.api_server.ws.proxy import WebSocketProxy
from freqtrade.rpc.api_server.ws.serializer import (HybridJSONWebSocketSerializer,
WebSocketSerializer)
from freqtrade.rpc.api_server.ws.types import WebSocketType
from freqtrade.rpc.api_server.ws_schemas import WSMessageSchemaType
logger = logging.getLogger(__name__)
class WebSocketChannel:
"""
Object to help facilitate managing a websocket connection
"""
def __init__(
self,
websocket: WebSocketType,
channel_id: Optional[str] = None,
serializer_cls: Type[WebSocketSerializer] = HybridJSONWebSocketSerializer,
send_throttle: float = 0.01
):
self.channel_id = channel_id if channel_id else uuid4().hex[:8]
self._websocket = WebSocketProxy(websocket)
# Internal event to signify a closed websocket
self._closed = asyncio.Event()
# The async tasks created for the channel
self._channel_tasks: List[asyncio.Task] = []
# Deque for average send times
self._send_times: Deque[float] = deque([], maxlen=10)
# High limit defaults to 3 to start
self._send_high_limit = 3
self._send_throttle = send_throttle
# The subscribed message types
self._subscriptions: List[str] = []
# Wrap the WebSocket in the Serializing class
self._wrapped_ws = serializer_cls(self._websocket)
def __repr__(self):
return f"WebSocketChannel({self.channel_id}, {self.remote_addr})"
@property
def METHOD_NAME(self):
return self._websocket.METHOD_NAME
@property
def remote_addr(self):
return self._websocket.remote_addr
@property
def avg_send_time(self):
return sum(self._send_times) / len(self._send_times)
def _calc_send_limit(self):
"""
Calculate the send high limit for this channel
"""
# Only update if we have enough data
if len(self._send_times) == self._send_times.maxlen:
# At least 1s or twice the average of send times, with a
# maximum of 3 seconds per message
self._send_high_limit = min(max(self.avg_send_time * 2, 1), 3)
async def send(
self,
message: Union[WSMessageSchemaType, Dict[str, Any]],
timeout: bool = False
):
"""
Send a message on the wrapped websocket. If the sending
takes too long, it will raise a TimeoutError and
disconnect the connection.
:param message: The message to send
:param timeout: Enforce send high limit, defaults to False
"""
try:
_ = time.time()
# If the send times out, it will raise
# a TimeoutError and bubble up to the
# message_endpoint to close the connection
await asyncio.wait_for(
self._wrapped_ws.send(message),
timeout=self._send_high_limit if timeout else None
)
total_time = time.time() - _
self._send_times.append(total_time)
self._calc_send_limit()
except asyncio.TimeoutError:
logger.info(f"Connection for {self} timed out, disconnecting")
raise
# Explicitly give control back to event loop as
# websockets.send does not
# Also throttles how fast we send
await asyncio.sleep(self._send_throttle)
async def recv(self):
"""
Receive a message on the wrapped websocket
"""
return await self._wrapped_ws.recv()
async def ping(self):
"""
Ping the websocket
"""
return await self._websocket.ping()
async def accept(self):
"""
Accept the underlying websocket connection,
if the connection has been closed before we can
accept, just close the channel.
"""
try:
return await self._websocket.accept()
except RuntimeError:
await self.close()
async def close(self):
"""
Close the WebSocketChannel
"""
self._closed.set()
try:
await self._websocket.close()
except RuntimeError:
pass
def is_closed(self) -> bool:
"""
Closed flag
"""
return self._closed.is_set()
def set_subscriptions(self, subscriptions: List[str] = []) -> None:
"""
Set which subscriptions this channel is subscribed to
:param subscriptions: List of subscriptions, List[str]
"""
self._subscriptions = subscriptions
def subscribed_to(self, message_type: str) -> bool:
"""
Check if this channel is subscribed to the message_type
:param message_type: The message type to check
"""
return message_type in self._subscriptions
async def run_channel_tasks(self, *tasks, **kwargs):
"""
Create and await on the channel tasks unless an exception
was raised, then cancel them all.
        :param tasks: All coros or tasks to be run concurrently
:param **kwargs: Any extra kwargs to pass to gather
"""
if not self.is_closed():
# Wrap the coros into tasks if they aren't already
self._channel_tasks = [
task if isinstance(task, asyncio.Task) else asyncio.create_task(task)
for task in tasks
]
try:
return await asyncio.gather(*self._channel_tasks, **kwargs)
except Exception:
# If an exception occurred, cancel the rest of the tasks
await self.cancel_channel_tasks()
async def cancel_channel_tasks(self):
"""
Cancel and wait on all channel tasks
"""
for task in self._channel_tasks:
task.cancel()
# Wait for tasks to finish cancelling
try:
await task
except (
asyncio.CancelledError,
asyncio.TimeoutError,
WebSocketDisconnect,
ConnectionClosed,
RuntimeError
):
pass
except Exception as e:
logger.info(f"Encountered unknown exception: {e}", exc_info=e)
self._channel_tasks = []
async def __aiter__(self):
"""
Generator for received messages
"""
# We can not catch any errors here as websocket.recv is
# the first to catch any disconnects and bubble it up
# so the connection is garbage collected right away
while not self.is_closed():
yield await self.recv()
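# Hypothetical usage sketch (the handler body is an illustrative assumption):
#     async with create_channel(websocket) as channel:
#         await channel.send({"type": "whitelist", "data": []})
#         async for message in channel:
#             ...  # handle incoming messages until the peer disconnects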
@asynccontextmanager
async def create_channel(
websocket: WebSocketType,
**kwargs
) -> AsyncIterator[WebSocketChannel]:
"""
Context manager for safely opening and closing a WebSocketChannel
"""
channel = WebSocketChannel(websocket, **kwargs)
try:
await channel.accept()
logger.info(f"Connected to channel - {channel}")
yield channel
finally:
await channel.close()
logger.info(f"Disconnected from channel - {channel}") |
298,742 | test distr slice axis | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import mxnet as mx
import numpy as np
import pytest
import gluonts.mx.distribution.bijection as bij
from gluonts.mx.distribution import (
Beta,
Binned,
Dirichlet,
Gamma,
Laplace,
MixtureDistribution,
NegativeBinomial,
PiecewiseLinear,
Poisson,
StudentT,
TransformedDistribution,
Uniform,
)
from gluonts.mx.distribution.box_cox_transform import BoxCoxTransform
from gluonts.mx.distribution.gaussian import Gaussian
@pytest.mark.parametrize(
"slice_axis_args, expected_axis_length",
[[(0, 0, None), 3], [(0, 1, 3), 2], [(1, -1, None), 1]],
)
@pytest.mark.parametrize(
"distr",
[
Gaussian(
mu=mx.nd.random.normal(shape=(3, 4)),
sigma=mx.nd.random.uniform(shape=(3, 4)),
)
],
)
def METHOD_NAME(distr, slice_axis_args, expected_axis_length):
axis, begin, end = slice_axis_args
distr_sliced = distr.slice_axis(axis, begin, end)
assert distr_sliced.batch_shape[axis] == expected_axis_length
class SliceHelper:
def __getitem__(self, item):
return item
sh = SliceHelper()
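# sh merely captures slice expressions for reuse below, e.g. sh[1:2] == slice(1, 2)
# and sh[1, :] == (1, slice(None)); these are later applied to distribution objects.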
BATCH_SHAPE = (3, 4, 5)
DISTRIBUTIONS_WITH_QUANTILE_FUNCTION = (Gaussian, Uniform, Laplace, Binned)
@pytest.mark.parametrize(
"distr",
[
TransformedDistribution(
Gaussian(
mu=mx.nd.random.uniform(shape=BATCH_SHAPE),
sigma=mx.nd.ones(shape=BATCH_SHAPE),
),
[
bij.AffineTransformation(
scale=1e-1 + mx.nd.random.uniform(shape=BATCH_SHAPE)
),
bij.softrelu,
],
),
Binned(
bin_log_probs=mx.nd.uniform(shape=BATCH_SHAPE + (23,)),
bin_centers=mx.nd.array(np.logspace(-1, 1, 23))
+ mx.nd.zeros(BATCH_SHAPE + (23,)),
),
TransformedDistribution(
Binned(
bin_log_probs=mx.nd.uniform(shape=BATCH_SHAPE + (23,)),
bin_centers=mx.nd.array(np.logspace(-1, 1, 23))
+ mx.nd.zeros(BATCH_SHAPE + (23,)),
),
[
bij.AffineTransformation(
scale=1e-1 + mx.nd.random.uniform(shape=BATCH_SHAPE)
),
bij.softrelu,
],
),
Gaussian(
mu=mx.nd.zeros(shape=BATCH_SHAPE),
sigma=mx.nd.ones(shape=BATCH_SHAPE),
),
Gamma(
alpha=mx.nd.ones(shape=BATCH_SHAPE),
beta=mx.nd.ones(shape=BATCH_SHAPE),
),
Beta(
alpha=0.5 * mx.nd.ones(shape=BATCH_SHAPE),
beta=0.5 * mx.nd.ones(shape=BATCH_SHAPE),
),
StudentT(
mu=mx.nd.zeros(shape=BATCH_SHAPE),
sigma=mx.nd.ones(shape=BATCH_SHAPE),
nu=mx.nd.ones(shape=BATCH_SHAPE),
),
Dirichlet(alpha=mx.nd.ones(shape=BATCH_SHAPE)),
Laplace(
mu=mx.nd.zeros(shape=BATCH_SHAPE), b=mx.nd.ones(shape=BATCH_SHAPE)
),
NegativeBinomial(
mu=mx.nd.zeros(shape=BATCH_SHAPE),
alpha=mx.nd.ones(shape=BATCH_SHAPE),
),
Poisson(rate=mx.nd.ones(shape=BATCH_SHAPE)),
Uniform(
low=-mx.nd.ones(shape=BATCH_SHAPE),
high=mx.nd.ones(shape=BATCH_SHAPE),
),
PiecewiseLinear(
gamma=mx.nd.ones(shape=BATCH_SHAPE),
slopes=mx.nd.ones(shape=(3, 4, 5, 10)),
knot_spacings=mx.nd.ones(shape=(3, 4, 5, 10)) / 10,
),
MixtureDistribution(
mixture_probs=mx.nd.stack(
0.2 * mx.nd.ones(shape=BATCH_SHAPE),
0.8 * mx.nd.ones(shape=BATCH_SHAPE),
axis=-1,
),
components=[
Gaussian(
mu=mx.nd.zeros(shape=BATCH_SHAPE),
sigma=mx.nd.ones(shape=BATCH_SHAPE),
),
StudentT(
mu=mx.nd.zeros(shape=BATCH_SHAPE),
sigma=mx.nd.ones(shape=BATCH_SHAPE),
nu=mx.nd.ones(shape=BATCH_SHAPE),
),
],
),
TransformedDistribution(
StudentT(
mu=mx.nd.zeros(shape=BATCH_SHAPE),
sigma=mx.nd.ones(shape=BATCH_SHAPE),
nu=mx.nd.ones(shape=BATCH_SHAPE),
),
[
bij.AffineTransformation(
scale=1e-1 + mx.nd.random.uniform(shape=BATCH_SHAPE)
)
],
),
TransformedDistribution(
Uniform(
low=mx.nd.zeros(shape=BATCH_SHAPE),
high=mx.nd.ones(shape=BATCH_SHAPE),
),
[
BoxCoxTransform(
lambda_1=mx.nd.ones(shape=BATCH_SHAPE),
lambda_2=mx.nd.zeros(shape=BATCH_SHAPE),
)
],
),
],
)
@pytest.mark.parametrize(
"slice_item", [sh[1:2], sh[1, :], sh[:, 0], sh[0, -1]]
)
def test_slice_axis_results(distr, slice_item):
s = distr.sample().asnumpy()
sliced = distr[slice_item]
s_sliced = sliced.sample().asnumpy()
assert s_sliced.shape == s[slice_item].shape
y = np.random.uniform(size=BATCH_SHAPE)
lp_expected = distr.loss(mx.nd.array(y)).asnumpy()[slice_item]
lp_actual = sliced.loss(mx.nd.array(y[slice_item])).asnumpy()
assert np.allclose(lp_actual, lp_expected)
tmp = (
distr.base_distribution
if isinstance(distr, TransformedDistribution)
else distr
)
has_quantile_fn = isinstance(tmp, DISTRIBUTIONS_WITH_QUANTILE_FUNCTION)
if has_quantile_fn:
for ql in [0.01, 0.1, 0.5, 0.9, 0.99]:
qs_actual = sliced.quantile(mx.nd.array([ql])).asnumpy()[0]
qs_expected = distr.quantile(mx.nd.array([ql])).asnumpy()[0][
slice_item
]
assert np.allclose(qs_actual, qs_expected) |
298,743 | touch | from contextlib import contextmanager
import random
import pylibmc
# project
import ddtrace
from ddtrace import config
from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY
from ddtrace.constants import SPAN_KIND
from ddtrace.constants import SPAN_MEASURED_KEY
from ddtrace.contrib.pylibmc.addrs import parse_addresses
from ddtrace.ext import SpanKind
from ddtrace.ext import SpanTypes
from ddtrace.ext import db
from ddtrace.ext import memcached
from ddtrace.ext import net
from ddtrace.internal.compat import Iterable
from ddtrace.internal.constants import COMPONENT
from ddtrace.internal.logger import get_logger
from ddtrace.internal.schema import schematize_cache_operation
from ddtrace.internal.schema import schematize_service_name
from ddtrace.vendor.wrapt import ObjectProxy
# Original Client class
_Client = pylibmc.Client
log = get_logger(__name__)
class TracedClient(ObjectProxy):
    """TracedClient is a proxy for a pylibmc.Client that times its network operations."""
def __init__(self, client=None, service=memcached.SERVICE, tracer=None, *args, **kwargs):
"""Create a traced client that wraps the given memcached client."""
# The client instance/service/tracer attributes are kept for compatibility
# with the old interface: TracedClient(client=pylibmc.Client(['localhost:11211']))
# TODO(Benjamin): Remove these in favor of patching.
if not isinstance(client, _Client):
# We are in the patched situation, just pass down all arguments to the pylibmc.Client
# Note that, in that case, client isn't a real client (just the first argument)
client = _Client(client, *args, **kwargs)
else:
            log.warning(
                "TracedClient instantiation is deprecated and will be removed "
                "in future versions (0.6.0). Use patching instead (see the docs)."
)
super(TracedClient, self).__init__(client)
schematized_service = schematize_service_name(service)
pin = ddtrace.Pin(service=schematized_service, tracer=tracer)
pin.onto(self)
# attempt to collect the pool of urls this client talks to
try:
self._addresses = parse_addresses(client.addresses)
except Exception:
log.debug("error setting addresses", exc_info=True)
def clone(self, *args, **kwargs):
# rewrap new connections.
cloned = self.__wrapped__.clone(*args, **kwargs)
traced_client = TracedClient(cloned)
pin = ddtrace.Pin.get_from(self)
if pin:
pin.clone().onto(traced_client)
return traced_client
def get(self, *args, **kwargs):
return self._trace_cmd("get", *args, **kwargs)
def set(self, *args, **kwargs):
return self._trace_cmd("set", *args, **kwargs)
def delete(self, *args, **kwargs):
return self._trace_cmd("delete", *args, **kwargs)
def gets(self, *args, **kwargs):
return self._trace_cmd("gets", *args, **kwargs)
def METHOD_NAME(self, *args, **kwargs):
return self._trace_cmd("touch", *args, **kwargs)
def cas(self, *args, **kwargs):
return self._trace_cmd("cas", *args, **kwargs)
def incr(self, *args, **kwargs):
return self._trace_cmd("incr", *args, **kwargs)
def decr(self, *args, **kwargs):
return self._trace_cmd("decr", *args, **kwargs)
def append(self, *args, **kwargs):
return self._trace_cmd("append", *args, **kwargs)
def prepend(self, *args, **kwargs):
return self._trace_cmd("prepend", *args, **kwargs)
def get_multi(self, *args, **kwargs):
return self._trace_multi_cmd("get_multi", *args, **kwargs)
def set_multi(self, *args, **kwargs):
return self._trace_multi_cmd("set_multi", *args, **kwargs)
def delete_multi(self, *args, **kwargs):
return self._trace_multi_cmd("delete_multi", *args, **kwargs)
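    # Hypothetical usage sketch: with ddtrace patching enabled (e.g. ddtrace.patch(pylibmc=True),
    # an assumption about the usual setup), ordinary calls such as client.get_multi(["k1", "k2"])
    # pass through these wrappers and emit one traced span per command.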
    def _trace_cmd(self, method_name, *args, **kwargs):
        """Trace the execution of the wrapped method with the given name and
        tag the resulting span with the first argument (typically the cache key).
"""
method = getattr(self.__wrapped__, method_name)
with self._span(method_name) as span:
if span and args:
span.set_tag_str(memcached.QUERY, "%s %s" % (method_name, args[0]))
if method_name == "get":
result = method(*args, **kwargs)
span.set_metric(db.ROWCOUNT, 1 if result else 0)
return result
elif method_name == "gets":
result = method(*args, **kwargs)
# returns a tuple object that may be (None, None)
span.set_metric(db.ROWCOUNT, 1 if isinstance(result, Iterable) and len(result) > 0 and result[0] else 0)
return result
else:
return method(*args, **kwargs)
def _trace_multi_cmd(self, method_name, *args, **kwargs):
"""trace the execution of the multi command with the given name."""
method = getattr(self.__wrapped__, method_name)
with self._span(method_name) as span:
pre = kwargs.get("key_prefix")
if span and pre:
span.set_tag_str(memcached.QUERY, "%s %s" % (method_name, pre))
if method_name == "get_multi":
result = method(*args, **kwargs)
# returns mapping of key -> value if key exists, but does not include a missing key. Empty result = {}
span.set_metric(
db.ROWCOUNT, sum(1 for doc in result if doc) if result and isinstance(result, Iterable) else 0
)
return result
else:
return method(*args, **kwargs)
@contextmanager
def _no_span(self):
yield None
def _span(self, cmd_name):
"""Return a span timing the given command."""
pin = ddtrace.Pin.get_from(self)
if not pin or not pin.enabled():
return self._no_span()
span = pin.tracer.trace(
schematize_cache_operation("memcached.cmd", cache_provider="memcached"),
service=pin.service,
resource=cmd_name,
span_type=SpanTypes.CACHE,
)
span.set_tag_str(COMPONENT, config.pylibmc.integration_name)
span.set_tag_str(db.SYSTEM, memcached.DBMS_NAME)
# set span.kind to the type of operation being performed
span.set_tag_str(SPAN_KIND, SpanKind.CLIENT)
span.set_tag(SPAN_MEASURED_KEY)
try:
self._tag_span(span)
except Exception:
log.debug("error tagging span", exc_info=True)
return span
def _tag_span(self, span):
# FIXME[matt] the host selection is buried in c code. we can't tell what it's actually
# using, so fallback to randomly choosing one. can we do better?
if self._addresses:
_, host, port, _ = random.choice(self._addresses) # nosec
span.set_tag_str(net.TARGET_HOST, host)
span.set_tag(net.TARGET_PORT, port)
# set analytics sample rate
span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.pylibmc.get_analytics_sample_rate()) |
298,744 | get source | # pylint: disable=redefined-outer-name
# pylint: disable=unused-argument
import contextlib
import logging
from pathlib import Path
from typing import AsyncIterator, Iterable
from uuid import uuid4
import aiodocker
import pytest
import simcore_service_agent
from aiodocker.volumes import DockerVolume
from models_library.basic_types import BootModeEnum
from models_library.services import RunID
from moto.server import ThreadedMotoServer
from pydantic import HttpUrl, parse_obj_as
from pytest import LogCaptureFixture, MonkeyPatch
from settings_library.r_clone import S3Provider
from simcore_service_agent.core.settings import ApplicationSettings
pytestmark = pytest.mark.asyncio
pytest_plugins = [
"pytest_simcore.aws_services",
"pytest_simcore.repository_paths",
]
@pytest.fixture(scope="session")
def project_slug_dir(osparc_simcore_root_dir: Path) -> Path:
# fixtures in pytest_simcore.environs
service_folder = osparc_simcore_root_dir / "services" / "agent"
assert service_folder.exists()
assert any(service_folder.glob("src/simcore_service_agent"))
return service_folder
@pytest.fixture(scope="session")
def installed_package_dir() -> Path:
dirpath = Path(simcore_service_agent.__file__).resolve().parent
assert dirpath.exists()
return dirpath
@pytest.fixture
def swarm_stack_name() -> str:
return "test-simcore"
@pytest.fixture
def study_id() -> str:
return f"{uuid4()}"
@pytest.fixture
def node_uuid() -> str:
return f"{uuid4()}"
@pytest.fixture
def run_id() -> RunID:
return RunID.create()
@pytest.fixture
def bucket() -> str:
return f"test-bucket-{uuid4()}"
@pytest.fixture
def used_volume_path(tmp_path: Path) -> Path:
return tmp_path / "used_volume"
@pytest.fixture
def unused_volume_path(tmp_path: Path) -> Path:
return tmp_path / "unused_volume"
def METHOD_NAME(run_id: str, node_uuid: str, volume_path: Path) -> str:
reversed_path = f"{volume_path}"[::-1].replace("/", "_")
return f"dyv_{run_id}_{node_uuid}_{reversed_path}"
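# e.g. METHOD_NAME("run1", "node1", Path("/tmp/used_volume")) returns
# "dyv_run1_node1_emulov_desu_pmt_" (the path is reversed and "/" becomes "_");
# the argument values above are illustrative.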
@pytest.fixture
async def unused_volume(
swarm_stack_name: str,
study_id: str,
node_uuid: str,
run_id: RunID,
unused_volume_path: Path,
) -> AsyncIterator[DockerVolume]:
async with aiodocker.Docker() as docker_client:
source = METHOD_NAME(run_id, node_uuid, unused_volume_path)
volume = await docker_client.volumes.create(
{
"Name": source,
"Labels": {
"node_uuid": node_uuid,
"run_id": run_id,
"source": source,
"study_id": study_id,
"swarm_stack_name": swarm_stack_name,
"user_id": "1",
},
}
)
# attach to volume and create some files!!!
yield volume
with contextlib.suppress(aiodocker.DockerError):
await volume.delete()
@pytest.fixture
async def used_volume(
swarm_stack_name: str,
study_id: str,
node_uuid: str,
run_id: RunID,
used_volume_path: Path,
) -> AsyncIterator[DockerVolume]:
async with aiodocker.Docker() as docker_client:
source = METHOD_NAME(run_id, node_uuid, used_volume_path)
volume = await docker_client.volumes.create(
{
"Name": source,
"Labels": {
"node_uuid": node_uuid,
"run_id": run_id,
"source": source,
"study_id": study_id,
"swarm_stack_name": swarm_stack_name,
"user_id": "1",
},
}
)
container = await docker_client.containers.run(
config={
"Cmd": ["/bin/ash", "-c", "sleep 10000"],
"Image": "alpine:latest",
"HostConfig": {"Binds": [f"{volume.name}:{used_volume_path}"]},
},
name=f"using_volume_{volume.name}",
)
await container.start()
yield volume
await container.delete(force=True)
await volume.delete()
@pytest.fixture
def env( # noqa: PT004
monkeypatch: MonkeyPatch,
mocked_s3_server_url: HttpUrl,
bucket: str,
swarm_stack_name: str,
) -> None:
mock_dict = {
"LOGLEVEL": "DEBUG",
"SC_BOOT_MODE": BootModeEnum.DEBUG,
"AGENT_VOLUMES_CLEANUP_TARGET_SWARM_STACK_NAME": swarm_stack_name,
"AGENT_VOLUMES_CLEANUP_S3_ENDPOINT": mocked_s3_server_url,
"AGENT_VOLUMES_CLEANUP_S3_ACCESS_KEY": "xxx",
"AGENT_VOLUMES_CLEANUP_S3_SECRET_KEY": "xxx",
"AGENT_VOLUMES_CLEANUP_S3_BUCKET": bucket,
"AGENT_VOLUMES_CLEANUP_S3_PROVIDER": S3Provider.MINIO,
}
for key, value in mock_dict.items():
monkeypatch.setenv(key, value)
@pytest.fixture
def settings(env: None) -> ApplicationSettings:
return ApplicationSettings.create_from_envs()
@pytest.fixture()
def caplog_info_debug(caplog: LogCaptureFixture) -> Iterable[LogCaptureFixture]:
with caplog.at_level(logging.DEBUG):
yield caplog
@pytest.fixture(scope="module")
def mocked_s3_server_url(mocked_s3_server: ThreadedMotoServer) -> HttpUrl:
# pylint: disable=protected-access
return parse_obj_as(
HttpUrl,
f"http://{mocked_s3_server._ip_address}:{mocked_s3_server._port}", # noqa: SLF001
) |
298,745 | get editable release | # Copyright 2019 YugaByte, Inc. and Contributors
#
# Licensed under the Polyform Free Trial License 1.0.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# https://github.com/YugaByte/yugabyte-db/blob/master/licenses/POLYFORM-FREE-TRIAL-LICENSE-1.0.0.txt
import os
import requests
import json
from ybops.common.exceptions import YBOpsRuntimeError
class Replicated(object):
    """Fetches existing release information from Replicated and promotes new releases."""
    REPLICATED_VENDOR_API = 'https://api.replicated.com/vendor/v1/app/'
def __init__(self):
api_token = os.environ.get('REPLICATED_API_TOKEN')
self.app_id = os.environ.get('REPLICATED_APP_ID')
assert api_token is not None, 'Environment Variable REPLICATED_API_TOKEN not set.'
assert self.app_id is not None, 'Environment Variable REPLICATED_APP_ID not set.'
self.auth_header = {'authorization': api_token, 'content-type': 'application/json'}
self.releases = self._get_releases()
self.current_release_sequence = None
def _get_request_endpoint(self, request_type):
base_url = os.path.join(self.REPLICATED_VENDOR_API, self.app_id)
if request_type == 'LIST':
return os.path.join(base_url, 'releases', 'paged')
elif request_type == 'CREATE':
return os.path.join(base_url, 'release')
elif request_type == 'UPDATE':
return os.path.join(base_url, str(self.current_release_sequence), 'raw')
elif request_type == 'PROMOTE':
return os.path.join(base_url, str(self.current_release_sequence), 'promote')
else:
raise TypeError('Invalid request type')
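    # For illustration, with an app_id of "abc123" the endpoints resolve to:
    #   LIST    -> .../app/abc123/releases/paged
    #   CREATE  -> .../app/abc123/release
    #   UPDATE  -> .../app/abc123/<sequence>/raw
    #   PROMOTE -> .../app/abc123/<sequence>/promote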
def _get_releases(self):
params = {'start': 0, 'count': 1}
# Fetch the current releases and channel information
response = requests.get(self._get_request_endpoint('LIST'),
headers=self.auth_header, json=params)
response.raise_for_status()
return json.loads(response.text)['releases']
def _get_active_channel(self, channel_name='Alpha'):
active_channel = None
for release in self.releases:
active_channel = next(iter([channel for channel in release['ActiveChannels']
if channel['Name'] == channel_name]), None)
if active_channel:
break
return active_channel
def _get_tagged_channels(self, tag):
tagged_channels = []
for release in self.releases:
tagged_channels = [channel for channel in release['ActiveChannels']
if channel['ReleaseLabel'] == tag]
if tagged_channels:
break
return tagged_channels
def _get_or_create_release(self, tag):
draft_release = self.METHOD_NAME()
# If we already have a release version created and in Editable state, just use that.
if draft_release:
return draft_release
# Create a new release in replicated
params = {'name': tag, 'source': 'latest', 'sourcedata': 0}
response = requests.post(self._get_request_endpoint('CREATE'),
headers=self.auth_header, json=params)
response.raise_for_status()
return json.loads(response.text)
def METHOD_NAME(self):
return next(iter([release for release in self.releases
if release['Editable']]), None)
def publish_release(self, tag, raw_data):
release = self._get_or_create_release(tag)
self.current_release_sequence = release['Sequence']
# In case of update release we publish the yaml in the body so we need to make the put
# request with content-type text/plain
auth_header = self.auth_header.copy()
auth_header['content-type'] = 'text/plain'
response = requests.put(self._get_request_endpoint('UPDATE'),
headers=auth_header, data=raw_data)
response.raise_for_status()
def promote_release(self, tag, channel_name='Alpha', release_notes=[]):
tagged_channels = self._get_tagged_channels(tag)
active_channel = self._get_active_channel(channel_name)
if any(tagged_channels):
is_promoted = any([channel for channel in tagged_channels
if channel['Name'] == channel_name])
self.current_release_sequence = tagged_channels[0]['ReleaseSequence']
if is_promoted:
raise YBOpsRuntimeError(
'Release {} already promoted for channel {}'.format(tag, channel_name))
elif self.current_release_sequence is None:
current_release = self.METHOD_NAME()
self.current_release_sequence = current_release['Sequence']
active_channel_id = active_channel['Id']
params = {'channels': [active_channel_id], 'label': tag,
'release_notes': '\n'.join(release_notes), 'required': False}
# Promote the release for the provided channel
response = requests.post(self._get_request_endpoint('PROMOTE'),
headers=self.auth_header, json=params)
response.raise_for_status() |
298,746 | get result | import pytest
import logging
from cobaya.model import get_model
from cobaya.theory import Theory
from cobaya.likelihood import Likelihood
from cobaya.log import LoggedError, NoLogging
from cobaya.typing import InputDict
debug = False
# Aderived = 1
# Aout = [Ain]
# Bpar = 3
# Bout = (3, [Ain])
# Bderived = 10
# Cout = Bout
class A(Theory):
def get_requirements(self):
return {'Ain'}
def calculate(self, state, want_derived=True, **params_values_dict):
state['Aout'] = [self.provider.get_param('Ain')]
if want_derived:
state['derived'] = {'Aderived': 1}
def get_Aresult(self):
return self.current_state['Aout']
def get_can_provide_params(self):
return ['Aderived']
class B(Theory):
params = {'Bpar': None, 'Bderived': {'derived': True}}
def get_requirements(self):
return {'Aderived', 'Aresult'}
def calculate(self, state, want_derived=True, **params_values_dict):
state['Bout'] = (self.provider.get_param('Aderived') * params_values_dict['Bpar']
, self.provider.get_Aresult())
if want_derived:
state['derived'] = {'Bderived': 10}
def get_Bout(self):
return self.current_state['Bout']
class Balt(Theory):
params = {'Bpar': None, 'Aderived': None}
def get_requirements(self):
return {'Aresult'}
def calculate(self, state, want_derived=True, **params_values_dict):
state['Bout'] = (params_values_dict['Aderived'] * params_values_dict['Bpar']
, self.provider.get_Aresult())
def get_Bout(self):
return self.current_state['Bout']
class B2(Theory):
def get_requirements(self):
return {'Aderived', 'Aresult', 'Bpar'}
def calculate(self, state, want_derived=True, **params_values_dict):
state['Bout'] = (self.provider.get_param('Aderived') * params_values_dict['Bpar'],
self.provider.get_Aresult())
if want_derived:
state['derived'] = {'Bderived': 10}
def get_Bout(self):
return self.current_state['Bout']
class A2(Theory): # circular
def get_requirements(self):
return ('Ain', None), ('Bout', None)
def get_can_provide_params(self):
return ['Aderived', 'Aresult']
class C(Theory): # ambiguous
def get_requirements(self):
return {'Aresult'}
def calculate(self, state, want_derived=True, **params_values_dict):
state['Cout'] = (3, [5])
def get_Bout(self):
return self.current_state['Cout']
class Like(Likelihood):
def get_requirements(self):
return {'Bout'}
def calculate(self, state, want_derived=True, **params_values_dict):
res = self.provider.get_Bout()
state["logp"] = res[0] + res[1][0]
info: InputDict = {'likelihood': {'like': Like},
'params': {'Bpar': 3, 'Ain': 5},
'debug': debug}
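# With the fixed inputs above, Bout = (Aderived * Bpar, [Ain]) = (1 * 3, [5]),
# so the expected log-likelihood is 3 + 5 = 8, which _test_loglike asserts below.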
def _test_loglike(theories):
for th in theories, theories[::-1]:
info['theory'] = dict(th)
model = get_model(info)
assert model.loglikes({})[0] == 8, "test loglike failed for %s" % th
assert model.loglikes({}, return_derived=False,
cached=False) == 8, "non-derived loglike failed for %s" % th
def test_dependencies():
theories = [('A', A), ('B', B)]
_test_loglike(theories)
_test_loglike([('A', A), ('B', B2)])
_test_loglike([('A', A), ('B', Balt)])
info['params']['Aderived'] = None
_test_loglike([('A', A), ('B', Balt)])
info['params']['Aderived'] = {'derived': True}
_test_loglike([('A', A), ('B', Balt)])
del info['params']['Aderived']
info['params']['Bderived'] = {'derived': True}
info['theory'] = dict(theories)
model = get_model(info)
assert model.loglikes({})[1] == [10], "failed"
info['params'].pop('Bderived')
with pytest.raises(LoggedError) as e, NoLogging(logging.ERROR):
_test_loglike([('A', A2), ('B', B)])
assert "Circular dependency" in str(e.value)
_test_loglike([('A', {'external': A}), ('B', B2)])
with pytest.raises(LoggedError) as e, NoLogging(logging.ERROR):
_test_loglike([('A', A), ('B', B2), ('C', C)])
assert "Bout is provided by more than one component" in str(e.value)
_test_loglike([('A', A), ('B', B2), ('C', {'external': C, 'provides': 'Bout'})])
_test_loglike([('A', A), ('B', {'external': B2, 'provides': ['Bout']}),
('C', {'external': C})])
with pytest.raises(LoggedError) as e, NoLogging(logging.ERROR):
_test_loglike([('A', A), ('B', {'external': B2, 'provides': ['Bout']}),
('C', {'external': C, 'provides': ['Bout']})])
assert "more than one component provides Bout" in str(e.value)
inf: InputDict = info.copy()
inf['params'] = info['params'].copy()
inf['params']['notused'] = [1, 10, 2, 5, 1]
inf['theory'] = dict(theories)
with pytest.raises(LoggedError) as e, NoLogging(logging.ERROR):
get_model(inf)
assert "Could not find anything to use input parameter" in str(e.value)
inf['params']['notused'] = [1, 10, 2]
with pytest.raises(LoggedError) as e, NoLogging(logging.ERROR):
get_model(inf)
assert "Parameter info length not valid" in str(e.value)
# test conditional requirements
class D(Theory):
def calculate(self, state, want_derived=True, **params_values_dict):
state['D'] = self.provider.get_Aresult()[0] * 2
def METHOD_NAME(self, result_name, **kwargs):
if result_name == 'Dresult':
return self.current_state['D']
def get_can_provide(self):
return ['Dresult']
def must_provide(self, **must_provide):
if 'Dresult' in must_provide:
return {'Aresult'}
class E(Theory):
def calculate(self, state, want_derived=True, **params_values_dict):
state['E'] = self.provider.METHOD_NAME('Dresult') * 2
def get_Eresult(self):
return self.current_state['E']
def must_provide(self, **must_provide):
if 'Eresult' in must_provide:
return {'Dresult'}
class Like2(Likelihood):
def get_requirements(self):
return {'Dresult'}
def calculate(self, state, want_derived=True, **params_values_dict):
state["logp"] = self.provider.METHOD_NAME('Dresult') * 2
class Like3(Likelihood):
def get_requirements(self):
return {'Eresult'}
def calculate(self, state, want_derived=True, **params_values_dict):
state["logp"] = self.provider.get_Eresult()
# circular
class F(Theory):
def get_Fresult(self):
pass
def must_provide(self, **must_provide):
if 'Fresult' in must_provide:
return {'LikeDerived'}
class Like4(Likelihood):
def get_requirements(self):
return {'Fresult'}
def get_LikeDerived(self):
pass
info2: InputDict = {'likelihood': {'like': Like2},
'params': {'Ain': 5},
'debug': debug, 'stop_at_error': True}
def _test_loglike2(theories):
for th in theories, theories[::-1]:
info2['theory'] = dict(th)
model = get_model(info2)
assert model.loglike()[0] == 20., "fail conditional dependency for %s" % th
def test_conditional_dependencies():
theories = [('A', A), ('D', D)]
_test_loglike2(theories)
theories = [('A', A), ('D', D), ('E', E)]
with pytest.raises(LoggedError) as e, NoLogging(logging.ERROR):
_test_loglike2(theories)
assert "seems not to depend on any parameters" in str(e.value)
info2['likelihood']['like'] = Like3
theories = [('A', A), ('D', D), ('E', E)]
_test_loglike2(theories)
info2['likelihood']['like'] = Like4
theories = [('A', A), ('E', E), ('F', F), ('D', D)]
with pytest.raises(LoggedError) as e, NoLogging(logging.ERROR):
_test_loglike2(theories)
assert "Circular dependency" in str(e.value) |
298,747 | application runner | # Copyright (c) 2015 The University of Manchester
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
from spinn_utilities.log import FormatAdapter
from spinnman.messages.scp.enums import Signal
from spinnman.model.enums import ExecutableType
from spinn_front_end_common.data import FecDataView
from spinn_front_end_common.utilities.exceptions import ConfigurationException
from spinn_front_end_common.utilities.constants import (
MICRO_TO_MILLISECOND_CONVERSION)
SAFETY_FINISH_TIME = 0.1
logger = FormatAdapter(logging.getLogger(__name__))
def METHOD_NAME(runtime, time_threshold, run_until_complete):
"""
Ensures all cores are initialised correctly, run, and complete
successfully.
:param int runtime:
:param int time_threshold:
:param bool run_until_complete:
:raises ConfigurationException:
"""
runner = _ApplicationRunner()
# pylint: disable=protected-access
runner._run(runtime, time_threshold, run_until_complete)
class _ApplicationRunner(object):
"""
Ensures all cores are initialised correctly, run, and complete
successfully.
"""
__slots__ = ["__txrx", "__app_id"]
def __init__(self):
self.__txrx = FecDataView.get_transceiver()
self.__app_id = FecDataView.get_app_id()
# Wraps up as a PACMAN algorithm
def _run(
self, runtime, time_threshold, run_until_complete=False):
"""
:param int runtime:
:param int time_threshold:
:param bool run_until_complete:
:return: Number of synchronisation changes
:rtype: int
:raises ConfigurationException:
"""
# pylint: disable=too-many-arguments
logger.info("*** Running simulation... *** ")
# wait for all cores to be ready
self._wait_for_start()
buffer_manager = FecDataView.get_buffer_manager()
notification_interface = FecDataView.get_notification_protocol()
# set the buffer manager into a resume state, so that if it had run
# before it will work again
buffer_manager.resume()
# everything is in sync0, so load the initial buffers
buffer_manager.load_initial_buffers()
# clear away any router diagnostics that were set while loading
# the applications
for chip in FecDataView.get_machine().chips:
self.__txrx.clear_router_diagnostic_counters(chip.x, chip.y)
# wait till external app is ready for us to start if required
notification_interface.wait_for_confirmation()
# set off the executables that are in sync state
# (sending to all is just as safe)
self._send_sync_signal()
# Send start notification to external applications
notification_interface.send_start_resume_notification()
if runtime is None and not run_until_complete:
# Do NOT stop the buffer manager at end; app is using it still
logger.info("Application is set to run forever; exiting")
else:
# Wait for the application to finish
self._run_wait(
run_until_complete, runtime, time_threshold)
# Send stop notification to external applications
notification_interface.send_stop_pause_notification()
def _run_wait(self, run_until_complete, runtime, time_threshold):
"""
:param bool run_until_complete:
:param int runtime:
:param float time_threshold:
"""
if not run_until_complete:
factor = (FecDataView.get_time_scale_factor() /
MICRO_TO_MILLISECOND_CONVERSION)
scaled_runtime = runtime * factor
time_to_wait = scaled_runtime + SAFETY_FINISH_TIME
logger.info(
"Application started; waiting {}s for it to stop",
time_to_wait)
time.sleep(time_to_wait)
self._wait_for_end(timeout=time_threshold)
else:
logger.info("Application started; waiting until finished")
self._wait_for_end()
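# Illustrative timing for the branch above (assuming `runtime` is given in
# milliseconds, which is an assumption here): runtime=5000 with a time scale
# factor of 1 gives 5000 * (1 / 1000) = 5.0 s, so the sleep lasts
# 5.0 + SAFETY_FINISH_TIME = 5.1 s before the end state is checked.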
def _wait_for_start(self, timeout=None):
"""
:param timeout:
:type timeout: float or None
"""
for ex_type, cores in FecDataView.get_executable_types().items():
self.__txrx.wait_for_cores_to_be_in_state(
cores, self.__app_id, ex_type.start_state, timeout=timeout)
def _send_sync_signal(self):
"""
Let apps that use the simulation interface or sync signals commence
running their main processing loops. This is done with a very fast
synchronisation barrier and a signal.
"""
executable_types = FecDataView.get_executable_types()
if (ExecutableType.USES_SIMULATION_INTERFACE in executable_types
or ExecutableType.SYNC in executable_types):
# locate all signals needed to set off executables
sync_signal = self._determine_simulation_sync_signals()
# fire all signals as required
self.__txrx.send_signal(self.__app_id, sync_signal)
def _wait_for_end(self, timeout=None):
"""
:param timeout:
:type timeout: float or None
"""
for ex_type, cores in FecDataView.get_executable_types().items():
self.__txrx.wait_for_cores_to_be_in_state(
cores, self.__app_id, ex_type.end_state, timeout=timeout)
def _determine_simulation_sync_signals(self):
"""
Determines the start states, and creates core subsets of the
states for further checks.
:return: the sync signal
:rtype: ~.Signal
:raises ConfigurationException:
"""
sync_signal = None
executable_types = FecDataView.get_executable_types()
if ExecutableType.USES_SIMULATION_INTERFACE in executable_types:
sync_signal = FecDataView.get_next_sync_signal()
# handle the sync states, but only send once if they work with
# the simulation interface requirement
if ExecutableType.SYNC in executable_types:
if sync_signal == Signal.SYNC1:
raise ConfigurationException(
"There can only be one SYNC signal per run. This is "
"because we cannot ensure the cores have not reached the "
"next SYNC state before we send the next SYNC. Resulting "
"in uncontrolled behaviour")
sync_signal = Signal.SYNC0
return sync_signal |
298,748 | iter content | import datetime
from _typeshed import Unused
from collections.abc import Callable, Iterator
from json import JSONDecoder
from typing import Any
from typing_extensions import Self
from urllib3 import exceptions as urllib3_exceptions, fields, filepost, util
from . import auth, cookies, exceptions, hooks, status_codes, utils
from .cookies import RequestsCookieJar
from .structures import CaseInsensitiveDict as CaseInsensitiveDict
default_hooks = hooks.default_hooks
HTTPBasicAuth = auth.HTTPBasicAuth
cookiejar_from_dict = cookies.cookiejar_from_dict
get_cookie_header = cookies.get_cookie_header
RequestField = fields.RequestField
encode_multipart_formdata = filepost.encode_multipart_formdata
parse_url = util.parse_url
DecodeError = urllib3_exceptions.DecodeError
ReadTimeoutError = urllib3_exceptions.ReadTimeoutError
ProtocolError = urllib3_exceptions.ProtocolError
LocationParseError = urllib3_exceptions.LocationParseError
HTTPError = exceptions.HTTPError
MissingSchema = exceptions.MissingSchema
InvalidURL = exceptions.InvalidURL
ChunkedEncodingError = exceptions.ChunkedEncodingError
ContentDecodingError = exceptions.ContentDecodingError
ConnectionError = exceptions.ConnectionError
StreamConsumedError = exceptions.StreamConsumedError
guess_filename = utils.guess_filename
get_auth_from_url = utils.get_auth_from_url
requote_uri = utils.requote_uri
stream_decode_response_unicode = utils.stream_decode_response_unicode
to_key_val_list = utils.to_key_val_list
parse_header_links = utils.parse_header_links
iter_slices = utils.iter_slices
guess_json_utf = utils.guess_json_utf
super_len = utils.super_len
to_native_string = utils.to_native_string
codes = status_codes.codes
REDIRECT_STATI: Any
DEFAULT_REDIRECT_LIMIT: Any
CONTENT_CHUNK_SIZE: Any
ITER_CHUNK_SIZE: Any
class RequestEncodingMixin:
@property
def path_url(self) -> str: ...
class RequestHooksMixin:
def register_hook(self, event, hook): ...
def deregister_hook(self, event, hook): ...
class Request(RequestHooksMixin):
hooks: Any
method: Any
url: Any
headers: Any
files: Any
data: Any
json: Any
params: Any
auth: Any
cookies: Any
def __init__(
self,
method=None,
url=None,
headers=None,
files=None,
data=None,
params=None,
auth=None,
cookies=None,
hooks=None,
json=None,
) -> None: ...
def prepare(self) -> PreparedRequest: ...
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
method: str | None
url: str | None
headers: CaseInsensitiveDict[str]
body: bytes | str | None
hooks: Any
def __init__(self) -> None: ...
def prepare(
self,
method=None,
url=None,
headers=None,
files=None,
data=None,
params=None,
auth=None,
cookies=None,
hooks=None,
json=None,
) -> None: ...
def copy(self) -> PreparedRequest: ...
def prepare_method(self, method) -> None: ...
def prepare_url(self, url, params) -> None: ...
def prepare_headers(self, headers) -> None: ...
def prepare_body(self, data, files, json=None) -> None: ...
def prepare_content_length(self, body) -> None: ...
def prepare_auth(self, auth, url="") -> None: ...
def prepare_cookies(self, cookies) -> None: ...
def prepare_hooks(self, hooks) -> None: ...
class Response:
__attrs__: Any
_content: bytes | None # undocumented
status_code: int
headers: CaseInsensitiveDict[str]
raw: Any
url: str
encoding: str | None
history: list[Response]
reason: str
cookies: RequestsCookieJar
elapsed: datetime.timedelta
request: PreparedRequest
def __init__(self) -> None: ...
def __bool__(self) -> bool: ...
def __nonzero__(self) -> bool: ...
def __iter__(self) -> Iterator[bytes]: ...
def __enter__(self) -> Self: ...
def __exit__(self, *args: Unused) -> None: ...
@property
def next(self) -> PreparedRequest | None: ...
@property
def ok(self) -> bool: ...
@property
def is_redirect(self) -> bool: ...
@property
def is_permanent_redirect(self) -> bool: ...
@property
def apparent_encoding(self) -> str: ...
def METHOD_NAME(self, chunk_size: int | None = 1, decode_unicode: bool = False) -> Iterator[Any]: ...
def iter_lines(
self, chunk_size: int | None = 512, decode_unicode: bool = False, delimiter: str | bytes | None = None
) -> Iterator[Any]: ...
@property
def content(self) -> bytes: ...
@property
def text(self) -> str: ...
def json(
self,
*,
cls: type[JSONDecoder] | None = ...,
object_hook: Callable[[dict[Any, Any]], Any] | None = ...,
parse_float: Callable[[str], Any] | None = ...,
parse_int: Callable[[str], Any] | None = ...,
parse_constant: Callable[[str], Any] | None = ...,
object_pairs_hook: Callable[[list[tuple[Any, Any]]], Any] | None = ...,
**kwds: Any,
) -> Any: ...
@property
def links(self) -> dict[Any, Any]: ...
def raise_for_status(self) -> None: ...
def close(self) -> None: ... |
298,749 | get all other parameters | import json
import logging
from pathlib import Path
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class BaseStore(dict):
def __init__(self):
self.update([("user_properties", {}),("session_parameters", {})])
def save(self):
raise NotImplementedError("Subclass should be using this function, but it was called through the base class instead.")
def _check_exists(self, key):
# Helper function to make sure a key exists before trying to work with values within it.
if key not in self.keys():
self[key] = {}
def _set(self, param_type, name, value):
# Helper function to set a single parameter (user or session or other).
self._check_exists(key=param_type)
self[param_type][name] = value
def _get_one(self, param_type, name):
# Helper function to get a single parameter value (user or session).
self._check_exists(key=param_type)
return self[param_type].get(name, None)
def _get_all(self, param_type=None):
# Helper function to get all user or session parameters - or the entire dictionary if not specified.
if param_type is not None:
return self[param_type]
else:
return self
# While redundant, the following wrappers make the distinction between session and user items clearer for the end user.
def set_user_property(self, name, value):
self._set(param_type="user_properties", name=name, value=value)
def get_user_property(self, name):
return self._get_one(param_type="user_properties", name=name)
def get_all_user_properties(self):
return self._get_all(param_type="user_properties")
def clear_user_properties(self):
self["user_properties"] = {}
def set_session_parameter(self, name, value):
self._set(param_type="session_parameters", name=name, value=value)
def get_session_parameter(self, name):
return self._get_one(param_type="session_parameters", name=name)
def get_all_session_parameters(self):
return self._get_all(param_type="session_parameters")
def clear_session_parameters(self):
self["session_parameters"] = {}
# Similar functions for other items the user wants to store that don't fit the other two categories.
def set_other_parameter(self, name, value):
self._set(param_type="other", name=name, value=value)
def get_other_parameter(self, name):
return self._get_one(param_type="other", name=name)
def METHOD_NAME(self):
return self._get_all(param_type="other")
def clear_other_parameters(self):
self["other"] = {}
class DictStore(BaseStore):
# Class for working with dictionaries that persist for the life of the class.
def __init__(self, data: dict = None):
super().__init__()
if data:
self.update(data)
def save(self):
# Give the user back what's in the dictionary so they can decide how to save it.
return self._get_all()
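# Illustrative usage of the stores defined in this module (a sketch, not part
# of the original file):
#
#   store = DictStore({"user_properties": {"name": "Ada"}})
#   store.set_session_parameter("locale", "en_US")
#   store.get_user_property("name")           # -> "Ada"
#   store.get_all_session_parameters()        # -> {"locale": "en_US"}
#   saved = store.save()                      # returns the full dict to the caller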
class FileStore(BaseStore):
# Class for working with dictionaries that get saved to a JSON file.
def __init__(self, data_location: str = None):
super().__init__()
self.data_location = data_location
try:
self._load_file()
except Exception:
logger.info(f"Failed to find file at location: {data_location}")
def _load_file(self):
# Function to get data from the object's initialized location.
# If the provided or stored data_location exists, read the file and overwrite the object's contents.
if Path(self.data_location).exists():
with open(self.data_location, "r") as json_file:
self.update(json.load(json_file))
# If the data_location doesn't exist, try to create a new starter JSON file at the location given.
else:
starter_dict = '{"user_properties":{}, "session_parameters":{}}'
starter_json = json.loads(starter_dict)
Path(self.data_location).touch()
with open(self.data_location, "w") as json_file:
json.dump(starter_json, json_file)
def save(self):
# Function to save the current dictionary to a JSON file at the object's initialized location.
try:
with open(self.data_location, "w") as outfile:
json.dump(self, outfile)
except Exception:
logger.info(f"Failed to save file at location: {self.data_location}") |
298,750 | doc type matches filter | import json
from collections import Counter
from couchdbkit.exceptions import ResourceNotFound
from corehq.apps.dump_reload.exceptions import DataExistsException
from corehq.apps.dump_reload.interface import DataLoader
from corehq.util.couch import (
IterDB,
IterDBCallback,
get_db_by_doc_type,
get_document_class_by_doc_type,
)
from corehq.util.exceptions import DocumentClassNotFound
def drop_suffix(doc_type):
if any(doc_type.endswith(suffix) for suffix in ('-Failed', '-Deleted')):
doc_type, __ = doc_type.split('-')
return doc_type
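# e.g. drop_suffix('XFormInstance-Deleted') -> 'XFormInstance';
# doc types without a '-Failed'/'-Deleted' suffix are returned unchanged.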
class CouchDataLoader(DataLoader):
slug = 'couch'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._dbs = {}
self._success_counter = Counter()
def load_objects(self, object_strings, force=False, dry_run=False):
for obj_string in object_strings:
doc = json.loads(obj_string)
doc_type = drop_suffix(doc['doc_type'])
if self.METHOD_NAME(doc_type):
if dry_run:
self._success_counter[doc_type] += 1
else:
db = self._get_db_for_doc_type(doc_type)
db.save(doc)
for db in self._dbs.values():
db.commit()
return self._success_counter
def METHOD_NAME(self, doc_type):
return not self.object_filter or self.object_filter.findall(doc_type)
def _get_db_for_doc_type(self, doc_type):
if doc_type not in self._dbs:
couch_db = get_db_by_doc_type(doc_type)
if couch_db is None:
raise DocumentClassNotFound('No Document class with name "{}" could be found.'.format(doc_type))
callback = LoaderCallback(self._success_counter, self.stdout)
db = IterDB(couch_db, new_edits=False, callback=callback)
db.__enter__()
self._dbs[doc_type] = db
return self._dbs[doc_type]
class LoaderCallback(IterDBCallback):
def __init__(self, _success_counter, stdout=None):
self._success_counter = _success_counter
self.stdout = stdout
def post_commit(self, operation, committed_docs, success_ids, errors):
if errors:
raise Exception("Errors loading data", errors)
success_doc_types = []
for doc in committed_docs:
doc_id = doc['_id']
doc_type = drop_suffix(doc['doc_type'])
doc_class = get_document_class_by_doc_type(doc_type)
doc_label = '{}.{}'.format(doc_class._meta.app_label, doc_type)
if doc_id in success_ids:
success_doc_types.append(doc_label)
self._success_counter.update(success_doc_types)
if self.stdout:
self.stdout.write('Loaded {} couch docs'.format(sum(self._success_counter.values())))
class ToggleLoader(DataLoader):
slug = 'toggles'
def load_objects(self, object_strings, force=False, dry_run=False):
from corehq.toggles.models import Toggle
count = 0
for toggle_json in object_strings:
if dry_run:
count += 1
continue
toggle_dict = json.loads(toggle_json)
slug = toggle_dict['slug']
try:
existing_toggle = Toggle.get(slug)
except ResourceNotFound:
Toggle.wrap(toggle_dict).save()
else:
existing_items = set(existing_toggle.enabled_users)
items_to_load = set(toggle_dict['enabled_users'])
enabled_for = existing_items | items_to_load
existing_toggle.enabled_users = list(enabled_for)
existing_toggle.save()
count += 1
self.stdout.write('Loaded {} Toggles'.format(count))
return Counter({'Toggle': count})
class DomainLoader(DataLoader):
slug = 'domain'
def load_objects(self, object_strings, force=False, dry_run=False):
from corehq.apps.domain.models import Domain
objects = list(object_strings)
assert len(objects) == 1, "Only 1 domain allowed per dump"
domain_dict = json.loads(objects[0])
domain_name = domain_dict['name']
try:
existing_domain = Domain.get_by_name(domain_name, strict=True)
except ResourceNotFound:
pass
else:
if existing_domain:
if force:
self.stderr.write('Loading data for existing domain: {}'.format(domain_name))
else:
raise DataExistsException("Domain: {}".format(domain_name))
if not dry_run:
Domain.get_db().bulk_save([domain_dict], new_edits=False)
self.stdout.write('Loaded Domain')
return Counter({'Domain': 1}) |
298,751 | find format list | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import requests
from requests import Session
from sqlalchemy import Column, String, ForeignKey, Integer
from monitorrent.db import Base, DBSession
from monitorrent.plugin_managers import register_plugin
from monitorrent.plugins import Topic
from monitorrent.plugins.trackers import WithCredentialsMixin, ExecuteWithHashChangeMixin, TrackerPluginBase, \
LoginResult
from monitorrent.utils.soup import get_soup
PLUGIN_NAME = 'anidub.com'
class AnidubCredentials(Base):
__tablename__ = "anidub_credentials"
username = Column(String, primary_key=True)
password = Column(String, primary_key=True)
dle_uid = Column(String, nullable=True)
dle_pwd = Column(String, nullable=True)
class AnidubTopic(Topic):
__tablename__ = "anidub_topics"
id = Column(Integer, ForeignKey('topics.id'), primary_key=True)
hash = Column(String, nullable=True)
format = Column(String, nullable=False)
format_list = Column(String, nullable=False)
__mapper_args__ = {
'polymorphic_identity': PLUGIN_NAME
}
class AnidubLoginFailedException(Exception):
def __init__(self, code, message):
self.code = code
self.message = message
class AnidubTracker(object):
tracker_settings = None
_regex = re.compile(r'^http(s?)://tr\.*anidub.com/(?:.*/\d+-.*\.html|(?:index\.php)?\?newsid=\d+)$')
root_url = "https://tr.anidub.com"
def __init__(self, dle_uid=None, dle_pwd=None):
self.dle_uid = dle_uid
self.dle_pwd = dle_pwd
def setup(self, dle_uid, dle_pwd):
self.dle_uid = dle_uid
self.dle_pwd = dle_pwd
def can_parse_url(self, url):
return self._regex.match(url) is not None
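# e.g. can_parse_url("https://tr.anidub.com/anime/123-some-title.html") -> True
# (illustrative URL; any topic page matching the pattern above is accepted)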
def parse_url(self, url):
match = self._regex.match(url)
if match is None:
return None
r = requests.get(url, allow_redirects=False, **self.tracker_settings.get_requests_kwargs())
soup = get_soup(r.text)
title = soup.find('span', id='news-title')
if title is None:
return None
title = title.text.strip()
result = {'original_name': title}
# Format
format_list = []
flist = self.METHOD_NAME(soup)
for q in flist:
format_list.append(q.text.strip())
result['format_list'] = format_list
return result
def login(self, username, password):
s = Session()
data = {"login_name": username, "login_password": password, "login": "submit"}
login_result = s.post(self.root_url, data, **self.tracker_settings.get_requests_kwargs())
if not self._is_logged_in(login_result.text):
raise AnidubLoginFailedException(1, "Invalid login or password")
else:
dle_uid = s.cookies.get('dle_user_id')
dle_pwd = s.cookies.get('dle_password')
if not dle_uid or not dle_pwd:
raise AnidubLoginFailedException(2, "Failed to retrieve cookies")
self.dle_uid = dle_uid
self.dle_pwd = dle_pwd
def get_cookies(self):
if not self.dle_uid or not self.dle_pwd:
return False
return {'dle_user_id': self.dle_uid, 'dle_password': self.dle_pwd}
def verify(self):
cookies = self.get_cookies()
if not cookies:
return False
r = requests.get(self.root_url, cookies=cookies, **self.tracker_settings.get_requests_kwargs())
return self._is_logged_in(r.text)
def get_download_url(self, url, vformat):
cookies = self.get_cookies()
page = requests.get(url, cookies=cookies, **self.tracker_settings.get_requests_kwargs())
page_soup = get_soup(page.text)
flist = self.METHOD_NAME(page_soup)
for f in flist:
if f.text.strip() == vformat:
href = f['href'][1:]
at = page_soup.select_one('div[class="torrent"] div#'+href+' a')
return self.root_url + at['href']
return None
@staticmethod
def METHOD_NAME(soup):
return soup.select('div#tabs ul[class="lcol"] a')
def _is_logged_in(self, page):
return "/index.php?action=logout\"" in page
class AnidubPlugin(WithCredentialsMixin, ExecuteWithHashChangeMixin, TrackerPluginBase):
tracker = AnidubTracker()
topic_class = AnidubTopic
credentials_class = AnidubCredentials
topic_public_fields = ['id', 'url', 'last_update', 'display_name', 'status', 'format', 'format_list']
topic_private_fields = ['display_name', 'format', 'format_list']
topic_form = [{
'type': 'row',
'content': [{
'type': 'text',
'model': 'display_name',
'label': 'Name',
'flex': 70
}, {
'type': 'select',
'model': 'format',
'label': 'Format',
'options': [],
'flex': 30
}]
}]
def login(self):
with DBSession() as db:
cred = db.query(self.credentials_class).first()
if not cred:
return LoginResult.CredentialsNotSpecified
username = cred.username
password = cred.password
if not username or not password:
return LoginResult.CredentialsNotSpecified
try:
self.tracker.login(username, password)
with DBSession() as db:
cred = db.query(self.credentials_class).first()
cred.dle_uid = self.tracker.dle_uid
cred.dle_pwd = self.tracker.dle_pwd
return LoginResult.Ok
except AnidubLoginFailedException as e:
if e.code == 1:
return LoginResult.IncorrentLoginPassword
return LoginResult.Unknown
except Exception as e:
return LoginResult.Unknown
def verify(self):
with DBSession() as db:
cred = db.query(self.credentials_class).first()
if not cred:
return False
username = cred.username
password = cred.password
if not username or not password or not cred.dle_uid or not cred.dle_pwd:
return False
self.tracker.setup(cred.dle_uid, cred.dle_pwd)
return self.tracker.verify()
def can_parse_url(self, url):
return self.tracker.can_parse_url(url)
def parse_url(self, url):
return self.tracker.parse_url(url)
def get_topic(self, id):
result = super(AnidubPlugin, self).get_topic(id)
if result is None:
return None
# format list
self.topic_form[0]['content'][1]['options'] = result['format_list'].split(',')
return result
def prepare_add_topic(self, url):
parsed_url = self.tracker.parse_url(url)
if not parsed_url:
return None
# format list
self.topic_form[0]['content'][1]['options'] = parsed_url['format_list']
settings = {
'display_name': parsed_url['original_name'],
'format': parsed_url['format_list'][0]
}
return settings
def _set_topic_params(self, url, parsed_url, topic, params):
"""
:param url: str
:type topic: AnidubTopic
"""
super(AnidubPlugin, self)._set_topic_params(url, parsed_url, topic, params)
if parsed_url is not None:
topic.format_list = ",".join(parsed_url['format_list'])
def _prepare_request(self, topic):
url = self.tracker.get_download_url(topic.url, topic.format)
if url is None:
return None
headers = {'referer': topic.url}
cookies = self.tracker.get_cookies()
request = requests.Request('GET', url, cookies=cookies, headers=headers)
return request.prepare()
register_plugin('tracker', PLUGIN_NAME, AnidubPlugin()) |
298,752 | prop descriptions | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Line(_BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
"""
Sets the color of the line enclosing each sector.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# width
# -----
@property
def width(self):
"""
Sets the width (in px) of the line enclosing each sector.
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
# widthsrc
# --------
@property
def widthsrc(self):
"""
Sets the source reference on Chart Studio Cloud for width .
The 'widthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["widthsrc"]
@widthsrc.setter
def widthsrc(self, val):
self["widthsrc"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "pie.marker"
# Self properties description
# ---------------------------
@property
def METHOD_NAME(self):
return """\
color
Sets the color of the line enclosing each sector.
colorsrc
Sets the source reference on Chart Studio Cloud for
color .
width
Sets the width (in px) of the line enclosing each
sector.
widthsrc
Sets the source reference on Chart Studio Cloud for
width .
"""
def __init__(
self, arg=None, color=None, colorsrc=None, width=None, widthsrc=None, **kwargs
):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.pie.marker.Line`
color
Sets the color of the line enclosing each sector.
colorsrc
Sets the source reference on Chart Studio Cloud for
color .
width
Sets the width (in px) of the line enclosing each
sector.
widthsrc
Sets the source reference on Chart Studio Cloud for
width .
Returns
-------
Line
"""
super(Line, self).__init__("line")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.pie.marker.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.pie.marker.Line`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.pie.marker import line as v_line
# Initialize validators
# ---------------------
self._validators["color"] = v_line.ColorValidator()
self._validators["colorsrc"] = v_line.ColorsrcValidator()
self._validators["width"] = v_line.WidthValidator()
self._validators["widthsrc"] = v_line.WidthsrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("colorsrc", None)
self["colorsrc"] = colorsrc if colorsrc is not None else _v
_v = arg.pop("width", None)
self["width"] = width if width is not None else _v
_v = arg.pop("widthsrc", None)
self["widthsrc"] = widthsrc if widthsrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
__all__ = ["Line"] |
298,753 | create balancer | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libcloud.utils.py3 import httplib
from libcloud.utils.misc import reverse_dict
from libcloud.common.brightbox import BrightboxConnection
from libcloud.loadbalancer.base import Driver, Member, Algorithm, LoadBalancer
from libcloud.loadbalancer.types import State
API_VERSION = "1.0"
class BrightboxLBDriver(Driver):
connectionCls = BrightboxConnection
name = "Brightbox"
website = "http://www.brightbox.co.uk/"
LB_STATE_MAP = {
"creating": State.PENDING,
"active": State.RUNNING,
"deleting": State.UNKNOWN,
"deleted": State.UNKNOWN,
"failing": State.UNKNOWN,
"failed": State.UNKNOWN,
}
_VALUE_TO_ALGORITHM_MAP = {
"round-robin": Algorithm.ROUND_ROBIN,
"least-connections": Algorithm.LEAST_CONNECTIONS,
}
_ALGORITHM_TO_VALUE_MAP = reverse_dict(_VALUE_TO_ALGORITHM_MAP)
def list_protocols(self):
return ["tcp", "http"]
def list_balancers(self):
data = self.connection.request("/%s/load_balancers" % API_VERSION).object
return list(map(self._to_balancer, data))
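# Illustrative driver usage (a sketch following the usual libcloud conventions;
# the client id/secret below are hypothetical placeholders):
#
#   driver = BrightboxLBDriver("cli-xxxxx", "secret")
#   for lb in driver.list_balancers():
#       print(lb.id, lb.name, lb.state, lb.ip, lb.port)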
def METHOD_NAME(self, name, port, protocol, algorithm, members):
response = self._post(
"/%s/load_balancers" % API_VERSION,
{
"name": name,
"nodes": list(map(self._member_to_node, members)),
"policy": self._algorithm_to_value(algorithm),
"listeners": [{"in": port, "out": port, "protocol": protocol}],
"healthcheck": {"type": protocol, "port": port},
},
)
return self._to_balancer(response.object)
def destroy_balancer(self, balancer):
response = self.connection.request(
"/{}/load_balancers/{}".format(API_VERSION, balancer.id), method="DELETE"
)
return response.status == httplib.ACCEPTED
def get_balancer(self, balancer_id):
data = self.connection.request(
"/{}/load_balancers/{}".format(API_VERSION, balancer_id)
).object
return self._to_balancer(data)
def balancer_attach_compute_node(self, balancer, node):
return self.balancer_attach_member(balancer, node)
def balancer_attach_member(self, balancer, member):
path = "/{}/load_balancers/{}/add_nodes".format(API_VERSION, balancer.id)
self._post(path, {"nodes": [self._member_to_node(member)]})
return member
def balancer_detach_member(self, balancer, member):
path = "/{}/load_balancers/{}/remove_nodes".format(API_VERSION, balancer.id)
response = self._post(path, {"nodes": [self._member_to_node(member)]})
return response.status == httplib.ACCEPTED
def balancer_list_members(self, balancer):
path = "/{}/load_balancers/{}".format(API_VERSION, balancer.id)
data = self.connection.request(path).object
def func(data):
return self._node_to_member(data, balancer)
return list(map(func, data["nodes"]))
def _post(self, path, data={}):
headers = {"Content-Type": "application/json"}
return self.connection.request(path, data=data, headers=headers, method="POST")
def _to_balancer(self, data):
return LoadBalancer(
id=data["id"],
name=data["name"],
state=self.LB_STATE_MAP.get(data["status"], State.UNKNOWN),
ip=self._public_ip(data),
port=data["listeners"][0]["in"],
driver=self.connection.driver,
)
def _member_to_node(self, member):
return {"node": member.id}
def _node_to_member(self, data, balancer):
return Member(id=data["id"], ip=None, port=None, balancer=balancer)
def _public_ip(self, data):
if len(data["cloud_ips"]) > 0:
ip = data["cloud_ips"][0]["public_ip"]
else:
ip = None
return ip |
298,754 | header parameters | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"network service-endpoint policy-definition show",
)
class Show(AAZCommand):
"""Get the details of a service endpoint policy definition.
:example: Get the details of a service endpoint policy definition.
az network service-endpoint policy-definition show --name myserviceendpointpolicydefinition --policy-name mypolicy --resource-group myresourcegroup
"""
_aaz_info = {
"version": "2021-08-01",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.network/serviceendpointpolicies/{}/serviceendpointpolicydefinitions/{}", "2021-08-01"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
self._execute_operations()
return self._output()
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
_args_schema.name = AAZStrArg(
options=["-n", "--name"],
help="Name of the service endpoint policy definition.",
required=True,
id_part="child_name_1",
)
_args_schema.policy_name = AAZStrArg(
options=["--policy-name"],
help="Name of the service endpoint policy.",
required=True,
id_part="name",
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.ServiceEndpointPolicyDefinitionsGet(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
return result
class ServiceEndpointPolicyDefinitionsGet(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}/serviceEndpointPolicyDefinitions/{serviceEndpointPolicyDefinitionName}",
**self.url_parameters
)
@property
def method(self):
return "GET"
@property
def error_format(self):
return "ODataV4Format"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"serviceEndpointPolicyDefinitionName", self.ctx.args.name,
required=True,
),
**self.serialize_url_param(
"serviceEndpointPolicyName", self.ctx.args.policy_name,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2021-08-01",
required=True,
),
}
return parameters
@property
def METHOD_NAME(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.etag = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.id = AAZStrType()
_schema_on_200.name = AAZStrType()
_schema_on_200.properties = AAZObjectType(
flags={"client_flatten": True},
)
_schema_on_200.type = AAZStrType()
properties = cls._schema_on_200.properties
properties.description = AAZStrType()
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.service = AAZStrType()
properties.service_resources = AAZListType(
serialized_name="serviceResources",
)
service_resources = cls._schema_on_200.properties.service_resources
service_resources.Element = AAZStrType()
return cls._schema_on_200
class _ShowHelper:
"""Helper class for Show"""
__all__ = ["Show"] |
298,755 | test int64 xdr | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# disable module docstring for tests
# pylint: disable=C0114
# disable class docstring for tests
# pylint: disable=C0115
import os
from .bigtable_emulator import BigtableEmulator
from tensorflow_io.python.ops.bigtable.bigtable_dataset_ops import BigtableClient
import tensorflow_io.python.ops.bigtable.bigtable_row_range as row_range
import tensorflow_io.python.ops.bigtable.bigtable_row_set as row_set
import tensorflow as tf
from tensorflow import test
from google.auth.credentials import AnonymousCredentials
from google.cloud.bigtable import Client
import datetime
import pytest
import sys
@pytest.mark.skipif(sys.platform == "darwin", reason="macOS fails now")
class BigtableReadTest(test.TestCase):
def check_values(self, values, table, type_name, tf_dtype):
for i, r in enumerate(
table.read_rows(
["fam1:" + type_name],
row_set=row_set.from_rows_or_ranges(row_range.infinite()),
output_type=tf_dtype,
)
):
if tf_dtype in [tf.float64, tf.float32]:
self.assertAlmostEqual(values[i].numpy(), r.numpy()[0])
else:
self.assertEqual(values[i].numpy(), r.numpy()[0])
def setUp(self):
self.emulator = BigtableEmulator()
self.data = {
"values": [i * 10 / 7 for i in range(10)],
"float": [
b"\x00\x00\x00\x00",
b"?\xb6\xdbn",
b"@6\xdbn",
b"@\x89$\x92",
b"@\xb6\xdbn",
b"@\xe4\x92I",
b"A\t$\x92",
b"A \x00\x00",
b"A6\xdbn",
b"AM\xb6\xdb",
],
"double": [
b"\x00\x00\x00\x00\x00\x00\x00\x00",
b"?\xf6\xdbm\xb6\xdbm\xb7",
b"@\x06\xdbm\xb6\xdbm\xb7",
b"@\x11$\x92I$\x92I",
b"@\x16\xdbm\xb6\xdbm\xb7",
b"@\x1c\x92I$\x92I%",
b"@!$\x92I$\x92I",
b"@$\x00\x00\x00\x00\x00\x00",
b"@&\xdbm\xb6\xdbm\xb7",
b"@)\xb6\xdbm\xb6\xdbn",
],
"int32": [
b"\x00\x00\x00\x00",
b"\x00\x00\x00\x01",
b"\x00\x00\x00\x02",
b"\x00\x00\x00\x04",
b"\x00\x00\x00\x05",
b"\x00\x00\x00\x07",
b"\x00\x00\x00\x08",
b"\x00\x00\x00\n",
b"\x00\x00\x00\x0b",
b"\x00\x00\x00\x0c",
],
"int64": [
b"\x00\x00\x00\x00\x00\x00\x00\x00",
b"\x00\x00\x00\x00\x00\x00\x00\x01",
b"\x00\x00\x00\x00\x00\x00\x00\x02",
b"\x00\x00\x00\x00\x00\x00\x00\x04",
b"\x00\x00\x00\x00\x00\x00\x00\x05",
b"\x00\x00\x00\x00\x00\x00\x00\x07",
b"\x00\x00\x00\x00\x00\x00\x00\x08",
b"\x00\x00\x00\x00\x00\x00\x00\n",
b"\x00\x00\x00\x00\x00\x00\x00\x0b",
b"\x00\x00\x00\x00\x00\x00\x00\x0c",
],
"bool": [
b"\x00",
b"\xff",
b"\xff",
b"\xff",
b"\xff",
b"\xff",
b"\xff",
b"\xff",
b"\xff",
b"\xff",
],
}
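# Note on the fixture above (illustrative, not part of the original test): the
# "int32"/"int64" entries are the big-endian encodings of int(i * 10 / 7), e.g.
# struct.pack(">i", int(3 * 10 / 7)) == b"\x00\x00\x00\x04", while the "float"
# and "double" entries are the IEEE-754 big-endian encodings of i * 10 / 7 itself.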
os.environ["BIGTABLE_EMULATOR_HOST"] = self.emulator.get_addr()
self.emulator.create_table(
"fake_project", "fake_instance", "test-table", ["fam1"]
)
client = Client(
project="fake_project", credentials=AnonymousCredentials(), admin=True
)
table = client.instance("fake_instance").table("test-table")
for type_name in ["float", "double", "int32", "int64", "bool"]:
rows = []
for i, value in enumerate(self.data[type_name]):
row_key = "row" + str(i).rjust(3, "0")
row = table.direct_row(row_key)
row.set_cell(
"fam1", type_name, value, timestamp=datetime.datetime.utcnow()
)
rows.append(row)
table.mutate_rows(rows)
def tearDown(self):
self.emulator.stop()
def test_float_xdr(self):
values = tf.constant(self.data["values"], dtype=tf.float32)
client = BigtableClient("fake_project", "fake_instance")
table = client.get_table("test-table")
self.check_values(values, table, "float", tf.float32)
def test_double_xdr(self):
values = tf.constant(self.data["values"], dtype=tf.float64)
client = BigtableClient("fake_project", "fake_instance")
table = client.get_table("test-table")
self.check_values(values, table, "double", tf.float64)
def METHOD_NAME(self):
values = tf.cast(tf.constant(self.data["values"]), dtype=tf.int64)
client = BigtableClient("fake_project", "fake_instance")
table = client.get_table("test-table")
self.check_values(values, table, "int64", tf.int64)
def test_int32_xdr(self):
values = tf.cast(tf.constant(self.data["values"]), dtype=tf.int32)
client = BigtableClient("fake_project", "fake_instance")
table = client.get_table("test-table")
self.check_values(values, table, "int32", tf.int32)
def test_bool_xdr(self):
values = tf.cast(tf.constant(self.data["values"]), dtype=tf.bool)
client = BigtableClient("fake_project", "fake_instance")
table = client.get_table("test-table")
self.check_values(values, table, "bool", tf.bool) |
298,756 | live data | import asyncio
import logging
from typing import Mapping, Optional
from opentrons.drivers.mag_deck import (
SimulatingDriver,
MagDeckDriver,
AbstractMagDeckDriver,
)
from opentrons.drivers.rpi_drivers.types import USBPort
from ..execution_manager import ExecutionManager
from . import update, mod_abc, types
log = logging.getLogger(__name__)
MAX_ENGAGE_HEIGHT = {
# Distance from home position.
# Measured in model-specific units (half-mm for GEN1, mm for GEN2).
"magneticModuleV1": 45,
"magneticModuleV2": 25,
}
# Measured in model-specific units (half-mm for GEN1, mm for GEN2).
# TODO(mc, 2022-06-13): the value for gen1 is off by ~1.5 mm
# The correct value is ~8.0 half-mm (4.0 mm)
# https://opentrons.atlassian.net/browse/RET-1242
OFFSET_TO_LABWARE_BOTTOM = {"magneticModuleV1": 5, "magneticModuleV2": 2.5}
def engage_height_is_in_range(model: str, height: float) -> bool:
"""Return whether or not a height would be valid to pass to `MagDeck.engage()`.
Args:
model: The model of Magnetic Module for which you want to check
the engage height.
height: A height that you would provide to `MagDeck.engage()`.
"""
return 0 <= height <= MAX_ENGAGE_HEIGHT[model]
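# For example, using the table above:
#   engage_height_is_in_range("magneticModuleV2", 10.0) -> True
#   engage_height_is_in_range("magneticModuleV2", 30.0) -> False (max is 25)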
class MagDeck(mod_abc.AbstractModule):
"""Hardware control interface for an attached Temperature Module."""
MODULE_TYPE = types.ModuleType.MAGNETIC
FIRST_GEN2_REVISION = 20
@classmethod
async def build(
cls,
port: str,
usb_port: USBPort,
execution_manager: ExecutionManager,
hw_control_loop: asyncio.AbstractEventLoop,
poll_interval_seconds: Optional[float] = None,
simulating: bool = False,
sim_model: Optional[str] = None,
) -> "MagDeck":
"""Factory function."""
driver: AbstractMagDeckDriver
if not simulating:
driver = await MagDeckDriver.create(port=port, loop=hw_control_loop)
else:
driver = SimulatingDriver(sim_model=sim_model)
mod = cls(
port=port,
usb_port=usb_port,
execution_manager=execution_manager,
hw_control_loop=hw_control_loop,
device_info=await driver.get_device_info(),
driver=driver,
)
return mod
def __init__(
self,
port: str,
usb_port: USBPort,
execution_manager: ExecutionManager,
hw_control_loop: asyncio.AbstractEventLoop,
driver: AbstractMagDeckDriver,
device_info: Mapping[str, str],
) -> None:
"""Constructor"""
super().__init__(
port=port,
usb_port=usb_port,
hw_control_loop=hw_control_loop,
execution_manager=execution_manager,
)
self._device_info = device_info
self._driver = driver
self._current_height = 0.0
async def cleanup(self) -> None:
await self._driver.disconnect()
@classmethod
def name(cls) -> str:
"""Get the module name."""
return "magdeck"
def firmware_prefix(self) -> str:
"""The prefix used for looking up firmware"""
return "magnetic-module"
def model(self) -> str:
"""Get the model."""
return self._model_from_revision(self._device_info.get("model"))
def bootloader(self) -> types.UploadFunction:
"""Get the bootloating method."""
return update.upload_via_avrdude
async def calibrate(self) -> None:
"""Calibration involves probing for top plate to get the plate height."""
await self.wait_for_is_running()
await self._driver.probe_plate()
# return if successful or not?
# TODO(mc, 2022-09-23): refactor this method to take real mm,
# hardware API should abstract away the idea of "short millimeters"
# https://opentrons.atlassian.net/browse/RET-1242
async def engage(
self,
height: Optional[float] = None,
height_from_base: Optional[float] = None,
must_be_running: bool = True,
) -> None:
"""Move the magnet to a specific height, measured from home position.
The units of position depend on the module model.
For GEN1, it's half millimeters ("short millimeters").
For GEN2, it's millimeters.
"""
if height is None:
assert height_from_base is not None, "An engage height must be specified"
height = height_from_base + OFFSET_TO_LABWARE_BOTTOM[self.model()]
if must_be_running:
await self.wait_for_is_running()
if not engage_height_is_in_range(self.model(), height):
raise ValueError(
f"Invalid engage height for {self.model()}: {height}. "
f"Must be 0 - {MAX_ENGAGE_HEIGHT[self.model()]}."
)
await self._driver.move(height)
self._current_height = await self._driver.get_mag_position()
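# Illustrative numbers (not part of the original): on a GEN2 module,
# engage(height_from_base=5.0) resolves to height = 5.0 + 2.5 = 7.5 mm from the
# home position, which passes the range check (max for magneticModuleV2 is 25).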
async def deactivate(self, must_be_running: bool = True) -> None:
"""Home the magnet."""
if must_be_running:
await self.wait_for_is_running()
await self._driver.home()
await self.engage(0.0, must_be_running=must_be_running)
@property
def current_height(self) -> float:
"""Get the current height."""
return self._current_height
@property
def device_info(self) -> Mapping[str, str]:
"""
Returns: a dict
{ 'serial': 'abc123', 'model': '8675309', 'version': '9001' }
"""
return self._device_info
@property
def status(self) -> types.MagneticStatus:
if self.current_height > 0:
return types.MagneticStatus.ENGAGED
else:
return types.MagneticStatus.DISENGAGED
@property
def engaged(self) -> bool:
if self.current_height > 0:
return True
else:
return False
@property
def METHOD_NAME(self) -> types.LiveData:
return {
"status": self.status,
"data": {"engaged": self.engaged, "height": self.current_height},
}
@property
def is_simulated(self) -> bool:
return isinstance(self._driver, SimulatingDriver)
# Internal Methods
async def prep_for_update(self) -> str:
await self._driver.enter_programming_mode()
new_port = await update.find_bootloader_port()
return new_port or self.port
@staticmethod
def _model_from_revision(revision: Optional[str]) -> str:
"""Defines the revision -> model mapping"""
if not revision or "v" not in revision:
log.error(f"bad revision: {revision}")
return "magneticModuleV1"
try:
revision_num = float(revision.split("v")[-1])
except (ValueError, TypeError):
log.exception("bad revision: {revision}")
return "magneticModuleV1"
if revision_num < MagDeck.FIRST_GEN2_REVISION:
return "magneticModuleV1"
else:
return "magneticModuleV2" |
298,757 | push stream | from typing import Optional, Callable
import torch
from torch import Tensor
from torch.cuda import Stream
from dig_ext.sync import synchronize, read_async, write_async
# synchronize = torch.ops.torch_geometric_autoscale.synchronize
# read_async = torch.ops.torch_geometric_autoscale.read_async
# write_async = torch.ops.torch_geometric_autoscale.write_async
class AsyncIOPool(torch.nn.Module):
def __init__(self, pool_size: int, buffer_size: int, embedding_dim: int):
super().__init__()
self.pool_size = pool_size
self.buffer_size = buffer_size
self.embedding_dim = embedding_dim
self._device = torch.device('cpu')
self._pull_queue = []
self._push_cache = [None] * pool_size
self._push_streams = [None] * pool_size
self._pull_streams = [None] * pool_size
self._cpu_buffers = [None] * pool_size
self._cuda_buffers = [None] * pool_size
self._pull_index = -1
self._push_index = -1
def _apply(self, fn: Callable) -> None:
self._device = fn(torch.zeros(1)).device
return self
def _pull_stream(self, idx: int) -> Stream:
if self._pull_streams[idx] is None:
assert str(self._device)[:4] == 'cuda'
self._pull_streams[idx] = torch.cuda.Stream(self._device)
return self._pull_streams[idx]
def METHOD_NAME(self, idx: int) -> Stream:
if self._push_streams[idx] is None:
assert str(self._device)[:4] == 'cuda'
self._push_streams[idx] = torch.cuda.Stream(self._device)
return self._push_streams[idx]
def _cpu_buffer(self, idx: int) -> Tensor:
if self._cpu_buffers[idx] is None:
self._cpu_buffers[idx] = torch.empty(self.buffer_size,
self.embedding_dim,
pin_memory=True)
return self._cpu_buffers[idx]
def _cuda_buffer(self, idx: int) -> Tensor:
if self._cuda_buffers[idx] is None:
assert str(self._device)[:4] == 'cuda'
self._cuda_buffers[idx] = torch.empty(self.buffer_size,
self.embedding_dim,
device=self._device)
return self._cuda_buffers[idx]
@torch.no_grad()
def async_pull(self, src: Tensor, offset: Optional[Tensor],
count: Optional[Tensor], index: Tensor) -> None:
# Start pulling `src` at the [offset, count] ranges and index positions:
self._pull_index = (self._pull_index + 1) % self.pool_size
data = (self._pull_index, src, offset, count, index)
self._pull_queue.append(data)
if len(self._pull_queue) <= self.pool_size:
self._async_pull(self._pull_index, src, offset, count, index)
@torch.no_grad()
def _async_pull(self, idx: int, src: Tensor, offset: Optional[Tensor],
count: Optional[Tensor], index: Tensor) -> None:
with torch.cuda.stream(self._pull_stream(idx)):
read_async(src, offset, count, index, self._cuda_buffer(idx),
self._cpu_buffer(idx))
@torch.no_grad()
def synchronize_pull(self) -> Tensor:
# Synchronize the next pull command:
idx = self._pull_queue[0][0]
synchronize()
torch.cuda.synchronize(self._pull_stream(idx))
return self._cuda_buffer(idx)
@torch.no_grad()
def free_pull(self) -> None:
# Free the buffer space and start pulling from remaining queue:
self._pull_queue.pop(0)
if len(self._pull_queue) >= self.pool_size:
data = self._pull_queue[self.pool_size - 1]
idx, src, offset, count, index = data
self._async_pull(idx, src, offset, count, index)
elif len(self._pull_queue) == 0:
self._pull_index = -1
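# Sketch of the intended pull cycle (illustrative only; `emb` is a pinned CPU
# tensor of shape [num_nodes, 128] and `idx` a LongTensor of row indices, both
# hypothetical):
#
#   pool = AsyncIOPool(pool_size=2, buffer_size=1024, embedding_dim=128).to('cuda')
#   pool.async_pull(emb, None, None, idx)   # schedule the host->device copy
#   chunk = pool.synchronize_pull()         # wait for it, get the CUDA buffer
#   ...                                     # consume the pulled rows of `chunk`
#   pool.free_pull()                        # release the slot, start queued pulls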
@torch.no_grad()
def async_push(self, src: Tensor, offset: Tensor, count: Tensor,
dst: Tensor) -> None:
# Start pushing `src` into the [offset, count] ranges of `dst`:
self._push_index = (self._push_index + 1) % self.pool_size
self.synchronize_push(self._push_index)
self._push_cache[self._push_index] = src
with torch.cuda.stream(self.METHOD_NAME(self._push_index)):
write_async(src, offset, count, dst)
@torch.no_grad()
def synchronize_push(self, idx: Optional[int] = None) -> None:
# Synchronize the push command of stream `idx` or all commands:
if idx is None:
for idx in range(self.pool_size):
self.synchronize_push(idx)
self._push_index = -1
else:
torch.cuda.synchronize(self.METHOD_NAME(idx))
self._push_cache[idx] = None
def forward(self, *args, **kwargs):
""""""
raise NotImplementedError
def __repr__(self):
return (f'{self.__class__.__name__}(pool_size={self.pool_size}, '
f'buffer_size={self.buffer_size}, '
f'embedding_dim={self.embedding_dim}, '
f'device={self._device})') |
298,758 | read ncells from xdmf | import argparse
import subprocess
import xml.etree.ElementTree
import os
def next_power_of_2(x):
return 1 if x == 0 else 2 ** ((x - 1).bit_length())
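# Worked examples of the bit-length trick above:
#   next_power_of_2(0) -> 1, next_power_of_2(1) -> 1, next_power_of_2(5) -> 8,
#   next_power_of_2(8) -> 8, next_power_of_2(9) -> 16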
def generate_new_prefix(prefix, append2prefix):
if append2prefix == "":
append2prefix = "_resampled"
prefix = os.path.basename(prefix)
lsplit = prefix.split("-")
if len(lsplit) > 1 and lsplit[-1] in ["surface", "low", "fault"]:
prefix0 = "-".join(lsplit[0:-1])
prefix_new = prefix0 + append2prefix + "-" + lsplit[-1]
else:
prefix_new = prefix + append2prefix
return prefix_new
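# For example (with the default suffix): generate_new_prefix("output-fault", "")
# returns "output_resampled-fault", while a prefix without a recognised suffix,
# e.g. "output", simply becomes "output_resampled".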
def recreateXdmf(prefix, prefix_new, nvertex, ncells, nmem, dt, indices, lsData, tohdf5=False, prec=8, append2prefix="_resampled"):
full_prefix = prefix
prefix = os.path.basename(prefix)
prefix_new = os.path.basename(prefix_new)
# fault and surface output have 3 columns in connect
ncolConnect = 4
scell = "Tetrahedron"
lsplit = prefix.split("-")
if len(lsplit) > 1:
if lsplit[-1] in ["surface", "fault"]:
ncolConnect = 3
scell = "Triangle"
if tohdf5:
colonOrNothing = ".h5:"
DataExtension = ""
DataFormat = "HDF"
else:
colonOrNothing = ""
DataExtension = ".bin"
DataFormat = "Binary"
# create and print the new Xdmf file
header = """<?xml version="1.0" ?>
<!DOCTYPE Xdmf SYSTEM "Xdmf.dtd" []>
<Xdmf Version="2.0">
<Domain>
<Grid Name="TimeSeries" GridType="Collection" CollectionType="Temporal">"""
score = ""
for i, ii in enumerate(indices):
score = (
score
+ """ <Grid Name="step_%012d" GridType="Uniform">
<Topology TopologyType="%s" NumberOfElements="%d">
<DataItem NumberType="Int" Precision="8" Format="%s" Dimensions="%d %d">%s_cell%s/mesh0/connect%s</DataItem>
</Topology>
<Geometry name="geo" GeometryType="XYZ" NumberOfElements="%d">
<DataItem NumberType="Float" Precision="8" Format="%s" Dimensions="%d 3">%s_vertex%s/mesh0/geometry%s</DataItem>
</Geometry>
<Time Value="%f"/>\n"""
% (
ii,
scell,
ncells,
DataFormat,
ncells,
ncolConnect,
prefix_new,
colonOrNothing,
DataExtension,
nvertex,
DataFormat,
nvertex,
prefix_new,
colonOrNothing,
DataExtension,
dt * ii,
)
)
for sdata in lsData:
if sdata == "partition":
score = (
score
+ """
<Attribute Name="partition" Center="Cell">
<DataItem NumberType="Int" Precision="4" Format="%s" Dimensions="%d">%s_cell%s/mesh0/partition%s</DataItem>
</Attribute>\n"""
% (DataFormat, ncells, prefix_new, colonOrNothing, DataExtension)
)
else:
score = (
score
+ """ <Attribute Name="%s" Center="Cell">
<DataItem ItemType="HyperSlab" Dimensions="%d">
<DataItem NumberType="UInt" Precision="4" Format="XML" Dimensions="3 2">%d 0 1 1 1 %d</DataItem>
<DataItem NumberType="Float" Precision="%d" Format="%s" Dimensions="%d %d">%s_cell%s/mesh0/%s%s</DataItem>
</DataItem>
</Attribute>\n"""
% (sdata, ncells, i, ncells, prec, DataFormat, i + 1, nmem, prefix_new, colonOrNothing, sdata, DataExtension)
)
score = score + " </Grid>\n"
score = (
score
+ """ </Grid>
</Domain>
</Xdmf>"""
)
prefix0 = generate_new_prefix(full_prefix, append2prefix)
fout = open(prefix0 + ".xdmf", "w")
fout.write(header)
fout.write("\n")
fout.write(score)
fout.write("\n")
fout.close()
print("done writing " + prefix0 + ".xdmf")
def METHOD_NAME(xdmfFile):
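# grep the first connect DataItem of the xdmf and return its leading dimension, i.e. the number of cells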
out = subprocess.check_output(["grep connect " + xdmfFile + " | head -n1"], shell=True)
e = xml.etree.ElementTree.fromstring(out)
dimstring = e.attrib["Dimensions"].split()
return int(dimstring[0])
def ReadDtFromXdmf(xdmfFile):
out = subprocess.check_output(["grep Time " + xdmfFile + " | head -n3| tail -n1"], shell=True)
e = xml.etree.ElementTree.fromstring(out)
dimstring = e.attrib["Value"].split()
return float(dimstring[0])
def ReadNvertexFromXdmf(xdmfFile):
out = subprocess.check_output(["grep geometry " + xdmfFile + " | head -n1"], shell=True)
e = xml.etree.ElementTree.fromstring(out)
dimstring = e.attrib["Dimensions"].split()
return int(dimstring[0])
def ReadNdtNmemFromXdmf(xdmfFile):
out = subprocess.check_output(["grep DataItem " + xdmfFile + " | tail -n2 | head -n1"], shell=True)
e = xml.etree.ElementTree.fromstring(out)
dimstring = e.attrib["Dimensions"].split()
# return (int(dimstring[0])-1, int(dimstring[1]))
return (int(dimstring[0]), int(dimstring[1]))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="recreate a xdmf file")
parser.add_argument("prefix", help="prefix including -fault or -surface")
parser.add_argument("--idt", nargs="+", help="list of time step to differenciate (1st = 0); -1 = all", type=int)
parser.add_argument("--nvertex", nargs=1, metavar=("nvertex"), help="number of vertex (read if not given)", type=int)
parser.add_argument("--ncells", nargs=1, metavar=("ncells"), help="number of cells (read if not given)", type=int)
parser.add_argument("--dt", nargs=1, metavar=("dt"), help="output time step (read if not given)", type=float)
parser.add_argument("--ndt", nargs=1, metavar=("ndt"), help="number of time steps to output (read if not given)", type=int)
parser.add_argument("--Data", nargs="+", help="list of data variable to write")
args = parser.parse_args()
prefix = args.prefix
xdmfFile = prefix + ".xdmf"
### Read all parameters from the xdmf file of SeisSol (if any)
if args.ncells is not None:
ncells = args.ncells[0]
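# nmem: padded second dimension of the per-variable data arrays (ncells rounded up to the next power of two)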
nmem = next_power_of_2(ncells)
# nmem = next_power_of_2(ncells)*2
else:
ncells = METHOD_NAME(xdmfFile)
if args.dt is not None:
dt = args.dt[0]
else:
dt = ReadDtFromXdmf(xdmfFile)
if args.nvertex is not None:
nvertex = args.nvertex[0]
else:
nvertex = ReadNvertexFromXdmf(xdmfFile)
if args.ndt is not None:
ndt = args.ndt[0]
else:
ndt, nmem = ReadNdtNmemFromXdmf(xdmfFile)
recreateXdmf(prefix, prefix, nvertex, ncells, nmem, dt, range(ndt), args.Data) |
298,759 | read file partially | import numpy as np
from sm.engine.config import SMConfig
from sm.engine.db import DB
from sm.engine.storage import get_s3_client
from sm.engine.annotation_lithops.io import deserialize
class DatasetFiles:
"""Class for accessing to imzml browser files and reading them"""
DS_SEL = 'SELECT input_path FROM dataset WHERE id = %s'
def __init__(self, ds_id):
self.ds_id = ds_id
self._db = DB()
self._sm_config = SMConfig.get_conf()
self.s3_client = get_s3_client(sm_config=self._sm_config)
self.browser_bucket = self._sm_config['imzml_browser_storage']['bucket']
self._get_bucket_and_uuid()
self.mz_index_key = f'{self.uuid}/mz_index.npy'
self.mzs_key = f'{self.uuid}/mzs.npy'
self.ints_key = f'{self.uuid}/ints.npy'
self.sp_idxs_key = f'{self.uuid}/sp_idxs.npy'
self.portable_spectrum_reader_key = f'{self.uuid}/portable_spectrum_reader.pickle'
self.find_ibd_key()
self.check_imzml_browser_files()
def _get_bucket_and_uuid(self) -> None:
res = self._db.select_one(DatasetFiles.DS_SEL, params=(self.ds_id,))
try:
self.uuid = res[0].split('/')[-1]
self.upload_bucket = res[0].split('/')[-2]
except IndexError:
raise ValueError(f'Dataset {self.ds_id} does not exist') # pylint: disable=W0707
def find_ibd_key(self) -> None:
for obj in self.s3_client.list_objects(Bucket=self.upload_bucket, Prefix=self.uuid)[
'Contents'
]:
if obj['Key'].lower().endswith('.ibd'):
self.ibd_key = obj['Key']
def check_imzml_browser_files(self):
"""Checking for the presence of all 5 files required for imzml browser"""
status = False
response = self.s3_client.list_objects(Bucket=self.browser_bucket, Prefix=self.uuid)
if response.get('Contents'):
objects = {item['Key'] for item in response['Contents']}
files = {
self.mz_index_key,
self.mzs_key,
self.ints_key,
self.sp_idxs_key,
self.portable_spectrum_reader_key,
}
if len(objects) == 5 and (objects - files) == set():
status = True
return status
def read_file(self, key: str, bucket: str = '') -> bytes:
if not bucket:
bucket = self.browser_bucket
s3_object = self.s3_client.get_object(Bucket=bucket, Key=key)
return s3_object['Body'].read()
def METHOD_NAME(
self, offset: int, bytes_to_read: int, key: str, bucket: str = ''
) -> bytes:
if not bucket:
bucket = self.browser_bucket
s3_object = self.s3_client.get_object(
Bucket=bucket,
Key=key,
Range=f'bytes={offset}-{offset + bytes_to_read - 1}',
)
return s3_object['Body'].read()
class DatasetBrowser:
def __init__(self, ds_id, mz_low, mz_high):
self.ds_id = ds_id
self.mz_low = mz_low
self.mz_high = mz_high
self.ds = DatasetFiles(ds_id)
self.mz_index = np.frombuffer(self.ds.read_file(self.ds.mz_index_key), dtype='f')
self.portable_reader = deserialize(self.ds.read_file(self.ds.portable_spectrum_reader_key))
self.coordinates = np.array(self.portable_reader.coordinates, dtype='i')[:, :2]
self.mz_peaks = self.get_mz_peaks()
def get_mz_peaks(self):
"""Return an array of records mz, int, sp_idx located between mz_low and mz_high
Based on the mz_low and mz_high values, we calculate the index of chunks
and offsets in bytes to read from the files of these chunks.
The resulting combined arrays are filtered by mz_low/mz_high and returned.
"""
# calculate the index of the lower and upper chunk
mz_low_chunk_idx, mz_high_chunk_idx = np.searchsorted(
self.mz_index, [self.mz_low, self.mz_high]
)
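# the whole requested m/z window lies below the first indexed chunk, so no peaks can match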
if mz_high_chunk_idx == 0:
return np.zeros((0, 3), dtype='f')
# previous chunk actually includes value
if mz_low_chunk_idx > 0:
mz_low_chunk_idx -= 1
chunk_size = 4 * 1024 # bytes per chunk: 1024 records of 4-byte floats
offset = mz_low_chunk_idx * chunk_size
bytes_to_read = (mz_high_chunk_idx - mz_low_chunk_idx + 1) * chunk_size
mz_chunks_array = np.frombuffer(
self.ds.METHOD_NAME(offset, bytes_to_read, self.ds.mzs_key),
dtype='f',
)
int_chunks_array = np.frombuffer(
self.ds.METHOD_NAME(offset, bytes_to_read, self.ds.ints_key),
dtype='f',
)
sp_idxs_chunk_array = np.frombuffer(
self.ds.METHOD_NAME(offset, bytes_to_read, self.ds.sp_idxs_key),
dtype='f',
)
peaks_chunk_array = np.stack([mz_chunks_array, int_chunks_array, sp_idxs_chunk_array]).T
index_low, index_high = np.searchsorted(
peaks_chunk_array[:, 0], [self.mz_low, self.mz_high]
)
# index_high equals to index after last valid element
mz_peaks = peaks_chunk_array[index_low:index_high]
return mz_peaks |
298,760 | newroute | # -*- coding: utf-8 -*-
"""
mslib.utils
~~~~~~~~~~~~~~
Collection of utility routines for the Mission Support System.
This file is part of MSS.
:copyright: Copyright 2008-2014 Deutsches Zentrum fuer Luft- und Raumfahrt e.V.
:copyright: Copyright 2011-2014 Marc Rautenhaus (mr)
:copyright: Copyright 2016-2023 by the MSS team, see AUTHORS.
:license: APACHE-2.0, see LICENSE for details.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import os
import subprocess
def subprocess_startupinfo():
"""
config options to hide windows terminals on subprocess call
"""
startupinfo = None
if os.name == 'nt':
# thx to https://gist.github.com/nitely/3862493
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags = subprocess.CREATE_NEW_CONSOLE | subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = subprocess.SW_HIDE
return startupinfo
class FatalUserError(Exception):
def __init__(self, error_string):
logging.debug("%s", error_string)
def setup_logging(args):
logger = logging.getLogger()
# this is necessary as "someone" has already initialized logging, preventing basicConfig from doing stuff
for ch in logger.handlers:
logger.removeHandler(ch)
debug_formatter = logging.Formatter("%(asctime)s (%(module)s.%(funcName)s:%(lineno)s): %(levelname)s: %(message)s")
default_formatter = logging.Formatter("%(levelname)s: %(message)s")
# Console handler (suppress DEBUG by default)
ch = logging.StreamHandler()
if args.debug:
logger.setLevel(logging.DEBUG)
ch.setLevel(logging.DEBUG)
ch.setFormatter(debug_formatter)
else:
logger.setLevel(logging.INFO)
ch.setLevel(logging.INFO)
ch.setFormatter(default_formatter)
logger.addHandler(ch)
# File handler (always on DEBUG level)
# TODO: Change this to write to a rotating log handler (so that the file size
# is kept constant). (mr, 2011-02-25)
if args.logfile:
logfile = args.logfile
try:
fh = logging.FileHandler(logfile, "w")
except (OSError, IOError) as ex:
logger.error("Could not open logfile '%s': %s %s", logfile, type(ex), ex)
else:
logger.setLevel(logging.DEBUG)
fh.setLevel(logging.DEBUG)
fh.setFormatter(debug_formatter)
logger.addHandler(fh)
# modified Version from minidom, https://github.com/python/cpython/blob/2.7/Lib/xml/dom/minidom.py
# MSS needed to change all writings as unicode not str
from xml.dom.minidom import _write_data, Node
# Copyright © 2001-2018 Python Software Foundation. All rights reserved.
# Copyright © 2000 BeOpen.com. All rights reserved.
def writexml(self, writer, indent="", addindent="", newl=""):
# indent = current indentation
# addindent = indentation to add to higher levels
# newl = newline string
writer.write(indent + "<" + self.tagName)
attrs = self._get_attributes()
for a_name in sorted(attrs.keys()):
writer.write(" %s=\"" % a_name)
_write_data(writer, attrs[a_name].value)
writer.write("\"")
if self.childNodes:
writer.write(">")
if (len(self.childNodes) == 1 and self.childNodes[0].nodeType == Node.TEXT_NODE):
self.childNodes[0].writexml(writer, '', '', '')
else:
writer.write(newl)
for node in self.childNodes:
node.writexml(writer, indent + addindent, addindent, newl)
writer.write(indent)
writer.write("</%s>%s" % (self.tagName, newl))
else:
writer.write("/>%s" % (newl))
def conditional_decorator(dec, condition):
def decorator(func):
if not condition:
# Return the function unchanged, not decorated.
return func
return dec(func)
return decorator
def prefix_route(route_function, prefix='', mask='{0}{1}'):
'''
https://stackoverflow.com/questions/18967441/add-a-prefix-to-all-flask-routes/18969161#18969161
Defines a new route function with a prefix.
The mask argument is a `format string` formatted with, in that order:
prefix, route
'''
def METHOD_NAME(route, *args, **kwargs):
''' prefix route '''
return route_function(mask.format(prefix, route), *args, **kwargs)
return METHOD_NAME |
298,761 | poll wait for socket | import errno
import select
import sys
from functools import partial
try:
from time import monotonic
except ImportError:
from time import time as monotonic
__all__ = ["NoWayToWaitForSocketError", "wait_for_read", "wait_for_write"]
class NoWayToWaitForSocketError(Exception):
pass
# How should we wait on sockets?
#
# There are two types of APIs you can use for waiting on sockets: the fancy
# modern stateful APIs like epoll/kqueue, and the older stateless APIs like
# select/poll. The stateful APIs are more efficient when you have lots of
# sockets to keep track of, because you can set them up once and then use them
# lots of times. But we only ever want to wait on a single socket at a time
# and don't want to keep track of state, so the stateless APIs are actually
# more efficient. So we want to use select() or poll().
#
# Now, how do we choose between select() and poll()? On traditional Unixes,
# select() has a strange calling convention that makes it slow, or fail
# altogether, for high-numbered file descriptors. The point of poll() is to fix
# that, so on Unixes, we prefer poll().
#
# On Windows, there is no poll() (or at least Python doesn't provide a wrapper
# for it), but that's OK, because on Windows, select() doesn't have this
# strange calling convention; plain select() works fine.
#
# So: on Windows we use select(), and everywhere else we use poll(). We also
# fall back to select() in case poll() is somehow broken or missing.
if sys.version_info >= (3, 5):
# Modern Python, that retries syscalls by default
def _retry_on_intr(fn, timeout):
return fn(timeout)
else:
# Old and broken Pythons.
def _retry_on_intr(fn, timeout):
if timeout is None:
deadline = float("inf")
else:
deadline = monotonic() + timeout
while True:
try:
return fn(timeout)
# OSError for 3 <= pyver < 3.5, select.error for pyver <= 2.7
except (OSError, select.error) as e:
# 'e.args[0]' incantation works for both OSError and select.error
if e.args[0] != errno.EINTR:
raise
else:
timeout = deadline - monotonic()
if timeout < 0:
timeout = 0
if timeout == float("inf"):
timeout = None
continue
def select_wait_for_socket(sock, read=False, write=False, timeout=None):
if not read and not write:
raise RuntimeError("must specify at least one of read=True, write=True")
rcheck = []
wcheck = []
if read:
rcheck.append(sock)
if write:
wcheck.append(sock)
# When doing a non-blocking connect, most systems signal success by
# marking the socket writable. Windows, though, signals success by marking
# it as "exceptional". We paper over the difference by checking the write
# sockets for both conditions. (The stdlib selectors module does the same
# thing.)
fn = partial(select.select, rcheck, wcheck, wcheck)
rready, wready, xready = _retry_on_intr(fn, timeout)
return bool(rready or wready or xready)
def METHOD_NAME(sock, read=False, write=False, timeout=None):
if not read and not write:
raise RuntimeError("must specify at least one of read=True, write=True")
mask = 0
if read:
mask |= select.POLLIN
if write:
mask |= select.POLLOUT
poll_obj = select.poll()
poll_obj.register(sock, mask)
# For some reason, poll() takes timeout in milliseconds
def do_poll(t):
if t is not None:
t *= 1000
return poll_obj.poll(t)
return bool(_retry_on_intr(do_poll, timeout))
def null_wait_for_socket(*args, **kwargs):
raise NoWayToWaitForSocketError("no select-equivalent available")
def _have_working_poll():
# Apparently some systems have a select.poll that fails as soon as you try
# to use it, either due to strange configuration or broken monkeypatching
# from libraries like eventlet/greenlet.
try:
poll_obj = select.poll()
_retry_on_intr(poll_obj.poll, 0)
except (AttributeError, OSError):
return False
else:
return True
def wait_for_socket(*args, **kwargs):
# We delay choosing which implementation to use until the first time we're
# called. We could do it at import time, but then we might make the wrong
# decision if someone goes wild with monkeypatching select.poll after
# we're imported.
global wait_for_socket
if _have_working_poll():
wait_for_socket = METHOD_NAME
elif hasattr(select, "select"):
wait_for_socket = select_wait_for_socket
else: # Platform-specific: Appengine.
wait_for_socket = null_wait_for_socket
return wait_for_socket(*args, **kwargs)
def wait_for_read(sock, timeout=None):
"""Waits for reading to be available on a given socket.
Returns True if the socket is readable, or False if the timeout expired.
"""
return wait_for_socket(sock, read=True, timeout=timeout)
def wait_for_write(sock, timeout=None):
"""Waits for writing to be available on a given socket.
Returns True if the socket is writable, or False if the timeout expired.
"""
return wait_for_socket(sock, write=True, timeout=timeout) |
298,762 | get view name | from functools import cached_property
import structlog
from django.utils import timezone
from django.utils.translation import gettext as _
from rest_framework.exceptions import ValidationError
from rest_framework.generics import GenericAPIView
from rest_framework.permissions import AllowAny
from rest_framework.throttling import AnonRateThrottle, UserRateThrottle
from readthedocs.api.v3.views import APIv3Settings
from readthedocs.core.utils.extend import SettingsOverrideObject
from readthedocs.search import tasks
from readthedocs.search.api.pagination import SearchPagination
from readthedocs.search.api.v3.executor import SearchExecutor
from readthedocs.search.api.v3.serializers import PageSearchSerializer
from readthedocs.search.api.v3.utils import should_use_advanced_query
log = structlog.get_logger(__name__)
RATE_LIMIT = "100/minute"
class SearchAnonRateThrottle(AnonRateThrottle):
"""Rate limit for the search API for anonymous users."""
rate = RATE_LIMIT
class SearchUserRateThrottle(UserRateThrottle):
"""Rate limit for the search API for authenticated users."""
rate = RATE_LIMIT
class SearchAPI(APIv3Settings, GenericAPIView):
"""
Server side search API V3.
Required query parameters:
- **q**: [Search term](https://docs.readthedocs.io/page/server-side-search/syntax.html).
Check our [docs](https://docs.readthedocs.io/page/server-side-search/api.html) for more information.
""" # noqa
http_method_names = ["get"]
pagination_class = SearchPagination
serializer_class = PageSearchSerializer
search_executor_class = SearchExecutor
permission_classes = [AllowAny]
# The search API would be used by anonymous users,
# and with our search-as-you-type extension.
# So we need to increase the rate limit.
throttle_classes = (SearchUserRateThrottle, SearchAnonRateThrottle)
@property
def description(self):
"""
Get the view description.
Force the description to always be the docstring of this class,
even if it's subclassed.
"""
return SearchAPI.__doc__
def METHOD_NAME(self):
return "Search API V3"
def _validate_query_params(self):
query = self.request.GET.get("q")
errors = {}
if not query:
errors["q"] = [_("This query parameter is required")]
if errors:
raise ValidationError(errors)
@cached_property
def _search_executor(self):
search_executor = self.search_executor_class(
request=self.request,
query=self.request.GET["q"],
)
return search_executor
def _get_search_query(self):
return self._search_executor.parser.query
def _get_projects_to_search(self):
return self._search_executor.projects
def get_queryset(self):
"""
Returns an Elasticsearch DSL search object or an iterator.
.. note::
Calling ``list(search)`` over an DSL search object is the same as
calling ``search.execute().hits``. This is why an DSL search object
is compatible with DRF's paginator.
"""
use_advanced_query = should_use_advanced_query(self._get_projects_to_search())
search = self._search_executor.search(
use_advanced_query=use_advanced_query,
aggregate_results=False,
)
if not search:
return []
return search
def get(self, request, *args, **kwargs):
self._validate_query_params()
result = self.list()
self._record_query(result)
return result
def _record_query(self, response):
total_results = response.data.get("count", 0)
time = timezone.now()
query = self._get_search_query().lower().strip()
# NOTE: I think this may be confusing,
# since the number of results is the total
# across all searched projects; this specific project
# could have had 0 results.
for project, version in self._get_projects_to_search():
tasks.record_search_query.delay(
project.slug,
version.slug,
query,
total_results,
time.isoformat(),
)
def list(self):
queryset = self.get_queryset()
page = self.paginator.paginate_queryset(
queryset,
self.request,
view=self,
)
serializer = self.get_serializer(
page, many=True, projects=self._get_projects_to_search()
)
response = self.paginator.get_paginated_response(serializer.data)
self._add_extra_fields(response)
return response
def _add_extra_fields(self, response):
"""
Add additional fields to the top level response.
These are fields that aren't part of the serializers,
and are related to the whole list, rather than each element.
"""
# Add all projects that were used in the final search.
response.data["projects"] = [
{"slug": project.slug, "versions": [{"slug": version.slug}]}
for project, version in self._get_projects_to_search()
]
# Add the query used in the final search,
# this doesn't include arguments.
response.data["query"] = self._get_search_query()
class BaseProxiedSearchAPI(SearchAPI):
"""
Use a separate class for the proxied version of this view.
This is so we can override it in .com,
where we need to make use of our auth backends.
"""
class ProxiedSearchAPI(SettingsOverrideObject):
_default_class = BaseProxiedSearchAPI |
298,763 | close | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Nodalink, SARL.
#
# Simple nbd client used to connect to qemu-nbd
#
# author: Benoît Canet <benoit.canet@irqsave.net>
#
# This work is open source software, licensed under the terms of the
# BSD license as described in the LICENSE file in the top-level directory.
#
import socket
import struct
class nbd_client(object):
READ = 0
WRITE = 1
DISCONNECT = 2
FLUSH = 3
FLAG_HAS_FLAGS = (1 << 0)
FLAG_SEND_FLUSH = (1 << 2)
def __init__(self, hostname, port=10809):
self._flushed = True
self._closed = True
self._is_read = False
self._handle = 0
self._length = 0
self._s = socket.create_connection((hostname, port))
self._closed = False
self._handshake()
def __del__(self):
self.METHOD_NAME()
def METHOD_NAME(self):
if not self._flushed:
self.flush()
if not self._closed:
self._disconnect()
self._closed = True
def _handshake(self):
# Perform handshake as specified in
# https://github.com/NetworkBlockDevice/nbd/blob/master/doc/proto.md
nbd_magic = self._s.recv(len("NBDMAGIC"))
assert(nbd_magic == b'NBDMAGIC')
buf = self._s.recv(8)
(magic,) = struct.unpack(">Q", buf)
if magic == 0x49484156454F5054:
self._new_style_handshake(magic)
else:
self._old_style_handshake(magic)
def _old_style_handshake(self, magic):
assert(magic == 0x00420281861253)
buf = self._s.recv(8 + 4)
(self._size, self._flags) = struct.unpack(">QL", buf)
# ignore trailing zeroes
self._s.recv(124)
def _new_style_handshake(self, magic):
assert(magic == 0x49484156454F5054) # Should have received IHAVEOPT
buf = self._s.recv(2)
(handshake_flags,) = struct.unpack(">H", buf)
client_flags = struct.pack('>L', 0)
self._s.send(client_flags)
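# request option NBD_OPT_EXPORT_NAME (1) with an empty export name (data length 0)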
options = struct.pack('>QLL', 0x49484156454F5054, 1, 0)
self._s.send(options)
buf = self._s.recv(8 + 2)
(self._size, transport_flags) = struct.unpack(">QH", buf)
self._flags = (handshake_flags << 16) + transport_flags
# ignore trailing zeroes
self._s.recv(124)
def _build_header(self, request_type, offset, length):
self._is_read = False
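# 0x25609513 is the NBD request magic; the packed fields are magic, type, handle, offset, length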
header = struct.pack('>LLQQL', 0x25609513,
request_type, self._handle, offset, length)
return header
def _parse_reply(self):
data = ""
reply = self._s.recv(4 + 4 + 8)
(magic, errno, handle) = struct.unpack(">LLQ", reply)
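# 0x67446698 is the NBD simple reply magic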
assert(magic == 0x67446698)
assert(handle == self._handle)
self._handle += 1
if self._is_read:
data = self._s.recv(self._length)
return (data, errno)
def _check_value(self, name, value):
if not value % 512:
return
raise ValueError("%s=%i is not a multiple of 512" % (name, value))
def write(self, data, offset):
self._check_value("offset", offset)
self._check_value("size", len(data))
self._flushed = False
self._is_read = False
header = self._build_header(self.WRITE, offset, len(data))
self._s.send(header + data)
(data, errno) = self._parse_reply()
assert(errno == 0)
return len(data)
def read(self, offset, length):
self._check_value("offset", offset)
self._check_value("length", length)
header = self._build_header(self.READ, offset, length)
self._is_read = True
self._length = length
self._s.send(header)
(data, errno) = self._parse_reply()
assert(errno == 0)
return data
def need_flush(self):
if self._flags & self.FLAG_HAS_FLAGS != 0 and \
self._flags & self.FLAG_SEND_FLUSH != 0:
return True
else:
return False
def flush(self):
self._is_read = False
if not self.need_flush():
self._flushed = True
return True
header = self._build_header(self.FLUSH, 0, 0)
self._s.send(header)
(data, errno) = self._parse_reply()
self._handle += 1
if not errno:
self._flushed = True
return errno == 0
def _disconnect(self):
self._is_read = False
header = self._build_header(self.DISCONNECT, 0, 0)
self._s.send(header)
def size(self):
return self._size |
298,764 | generate all | #!/usr/bin/python3 -i
#
# Copyright (c) 2021 LunarG, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import os, re, sys
from base_generator import *
class VulkanObjectInfoTableBase2HeaderGeneratorOptions(BaseGeneratorOptions):
"""Options for generating C++ function declarations for Vulkan API parameter encoding"""
def __init__(
self,
blacklists=None, # Path to JSON file listing apicalls and structs to ignore.
platformTypes=None, # Path to JSON file listing platform (WIN32, X11, etc.) defined types.
filename=None,
directory='.',
prefixText='',
protectFile=False,
protectFeature=True,
extraVulkanHeaders=[]
):
BaseGeneratorOptions.__init__(
self,
blacklists,
platformTypes,
filename,
directory,
prefixText,
protectFile,
protectFeature,
extraVulkanHeaders=extraVulkanHeaders
)
# Generates declarations for functions for Vulkan object info table
class VulkanObjectInfoTableBase2HeaderGenerator(BaseGenerator):
def __init__(
self, err_file=sys.stderr, warn_file=sys.stderr, diag_file=sys.stdout
):
BaseGenerator.__init__(
self,
process_cmds=True,
process_structs=False,
feature_break=True,
err_file=err_file,
warn_file=warn_file,
diag_file=diag_file
)
# Method override
# yapf: disable
def beginFile(self, genOpts):
BaseGenerator.beginFile(self, genOpts)
self.write_include()
write('GFXRECON_BEGIN_NAMESPACE(gfxrecon)', file=self.outFile)
write('GFXRECON_BEGIN_NAMESPACE(decode)', file=self.outFile)
# yapf: enable
# Method override
# yapf: disable
def endFile(self):
self.METHOD_NAME()
write('GFXRECON_END_NAMESPACE(decode)', file=self.outFile)
write('GFXRECON_END_NAMESPACE(gfxrecon)', file=self.outFile)
# Finish processing in superclass
BaseGenerator.endFile(self)
# yapf: enable
# yapf: disable
def METHOD_NAME(self):
add_code = ''
remove_code = ''
const_get_code = ''
get_code = ''
visit_code = ''
map_code = ''
for handle_name in sorted(self.handle_names):
if handle_name in self.DUPLICATE_HANDLE_TYPES:
continue
handle_name = handle_name[2:]
handle_info = handle_name + 'Info'
handle_map = handle_name[0].lower() + handle_name[1:] + '_map_'
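# e.g. VkDevice becomes DeviceInfo entries stored in device_map_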
add_code += ' void Add{0}({0}&& info) {{ AddObjectInfo(std::move(info), &{1}); }}\n'.format(handle_info, handle_map)
remove_code += ' void Remove{0}(format::HandleId id) {{ {1}.erase(id); }}\n'.format(handle_info, handle_map)
const_get_code += ' const {0}* Get{0}(format::HandleId id) const {{ return GetObjectInfo<{0}>(id, &{1}); }}\n'.format(handle_info, handle_map)
get_code += ' {0}* Get{0}(format::HandleId id) {{ return GetObjectInfo<{0}>(id, &{1}); }}\n'.format(handle_info, handle_map)
visit_code += ' void Visit{0}(std::function<void(const {0}*)> visitor) const {{ for (const auto& entry : {1}) {{ visitor(&entry.second); }} }}\n'.format(handle_info, handle_map)
map_code += ' std::unordered_map<format::HandleId, {0}> {1};\n'.format(handle_info, handle_map)
self.newline()
code = 'class VulkanObjectInfoTableBase2 : VulkanObjectInfoTableBase\n'
code += '{\n'
code += ' public:\n'
code += ' VulkanObjectInfoTableBase2() {}\n'
code += ' ~VulkanObjectInfoTableBase2() {}\n'
code += '\n'
code += add_code
code += '\n'
code += remove_code
code += '\n'
code += const_get_code
code += '\n'
code += get_code
code += '\n'
code += visit_code
code += '\n'
code += ' protected:\n'
code += map_code
code += '};\n'
write(code, file=self.outFile)
# yapf: enable
def write_include(self):
write(
'#include "decode/vulkan_object_info_table_base.h"\n',
file=self.outFile
)
self.newline() |
298,765 | summary | #!/usr/bin/env python3
import argparse
from copy import deepcopy
from botocore.exceptions import ClientError
module_info = {
# Name of the module (should be the same as the filename)
'name': 'transfer_family__enum',
# Name and any other notes about the author
'author': 'Julio Melo from appminer.io and eufuihackeado.com.br',
# Category of the module. Make sure the name matches an existing category.
'category': 'ENUM',
# One liner description of the module functionality. This shows up when a user searches for modules.
'one_liner': 'Enumerates AWS Transfer Family SFTP/FTP and FTPS servers',
# Full description about what the module does and how it works
'description': 'This module enumerates all relevant servers from AWS Transfer Family of a given region',
# A list of AWS services that the module utilizes during its execution
'services': ['Transfer'],
# For prerequisite modules, try and see if any existing modules return the data that is required for your module before writing that code yourself, that way, session data can stay separated and modular.
'prerequisite_modules': [],
# Module arguments to autocomplete when the user hits tab
'arguments_to_autocomplete': [
'--regions'
],
}
parser = argparse.ArgumentParser(add_help=False, description=module_info['description'])
parser.add_argument('--regions', required=False, default=None, help='One or more (comma separated) AWS regions in the format us-east-1. Defaults to all session regions.')
def fetch_transfer_servers(client, func, key, print, **kwargs):
caller = getattr(client, func)
try:
response = caller(**kwargs)
data = response[key]
while 'NextToken' in response and response['NextToken'] != '':
print({**kwargs, **{'NextToken': response['NextToken']}})
response = caller(**{**kwargs, **{'NextToken': response['NextToken']}})
data.extend(response[key])
for resource in data:
resource['region'] = client.meta.region_name
return data
except ClientError as error:
code = error.response['Error']['Code']
if code == 'AccessDeniedException':
print(' {} FAILURE: MISSING NEEDED PERMISSIONS'.format(func))
else:
print(code)
return []
def fetch_transfer_server_users(client, func, key, print, **kwargs):
caller = getattr(client, func)
try:
response = caller(**kwargs)
data = response[key]
while 'NextToken' in response and response['NextToken'] != '':
print({**kwargs, **{'NextToken': response['NextToken']}})
response = caller({**kwargs, **{'NextToken': response['NextToken']}})
data.extend(response[key])
for resource in data:
resource['region'] = client.meta.region_name
return data
except ClientError as error:
code = error.response['Error']['Code']
if code == 'AccessDeniedException':
print(' {} FAILURE: MISSING NEEDED PERMISSIONS'.format(func))
else:
print(code)
return []
def main(args, pacu_main):
session = pacu_main.get_active_session()
###### Don't modify these. They can be removed if you are not using the function.
args = parser.parse_args(args)
print = pacu_main.print
get_regions = pacu_main.get_regions
######
if args.regions is None:
regions = get_regions('transfer')
if regions is None or regions == [] or regions == '' or regions == {}:
print('This module is not supported in any regions specified in the current sessions region set. Exiting...')
return
else:
regions = args.regions.split(',')
all_servers = []
for region in regions:
print('Starting region {}...'.format(region))
client = pacu_main.get_boto3_client('transfer', region)
# Servers instances
servers = fetch_transfer_servers(client, 'list_servers', 'Servers', print)
print(' {} server(s) found.'.format(len(servers)))
for server in servers:
server_id = server['ServerId']
server_details = client.describe_server(ServerId=server_id)
server['Details'] = server_details
server['Users'] = fetch_transfer_server_users(client, 'list_users', 'Users', print, ServerId=server_id)
for user in server['Users']:
user['Details'] = client.describe_user(ServerId=server_id, UserName=user['UserName'])
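# Transfer Family servers expose a predictable default endpoint hostname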
server['Endpoint'] = f"{server_id}.server.transfer.{server['region']}.amazonaws.com"
all_servers += servers
summary_data = {
'servers': len(all_servers),
}
for var in vars(args):
if var == 'regions':
continue
if not getattr(args, var):
del summary_data[var]
transfer_data = deepcopy(session.Transfer)
transfer_data['Servers'] = all_servers
session.update(pacu_main.database, Transfer=transfer_data)
return summary_data
def METHOD_NAME(data, pacu_main):
out = ''
for key in data:
out += ' {} total {}(s) found.\n'.format(data[key], key[:-1])
out += '\n AWS Transfer Family resources saved in Pacu database. You can run `data Transfer` to view this info.\n'
return out |
298,766 | layout | import os
from conan import ConanFile
from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout
from conan.tools.files import get, copy, rm, export_conandata_patches, apply_conandata_patches
from conan.tools.microsoft import is_msvc
required_conan_version = ">=1.53.0"
class METISConan(ConanFile):
name = "metis"
description = (
"Set of serial programs for partitioning graphs, "
"partitioning finite element meshes, and producing "
"fill reducing orderings for sparse matrices"
)
license = "Apache-2.0"
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/KarypisLab/METIS"
topics = ("karypislab", "graph", "partitioning-algorithms")
package_type = "library"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"with_64bit_types": [True, False],
"enable_gkrand": [True, False],
"enable_gkregex": [True, False],
"with_openmp": [True, False],
"with_pcre": [True, False],
"with_valgrind": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"with_64bit_types": True,
"enable_gkrand": False,
"enable_gkregex": False,
"with_openmp": False,
"with_pcre": False,
"with_valgrind": False,
}
def export_sources(self):
export_conandata_patches(self)
copy(self, "CMakeLists.txt", self.recipe_folder, self.export_sources_folder)
copy(
self,
"gkbuild.cmake",
self.recipe_folder,
os.path.join(self.export_sources_folder, "src"),
)
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
del self.options.enable_gkregex
def configure(self):
if self.options.shared:
self.options.rm_safe("fPIC")
self.settings.rm_safe("compiler.libcxx")
self.settings.rm_safe("compiler.cppstd")
def METHOD_NAME(self):
cmake_layout(self, src_folder="src")
def requirements(self):
self.requires("gklib/5.1.1")
def source(self):
get(self, **self.conan_data["sources"][self.version], strip_root=True)
rm(self, "*.pdf", self.source_folder, recursive=True)
copy(self, "CMakeLists.txt", self.export_sources_folder, self.source_folder)
def generate(self):
tc = CMakeToolchain(self)
tc.cache_variables["VALGRIND"] = self.options.with_valgrind
tc.cache_variables["OPENMP"] = self.options.with_openmp
tc.cache_variables["PCRE"] = self.options.with_pcre
tc.cache_variables["GKREGEX"] = self.settings.os == "Windows" or self.options.enable_gkregex
tc.cache_variables["GKRAND"] = self.options.enable_gkrand
if self.settings.build_type == "Debug":
tc.preprocessor_definitions["DEBUG"] = ""
else:
# NDEBUG is defined by default by CMake
# tc.preprocessor_definitions["NDEBUG"] = ""
tc.preprocessor_definitions["NDEBUG2"] = ""
bits = 64 if self.options.with_64bit_types else 32
tc.preprocessor_definitions["IDXTYPEWIDTH"] = str(bits)
tc.preprocessor_definitions["REALTYPEWIDTH"] = str(bits)
tc.generate()
tc = CMakeDeps(self)
tc.generate()
def _patch_sources(self):
apply_conandata_patches(self)
def build(self):
self._patch_sources()
cmake = CMake(self)
cmake.configure()
cmake.build()
def package(self):
copy(
self,
pattern="LICENSE",
dst=os.path.join(self.package_folder, "licenses"),
src=self.source_folder,
)
cmake = CMake(self)
cmake.install()
rm(self, "*.cmake", self.package_folder, recursive=True)
rm(self, "*.pc", self.package_folder, recursive=True)
rm(self, "*.pdb", self.package_folder, recursive=True)
def package_info(self):
self.cpp_info.libs = ["metis"]
if self.settings.os in ["Linux", "FreeBSD"]:
self.cpp_info.system_libs.append("m")
self.cpp_info.defines.append("LINUX")
elif self.settings.os == "Windows":
self.cpp_info.defines.append("WIN32")
self.cpp_info.defines.append("MSC")
self.cpp_info.defines.append("_CRT_SECURE_NO_DEPRECATE")
elif self.settings.os == "Macos":
self.cpp_info.defines.append("MACOS")
elif self.settings.os == "SunOS":
self.cpp_info.defines.append("SUNOS")
if is_msvc(self):
self.cpp_info.defines.append("__thread=__declspec(thread)")
bits = 64 if self.options.with_64bit_types else 32
self.cpp_info.defines.append(f"IDXTYPEWIDTH={bits}")
self.cpp_info.defines.append(f"REALTYPEWIDTH={bits}")
# Defines for GKLib headers
if self.settings.os == "Windows" or self.options.enable_gkregex:
self.cpp_info.defines.append("USE_GKREGEX")
if self.options.enable_gkrand:
self.cpp_info.defines.append("USE_GKRAND")
if self.options.with_pcre:
self.cpp_info.defines.append("__WITHPCRE__")
if self.options.with_openmp:
self.cpp_info.defines.append("__OPENMP__") |
298,767 | get test begin end | import csv
import matplotlib.pyplot as plt
import sys
# == BEGIN OF CONFIGURATION ==
# FOR DISPLAY - Enter settings relative to test
system = "Test ping - timeout=30 - Diset Server"
multimech_thread = 60
multimech_time = 300
multimech_rampup = 200
multimech_clients = 8
server_maxThreads = 20
# For this line, use vmstat and copy the free memory
memoryOffset = 0
plt.suptitle(
"%s with %d threads\n %d threads/client - %d clients (total %d threads) \n \
duration: %dsec - rampup %dsec \n latency between client starts: 2s"
% (
system,
server_maxThreads,
multimech_thread,
multimech_clients,
multimech_clients * multimech_thread,
multimech_time,
multimech_rampup,
)
)
# END OF CONFIG
def get_results():
if len(sys.argv) < 3:
print("Usage: python plot-distributedTest NAME NUMBEROFFILE")
print("Example: python plot-distributedTest 1532506328.38 2")
sys.exit(1)
file = sys.argv[1]
count = int(sys.argv[2])
results = []
for i in range(1, count + 1):
fileName = f"{file}.{i}.txt"
with open(fileName) as content:
print(f"reading {fileName}")
lines = content.read().split("\n")[1:-1]
result = [line.split(",") for line in lines]
results.append(result)
content.close()
return results
def get_server_stats():
print("Please specify location to file with server stats:")
serverStatFile = "/tmp/results.txt" # raw_input()
print(f"Loading {serverStatFile}")
serverStats = dict()
with open(serverStatFile) as content_file:
lines = content_file.read().split("\n")[1:]
for line in lines:
line = line.split(";")
serverStats[line[0]] = line[1:]
return serverStats
def METHOD_NAME(results):
"""
First result file contain the test started in first
Last result file contain the test started in last
Every test have same duration
So we read first and last line to have begin hour and end hour
(the '2' is because the time is stored in the third column)
"""
return (int(results[0][0][2]), int(results[-1][-1][2]))
def process_data(results, serverStats):
# Begin and end are timestamps
(begin, end) = METHOD_NAME(results)
# Initializing all data list
(time, requestTime, CPU, RAM, reqPerSec, errorRate, loadAvg) = ([], [], [], [], [], [], [])
global memoryOffset
initialRAM = memoryOffset
for t in range(begin, end): # We determine datas time with timestamp
# Offset to set starttime = 0
time.append(t - begin)
# Getting requesttime (mean), number of request/error at a givent time
(reqTime, reqCount, errorCount) = getRequestTimeAndCount(results, t)
requestTime.append(reqTime)
reqPerSec.append(reqCount)
errorRate.append(errorCount)
# Getting info from the server; sometimes no data is recorded for more than one second
try:
# Get CPU usage
CPU.append(100 - int(serverStats[str(t)][14]))
# Get Memory used (delta with memory at the beginning)
usedRam = int(serverStats[str(t)][5]) - initialRAM
RAM.append(usedRam)
# Get the load
loadAvg.append(100 * float(serverStats[str(t)][17])) # 18
except KeyError:
# If reading the values fails, reuse the previous values
CPU.append(CPU[-1])
RAM.append(RAM[-1])
loadAvg.append(loadAvg[-1])
print(f"ERROR - Some values missing for CPU and Memory usage [try to load for time={t}]")
return (time, requestTime, CPU, RAM, reqPerSec, errorRate, loadAvg)
def getRequestTimeAndCount(data, time):
reqCount = 0
errorCount = 0
totalRequest = 0
for result in data:
i = 0
try:
# Ignore past
while int(result[i][2]) < time:
i += 1
# Get infos for present
while int(result[i][2]) == time:
reqCount += 1
totalRequest += float(result[i][4])
if result[i][5] != "":
errorCount += 1
i += 1
except IndexError:
pass
return (int(totalRequest / reqCount) if reqCount > 0 else 0, reqCount, errorCount)
def displayGraph(results, serverStats):
"""
Display all the graph on the same figure
"""
print("Processing data and plot, it may take some time for huge tests")
(time, requestTime, CPU, RAM, reqPerSec, errorCount, loadAvg) = process_data(results, serverStats)
plt.subplot(221)
plt.plot(time, requestTime, "-", label="Request time (s)")
plt.legend()
plt.subplot(222)
plt.plot(time, CPU, "*", label="CPU usage (%)")
plt.plot(time, loadAvg, "*", label="Load average * 100")
plt.legend()
plt.subplot(223)
plt.plot(time, RAM, "*", label="Used Memory (bytes)")
plt.legend()
plt.subplot(224)
plt.plot(time, reqPerSec, "*", label="Requests/sec")
plt.plot(time, errorCount, "*", label="Errors/sec")
plt.legend()
results = get_results()
# process_data(results)
serverStats = get_server_stats()
displayGraph(results, serverStats)
plt.show() |
298,768 | unregister model | import glob
import importlib
import json
import os
import subprocess
import sys
import tempfile
import threading
from os import path
from pathlib import Path
from subprocess import PIPE, STDOUT, Popen
import requests
# To help discover margen modules
REPO_ROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../")
sys.path.append(REPO_ROOT)
from ts_scripts import marsgen as mg
ROOT_DIR = os.path.join(tempfile.gettempdir(), "workspace")
MODEL_STORE = path.join(ROOT_DIR, "model_store/")
CODEBUILD_WD = path.abspath(path.join(__file__, "../../.."))
class PrintPipeTillTheEnd(threading.Thread):
def __init__(self, pipe):
super().__init__()
self.pipe = pipe
def run(self):
for line in self.pipe.stdout:
print(line.decode("utf-8").strip())
def start_torchserve(
model_store=None, snapshot_file=None, no_config_snapshots=False, gen_mar=True
):
stop_torchserve()
create_mar_file_table()
cmd = ["torchserve", "--start"]
model_store = model_store if model_store else MODEL_STORE
if gen_mar:
mg.gen_mar(model_store)
cmd.extend(["--model-store", model_store])
if snapshot_file:
cmd.extend(["--ts-config", snapshot_file])
if no_config_snapshots:
cmd.extend(["--no-config-snapshots"])
print(cmd)
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
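# block until TorchServe reports startup, then keep streaming the remaining output in a background thread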
for line in p.stdout:
print(line.decode("utf8").strip())
if "Model server started" in str(line).strip():
break
print_thread = PrintPipeTillTheEnd(p)
print_thread.start()
def stop_torchserve():
subprocess.run(["torchserve", "--stop", "--foreground"])
def delete_all_snapshots():
for f in glob.glob("logs/config/*"):
os.remove(f)
assert len(glob.glob("logs/config/*")) == 0
def delete_model_store(model_store=None):
"""Removes all model mar files from model store"""
model_store = model_store if model_store else MODEL_STORE
for f in glob.glob(model_store + "/*.mar"):
os.remove(f)
def torchserve_cleanup():
stop_torchserve()
delete_model_store()
delete_all_snapshots()
def register_model(model_name, url):
params = (
("model_name", model_name),
("url", url),
("initial_workers", "1"),
("synchronous", "true"),
)
return register_model_with_params(params)
def register_model_with_params(params):
response = requests.post("http://localhost:8081/models", params=params)
return response
def METHOD_NAME(model_name):
response = requests.delete("http://localhost:8081/models/{}".format(model_name))
return response
def delete_mar_file_from_model_store(model_store=None, model_mar=None):
model_store = (
model_store
if (model_store is not None)
else os.path.join(ROOT_DIR, "model_store")
)
if model_mar is not None:
for f in glob.glob(path.join(model_store, model_mar + "*")):
os.remove(f)
environment_json = "../postman/environment.json"
mar_file_table = {}
def create_mar_file_table():
if not mar_file_table:
with open(
os.path.join(os.path.dirname(__file__), *environment_json.split("/")), "rb"
) as f:
env = json.loads(f.read())
for item in env["values"]:
if item["key"].startswith("mar_path_"):
mar_file_table[item["key"]] = item["value"]
def model_archiver_command_builder(
model_name=None,
version=None,
model_file=None,
serialized_file=None,
handler=None,
extra_files=None,
force=False,
config_file=None,
):
# Initialize a list to store the command-line arguments
cmd_parts = ["torch-model-archiver"]
# Append arguments to the list
if model_name:
cmd_parts.append(f"--model-name {model_name}")
if version:
cmd_parts.append(f"--version {version}")
if model_file:
cmd_parts.append(f"--model-file {model_file}")
if serialized_file:
cmd_parts.append(f"--serialized-file {serialized_file}")
if handler:
cmd_parts.append(f"--handler {handler}")
if extra_files:
cmd_parts.append(f"--extra-files {extra_files}")
if config_file:
cmd_parts.append(f"--config-file {config_file}")
if force:
cmd_parts.append("--force")
# Append the export-path argument to the list
cmd_parts.append(f"--export-path {MODEL_STORE}")
# Convert the list into a string to represent the complete command
cmd = " ".join(cmd_parts)
return cmd
def load_module_from_py_file(py_file: str) -> object:
"""
This method loads a module from a py file which is not in the Python path
"""
module_name = Path(py_file).name
loader = importlib.machinery.SourceFileLoader(module_name, py_file)
spec = importlib.util.spec_from_loader(module_name, loader)
module = importlib.util.module_from_spec(spec)
loader.exec_module(module)
return module
def cleanup_model_store(model_store=None):
# rm -rf $MODEL_STORE_DIR / *
for f in glob.glob(os.path.join(model_store, "*")):
os.remove(f) |
298,769 | test basic | import numpy as np
import pytest
import pytensor
from pytensor.gradient import GradientError
from pytensor.tensor.basic import cast
from pytensor.tensor.math import complex as at_complex
from pytensor.tensor.math import complex_from_polar, imag, real
from pytensor.tensor.type import cvector, dvector, fmatrix, fvector, imatrix, zvector
from tests import unittest_tools as utt
class TestRealImag:
def METHOD_NAME(self):
x = zvector()
rng = np.random.default_rng(23)
xval = np.asarray(
[complex(rng.standard_normal(), rng.standard_normal()) for i in range(10)]
)
assert np.all(xval.real == pytensor.function([x], real(x))(xval))
assert np.all(xval.imag == pytensor.function([x], imag(x))(xval))
def test_on_real_input(self):
x = dvector()
rng = np.random.default_rng(23)
xval = rng.standard_normal(10)
assert np.all(0 == pytensor.function([x], imag(x))(xval))
assert np.all(xval == pytensor.function([x], real(x))(xval))
x = imatrix()
xval = np.asarray(rng.standard_normal((3, 3)) * 100, dtype="int32")
assert np.all(0 == pytensor.function([x], imag(x))(xval))
assert np.all(xval == pytensor.function([x], real(x))(xval))
def test_cast(self):
x = zvector()
with pytest.raises(TypeError):
cast(x, "int32")
def test_complex(self):
rng = np.random.default_rng(2333)
m = fmatrix()
c = at_complex(m[0], m[1])
assert c.type == cvector
r, i = [real(c), imag(c)]
assert r.type == fvector
assert i.type == fvector
f = pytensor.function([m], [r, i])
mval = np.asarray(rng.standard_normal((2, 5)), dtype="float32")
rval, ival = f(mval)
assert np.all(rval == mval[0]), (rval, mval[0])
assert np.all(ival == mval[1]), (ival, mval[1])
@pytest.mark.skip(reason="Complex grads not enabled, see #178")
def test_complex_grads(self):
def f(m):
c = at_complex(m[0], m[1])
return 0.5 * real(c) + 0.9 * imag(c)
rng = np.random.default_rng(9333)
mval = np.asarray(rng.standard_normal((2, 5)))
utt.verify_grad(f, [mval])
@pytest.mark.skip(reason="Complex grads not enabled, see #178")
def test_mul_mixed0(self):
def f(a):
ac = at_complex(a[0], a[1])
return abs((ac) ** 2).sum()
rng = np.random.default_rng(9333)
aval = np.asarray(rng.standard_normal((2, 5)))
try:
utt.verify_grad(f, [aval])
except GradientError as e:
print(e.num_grad.gf)
print(e.analytic_grad)
raise
@pytest.mark.skip(reason="Complex grads not enabled, see #178")
def test_mul_mixed1(self):
def f(a):
ac = at_complex(a[0], a[1])
return abs(ac).sum()
rng = np.random.default_rng(9333)
aval = np.asarray(rng.standard_normal((2, 5)))
try:
utt.verify_grad(f, [aval])
except GradientError as e:
print(e.num_grad.gf)
print(e.analytic_grad)
raise
@pytest.mark.skip(reason="Complex grads not enabled, see #178")
def test_mul_mixed(self):
def f(a, b):
ac = at_complex(a[0], a[1])
return abs((ac * b) ** 2).sum()
rng = np.random.default_rng(9333)
aval = np.asarray(rng.standard_normal((2, 5)))
bval = rng.standard_normal(5)
try:
utt.verify_grad(f, [aval, bval])
except GradientError as e:
print(e.num_grad.gf)
print(e.analytic_grad)
raise
@pytest.mark.skip(reason="Complex grads not enabled, see #178")
def test_polar_grads(self):
def f(m):
c = complex_from_polar(abs(m[0]), m[1])
return 0.5 * real(c) + 0.9 * imag(c)
rng = np.random.default_rng(9333)
mval = np.asarray(rng.standard_normal((2, 5)))
utt.verify_grad(f, [mval])
@pytest.mark.skip(reason="Complex grads not enabled, see #178")
def test_abs_grad(self):
def f(m):
c = at_complex(m[0], m[1])
return 0.5 * abs(c)
rng = np.random.default_rng(9333)
mval = np.asarray(rng.standard_normal((2, 5)))
utt.verify_grad(f, [mval]) |
298,770 | pboff | from smarthome.smartbase0 import Sbase0
import urllib.request
import json
import logging
log = logging.getLogger(__name__)
class Spbase(Sbase0):
def __init__(self):
#
# setting
super().__init__()
self._device_pbip = 'none'
self.device_nummer = 0
def updatepar(self, input_param):
self._smart_param = input_param.copy()
self.device_nummer = int(self._smart_param.get('device_nummer', '0'))
for key, value in self._smart_param.items():
if (key == 'device_pbip'):
self._device_pbip = value
else:
log.info("(" + str(self.device_nummer) + ") "
+ __class__.__name__ + " überlesen " + key +
" " + value)
def showstat(self, manual, relais):
pass
class Sbshelly(Spbase):
def __init__(self):
# setting
super().__init__()
self.counter = 0
self.led = 9
self.event_cnt = 0
self.event = 'none'
self.oldevent_cnt = 0
self.oldevent = 'none'
def showstat(self, manual, relais):
if (manual == 0): # automatic mode
self.METHOD_NAME()
else: # manual mode
if (relais == 0):
self.pbon()
else:
self.pbblink()
def checkbut(self, manual, relais, manual_control):
newmanual = manual
newmanual_control = manual_control
try:
at = str(urllib.request.urlopen("http://" +
str(self._device_pbip)
+ "/status",
timeout=3).read().decode("utf-8"))
except Exception as e1:
log.warning("Shelly button ch (%d) %s Fehlermeldung: %s "
% (self.device_nummer, self._device_pbip, str(e1)))
return newmanual, newmanual_control
a = json.loads(at)
with open(self._basePath+'/ramdisk/smarthome_device_ret' +
str(self.device_nummer) + '_shelly_bp', 'w') as f:
f.write(str(a))
self.oldevent_cnt = self.event_cnt
self.oldevent = self.event
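# the Shelly /status endpoint reports the last button event ('S' = short push) plus an event counter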
self.event_cnt = int(a['inputs'][0]['event_cnt'])
self.event = str(a['inputs'][0]['event'])
if (self.oldevent == 'none'):
return newmanual, newmanual_control
if ((self.event == self.oldevent) and
(self.event_cnt == self.oldevent_cnt)):
return newmanual, newmanual_control
log.info("Shelly button pressed (%d) %s %s"
% (self.device_nummer, self._device_pbip, self.event))
# in automatic mode -> a single press switches to manual
if (manual == 0):
newmanual = 1
return newmanual, newmanual_control
# in manual mode -> a single press toggles between on and off
if (self.event == 'S'):
if (manual_control == 1):
newmanual_control = 0
else:
newmanual_control = 1
return newmanual, newmanual_control
# in manual mode -> pressing repeatedly switches back to automatic
newmanual = 0
return newmanual, newmanual_control
def METHOD_NAME(self):
if (self.led == 0):
return
try:
urllib.request.urlopen("http://" + str(self._device_pbip) +
"/settings?led_status_disable=true",
timeout=3)
log.info("Shelly button led off (%d) %s"
% (self.device_nummer, self._device_pbip))
except Exception as e1:
log.warning("Shelly button off (%d) %s Fehlermeldung: %s "
% (self.device_nummer, self._device_pbip, str(e1)))
self.led = 0
def pbon(self):
if (self.led == 1):
return
try:
urllib.request.urlopen("http://" + str(self._device_pbip) +
"/settings?led_status_disable=false",
timeout=3)
log.info("Shelly button led on (%d) %s"
% (self.device_nummer, self._device_pbip))
except Exception as e1:
log.warning("Shelly button on (%d) %s Fehlermeldung: %s "
% (self.device_nummer, self._device_pbip, str(e1)))
self.led = 1
def pbblink(self):
self.counter = self.counter + 1
if (self.counter < 1):
return
self.counter = 0
if (self.led == 0):
self.pbon()
else:
self.METHOD_NAME() |
298,771 | on signal data edited | from PyQt5.QtCore import Qt, pyqtSlot
from PyQt5.QtGui import QBrush, QColor, QIcon, QPen
from PyQt5.QtWidgets import QMessageBox
from urh import settings
from urh.controller.dialogs.SendRecvDialog import SendRecvDialog
from urh.dev.VirtualDevice import VirtualDevice, Mode
from urh.signalprocessing.IQArray import IQArray
from urh.signalprocessing.Signal import Signal
from urh.ui.painting.SignalSceneManager import SignalSceneManager
from urh.util import FileOperator
from urh.util.Logger import logger
class SendDialog(SendRecvDialog):
def __init__(self, project_manager, modulated_data, modulation_msg_indices=None, continuous_send_mode=False,
parent=None, testing_mode=False):
super().__init__(project_manager, is_tx=True, continuous_send_mode=continuous_send_mode,
parent=parent, testing_mode=testing_mode)
self.graphics_view = self.ui.graphicsViewSend
self.ui.stackedWidget.setCurrentWidget(self.ui.page_send)
self.hide_receive_ui_items()
self.ui.btnStart.setIcon(QIcon.fromTheme("media-playback-start"))
self.setWindowTitle("Send Signal")
self.setWindowIcon(QIcon.fromTheme("media-playback-start"))
self.ui.btnStart.setToolTip("Send data")
self.ui.btnStop.setToolTip("Stop sending")
self.device_is_sending = False
self.modulation_msg_indices = modulation_msg_indices
if self.modulation_msg_indices is not None:
self.ui.progressBarMessage.setMaximum(len(self.modulation_msg_indices))
else:
self.ui.progressBarMessage.hide()
self.ui.labelCurrentMessage.hide()
if modulated_data is not None:
assert isinstance(modulated_data, IQArray)
# modulated_data is none in continuous send mode
self.ui.progressBarSample.setMaximum(len(modulated_data))
samp_rate = self.device_settings_widget.ui.spinBoxSampleRate.value()
signal = Signal("", "Modulated Preview", sample_rate=samp_rate)
signal.iq_array = modulated_data
self.scene_manager = SignalSceneManager(signal, parent=self)
self.send_indicator = self.scene_manager.scene.addRect(0, -2, 0, 4,
QPen(QColor(Qt.transparent), 0),
QBrush(settings.SEND_INDICATOR_COLOR))
self.send_indicator.stackBefore(self.scene_manager.scene.selection_area)
self.scene_manager.init_scene()
self.graphics_view.set_signal(signal)
self.graphics_view.sample_rate = samp_rate
self.create_connects()
self.device_settings_widget.update_for_new_device(overwrite_settings=False)
def create_connects(self):
super().create_connects()
self.graphics_view.save_as_clicked.connect(self.on_graphics_view_save_as_clicked)
self.scene_manager.signal.data_edited.connect(self.METHOD_NAME)
def _update_send_indicator(self, width: int):
y, h = self.ui.graphicsViewSend.view_rect().y(), self.ui.graphicsViewSend.view_rect().height()
self.send_indicator.setRect(0, y - h, width, 2 * h + abs(y))
def set_current_message_progress_bar_value(self, current_sample: int):
if self.modulation_msg_indices is not None:
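# find the first message whose last modulated sample lies at or beyond the current position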
msg_index = next((i for i, sample in enumerate(self.modulation_msg_indices) if sample >= current_sample),
len(self.modulation_msg_indices))
self.ui.progressBarMessage.setValue(msg_index + 1)
def update_view(self):
if super().update_view():
self._update_send_indicator(self.device.current_index)
self.ui.progressBarSample.setValue(self.device.current_index)
self.set_current_message_progress_bar_value(self.device.current_index)
if not self.device.sending_finished:
self.ui.lblCurrentRepeatValue.setText(str(self.device.current_iteration + 1))
else:
self.ui.btnStop.click()
self.ui.lblCurrentRepeatValue.setText("Sending finished")
def init_device(self):
device_name = self.selected_device_name
num_repeats = self.device_settings_widget.ui.spinBoxNRepeat.value()
sts = self.scene_manager.signal.iq_array
self.device = VirtualDevice(self.backend_handler, device_name, Mode.send, samples_to_send=sts,
device_ip="192.168.10.2", sending_repeats=num_repeats, parent=self)
self._create_device_connects()
@pyqtSlot()
def on_graphics_view_save_as_clicked(self):
filename = FileOperator.ask_save_file_name("signal.complex")
if filename:
try:
try:
self.scene_manager.signal.sample_rate = self.device.sample_rate
except Exception as e:
logger.exception(e)
self.scene_manager.signal.save_as(filename)
except Exception as e:
QMessageBox.critical(self, self.tr("Error saving signal"), e.args[0])
@pyqtSlot()
def METHOD_NAME(self):
signal = self.scene_manager.signal
self.ui.progressBarSample.setMaximum(signal.num_samples)
self.device.samples_to_send = signal.iq_array.data
self.scene_manager.init_scene()
self.ui.graphicsViewSend.redraw_view()
@pyqtSlot()
def on_start_clicked(self):
super().on_start_clicked()
if self.ui.progressBarSample.value() >= self.ui.progressBarSample.maximum() - 1:
self.on_clear_clicked()
if self.device_is_sending:
self.device.stop("Sending paused by user")
else:
self.device.start()
@pyqtSlot()
def on_stop_clicked(self):
super().on_stop_clicked()
self.on_clear_clicked()
@pyqtSlot()
def on_device_stopped(self):
super().on_device_stopped()
self.ui.btnStart.setIcon(QIcon.fromTheme("media-playback-start"))
self.ui.btnStart.setText("Start")
self.ui.btnStart.setToolTip("Start sending")
self.device_is_sending = False
@pyqtSlot()
def on_device_started(self):
super().on_device_started()
self.device_is_sending = True
self.ui.btnStart.setEnabled(True)
self.ui.btnStart.setIcon(QIcon.fromTheme("media-playback-pause"))
self.ui.btnStart.setText("Pause")
self.set_device_ui_items_enabled(False)
@pyqtSlot()
def on_clear_clicked(self):
self._update_send_indicator(0)
self.reset() |
298,772 | run | # (c) 2015, Ansible Inc,
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleAction, AnsibleActionFail
from ansible.executor.module_common import get_action_args_with_defaults
from ansible.module_utils.facts.system.pkg_mgr import PKG_MGRS
from ansible.plugins.action import ActionBase
from ansible.utils.display import Display
display = Display()
class ActionModule(ActionBase):
TRANSFERS_FILES = False
BUILTIN_PKG_MGR_MODULES = {manager['name'] for manager in PKG_MGRS}
def METHOD_NAME(self, tmp=None, task_vars=None):
''' handler for package operations '''
self._supports_check_mode = True
self._supports_async = True
result = super(ActionModule, self).METHOD_NAME(tmp, task_vars)
del tmp # tmp no longer has any effect
module = self._task.args.get('use', 'auto')
if module == 'auto':
try:
if self._task.delegate_to: # if we delegate, we should use delegated host's facts
module = self._templar.template("{{hostvars['%s']['ansible_facts']['pkg_mgr']}}" % self._task.delegate_to)
else:
module = self._templar.template('{{ansible_facts.pkg_mgr}}')
except Exception:
pass # could not get it from template!
try:
if module == 'auto':
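# Fall back to a minimal fact gather (only ansible_pkg_mgr) when templating could not resolve the package manager.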
facts = self._execute_module(
module_name='ansible.legacy.setup',
module_args=dict(filter='ansible_pkg_mgr', gather_subset='!all'),
task_vars=task_vars)
display.debug("Facts %s" % facts)
module = facts.get('ansible_facts', {}).get('ansible_pkg_mgr', 'auto')
if module != 'auto':
if not self._shared_loader_obj.module_loader.has_plugin(module):
raise AnsibleActionFail('Could not find a module for %s.' % module)
else:
# run the 'package' module
new_module_args = self._task.args.copy()
if 'use' in new_module_args:
del new_module_args['use']
# get defaults for specific module
context = self._shared_loader_obj.module_loader.find_plugin_with_context(module, collection_list=self._task.collections)
new_module_args = get_action_args_with_defaults(
context.resolved_fqcn, new_module_args, self._task.module_defaults, self._templar,
action_groups=self._task._parent._play._action_groups
)
if module in self.BUILTIN_PKG_MGR_MODULES:
# prefix with ansible.legacy to eliminate external collisions while still allowing library/ override
module = 'ansible.legacy.' + module
display.vvvv("Running %s" % module)
result.update(self._execute_module(module_name=module, module_args=new_module_args, task_vars=task_vars, wrap_async=self._task.async_val))
else:
raise AnsibleActionFail('Could not detect which package manager to use. Try gathering facts or setting the "use" option.')
except AnsibleAction as e:
result.update(e.result)
finally:
if not self._task.async_val:
# remove a temporary path we created
self._remove_tmp_path(self._connection._shell.tmpdir)
return result |
298,773 | test axis | #!/usr/bin/env python
# coding: utf-8
"""Test related to the definition and use of geometric objects (planes, axes, ...)."""
import numpy as np
from capytaine.meshes.geometry import (
e_x, e_y, e_z,
Axis, Ox_axis, Oy_axis, Oz_axis,
Plane, xOz_Plane, yOz_Plane, xOy_Plane,
orthogonal_vectors, parallel_vectors,
parallel_vectors_with_same_direction
)
def test_helper_functions():
assert orthogonal_vectors(e_x, e_y)
assert not orthogonal_vectors(e_x, 2*e_x)
assert parallel_vectors(e_x, -e_x)
assert not parallel_vectors(e_x, e_y)
assert parallel_vectors_with_same_direction(e_y, 2*e_y)
assert not parallel_vectors_with_same_direction(e_y, -e_y)
def METHOD_NAME():
assert np.allclose(Axis(vector=(1, 1, 1)).vector, np.sqrt(3)/3 * np.ones((3,)))
assert Axis(vector=(1, 1, 1), point=(0, 0, 0)) == Axis(vector=(1, 1, 1), point=(2, 2, 2))
assert Axis(vector=(0, 0, 1), point=(0, 0, 0)) == Axis(vector=(2e-16, 3e-16, 1), point=(0, 0, 1.5))
assert Axis(vector=(1, 1, 1), point=(0, 0, 0)) != Axis(vector=(1, 1, 1), point=(2, 2, 0))
assert (2, 0, 0) in Ox_axis
assert (0, 1, 0) not in Ox_axis
assert Ox_axis.is_orthogonal_to(yOz_Plane)
assert not Ox_axis.is_orthogonal_to((1, 1, 1))
assert Ox_axis.angle_with_respect_to(Oy_axis) == np.pi/2
assert Oy_axis.angle_with_respect_to(Ox_axis) == np.pi/2
def test_axis_transformation():
assert Ox_axis.translated_x(10) == Ox_axis
assert Ox_axis.translated_y(10) == Axis(vector=(1, 0, 0), point=(0, 10, 0))
assert Ox_axis.rotated(Ox_axis, angle=np.pi/2) == Ox_axis
assert Ox_axis.rotated(Oy_axis, angle=-np.pi/2) == Oz_axis
assert Ox_axis.mirrored(plane=yOz_Plane) == Ox_axis
assert Ox_axis.mirrored(plane=xOz_Plane.translated_y(2)) == Axis(vector=(1, 0, 0), point=(0, 4, 0))
axis1 = Axis(vector=(1, 1, 1), point=(0, 0, 0))
axis2 = Axis(vector=(1, 2, 3), point=(0, 0, 0))
assert axis1.rotated_around_center_to_align_vectors(axis1.point, axis1.vector, axis2.vector) == axis2
axis1.rotate(axis2, np.pi)
assert axis1.rotated_around_center_to_align_vectors(axis1.point, axis1.vector, axis2.vector) == axis2
axis1.vector *= -1
assert axis1.rotated_around_center_to_align_vectors(axis1.point, axis1.vector, axis2.vector) == axis2
axis1 = Axis(vector=(1, 1, 1), point=(1, 2, 0))
axis2 = Axis(vector=(2, 2, 2), point=(0, 0, 0))
assert axis1.translated_point_to_point(axis1.point, axis2.point) == axis2
def test_rotation_non_reference_axis():
p = np.random.rand(3)
axis = Axis(vector=(0, 0, 1), point=p)
rotated_point = axis.rotate_points(p, np.pi)
assert np.allclose(rotated_point, p)
def test_invariance_of_rotation_center():
p = np.random.rand(3)
axis1 = Axis(vector=(0, 0, 1), point=p)
axis2 = Axis(vector=(0, 1, 0), point=p)
assert np.allclose(axis1.rotated(axis2, np.pi/3).point, p)
def test_rotation_around_center_to_align_vectors_commutes_with_translation():
n = np.random.rand(3)
n /= np.linalg.norm(n)
axis = Axis(vector=n, point=(0, 0, 0))
a = axis.rotated_around_center_to_align_vectors((0, 0, 0), n, (0, 1, 0)).translated((1, 1, 1))
b = axis.translated((1, 1, 1)).rotated_around_center_to_align_vectors((1, 1, 1), n, (0, 1, 0))
assert np.allclose(a.vector, b.vector, (0, 1, 0))
assert np.allclose(a.point, b.point)
def test_plane():
assert (0, 1, 1) in yOz_Plane
assert Oy_axis in yOz_Plane
assert np.allclose(Plane(normal=(1, 1, 0)).normal, (np.sqrt(2)/2, np.sqrt(2)/2, 0))
assert yOz_Plane == Plane(point=(0, 1, 1), normal=(2, 0, 0))
assert xOy_Plane.is_orthogonal_to(Oz_axis)
points_in_xplus = np.random.rand(10, 3) + np.array([1.0, -0.5, -0.5])
assert np.all(yOz_Plane.distance_to_point(points_in_xplus) > 0)
assert np.all(yOz_Plane.translated_x(-5.0).distance_to_point(points_in_xplus) > 0)
assert not np.any(yOz_Plane.translated_x(5.0).distance_to_point(points_in_xplus) > 0)
points_in_xminus = np.random.rand(10, 3) + np.array([-2.0, -0.5, -0.5])
assert np.all(yOz_Plane.distance_to_point(points_in_xminus) < 0)
assert not np.any(yOz_Plane.translated_x(-5.0).distance_to_point(points_in_xminus) < 0)
assert np.all(yOz_Plane.translated_x(5.0).distance_to_point(points_in_xminus) < 0)
def test_plane_transformations():
# TRANSLATIONS
translated_plane = xOz_Plane.translate(vector=(1, 0, 0), inplace=False)
assert xOz_Plane is not translated_plane
assert xOz_Plane == translated_plane
assert yOz_Plane.translated_x(10).rotated_y(np.pi/8).c == 10
translated_plane = xOz_Plane.translate(vector=(0, 1, 0), inplace=False)
assert translated_plane.c == 1
assert np.all(translated_plane.normal == xOz_Plane.normal)
# ROTATIONS
rotated_plane = xOz_Plane.rotate(Oy_axis, angle=np.pi/12, inplace=False)
assert rotated_plane == xOz_Plane.rotated(Oy_axis, angle=np.pi/12)
assert xOz_Plane is not rotated_plane
assert xOz_Plane == rotated_plane
rotated_plane = xOz_Plane.rotate(Ox_axis, angle=np.pi/2, inplace=False)
assert rotated_plane == xOy_Plane
# MIRRORED BY ITSELF
plane = Plane(normal=(1, 0, 0), point=(0.3, 0.2, 0.6))
assert plane.mirrored(plane) != plane
assert plane.mirrored(plane) == Plane(normal=(-1, 0, 0), point=(0.3, 0.2, 0.6))
flipped_plane = plane.rotate(Axis(point=plane.point, vector=(0, 1, 0)), np.pi)
assert flipped_plane == plane.mirror(plane)
|
298,774 | make mock run | import pytest
import xarray
from ..projector import (
get_run_projection,
project_xarray,
get_xarray_config_field,
project_summary_dict
)
EVENT_FIELD = 'event_field_name'
EVENT_CONFIGURATION_FIELD = 'event_configuration_name'
START_DOC_FIELD = 'start_doc_metadata_name'
START_DOC_FIELD_2 = 'start_doc_metadata_name_2'
MOCK_IMAGE = xarray.DataArray([[1, 2], [3, 4]])
BEAMLINE_ENERGY_VALS = [1, 2, 3, 4, 5]
OTHER_VALS = [-1, -2, -3, -4, -5]
CCD = [MOCK_IMAGE+1, MOCK_IMAGE+2, MOCK_IMAGE+3, MOCK_IMAGE+4, MOCK_IMAGE+5]
good_projection = [{
"name": "nxsas",
"version": "2020.1",
"configuration": {"name": "RSoXS"},
"projection": {
START_DOC_FIELD: {"type": "linked", "location": "start", "field": "sample"},
START_DOC_FIELD_2: {"type": "linked", "location": "start", "field": "sample"},
EVENT_FIELD: {"type": "linked", "location": "event", "stream": "primary", "field": "ccd"},
EVENT_CONFIGURATION_FIELD: {"type": "linked",
"location": "configuration",
"stream": "primary",
"config_index": 0,
"config_device": "camera_thingy",
"field": "camera_manufacturer"},
}
}]
bad_location = [{
"name": "nxsas",
"version": "2020.1",
"configuration": {"name": "RSoXS"},
"projection": {
START_DOC_FIELD: {"type": "linked", "location": "i_dont_exist", "field": "sample"},
}
}]
bad_stream = [{
"name": "nxsas",
"version": "2020.1",
"configuration": {"name": "RSoXS"},
"projection": {
START_DOC_FIELD: {"type": "linked", "location": "start", "field": "sample"},
EVENT_FIELD: {"type": "linked", "location": "event", "stream": "i_dont_exist", "field": "ccd"},
}
}]
bad_field = [{
"name": "nxsas",
"version": "2020.1",
"configuration": {"name": "RSoXS"},
"projection": {
START_DOC_FIELD: {"type": "linked", "location": "start", "field": "sample"},
EVENT_FIELD: {"type": "linked", "location": "event", "stream": "primary", "field": "i_dont_exist"},
}
}]
projections_same_name = [
{
"name": "nxsas"
},
{
"name": "nxsas"
}
]
class MockStream():
def __init__(self, metadata):
self.metadata = metadata
data_vars = {
'beamline_energy': ('time', BEAMLINE_ENERGY_VALS),
'ccd': (('time', 'dim_0', 'dim_1'), CCD)
}
self.dataset = xarray.Dataset(data_vars)
self.to_dask_counter = 0
def to_dask(self):
# This enables us to test that the to_dask function is called
# the appropriate number of times.
# It would be better if we could actually return the dataset as a dask dataframe
# However, for some reason this won't let us access the arrays
# by numeric index and will throw an error
self.to_dask_counter += 1
return self.dataset
class MockRun():
def __init__(self, projections=[], sample='',):
self.metadata = {
'start': {
'uid': 42,
'sample': sample,
'projections': projections
},
'descriptors': [
{
'configuration': {
'camera_thingy': {
'data': {'camera_manufacturer': 'berkeley lab'}
}
}
}
],
'stop': {}
}
self.primary = MockStream(self.metadata)
def __getitem__(self, key):
if key == 'primary':
return self.primary
raise KeyError(f'Key: {key}, does not exist')
def METHOD_NAME(projections, sample):
return MockRun(projections, sample)
def dont_panic(run, *args, **kwargs):
# TODO test that args and kwargs are passed
return xarray.DataArray([42, 42, 42, 42, 42])
def test_calculated_projections():
calculated_projection = [{
"name": "nxsas",
"version": "2020.1",
"configuration": {"name": "RSoXS"},
"projection": {
'/entry/event/computed': {
"type": "calculated",
"location": "event",
"stream": "stream_name",
"field": "field_name",
"callable": "databroker.tests.test_projector:dont_panic",
"args": ['trillian'], "kwargs": {"ford": "prefect"}}
}
}]
mock_run = METHOD_NAME(calculated_projection, 'garggle_blaster')
dataset, issues = project_xarray(mock_run)
assert len(issues) == 0
comparison = dataset['/entry/event/computed'] == [42, 42, 42, 42, 42]
assert comparison.all()
def test_find_projection_in_run():
mock_run = METHOD_NAME(good_projection, 'one_ring')
assert get_run_projection(mock_run, projection_name="nxsas") == good_projection[0]
assert get_run_projection(mock_run, projection_name="vogons") is None
assert get_run_projection(mock_run) == good_projection[0] # only one projection in run so choose it
with pytest.raises(KeyError):
mock_run = METHOD_NAME(projections_same_name, 'one_ring')
get_run_projection(mock_run, projection_name="nxsas")
def test_unknown_location():
mock_run = METHOD_NAME(bad_location, 'one_ring')
dataset, issues = project_xarray(mock_run)
assert len(issues) > 0
def test_nonexistent_stream():
mock_run = METHOD_NAME(bad_stream, 'one_ring')
dataset, issues = project_xarray(mock_run)
assert len(issues) > 0
def test_xarray_projector():
mock_run = METHOD_NAME(good_projection, 'one_ring')
dataset, issues = project_xarray(mock_run)
# Ensure that to_dask was called only once on the primary stream,
# which holds both the energy and image data
assert mock_run['primary'].to_dask_counter == 1
assert get_xarray_config_field(dataset, EVENT_FIELD, 0, 'camera_thingy', 'camera_manufacturer') == \
'berkeley lab'
for idx, image in enumerate(dataset[EVENT_FIELD]):
comparison = image == mock_run['primary'].dataset['ccd'][idx] # xarray of comparison results
assert comparison.all() # False if comparison does not contain all True
def test_summary_projector():
mock_run = METHOD_NAME(good_projection, 'one_ring')
dataset, issues = project_summary_dict(mock_run)
assert len(issues) == 0
projection_fields = []
for field, value in good_projection[0]['projection'].items():
if 'location' in value and value['location'] == 'start':
projection_fields.append(field)
assert len(dataset) > 0
for field in projection_fields:
assert dataset[START_DOC_FIELD] == 'one_ring'
assert dataset[START_DOC_FIELD_2] == 'one_ring'
def test_summary_projector_filtered():
mock_run = METHOD_NAME(good_projection, 'one_ring')
dataset, issues = project_summary_dict(mock_run, return_fields=[START_DOC_FIELD_2])
assert len(issues) == 0
assert len(dataset) == 1
assert dataset[START_DOC_FIELD_2] == 'one_ring' |
298,775 | test local builds only | from pavilion import arguments
from pavilion import commands
from pavilion import plugins
from pavilion.status_file import STATES
from pavilion.unittest import PavTestCase
class BuildCmdTests(PavTestCase):
"""The build command is really just the run command in disguise, so
we only need to test the unique arguments that it enables."""
def set_up(self):
plugins.initialize_plugins(self.pav_cfg)
build_cmd = commands.get_command('build')
build_cmd.silence()
def test_multi_build(self):
"""Make sure we can just build multiple simultaneous builds on
both the front-end and the nodes."""
arg_parser = arguments.get_parser()
args = arg_parser.parse_args([
'build',
'-H', 'this',
'build_parallel'
])
build_cmd = commands.get_command(args.command_name)
build_ret = build_cmd.run(self.pav_cfg, args)
build_cmd.outfile.seek(0)
self.assertEqual(build_ret, 0, msg=build_cmd.outfile.read())
for test in build_cmd.last_tests:
test.wait(timeout=10)
# Make sure we actually built separate builds
builds = [test.builder for test in build_cmd.last_tests]
build_names = set([b.name for b in builds])
self.assertEqual(len(build_names), 4)
for test in build_cmd.last_tests:
if not test.skipped:
self.assertEqual(test.status.current().state, STATES.BUILD_DONE,
msg='Test {} status: {}'
.format(test.id, test.status.current()))
def METHOD_NAME(self):
"""Make sure we can just build multiple simultanious builds on
both the front-end and the nodes."""
arg_parser = arguments.get_parser()
args = arg_parser.parse_args([
'build',
'-H', 'this',
'--local-builds-only',
'build_parallel'
])
build_cmd = commands.get_command(args.command_name)
build_ret = build_cmd.run(self.pav_cfg, args)
build_cmd.outfile.seek(0)
self.assertEqual(build_ret, 0, msg=build_cmd.outfile.read())
for test in build_cmd.last_tests:
test.wait(timeout=10)
# Make sure we actually built separate builds
builds = [test.builder for test in build_cmd.last_tests]
build_names = set([b.name for b in builds])
self.assertEqual(len(build_names), 2)
for test in build_cmd.last_tests:
if not test.skipped:
self.assertEqual(test.status.current().state, STATES.BUILD_DONE,
msg='Test {} status: {}'
.format(test.id, test.status.current()))
def test_rebuilds(self):
"""Make sure rebuilding works as expected."""
arg_parser = arguments.get_parser()
args = arg_parser.parse_args([
'build',
'-H', 'this',
'build_rebuild',
'--rebuild',
])
build_cmd = commands.get_command(args.command_name)
self.assertEqual(build_cmd.run(self.pav_cfg, args), 0)
for test in build_cmd.last_tests:
test.wait(timeout=10)
# Make sure we actually built separate builds
builds = [test.builder for test in build_cmd.last_tests]
build_names = set([b.name for b in builds])
self.assertEqual(len(build_names), 4)
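# Depending on scheduling, either test of a pair may perform the build while its twin reuses it, so both orderings are accepted.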
result_matrix = {
'local1': [STATES.BUILD_DONE, STATES.BUILD_REUSED],
'local1a': [STATES.BUILD_REUSED, STATES.BUILD_DONE],
'nodes1': [STATES.BUILD_REUSED, STATES.BUILD_DONE],
'nodes1a': [STATES.BUILD_REUSED, STATES.BUILD_DONE],
'local2': [STATES.BUILD_DONE],
'nodes3': [STATES.BUILD_DONE],
}
orig_names = {}
for test in build_cmd.last_tests:
tname = test.name.split('.')[1]
self.assertIn(test.status.current().state, result_matrix[tname],
msg='Test {} status: {}'
.format(test.name, test.status.current()))
orig_names[test.name] = test.builder.name
self.assertEqual(build_cmd.run(self.pav_cfg, args), 0)
for test in build_cmd.last_tests:
test.wait(timeout=10)
# Make sure we actually built separate builds
builds = [test.builder for test in build_cmd.last_tests]
build_names = set([b.name for b in builds])
self.assertEqual(len(build_names), 4)
for test in build_cmd.last_tests:
test.load_attributes()
expected_name = test.builder.rehash_name(orig_names[test.name])
self.assertEqual(test.build_name, expected_name,
msg=test.name)
origin = test.build_origin_path.resolve().name
self.assertEqual(origin, expected_name,
msg=test.name) |
298,776 | package | from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout
from conan.tools.env import VirtualBuildEnv
from conan.tools.files import copy, get, replace_in_file, rm, rmdir
from conan.tools.gnu import Autotools, AutotoolsToolchain
from conan.tools.layout import basic_layout
from conan.tools.microsoft import unix_path
import os
required_conan_version = ">=1.54.0"
class LibelfConan(ConanFile):
name = "libelf"
description = "ELF object file access library"
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://directory.fsf.org/wiki/Libelf"
license = "LGPL-2.0"
topics = ("elf", "fsf", "libelf", "object-file")
package_type = "library"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
}
exports_sources = "CMakeLists.txt"
@property
def _settings_build(self):
return getattr(self, "settings_build", self.settings)
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.settings.os not in ["Linux", "FreeBSD", "Windows"]:
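# Shared builds are only attempted on Linux, FreeBSD and Windows; elsewhere the recipe falls back to a static library.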
self.options.rm_safe("shared")
self.package_type = "static-library"
if self.options.get_safe("shared"):
self.options.rm_safe("fPIC")
self.settings.rm_safe("compiler.cppstd")
self.settings.rm_safe("compiler.libcxx")
def layout(self):
if self.settings.os == "Windows":
cmake_layout(self, src_folder="src")
else:
basic_layout(self, src_folder="src")
def build_requirements(self):
if self.settings.os != "Windows":
self.tool_requires("autoconf/2.71")
self.tool_requires("gnu-config/cci.20210814")
if self._settings_build.os == "Windows":
self.win_bash = True
if not self.conf.get("tools.microsoft.bash:path", check_type=str):
self.tool_requires("msys2/cci.latest")
def source(self):
get(self, **self.conan_data["sources"][self.version], strip_root=True)
def generate(self):
if self.settings.os == "Windows":
tc = CMakeToolchain(self)
tc.variables["LIBELF_SRC_DIR"] = self.source_folder.replace("\\", "/")
tc.generate()
else:
env = VirtualBuildEnv(self)
env.generate()
tc = AutotoolsToolchain(self)
tc.configure_args.extend([
# it's required, libelf doesn't seem to understand DESTDIR
f"--prefix={unix_path(self, self.package_folder)}",
])
tc.generate()
def build(self):
if self.settings.os == "Windows":
cmake = CMake(self)
cmake.configure(build_script_folder=os.path.join(self.source_folder, os.pardir))
cmake.build()
else:
replace_in_file(self, os.path.join(self.source_folder, "lib", "Makefile.in"),
"$(LINK_SHLIB)",
"$(LINK_SHLIB) $(LDFLAGS)")
# libelf sources ship really outdated 'config.sub' and
# 'config.guess' files, which prevents building libelf for the armv8 arch.
for gnu_config in [
self.conf.get("user.gnu-config:config_guess", check_type=str),
self.conf.get("user.gnu-config:config_sub", check_type=str),
]:
if gnu_config:
copy(self, os.path.basename(gnu_config), src=os.path.dirname(gnu_config), dst=self.source_folder)
autotools = Autotools(self)
autotools.autoreconf()
autotools.configure()
autotools.make()
def METHOD_NAME(self):
copy(self, "COPYING.LIB", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
if self.settings.os == "Windows":
cmake = CMake(self)
cmake.install()
else:
autotools = Autotools(self)
autotools.install()
rmdir(self, os.path.join(self.package_folder, "lib", "locale"))
if self.options.get_safe("shared"):
rm(self, "*.a", os.path.join(self.package_folder, "lib"))
rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig"))
rmdir(self, os.path.join(self.package_folder, "share"))
def package_info(self):
self.cpp_info.set_property("pkg_config_name", "libelf")
self.cpp_info.libs = ["elf"]
self.cpp_info.includedirs.append(os.path.join("include", "libelf")) |
298,777 | simulate | # -*- coding: future_fstrings -*-
#
# Copyright (c) The acados authors.
#
# This file is part of acados.
#
# The 2-Clause BSD License
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.;
#
# authors: Katrin Baumgaertner, Jonathan Frey
from cstr_model import CSTRParameters, setup_cstr_model, setup_linearized_model
from setup_acados_ocp_solver import (
MpcCSTRParameters,
setup_acados_ocp_solver,
AcadosOcpSolver,
)
from setup_acados_integrator import setup_acados_integrator, AcadosSimSolver
import numpy as np
from cstr_utils import plot_cstr
from typing import Optional
def METHOD_NAME(
controller: Optional[AcadosOcpSolver],
plant: AcadosSimSolver,
x0: np.ndarray,
Nsim: int,
X_ref: np.ndarray,
U_ref: np.ndarray,
):
nx = X_ref.shape[1]
nu = U_ref.shape[1]
X = np.ndarray((Nsim + 1, nx))
U = np.ndarray((Nsim, nu))
timings_solver = np.zeros((Nsim))
timings_integrator = np.zeros((Nsim))
# closed loop
xcurrent = x0
X[0, :] = xcurrent
for i in range(Nsim):
if controller is None:
U[i, :] = U_ref[i, :]
else:
# set initial state
controller.set(0, "lbx", xcurrent)
controller.set(0, "ubx", xcurrent)
yref = np.concatenate((X_ref[i, :], U_ref[i, :]))
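# Intermediate stages track both the state and input references; the terminal stage (set below) only takes the state part.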
for stage in range(controller.acados_ocp.dims.N):
controller.set(stage, "yref", yref)
controller.set(controller.acados_ocp.dims.N, "yref", X_ref[i, :])
# solve ocp
status = controller.solve()
if status != 0:
controller.print_statistics()
raise Exception(
f"acados controller returned status {status} in simulation step {i}. Exiting."
)
U[i, :] = controller.get(0, "u")
timings_solver[i] = controller.get_stats("time_tot")
# simulate system
plant.set("x", xcurrent)
plant.set("u", U[i, :])
if plant.acados_sim.solver_options.integrator_type == "IRK":
plant.set("xdot", np.zeros((nx,)))
status = plant.solve()
if status != 0:
raise Exception(
f"acados integrator returned status {status} in simulation step {i}. Exiting."
)
timings_integrator[i] = plant.get("time_tot")
# update state
xcurrent = plant.get("x")
X[i + 1, :] = xcurrent
return X, U, timings_solver, timings_integrator
def main():
Tsim = 25
dt_plant = 0.25 # [min]
cstr_params = CSTRParameters()
mpc_params = MpcCSTRParameters(xs=cstr_params.xs, us=cstr_params.us)
model = setup_cstr_model(cstr_params)
linearized_model = setup_linearized_model(model, cstr_params, mpc_params)
plant_model = setup_cstr_model(cstr_params)
Nsim = int(Tsim / dt_plant)
if not (Tsim / dt_plant).is_integer():
print("WARNING: Tsim / dt_plant should be an integer!")
integrator = setup_acados_integrator(plant_model, dt_plant, cstr_param=cstr_params)
# steady-state
xs = np.array([[0.878, 324.5, 0.659]]).T
us = np.array([[300, 0.1]]).T
# constant ref
X_ref = np.tile(xs, Nsim + 1).T
U_ref = np.tile(us, Nsim).T
# reference jump
xs2 = np.array([0.7, 337, 0.75])
us2 = np.array([305, 0.1])
# Njump = int(Nsim/4)
# X_ref[Njump:3*Njump,:] = xs2
# U_ref[Njump:3*Njump,:] = us2
Njump = int(Nsim / 3)
X_ref[Njump : 2 * Njump, :] = xs2
U_ref[Njump : 2 * Njump, :] = us2
# initial state
x0 = np.array([0.05, 0.75, 0.5]) * xs.ravel()
X_all = []
U_all = []
labels_all = []
timings_solver_all = []
# simulation with constant reference input
label = "constant reference input"
print(f"\n\nRunning simulation with {label}\n\n")
X, U, timings_solver, _ = METHOD_NAME(None, integrator, x0, Nsim, X_ref, U_ref)
X_all.append(X)
U_all.append(U)
timings_solver_all.append(timings_solver)
labels_all.append(label)
# simulation with NMPC controller
label = "NMPC"
print(f"\n\nRunning simulation with {label}\n\n")
ocp_solver = setup_acados_ocp_solver(model, mpc_params, cstr_params=cstr_params)
X, U, timings_solver, _ = METHOD_NAME(
ocp_solver, integrator, x0, Nsim, X_ref=X_ref, U_ref=U_ref
)
X_all.append(X)
U_all.append(U)
timings_solver_all.append(timings_solver)
labels_all.append(label)
ocp_solver = None
# simulation with LMPC controller
label = "LMPC"
print(f"\n\nRunning simulation with {label}\n\n")
mpc_params.linear_mpc = True
ocp_solver = setup_acados_ocp_solver(
linearized_model, mpc_params, cstr_params=cstr_params, use_rti=True
)
mpc_params.linear_mpc = False
X, U, timings_solver, _ = METHOD_NAME(
ocp_solver, integrator, x0, Nsim, X_ref=X_ref, U_ref=U_ref
)
X_all.append(X)
U_all.append(U)
timings_solver_all.append(timings_solver)
labels_all.append(label)
ocp_solver = None
# simulation with NMPC RTI controller
label = "NMPC-RTI"
print(f"\n\nRunning simulation with {label}\n\n")
ocp_solver = setup_acados_ocp_solver(
model, mpc_params, cstr_params=cstr_params, use_rti=True
)
X, U, timings_solver, _ = METHOD_NAME(
ocp_solver, integrator, x0, Nsim, X_ref=X_ref, U_ref=U_ref
)
X_all.append(X)
U_all.append(U)
timings_solver_all.append(timings_solver)
labels_all.append(label)
ocp_solver = None
# Evaluation
print("\nTiming evaluation:\n------------------")
for i in range(len(labels_all)):
label = labels_all[i]
timings_solver = timings_solver_all[i] * 1e3
print(
f"{label}:\n min: {np.min(timings_solver):.3f} ms, mean: {np.mean(timings_solver):.3f} ms, max: {np.max(timings_solver):.3f} ms\n"
)
# print(f"U:\n {U}")
# print(f"X:\n {X}")
# import pdb; pdb.set_trace()
# plot results
plot_cstr(
dt_plant,
X_all,
U_all,
X_ref,
U_ref,
mpc_params.umin,
mpc_params.umax,
labels_all,
) # , fig_filename='cstr_acados_RTI.pdf')
if __name__ == "__main__":
main() |
298,778 | env setup | # --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2020 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
import pytest
from lib389.tasks import *
from lib389.utils import *
from lib389.topologies import topology_st
pytestmark = pytest.mark.tier1
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
CONTAINER_1_OU = 'test_ou_1'
CONTAINER_2_OU = 'test_ou_2'
CONTAINER_1 = f'ou={CONTAINER_1_OU},dc=example,dc=com'
CONTAINER_2 = f'ou={CONTAINER_2_OU},dc=example,dc=com'
USER_CN = 'test_user'
USER_PWD = 'Secret123'
USER = f'cn={USER_CN},{CONTAINER_1}'
@pytest.fixture(scope="module")
def METHOD_NAME(topology_st):
"""Adds two containers, one user and two ACI rules"""
log.info("Add a container: %s" % CONTAINER_1)
topology_st.standalone.add_s(Entry((CONTAINER_1,
{'objectclass': ['top','organizationalunit'],
'ou': CONTAINER_1_OU,
})))
log.info("Add a container: %s" % CONTAINER_2)
topology_st.standalone.add_s(Entry((CONTAINER_2,
{'objectclass': ['top', 'organizationalunit'],
'ou': CONTAINER_2_OU,
})))
log.info("Add a user: %s" % USER)
topology_st.standalone.add_s(Entry((USER,
{'objectclass': 'top person'.split(),
'cn': USER_CN,
'sn': USER_CN,
'userpassword': USER_PWD
})))
ACI_TARGET = '(targetattr="*")'
ACI_ALLOW = '(version 3.0; acl "All rights for %s"; allow (all) ' % USER
ACI_SUBJECT = 'userdn="ldap:///%s";)' % USER
ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT
mod = [(ldap.MOD_ADD, 'aci', ensure_bytes(ACI_BODY))]
log.info("Add an ACI 'allow (all)' by %s to the %s" % (USER,
CONTAINER_1))
topology_st.standalone.modify_s(CONTAINER_1, mod)
log.info("Add an ACI 'allow (all)' by %s to the %s" % (USER,
CONTAINER_2))
topology_st.standalone.modify_s(CONTAINER_2, mod)
@pytest.mark.ds47553
def test_enhanced_aci_modrnd(topology_st, METHOD_NAME):
"""Tests, that MODRDN operation is allowed,
if user has ACI right '(all)' under superior entries,
but doesn't have '(modrdn)'
:id: 492cf2a9-2efe-4e3b-955e-85eca61d66b9
:setup: Standalone instance
:steps:
1. Create two containers
2. Create a user within "ou=test_ou_1,dc=example,dc=com"
3. Add an aci with a rule "cn=test_user is allowed all" within these containers
4. Run MODRDN operation on the "cn=test_user" and set "newsuperior" to
the "ou=test_ou_2,dc=example,dc=com"
5. Check there is no user under container one (ou=test_ou_1,dc=example,dc=com)
6. Check there is a user under container two (ou=test_ou_2,dc=example,dc=com)
:expectedresults:
1. Two containers should be created
2. User should be added successfully
3. This should pass
4. This should pass
5. User should not be found under container ou=test_ou_1,dc=example,dc=com
6. User should be found under container ou=test_ou_2,dc=example,dc=com
"""
log.info("Bind as %s" % USER)
topology_st.standalone.simple_bind_s(USER, USER_PWD)
log.info("User MODRDN operation from %s to %s" % (CONTAINER_1,
CONTAINER_2))
topology_st.standalone.rename_s(USER, "cn=%s" % USER_CN,
newsuperior=CONTAINER_2, delold=1)
log.info("Check there is no user in %s" % CONTAINER_1)
entries = topology_st.standalone.search_s(CONTAINER_1,
ldap.SCOPE_ONELEVEL,
'cn=%s' % USER_CN)
assert not entries
log.info("Check there is our user in %s" % CONTAINER_2)
entries = topology_st.standalone.search_s(CONTAINER_2,
ldap.SCOPE_ONELEVEL,
'cn=%s' % USER_CN)
assert entries
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
# -v for additional verbose
CURRENT_FILE = os.path.realpath(__file__)
pytest.main("-s -v %s" % CURRENT_FILE) |
298,779 | leave call | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
predicate: Callable[[Any], bool],
iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
"""A stable, out-of-place partition."""
results = ([], [])
for i in iterator:
results[int(predicate(i))].append(i)
# Returns trueList, falseList
return results[1], results[0]
class rapidmigrationassessmentCallTransformer(cst.CSTTransformer):
CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
'create_annotation': ('parent', 'annotation', 'request_id', ),
'create_collector': ('parent', 'collector_id', 'collector', 'request_id', ),
'delete_collector': ('name', 'request_id', ),
'get_annotation': ('name', ),
'get_collector': ('name', ),
'list_collectors': ('parent', 'page_size', 'page_token', 'filter', 'order_by', ),
'pause_collector': ('name', 'request_id', ),
'register_collector': ('name', 'request_id', ),
'resume_collector': ('name', 'request_id', ),
'update_collector': ('update_mask', 'collector', 'request_id', ),
}
def METHOD_NAME(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
try:
key = original.func.attr.value
kword_params = self.METHOD_TO_PARAMS[key]
except (AttributeError, KeyError):
# Either not a method from the API or too convoluted to be sure.
return updated
# If the existing code is valid, keyword args come after positional args.
# Therefore, all positional args must map to the first parameters.
args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
if any(k.keyword.value == "request" for k in kwargs):
# We've already fixed this file, don't fix it again.
return updated
kwargs, ctrl_kwargs = partition(
lambda a: a.keyword.value not in self.CTRL_PARAMS,
kwargs
)
args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
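# Re-attach control parameters that were passed positionally as explicit keyword arguments (retry/timeout/metadata).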
ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
request_arg = cst.Arg(
value=cst.Dict([
cst.DictElement(
cst.SimpleString("'{}'".format(name)),
cst.Element(value=arg.value)
)
# Note: the args + kwargs looks silly, but keep in mind that
# the control parameters had to be stripped out, and that
# those could have been passed positionally or by keyword.
for name, arg in zip(kword_params, args + kwargs)]),
keyword=cst.Name("request")
)
return updated.with_changes(
args=[request_arg] + ctrl_kwargs
)
def fix_files(
in_dir: pathlib.Path,
out_dir: pathlib.Path,
*,
transformer=rapidmigrationassessmentCallTransformer(),
):
"""Duplicate the input dir to the output dir, fixing file method calls.
Preconditions:
* in_dir is a real directory
* out_dir is a real, empty directory
"""
pyfile_gen = (
pathlib.Path(os.path.join(root, f))
for root, _, files in os.walk(in_dir)
for f in files if os.path.splitext(f)[1] == ".py"
)
for fpath in pyfile_gen:
with open(fpath, 'r') as f:
src = f.read()
# Parse the code and insert method call fixes.
tree = cst.parse_module(src)
updated = tree.visit(transformer)
# Create the path and directory structure for the new file.
updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
updated_path.parent.mkdir(parents=True, exist_ok=True)
# Generate the updated source file at the corresponding path.
with open(updated_path, 'w') as f:
f.write(updated.code)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="""Fix up source that uses the rapidmigrationassessment client library.
The existing sources are NOT overwritten but are copied to output_dir with changes made.
Note: This tool operates at a best-effort level at converting positional
parameters in client method calls to keyword based parameters.
Cases where it WILL FAIL include
A) * or ** expansion in a method call.
B) Calls via function or method alias (includes free function calls)
C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
These all constitute false negatives. The tool will also detect false
positives when an API method shares a name with another method.
""")
parser.add_argument(
'-d',
'--input-directory',
required=True,
dest='input_dir',
help='the input directory to walk for python files to fix up',
)
parser.add_argument(
'-o',
'--output-directory',
required=True,
dest='output_dir',
help='the directory to output files fixed via un-flattening',
)
args = parser.parse_args()
input_dir = pathlib.Path(args.input_dir)
output_dir = pathlib.Path(args.output_dir)
if not input_dir.is_dir():
print(
f"input directory '{input_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if not output_dir.is_dir():
print(
f"output directory '{output_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if os.listdir(output_dir):
print(
f"output directory '{output_dir}' is not empty",
file=sys.stderr,
)
sys.exit(-1)
fix_files(input_dir, output_dir) |
298,780 | find snapshot urls | #!/usr/bin/python3
"""check-snapshots greps the directory tree for rpmrepo urls and checks them
against the current snapshot list"""
import argparse
import json
import os
import sys
import subprocess
import time
from urllib.parse import urlparse
import requests
SNAPSHOTS_URL="https://rpmrepo.osbuild.org/v2/enumerate"
SNAPSHOTS_TIMEOUT = 2 * 60
SNAPSHOT_GREP = ["grep", "--color=never", "-or", r"http.*rpmrepo.osbuild.org.*-20[0-9]\+"]
def fetch_snapshots_api(url, timeout=SNAPSHOTS_TIMEOUT):
"""Get the list of snapshots from the rpmrepo API"""
print(f"Fetching list of snapshots from {url}")
start = time.time()
try:
r = requests.get(url, timeout=timeout)
except requests.RequestException:
return None
elapsed = time.time() - start
if r.status_code != 200:
print(f"HTTP Response {r.status_code} from {url} after {elapsed:0.0f}s: {r.text}")
return None
print(f"Received snapshot list in {elapsed:0.0f}s")
return r.json()
def METHOD_NAME(directory):
"""grep the directory for rpmrepo snapshot urls
Returns a map of urls to the files they are used in.
"""
urls = {}
try:
grep_out = subprocess.run(SNAPSHOT_GREP + [directory],
check=True,
capture_output=True,
env={"LANG": "C"})
except subprocess.CalledProcessError as e:
print("ERROR: " + e.stderr.decode("utf-8"))
sys.exit(1)
for line in grep_out.stdout.decode("utf-8").splitlines():
try:
file, url = line.split(":", 1)
except ValueError:
print(f"Problem parsing {line}")
continue
url = url.strip()
if url not in urls:
urls[url] = [file]
else:
urls[url].append(file)
return urls
def check_baseurl(repo, snapshots):
"""Check the baseurl to see if it is a valid snapshot, and if there is a newer one
available.
"""
invalid = None
newer = None
url = urlparse(repo)
snapshot = os.path.basename(url.path)
# Is this snapshot valid?
if snapshot not in snapshots:
invalid = f"{snapshot} is not a valid snapshot name"
# is this snapshot old?
base = snapshot.rsplit("-", 1)[0]
newest = snapshot
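# Snapshot names are assumed to end in a date-like, lexicographically sortable suffix, so a plain string comparison finds the newest snapshot with the same base name.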
for s in snapshots:
if s.rsplit("-", 1)[0] != base:
continue
if s > newest:
newest = s
if newest != snapshot:
newer = f"{snapshot} has a newer version - {newest}"
return invalid, newer
def check_snapshot_urls(urls, snapshots, skip=["test/data/manifests", "test/data/stages"], errors_only=False):
"""check the urls against the current list of snapshots
Returns:
0 if all were valid and no newer snapshots are available
2 if there were invalid snapshots
3 if there were newer snapshots
6 if there were invalid and newer snapshots
"""
# Gather up the messages for each file
messages = {}
ret = 0
for url in urls:
invalid, newer = check_baseurl(url, snapshots)
if invalid:
# Add this to each file's invalid message list
for f in urls[url]:
if any(bool(s in f) for s in skip):
continue
ret |= 2
if f in messages:
if invalid not in messages[f]["invalid"]:
messages[f]["invalid"].append(invalid)
else:
messages[f] = {"invalid": [invalid], "newer": []}
if errors_only:
continue
if newer:
# Add this to each file's newer message list
for f in urls[url]:
if any(bool(s in f) for s in skip):
continue
ret |= 4
if f in messages:
if newer not in messages[f]["newer"]:
messages[f]["newer"].append(newer)
else:
messages[f] = {"newer": [newer], "invalid": []}
# Print the messages for each file
for f in messages:
print(f"{f}:")
for msg in messages[f]["invalid"]:
print(f" ERROR: {msg}")
for msg in messages[f]["newer"]:
print(f" NEWER: {msg}")
return ret
# parse cmdline args
def parse_args():
parser = argparse.ArgumentParser(description="Check snapshot urls")
parser.add_argument("--verbose")
parser.add_argument("--timeout", type=int, default=SNAPSHOTS_TIMEOUT,
help="How long to wait for rpmrepo snapshot list")
parser.add_argument("--cache", help="Use a cached file for the list of rpmrepo snapshots")
parser.add_argument("--url", default=SNAPSHOTS_URL,
help="URL to use for the list of rpmrepo snapshots")
parser.add_argument("--errors-only", action="store_true",
help="Only return errors")
parser.add_argument("directory")
return parser.parse_args()
def main():
args = parse_args()
urls = METHOD_NAME(args.directory)
snapshots = None
if args.cache:
try:
with open(args.cache, encoding="utf8") as f:
snapshots = json.load(f)
except (OSError, json.JSONDecodeError):
print(f"No snapshots cache found at {args.cache}")
sys.exit(1)
else:
snapshots = fetch_snapshots_api(args.url, args.timeout)
if not snapshots:
print(f"Cannot download snapshots from {args.url}")
sys.exit(1)
return check_snapshot_urls(urls, snapshots, errors_only=args.errors_only)
if __name__=='__main__':
sys.exit(main()) |
298,781 | get creator | from typing import TYPE_CHECKING, List, Optional, Union
import attrs
from interactions.client.const import MISSING, Absent
from interactions.client.utils.attr_converters import optional
from interactions.client.utils.serializer import dict_filter_none
from interactions.models.discord.snowflake import to_snowflake
from .base import DiscordObject
from interactions.models.discord.enums import StickerTypes, StickerFormatType
if TYPE_CHECKING:
from interactions.models.discord.guild import Guild
from interactions.models.discord.user import User
from interactions.models.discord.snowflake import Snowflake_Type
__all__ = ("StickerItem", "Sticker", "StickerPack")
@attrs.define(eq=False, order=False, hash=False, kw_only=False)
class StickerItem(DiscordObject):
name: str = attrs.field(repr=True)
"""Name of the sticker."""
format_type: StickerFormatType = attrs.field(repr=True, converter=StickerFormatType)
"""Type of sticker image format."""
@attrs.define(eq=False, order=False, hash=False, kw_only=True)
class Sticker(StickerItem):
"""Represents a sticker that can be sent in messages."""
pack_id: Optional["Snowflake_Type"] = attrs.field(repr=False, default=None, converter=optional(to_snowflake))
"""For standard stickers, id of the pack the sticker is from."""
description: Optional[str] = attrs.field(repr=False, default=None)
"""Description of the sticker."""
tags: str = attrs.field(repr=False)
"""autocomplete/suggestion tags for the sticker (max 200 characters)"""
type: Union[StickerTypes, int] = attrs.field(repr=False, converter=StickerTypes)
"""Type of sticker."""
available: Optional[bool] = attrs.field(repr=False, default=True)
"""Whether this guild sticker can be used, may be false due to loss of Server Boosts."""
sort_value: Optional[int] = attrs.field(repr=False, default=None)
"""The standard sticker's sort order within its pack."""
_user_id: Optional["Snowflake_Type"] = attrs.field(repr=False, default=None, converter=optional(to_snowflake))
_guild_id: Optional["Snowflake_Type"] = attrs.field(repr=False, default=None, converter=optional(to_snowflake))
async def fetch_creator(self, *, force: bool = False) -> "User":
"""
Fetch the user who created this emoji.
Args:
force: Whether to force a fetch from the API
Returns:
User object
"""
return await self._client.cache.fetch_user(self._user_id, force=force)
def METHOD_NAME(self) -> "User":
"""
Get the user who created this emoji.
Returns:
User object
"""
return self._client.cache.get_user(self._user_id)
async def fetch_guild(self, *, force: bool = False) -> "Guild":
"""
Fetch the guild associated with this emoji.
Args:
force: Whether to force a fetch from the API
Returns:
Guild object
"""
return await self._client.cache.fetch_guild(self._guild_id, force=force)
def get_guild(self) -> "Guild":
"""
Get the guild associated with this emoji.
Returns:
Guild object
"""
return self._client.cache.get_guild(self._guild_id)
async def edit(
self,
*,
name: Absent[Optional[str]] = MISSING,
description: Absent[Optional[str]] = MISSING,
tags: Absent[Optional[str]] = MISSING,
reason: Absent[Optional[str]] = MISSING,
) -> "Sticker":
"""
Edit a sticker.
Args:
name: New name of the sticker
description: New description of the sticker
tags: New tags of the sticker
reason: Reason for the edit
Returns:
The updated sticker instance
"""
if not self._guild_id:
raise ValueError("You can only edit guild stickers.")
payload = dict_filter_none({"name": name, "description": description, "tags": tags})
sticker_data = await self._client.http.modify_guild_sticker(payload, self._guild_id, self.id, reason)
return self.update_from_dict(sticker_data)
async def delete(self, reason: Optional[str] = MISSING) -> None:
"""
Delete a sticker.
Args:
reason: Reason for the deletion
Raises:
ValueError: If you attempt to delete a non-guild sticker
"""
if not self._guild_id:
raise ValueError("You can only delete guild stickers.")
await self._client.http.delete_guild_sticker(self._guild_id, self.id, reason)
@property
def url(self) -> str:
"""CDN url for the sticker."""
return f"https://media.discordapp.net/stickers/{self.id}.webp"
@attrs.define(eq=False, order=False, hash=False, kw_only=True)
class StickerPack(DiscordObject):
"""Represents a pack of standard stickers."""
stickers: List["Sticker"] = attrs.field(repr=False, factory=list)
"""The stickers in the pack."""
name: str = attrs.field(repr=True)
"""Name of the sticker pack."""
sku_id: "Snowflake_Type" = attrs.field(repr=True)
"""id of the pack's SKU."""
cover_sticker_id: Optional["Snowflake_Type"] = attrs.field(repr=False, default=None)
"""id of a sticker in the pack which is shown as the pack's icon."""
description: str = attrs.field(repr=False)
"""Description of the sticker pack."""
banner_asset_id: "Snowflake_Type" = attrs.field(repr=False) # TODO CDN Asset
"""id of the sticker pack's banner image.""" |
298,782 | exercise nested loop | from __future__ import absolute_import, division, print_function
from libtbx.test_utils import approx_equal
from six.moves import range
def exercise_integer():
from libtbx.math_utils import iround, iceil, ifloor, nearest_integer
assert iround(0) == 0
assert iround(1.4) == 1
assert iround(-1.4) == -1
assert iround(1.6) == 2
assert iround(-1.6) == -2
assert iceil(0) == 0
assert iceil(1.1) == 2
assert iceil(-1.1) == -1
assert iceil(1.9) == 2
assert iceil(-1.9) == -1
assert ifloor(0) == 0
assert ifloor(1.1) == 1
assert ifloor(-1.1) == -2
assert ifloor(1.9) == 1
assert ifloor(-1.9) == -2
for i in range(-3,3+1):
assert nearest_integer(i+0.3) == i
assert nearest_integer(i+0.7) == i+1
def exercise_logical():
from libtbx.math_utils import does_imply, are_equivalent
#
assert does_imply(True, True)
assert not does_imply(True, False)
assert does_imply(False, True)
assert does_imply(False, False)
#
assert are_equivalent(True, True)
assert not are_equivalent(True, False)
assert not are_equivalent(False, True)
assert are_equivalent(False, False)
def METHOD_NAME():
from libtbx.math_utils import nested_loop as nl
assert [list(i) for i in nl([])] == []
assert [list(i) for i in nl([1])] == [[0]]
assert [list(i) for i in nl([1], open_range=False)] == [[0], [1]]
assert [list(i) for i in nl([3])] == [[0], [1], [2]]
assert [list(i) for i in nl(begin=[-2], end=[3])] == [
[-2], [-1], [0], [1], [2]]
assert [list(i) for i in nl(begin=[-1], end=[1], open_range=False)] == [
[-1], [0], [1]]
assert [list(i) for i in nl(begin=[-2,4], end=[3,6])] == [
[-2, 4], [-2, 5], [-1, 4], [-1, 5], [0, 4], [0, 5], [1, 4], [1, 5],
[2, 4], [2, 5]]
assert [list(i) for i in nl(begin=[-2,4], end=[3,6], open_range=False)] == [
[-2, 4], [-2, 5], [-2, 6], [-1, 4], [-1, 5], [-1, 6], [0, 4], [0, 5],
[0, 6], [1, 4], [1, 5], [1, 6], [2, 4], [2, 5], [2, 6], [3, 4], [3, 5],
[3, 6]]
assert [list(i) for i in nl(begin=[-1,0,-1], end=[1,2,1])] == [
[-1, 0, -1], [-1, 0, 0], [-1, 1, -1], [-1, 1, 0], [0, 0, -1], [0, 0, 0],
[0, 1, -1], [0, 1, 0]]
def exercise_next_permutation():
from libtbx.math_utils import next_permutation
seq = []
assert next_permutation(seq) is False
seq = [0]
assert next_permutation(seq) is False
seq = [0,1]
assert next_permutation(seq)
assert seq == [1, 0]
assert not next_permutation(seq)
assert seq == [0, 1]
seq = [0,1,2]
result = []
while True:
result.append(tuple(seq))
if (not next_permutation(seq)):
break
assert result == [
(0, 1, 2),
(0, 2, 1),
(1, 0, 2),
(1, 2, 0),
(2, 0, 1),
(2, 1, 0)]
assert seq == [0,1,2]
expected_n = 1
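# Cycling through all permutations of range(m) should take exactly m! steps and leave the sequence back in its original order.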
for m in range(1,7):
expected_n *= m
seq = list(range(m))
n = 0
while True:
n += 1
if (not next_permutation(seq)):
break
assert seq == list(range(m))
assert n == expected_n
def exercise_random_permutation_in_place():
from libtbx.math_utils import random_permutation_in_place
import random
random.seed(0)
l = list(range(8))
for i_trial in range(10):
random_permutation_in_place(list=l)
if (l != list(range(8))):
break
else:
raise AssertionError
assert sorted(l) == list(range(8))
def exercise_prime_factors_of():
from libtbx.math_utils import prime_factors_of
assert prime_factors_of(n=1) == []
prime_set = set()
for n in range(2, 100):
primes = prime_factors_of(n)
pp = 1
for p in primes:
pp *= p
assert pp == n
prime_set.update(primes)
if (n == 30):
assert prime_set == set([2,3,5,7,11,13,17,19,23,29])
for n in prime_set:
assert prime_factors_of(n) == [n]
assert len(prime_set) == 25
def exercise_normalize_angle():
from libtbx.math_utils import normalize_angle as n
import math
for deg,period in [(False, 2*math.pi), (True, 360.)]:
assert approx_equal(n(0, deg=deg), 0, eps=1.e-12)
assert approx_equal(n(1.e-8, deg=deg), 1.e-8, eps=1.e-12)
assert approx_equal(n(-1.e-8, deg=deg), period-1.e-8, eps=1.e-12)
assert approx_equal(n(1, deg=deg), 1, eps=1.e-12)
assert approx_equal(n(-1, deg=deg), period-1, eps=1.e-12)
assert approx_equal(n(1.e+8), 1.9426951384)
assert approx_equal(n(-1.e+8), 4.34049016878)
assert approx_equal(n(1.e+8, deg=True), 280)
assert approx_equal(n(-1.e+8, deg=True), 80)
def exercise_percentile_based_spread():
from libtbx.math_utils import percentile_based_spread
import random
import math
n_points = 123456
deltas = []
for i in range(n_points):
x = random.gauss(100, 10)
deltas.append(x)
for i in range(1000):
x = random.gauss(300, 30)
deltas.append(x)
pbs = percentile_based_spread(deltas)
pbs_1 = percentile_based_spread(deltas, sort = False)
assert abs(pbs - pbs_1) < 0.01
rmsd = math.sqrt(sum([ x**2 for x in deltas]) / n_points)
assert (pbs > 100) and (pbs < rmsd)
# Test small list processing
assert percentile_based_spread([1,1]) > 0
def exercise():
exercise_integer()
exercise_logical()
METHOD_NAME()
exercise_next_permutation()
exercise_random_permutation_in_place()
exercise_prime_factors_of()
exercise_normalize_angle()
exercise_percentile_based_spread()
print("OK")
if (__name__ == "__main__"):
exercise() |
298,783 | set up class | #!/usr/bin/env python
import dns
import os
import subprocess
from authtests import AuthTest
class GSSTSIGBase(AuthTest):
_config_template_default = """
module-dir=../regression-tests/modules
daemon=no
socket-dir={confdir}
cache-ttl=0
negquery-cache-ttl=0
query-cache-ttl=0
log-dns-queries=yes
log-dns-details=yes
loglevel=9
distributor-threads=1"""
_config_template = """
launch=gsqlite3
gsqlite3-database=configs/auth/powerdns.sqlite
gsqlite3-pragma-foreign-keys=yes
gsqlite3-dnssec=yes
enable-gss-tsig=yes
allow-dnsupdate-from=0.0.0.0/0
dnsupdate=yes
"""
_auth_env = {'KRB5_CONFIG' : './kerberos-client/krb5.conf',
'KRB5_KTNAME' : './kerberos-client/kt.keytab'
}
@classmethod
def METHOD_NAME(cls):
super(GSSTSIGBase, cls).METHOD_NAME()
os.system("$PDNSUTIL --config-dir=configs/auth delete-zone example.net")
os.system("$PDNSUTIL --config-dir=configs/auth delete-zone noacceptor.net")
os.system("$PDNSUTIL --config-dir=configs/auth delete-zone wrongacceptor.net")
os.system("$PDNSUTIL --config-dir=configs/auth create-zone example.net")
os.system("$PDNSUTIL --config-dir=configs/auth create-zone noacceptor.net")
os.system("$PDNSUTIL --config-dir=configs/auth create-zone wrongacceptor.net")
os.system("$PDNSUTIL --config-dir=configs/auth add-record example.net . SOA 3600 'ns1.example.net otto.example.net 2022010403 10800 3600 604800 3600'")
os.system("$PDNSUTIL --config-dir=configs/auth add-record noacceptor.net . SOA 3600 'ns1.noacceptor.net otto.example.net 2022010403 10800 3600 604800 3600'")
os.system("$PDNSUTIL --config-dir=configs/auth add-record wrongacceptor.net . SOA 3600 'ns1.wrongacceptor.net otto.example.net 2022010403 10800 3600 604800 3600'")
os.system("$PDNSUTIL --config-dir=configs/auth set-meta example.net GSS-ACCEPTOR-PRINCIPAL DNS/ns1.example.net@EXAMPLE.COM")
os.system("$PDNSUTIL --config-dir=configs/auth set-meta wrongacceptor.net GSS-ACCEPTOR-PRINCIPAL DNS/ns1.example.net@EXAMPLE.COM")
os.system("$PDNSUTIL --config-dir=configs/auth set-meta example.net TSIG-ALLOW-DNSUPDATE testuser1@EXAMPLE.COM")
def kinit(self, user):
ret = subprocess.run(["kinit", "-Vt", "./kerberos-client/kt.keytab", user], env=self._auth_env)
self.assertEqual(ret.returncode, 0)
def nsupdate(self, commands, expected=0):
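# Drive nsupdate in GSS-TSIG mode (-g) against the local test server and check that it exits with the expected status.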
full = "server 127.0.0.1 %s\n" % self._authPort
full += commands + "\nsend\nquit\n"
ret = subprocess.run(["nsupdate", "-g"], input=full, env=self._auth_env, capture_output=True, text=True)
self.assertEqual(ret.returncode, expected)
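# Illustrative nsupdate input (the port value is hypothetical): for
# self.nsupdate("add inserted1.example.net 10 A 1.2.3.1") with _authPort 53001,
# the text piped to `nsupdate -g` is:
#   server 127.0.0.1 53001
#   add inserted1.example.net 10 A 1.2.3.1
#   send
#   quit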
def checkInDB(self, zone, record):
ret = os.system("$PDNSUTIL --config-dir=configs/auth list-zone %s | egrep -q %s" % (zone, record))
self.assertEqual(ret, 0)
def checkNotInDB(self, zone, record):
ret = os.system("$PDNSUTIL --config-dir=configs/auth list-zone %s | fgrep -q %s" % (zone, record))
self.assertNotEqual(ret, 0)
class TestBasicGSSTSIG(GSSTSIGBase):
_config_template = """
launch=gsqlite3
gsqlite3-database=configs/auth/powerdns.sqlite
gsqlite3-pragma-foreign-keys=yes
gsqlite3-dnssec=yes
enable-gss-tsig=yes
allow-dnsupdate-from=0.0.0.0/0
dnsupdate=yes
"""
def testAllowedUpdate(self):
self.checkNotInDB('example.net', 'inserted1.example.net')
self.kinit("testuser1")
self.nsupdate("add inserted1.example.net 10 A 1.2.3.1")
self.checkInDB('example.net', '^inserted1.example.net.*10.*IN.*A.*1.2.3.1$')
def testDisallowedUpdate(self):
self.kinit("testuser2")
self.nsupdate("add inserted2.example.net 10 A 1.2.3.2", 2)
self.checkNotInDB('example.net', 'inserted2.example.net')
def testNoAcceptor(self):
self.kinit("testuser1")
self.nsupdate("add inserted3.noacceptor.net 10 A 1.2.3.3", 2)
self.checkNotInDB('example.net', 'inserted3.example.net')
def testWrongAcceptor(self):
self.kinit("testuser1")
self.nsupdate("add inserted4.wrongacceptor.net 10 A 1.2.3.4", 2)
self.checkNotInDB('example.net', 'inserted4.example.net')
class TestLuaGSSTSIG(GSSTSIGBase):
_config_template = """
launch=gsqlite3
gsqlite3-database=configs/auth/powerdns.sqlite
gsqlite3-pragma-foreign-keys=yes
gsqlite3-dnssec=yes
enable-gss-tsig=yes
allow-dnsupdate-from=0.0.0.0/0
dnsupdate=yes
lua-dnsupdate-policy-script=kerberos-client/update-policy.lua
"""
def testDisallowedByLuaUpdate(self):
self.kinit("testuser1")
self.nsupdate("add inserted10.example.net 10 A 1.2.3.10", 0) # Lua deny is still a NOERROR
self.checkNotInDB('example.net', 'inserted10.example.net')
def testAllowedByLuaUpdate(self):
self.kinit("testuser2")
self.nsupdate("add inserted11.example.net 10 A 1.2.3.11")
self.checkInDB('example.net', '^inserted11.example.net.*10.*IN.*A.*1.2.3.11$')
def testNoAcceptor(self):
self.kinit("testuser1")
self.nsupdate("add inserted12.noacceptor.net 10 A 1.2.3.12", 2)
self.checkNotInDB('example.net', 'inserted12.example.net')
def testWrongAcceptor(self):
self.kinit("testuser1")
self.nsupdate("add inserted13.wrongacceptor.net 10 A 1.2.3.13", 2)
self.checkNotInDB('example.net', 'inserted13.example.net')
|
298,784 | init action list | from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
import numpy as np
from os import path
import codecs
import inspect
from indra.sources.geneways.actionmention_parser \
import GenewaysActionMentionParser
from indra.sources.geneways.symbols_parser import GenewaysSymbols
class GenewaysAction(object):
"""Represents a row of data in the Geneways human_action.txt,
structured so you can access by field."""
def __init__(self, text_row):
"""Parses a row of text data in human_action.txt and sets the
field name of this object to the corresponding data."""
tokens = text_row.split('\t')
if len(tokens) != 9:
msg = 'Expected 9 tokens for each line of human_action.txt' + \
' but got %d tokens: "%s"' % (len(tokens), text_row)
raise Exception(msg)
self.hiid = tokens[0]
self.up = tokens[1]
self.dn = tokens[2]
self.actiontype = tokens[3]
self.action_count = tokens[4]
self.actionmention_count = tokens[5]
self.plo = tokens[6]
self.max_score = tokens[7]
self.max_prec = tokens[8]
self.action_mentions = list() # Initially empty, can be populated later
def make_annotation(self):
"""Returns a dictionary with all properties of the action
and each of its action mentions."""
annotation = dict()
# Put all properties of the action object into the annotation
for item in dir(self):
if len(item) > 0 and item[0] != '_' and \
not inspect.ismethod(getattr(self, item)):
annotation[item] = getattr(self, item)
# Add properties of each action mention
annotation['action_mentions'] = list()
for action_mention in self.action_mentions:
annotation_mention = action_mention.make_annotation()
annotation['action_mentions'].append(annotation_mention)
return annotation
def __repr__(self):
r = ''
first = True
for item in dir(self):
if len(item) > 0 and item[0] != '_' and \
not inspect.ismethod(getattr(self, item)):
if not first:
r = r + ","
r = r + item + "=" + repr(getattr(self, item))
first = False
return r
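# Minimal usage sketch (the tab-separated row below is hypothetical, not taken
# from a real Geneways export):
#   row = "1\t101\t202\tphosphorylate\t3\t5\tP\t0.95\t0.9"
#   action = GenewaysAction(row)
#   action.actiontype        # 'phosphorylate'
#   action.action_mentions   # [] until populated by the parser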
class GenewaysActionParser(object):
"""Parses a human_action.txt file, and populates
a list of GenewaysAction objects with these data."""
def __init__(self, input_folder):
"""Parses the file and populations the action data"""
f = 'human_action.txt'
action_filename = self._search_path(input_folder, f)
f = 'human_actionmention.txt'
actionmention_filename = self._search_path(input_folder, f)
f = 'human_symbols.txt'
symbols_filename = self._search_path(input_folder, f)
if action_filename is None or actionmention_filename is None \
or symbols_filename is None:
msg = 'Could not find Geneways extracted data: ' + \
'(human_action.txt, human_actionmention.txt, ' + \
'human_symbols.txt) in %s' % input_folder
raise Exception(msg)
self.METHOD_NAME(action_filename)
self._link_to_action_mentions(actionmention_filename)
self._lookup_symbols(symbols_filename)
def _search_path(self, directory_name, filename):
"""Searches for a given file in the specified directory."""
full_path = path.join(directory_name, filename)
if path.exists(full_path):
return full_path
# Could not find the requested file in any of the directories
return None
def METHOD_NAME(self, action_filename):
"""Parses the file and populates the data."""
self.actions = list()
self.hiid_to_action_index = dict()
f = codecs.open(action_filename, 'r', encoding='latin-1')
first_line = True
for line in f:
line = line.rstrip()
if first_line:
# Ignore the first line
first_line = False
else:
self.actions.append(GenewaysAction(line))
latestInd = len(self.actions)-1
hiid = self.actions[latestInd].hiid
if hiid in self.hiid_to_action_index:
raise Exception('action hiid not unique: %s' % hiid)
self.hiid_to_action_index[hiid] = latestInd
def _link_to_action_mentions(self, actionmention_filename):
"""Add action mentions"""
parser = GenewaysActionMentionParser(actionmention_filename)
self.action_mentions = parser.action_mentions
for action_mention in self.action_mentions:
hiid = action_mention.hiid
if hiid not in self.hiid_to_action_index:
m1 = 'Parsed action mention has hiid %d, which does not exist'
m2 = ' in table of action hiids'
raise Exception((m1 + m2) % hiid)
else:
idx = self.hiid_to_action_index[hiid]
self.actions[idx].action_mentions.append(action_mention)
def _lookup_symbols(self, symbols_filename):
"""Look up symbols for actions and action mentions"""
symbol_lookup = GenewaysSymbols(symbols_filename)
for action in self.actions:
action.up_symbol = symbol_lookup.id_to_symbol(action.up)
action.dn_symbol = symbol_lookup.id_to_symbol(action.dn)
def get_top_n_action_types(self, top_n):
"""Returns the top N actions by count."""
# Count action types
action_type_to_counts = dict()
for action in self.actions:
actiontype = action.actiontype
if actiontype not in action_type_to_counts:
action_type_to_counts[actiontype] = 1
else:
action_type_to_counts[actiontype] = \
action_type_to_counts[actiontype] + 1
# Convert the dictionary representation into a pair of lists
action_types = list()
counts = list()
for actiontype in action_type_to_counts.keys():
action_types.append(actiontype)
counts.append(action_type_to_counts[actiontype])
# How many actions in total?
num_actions = len(self.actions)
num_actions2 = 0
for count in counts:
num_actions2 = num_actions2 + count
if num_actions != num_actions2:
raise Exception('Problem counting everything up!')
# Sort action types by count (lowest to highest)
sorted_inds = np.argsort(counts)
last_ind = len(sorted_inds)-1
# Return the top N actions
top_actions = list()
if top_n > len(sorted_inds):
raise Exception(('Asked for top %d action types, ' +
'but there are only %d action types')
% (top_n, len(sorted_inds)))
for i in range(top_n):
top_actions.append(action_types[sorted_inds[last_ind-i]])
return top_actions |
298,785 | get entity code by guid | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Global mapping of guids to Entity instances."""
from typing import Dict
class GuidToEntityMap(object):
"""Container for global mapping of Entity instances by entity guids.
Attributes: _guid_to_entity_map (class variable): Global mapping of entity guids
to Entity instances.
"""
_guid_to_entity_map = {}
def __init__(self):
"""Init."""
def AddSite(self, site: ...) -> None:
"""Adds a site by guid to the global map.
Adding a site to the global guid to entity instance map is necessary because
connections may reference a site by guid or code. Sites are handled
separately than entities in the model_builder module, and therefore need to
be added to the global map separately.
Args:
site: Site instance to be added as a value to the global map.
Raises:
AttributeError: When site guid attribute is None.
KeyError: When site guid already maps to another site in the model.
"""
if not site.guid:
raise AttributeError(f'{site.code}: guid missing')
elif site.guid not in self._guid_to_entity_map:
self._guid_to_entity_map[site.guid] = site
else:
raise KeyError(
f'{site.guid} maps to {self._guid_to_entity_map[site.guid]}')
def AddEntity(self, entity: ...) -> None:
"""Adds an entity by guid to the global map.
This method does not generate a guid for an entity and will throw an
exception if the entity's guid attribute is empty.
Args:
entity: Entity instance to be added as a value.
Raises:
AttributeError: When entity has an empty guid attribute.
KeyError: When a GUID already maps to another Entity in the model.
"""
if entity is None:
raise ValueError('Cannot add None values to global entity map.')
if not entity.bc_guid:
raise AttributeError(f'{entity.code}: guid missing')
if entity.bc_guid not in self._guid_to_entity_map:
self._guid_to_entity_map[entity.bc_guid] = entity
else:
raise KeyError(
f'{entity.bc_guid} maps to {self._guid_to_entity_map[entity.bc_guid]}'
)
def GetEntityByGuid(self, guid: str) ->...:
"""Gets an Entity instance mapped to the input guid.
Args:
guid: A guid key.
Returns:
The Entity instance keyed by guid.
Raises:
KeyError: When guid is not a valid key in the global map.
"""
entity = self._guid_to_entity_map.get(guid)
if entity is None:
raise KeyError(f'{guid} is not a valid guid in the global entity map')
return entity
def METHOD_NAME(self, guid: str) -> str:
"""Gets an entity code mapped by guid.
Args:
guid: A guid key.
Returns:
A human-readable code from the entity instance mapped by guid.
"""
return self.GetEntityByGuid(guid).code
def GetEntityGuidByCode(self, code: str) -> str:
"""Returns entity code mapped by guid in the global guid to entity mapping.
Args:
code: A non-duplicate entity code.
Returns:
The guid associated with an entity code.
Raises:
AttributeError: If code is not an entity code contained in
self._guid_to_entity_map
"""
guid_by_code = {
entity.code: guid for guid, entity in self._guid_to_entity_map.items()
}
guid = guid_by_code.get(code)
if not guid:
raise AttributeError(f'{code} is not a valid entity code.')
else:
return guid
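# Minimal usage sketch (guids and entities are placeholders):
#   mapping = GuidToEntityMap()
#   mapping.AddEntity(entity)                      # entity.bc_guid must be set
#   mapping.GetEntityByGuid(entity.bc_guid).code   # -> entity.code
#   mapping.GetEntityGuidByCode(entity.code)       # -> entity.bc_guid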
def RemoveEntity(self, guid: str) -> None:
"""Removes a guid to Entity mapping.
Args:
guid: A guid key.
Returns:
The removed Entity instance.
"""
return self._guid_to_entity_map.pop(guid)
def UpdateEntityMapping(self, guid: str, entity: ...) -> None:
"""Maps existing guid key to new Entity instance.
Args:
guid: Guid key already mapped in self._guid_to_entity_map.
entity: An Entity instance.
Raises:
KeyError: When guid is not a valid key in the global map.
"""
if not entity:
raise ValueError(f'{guid} cannot map to object of type None')
self._guid_to_entity_map.update({guid: entity})
def GetGuidToEntityMap(self) -> Dict[str, object]:
"""Returns mapping of guids to Entity instances."""
return self._guid_to_entity_map
def Clear(self) -> None:
"""Clears global guid mapping.
Adding for testing purposes.
"""
self._guid_to_entity_map.clear() |
298,786 | test read start end time |
import numpy as np
import pytest
import pandas as pd
import mikeio
from mikeio import Dfs1
from mikeio import EUMType, EUMUnit
def test_filenotexist():
with pytest.raises(FileNotFoundError):
mikeio.open("file_that_does_not_exist.dfs1")
def test_repr():
filename = r"tests/testdata/random.dfs1"
dfs = mikeio.open(filename)
text = repr(dfs)
assert "Dfs1" in text
assert "items" in text
assert "dx" in text
def test_repr_empty():
dfs = Dfs1()
text = repr(dfs)
assert "Dfs1" in text
def test_properties():
filename = r"tests/testdata/tide1.dfs1"
dfs = mikeio.open(filename)
assert dfs.dx == 0.06666692346334457
assert dfs.x0 == 0.0
assert dfs.nx == 10
assert dfs.projection_string == "LONG/LAT"
assert dfs.longitude == -5.0
assert dfs.latitude == 51.20000076293945
assert dfs.orientation == 180
g = dfs.geometry
assert isinstance(g, mikeio.Grid1D)
assert g.dx == 0.06666692346334457
assert g._x0 == 0.0
assert g.nx == 10
assert g.projection == "LONG/LAT"
assert g.origin == (-5.0, 51.20000076293945)
assert g.orientation == 180
def test_read_write_properties(tmp_path):
# test that properties are the same after read-write
filename = r"tests/testdata/tide1.dfs1"
ds1 = mikeio.read(filename)
fp = tmp_path / "tide1.dfs1"
ds1.to_dfs(fp)
ds2 = mikeio.read(fp)
assert ds1.geometry == ds2.geometry
def test_read():
filename = r"tests/testdata/random.dfs1"
dfs = mikeio.open(filename)
ds = dfs.read(items=[0])
data = ds[0].to_numpy()
assert data.shape == (100, 3) # time, x
def test_read_item_names():
filename = r"tests/testdata/random.dfs1"
dfs = mikeio.open(filename)
ds = dfs.read(items=["testing water level"])
data = ds[0].to_numpy()
assert data.shape == (100, 3) # time, x
def test_read_time_steps():
filename = r"tests/testdata/random.dfs1"
dfs = mikeio.open(filename)
ds = dfs.read(time=[3, 5])
data = ds[0].to_numpy()
assert data.shape == (2, 3) # time, x
def test_write_some_time_steps_new_file(tmp_path):
fp = tmp_path / "random.dfs1"
dfs = mikeio.open("tests/testdata/random.dfs1")
ds = dfs.read(time=[0, 1, 2, 3, 4, 5])
data = ds[0].to_numpy()
assert data.shape == (6, 3) # time, x
dfs.write(fp, ds)
dfsnew = mikeio.open(fp)
dsnew = dfsnew.read()
assert dsnew["testing water level"].shape == (6, 3)
def test_read_item_names_not_in_dataset_fails():
filename = r"tests/testdata/random.dfs1"
dfs = mikeio.open(filename)
with pytest.raises(Exception):
dfs.read(["NOTAREALVARIABLE"])
def test_read_names_access():
filename = r"tests/testdata/random.dfs1"
dfs = mikeio.open(filename)
res = dfs.read(items=[0])
item_data = res[0].to_numpy()
time = res.time
assert item_data.shape == (100, 3) # time, x
assert len(time) == 100
assert res.items[0].name == "testing water level"
assert res.items[0].type == EUMType.Water_Level
assert res.items[0].unit == EUMUnit.meter
def METHOD_NAME():
dfs = mikeio.open("tests/testdata/random.dfs1")
ds = dfs.read()
assert dfs.start_time == ds.start_time
assert dfs.end_time == ds.end_time
def test_read_start_end_time_relative_time():
dfs = mikeio.open("tests/testdata/physical_basin_wave_maker_signal.dfs1")
ds = dfs.read()
assert dfs.start_time == ds.start_time
assert dfs.end_time == ds.end_time
def test_get_time_axis_without_reading_data():
dfs0file = r"tests/testdata/random.dfs1"
dfs = mikeio.open(dfs0file)
assert isinstance(dfs.time, pd.DatetimeIndex)
assert len(dfs.time) == 100
def test_get_time_axis_without_reading_data_relative():
dfs0file = r"tests/testdata/physical_basin_wave_maker_signal.dfs1"
dfs = mikeio.open(dfs0file)
assert isinstance(dfs.time, pd.DatetimeIndex) # start time is not correct !
assert len(dfs.time) == 200
def test_select_point_dfs1_to_dfs0(tmp_path):
outfilename = tmp_path / "vu_tide_hourly_p0.dfs0"
ds = mikeio.read("tests/testdata/vu_tide_hourly.dfs1")
assert ds.n_elements > 1
ds_0 = ds.isel(0, axis="space")
assert ds_0.n_elements == 1
ds_0.to_dfs(outfilename)
dsnew = mikeio.read(outfilename)
assert dsnew.n_timesteps == ds.n_timesteps
def test_select_point_and_single_step_dfs1_to_dfs0(tmp_path):
outfilename = tmp_path / "vu_tide_hourly_p0.dfs0"
ds = mikeio.read("tests/testdata/vu_tide_hourly.dfs1")
assert ds.n_elements > 1
ds_0 = ds.isel(0, axis="space")
assert ds_0.n_elements == 1
ds_0_0 = ds_0.isel(0)
assert ds_0_0.n_timesteps == 1
ds_0_0.to_dfs(outfilename)
dsnew = mikeio.read(outfilename)
assert dsnew.n_timesteps == 1
def test_select_point_dfs1_to_dfs0_double(tmp_path):
outfilename = tmp_path / "vu_tide_hourly_p0_dbl.dfs0"
ds = mikeio.read("tests/testdata/vu_tide_hourly.dfs1")
assert ds.n_elements > 1
ds_0 = ds.isel(0, axis="space")
assert ds_0.n_elements == 1
ds_0.to_dfs(outfilename, dtype=np.float64)
dsnew = mikeio.read(outfilename)
assert dsnew.n_timesteps == ds.n_timesteps
def test_interp_dfs1():
ds = mikeio.read("tests/testdata/waterlevel_north.dfs1")
da: mikeio.DataArray = ds.North_WL
assert da.geometry.x[-1] == 8800
dai = da.interp(x=0)
assert dai[0].values == pytest.approx(-0.33)
dai = da.interp(x=4000)
assert dai[0].values == pytest.approx(-0.3022272830659693)
dai = da.interp(x=8800)
assert dai[-1].values == pytest.approx(-0.0814)
dai = da.interp(x=8900) # outside the domain
assert np.isnan(dai[-1].values)
dai = da.interp(x=-10) # outside the domain
assert np.isnan(dai[-1].values)
def test_interp_onepoint_dfs1():
ds = mikeio.read("tests/testdata/nx1.dfs1")
assert ds.geometry.nx == 1
with pytest.raises(AssertionError, match="not possible for Grid1D with one point"):
ds[0].interp(x=0) |
298,787 | test read jsonl | """Test the file_io module."""
import numpy as np
import pytest
from clarity.utils.file_io import read_jsonl, read_signal, write_jsonl, write_signal
def METHOD_NAME():
"""Test the read_jsonl function."""
expected = [
{"id": 1, "name": "xxx"},
{"id": 2, "name": "yyy"},
{"id": 3, "name": "zzz"},
]
data = read_jsonl("tests/test_data/filetypes/valid.jsonl")
assert data == expected
@pytest.mark.parametrize(
"records",
[
[
{"a": 1, "b": 2},
{"a": 3, "b": 4},
]
],
)
def test_jsonl_write_read_loop(records, tmp_path):
"""Test the write_jsonl and read_jsonl functions."""
write_jsonl(tmp_path / "test1.jsonl", records)
assert read_jsonl(tmp_path / "test1.jsonl") == records
@pytest.mark.parametrize(
"records",
[
[
{"a": 1, "b": 2},
{"a": 3, "b": 4},
]
],
)
def test_jsonl_append_read_loop(records, tmp_path):
"""Test the write_jsonl correctly appends to existing files."""
write_jsonl(tmp_path / "test2.jsonl", records)
write_jsonl(tmp_path / "test2.jsonl", records)
assert read_jsonl(tmp_path / "test2.jsonl") == records + records
@pytest.mark.parametrize(
"signal, floating_point, strict",
[
(np.array([1.1, -2.0, 0.0, 44.0, -54.0]), True, True), # float32
(np.array([0.1, -0.2, 0.1, -0.5, 0.99]), False, True), # int16
(np.array([0.1, -0.2, 0.1, -1.00, 0.99]), False, True), # int16
(np.array([0.1, -0.2, 0.1, -1.00, 1.99]), False, False), # int16
(np.array([0.1, -0.2, 0.1, -1.00, 0.99]), False, True), # int16
],
)
def test_write_read_loop(tmp_path, signal, floating_point, strict):
"""Test write_signal and read_signal"""
tmp_filename = tmp_path / "test.wav"
sample_rate = 16000.0
write_signal(
tmp_filename,
signal,
sample_rate=int(sample_rate), # <-- Sample rate needs to be cast to int
floating_point=floating_point,
strict=strict,
)
result = read_signal(tmp_filename, sample_rate=sample_rate)
# Some precision is lost as convert to int16 and back again
assert result.shape == signal.shape
# The test where strict is False has overflow which is not caught and hence
# reading back the signal has changed
if strict:
assert result == pytest.approx(signal, abs=1.0 / 16384)
else:
# Deliberate fail: shows why strict=True is needed
assert result != pytest.approx(signal, abs=1.0 / 16384)
def test_write_mono_as_2D_signal(tmp_path):
"""Test special case of writing signals for shape [N, 1]"""
tmp_filename_1d = tmp_path / "test_1d.wav"
tmp_filename_2d = tmp_path / "test_2d.wav"
signal_1d = np.ones((10,)) * 0.5
signal_2d = np.ones((10, 1)) * 0.5
write_signal(tmp_filename_1d, signal_1d, sample_rate=16000)
write_signal(tmp_filename_2d, signal_2d, sample_rate=16000)
result_1d = read_signal(tmp_filename_1d)
result_2d = read_signal(tmp_filename_2d)
# Both 1 and 2D signals should be read back as 1D
assert result_1d.shape == (10,)
assert result_2d.shape == (10,)
def test_read_write_multichannel(tmp_path):
"""Test write_signal and read_signal"""
tmp_filename = tmp_path / "test.wav"
signal = np.ones((2, 10)) * 0.5
sample_rate = 16000.0
write_signal(
tmp_filename,
signal,
sample_rate=int(sample_rate), # <-- Sample rate needs to be cast to int
floating_point=False,
strict=True,
)
result = read_signal(tmp_filename, sample_rate=sample_rate)
# Some precision is lost as convert to int16 and back again
assert result.shape == signal.shape
def test_read_write_sample_mismatch_error(tmp_path):
"""Should raise an error if the sample rate is not the same"""
tmp_filename = tmp_path / "test.wav"
signal = np.ones((2, 10)) * 0.5
sample_rate = 16000.0
write_signal(
tmp_filename,
signal,
sample_rate=int(sample_rate), # <-- Sample rate needs to be cast to int
floating_point=False,
strict=True,
)
with pytest.raises(ValueError):
read_signal(tmp_filename, sample_rate=8000, allow_resample=False)
def test_read_write_channel_mismatch(tmp_path):
"""Should raise an error if the sample rate is not the same"""
tmp_filename = tmp_path / "test.wav"
signal = np.ones((10, 2)) * 0.5
sample_rate = 16000.0
write_signal(
tmp_filename,
signal,
sample_rate=int(sample_rate), # <-- Sample rate needs to be cast to int
floating_point=False,
strict=True,
)
# Correct number of channels - will read OK
read_signal(tmp_filename, sample_rate=16000, n_channels=2)
# Incorrect number of channels - will raise an error
with pytest.raises(ValueError):
read_signal(tmp_filename, sample_rate=16000, n_channels=1)
def test_read_write_sample_with_resample(tmp_path):
"""Should if the sample rate is not the same and allow_resample is True"""
tmp_filename = tmp_path / "test.wav"
signal = np.ones((10, 2)) * 0.5
sample_rate = 16000.0
write_signal(
tmp_filename,
signal,
sample_rate=int(sample_rate), # <-- Sample rate needs to be cast to int
floating_point=False,
strict=True,
)
x = read_signal(tmp_filename, sample_rate=8000, allow_resample=True)
assert x.shape == (5, 2)
def test_write_clipping(tmp_path):
"""Should raise an error if the sample rate is not the same"""
tmp_filename = tmp_path / "test.wav"
signal = np.array([-1.0, 0.0, 1.0]) # <-- Clipping because 1.0 not OK
# strict=True, i.e. Clipping not allowed - will throw an error
with pytest.raises(ValueError):
write_signal(
tmp_filename,
signal,
sample_rate=16000,
floating_point=False,
strict=True,
)
# strict=False, i.e. Clipping allowed - will write OK but log warning
write_signal(
tmp_filename,
signal,
sample_rate=16000,
floating_point=False,
strict=False,
)
signal_read = read_signal(tmp_filename, sample_rate=16000)
# Note that the +1.0 is clipped to 0.99996948
assert signal_read == pytest.approx(np.array([-1.0, 0, 0.99996948]))
# This is the standard behaviour of soundfile and arises due to the
# asymmetric nature of the int16 format, i.e. -32768 to 32767
def test_read_write_with_offset(tmp_path):
"""Should if the sample rate is not the same and allow_resample is True"""
tmp_filename = tmp_path / "test.wav"
signal = np.ones((10, 2)) * 0.5
sample_rate = 10000
write_signal(
tmp_filename,
signal,
sample_rate=sample_rate,
floating_point=False,
strict=True,
)
x1 = read_signal(tmp_filename, offset=3, offset_is_samples=True)
assert x1.shape == (7, 2)
x2 = read_signal(tmp_filename, offset=0.0003, offset_is_samples=False)
assert x2.shape == (7, 2) |
298,788 | id | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetWorkstationConfigIamPolicyResult',
'AwaitableGetWorkstationConfigIamPolicyResult',
'get_workstation_config_iam_policy',
'get_workstation_config_iam_policy_output',
]
@pulumi.output_type
class GetWorkstationConfigIamPolicyResult:
"""
A collection of values returned by getWorkstationConfigIamPolicy.
"""
def __init__(__self__, etag=None, METHOD_NAME=None, location=None, policy_data=None, project=None, workstation_cluster_id=None, workstation_config_id=None):
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", METHOD_NAME)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if policy_data and not isinstance(policy_data, str):
raise TypeError("Expected argument 'policy_data' to be a str")
pulumi.set(__self__, "policy_data", policy_data)
if project and not isinstance(project, str):
raise TypeError("Expected argument 'project' to be a str")
pulumi.set(__self__, "project", project)
if workstation_cluster_id and not isinstance(workstation_cluster_id, str):
raise TypeError("Expected argument 'workstation_cluster_id' to be a str")
pulumi.set(__self__, "workstation_cluster_id", workstation_cluster_id)
if workstation_config_id and not isinstance(workstation_config_id, str):
raise TypeError("Expected argument 'workstation_config_id' to be a str")
pulumi.set(__self__, "workstation_config_id", workstation_config_id)
@property
@pulumi.getter
def etag(self) -> str:
"""
(Computed) The etag of the IAM policy.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> str:
return pulumi.get(self, "location")
@property
@pulumi.getter(name="policyData")
def policy_data(self) -> str:
"""
(Required only by `workstations.WorkstationConfigIamPolicy`) The policy data generated by
a `organizations_get_iam_policy` data source.
"""
return pulumi.get(self, "policy_data")
@property
@pulumi.getter
def project(self) -> str:
return pulumi.get(self, "project")
@property
@pulumi.getter(name="workstationClusterId")
def workstation_cluster_id(self) -> str:
return pulumi.get(self, "workstation_cluster_id")
@property
@pulumi.getter(name="workstationConfigId")
def workstation_config_id(self) -> str:
return pulumi.get(self, "workstation_config_id")
class AwaitableGetWorkstationConfigIamPolicyResult(GetWorkstationConfigIamPolicyResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetWorkstationConfigIamPolicyResult(
etag=self.etag,
METHOD_NAME=self.METHOD_NAME,
location=self.location,
policy_data=self.policy_data,
project=self.project,
workstation_cluster_id=self.workstation_cluster_id,
workstation_config_id=self.workstation_config_id)
def get_workstation_config_iam_policy(location: Optional[str] = None,
project: Optional[str] = None,
workstation_cluster_id: Optional[str] = None,
workstation_config_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWorkstationConfigIamPolicyResult:
"""
Use this data source to access information about an existing resource.
:param str location: The location where the workstation cluster config should reside.
Used to find the parent resource to bind the IAM policy to
:param str project: The ID of the project in which the resource belongs.
If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used.
"""
__args__ = dict()
__args__['location'] = location
__args__['project'] = project
__args__['workstationClusterId'] = workstation_cluster_id
__args__['workstationConfigId'] = workstation_config_id
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('gcp:workstations/getWorkstationConfigIamPolicy:getWorkstationConfigIamPolicy', __args__, opts=opts, typ=GetWorkstationConfigIamPolicyResult).value
return AwaitableGetWorkstationConfigIamPolicyResult(
etag=pulumi.get(__ret__, 'etag'),
METHOD_NAME=pulumi.get(__ret__, 'id'),
location=pulumi.get(__ret__, 'location'),
policy_data=pulumi.get(__ret__, 'policy_data'),
project=pulumi.get(__ret__, 'project'),
workstation_cluster_id=pulumi.get(__ret__, 'workstation_cluster_id'),
workstation_config_id=pulumi.get(__ret__, 'workstation_config_id'))
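# Example usage (illustrative; resource identifiers are placeholders):
#
#   policy = get_workstation_config_iam_policy(
#       location="us-central1",
#       workstation_cluster_id="my-cluster",
#       workstation_config_id="my-config",
#   )
#   pulumi.export("policyData", policy.policy_data)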
@_utilities.lift_output_func(get_workstation_config_iam_policy)
def get_workstation_config_iam_policy_output(location: Optional[pulumi.Input[Optional[str]]] = None,
project: Optional[pulumi.Input[Optional[str]]] = None,
workstation_cluster_id: Optional[pulumi.Input[str]] = None,
workstation_config_id: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetWorkstationConfigIamPolicyResult]:
"""
Use this data source to access information about an existing resource.
:param str location: The location where the workstation cluster config should reside.
Used to find the parent resource to bind the IAM policy to
:param str project: The ID of the project in which the resource belongs.
If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used.
"""
... |
298,789 | get authorization access policy output | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetAuthorizationAccessPolicyResult',
'AwaitableGetAuthorizationAccessPolicyResult',
'get_authorization_access_policy',
'get_authorization_access_policy_output',
]
@pulumi.output_type
class GetAuthorizationAccessPolicyResult:
"""
Authorization access policy contract.
"""
def __init__(__self__, id=None, name=None, object_id=None, tenant_id=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if object_id and not isinstance(object_id, str):
raise TypeError("Expected argument 'object_id' to be a str")
pulumi.set(__self__, "object_id", object_id)
if tenant_id and not isinstance(tenant_id, str):
raise TypeError("Expected argument 'tenant_id' to be a str")
pulumi.set(__self__, "tenant_id", tenant_id)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="objectId")
def object_id(self) -> Optional[str]:
"""
The Object Id
"""
return pulumi.get(self, "object_id")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> Optional[str]:
"""
The Tenant Id
"""
return pulumi.get(self, "tenant_id")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetAuthorizationAccessPolicyResult(GetAuthorizationAccessPolicyResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetAuthorizationAccessPolicyResult(
id=self.id,
name=self.name,
object_id=self.object_id,
tenant_id=self.tenant_id,
type=self.type)
def get_authorization_access_policy(authorization_access_policy_id: Optional[str] = None,
authorization_id: Optional[str] = None,
authorization_provider_id: Optional[str] = None,
resource_group_name: Optional[str] = None,
service_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAuthorizationAccessPolicyResult:
"""
Gets the details of the authorization access policy specified by its identifier.
:param str authorization_access_policy_id: Identifier of the authorization access policy.
:param str authorization_id: Identifier of the authorization.
:param str authorization_provider_id: Identifier of the authorization provider.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str service_name: The name of the API Management service.
"""
__args__ = dict()
__args__['authorizationAccessPolicyId'] = authorization_access_policy_id
__args__['authorizationId'] = authorization_id
__args__['authorizationProviderId'] = authorization_provider_id
__args__['resourceGroupName'] = resource_group_name
__args__['serviceName'] = service_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:apimanagement/v20230301preview:getAuthorizationAccessPolicy', __args__, opts=opts, typ=GetAuthorizationAccessPolicyResult).value
return AwaitableGetAuthorizationAccessPolicyResult(
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
object_id=pulumi.get(__ret__, 'object_id'),
tenant_id=pulumi.get(__ret__, 'tenant_id'),
type=pulumi.get(__ret__, 'type'))
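# Example usage (illustrative; names are placeholders):
#
#   result = get_authorization_access_policy(
#       resource_group_name="my-rg",
#       service_name="my-apim",
#       authorization_provider_id="my-provider",
#       authorization_id="my-authorization",
#       authorization_access_policy_id="my-access-policy",
#   )
#   pulumi.export("objectId", result.object_id)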
@_utilities.lift_output_func(get_authorization_access_policy)
def METHOD_NAME(authorization_access_policy_id: Optional[pulumi.Input[str]] = None,
authorization_id: Optional[pulumi.Input[str]] = None,
authorization_provider_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetAuthorizationAccessPolicyResult]:
"""
Gets the details of the authorization access policy specified by its identifier.
:param str authorization_access_policy_id: Identifier of the authorization access policy.
:param str authorization_id: Identifier of the authorization.
:param str authorization_provider_id: Identifier of the authorization provider.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str service_name: The name of the API Management service.
"""
... |
298,790 | operating system | # This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Provides version information of important supporting modules.
"""
import functools
import platform
import os
import app
import appinfo
def _catch_unknown(f):
"""Decorate a function, returning "unknown" on import/attribute error."""
@functools.wraps(f)
def wrapper():
try:
return f()
except (ImportError, AttributeError):
return "unknown"
return wrapper
@_catch_unknown
def app_version():
import appinfo
return appinfo.version
@_catch_unknown
def pyqt_version():
import PyQt5.QtCore
return PyQt5.QtCore.PYQT_VERSION_STR
@_catch_unknown
def qt_version():
import PyQt5.QtCore
return PyQt5.QtCore.QT_VERSION_STR
@_catch_unknown
def python_version():
return platform.python_version()
@_catch_unknown
def METHOD_NAME():
plat = platform.platform()
if platform.system() == "Linux":
try:
distro = platform.freedesktop_os_release()["PRETTY_NAME"]
except OSError:
# play it safe
distro = "unknown distribution"
return f"{plat} ({distro})"
else:
return plat
@_catch_unknown
def ly_version():
import ly.pkginfo
return ly.pkginfo.version
@_catch_unknown
def qpageview_version():
import qpageview
return qpageview.version_string
@_catch_unknown
def poppler_version():
import popplerqt5
return '.'.join(format(n) for n in popplerqt5.poppler_version())
@_catch_unknown
def python_poppler_version():
import popplerqt5
return '.'.join(format(n) for n in popplerqt5.version())
if platform.system() == "Darwin":
@_catch_unknown
def mac_installation_kind():
import macosx
if macosx.inside_lightweight_app_bundle():
return 'lightweight .app bundle'
elif macosx.inside_app_bundle():
return 'standalone .app bundle'
else:
return 'command line'
elif platform.system() == "Linux":
@_catch_unknown
def linux_installation_kind():
import linux
if linux.inside_flatpak():
return "Flatpak"
else:
return "distro package or installed from source"
def version_info_named():
"""Yield all the relevant names and their version string."""
yield appinfo.appname, appinfo.version
yield "Extension API", appinfo.extension_api
yield "Python", python_version()
if app.is_git_controlled():
import vcs
repo = vcs.app_repo
yield "Git branch", repo.active_branch()
commit = repo.run_command(
'log',
['-n', '1', '--format=format:%h'])
yield "on commit", commit[0]
yield "python-ly", ly_version()
yield "Qt", qt_version()
yield "PyQt", pyqt_version()
yield "qpageview", qpageview_version()
yield "poppler", poppler_version()
yield "python-poppler-qt", python_poppler_version()
yield "OS", METHOD_NAME()
if platform.system() == 'Darwin':
yield "installation kind", mac_installation_kind()
elif platform.system() == "Linux":
yield "Installation kind", linux_installation_kind()
def version_info_string(separator='\n'):
"""Return all version names as a string, joint with separator."""
return separator.join(map("{0[0]}: {0[1]}".format, version_info_named())) |
298,791 | gen c check | # This script is used to generate the CircleCI dynamic config file in
# .circleci/config.gen.yml.
#
# To add new configuration manipulations that are based on top of the template
# file in .circleci/config.templ.yml, add a function named gen_<name> to this
# file. The function will be called automatically when this script is run.
import typing as t
def gen_required_suites(template: dict) -> None:
"""Generate the list of test suites that need to be run."""
from needs_testrun import for_each_testrun_needed as fetn
from suitespec import get_suites
suites = get_suites()
jobs = set(template["jobs"].keys())
required_suites = template["requires_tests"]["requires"] = []
fetn(suites=sorted(suites & jobs), action=lambda suite: required_suites.append(suite))
if not required_suites:
# Nothing to generate
return
jobs = template["workflows"]["test"]["jobs"]
# Create the base venvs
jobs.append("build_base_venvs")
# Add the jobs
requires_base_venvs = template["requires_base_venvs"]
jobs.extend([{suite: requires_base_venvs} for suite in required_suites])
# Collect coverage
jobs.append({"coverage_report": template["requires_tests"]})
def gen_pre_checks(template: dict) -> None:
"""Generate the list of pre-checks that need to be run."""
from needs_testrun import pr_matches_patterns
def check(name: str, command: str, paths: t.Set[str]) -> None:
if pr_matches_patterns(paths):
template["jobs"]["pre_check"]["steps"].append({"run": {"name": name, "command": command}})
check(
name="Style",
command="hatch run lint:style",
paths={"*.py", "*.pyi", "hatch.toml"},
)
check(
name="Typing",
command="hatch run lint:typing",
paths={"*.py", "*.pyi", "hatch.toml"},
)
check(
name="Security",
command="hatch run lint:security",
paths={"ddtrace/*", "hatch.toml"},
)
check(
name="Run riotfile.py tests",
command="hatch run lint:riot",
paths={"riotfile.py", "hatch.toml"},
)
check(
name="Style: Test snapshots",
command="hatch run lint:fmt-snapshots && git diff --exit-code tests/snapshots hatch.toml",
paths={"tests/snapshots/*", "hatch.toml"},
)
check(
name="Slots check",
command="hatch run slotscheck:_",
paths={"ddtrace/*.py", "hatch.toml"},
)
check(
name="Run scripts/*.py tests",
command="hatch run scripts:test",
paths={"scripts/*.py"},
)
check(
name="Run conftest tests",
command="hatch run meta-testing:meta-testing",
paths={"tests/*conftest.py", "tests/meta/*"},
)
check(
name="Validate suitespec JSON file",
command="python -m tests.suitespec",
paths={"tests/.suitespec.json", "tests/suitespec.py"},
)
def gen_build_docs(template: dict) -> None:
"""Include the docs build step if the docs have changed."""
from needs_testrun import pr_matches_patterns
if pr_matches_patterns({"docs/*", "ddtrace/*", "scripts/docs", "releasenotes/*"}):
template["workflows"]["test"]["jobs"].append({"build_docs": template["requires_pre_check"]})
def METHOD_NAME(template: dict) -> None:
"""Include C code checks if C code has changed."""
from needs_testrun import pr_matches_patterns
if pr_matches_patterns({"*.c", "*.h", "*.cpp", "*.hpp", "*.cc", "*.hh"}):
template["requires_pre_check"]["requires"].append("ccheck")
template["requires_base_venvs"]["requires"].append("ccheck")
template["workflows"]["test"]["jobs"].append("ccheck")
# -----------------------------------------------------------------------------
# The code below is the boilerplate that makes the script work. There is
# generally no reason to modify it.
from argparse import ArgumentParser # noqa
import logging # noqa
from pathlib import Path # noqa
import sys # noqa
from time import monotonic_ns as time # noqa
from ruamel.yaml import YAML # noqa
logging.basicConfig(level=logging.WARNING, format="%(levelname)s: %(message)s")
LOGGER = logging.getLogger(__name__)
argp = ArgumentParser()
argp.add_argument("--verbose", "-v", action="store_true", help="Verbose output")
args = argp.parse_args()
if args.verbose:
LOGGER.setLevel(logging.INFO)
ROOT = Path(__file__).parents[1]
CONFIG_TEMPLATE_FILE = ROOT / ".circleci" / "config.templ.yml"
CONFIG_GEN_FILE = ROOT / ".circleci" / "config.gen.yml"
# Make the scripts and tests folders available for importing.
sys.path.append(str(ROOT / "scripts"))
sys.path.append(str(ROOT / "tests"))
with YAML(output=CONFIG_GEN_FILE) as yaml:
LOGGER.info("Loading configuration template from %s", CONFIG_TEMPLATE_FILE)
config = yaml.load(CONFIG_TEMPLATE_FILE)
LOGGER.info("Configuration generation steps:")
for name, func in dict(globals()).items():
if name.startswith("gen_"):
desc = func.__doc__.splitlines()[0]
try:
start = time()
func(config)
end = time()
LOGGER.info("- %s: %s [took %dms]", name, desc, int((end - start) / 1e6))
except Exception as e:
LOGGER.error("- %s: %s [reason: %s]", name, desc, str(e))
LOGGER.info("Writing generated configuration to %s", CONFIG_GEN_FILE)
yaml.dump(config) |
298,792 | t sne | #!/usr/bin/env python3
# coding: utf-8
"""
@author: Ping Qiu qiuping1@genomics.cn
"""
import numpy as np
from scipy.sparse import issparse
from scipy.sparse.linalg import LinearOperator, svds
from sklearn.utils import check_array, check_random_state
from sklearn.utils.extmath import svd_flip
from sklearn.decomposition import FactorAnalysis
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
from .scale import _get_mean_var
from stereo.log_manager import logger
def low_variance(x, threshold=0.01):
"""
filter the features which have low variance between the samples.
:param x: 2D array, shape (M, N)
:param threshold: the min threshold of variance.
:return: a new array which filtered the feature with low variance.
"""
x_var = np.var(x, axis=0)
var_index = np.where(x_var > threshold)[0]
x = x[:, var_index]
return x
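# Minimal usage sketch (synthetic data, illustrative only):
#   x = np.random.rand(100, 50)
#   x_kept = low_variance(x, threshold=0.05)  # keeps only columns with variance > 0.05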
def factor_analysis(x, n_pcs):
"""
the dim reduce function of factor analysis
:param x: 2D array, shape (M, N)
:param n_pcs: the number of features for a return array after reducing.
:return: ndarray of shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
"""
fa = FactorAnalysis(n_components=n_pcs)
fa.fit(x)
tran_x = fa.transform(x)
return tran_x
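# Minimal usage sketch (synthetic data, illustrative only):
#   x = np.random.rand(200, 30)
#   x_fa = factor_analysis(x, n_pcs=10)  # x_fa.shape == (200, 10)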
def pca(x, n_pcs, svd_solver='auto', random_state=0):
"""
Principal component analysis.
:param x: 2D array, shape (M, N)
:param n_pcs: the number of features for a return array after reducing.
:param svd_solver: {'auto', 'full', 'arpack', 'randomized'}, default to 'auto'
If auto :
The solver is selected by a default policy based on `X.shape` and
`n_pcs`: if the input data is larger than 500x500 and the
number of components to extract is lower than 80% of the smallest
dimension of the data, then the more efficient 'randomized'
method is enabled. Otherwise the exact full SVD is computed and
optionally truncated afterwards.
If full :
run exact full SVD calling the standard LAPACK solver via
`scipy.linalg.svd` and select the components by postprocessing
If arpack :
run SVD truncated to n_pcs calling ARPACK solver via
`scipy.sparse.linalg.svds`. It requires strictly
0 < n_pcs < min(x.shape)
If randomized :
run randomized SVD by the method of Halko et al.
:param random_state: int, RandomState instance
:return: ndarray of shape (n_samples, n_pcs) Embedding of the training data in low-dimensional space.
"""
if issparse(x):
if svd_solver != 'arpack':
logger.warning(
f'svd_solver: {svd_solver} can not be used with sparse input.\n'
'Use "arpack" (the default) instead.'
)
svd_solver = 'arpack'
if x.dtype.char not in "fFdD":
x = x.astype(np.float32)
logger.info('exp_matrix dType is changed to float32')
output = _pca_with_sparse(x, n_pcs, solver=svd_solver, random_state=random_state)
# this is just a wrapper for the results
# pca_ = PCA(n_components=n_pcs, svd_solver=svd_solver)
# pca_.components_ = output['components']
# pca_.explained_variance_ = output['variance']
# pca_.explained_variance_ratio_ = output['variance_ratio']
# return dict([('x_pca', output['X_pca']), ('variance', output['variance']), ('variance_ratio', output['variance_ratio']), ('pcs', pca_.components_.T)])
return dict([('x_pca', output['X_pca']), ('variance', output['variance']), ('variance_ratio', output['variance_ratio']), ('pcs', output['components'].T)])
else:
pca_obj = PCA(n_components=n_pcs, svd_solver=svd_solver, random_state=random_state)
x_pca = pca_obj.fit_transform(x)
variance = pca_obj.explained_variance_
variance_ratio = pca_obj.explained_variance_ratio_
pcs = pca_obj.components_.T
return dict([('x_pca', x_pca), ('variance', variance), ('variance_ratio', variance_ratio), ('pcs', pcs)])
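# Minimal usage sketch (synthetic dense data, illustrative only):
#   x = np.random.rand(500, 100).astype(np.float32)
#   res = pca(x, n_pcs=20)
#   res['x_pca'].shape            # (500, 20)
#   res['variance_ratio'].sum()   # fraction of total variance retained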
def _pca_with_sparse(X, n_pcs, solver='arpack', mu=None, random_state=None):
random_state = check_random_state(random_state)
np.random.set_state(random_state.get_state())
random_init = np.random.rand(np.min(X.shape))
X = check_array(X, accept_sparse=['csr', 'csc'])
if mu is None:
mu = X.mean(0).A.flatten()[None, :]
mdot = mu.dot
mmat = mdot
mhdot = mu.T.dot
mhmat = mu.T.dot
Xdot = X.dot
Xmat = Xdot
XHdot = X.T.conj().dot
XHmat = XHdot
ones = np.ones(X.shape[0])[None, :].dot
def matvec(x):
return Xdot(x) - mdot(x)
def matmat(x):
return Xmat(x) - mmat(x)
def rmatvec(x):
return XHdot(x) - mhdot(ones(x))
def rmatmat(x):
return XHmat(x) - mhmat(ones(x))
XL = LinearOperator(
matvec=matvec,
dtype=X.dtype,
matmat=matmat,
shape=X.shape,
rmatvec=rmatvec,
rmatmat=rmatmat,
)
u, s, v = svds(XL, solver=solver, k=n_pcs, v0=random_init)
u, v = svd_flip(u, v)
idx = np.argsort(-s)
v = v[idx, :]
X_pca = (u * s)[:, idx]
ev = s[idx] ** 2 / (X.shape[0] - 1)
total_var = _get_mean_var(X)[1].sum()
ev_ratio = ev / total_var
output = {
'X_pca': X_pca,
'variance': ev,
'variance_ratio': ev_ratio,
'components': v,
}
return output
def METHOD_NAME(x, n_pcs, n_iter=200):
"""
the dim reduce function of TSNE
:param x: 2D array, shape (M, N)
:param n_pcs: the number of features for a return array after reducing.
:param n_iter: the number of iterators.
:return: ndarray of shape (n_samples, n_components) Embedding of the training data in low-dimensional space.
"""
tsen = TSNE(n_components=n_pcs, n_iter=n_iter)
tsne_x = tsen.fit_transform(x)
return tsne_x
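# Minimal usage sketch (illustrative; recent scikit-learn versions require
# n_iter >= 250 and n_components <= 3 for the default barnes_hut method):
#   x = np.random.rand(300, 50)
#   embedding = METHOD_NAME(x, n_pcs=2, n_iter=250)  # embedding.shape == (300, 2)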
def u_map(x, n_pcs, n_neighbors=5, min_dist=0.3):
"""
the dim reduce function of UMAP
:param x: 2D array, shape (M, N)
:param n_pcs: the number of features for a return array after reducing.
:param n_neighbors: the number of neighbors
:param min_dist: the min value of distance.
:return: ndarray of shape (n_samples, n_components) Embedding of the training data in low-dimensional space.
"""
import umap
umap_obj = umap.UMAP(n_neighbors=n_neighbors, n_components=n_pcs, min_dist=min_dist)
umap_x = umap_obj.fit_transform(x)
return umap_x |
298,793 | test white box lifestyle regression | # MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2020
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import pytest
import numpy as np
from art.attacks.inference.attribute_inference.white_box_lifestyle_decision_tree import (
AttributeInferenceWhiteBoxLifestyleDecisionTree,
)
from art.estimators.classification.scikitlearn import ScikitlearnDecisionTreeClassifier
from art.estimators.regression.scikitlearn import ScikitlearnDecisionTreeRegressor
from tests.attacks.utils import backend_test_classifier_type_check_fail
from tests.utils import ARTTestException
logger = logging.getLogger(__name__)
@pytest.mark.skip_framework("dl_frameworks")
def test_white_box_lifestyle(art_warning, decision_tree_estimator, get_iris_dataset):
try:
attack_feature = 2 # petal length
values = [0.14, 0.42, 0.71] # rounded down
priors = [50 / 150, 54 / 150, 46 / 150]
(x_train_iris, y_train_iris), (x_test_iris, y_test_iris) = get_iris_dataset
x_train_for_attack = np.delete(x_train_iris, attack_feature, 1)
x_train_feature = x_train_iris[:, attack_feature]
x_test_for_attack = np.delete(x_test_iris, attack_feature, 1)
x_test_feature = x_test_iris[:, attack_feature]
classifier = decision_tree_estimator()
attack = AttributeInferenceWhiteBoxLifestyleDecisionTree(classifier, attack_feature=attack_feature)
x_train_predictions = np.array([np.argmax(arr) for arr in classifier.predict(x_train_iris)]).reshape(-1, 1)
x_test_predictions = np.array([np.argmax(arr) for arr in classifier.predict(x_test_iris)]).reshape(-1, 1)
inferred_train = attack.infer(x_train_for_attack, x_train_predictions, values=values, priors=priors)
inferred_test = attack.infer(x_test_for_attack, x_test_predictions, values=values, priors=priors)
train_diff = np.abs(inferred_train - x_train_feature.reshape(1, -1))
test_diff = np.abs(inferred_test - x_test_feature.reshape(1, -1))
assert np.sum(train_diff) / len(inferred_train) == pytest.approx(0.3357, abs=0.03)
assert np.sum(test_diff) / len(inferred_test) == pytest.approx(0.3149, abs=0.03)
# assert np.sum(train_diff) / len(inferred_train) < np.sum(test_diff) / len(inferred_test)
except ARTTestException as e:
art_warning(e)
@pytest.mark.skip_framework("dl_frameworks")
def METHOD_NAME(art_warning, get_diabetes_dataset):
try:
attack_feature = 0 # age
(x_train_diabetes, y_train_diabetes), (x_test_diabetes, y_test_diabetes) = get_diabetes_dataset
bins = [
-0.96838121,
-0.18102872,
0.21264752,
1.0,
]
# need to transform attacked feature into categorical
def transform_feature(x):
orig = x.copy()
for i in range(3):
x[(orig >= bins[i]) & (orig <= bins[i + 1])] = i / 3
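# Illustrative mapping: a scaled age value of 0.0 falls in the second bin
# [-0.18102872, 0.21264752], so transform_feature maps it to 1/3.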
values = [i / 3 for i in range(3)]
priors = [154 / 442, 145 / 442, 143 / 442]
x_train_for_attack = np.delete(x_train_diabetes, attack_feature, 1)
x_train_feature = x_train_diabetes[:, attack_feature].copy()
transform_feature(x_train_feature)
x_test_for_attack = np.delete(x_test_diabetes, attack_feature, 1)
x_test_feature = x_test_diabetes[:, attack_feature].copy()
transform_feature(x_test_feature)
from sklearn import tree
regr_model = tree.DecisionTreeRegressor(random_state=7)
regr_model.fit(x_train_diabetes, y_train_diabetes)
regressor = ScikitlearnDecisionTreeRegressor(regr_model)
attack = AttributeInferenceWhiteBoxLifestyleDecisionTree(regressor, attack_feature=attack_feature)
x_train_predictions = regressor.predict(x_train_diabetes).reshape(-1, 1)
x_test_predictions = regressor.predict(x_test_diabetes).reshape(-1, 1)
inferred_train = attack.infer(x_train_for_attack, x_train_predictions, values=values, priors=priors)
inferred_test = attack.infer(x_test_for_attack, x_test_predictions, values=values, priors=priors)
train_diff = np.abs(inferred_train - x_train_feature.reshape(1, -1))
test_diff = np.abs(inferred_test - x_test_feature.reshape(1, -1))
assert np.sum(train_diff) / len(inferred_train) == pytest.approx(0.318, abs=0.1)
assert np.sum(test_diff) / len(inferred_test) == pytest.approx(0.34, abs=0.12)
# assert np.sum(train_diff) / len(inferred_train) < np.sum(test_diff) / len(inferred_test)
except ARTTestException as e:
art_warning(e)
@pytest.mark.skip_framework("dl_frameworks")
def test_check_params(art_warning, decision_tree_estimator):
try:
classifier = decision_tree_estimator()
with pytest.raises(ValueError):
_ = AttributeInferenceWhiteBoxLifestyleDecisionTree(classifier, attack_feature=-5)
except ARTTestException as e:
art_warning(e)
def test_classifier_type_check_fail():
backend_test_classifier_type_check_fail(
AttributeInferenceWhiteBoxLifestyleDecisionTree,
((ScikitlearnDecisionTreeClassifier, ScikitlearnDecisionTreeRegressor),),
) |
298,794 | output sanitization | import json
import pandas
from avi.migrationtools.test.common import excel_reader
class ExcelReader():
def percentage_success(self, path_to_excel):
# Percentage Success from Excel Reports
# find the status column
path = path_to_excel
s = pandas.read_excel(path, engine='openpyxl', sheet_name='Status Sheet')
if "NsxT type" in s:
type_str = "NsxT type"
else:
# the rest of this method relies on type_str, so fail fast with a clear error
raise KeyError('Column "NsxT type" not found in the status sheet')
report_dict = dict()
for row in range(s.index.size):
# taking col_type_val column for type and col_status_val for status
val = s[type_str][row]
state = s['Status'][row]
fail = 1
suc = 0
if state == "PARTIAL" or state == "SUCCESSFUL":
fail = 0
suc = 1
if val not in report_dict:
report_dict.update({val: {'success': suc, 'fail': fail}})
else:
report_dict[val]['success'] += suc
report_dict[val]['fail'] += fail
# break
for key in report_dict.keys():
if report_dict[key]['success'] + report_dict[key]['fail'] != 0:
percent = float(report_dict[key]['success'] * 100 /
(report_dict[key]['success'] + report_dict[key]['fail']))
report_dict[key].update({'percent': percent})
else:
report_dict[key].update({'percent': 100.0})
for key in report_dict.keys():
print(key, " -> ", report_dict[key]['percent'], "%")
def output_vs_level_status(self, path_to_excel):
excel_reader.output_vs_level_status(path_to_excel)
def METHOD_NAME(self, path_to_excel, path_to_out_json=None, path_to_log=None):
''' Compare the objects in the Excel status sheet against the objects present in the output json (and, optionally, the conversion log). '''
path = path_to_excel
out_obj = []
excel_obj = []
# Output Sanitization
s = pandas.read_excel(path, engine='openpyxl', sheet_name='Status Sheet')
cols = 0
cols_id = None
cols_status = None
for row in range(s.index.size):
if 'NsxT ID' in s and s['Status'][row] in ['SUCCESSFUL', 'PARTIAL']:
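# entities merged into other objects (hash/oneconnect profiles, routes, indirectly mapped items) have no standalone Avi object, so they are skipped in the comparison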
if s['NsxT ID'][row] in ['hash', 'oneconnect'] or \
s['NsxT type'][row] == 'route' or \
s['NsxT SubType'][row] in ['oneconnect', 'one-connect'] or \
"Indirectly mapped" in s['Avi Object'][row]:
value = None
else:
value = s['NsxT ID'][row]
if s['NsxT type'][row] in ['pool', 'policy']:
value = s['NsxT ID'][row].split('/')[-1]
if value:
print(value+"----------------------------")
excel_obj.append(value)
with open(path_to_out_json, 'r') as file_stream:
out_data = json.load(file_stream)
for entity in out_data:
print(entity)
print("----")
if entity != 'META' and entity != 'VsVip' and entity != \
"OneConnect" and entity != "hash_algorithm":
for obj in out_data[entity]:
out_obj.append(obj.get('name'))
excel_obj.sort()
out_obj.sort()
log_obj = {}
if path_to_log:
with open(path_to_log, 'r') as file_stream:
a = file_stream.readlines()
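# the migration log is assumed to delimit its summary blocks with '$$$$$$'; the second-to-last block is parsed as a dict of skipped objects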
try:
b = str(a).split('$$$$$$')[-2].replace('\'', '"')
print(b)
log_obj = eval(b)
except Exception:
pass
obj_list = list()
# comparing excel objects with json out objects
obj_list = list(set(excel_obj) - set(out_obj))
# If object read from log is dict compare
if isinstance(log_obj, dict):
for key in log_obj.keys():
obj_list = list(set(obj_list) - set(log_obj[key].keys()))
print("Object Difference between Excel sheet and output is %s" % len(obj_list))
if obj_list:
print("Object not Common in Both Excel and Output %s", obj_list)
return False
print("Excel sheet matches with Output.json")
return True |
298,795 | test if project without boards succeeds | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import params
import pytest
"""
Test command "open-project --export-pnp-top --export-pnp-bottom"
"""
@pytest.mark.parametrize("project", [params.EMPTY_PROJECT_LPP_PARAM])
@pytest.mark.parametrize("argument,side", [
('--export-pnp-top=foo.bar', 'top'),
('--export-pnp-bottom=foo.bar', 'bottom'),
])
def test_if_unknown_file_extension_fails(cli, project, argument, side):
cli.add_project(project.dir, as_lppz=project.is_lppz)
code, stdout, stderr = cli.run('open-project', argument, project.path)
assert stderr == " ERROR: Unknown extension 'bar'.\n"
assert stdout == \
"Open project '{project.path}'...\n" \
"Export {side} assembly data to 'foo.bar'...\n" \
" - 'default' => 'foo.bar'\n" \
"Finished with errors!\n" \
.format(project=project, side=side)
assert code == 1
@pytest.mark.parametrize("project", [params.EMPTY_PROJECT_LPP_PARAM])
@pytest.mark.parametrize("ext", ['csv', 'gbr'])
def METHOD_NAME(cli, project, ext):
cli.add_project(project.dir, as_lppz=project.is_lppz)
# remove all boards first
with open(cli.abspath(project.dir + '/boards/boards.lp'), 'w') as f:
f.write('(librepcb_boards)')
relpath_top = project.output_dir + '/pnp/top.' + ext
relpath_bot = project.output_dir + '/pnp/bot.' + ext
abspath_top = cli.abspath(relpath_top)
abspath_bot = cli.abspath(relpath_bot)
assert not os.path.exists(abspath_top)
assert not os.path.exists(abspath_bot)
code, stdout, stderr = cli.run('open-project',
'--export-pnp-top=' + relpath_top,
'--export-pnp-bottom=' + relpath_bot,
project.path)
assert stderr == ''
assert stdout == \
"Open project '{project.path}'...\n" \
"Export top assembly data to '{project.output_dir}/pnp/top.{ext}'...\n" \
"Export bottom assembly data to '{project.output_dir}/pnp/bot.{ext}'...\n" \
"SUCCESS\n".format(project=project, ext=ext)
assert code == 0
assert not os.path.exists(abspath_top) # nothing exported
assert not os.path.exists(abspath_bot) # nothing exported
@pytest.mark.parametrize("project", [
params.PROJECT_WITH_TWO_BOARDS_LPP_PARAM,
params.PROJECT_WITH_TWO_BOARDS_LPPZ_PARAM,
])
@pytest.mark.parametrize("ext", ['csv', 'gbr'])
def test_export_project_with_two_boards_implicit(cli, project, ext):
cli.add_project(project.dir, as_lppz=project.is_lppz)
fp_top = project.output_dir + '/pnp/{{BOARD}}_top.' + ext
fp_bot = project.output_dir + '/pnp/{{BOARD}}_bot.' + ext
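# the CLI expands the '{{BOARD}}' placeholder with each board name, producing one file per board per side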
dir = cli.abspath(project.output_dir + '/pnp')
assert not os.path.exists(dir)
code, stdout, stderr = cli.run('open-project',
'--export-pnp-top=' + fp_top, # --arg="val"
'--export-pnp-bottom', fp_bot, # --arg "val"
project.path)
assert stderr == ''
assert stdout == \
"Open project '{project.path}'...\n" \
"Export top assembly data to '{project.output_dir}/pnp/{{{{BOARD}}}}_top.{ext}'...\n" \
" - 'default' => '{project.output_dir_native}//pnp//default_top.{ext}'\n" \
" - 'copy' => '{project.output_dir_native}//pnp//copy_top.{ext}'\n" \
"Export bottom assembly data to '{project.output_dir}/pnp/{{{{BOARD}}}}_bot.{ext}'...\n" \
" - 'default' => '{project.output_dir_native}//pnp//default_bot.{ext}'\n" \
" - 'copy' => '{project.output_dir_native}//pnp//copy_bot.{ext}'\n" \
"SUCCESS\n".format(project=project, ext=ext).replace('//', os.sep)
assert code == 0
assert os.path.exists(dir)
assert len(os.listdir(dir)) == 4
@pytest.mark.parametrize("project", [
params.PROJECT_WITH_TWO_BOARDS_LPP_PARAM,
params.PROJECT_WITH_TWO_BOARDS_LPPZ_PARAM,
])
@pytest.mark.parametrize("ext", ['csv', 'gbr'])
def test_export_project_with_two_boards_explicit_one(cli, project, ext):
cli.add_project(project.dir, as_lppz=project.is_lppz)
fp_top = project.output_dir + '/pnp/{{BOARD}}_top.' + ext
fp_bot = project.output_dir + '/pnp/{{BOARD}}_bot.' + ext
dir = cli.abspath(project.output_dir + '/pnp')
assert not os.path.exists(dir)
code, stdout, stderr = cli.run('open-project',
'--export-pnp-top=' + fp_top,
'--export-pnp-bottom=' + fp_bot,
'--board=copy',
project.path)
assert stderr == ''
assert stdout == \
"Open project '{project.path}'...\n" \
"Export top assembly data to '{project.output_dir}/pnp/{{{{BOARD}}}}_top.{ext}'...\n" \
" - 'copy' => '{project.output_dir_native}//pnp//copy_top.{ext}'\n" \
"Export bottom assembly data to '{project.output_dir}/pnp/{{{{BOARD}}}}_bot.{ext}'...\n" \
" - 'copy' => '{project.output_dir_native}//pnp//copy_bot.{ext}'\n" \
"SUCCESS\n".format(project=project, ext=ext).replace('//', os.sep)
assert code == 0
assert os.path.exists(dir)
assert len(os.listdir(dir)) == 2
@pytest.mark.parametrize("project", [params.PROJECT_WITH_TWO_BOARDS_LPP])
@pytest.mark.parametrize("ext", ['csv', 'gbr'])
def test_export_project_with_two_conflicting_boards_fails(cli, project, ext):
cli.add_project(project.dir, as_lppz=project.is_lppz)
fp_top = project.output_dir + '/top.' + ext
fp_bot = project.output_dir + '/bot.' + ext
code, stdout, stderr = cli.run('open-project',
'--export-pnp-top=' + fp_top,
'--export-pnp-bottom=' + fp_bot,
project.path)
assert stderr == \
"ERROR: The file '{project.output_dir_native}//bot.{ext}' was " \
"written multiple times!\n" \
"ERROR: The file '{project.output_dir_native}//top.{ext}' was " \
"written multiple times!\n" \
"NOTE: To avoid writing files multiple times, make sure to pass " \
"unique filepaths to all export functions. For board output files, " \
"you could either add the placeholder '{{{{BOARD}}}}' to the path or " \
"specify the boards to export with the '--board' argument.\n" \
.format(project=project, ext=ext).replace('//', os.sep)
assert stdout == \
"Open project '{project.path}'...\n" \
"Export top assembly data to '{project.output_dir}/top.{ext}'...\n" \
" - 'default' => '{project.output_dir_native}//top.{ext}'\n" \
" - 'copy' => '{project.output_dir_native}//top.{ext}'\n" \
"Export bottom assembly data to '{project.output_dir}/bot.{ext}'...\n" \
" - 'default' => '{project.output_dir_native}//bot.{ext}'\n" \
" - 'copy' => '{project.output_dir_native}//bot.{ext}'\n" \
"Finished with errors!\n" \
.format(project=project, ext=ext).replace('//', os.sep)
assert code == 1 |
298,796 | test scatter3 d pysmo poly | #################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES).
#
# Copyright (c) 2018-2023 by the software owners: The Regents of the
# University of California, through Lawrence Berkeley National Laboratory,
# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon
# University, West Virginia University Research Corporation, et al.
# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md
# for full copyright and license information.
#################################################################################
"""
Tests for Surrogate Plotting Methods
"""
import pytest
import os
import pandas as pd
from pyomo.common.fileutils import this_file_dir
from pyomo.common.tempfiles import TempfileManager
from idaes.core.surrogate.pysmo_surrogate import PysmoSurrogate
from idaes.core.surrogate.sampling.data_utils import split_training_validation
from idaes.core.surrogate.plotting.sm_plotter import (
surrogate_scatter2D,
surrogate_scatter3D,
surrogate_parity,
surrogate_residual,
)
@pytest.fixture
def pysmo_poly_surrogate():
# import surrogates from external JSON
pysmo_poly_surrogate = PysmoSurrogate.load_from_file(
os.path.join(this_file_dir(), "pysmo_poly_surrogate.json")
)
return pysmo_poly_surrogate
@pytest.fixture
def data_validation():
# import validation data and create dataframe here as well
# random data subset will change every time, but should be fine
csv_data = pd.read_csv(os.path.join(this_file_dir(), "reformer-data.csv"))
data = csv_data.sample(n=100) # randomly sample points for validation
input_data = data.iloc[:, :2]
input_labels = input_data.columns
n_data = data[input_labels[0]].size # size = 100
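# 80/20 train/validation split; seeding with the fixed sample count keeps the split deterministic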
data_training, data_validation = split_training_validation(data, 0.8, seed=n_data)
return data_validation
# --------------------------------------------------------------------------
@pytest.mark.unit
def test_scatter2D_pysmo_poly(pysmo_poly_surrogate, data_validation):
with TempfileManager.new_context() as tf:
# note - a failure 'The process cannot access the file because it is
# being used by another process' could occur if an internal error
# arises before the results file is closed inside the surrogate method
# create and step into new temporary directory
dname = tf.mkdtemp()
filename = os.path.join(dname, "results.pdf")
surrogate_scatter2D(
pysmo_poly_surrogate, data_validation, filename=filename, show=False
)
assert os.path.exists(filename) # PDF results file
@pytest.mark.unit
def METHOD_NAME(pysmo_poly_surrogate, data_validation):
with TempfileManager.new_context() as tf:
# note - a failure 'The process cannot access the file because it is
# being used by another process' could occur if an internal error
# arises before the results file is closed inside the surrogate method
# create and step into new temporary directory
dname = tf.mkdtemp()
filename = os.path.join(dname, "results.pdf")
surrogate_scatter3D(
pysmo_poly_surrogate, data_validation, filename=filename, show=False
)
assert os.path.exists(filename) # PDF results file
@pytest.mark.unit
def test_parity_pysmo_poly(pysmo_poly_surrogate, data_validation):
with TempfileManager.new_context() as tf:
# note - a failure 'The process cannot access the file because it is
# being used by another process' could occur if an internal error
# arises before the results file is closed inside the surrogate method
# create and step into new temporary directory
dname = tf.mkdtemp()
filename = os.path.join(dname, "results.pdf")
surrogate_parity(
pysmo_poly_surrogate, data_validation, filename=filename, show=False
)
assert os.path.exists(filename) # PDF results file
@pytest.mark.unit
def test_residual_pysmo_poly(pysmo_poly_surrogate, data_validation):
with TempfileManager.new_context() as tf:
# note - a failure 'The process cannot access the file because it is
# being used by another process' could occur if an internal error
# arises before the results file is closed inside the surrogate method
# create and step into new temporary directory
dname = tf.mkdtemp()
filename = os.path.join(dname, "results.pdf")
surrogate_residual(
pysmo_poly_surrogate, data_validation, filename=filename, show=False
)
assert os.path.exists(filename) # PDF results file
@pytest.mark.unit
def test_scatter2D_noPDF_pysmo_poly(pysmo_poly_surrogate, data_validation):
with TempfileManager.new_context() as tf:
# note - a failure 'The process cannot access the file because it is
# being used by another process' could occur if an internal error
# arises before the results file is closed inside the surrogate method
# create and step into new temporary directory
dname = tf.mkdtemp()
surrogate_scatter2D(pysmo_poly_surrogate, data_validation, show=False)
for _, _, files in os.walk(dname): # check entire temp directory
for file in files:
assert not file.endswith(".pdf") # no PDF files should be created
@pytest.mark.unit
def test_scatter3D_noPDF_pysmo_poly(pysmo_poly_surrogate, data_validation):
with TempfileManager.new_context() as tf:
# note - a failure 'The process cannot access the file because it is
# being used by another process' could occur if an internal error
# arises before the results file is closed inside the surrogate method
# create and step into new temporary directory
dname = tf.mkdtemp()
surrogate_scatter3D(pysmo_poly_surrogate, data_validation, show=False)
for _, _, files in os.walk(dname): # check entire temp directory
for file in files:
assert not file.endswith(".pdf") # no PDF files should be created
@pytest.mark.unit
def test_parity_noPDF_pysmo_poly(pysmo_poly_surrogate, data_validation):
with TempfileManager.new_context() as tf:
# note - a failure 'The process cannot access the file because it is
# being used by another process' could occur if an internal error
# arises before the results file is closed inside the surrogate method
# create and step into new temporary directory
dname = tf.mkdtemp()
surrogate_parity(pysmo_poly_surrogate, data_validation, show=False)
for _, _, files in os.walk(dname): # check entire temp directory
for file in files:
assert not file.endswith(".pdf") # no PDF files should be created
@pytest.mark.unit
def test_residual_noPDF_pysmo_poly(pysmo_poly_surrogate, data_validation):
with TempfileManager.new_context() as tf:
# note - a failure 'The process cannot access the file because it is
# being used by another process' could occur if an internal error
# arises before the results file is closed inside the surrogate method
# create and step into new temporary directory
dname = tf.mkdtemp()
surrogate_residual(pysmo_poly_surrogate, data_validation, show=False)
for _, _, files in os.walk(dname): # check entire temp directory
for file in files:
assert not file.endswith(".pdf") # no PDF files should be created
298,797 | current frame | # The PEP 484 type hints stub file for the QtSvg module.
#
# Generated by SIP 6.4.0
#
# Copyright (c) 2021 Riverbank Computing Limited <info@riverbankcomputing.com>
#
# This file is part of PyQt5.
#
# This file may be used under the terms of the GNU General Public License
# version 3.0 as published by the Free Software Foundation and appearing in
# the file LICENSE included in the packaging of this file. Please review the
# following information to ensure the GNU General Public License version 3.0
# requirements will be met: http://www.gnu.org/copyleft/gpl.html.
#
# If you do not wish to use this file under the terms of the GPL version 3.0
# then you may purchase a commercial license. For more information contact
# info@riverbankcomputing.com.
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
import typing
from PyQt5 import sip
from PyQt5 import QtWidgets
from PyQt5 import QtGui
from PyQt5 import QtCore
# Support for QDate, QDateTime and QTime.
import datetime
# Convenient type aliases.
PYQT_SLOT = typing.Union[typing.Callable[..., None], QtCore.pyqtBoundSignal]
# Convenient aliases for complicated OpenGL types.
PYQT_OPENGL_ARRAY = typing.Union[typing.Sequence[int], typing.Sequence[float],
sip.Buffer, None]
PYQT_OPENGL_BOUND_ARRAY = typing.Union[typing.Sequence[int],
typing.Sequence[float], sip.Buffer, int, None]
class QGraphicsSvgItem(QtWidgets.QGraphicsObject):
@typing.overload
def __init__(self, parent: typing.Optional[QtWidgets.QGraphicsItem] = ...) -> None: ...
@typing.overload
def __init__(self, fileName: str, parent: typing.Optional[QtWidgets.QGraphicsItem] = ...) -> None: ...
def type(self) -> int: ...
def paint(self, painter: QtGui.QPainter, option: QtWidgets.QStyleOptionGraphicsItem, widget: typing.Optional[QtWidgets.QWidget] = ...) -> None: ...
def boundingRect(self) -> QtCore.QRectF: ...
def maximumCacheSize(self) -> QtCore.QSize: ...
def setMaximumCacheSize(self, size: QtCore.QSize) -> None: ...
def elementId(self) -> str: ...
def setElementId(self, id: str) -> None: ...
def renderer(self) -> 'QSvgRenderer': ...
def setSharedRenderer(self, renderer: 'QSvgRenderer') -> None: ...
class QSvgGenerator(QtGui.QPaintDevice):
def __init__(self) -> None: ...
def metric(self, metric: QtGui.QPaintDevice.PaintDeviceMetric) -> int: ...
def paintEngine(self) -> QtGui.QPaintEngine: ...
@typing.overload
def setViewBox(self, viewBox: QtCore.QRect) -> None: ...
@typing.overload
def setViewBox(self, viewBox: QtCore.QRectF) -> None: ...
def viewBoxF(self) -> QtCore.QRectF: ...
def viewBox(self) -> QtCore.QRect: ...
def setDescription(self, description: str) -> None: ...
def description(self) -> str: ...
def setTitle(self, title: str) -> None: ...
def title(self) -> str: ...
def setResolution(self, resolution: int) -> None: ...
def resolution(self) -> int: ...
def setOutputDevice(self, outputDevice: QtCore.QIODevice) -> None: ...
def outputDevice(self) -> QtCore.QIODevice: ...
def setFileName(self, fileName: str) -> None: ...
def fileName(self) -> str: ...
def setSize(self, size: QtCore.QSize) -> None: ...
def size(self) -> QtCore.QSize: ...
class QSvgRenderer(QtCore.QObject):
@typing.overload
def __init__(self, parent: typing.Optional[QtCore.QObject] = ...) -> None: ...
@typing.overload
def __init__(self, filename: str, parent: typing.Optional[QtCore.QObject] = ...) -> None: ...
@typing.overload
def __init__(self, contents: typing.Union[QtCore.QByteArray, bytes, bytearray], parent: typing.Optional[QtCore.QObject] = ...) -> None: ...
@typing.overload
def __init__(self, contents: QtCore.QXmlStreamReader, parent: typing.Optional[QtCore.QObject] = ...) -> None: ...
def transformForElement(self, id: str) -> QtGui.QTransform: ...
def setAspectRatioMode(self, mode: QtCore.Qt.AspectRatioMode) -> None: ...
def aspectRatioMode(self) -> QtCore.Qt.AspectRatioMode: ...
repaintNeeded: typing.ClassVar[QtCore.pyqtSignal]
@typing.overload
def render(self, p: QtGui.QPainter) -> None: ...
@typing.overload
def render(self, p: QtGui.QPainter, bounds: QtCore.QRectF) -> None: ...
@typing.overload
def render(self, painter: QtGui.QPainter, elementId: str, bounds: QtCore.QRectF = ...) -> None: ...
@typing.overload
def load(self, filename: str) -> bool: ...
@typing.overload
def load(self, contents: typing.Union[QtCore.QByteArray, bytes, bytearray]) -> bool: ...
@typing.overload
def load(self, contents: QtCore.QXmlStreamReader) -> bool: ...
def animationDuration(self) -> int: ...
def setCurrentFrame(self, a0: int) -> None: ...
def METHOD_NAME(self) -> int: ...
def setFramesPerSecond(self, num: int) -> None: ...
def framesPerSecond(self) -> int: ...
def boundsOnElement(self, id: str) -> QtCore.QRectF: ...
def animated(self) -> bool: ...
@typing.overload
def setViewBox(self, viewbox: QtCore.QRect) -> None: ...
@typing.overload
def setViewBox(self, viewbox: QtCore.QRectF) -> None: ...
def viewBoxF(self) -> QtCore.QRectF: ...
def viewBox(self) -> QtCore.QRect: ...
def elementExists(self, id: str) -> bool: ...
def defaultSize(self) -> QtCore.QSize: ...
def isValid(self) -> bool: ...
class QSvgWidget(QtWidgets.QWidget):
@typing.overload
def __init__(self, parent: typing.Optional[QtWidgets.QWidget] = ...) -> None: ...
@typing.overload
def __init__(self, file: str, parent: typing.Optional[QtWidgets.QWidget] = ...) -> None: ...
def paintEvent(self, event: QtGui.QPaintEvent) -> None: ...
@typing.overload
def load(self, file: str) -> None: ...
@typing.overload
def load(self, contents: typing.Union[QtCore.QByteArray, bytes, bytearray]) -> None: ...
def sizeHint(self) -> QtCore.QSize: ...
def renderer(self) -> QSvgRenderer: ... |
298,798 | test main overview status url | import urllib.parse
from django.contrib.auth.models import User
from django.test import TestCase
from django.urls import reverse
from dfirtrack_config.models import MainConfigModel
def set_main_overview(main_overview):
"""change config"""
model = MainConfigModel.objects.get(main_config_name='MainConfig')
model.main_overview = f'main_overview_{main_overview}'
model.save()
# return to test function
return
class MainOverviewViewTestCase(TestCase):
"""main overview view tests"""
@classmethod
def setUpTestData(cls):
# create user
User.objects.create_user(
username='testuser_main_overview', password='RYgxCfV2NRcfXlJvsSHP'
)
def test_main_overview_not_logged_in(self):
"""test main overview"""
# create url
destination = '/login/?next=' + urllib.parse.quote('/main_overview/', safe='')
# get response
response = self.client.get('/main_overview/', follow=True)
# compare
self.assertRedirects(
response, destination, status_code=302, target_status_code=200
)
def test_main_overview_system_url(self):
"""test main overview url and redirect"""
# change config
set_main_overview('system')
# login testuser
self.client.login(
username='testuser_main_overview', password='RYgxCfV2NRcfXlJvsSHP'
)
# get reverse url
url = reverse('main_overview')
# compare url
self.assertEqual(url, '/main_overview/')
# create url
destination = urllib.parse.quote('/system/')
# get response
response = self.client.get('/main_overview/')
# compare redirect
self.assertRedirects(
response, destination, status_code=302, target_status_code=200
)
def test_main_overview_artifact_url(self):
"""test main overview url and redirect"""
# change config
set_main_overview('artifact')
# login testuser
self.client.login(
username='testuser_main_overview', password='RYgxCfV2NRcfXlJvsSHP'
)
# get reverse url
url = reverse('main_overview')
# compare url
self.assertEqual(url, '/main_overview/')
# create url
destination = urllib.parse.quote('/artifacts/artifact/')
# get response
response = self.client.get('/main_overview/')
# compare redirect
self.assertRedirects(
response, destination, status_code=302, target_status_code=200
)
def test_main_overview_case_url(self):
"""test main overview url and redirect"""
# change config
set_main_overview('case')
# login testuser
self.client.login(
username='testuser_main_overview', password='RYgxCfV2NRcfXlJvsSHP'
)
# get reverse url
url = reverse('main_overview')
# compare url
self.assertEqual(url, '/main_overview/')
# create url
destination = urllib.parse.quote('/case/')
# get response
response = self.client.get('/main_overview/')
# compare redirect
self.assertRedirects(
response, destination, status_code=302, target_status_code=200
)
def METHOD_NAME(self):
"""test main overview url and redirect"""
# change config
set_main_overview('status')
# login testuser
self.client.login(
username='testuser_main_overview', password='RYgxCfV2NRcfXlJvsSHP'
)
# get reverse url
url = reverse('main_overview')
# compare url
self.assertEqual(url, '/main_overview/')
# create url
destination = urllib.parse.quote('/config/status/')
# get response
response = self.client.get('/main_overview/')
# compare redirect
self.assertRedirects(
response, destination, status_code=302, target_status_code=200
)
def test_main_overview_tag_url(self):
"""test main overview url and redirect"""
# change config
set_main_overview('tag')
# login testuser
self.client.login(
username='testuser_main_overview', password='RYgxCfV2NRcfXlJvsSHP'
)
# get reverse url
url = reverse('main_overview')
# compare url
self.assertEqual(url, '/main_overview/')
# create url
destination = urllib.parse.quote('/tag/')
# get response
response = self.client.get('/main_overview/')
# compare redirect
self.assertRedirects(
response, destination, status_code=302, target_status_code=200
)
def test_main_overview_task_url(self):
"""test main overview url and redirect"""
# change config
set_main_overview('task')
# login testuser
self.client.login(
username='testuser_main_overview', password='RYgxCfV2NRcfXlJvsSHP'
)
# get reverse url
url = reverse('main_overview')
# compare url
self.assertEqual(url, '/main_overview/')
# create url
destination = urllib.parse.quote('/task/')
# get response
response = self.client.get('/main_overview/')
# compare redirect
self.assertRedirects(
response, destination, status_code=302, target_status_code=200
)
def test_main_overview_default_url(self):
"""test main overview url and redirect"""
# change config
set_main_overview('foobar')
# login testuser
self.client.login(
username='testuser_main_overview', password='RYgxCfV2NRcfXlJvsSHP'
)
# get reverse url
url = reverse('main_overview')
# compare url
self.assertEqual(url, '/main_overview/')
# create url
destination = urllib.parse.quote('/system/')
# get response
response = self.client.get('/main_overview/')
# compare redirect
self.assertRedirects(
response, destination, status_code=302, target_status_code=200
) |
298,799 | test view data on reshape | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from .. import (
ones,
add,
swapaxes,
moveaxis,
atleast_1d,
atleast_2d,
atleast_3d,
squeeze,
tensor,
)
def test_array_function(setup):
a = ones((10, 20), chunk_size=8)
# test sum
np.testing.assert_equal(np.sum(a).execute().fetch(), 200)
# test qr
q, r = np.linalg.qr(a)
np.testing.assert_array_almost_equal(np.dot(q, r).execute().fetch(), a)
def test_view_data_on_slice(setup):
data = np.random.rand(10, 20)
a = tensor(data, chunk_size=8)
b = a[:5, 5:10]
b[:3, :3] = 3
npa = data.copy()
npb = npa[:5, 5:10]
npb[:3, :3] = 3
np.testing.assert_array_equal(b.execute(), npb)
np.testing.assert_array_equal(a.execute(), npa)
data = np.random.rand(10, 20)
a = tensor(data, chunk_size=8)
b = a[:7]
b += 1
npa = data.copy()
npb = npa[:7]
npb += 1
np.testing.assert_array_equal(a.execute(), npa)
np.testing.assert_array_equal(b.execute(), npb)
def test_set_item_on_view(setup):
a = ones((5, 8), dtype=int)
b = a[:3]
b[0, 0] = 2
c = b.ravel() # create view
c[1] = 4
npa = np.ones((5, 8), dtype=int)
npb = npa[:3]
npb[0, 0] = 2
npc = npb.ravel() # create view
npc[1] = 4
np.testing.assert_array_equal(a.execute(), npa)
np.testing.assert_array_equal(b.execute(), npb)
np.testing.assert_array_equal(c.execute(), npc)
def test_view_data_on_transpose(setup):
data = np.random.rand(10, 20)
a = tensor(data, chunk_size=6)
b = a.T
add(b, 1, out=b)
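# b is a transposed view of a, so the in-place add is visible through both tensors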
np.testing.assert_array_equal(b.execute(), data.T + 1)
np.testing.assert_array_equal(a.execute(), data + 1)
def test_view_data_on_swapaxes(setup):
data = np.random.rand(10, 20)
a = tensor(data, chunk_size=6)
b = swapaxes(a, 1, 0)
a[1] = 10
npa = data.copy()
npb = np.swapaxes(npa, 1, 0)
npa[1] = 10
np.testing.assert_array_equal(b.execute(), npb)
np.testing.assert_array_equal(a.execute(), npa)
def test_view_data_on_moveaxis(setup):
data = np.random.rand(10, 20)
a = tensor(data, chunk_size=6)
b = moveaxis(a, 1, 0)
a[0][1] = 10
npa = data.copy()
npb = np.moveaxis(npa, 1, 0)
npa[0][1] = 10
np.testing.assert_array_equal(b.execute(), npb)
np.testing.assert_array_equal(a.execute(), npa)
def test_view_data_on_atleast1d(setup):
a = atleast_1d(1)
b = a[:]
b[0] = 10
np.testing.assert_array_equal(b.execute(), np.array([10]))
np.testing.assert_array_equal(a.execute(), np.array([10]))
def test_view_data_on_atleast2d(setup):
data = np.random.rand(10)
a = atleast_2d(tensor(data, chunk_size=5))
b = add(a[:, :5], 1, out=a[:, 5:])
npa = np.atleast_2d(data.copy())
npb = np.add(npa[:, :5], 1, out=npa[:, 5:])
np.testing.assert_array_equal(a.execute(), npa)
np.testing.assert_array_equal(b.execute(), npb)
def test_view_data_on_atleast3d(setup):
data = np.random.rand(10, 20)
a = atleast_3d(tensor(data, chunk_size=5))
b = a[:, :5, :10][0]
c = add(b[:4], b[1:], out=a[0, 16:])
npa = np.atleast_3d(data.copy())
npb = npa[:, :5, :10][0]
npc = np.add(npb[:4], npb[1:], out=npa[0, 16:])
np.testing.assert_array_equal(a.execute(), npa)
np.testing.assert_array_equal(b.execute(), npb)
np.testing.assert_array_equal(c.execute(), npc)
def test_view_data_on_squeeze(setup):
data = np.random.rand(1, 4, 1)
a = tensor(data, chunk_size=2)
b = squeeze(a, axis=0)
b[:3] = 10
npa = data.copy()
npb = np.squeeze(npa, axis=0)
npb[:3] = 10
np.testing.assert_array_equal(b.execute(), npb)
np.testing.assert_array_equal(a.execute(), npa)
def METHOD_NAME(setup):
data = np.random.RandomState(0).random((3, 4, 5))
a = tensor(data.copy(), chunk_size=2)
b = a.reshape((5, 4, 3))
b[:3] = 10
npa = data.copy()
npb = npa.reshape((5, 4, 3))
npb[:3] = 10
np.testing.assert_array_equal(b.execute(), npb)
np.testing.assert_array_equal(a.execute(), npa)
data = np.random.RandomState(0).random((4, 5))
a2 = tensor(data.copy(), chunk_size=2)
b2 = a2.reshape((5, 4), order="F")
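# as in NumPy, reshaping C-ordered data with order="F" produces a copy, so the write below must not propagate back to a2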
b2[:3] = 10
npa = data.copy()
npb = npa.reshape((5, 4), order="F")
npb[:3] = 10
b2_result = b2.execute()
np.testing.assert_array_equal(a2.execute(), npa)
np.testing.assert_array_equal(b2_result, npb)
def test_view_data_on_ravel(setup):
# ravel creates a view
data = np.random.rand(3, 4, 5)
a = tensor(data, chunk_size=2)
b = a.ravel()
b[:10] = 10
npa = data.copy()
npb = npa.ravel()
npb[:10] = 10
np.testing.assert_array_equal(b.execute(), npb)
np.testing.assert_array_equal(a.execute(), npa)
# flatten creates a copy
data = np.random.rand(3, 4, 5)
a = tensor(data, chunk_size=2)
b = a.flatten()
b[:10] = 10
npa = data.copy()
npb = npa.flatten()
npb[:10] = 10
np.testing.assert_array_equal(b.execute(), npb)
np.testing.assert_array_equal(a.execute(), npa)
def test_copy_and_view(setup):
data = np.random.rand(10, 20)
a = tensor(data, chunk_size=6)
b = a.view()
b[:5] = 10
npa = data.copy()
npb = npa.view()
npb[:5] = 10
np.testing.assert_array_equal(b.execute(), npb)
np.testing.assert_array_equal(a.execute(), npa)
data = np.random.rand(10, 20)
a = tensor(data.copy(), chunk_size=6)
b = a.copy()
b[:5] = 10
npa = data.copy()
npb = npa.copy()
npb[:5] = 10
np.testing.assert_array_equal(b.execute(), npb)
np.testing.assert_array_equal(a.execute(), npa)
a = tensor(data.copy(), chunk_size=6)
b = a[:5, :4]
c = b.copy()
c[0, 0] = 10
npa = data.copy()
npb = npa[:5, :4]
npc = npb.copy()
npc[0, 0] = 10
np.testing.assert_array_equal(c.execute(), npc)
np.testing.assert_array_equal(a.execute(), npa)
def test_flat(setup):
data = np.random.rand(10, 20)
a = tensor(data, chunk_size=4)
fl = a.flat
fl[1:10] = 10
b = fl[10:20]
b[0:4] = 20
npa = data.copy()
npfl = npa.flat
npfl[1:10] = 10
npb = npfl[10:20]
npb[0:4] = 20
np.testing.assert_array_equal(b.execute(), npb)
np.testing.assert_array_equal(a.execute(), npa) |