id int64 0 300k | label stringlengths 1 74 ⌀ | text stringlengths 4k 8k |
|---|---|---|
6,600 | test is previewable for broken mode | from unittest import mock
from django.conf import settings
from wagtail.models import Page
from wagtail.test.routablepage.models import RoutablePageTest
from wagtail.test.utils import WagtailPageTestCase
class TestCustomPageAssertions(WagtailPageTestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = cls.create_superuser("super")
    def setUp(self):
        """Create a routable test page under the site root for each test."""
        # NOTE(review): page id=2 is assumed to be the default Wagtail
        # root-level homepage fixture -- confirm against the test DB setup.
        self.parent = Page.objects.get(id=2)
        self.page = RoutablePageTest(
            title="Hello world!",
            slug="hello-world",
        )
        # add_child() saves and publishes the page in the page tree.
        self.parent.add_child(instance=self.page)
def test_is_routable(self):
self.assertPageIsRoutable(self.page)
def test_is_routable_with_alternative_route(self):
self.assertPageIsRoutable(self.page, "archive/year/1984/")
def test_is_routable_fails_for_draft_page(self):
self.page.live = False
self.page.save()
with self.assertRaises(self.failureException):
self.assertPageIsRoutable(self.page)
def test_is_routable_fails_for_invalid_route_path(self):
with self.assertRaises(self.failureException):
self.assertPageIsRoutable(self.page, "invalid-route-path/")
@mock.patch("django.test.testcases.Client.get")
@mock.patch("django.test.testcases.Client.force_login")
def test_is_renderable(self, mocked_force_login, mocked_get):
self.assertPageIsRenderable(self.page)
mocked_force_login.assert_not_called()
mocked_get.assert_called_once_with("/hello-world/", data=None)
@mock.patch("django.test.testcases.Client.get")
@mock.patch("django.test.testcases.Client.force_login")
def test_is_renderable_for_alternative_route(self, mocked_force_login, mocked_get):
self.assertPageIsRenderable(self.page, "archive/year/1984/")
mocked_force_login.assert_not_called()
mocked_get.assert_called_once_with("/hello-world/archive/year/1984/", data=None)
@mock.patch("django.test.testcases.Client.get")
@mock.patch("django.test.testcases.Client.force_login")
def test_is_renderable_for_user(self, mocked_force_login, mocked_get):
self.assertPageIsRenderable(self.page, user=self.superuser)
mocked_force_login.assert_called_once_with(
self.superuser, settings.AUTHENTICATION_BACKENDS[0]
)
mocked_get.assert_called_once_with("/hello-world/", data=None)
@mock.patch("django.test.testcases.Client.get")
def test_is_renderable_with_query_data(self, mocked_get):
query_data = {"p": 1, "q": "test"}
self.assertPageIsRenderable(self.page, query_data=query_data)
mocked_get.assert_called_once_with("/hello-world/", data=query_data)
    @mock.patch("django.test.testcases.Client.post")
    def test_is_renderable_with_query_and_post_data(self, mocked_post):
        """Supplying post_data switches the request to POST; query_data is
        encoded into the QUERYSTRING kwarg rather than the body."""
        query_data = {"p": 1, "q": "test"}
        post_data = {"subscribe": True}
        self.assertPageIsRenderable(
            self.page, query_data=query_data, post_data=post_data
        )
        # The POST body carries post_data; query_data becomes "p=1&q=test".
        mocked_post.assert_called_once_with(
            "/hello-world/", data=post_data, QUERYSTRING="p=1&q=test"
        )
def test_is_renderable_for_draft_page(self):
self.page.live = False
self.page.save()
# When accept_404 is False (the default) the test should fail
with self.assertRaises(self.failureException):
self.assertPageIsRenderable(self.page)
# When accept_404 is True, the test should pass
self.assertPageIsRenderable(self.page, accept_404=True)
def test_is_renderable_for_invalid_route_path(self):
# When accept_404 is False (the default) the test should fail
with self.assertRaises(self.failureException):
self.assertPageIsRenderable(self.page, "invalid-route-path/")
# When accept_404 is True, the test should pass
self.assertPageIsRenderable(self.page, "invalid-route-path/", accept_404=True)
def test_is_rendereable_accept_redirect(self):
redirect_route_paths = [
"permanant-homepage-redirect/",
"temporary-homepage-redirect/",
]
# When accept_redirect is False (the default) the tests should fail
for route_path in redirect_route_paths:
with self.assertRaises(self.failureException):
self.assertPageIsRenderable(self.page, route_path)
# When accept_redirect is True, the tests should pass
for route_path in redirect_route_paths:
self.assertPageIsRenderable(self.page, route_path, accept_redirect=True)
def test_is_editable(self):
self.assertPageIsEditable(self.page)
    @mock.patch("django.test.testcases.Client.force_login")
    def test_is_editable_always_authenticates(self, mocked_force_login):
        """assertPageIsEditable logs a user in even if the assertion fails."""
        try:
            self.assertPageIsEditable(self.page)
        except self.failureException:
            # The assertion's pass/fail outcome is irrelevant here; only
            # the login side effect is under test.
            pass
        # With no user supplied, a dedicated superuser is created and used.
        mocked_force_login.assert_called_with(
            self._pageiseditable_superuser, settings.AUTHENTICATION_BACKENDS[0]
        )
        try:
            self.assertPageIsEditable(self.page, user=self.superuser)
        except self.failureException:
            pass
        # An explicitly supplied user is logged in instead.
        mocked_force_login.assert_called_with(
            self.superuser, settings.AUTHENTICATION_BACKENDS[0]
        )
@mock.patch("django.test.testcases.Client.get")
@mock.patch("django.test.testcases.Client.force_login")
def test_is_editable_with_permission_lacking_user(
self, mocked_force_login, mocked_get
):
user = self.create_user("bob")
with self.assertRaises(self.failureException):
self.assertPageIsEditable(self.page, user=user)
mocked_force_login.assert_not_called()
mocked_get.assert_not_called()
def test_is_editable_with_post_data(self):
self.assertPageIsEditable(
self.page,
post_data={
"title": "Goodbye world?",
"slug": "goodbye-world",
"content": "goodbye",
},
)
def test_is_previewable(self):
self.assertPageIsPreviewable(self.page)
def test_is_previewable_with_post_data(self):
self.assertPageIsPreviewable(
self.page, post_data={"title": "test", "slug": "test"}
)
def test_is_previewable_with_custom_user(self):
self.assertPageIsPreviewable(self.page, user=self.superuser)
def test_is_previewable_for_alternative_mode(self):
self.assertPageIsPreviewable(self.page, mode="extra")
def METHOD_NAME(self):
with self.assertRaises(self.failureException):
self.assertPageIsPreviewable(self.page, mode="broken") |
6,601 | constraint definition | # -*- coding: utf-8 -*
from lxml import etree
def policy_attributes(Policy_definition, policy_attributes):
    """Append <policyAttribute> children to *Policy_definition*.

    Each entry of *policy_attributes* is a 5-tuple
    (name, syntax, constraint, description, defaultvalue).  The literal
    string 'NULL' marks a constraint as absent and a default value as
    empty (the <DefaultValue> element is still created, without text).
    """
    # The original code used enumerate() without using the index, and
    # chained assignments (x = SubElement(...).text = v) whose first
    # target was never read; both removed.
    for name, syntax, constraint, description, defaultvalue in policy_attributes:
        attribute = etree.SubElement(Policy_definition, 'policyAttribute', name=name)
        descriptor = etree.SubElement(attribute, 'Descriptor')
        etree.SubElement(descriptor, 'Syntax').text = syntax
        if constraint != 'NULL':
            etree.SubElement(descriptor, 'Constraint').text = constraint
        etree.SubElement(descriptor, 'Description').text = description
        default_element = etree.SubElement(descriptor, 'DefaultValue')
        if defaultvalue != 'NULL':
            default_element.text = defaultvalue
def constraint_attributes(METHOD_NAME, constraint_attributes):
    """Append <constraint> children to the *METHOD_NAME* element.

    Each entry of *constraint_attributes* is a 6-tuple
    (constraintid, syntax, constraint, description, defaultvalue, value).
    'NULL' marks a field as absent; a <DefaultValue> is only created when
    a real default exists, while <value> is always created (empty when
    the value is 'NULL').
    """
    # Removed the unused enumerate() index and the write-only first
    # targets of the chained assignments; behavior is unchanged.
    for constraintid, syntax, constraint, description, defaultvalue, value in constraint_attributes:
        constraint_element = etree.SubElement(METHOD_NAME, 'constraint', id=constraintid)
        descriptor = etree.SubElement(constraint_element, 'descriptor')
        etree.SubElement(descriptor, 'Syntax').text = syntax
        if constraint != 'NULL':
            etree.SubElement(descriptor, 'Constraint').text = constraint
        etree.SubElement(descriptor, 'Description').text = description
        if defaultvalue != 'NULL':
            etree.SubElement(descriptor, 'DefaultValue').text = defaultvalue
        value_element = etree.SubElement(constraint_element, 'value')
        if value != 'NULL':
            value_element.text = value
def policy_parameters(Policy_definition, parameters):
    """Append <params name=...><value>...</value></params> children.

    *parameters* is an iterable of (name, value) pairs; the string
    'NULL' produces an empty <value> element.
    """
    # Unused enumerate() index removed; each branch previously created
    # the <value> element separately -- hoisted to one SubElement call.
    for name, value in parameters:
        param = etree.SubElement(Policy_definition, 'params', name=name)
        value_element = etree.SubElement(param, 'value')
        if value != 'NULL':
            value_element.text = value
def policy_definition(Policy_Value, definition):
    """Create a <def> child of *Policy_Value* and return it.

    *definition* must supply 'id', 'classid' and 'description' keys.
    """
    def_element = etree.SubElement(
        Policy_Value, 'def', id=definition['id'], classId=definition['classid'])
    description_element = etree.SubElement(def_element, 'description')
    description_element.text = definition['description']
    return def_element
def METHOD_NAME(Policy_Value, definition):
    """Create a <constraint> child of *Policy_Value* and return it.

    *definition* must supply 'id', 'description' and 'classId' keys
    (note the capitalization difference from policy_definition).
    """
    constraint_element = etree.SubElement(Policy_Value, 'constraint', id=definition['id'])
    etree.SubElement(constraint_element, 'description').text = definition['description']
    etree.SubElement(constraint_element, 'classId').text = definition['classId']
    return constraint_element
def check_ext_key_usage(mylist, string):
    """Return the string 'true' if *string* is in *mylist*, else 'false'."""
    return 'true' if string in mylist else 'false'
def get_policyId(root):
    """Return the next available policy value id as a string.

    Scans ./PolicySets/PolicySet/value elements; returns the last
    element's ``id`` attribute incremented by one, or ``'1'`` when no
    value elements exist.
    """
    policy_values = root.findall('./PolicySets/PolicySet/value')
    last_id = None
    for element in policy_values:
        # Later elements overwrite earlier ones; only the last id matters.
        last_id = element.attrib["id"]
    if last_id is None:
        # No existing policy values: numbering starts at 1.
        return '1'
    # BUGFIX: the original tested ``value is 0`` -- an identity comparison
    # against a literal (SyntaxWarning on py3.8+), and ``value`` held a
    # *string* id after the loop, so the check relied on CPython interning.
    return str(int(last_id) + 1)
def get_Element_PolicyValue(PolicySet, javaclass):
    """Return the <value> element whose policy classId equals *javaclass*.

    Builds a {classId: value-id} map from the <value> children of
    *PolicySet* (the classId is read from each value's first child),
    then looks the element up by id.  Returns None when no policy with
    that class is present.
    """
    class_to_value_id = {}
    for value_element in PolicySet.iterchildren(tag='value'):
        # The first attribute of <value> is its id.
        value_id = value_element.items()[0][1]
        # classId lives on the first child (the <def> element).
        class_id = value_element[0].get('classId')
        class_to_value_id[class_id] = value_id
    # BUGFIX: dict.has_key() was removed in Python 3; use ``in``.
    if javaclass not in class_to_value_id:
        return None
    value_id = class_to_value_id[javaclass]
    return PolicySet.find('./value[@id="%s"]' % str(value_id))
def check_policy(PolicySet, javaclass):
    """Return True if a policy with classId == *javaclass* is defined.

    Looks at every ./value/def child of *PolicySet* and tests membership
    of *javaclass* among their classId attributes.
    """
    # The original built a list and used an if/else returning literal
    # True/False; a membership test over a set is the idiomatic form.
    defined_policies = PolicySet.findall("./value/def")
    return javaclass in {d.get('classId') for d in defined_policies}
|
6,602 | neighbors of | import logging
import numpy as np
import scipy
import pickle
import gzip
import os
from tqdm import tqdm
import torch.utils.data as data
# From GTTF, need to cite once paper is officially accepted to ICLR 2021
class CompactAdjacency:
def __init__(self, adj, precomputed=None, subset=None):
"""Constructs CompactAdjacency.
Args:
adj: scipy sparse matrix containing full adjacency.
precomputed: If given, must be a tuple (compact_adj, degrees).
In this case, adj must be None. If supplied, subset will be ignored.
"""
if adj is None:
return
if precomputed:
if adj is not None:
raise ValueError("Both adj and precomputed are set.")
if subset is not None:
logging.info(
"WARNING: subset is provided. It is ignored, since precomputed is supplied."
)
self.compact_adj, self.degrees = precomputed
self.num_nodes = len(self.degrees)
else:
self.adj = adj
self.num_nodes = (
len(self.adj) if isinstance(self.adj, dict) else self.adj.shape[0]
)
self.compact_adj = scipy.sparse.dok_matrix(
(self.num_nodes, self.num_nodes), dtype="int32"
)
self.degrees = np.zeros(shape=[self.num_nodes], dtype="int32")
self.node_set = set(subset) if subset is not None else None
for v in range(self.num_nodes):
if isinstance(self.adj, dict) and self.node_set is not None:
connection_ids = np.array(
list(self.adj[v].intersection(self.node_set))
)
elif isinstance(self.adj, dict) and self.node_set is None:
connection_ids = np.array(list(self.adj[v]))
else:
connection_ids = self.adj[v].nonzero()[1]
self.degrees[v] = len(connection_ids)
self.compact_adj[
v, np.arange(len(connection_ids), dtype="int32")
] = connection_ids
self.compact_adj = self.compact_adj.tocsr()
@staticmethod
def from_file(filename):
instance = CompactAdjacency(None, None)
data = pickle.load(gzip.open(filename, "rb"))
instance.compact_adj = data["compact_adj"]
instance.adj = data["adj"]
instance.degrees = data["degrees"] if "degrees" in data else data["lengths"]
instance.num_nodes = data["num_nodes"]
return instance
@staticmethod
def from_directory(directory):
instance = CompactAdjacency(None, None)
instance.degrees = np.load(os.path.join(directory, "degrees.npy"))
instance.compact_adj = scipy.sparse.load_npz(
os.path.join(directory, "cadj.npz")
)
logging.info("\n\ncompact_adj.py from_directory\n\n")
# Make adj from cadj and save to adj.npz
import IPython
IPython.embed()
instance.adj = scipy.sparse.load_npz(os.path.join(directory, "adj.npz"))
instance.num_nodes = instance.adj.shape[0]
return instance
def save(self, filename):
with gzip.open(filename, "wb") as fout:
pickle.dump(
{
"compact_adj": self.compact_adj,
"adj": self.adj,
"degrees": self.degrees,
"num_nodes": self.num_nodes,
},
fout,
)
def METHOD_NAME(self, node):
neighbors = self.compact_adj[node, : self.degrees[node]].todense()
return np.array(neighbors)[0]
class MoleculesDataset(data.Dataset):
def __init__(
self,
adj_matrices,
feature_matrices,
labels,
path,
compact=True,
fanouts=[2, 2],
split="train",
):
if compact:
# filename = path + '/train_comp_adjs.pkl'
# if split == 'val':
# filename = path + '/val_comp_adjs.pkl'
# elif split == 'test':
# filename = path + '/test_comp_adjs.pkl'
#
# if os.path.isfile(filename):
# print('Loading saved compact adjacencies from disk!')
# with open(filename, 'rb') as f:
# self.adj_matrices = pickle.load(f)
#
# else:
# logging.info('Compacting adjacency matrices (GTTF)')
# self.adj_matrices = [CompactAdjacency(adj_matrix) for adj_matrix in tqdm(adj_matrices)]
# with open(filename, 'wb') as f:
# pickle.dump(self.adj_matrices, f)
self.adj_matrices = [
CompactAdjacency(adj_matrix) for adj_matrix in tqdm(adj_matrices)
]
else:
self.adj_matrices = adj_matrices
self.feature_matrices = feature_matrices
self.labels = labels
self.fanouts = [fanouts] * len(adj_matrices)
def __getitem__(self, index):
return (
self.adj_matrices[index],
self.feature_matrices[index],
self.labels[index],
self.fanouts[index],
)
def __len__(self):
return len(self.adj_matrices) |
6,603 | command list end | # Copyright (c) 2021 elParaguayo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
from types import ModuleType
import pytest
import libqtile.config
from libqtile import widget
class MockMPD(ModuleType):
    """Stand-in for the ``mpd`` module, installed via sys.modules patching.

    Provides just enough of the python-mpd2 API for the Mpd2 widget:
    the two exception classes and an MPDClient with a tiny in-memory
    playlist and play state.
    """

    class ConnectionError(Exception):
        pass

    class CommandError(Exception):
        pass

    class MPDClient:
        # Fake playlist; entries deliberately vary in which tags they
        # carry (artist vs performer vs nothing) to exercise the widget's
        # fallback formatting.
        tracks = [
            {"title": "Never gonna give you up", "artist": "Rick Astley", "song": "0"},
            {"title": "Sweet Caroline", "artist": "Neil Diamond"},
            {"title": "Marea", "artist": "Fred Again.."},
            {},
            {"title": "Sweden", "performer": "C418"},
        ]

        def __init__(self):
            self._index = 0
            self._connected = False
            self._state_override = True
            self._status = {"state": "pause"}

        @property
        def _current_song(self):
            return self.tracks[self._index]

        def ping(self):
            # NOTE(review): this raises the *builtin* ConnectionError, not
            # MockMPD.ConnectionError -- names in an enclosing class body
            # are not in scope inside methods. Also note _connected is
            # never set to True anywhere. Confirm the widget's expectations.
            if not self._connected:
                raise ConnectionError()
            return self._state_override

        def connect(self, host, port):
            # Always reports success regardless of host/port.
            return True

        def command_list_ok_begin(self):
            pass

        def status(self):
            return self._status

        def currentsong(self):
            # Returns the 1-based track number rather than a tag dict.
            return self._index + 1

        def METHOD_NAME(self):
            # Result of the queued command list: (status, current song).
            return (self.status(), self._current_song)

        def close(self):
            pass

        def disconnect(self):
            pass

        def pause(self):
            self._status["state"] = "pause"

        def play(self):
            print("PLAYING")
            self._status["state"] = "play"

        def stop(self):
            self._status["state"] = "stop"

        def next(self):
            # Wrap around the playlist in both directions.
            self._index = (self._index + 1) % len(self.tracks)

        def previous(self):
            self._index = (self._index - 1) % len(self.tracks)

        def add_states(self):
            # Turn on every status flag so the widget renders its flag chars.
            self._status.update(
                {"repeat": "1", "random": "1", "single": "1", "consume": "1", "updating_db": "1"}
            )

        def force_idle(self):
            # Stop playback and select the empty track (index 3).
            self._status["state"] = "stop"
            self._index = 3
@pytest.fixture
def mpd2_manager(manager_nospawn, monkeypatch, minimal_conf_noscreen, request):
    """Qtile manager running a bar with one Mpd2 widget backed by MockMPD.

    Widget kwargs may be supplied through indirect parametrization
    (``request.param``).
    """
    # Install the fake `mpd` module before the widget imports it.
    # monkeypatch.setattr("libqtile.widget.mpd2widget.MPDClient", MockMPDClient)
    monkeypatch.setitem(sys.modules, "mpd", MockMPD("mpd"))
    config = minimal_conf_noscreen
    config.screens = [
        libqtile.config.Screen(
            top=libqtile.bar.Bar(
                [widget.Mpd2(**getattr(request, "param", dict()))],
                50,
            ),
        )
    ]
    manager_nospawn.start(config)
    yield manager_nospawn
def test_mpd2_widget_display_and_actions(mpd2_manager):
    """Check the default status text and every mouse-button binding."""
    widget = mpd2_manager.c.widget["mpd2"]
    assert widget.info()["text"] == "⏸ Rick Astley/Never gonna give you up [-----]"
    # eval() forces a poll/update so each state change shows immediately.
    # Button 1 toggles state
    mpd2_manager.c.bar["top"].fake_button_press(0, "top", 0, 0, 1)
    widget.eval("self.update(self.poll())")
    assert widget.info()["text"] == "▶ Rick Astley/Never gonna give you up [-----]"
    # Button 3 stops
    mpd2_manager.c.bar["top"].fake_button_press(0, "top", 0, 0, 3)
    widget.eval("self.update(self.poll())")
    assert widget.info()["text"] == "■ Rick Astley/Never gonna give you up [-----]"
    # Button 1 toggles state
    mpd2_manager.c.bar["top"].fake_button_press(0, "top", 0, 0, 1)
    widget.eval("self.update(self.poll())")
    assert widget.info()["text"] == "▶ Rick Astley/Never gonna give you up [-----]"
    mpd2_manager.c.bar["top"].fake_button_press(0, "top", 0, 0, 1)
    widget.eval("self.update(self.poll())")
    assert widget.info()["text"] == "⏸ Rick Astley/Never gonna give you up [-----]"
    # Button 5 is "next"
    mpd2_manager.c.bar["top"].fake_button_press(0, "top", 0, 0, 5)
    widget.eval("self.update(self.poll())")
    assert widget.info()["text"] == "⏸ Neil Diamond/Sweet Caroline [-----]"
    mpd2_manager.c.bar["top"].fake_button_press(0, "top", 0, 0, 5)
    widget.eval("self.update(self.poll())")
    assert widget.info()["text"] == "⏸ Fred Again../Marea [-----]"
    # Button 4 is previous
    mpd2_manager.c.bar["top"].fake_button_press(0, "top", 0, 0, 4)
    widget.eval("self.update(self.poll())")
    assert widget.info()["text"] == "⏸ Neil Diamond/Sweet Caroline [-----]"
    mpd2_manager.c.bar["top"].fake_button_press(0, "top", 0, 0, 4)
    widget.eval("self.update(self.poll())")
    assert widget.info()["text"] == "⏸ Rick Astley/Never gonna give you up [-----]"
def test_mpd2_widget_extra_info(mpd2_manager):
    """Quick test to check extra info is displayed ok."""
    widget = mpd2_manager.c.widget["mpd2"]
    # Inject everything to make test quicker
    widget.eval("self.client.add_states()")
    # Update widget and check text
    widget.eval("self.update(self.poll())")
    # NOTE(review): "rz1cU" presumably maps to the repeat/random/single/
    # consume/updating_db flags set by add_states() -- confirm in Mpd2.
    assert widget.info()["text"] == "⏸ Rick Astley/Never gonna give you up [rz1cU]"
def test_mpd2_widget_idle_message(mpd2_manager):
    """Quick test to check idle message."""
    widget = mpd2_manager.c.widget["mpd2"]
    # Inject everything to make test quicker
    # force_idle() stops playback and selects the tag-less track.
    widget.eval("self.client.force_idle()")
    # Update widget and check text
    widget.eval("self.update(self.poll())")
    assert widget.info()["text"] == "■ MPD IDLE[-----]"
@pytest.mark.parametrize(
    "mpd2_manager", [{"status_format": "{currentsong}: {artist}/{title}"}], indirect=True
)
def test_mpd2_widget_current_song(mpd2_manager):
    """Quick test to check currentsong info"""
    widget = mpd2_manager.c.widget["mpd2"]
    # MockMPD's currentsong() returns the 1-based track number.
    assert widget.info()["text"] == "1: Rick Astley/Never gonna give you up"
@pytest.mark.parametrize(
    "mpd2_manager",
    [{"undefined_value": "Unknown", "status_format": "{title} ({year})"}],
    indirect=True,
)
def test_mpd2_widget_custom_undefined_value(mpd2_manager):
    """Quick test to check undefined_value option"""
    widget = mpd2_manager.c.widget["mpd2"]
    # {year} is absent from the track tags, so undefined_value is shown.
    assert widget.info()["text"] == "Never gonna give you up (Unknown)"
def test_mpd2_widget_dynamic_artist_value(mpd2_manager):
"""Quick test to check dynamic artist value"""
widget = mpd2_manager.c.widget["mpd2"]
widget.eval("self.client._index = 4")
widget.eval("self.update(self.poll())")
assert widget.info()["text"] == "⏸ C418/Sweden [-----]" |
6,604 | teller3 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('../')
from auto_scan_test import AutoScanTest, IgnoreReasons
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume
import hypothesis.strategies as st
from functools import partial
import random
import numpy as np
class TestTransposeOp(AutoScanTest):
    def __init__(self, *args, **kwargs):
        """Register every target place (hardware / precision / layout
        combination) on which the transpose op should be exercised."""
        AutoScanTest.__init__(self, *args, **kwargs)
        x86_places = [
            Place(TargetType.X86, PrecisionType.FP32, DataLayoutType.NCHW)
        ]
        self.enable_testing_on_place(places=x86_places)
        arm_places = [
            Place(TargetType.ARM, PrecisionType.FP32, DataLayoutType.NCHW)
        ]
        self.enable_testing_on_place(places=arm_places)
        # opencl having diffs , big diff
        opencl_places = [
            Place(TargetType.OpenCL, PrecisionType.FP16,
                  DataLayoutType.ImageDefault), Place(
                      TargetType.OpenCL, PrecisionType.FP16,
                      DataLayoutType.ImageFolder),
            Place(TargetType.OpenCL, PrecisionType.FP32, DataLayoutType.NCHW),
            Place(TargetType.OpenCL, PrecisionType.Any,
                  DataLayoutType.ImageDefault), Place(
                      TargetType.OpenCL, PrecisionType.Any,
                      DataLayoutType.ImageFolder),
            Place(TargetType.OpenCL, PrecisionType.Any, DataLayoutType.NCHW),
            Place(TargetType.Host, PrecisionType.FP32)
        ]
        self.enable_testing_on_place(places=opencl_places)
        # NOTE(review): metal_places is built but never enabled (the call
        # below is commented out) -- presumably disabled on purpose;
        # confirm before deleting.
        metal_places = [
            Place(TargetType.Metal, PrecisionType.FP32,
                  DataLayoutType.MetalTexture2DArray),
            Place(TargetType.Metal, PrecisionType.FP16,
                  DataLayoutType.MetalTexture2DArray),
            Place(TargetType.ARM, PrecisionType.FP32),
            Place(TargetType.Host, PrecisionType.FP32)
        ]
        # self.enable_testing_on_place(places=metal_places)
        self.enable_testing_on_place(
            TargetType.ARM,
            PrecisionType.FP16,
            DataLayoutType.NCHW,
            thread=[1, 4])
        self.enable_testing_on_place(TargetType.NNAdapter, PrecisionType.FP32)
        self.enable_devices_on_nnadapter(device_names=[
            "kunlunxin_xtcl", "nvidia_tensorrt", "intel_openvino"
        ])
    def is_program_valid(self,
                         program_config: ProgramConfig,
                         predictor_config: CxxConfig) -> bool:
        """Every sampled transpose program is valid; no extra constraints."""
        return True
    def sample_program_configs(self, draw):
        """Draw a random transpose program: input shape, dtype and axis
        permutation (hypothesis strategy)."""
        N = draw(st.integers(min_value=1, max_value=4))
        C = draw(st.integers(min_value=1, max_value=17))
        H = draw(st.integers(min_value=1, max_value=17))
        W = draw(st.integers(min_value=1, max_value=17))
        # [] exercises the 0D-tensor path.
        in_shape = draw(st.sampled_from([[N, C, H, W], []]))
        # tranpose only support float32
        # so we only feed input np.float
        in_dtype = draw(st.sampled_from([np.float32]))
        use_mkldnn_data = False
        target = self.get_target()
        if (target == "X86"):
            use_mkldnn_data = True
        axis_int32_data = draw(
            st.lists(
                st.integers(
                    min_value=0, max_value=3), min_size=3, max_size=4))
        if (len(axis_int32_data) == 3):
            # Keep only non-identity permutations of (0, 1, 2); a 3-long
            # axis implies a rank-3 input, so reduce the shape accordingly.
            assume(
                sorted(axis_int32_data) == [0, 1, 2] and
                axis_int32_data != [0, 1, 2])
            in_shape = draw(st.sampled_from([[C, H, W]]))
        elif (len(axis_int32_data) == 4):
            # Likewise: non-identity permutations of (0, 1, 2, 3).
            assume(
                sorted(axis_int32_data) == [0, 1, 2, 3] and
                axis_int32_data != [0, 1, 2, 3])
        if in_shape == []:
            # 0D tensors take an empty axis list.
            axis_int32_data = []

        def generate_X_data():
            return np.random.normal(0.0, 5.0, in_shape).astype(in_dtype)

        if (target == "Metal" and len(axis_int32_data) == 4):
            # NOTE(review): appears to discard draws whose transposed
            # extents exceed a 2048 bound on Metal -- confirm the limit's
            # origin (texture size?).
            for i in range(4):
                for j in range(4):
                    if i != j:
                        assume(in_shape[axis_int32_data.index(i)] *
                               (in_shape[axis_int32_data.index(j)] + 3
                                ) / 4 <= 2048)
        transpose_op = OpConfig(
            type="transpose",
            inputs={"X": ["X_data"]},
            outputs={"Out": ["output_data"]},
            attrs={
                "axis": axis_int32_data,
                "data_format": "AnyLayout",
                "use_mkldnn": use_mkldnn_data,
            })
        program_config = ProgramConfig(
            ops=[transpose_op],
            weights={},
            inputs={
                "X_data": TensorConfig(data_gen=partial(generate_X_data)),
            },
            outputs=["output_data"])
        return program_config
def sample_predictor_configs(self):
atol, rtol = 1e-5, 1e-5
target_str = self.get_target()
if target_str == "Metal":
atol, rtol = 5e-4, 5e-4
return self.get_predictor_configs(), ["transpose"], (atol, rtol)
    def add_ignore_pass_case(self):
        """Register known-failure combinations that should be skipped."""

        def _teller1(program_config, predictor_config):
            # NOTE(review): x_shape is computed but unused; every Metal
            # run is skipped unconditionally.
            x_shape = list(program_config.inputs["X_data"].shape)
            if predictor_config.target() == TargetType.Metal:
                return True

        self.add_ignore_check_case(
            _teller1, IgnoreReasons.ACCURACY_ERROR,
            "The op output has diff on metal. We need to fix it as soon as possible."
        )

        def _teller2(program_config, predictor_config):
            if "nvidia_tensorrt" in self.get_nnadapter_device_name():
                in_shape = program_config.inputs["X_data"].shape
                axis = program_config.ops[0].attrs["axis"]
                # Skip rank-1 inputs and permutations that move axis 0
                # (unsupported on nvidia_tensorrt).
                if len(in_shape) == 1 or axis[0] != 0:
                    return True

        self.add_ignore_check_case(
            _teller2, IgnoreReasons.PADDLELITE_NOT_SUPPORT,
            "Lite does not support 'in_shape_size == 1' or 'axis[0] != 0' on nvidia_tensorrt."
        )

        def METHOD_NAME(program_config, predictor_config):
            target_type = predictor_config.target()
            in_x_shape = list(program_config.inputs["X_data"].shape)
            # 0D tensors are only tested on CPU targets.
            if target_type not in [
                    TargetType.ARM, TargetType.Host, TargetType.X86
            ]:
                if len(in_x_shape) == 0:
                    return True

        self.add_ignore_check_case(
            METHOD_NAME, IgnoreReasons.PADDLELITE_NOT_SUPPORT,
            "Only test 0D-tensor on CPU(ARM/Host/X86/Metal/OpenCL) now.")
    def test(self, *args, **kwargs):
        """Entry point: run 25 random examples without quantization."""
        self.run_and_statis(quant=False, max_examples=25)
if __name__ == "__main__":
unittest.main(argv=['']) |
6,605 | reachable |
"""OID tree traversal for the garbage collection phase of packing.
Optimized for memory efficiency. Uses sets of native integers
rather than Python integers because native integers take up a lot less
room in RAM.
"""
from __future__ import absolute_import
import collections
import gc
import logging
import BTrees
from relstorage._compat import iteritems
IIunion32 = BTrees.family32.II.union # pylint:disable=no-member
IISet32 = BTrees.family32.II.Set
IISet64 = BTrees.family64.II.Set
log = logging.getLogger(__name__)
class IISet32X(object):
    """An IISet32 with a Python-set write buffer for cheap inserts.

    New keys accumulate in a small Python set and are merged into the
    BTree-backed IISet32 in batches, avoiding one BTree update per add.
    """

    def __init__(self):
        self._base = IISet32()
        self._layer = set()

    def add(self, key):
        pending = self._layer
        if key in pending or key in self._base:
            return
        pending.add(key)
        # Flush the buffer once it grows large enough to amortize the
        # cost of the BTree union.
        if len(pending) > 10000:
            self._apply()

    def _apply(self):
        """Merge the buffered keys into the base set and clear the buffer."""
        if self._layer:
            self._base = IIunion32(self._base, IISet32(self._layer))
            self._layer.clear()

    def __iter__(self):
        # Flush first so iteration sees every key exactly once.
        self._apply()
        return iter(self._base)

    def __contains__(self, key):
        return key in self._layer or key in self._base
class TreeMarker(object):
"""Finds all OIDs reachable from a set of root OIDs."""
# This class groups OIDs by their upper 33 bits. Why 33 instead
# of 32? Because IISet and IIBucket are signed, they can not accept
# positive integers >= (1 << 31). The solution is to simply
# add an extra grouping bit.
hi = 0xffffffff80000000 # 33 high bits
lo = 0x000000007fffffff # 31 low bits
    def __init__(self):
        """Initialize empty reference and reachability maps."""
        # self._refs maps the high 33 bits of a source OID to the high 33
        # bits of a target OID, then to a 64-bit set packing both low
        # 31-bit halves:
        #     {from_oid_hi: {to_oid_hi: IISet64([from_oid_lo << 32 | to_oid_lo])}}
        self._refs = collections.defaultdict(
            lambda: collections.defaultdict(IISet64))
        # self._reachable: {oid_hi: IISet32X} -- OIDs proven reachable so far.
        self._reachable = collections.defaultdict(IISet32X)
        # Running total of distinct reachable OIDs found by mark().
        self.reachable_count = 0
def add_refs(self, pairs):
"""Add a list of (from_oid, to_oid) reference pairs.
`from_oid` and `to_oid` must be 64 bit integers.
"""
refs = self._refs
hi = self.hi
lo = self.lo
for from_oid, to_oid in pairs:
s = refs[from_oid & hi][to_oid & hi]
s.add(((from_oid & lo) << 32) | (to_oid & lo))
    def mark(self, oids):
        """Mark specific OIDs and descendants of those OIDs as reachable.

        Performs a breadth-first traversal over self._refs starting from
        *oids* and returns the number of passes performed.
        """
        hi = self.hi
        lo = self.lo
        pass_count = 1
        # this_pass: {oid_hi: IISet32X} -- the current BFS frontier.
        this_pass = collections.defaultdict(IISet32X)
        for oid in sorted(oids):
            # Split each 64-bit OID into its 33 high / 31 low bit halves.
            this_pass[oid & hi].add(int(oid & lo))
        while this_pass:
            # Encourage prompt reuse of memory freed by the previous pass.
            gc.collect()
            found, next_pass = self._mark_pass(this_pass)
            log.debug(
                "Found %d more referenced object(s) in pass %d",
                found, pass_count)
            if not found:
                break
            self.reachable_count += found
            pass_count += 1
            this_pass = next_pass
        return pass_count
    def _mark_pass(self, this_pass):
        """Mark OIDs as reachable. Produce an OID set for the next pass.

        Return (found, next_pass), where `found` is the number of
        new OIDs marked and `next_pass` is the collection of OIDs to
        follow in the next pass.
        """
        # pylint:disable=too-many-locals
        # next_pass: {oid_hi: IISet32X}
        next_pass = collections.defaultdict(IISet32X)
        found = 0
        refs = self._refs
        METHOD_NAME = self._reachable
        lo = self.lo
        for oid_hi, oids_lo in iteritems(this_pass):
            from_reachable_set = METHOD_NAME[oid_hi]
            for oid_lo in oids_lo:
                if oid_lo in from_reachable_set:
                    # This OID is already known to be reachable.
                    continue
                found += 1
                from_reachable_set.add(oid_lo)
                if oid_hi not in refs:
                    # This OID doesn't reference anything.
                    continue
                # Add the children of this OID to next_pass.
                for to_oid_hi, s in iteritems(refs[oid_hi]):
                    # Keys packing this source OID occupy the contiguous
                    # range [oid_lo << 32, (oid_lo << 32) | 0xffffffff].
                    min_key = oid_lo << 32
                    max_key = min_key | 0xffffffff
                    keys = s.keys(min=min_key, max=max_key)
                    if not keys:
                        # No references found here.
                        continue
                    to_reachable_set = METHOD_NAME[to_oid_hi]
                    next_pass_add = next_pass[to_oid_hi].add
                    for key in keys:
                        # Recover the target's low 31 bits from the packed key.
                        child_oid_lo = int(key & lo)
                        if child_oid_lo not in to_reachable_set:
                            next_pass_add(child_oid_lo)
        return found, next_pass
def free_refs(self):
"""Free the collection of refs to save RAM."""
self._refs = None
gc.collect()
@property
def METHOD_NAME(self):
"""Iterate over all the reachable OIDs."""
for oid_hi, oids_lo in iteritems(self._reachable):
for oid_lo in oids_lo:
# Decode the OID.
yield oid_hi | oid_lo |
6,606 | vocab init | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
from fairseq.model_parallel.models.transformer import ModelParallelTransformerDecoder
from fairseq.models import register_model, register_model_architecture
from fairseq.models.transformer_lm import TransformerLanguageModel
try:
from fairseq.model_parallel.megatron.mpu import VocabParallelEmbedding
has_megatron_submodule = True
except (ImportError, ModuleNotFoundError):
has_megatron_submodule = False
DEFAULT_MAX_TARGET_POSITIONS = 1024
@register_model("model_parallel_transformer_lm")
class ModelParallelTransformerLanguageModel(TransformerLanguageModel):
@staticmethod
def add_args(parser):
TransformerLanguageModel.add_args(parser)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
if not has_megatron_submodule:
raise ImportError(
"\n\nPlease install the megatron submodule:"
"\n\n git submodule update --init "
"fairseq/model_parallel/megatron"
)
# make sure all arguments are present in older models
base_lm_architecture(args)
task.source_dictionary.pad_to_multiple_(args.model_parallel_size * 8)
task.target_dictionary.pad_to_multiple_(args.model_parallel_size * 8)
if args.decoder_layers_to_keep:
args.decoder_layers = len(args.decoder_layers_to_keep.split(","))
if getattr(args, "max_target_positions", None) is None:
args.max_target_positions = getattr(
args, "tokens_per_sample", DEFAULT_MAX_TARGET_POSITIONS
)
if args.character_embeddings:
raise NotImplementedError(
"Character embeddings is not supported for model parallel"
)
elif args.adaptive_input:
raise NotImplementedError(
"Adaptive input is not supported for model parallel"
)
else:
embed_tokens = cls.build_embedding(
args, task.source_dictionary, args.decoder_input_dim
)
decoder = ModelParallelTransformerDecoder(
args,
task.target_dictionary,
embed_tokens,
no_encoder_attn=True,
)
return cls(decoder)
@classmethod
def build_embedding(cls, args, dictionary, embed_dim, path=None):
def METHOD_NAME(tensor, **kwargs):
nn.init.normal_(tensor, mean=0, std=embed_dim**-0.5)
nn.init.constant_(tensor[1], 0)
embed_tokens = VocabParallelEmbedding(
len(dictionary), embed_dim, dictionary.pad(), init_method=METHOD_NAME
)
return embed_tokens
def base_lm_architecture(args):
    """Fill every missing architecture hyper-parameter on ``args`` with the
    baseline transformer-LM default, mutating ``args`` in place."""
    # backward compatibility for older model checkpoints
    if hasattr(args, "no_tie_adaptive_proj"):
        # previous models defined --no-tie-adaptive-proj, so use the existence
        # of that option to determine if this is an "old" model checkpoint
        args.no_decoder_final_norm = True  # old models always set this to True
        if args.no_tie_adaptive_proj is False:
            args.tie_adaptive_proj = True
    if hasattr(args, "decoder_final_norm"):
        args.no_decoder_final_norm = not args.decoder_final_norm

    # Defaults that other defaults depend on come first.
    for option, default in (
        ("activation_fn", "relu"),
        ("dropout", 0.1),
        ("attention_dropout", 0.0),
        ("activation_dropout", 0.0),
        ("relu_dropout", 0.0),
        ("decoder_embed_dim", 512),
    ):
        setattr(args, option, getattr(args, option, default))

    # These two fall back to the (possibly user-supplied) embedding dim.
    args.decoder_output_dim = getattr(
        args, "decoder_output_dim", args.decoder_embed_dim
    )
    args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)

    # Model training is not stable without this
    args.decoder_normalize_before = True

    # Remaining independent defaults, applied only when absent.
    for option, default in (
        ("decoder_ffn_embed_dim", 2048),
        ("decoder_layers", 6),
        ("decoder_attention_heads", 8),
        ("no_decoder_final_norm", False),
        ("adaptive_softmax_cutoff", None),
        ("adaptive_softmax_dropout", 0),
        ("adaptive_softmax_factor", 4),
        ("no_token_positional_embeddings", False),
        ("share_decoder_input_output_embed", False),
        ("character_embeddings", False),
        (
            "character_filters",
            "[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]",
        ),
        ("character_embedding_dim", 4),
        ("char_embedder_highway_layers", 2),
        ("adaptive_input", False),
        ("adaptive_input_factor", 4),
        ("adaptive_input_cutoff", None),
        ("tie_adaptive_weights", False),
        ("tie_adaptive_proj", False),
        ("decoder_learned_pos", False),
        ("decoder_layerdrop", 0.0),
        ("decoder_layers_to_keep", None),
        ("layernorm_embedding", False),
        ("no_scale_embedding", False),
        ("quant_noise_pq", 0.0),
        ("quant_noise_pq_block_size", 8),
        ("quant_noise_scalar", 0.0),
        ("add_bos_token", False),
    ):
        setattr(args, option, getattr(args, option, default))
@register_model_architecture("model_parallel_transformer_lm", "transformer_lm_megatron")
def transformer_lm_megatron(args):
    """Megatron-scale preset: 72 layers, 3072-dim embeddings, GELU.

    Only fills values the user did not supply, then delegates the rest to
    ``base_lm_architecture``.
    """
    for option, default in (
        ("decoder_embed_dim", 3072),
        ("decoder_ffn_embed_dim", 3072 * 4),
        ("decoder_layers", 72),
        ("decoder_attention_heads", 32),
        ("dropout", 0.1),
        ("attention_dropout", 0.1),
        ("activation_fn", "gelu"),
    ):
        setattr(args, option, getattr(args, option, default))
    base_lm_architecture(args)
@register_model_architecture(
    "model_parallel_transformer_lm", "transformer_lm_megatron_11b"
)
def transformer_lm_megatron_11b(args):
    """11B-parameter preset: like ``transformer_lm_megatron`` but with a
    6x (instead of 4x) FFN expansion factor."""
    for option, default in (
        ("decoder_embed_dim", 3072),
        ("decoder_ffn_embed_dim", 3072 * 6),
        ("decoder_layers", 72),
        ("decoder_attention_heads", 32),
        ("dropout", 0.1),
        ("attention_dropout", 0.1),
        ("activation_fn", "gelu"),
    ):
        setattr(args, option, getattr(args, option, default))
    base_lm_architecture(args)
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
# !/usr/bin/env python
"""
Provides "diff-like" comparison of images.
Currently relies on matplotlib for image processing so limited to PNG format.
"""
import argparse
from pathlib import Path
import re
import sys
import warnings
# Force iris.tests to use the ```tkagg``` backend by using the '-d'
# command-line argument as idiff is an interactive tool that requires a
# gui interface.
sys.argv.append("-d")
from PIL import Image # noqa
import matplotlib.image as mimg # noqa
import matplotlib.pyplot as plt # noqa
import matplotlib.testing.compare as mcompare # noqa
from matplotlib.testing.exceptions import ImageComparisonFailure # noqa
import matplotlib.widgets as mwidget # noqa
import iris.tests # noqa
import iris.tests.graphics as graphics # noqa
# Allows restoration of test id from result image name
_RESULT_NAME_PATTERN = re.compile(graphics.RESULT_PREFIX + r"(.*).png")


def extract_test_key(result_image_name):
    """
    Extracts the name of the test which a result image refers to
    """
    match = _RESULT_NAME_PATTERN.match(str(result_image_name))
    if match is None:
        raise ValueError(
            f"Incorrectly named image in result dir: {result_image_name}"
        )
    return match.group(1)
_POSTFIX_DIFF = "-failed-diff.png"  # suffix matplotlib appends to diff images


def METHOD_NAME(
    test_id,
    status,
    phash,
    expected_path,
    result_path,
    diff_fname,
):
    """Show expected / result / diff images side by side and let the user
    Accept, Reject, or Skip the test result via on-figure buttons.

    Accept registers ``phash`` for ``test_id`` (unless already present) and
    promotes the result image to a reference image; Reject discards the
    result; Skip keeps both files for later. ``status`` is a progress line
    rendered on the figure.
    """
    fig = plt.figure(figsize=(14, 12))
    plt.suptitle(expected_path.name)
    # 2x2 grid: expected (top-left), result (top-right), diff (bottom-left);
    # axes share pan/zoom so the images stay aligned while inspecting.
    ax = plt.subplot(221)
    ax.imshow(mimg.imread(expected_path))
    ax = plt.subplot(222, sharex=ax, sharey=ax)
    ax.imshow(mimg.imread(result_path))
    ax = plt.subplot(223, sharex=ax, sharey=ax)
    ax.imshow(mimg.imread(diff_fname))
    result_dir = result_path.parent
    repo = graphics.read_repo_json()

    def accept(event):
        # Register the hash and promote the result to a reference image;
        # if the test already has a hash, treat the result as a duplicate.
        if test_id not in repo:
            repo[test_id] = phash
            graphics.write_repo_json(repo)
            out_file = result_dir / (test_id + ".png")
            result_path.rename(out_file)
            msg = f"ACCEPTED: {result_path.name} -> {out_file.name}"
            print(msg)
        else:
            msg = f"DUPLICATE: {result_path.name} -> {expected_path.name} (ignored)"
            print(msg)
            result_path.unlink()
        diff_fname.unlink()
        plt.close()

    def reject(event):
        # Discard the result; nothing is registered in the repo.
        if test_id not in repo:
            print(f"REJECTED: {result_path.name}")
        else:
            msg = f"DUPLICATE: {result_path.name} -> {expected_path.name} (ignored)"
            print(msg)
        result_path.unlink()
        diff_fname.unlink()
        plt.close()

    def skip(event):
        # Let's keep both the result and the diff files.
        print(f"SKIPPED: {result_path.name}")
        plt.close()

    # Button row along the bottom-right of the figure.
    ax_accept = plt.axes([0.59, 0.05, 0.1, 0.075])
    ax_reject = plt.axes([0.7, 0.05, 0.1, 0.075])
    ax_skip = plt.axes([0.81, 0.05, 0.1, 0.075])
    baccept = mwidget.Button(ax_accept, "Accept")
    baccept.on_clicked(accept)
    breject = mwidget.Button(ax_reject, "Reject")
    breject.on_clicked(reject)
    bskip = mwidget.Button(ax_skip, "Skip")
    bskip.on_clicked(skip)
    plt.text(0.59, 0.15, status, transform=fig.transFigure)
    plt.show()
def step_over_diffs(result_dir, display=True):
    """Yield viewer arguments for every comparable test result image.

    Scans ``result_dir`` for PNG result images, computes their perceptual
    hashes against the registered reference images, writes a diff image for
    each, and yields ``(test_key, status, phash, expected_path, result_path,
    diff_path)`` when ``display`` is True, otherwise just the last four.
    Unregistered results are skipped with a warning.
    """
    processed = False
    if display:
        msg = "\nComparing the expected image with the test result image."
        print(msg)
    # Remove old image diff results.
    for fname in result_dir.glob(f"*{_POSTFIX_DIFF}"):
        fname.unlink()
    reference_image_dir = Path(iris.tests.get_data_path("images"))
    repo = graphics.read_repo_json()
    # Filter out all non-test result image files.
    results = []
    for fname in sorted(result_dir.glob(f"{graphics.RESULT_PREFIX}*.png")):
        # We only care about PNG images.
        try:
            im = Image.open(fname)
            if im.format != "PNG":
                # Ignore - it's not a png image.
                continue
        except IOError:
            # Ignore - it's not an image.
            continue
        results.append(fname)
    count = len(results)
    for count_index, result_path in enumerate(results):
        test_key = extract_test_key(result_path.name)
        test_key = graphics.fully_qualify(test_key, repo)
        reference_image_path = reference_image_dir / (test_key + ".png")
        try:
            # Calculate the test result perceptual image hash.
            phash = graphics.get_phash(result_path)
            distance = graphics.get_phash(reference_image_path) - phash
        except FileNotFoundError:
            wmsg = "Ignoring unregistered test result {!r}."
            warnings.warn(wmsg.format(test_key))
            continue
        processed = True
        try:
            # Creates the diff file when the images aren't identical
            mcompare.compare_images(reference_image_path, result_path, tol=0)
        except (ValueError, ImageComparisonFailure) as e:
            # Only comparison failures (e.g. size mismatch) are tolerated;
            # any other exception propagates with its original traceback.
            # (Previously a broad ``except Exception`` with isinstance
            # checks and an explicit re-raise achieved the same effect.)
            print(f"Could not compare {result_path}: {e}")
            continue
        diff_path = result_dir / Path(f"{result_path.stem}{_POSTFIX_DIFF}")
        args = phash, reference_image_path, result_path, diff_path
        if display:
            status = f"Image {count_index + 1} of {count}: hamming distance = {distance}"
            prefix = test_key, status
            yield prefix + args
        else:
            yield args
    if display and not processed:
        print("\nThere are no iris test result images to process.\n")
if __name__ == "__main__":
    # Default to the result-image directory that iris.tests writes into.
    default = Path(iris.tests.__file__).parent / Path(
        "result_image_comparison"
    )
    description = "Iris graphic test difference tool."
    formatter_class = argparse.RawTextHelpFormatter
    parser = argparse.ArgumentParser(
        description=description, formatter_class=formatter_class
    )
    help = "path to iris tests result image directory (default: %(default)s)"
    parser.add_argument("--resultdir", "-r", default=default, help=help)
    help = 'force "iris.tests" to use the tkagg backend (default: %(default)s)'
    parser.add_argument("-d", action="store_true", default=True, help=help)
    args = parser.parse_args()
    result_dir = Path(args.resultdir)
    if not result_dir.is_dir():
        emsg = f"Invalid results directory: {result_dir}"
        raise ValueError(emsg)
    # Step interactively through every diff (stray trailing separator junk
    # removed from the original final line).
    for args in step_over_diffs(result_dir):
        METHOD_NAME(*args)
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import logging
from typing import (
Any, Iterator, Optional, Union,
)
from amundsen_common.utils.atlas import AtlasCommonParams, AtlasDashboardTypes
from amundsen_rds.models import RDSModel
from amundsen_rds.models.dashboard import DashboardTimestamp as RDSDashboardTimestamp
from databuilder.models.atlas_entity import AtlasEntity
from databuilder.models.atlas_relationship import AtlasRelationship
from databuilder.models.atlas_serializable import AtlasSerializable
from databuilder.models.dashboard.dashboard_metadata import DashboardMetadata
from databuilder.models.graph_node import GraphNode
from databuilder.models.graph_relationship import GraphRelationship
from databuilder.models.graph_serializable import GraphSerializable
from databuilder.models.table_serializable import TableSerializable
from databuilder.models.timestamp import timestamp_constants
from databuilder.serializers.atlas_serializer import get_entity_attrs
from databuilder.utils.atlas import AtlasSerializedEntityOperation
LOGGER = logging.getLogger(__name__)
class DashboardLastModifiedTimestamp(GraphSerializable, TableSerializable, AtlasSerializable):
    """
    A model that encapsulate Dashboard's last modified timestamp in epoch
    """

    # Key format for the timestamp node attached to a dashboard.
    DASHBOARD_LAST_MODIFIED_KEY_FORMAT = '{product}_dashboard://{cluster}.{dashboard_group_id}/' \
                                         '{dashboard_id}/_last_modified_timestamp'

    def __init__(self,
                 dashboard_group_id: Optional[str],
                 dashboard_id: Optional[str],
                 last_modified_timestamp: int,
                 product: Optional[str] = '',
                 cluster: str = 'gold',
                 **kwargs: Any
                 ) -> None:
        self._dashboard_group_id = dashboard_group_id
        self._dashboard_id = dashboard_id
        # Epoch seconds of the dashboard's last modification.
        self._last_modified_timestamp = last_modified_timestamp
        self._product = product
        self._cluster = cluster
        # One lazily-consumed iterator per serialization target
        # (graph nodes, graph relations, RDS records, Atlas entities).
        self._node_iterator = self._create_node_iterator()
        self._relation_iterator = self._create_relation_iterator()
        self._record_iterator = self._create_record_iterator()
        self._atlas_entity_iterator = self._create_next_atlas_entity()

    def create_next_node(self) -> Union[GraphNode, None]:
        # Returns None once the iterator is exhausted (serializer contract).
        try:
            return next(self._node_iterator)
        except StopIteration:
            return None

    def _create_node_iterator(self) -> Iterator[GraphNode]:
        # Single Timestamp node labelled as a "last updated" timestamp.
        node_attributes = {
            timestamp_constants.TIMESTAMP_PROPERTY: self._last_modified_timestamp,
            timestamp_constants.TIMESTAMP_NAME_PROPERTY: timestamp_constants.TimestampName.last_updated_timestamp.name
        }
        node = GraphNode(
            key=self._get_last_modified_node_key(),
            label=timestamp_constants.NODE_LABEL,
            attributes=node_attributes
        )
        yield node

    def create_next_relation(self) -> Union[GraphRelationship, None]:
        try:
            return next(self._relation_iterator)
        except StopIteration:
            return None

    def _create_relation_iterator(self) -> Iterator[GraphRelationship]:
        # Dashboard -[LAST_UPDATED_AT]-> Timestamp relationship.
        relationship = GraphRelationship(
            start_key=DashboardMetadata.DASHBOARD_KEY_FORMAT.format(
                product=self._product,
                cluster=self._cluster,
                dashboard_group=self._dashboard_group_id,
                dashboard_name=self._dashboard_id
            ),
            start_label=DashboardMetadata.DASHBOARD_NODE_LABEL,
            end_key=self._get_last_modified_node_key(),
            end_label=timestamp_constants.NODE_LABEL,
            type=timestamp_constants.LASTUPDATED_RELATION_TYPE,
            reverse_type=timestamp_constants.LASTUPDATED_REVERSE_RELATION_TYPE,
            attributes={}
        )
        yield relationship

    def METHOD_NAME(self) -> Union[AtlasEntity, None]:
        try:
            return next(self._atlas_entity_iterator)
        except StopIteration:
            return None

    def _create_next_atlas_entity(self) -> Iterator[AtlasEntity]:
        # last modified
        # Updates the existing dashboard entity's last-modified attribute
        # rather than creating a new entity (operation=UPDATE).
        attrs_mapping = [
            (
                AtlasCommonParams.qualified_name, DashboardMetadata.DASHBOARD_KEY_FORMAT.format(
                    product=self._product,
                    cluster=self._cluster,
                    dashboard_group=self._dashboard_group_id,
                    dashboard_name=self._dashboard_id
                )
            ),
            (AtlasCommonParams.last_modified_timestamp, self._last_modified_timestamp),
        ]
        dashboard_entity_attrs = get_entity_attrs(attrs_mapping)
        last_modified = AtlasEntity(
            typeName=AtlasDashboardTypes.metadata,
            operation=AtlasSerializedEntityOperation.UPDATE,
            relationships=None,
            attributes=dashboard_entity_attrs
        )
        yield last_modified

    def create_next_atlas_relation(self) -> Union[AtlasRelationship, None]:
        # The Atlas entity is an in-place UPDATE, so no relationship is emitted.
        return None

    def create_next_record(self) -> Union[RDSModel, None]:
        try:
            return next(self._record_iterator)
        except StopIteration:
            return None

    def _create_record_iterator(self) -> Iterator[RDSModel]:
        # Relational (RDS) representation of the same timestamp row.
        yield RDSDashboardTimestamp(
            rk=self._get_last_modified_node_key(),
            timestamp=self._last_modified_timestamp,
            name=timestamp_constants.TimestampName.last_updated_timestamp.name,
            dashboard_rk=DashboardMetadata.DASHBOARD_KEY_FORMAT.format(
                product=self._product,
                cluster=self._cluster,
                dashboard_group=self._dashboard_group_id,
                dashboard_name=self._dashboard_id
            )
        )

    def _get_last_modified_node_key(self) -> str:
        # Deterministic key shared by the graph node and the RDS record.
        return DashboardLastModifiedTimestamp.DASHBOARD_LAST_MODIFIED_KEY_FORMAT.format(
            product=self._product,
            cluster=self._cluster,
            dashboard_group_id=self._dashboard_group_id,
            dashboard_id=self._dashboard_id,
        )
def __repr__(self) -> str:
return f'DashboardLastModifiedTimestamp({self._dashboard_group_id!r}, {self._dashboard_id!r}, ' \
f'{self._last_modified_timestamp!r}, {self._product!r}, {self._cluster!r})' |
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pa = None
from ... import opcodes as OperandDef
from ...core import OutputType
from ...config import options
from ...serialization.serializables import BoolField
from ...utils import lazy_import
from ..arrays import ArrowListArray, ArrowListDtype
from .core import DataFrameReductionOperand, DataFrameReductionMixin, CustomReduction
cudf = lazy_import("cudf")
class NuniqueReduction(CustomReduction):
    """Chunk-wise reduction computing the number of distinct values.

    ``pre`` deduplicates each chunk, ``agg`` merges deduplicated chunks,
    and ``post`` counts the final distinct values. Intermediate uniques can
    be stored as Arrow list arrays to reduce memory when enabled.
    """

    pre_with_agg = True

    def __init__(
        self, name="unique", axis=0, dropna=True, use_arrow_dtype=False, is_gpu=False
    ):
        super().__init__(name, is_gpu=is_gpu)
        self._axis = axis
        self._dropna = dropna
        self._use_arrow_dtype = use_arrow_dtype

    @staticmethod
    def _drop_duplicates_to_arrow(v, explode=False):
        """Deduplicate Series *v* into a one-element Arrow list array,
        falling back to a plain Python list for mixed dtypes."""
        if explode:
            v = v.explode()
        try:
            return ArrowListArray([v.drop_duplicates().to_numpy()])
        except pa.ArrowInvalid:
            # fallback due to diverse dtypes
            return [v.drop_duplicates().to_list()]

    def pre(self, in_data):  # noqa: W0221  # pylint: disable=arguments-differ
        """Deduplicate one chunk; DataFrames keep one unique-list per label."""
        xdf = cudf if self.is_gpu() else pd
        if isinstance(in_data, xdf.Series):
            unique_values = in_data.drop_duplicates()
            return xdf.Series(unique_values, name=in_data.name)
        else:
            if self._axis == 0:
                data = dict()
                # ``items()`` replaces ``iteritems()``, which was removed in
                # pandas 2.0 — same (label, Series) iteration. Assumed
                # equally available on cudf; TODO confirm for the GPU path.
                for d, v in in_data.items():
                    if not self._use_arrow_dtype or xdf is cudf:
                        data[d] = [v.drop_duplicates().to_list()]
                    else:
                        data[d] = self._drop_duplicates_to_arrow(v)
                df = xdf.DataFrame(data)
            else:
                df = xdf.DataFrame(columns=[0])
                for d, v in in_data.iterrows():
                    if not self._use_arrow_dtype or xdf is cudf:
                        df.loc[d] = [v.drop_duplicates().to_list()]
                    else:
                        df.loc[d] = self._drop_duplicates_to_arrow(v)
            return df

    def agg(self, in_data):  # noqa: W0221  # pylint: disable=arguments-differ
        """Merge deduplicated chunks: explode the lists, dedupe again."""
        xdf = cudf if self.is_gpu() else pd
        if isinstance(in_data, xdf.Series):
            unique_values = in_data.explode().drop_duplicates()
            return xdf.Series(unique_values, name=in_data.name)
        else:
            if self._axis == 0:
                data = dict()
                # See note in ``pre`` about items() vs removed iteritems().
                for d, v in in_data.items():
                    if not self._use_arrow_dtype or xdf is cudf:
                        data[d] = [v.explode().drop_duplicates().to_list()]
                    else:
                        v = pd.Series(v.to_numpy())
                        data[d] = self._drop_duplicates_to_arrow(v, explode=True)
                df = xdf.DataFrame(data)
            else:
                df = xdf.DataFrame(columns=[0])
                for d, v in in_data.iterrows():
                    if not self._use_arrow_dtype or xdf is cudf:
                        df.loc[d] = [v.explode().drop_duplicates().to_list()]
                    else:
                        df.loc[d] = self._drop_duplicates_to_arrow(v, explode=True)
            return df

    def post(self, in_data):  # noqa: W0221  # pylint: disable=arguments-differ
        """Count the merged unique values, honoring ``dropna``."""
        xdf = cudf if self.is_gpu() else pd
        if isinstance(in_data, xdf.Series):
            return in_data.explode().nunique(dropna=self._dropna)
        else:
            in_data_iter = (
                in_data.items() if self._axis == 0 else in_data.iterrows()
            )
            data = dict()
            for d, v in in_data_iter:
                if isinstance(v.dtype, ArrowListDtype):
                    # Arrow-backed column: convert to a plain Series first.
                    v = xdf.Series(v.to_numpy())
                data[d] = v.explode().nunique(dropna=self._dropna)
            return xdf.Series(data)
class DataFrameNunique(DataFrameReductionOperand, DataFrameReductionMixin):
    """Operand implementing ``nunique`` for both DataFrame and Series."""

    _op_type_ = OperandDef.NUNIQUE
    _func_name = "nunique"

    # Whether NaN values are excluded from the count.
    _dropna = BoolField("dropna")
    # Whether intermediate unique values use Arrow list arrays.
    _use_arrow_dtype = BoolField("use_arrow_dtype")

    def __init__(self, dropna=None, use_arrow_dtype=None, **kw):
        super().__init__(_dropna=dropna, _use_arrow_dtype=use_arrow_dtype, **kw)

    @property
    def dropna(self):
        return self._dropna

    @property
    def use_arrow_dtype(self):
        return self._use_arrow_dtype

    @classmethod
    def get_reduction_callable(cls, op):
        # Build the chunk-level reduction executed by the reduction framework.
        return NuniqueReduction(
            name=cls._func_name,
            axis=op.axis,
            dropna=op.dropna,
            use_arrow_dtype=op.use_arrow_dtype,
            is_gpu=op.is_gpu(),
        )
def nunique_dataframe(df, axis=0, dropna=True, combine_size=None):
    """
    Count distinct observations over the requested axis.

    Return Series with number of distinct observations. Can ignore NaN
    values.

    Parameters
    ----------
    axis : {0 or 'index', 1 or 'columns'}, default 0
        The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for
        column-wise.
    dropna : bool, default True
        Don't include NaN in the counts.
    combine_size : int, optional
        The number of chunks to combine.

    Returns
    -------
    Series

    See Also
    --------
    Series.nunique: Method nunique for Series.
    DataFrame.count: Count non-NA cells for each column or row.

    Examples
    --------
    >>> import mars.dataframe as md
    >>> df = md.DataFrame({'A': [1, 2, 3], 'B': [1, 1, 1]})
    >>> df.nunique().execute()
    A    3
    B    1
    dtype: int64
    """
    # Build the operand and apply it directly to the dataframe.
    return DataFrameNunique(
        axis=axis,
        dropna=dropna,
        combine_size=combine_size,
        output_types=[OutputType.series],
        use_arrow_dtype=options.dataframe.use_arrow_dtype,
    )(df)
def METHOD_NAME(series, dropna=True, combine_size=None):
    """
    Return number of unique elements in the object.

    Excludes NA values by default.

    Parameters
    ----------
    dropna : bool, default True
        Don't include NaN in the count.
    combine_size : int, optional
        The number of chunks to combine.

    Returns
    -------
    int

    See Also
    --------
    DataFrame.nunique: Method nunique for DataFrame.
    Series.count: Count non-NA/null observations in the Series.

    Examples
    --------
    >>> import mars.dataframe as md
    >>> s = md.Series([1, 3, 5, 7, 7])
    >>> s.nunique().execute()
    4
    """
    # Scalar output variant of DataFrameNunique, applied to the series.
    return DataFrameNunique(
        dropna=dropna,
        combine_size=combine_size,
        output_types=[OutputType.scalar],
        use_arrow_dtype=options.dataframe.use_arrow_dtype,
    )(series)
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import errno
import logging
import re
import sys
from hadoop import conf
from hadoop import confparse
from desktop.lib.paths import get_config_root_hadoop
if sys.version_info[0] > 2:
open_file = open
else:
open_file = file
__all = ['get_conf', 'get_trash_interval', 'get_s3a_access_key', 'get_s3a_secret_key']
# NOTE(review): the name above is ``__all`` (missing trailing underscores),
# so it does not act as the module's ``__all__`` export list. Renaming it
# would restrict ``from ... import *`` to these four names only — confirm
# intent before changing.

LOG = logging.getLogger()

_CORE_SITE_PATH = None                  # Path to core-site.xml
_CORE_SITE_DICT = None                  # A dictionary of name/value config options

# Property names looked up in core-site.xml.
_CNF_TRASH_INTERVAL = 'fs.trash.interval'
_CNF_S3A_ACCESS_KEY = 'fs.s3a.access.key'
_CNF_S3A_SECRET_KEY = 'fs.s3a.secret.key'
_CNF_S3A_SESSION_TOKEN = 'fs.s3a.session.token'
# Ranger RAZ endpoints / cluster names for S3A and ADLS.
_CNF_S3A_RAZ_API_URL = 'fs.s3a.ext.raz.rest.host.url'
_CNF_S3A_RAZ_CLUSTER_NAME = 'fs.s3a.ext.raz.s3.access.cluster.name'
# Regex (not a literal key): matches per-bucket endpoint properties.
_CNF_S3A_RAZ_BUCKET_ENDPOINT = 'fs.s3a.bucket.(?P<bucket>[^.]+).endpoint'
_CNF_ADLS_RAZ_API_URL = 'fs.azure.ext.raz.rest.host.url'
_CNF_ADLS_RAZ_CLUSTER_NAME = 'fs.azure.ext.raz.adls.access.cluster.name'
_CNF_DEFAULT_FS = 'fs.defaultFS'
# ADLS Gen1 OAuth2 settings.
_CNF_ADLS_CLIENT_ID = 'dfs.adls.oauth2.client.id'
_CNF_ADLS_AUTHENTICATION_CODE = 'dfs.adls.oauth2.credential'
_CNF_ADLS_REFRESH_URL = 'dfs.adls.oauth2.refresh.url'
_CNF_ADLS_GRANT_TYPE = 'dfs.adls.oauth2.access.token.provider.type'
# ABFS (Azure Gen2) OAuth2 settings.
_CNF_AZURE_CLIENT_ID = 'fs.azure.account.oauth2.client.id'
_CNF_AZURE_CLIENT_SECRET = 'fs.azure.account.oauth2.client.secret'
_CNF_AZURE_CLIENT_ENDPOINT = 'fs.azure.account.oauth2.client.endpoint'
_CNF_SECURITY = 'hadoop.security.authentication'
def reset():
    """Reset the cached conf"""
    # Drop the cache so the next get_conf() call re-parses core-site.xml.
    global _CORE_SITE_DICT
    _CORE_SITE_DICT = None


def get_conf():
    """get_conf() -> ConfParse object for core-site.xml"""
    # Lazily parse on first access; subsequent calls hit the module cache.
    if _CORE_SITE_DICT is None:
        _parse_core_site()
    return _CORE_SITE_DICT
def _parse_core_site():
    """
    Parse core-site.xml and store in _CORE_SITE_DICT

    A missing file silently yields an empty configuration; any other
    I/O error is logged and leaves the previous cache untouched.
    """
    global _CORE_SITE_DICT
    global _CORE_SITE_PATH

    try:
        _CORE_SITE_PATH = get_config_root_hadoop('core-site.xml')
        # Use a context manager so the file handle is closed promptly
        # (previously the handle was left for the garbage collector).
        with open_file(_CORE_SITE_PATH, 'r') as conf_file:
            data = conf_file.read()
    except IOError as err:
        if err.errno != errno.ENOENT:
            LOG.error('Cannot read from "%s": %s' % (_CORE_SITE_PATH, err))
            return
        # Keep going and make an empty ConfParse
        data = ""

    _CORE_SITE_DICT = confparse.ConfParse(data)
def get_trash_interval():
    """
    Get trash interval

    Also indicates whether trash is enabled or not.
    """
    # 0 (the default) means HDFS trash is disabled.
    return get_conf().get(_CNF_TRASH_INTERVAL, 0)


def get_s3a_access_key():
    """
    Get S3A AWS access key ID
    https://hadoop.apache.org/docs/stable/hadoop-aws/tools/hadoop-aws/index.html
    """
    return get_conf().get(_CNF_S3A_ACCESS_KEY)


def get_s3a_secret_key():
    """
    Get S3A AWS secret key
    https://hadoop.apache.org/docs/stable/hadoop-aws/tools/hadoop-aws/index.html
    """
    return get_conf().get(_CNF_S3A_SECRET_KEY)


def get_s3a_session_token():
    # Temporary credentials session token, if configured.
    return get_conf().get(_CNF_S3A_SESSION_TOKEN)


def get_raz_api_url():
    """
    Get Raz API.
    """
    # Both S3A and ADLS declare a RAZ endpoint; they are expected to be
    # identical — warn (but still answer) when they diverge.
    s3a_raz_url = get_conf().get(_CNF_S3A_RAZ_API_URL)
    adls_raz_url = get_conf().get(_CNF_ADLS_RAZ_API_URL)
    if s3a_raz_url != adls_raz_url:
        LOG.warning('Raz API: S3A and ADLS URLs are different')
    return s3a_raz_url or adls_raz_url


def METHOD_NAME():
    """
    Get the name of the Cluster where Raz is running.
    """
    # Prefer the S3A cluster name, falling back to the ADLS one.
    return get_conf().get(_CNF_S3A_RAZ_CLUSTER_NAME, '') or get_conf().get(_CNF_ADLS_RAZ_CLUSTER_NAME, '')
def get_raz_s3_default_bucket():
    """
    Get the name of the default S3 bucket of Raz
    """
    # Scan every core-site property for a per-bucket endpoint key.
    # NOTE(review): the pattern's dots are unescaped and re.search is not
    # anchored, so keys merely containing a similar substring would also
    # match — presumably harmless for real core-site keys; confirm.
    for key, val in get_conf().items():
        match = re.search(_CNF_S3A_RAZ_BUCKET_ENDPOINT, key)
        if match:
            return {
                'host': val,
                'bucket': match.group('bucket')
            }
    # Implicitly returns None when no per-bucket endpoint is configured.
def get_default_fs():
    # e.g. hdfs://namenode:8020 — the cluster's default filesystem URI.
    return get_conf().get(_CNF_DEFAULT_FS)


def get_adls_client_id():
    """
    Get ADLS client id
    https://hadoop.apache.org/docs/stable/hadoop-aws/tools/hadoop-aws/index.html
    """
    return get_conf().get(_CNF_ADLS_CLIENT_ID)


def get_adls_authentication_code():
    """
    Get ADLS secret key
    https://hadoop.apache.org/docs/stable/hadoop-aws/tools/hadoop-aws/index.html
    """
    return get_conf().get(_CNF_ADLS_AUTHENTICATION_CODE)


def get_adls_refresh_url():
    """
    Get ADLS secret key
    https://hadoop.apache.org/docs/stable/hadoop-aws/tools/hadoop-aws/index.html
    """
    # NOTE(review): docstring says "secret key" but the key read is the
    # OAuth2 token refresh URL — presumably a copy/paste slip.
    return get_conf().get(_CNF_ADLS_REFRESH_URL)


def get_adls_grant_type():
    """
    Get ADLS provider type
    https://hadoop.apache.org/docs/stable/hadoop-aws/tools/hadoop-aws/index.html
    """
    return get_conf().get(_CNF_ADLS_GRANT_TYPE)


def is_kerberos_enabled():
    # True when hadoop.security.authentication is set to 'kerberos'.
    return get_conf().get(_CNF_SECURITY) == 'kerberos'


def get_azure_client_id():
    # ABFS OAuth2 client id from core-site.xml.
    return get_conf().get(_CNF_AZURE_CLIENT_ID)


def get_azure_client_secret():
    # ABFS OAuth2 client secret from core-site.xml.
    return get_conf().get(_CNF_AZURE_CLIENT_SECRET)
def get_azure_client_endpoint():
    """Get the ABFS OAuth2 client endpoint (fs.azure.account.oauth2.client.endpoint)."""
    # Stray trailing "|" separator removed from the original return line.
    return get_conf().get(_CNF_AZURE_CLIENT_ENDPOINT)
"""Tests for the XAPIStatementSerializer serializer of the Marsha project."""
from django.test import TestCase
from marsha.core.serializers import (
ExtensionSerializer,
VerbSerializer,
XAPIStatementSerializer,
)
class VerbSerializerTest(TestCase):
    """Test the serializer validating a xAPI verb."""

    def METHOD_NAME(self):
        """The VerbSerializer should be valid with valid data."""
        data = {"id": "http://url.tld", "display": {"foo": "bar"}}
        serializer = VerbSerializer(data=data)
        self.assertTrue(serializer.is_valid())

    def test_verb_serializer_with_missing_id_property(self):
        """Property id is mandatory. If missing it should fail."""
        data = {"display": {"foo": "bar"}}
        serializer = VerbSerializer(data=data)
        self.assertFalse(serializer.is_valid())
        self.assertIn("id", serializer.errors)
        # Inspect the first error's code to make sure it is the
        # "required"-field failure, not some other validation error.
        error = serializer.errors.get("id").pop(0)
        self.assertEqual(error.code, "required")

    def test_verb_serializer_with_invalid_id_property(self):
        """Id property should be a valid URL."""
        data = {"id": "foo", "display": {"foo": "bar"}}
        serializer = VerbSerializer(data=data)
        self.assertFalse(serializer.is_valid())
        self.assertIn("id", serializer.errors)
        error = serializer.errors.get("id").pop(0)
        self.assertEqual(error.code, "invalid")

    def test_verb_serializer_with_missing_display_property(self):
        """Property display is mandatory. If missing it should fail."""
        data = {"id": "http://url.tld"}
        serializer = VerbSerializer(data=data)
        self.assertFalse(serializer.is_valid())
        self.assertIn("display", serializer.errors)
        error = serializer.errors.get("display").pop(0)
        self.assertEqual(error.code, "required")

    def test_verb_serializer_with_invalid_display_property(self):
        """Display property only accept Dictionary."""
        data = {"id": "http://url.tld", "display": "foo"}
        serializer = VerbSerializer(data=data)
        self.assertFalse(serializer.is_valid())
        self.assertIn("display", serializer.errors)
        error = serializer.errors.get("display").pop(0)
        self.assertEqual(error.code, "not_a_dict")
class ExtensionSerializerTest(TestCase):
    """Test the serializer validating a xAPI context."""

    def test_extension_serializer_with_valid_data(self):
        """The ContextSerializer should be valid with valid data."""
        data = {"extensions": {"foo": "bar"}}
        serializer = ExtensionSerializer(data=data)
        self.assertTrue(serializer.is_valid())

    def test_extension_serializer_with_no_extensions_property(self):
        """Property extensions is missing, the serializer must not be valid."""
        data = {}
        serializer = ExtensionSerializer(data=data)
        self.assertFalse(serializer.is_valid())
        self.assertIn("extensions", serializer.errors)
        # The first error on the field should be the "required" failure.
        error = serializer.errors.get("extensions").pop(0)
        self.assertEqual(error.code, "required")

    def test_extension_serializer_with_invalid_extensions_property(self):
        """Property extensions is invalid, the serializer must not be valid."""
        data = {"extensions": "foo"}
        serializer = ExtensionSerializer(data=data)
        self.assertFalse(serializer.is_valid())
        self.assertIn("extensions", serializer.errors)
        error = serializer.errors.get("extensions").pop(0)
        self.assertEqual(error.code, "not_a_dict")
class XAPIStatementSerializersTest(TestCase):
    """Test the serializer validating a xAPI statement."""

    def test_xapi_statement_serializer_without_result_and_id(self):
        """The XAPIStatementSerializer should be valid with only required data."""
        data = {
            "verb": {"id": "http://url.tld", "display": {"foo": "bar"}},
            "context": {"extensions": {"foo": "bar"}},
        }
        serializer = XAPIStatementSerializer(data=data)
        self.assertTrue(serializer.is_valid())
        # An id is generated by the serializer even when not provided.
        self.assertTrue("id" in serializer.validated_data)
        self.assertFalse("result" in serializer.validated_data)

    def test_xapi_statement_serializer_with_result_and_id(self):
        """The XAPIStatementSerializer should be valid with all possible data."""
        data = {
            "verb": {"id": "http://url.tld", "display": {"foo": "bar"}},
            "context": {"extensions": {"foo": "bar"}},
            "result": {"extensions": {"foo": "bar"}},
            "id": "cc8868ac-d84b-4826-89df-a6171e9e3641",
        }
        serializer = XAPIStatementSerializer(data=data)
        self.assertTrue(serializer.is_valid())
        # A caller-supplied id must be kept as-is, not regenerated.
        self.assertTrue("id" in serializer.validated_data)
        self.assertEqual(
            "cc8868ac-d84b-4826-89df-a6171e9e3641", serializer.validated_data["id"]
        )
        self.assertTrue("result" in serializer.validated_data)

    def test_xapi_statement_serializer_with_missing_verb(self):
        """The XAPIStatementSerializer should fail when verb is missing."""
        data = {"context": {"extensions": {"foo": "bar"}}}
        serializer = XAPIStatementSerializer(data=data)
        self.assertFalse(serializer.is_valid())
        self.assertIn("verb", serializer.errors)
        error = serializer.errors.get("verb").pop(0)
        self.assertEqual(error.code, "required")

    def test_xapi_statement_serializer_with_missing_context(self):
        """The XAPIStatementSerializer should fail when context is missing."""
        data = {"verb": {"id": "http://url.tld", "display": {"foo": "bar"}}}
        serializer = XAPIStatementSerializer(data=data)
        self.assertFalse(serializer.is_valid())
        self.assertIn("context", serializer.errors)
        error = serializer.errors.get("context").pop(0)
        self.assertEqual(error.code, "required")
def test_xapi_statement_serializer_with_extra_data(self):
"""The XAPIStatementSerializer should not be valid with extra data."""
data = {
"verb": {"id": "http://url.tld", "display": {"foo": "bar"}},
"context": {"extensions": {"foo": "bar"}},
"foo": "bar",
}
serializer = XAPIStatementSerializer(data=data)
self.assertFalse(serializer.is_valid()) |
6,612 | set layer data json | """This module contains function to save building element classes."""
import teaser.logic.utilities as utilities
import warnings
import collections
import json
def save_type_element(element, data_class):
    """Save information about a building element to json.

    Saves typical building elements according to their construction
    year and their construction type in the json file for type building
    elements. If the Project parent is set, it automatically saves it to
    the file given in Project.data. Alternatively you can specify a path to
    a file of TypeBuildingElements. If this file does not exist,
    a new file is created.

    If an entry for the element's (type, age group, construction type)
    triple already exists, a warning is emitted and nothing is written.

    Parameters
    ----------
    element : BuildingElement()
        Instance of BuildingElement or inherited Element of TEASER
    data_class : DataClass()
        DataClass containing the bindings for TypeBuildingElement and
        Material (typically this is the data class stored in prj.data,
        but the user can individually change that.
    """
    data_class.element_bind["version"] = "0.7"
    # Key uniquely identifying this element in the type-element bindings.
    check_str = "{}_{}_{}".format(
        type(element).__name__, element.building_age_group, element.construction_type
    )
    if check_str in data_class.element_bind:
        # Existing entries are never overwritten; warn and bail out.
        # (The former `add_to_json` flag was dead code: the early return made
        # the `if add_to_json is True` branch unreachable in the duplicate case.)
        warnings.warn(
            "Construction Type and building age "
            "group already exist in this json, consider revising "
            "your inputs. The Element is NOT saved into json"
        )
        return
    data_class.element_bind[check_str] = collections.OrderedDict()
    _set_basic_data_json(
        element=element, wall_out=data_class.element_bind[check_str]
    )
    METHOD_NAME(
        element=element, wall_out=data_class.element_bind[check_str]
    )
    # Persist the whole (updated) bindings dictionary back to disk.
    with open(utilities.get_full_path(data_class.path_tb), "w") as file:
        file.write(
            json.dumps(data_class.element_bind, indent=4, separators=(",", ": "))
        )
def delete_type_element(element, data_class):
    """Delete a typical element in json.

    Removes the entry for the element's (type, construction year group,
    construction type) triple from the type-building-elements json and
    rewrites the file given in ``data_class.path_tb``.

    Parameters
    ----------
    element : BuildingElement()
        Instance of BuildingElement or inherited Element of TEASER
    data_class : DataClass()
        DataClass containing the bindings for TypeBuildingElement and
        Material (typically this is the data class stored in prj.data,
        but the user can individually change that.
    """
    key = "{}_{}_{}".format(
        type(element).__name__, element.building_age_group, element.construction_type
    )
    del data_class.element_bind[key]
    serialized = json.dumps(data_class.element_bind, indent=4, separators=(",", ": "))
    with open(utilities.get_full_path(data_class.path_tb), "w") as out_file:
        out_file.write(serialized)
def _set_basic_data_json(element, wall_out):
"""Set basic data of building element.
Helper function.
Parameters
----------
element : BuildingElement()
Instance of BuildingElement or inherited Element of TEASER
wall_out: dictionary
Dictionary with information about walls.
"""
wall_out["building_age_group"] = element.building_age_group
wall_out["construction_type"] = element.construction_type
wall_out["inner_radiation"] = element.inner_radiation
wall_out["inner_convection"] = element.inner_convection
if type(element).__name__ == "Window":
wall_out["outer_radiation"] = element.outer_radiation
wall_out["outer_convection"] = element.outer_convection
wall_out["g_value"] = element.g_value
wall_out["a_conv"] = element.a_conv
wall_out["shading_g_total"] = element.shading_g_total
wall_out["shading_max_irr"] = element.shading_max_irr
elif (
type(element).__name__ == "OuterWall"
or type(element).__name__ == "Rooftop"
or type(element).__name__ == "Door"
):
wall_out["outer_radiation"] = element.outer_radiation
wall_out["outer_convection"] = element.outer_convection
def METHOD_NAME(element, wall_out):
    """Set layer data of building element.

    Helper function: serializes every layer of the element (thickness plus
    the material's name and id) into an ordered mapping keyed by layer id
    and stores it under ``wall_out["layer"]``.

    Parameters
    ----------
    element : BuildingElement()
        Instance of BuildingElement or inherited Element of TEASER
    wall_out: dictionary
        Dictionary with information about walls.
    """
    layers = collections.OrderedDict()
    for lay in element.layer:
        entry = collections.OrderedDict()
        entry["thickness"] = lay.thickness
        material = collections.OrderedDict()
        material["name"] = lay.material.name
        material["material_id"] = lay.material.material_id
        entry["material"] = material
        layers[lay.id] = entry
    wall_out["layer"] = layers
6,613 | get | # coding=utf-8
"""Request handler for series and episodes."""
from __future__ import unicode_literals
import logging
from os.path import basename
from medusa import db
from medusa.common import DOWNLOADED, FAILED, SNATCHED, SUBTITLED, statusStrings
from medusa.logger.adapters.style import BraceAdapter
from medusa.providers.generic_provider import GenericProvider
from medusa.server.api.v2.base import BaseRequestHandler
from medusa.server.api.v2.history import HistoryHandler
from medusa.tv.episode import Episode, EpisodeNumber
from medusa.tv.series import Series, SeriesIdentifier
log = BraceAdapter(logging.getLogger(__name__))
log.logger.addHandler(logging.NullHandler())
class EpisodeHistoryHandler(BaseRequestHandler):
    """Episode history request handler."""

    #: parent resource handler
    parent_handler = HistoryHandler
    #: resource name
    name = 'episode'
    #: identifier
    identifier = ('episode_slug', r'[\w-]+')
    #: path param
    path_param = ('path_param', r'\w+')
    #: allowed HTTP methods
    allowed_methods = ('GET',)

    def METHOD_NAME(self, series_slug, episode_slug, path_param):
        """Query episode's history information.

        Validates the series/episode slugs, fetches the matching history
        rows ordered newest-first, and returns them normalized for the API.

        :param series_slug: series slug. E.g.: tvdb1234
        :param episode_slug: episode slug. E.g.: s01e01
        :param path_param:
        """
        series_identifier = SeriesIdentifier.from_slug(series_slug)
        if not series_identifier:
            return self._bad_request('Invalid series slug')

        series = Series.find_by_identifier(series_identifier)
        if not series:
            return self._not_found('Series not found')

        if not episode_slug:
            return self._bad_request('Invalid episode slug')

        episode_number = EpisodeNumber.from_slug(episode_slug)
        if not episode_number:
            return self._not_found('Invalid episode number')

        episode = Episode.find_by_series_and_episode(series, episode_number)
        if not episode:
            return self._not_found('Episode not found')

        sql_base = """
            SELECT rowid, date, action, quality,
                   provider, version, resource, size, proper_tags,
                   indexer_id, showid, season, episode, manually_searched, info_hash
            FROM history
            WHERE showid = ? AND indexer_id = ? AND season = ? AND episode = ?
        """
        params = [series.series_id, series.indexer, episode.season, episode.episode]
        sql_base += ' ORDER BY date DESC'
        results = db.DBConnection().select(sql_base, params)

        def data_generator():
            """Read history data and normalize key/value pairs."""
            for item in results:
                provider = {}
                release_group = None
                release_name = None
                file_name = None
                subtitle_language = None

                if item['action'] in (SNATCHED, FAILED):
                    provider.update({
                        'id': GenericProvider.make_id(item['provider']),
                        'name': item['provider']
                    })
                    release_name = item['resource']

                if item['action'] == DOWNLOADED:
                    # For downloads the "provider" column holds the release group.
                    release_group = item['provider']
                    file_name = item['resource']

                if item['action'] == SUBTITLED:
                    # Fixed: this branch was duplicated; the second copy
                    # redundantly re-assigned subtitle_language.
                    subtitle_language = item['resource']
                    provider.update({
                        'id': item['provider'],
                        'name': item['provider']
                    })

                # NOTE(review): 'release_name' is snake_case while sibling keys
                # are camelCase; kept as-is since API consumers may rely on it.
                yield {
                    'id': item['rowid'],
                    'series': SeriesIdentifier.from_id(item['indexer_id'], item['showid']).slug,
                    'status': item['action'],
                    'statusName': statusStrings.METHOD_NAME(item['action']),
                    'actionDate': item['date'],
                    'quality': item['quality'],
                    'resource': basename(item['resource']),
                    'size': item['size'],
                    'properTags': item['proper_tags'],
                    'season': item['season'],
                    'episode': item['episode'],
                    'manuallySearched': bool(item['manually_searched']),
                    'infoHash': item['info_hash'],
                    'provider': provider,
                    'release_name': release_name,
                    'releaseGroup': release_group,
                    'fileName': file_name,
                    'subtitleLanguage': subtitle_language
                }

        if not results:
            return self._not_found('History data not found for show {show} and episode {episode}'.format(
                show=series.identifier.slug, episode=episode.slug
            ))

        return self._ok(data=list(data_generator()))
6,614 | test suite | from __future__ import annotations
import json
import os
from os import chdir, environ, getcwd
from os import path as p
from typing import Tuple
import pytest
from rdflib.term import URIRef
from . import runner
# Base IRI under which toRdf suite entries are identified, and the local
# directory holding the (vendored) 1.1 test suite.
TC_BASE = "https://w3c.github.io/json-ld-api/tests/toRdf/"
testsuite_dir = p.join(p.abspath(p.dirname(__file__)), "1.1")

# Suite categories this harness cannot run at all.
unsupported_tests: Tuple[str, ...] = ("frame", "normalize")
unsupported_tests += (
    "error",
    "remote",
)
unsupported_tests += ("flatten", "compact", "expand")
unsupported_tests += ("html",)
unsupported_tests += ("fromRdf",)  # The JSON-LD 1.1 enhancement applies to parsing only

# Individual test entries that currently fail; each group is annotated with
# the suspected cause. These are skipped when SKIP_KNOWN_BUGS is True.
known_bugs: Tuple[str, ...] = (
    # TODO: Literal doesn't preserve representations
    "fromRdf/0002-in",
    # RDflib does not print Integer with scientific notation
    "toRdf/0035-in",
    # TODO: "http:g" should serialize to "http:g", not "//g"
    "toRdf/0120-in",
    "toRdf/0121-in",
    "toRdf/0122-in",
    "toRdf/0123-in",
    "toRdf/0124-in",
    "toRdf/0125-in",
    "toRdf/0126-in",
    # TODO: RDFLib collapses http://ab//de to http://ab/de
    "toRdf/0128-in",
    # TODO: RDFLib does not allow arbitrary "urn:ex:s307" as a URI in predicate place
    "toRdf/0130-in",
    "toRdf/0131-in",
    "toRdf/0132-in",
    # TODO: Odd context lookup bug with scoped context (v1.1 bug)
    "toRdf/c013-in",
    # Type with @context of null should fall back to @vocab (I think), not baseuri
    "toRdf/c014-in",
    # <http://example/typed-base#subject-reference-id> != <http://example/base-base#subject-reference-id>
    "toRdf/c015-in",
    # context null clears vocab from parent context?
    "toRdf/c018-in",
    # TODO: Bug with resolving relative context url from top-level context which is not doc_root
    "toRdf/c031-in",
    # TODO: Nested Contexts don't quite work properly yet
    "toRdf/c037-in",
    "toRdf/c038-in",
    # TODO: @direction doesn't quite work properly in this implementation
    "toRdf/di09-in",
    "toRdf/di10-in",
    "toRdf/di11-in",
    "toRdf/di12-in",
    # TODO: empty list inside a list is represented wrong?
    "toRdf/e004-in",
    # Same problem as 0002-in
    "toRdf/e061-in",
    # Trying to use BNode as predicate, RDFLIB doesn't support
    "toRdf/e075-in",
    # @id and @vocab in literal datatype expansion doesn't work
    "toRdf/e088-in",
    # TODO: relative-iri keeps . on end of IRI?
    "toRdf/e076-in",
    "toRdf/e089-in",
    "toRdf/e090-in",
    "toRdf/e091-in",
    "toRdf/e110-in",
    "toRdf/e129-in",
    "toRdf/e130-in",
    # TODO: Just broken expansion...
    "toRdf/e080-in",
    "toRdf/e092-in",
    "toRdf/e093-in",
    "toRdf/e094-in",
    "toRdf/e104-in",
    "toRdf/e108-in",
    # TODO: Odd result in list expansion
    "toRdf/e105-in",
    "toRdf/e107-in",
    # no expandContent option?
    "toRdf/e077-in",
    # TODO: Investigate:
    "toRdf/e111-in",
    "toRdf/e112-in",
    "toRdf/e119-in",
    "toRdf/e120-in",
    "toRdf/e122-in",
    # RDFLib cannot keep a colon on the end of a prefix uri
    "toRdf/e117-in",
    "toRdf/e118-in",
    # <ex:ns/> doesn't expand to <http://example.org/ns/>
    "toRdf/e124-in",
    # Similar to above?
    "toRdf/e125-in",
    # Recursive Inclusion triggered!
    "toRdf/e128-in",
    # JSON-native double representation
    "toRdf/js04-in",
    "toRdf/js10-in",
    # JSON character escaping
    "toRdf/js12-in",
    "toRdf/js13-in",
    # Broken list comprehension
    "toRdf/li05-in",
    "toRdf/li06-in",
    "toRdf/li07-in",
    "toRdf/li08-in",
    "toRdf/li09-in",
    "toRdf/li10-in",
    "toRdf/li11-in",
    "toRdf/li14-in",
    # Bad URI?
    "toRdf/li12-in",
    # cannot use property-index to add property to graph object?
    "toRdf/pi11-in",
    "toRdf/pr25-in",
    # Investigate property issues:
    "toRdf/pr38-in",
    "toRdf/pr39-in",
    "toRdf/pr40-in",
    # Negative zero representation?
    "toRdf/rt01-in",
    # Property scope with @propagate not working
    "toRdf/so06-in",
    # Expand string as value gives wrong number representation
    "toRdf/tn02-in",
    # TODO: Rdflib should silently reject bad predicate URIs
    "toRdf/wf02-in",
    # TODO: we don't extract context or json-ld that's embedded in HTML
    "remote-doc/0013-in",
    "remote-doc/la01-in",
    "remote-doc/la02-in",
    "remote-doc/la03-in",
    "remote-doc/la04-in",
    "remote-doc/la05-in",
)
if os.name == "nt":
    # nquad parser does not correctly handle unnormalized unicode on windows.
    known_bugs += ("toRdf/js11-in",)

# Re-point the base IRI at the suite root and set run-mode switches.
TC_BASE = "https://w3c.github.io/json-ld-api/tests/"
allow_lists_of_lists = True
SKIP_KNOWN_BUGS = True
SKIP_1_0_TESTS = True

# The suite location can be overridden via the JSONLD_TESTSUITE env var.
testsuite_dir = environ.get("JSONLD_TESTSUITE") or p.join(
    p.abspath(p.dirname(__file__)), "1.1"
)
test_dir = p.join(testsuite_dir, "tests")
if not p.isdir(test_dir):  # layout of 1.1 testsuite
    test_dir = testsuite_dir
else:
    # Older (json-ld.org) suite layout detected: different base IRI and
    # stricter list-of-lists handling.
    TC_BASE = "http://json-ld.org/test-suite/tests/"
    allow_lists_of_lists = False
def read_manifest(skiptests):
    """Yield ``(category, testnum, inputpath, expectedpath, context, options)``
    tuples for every runnable entry in the suite manifest.

    Entries whose manifest name, input path, or category appears in
    *skiptests* are silently skipped, as are entries without an expected
    output. Files are opened via ``with`` so handles are closed even if
    ``json.load`` raises (the previous open/close pattern leaked on error).
    """
    with open(p.join(testsuite_dir, "manifest.jsonld"), "r") as f:
        manifestdata = json.load(f)
    # context = manifestdata.get('context')
    for m in manifestdata.get("sequence"):
        if any(token in m for token in skiptests):
            continue
        with open(p.join(testsuite_dir, m), "r") as f:
            md = json.load(f)
        for test in md.get("sequence"):
            parts = test.get("input", "").split(".")[0]
            cat_num, direction = parts.rsplit("-", 1)
            category, testnum = (
                cat_num.split("/") if "/" in cat_num else cat_num.split("-")
            )
            # Skip individually-listed tests or whole categories.
            if parts in skiptests or category in skiptests:
                continue
            inputpath = test.get("input")
            expectedpath = test.get("expect")
            expected_error = test.get("expect")  # TODO: verify error # noqa: F841
            context = test.get("context", False)
            options = test.get("option") or {}
            if expectedpath:
                yield category, testnum, inputpath, expectedpath, context, options
def get_test_suite_cases():
    """Yield pytest parameter tuples for every suite entry that should run.

    Skips unsupported categories (and known bugs when SKIP_KNOWN_BUGS is set),
    optionally drops JSON-LD 1.0-only tests, and picks the runner function
    from the input/expected file extensions.
    """
    skiptests = unsupported_tests
    if SKIP_KNOWN_BUGS:
        skiptests += known_bugs
    for cat, num, inputpath, expectedpath, context, options in read_manifest(skiptests):
        if (
            options
            and SKIP_1_0_TESTS
            and str(options.get("specVersion", "")).lower() == "json-ld-1.0"
        ):
            # Skip the JSON v1.0 tests
            continue
        if not inputpath.endswith((".jldt", ".json", ".jsonld")):  # fromRdf
            func = runner.do_test_serializer
        elif expectedpath.endswith(".jsonld"):  # compact/expand/flatten
            func = runner.do_test_json
        else:  # toRdf
            func = runner.do_test_parser
        rdf_test_uri = URIRef(f"{TC_BASE}{cat}-manifest#t{num}")
        yield rdf_test_uri, func, TC_BASE, cat, num, inputpath, expectedpath, context, options
@pytest.fixture(scope="module", autouse=True)
def global_state():
    """Run every test in this module from inside the suite directory,
    restoring the previous working directory afterwards."""
    previous_cwd = getcwd()
    chdir(test_dir)
    yield
    chdir(previous_cwd)
@pytest.mark.webtest
# TODO: apply webtest marker to individual tests
# Marking this whole function as webtest is too broad, as many tests don't
# require the web, but making it narrower requires more refactoring.
@pytest.mark.parametrize(
    "rdf_test_uri, func, suite_base, cat, num, inputpath, expectedpath, context, options",
    get_test_suite_cases(),
)
def METHOD_NAME(
    rdf_test_uri: URIRef,
    func,
    suite_base,
    cat,
    num,
    inputpath,
    expectedpath,
    context,
    options,
):
    """Run a single manifest entry through the runner selected for it.

    `rdf_test_uri` only identifies the case in reports; `func` is one of the
    ``runner.do_test_*`` callables chosen by ``get_test_suite_cases``.
    """
    func(suite_base, cat, num, inputpath, expectedpath, context, options)
6,615 | cyclic support get expanded element ids | import ctypes
from ansys.dpf.gate import utils
from ansys.dpf.gate import errors
from ansys.dpf.gate.generated import capi
from ansys.dpf.gate.generated import cyclic_support_abstract_api
from ansys.dpf.gate.generated.data_processing_capi import DataProcessingCAPI
#-------------------------------------------------------------------------------
# CyclicSupport
#-------------------------------------------------------------------------------
class CyclicSupportCAPI(cyclic_support_abstract_api.CyclicSupportAbstractAPI):
    """ctypes bindings for the CyclicSupport DPF C API.

    NOTE(review): this module lives under ``gate.generated`` and looks
    auto-generated; the ``_run`` helper below factors out the error-handling
    boilerplate that every wrapper previously repeated inline. All public
    static-method signatures are unchanged. If the file is regenerated, this
    refactor should be ported to the generator template instead.
    """

    @staticmethod
    def _run(dll_func, *args):
        """Invoke *dll_func* with *args* plus the trailing (error-size,
        error-string) out-parameters; raise ``DPFServerException`` with the
        server message if an error was reported, else return the result."""
        errorSize = ctypes.c_int(0)
        sError = ctypes.c_wchar_p()
        res = dll_func(*args, ctypes.byref(utils.to_int32(errorSize)), ctypes.byref(sError))
        if errorSize.value != 0:
            raise errors.DPFServerException(sError.value)
        return res

    @staticmethod
    def init_cyclic_support_environment(object):
        # get core api; the `object` parameter name shadows the builtin but is
        # kept for compatibility with the generated-API calling convention.
        DataProcessingCAPI.init_data_processing_environment(object)
        object._deleter_func = (DataProcessingCAPI.data_processing_delete_shared_object, lambda obj: obj)

    @staticmethod
    def cyclic_support_delete(support):
        return CyclicSupportCAPI._run(
            capi.dll.CyclicSupport_delete,
            support._internal_obj if support is not None else None,
        )

    @staticmethod
    def cyclic_support_get_num_sectors(support, istage):
        return CyclicSupportCAPI._run(
            capi.dll.CyclicSupport_getNumSectors,
            support._internal_obj if support is not None else None,
            utils.to_int32(istage),
        )

    @staticmethod
    def cyclic_support_get_num_stages(support):
        return CyclicSupportCAPI._run(
            capi.dll.CyclicSupport_getNumStages,
            support._internal_obj if support is not None else None,
        )

    @staticmethod
    def cyclic_support_get_sectors_scoping(support, istage):
        return CyclicSupportCAPI._run(
            capi.dll.CyclicSupport_getSectorsScoping,
            support._internal_obj if support is not None else None,
            utils.to_int32(istage),
        )

    @staticmethod
    def cyclic_support_get_cyclic_phase(support):
        return CyclicSupportCAPI._run(
            capi.dll.CyclicSupport_getCyclicPhase,
            support._internal_obj if support is not None else None,
        )

    @staticmethod
    def cyclic_support_get_base_nodes_scoping(support, istage):
        return CyclicSupportCAPI._run(
            capi.dll.CyclicSupport_getBaseNodesScoping,
            support._internal_obj if support is not None else None,
            utils.to_int32(istage),
        )

    @staticmethod
    def cyclic_support_get_base_elements_scoping(support, istage):
        return CyclicSupportCAPI._run(
            capi.dll.CyclicSupport_getBaseElementsScoping,
            support._internal_obj if support is not None else None,
            utils.to_int32(istage),
        )

    @staticmethod
    def cyclic_support_get_expanded_node_ids(support, baseNodeId, istage, sectorsScoping):
        return CyclicSupportCAPI._run(
            capi.dll.CyclicSupport_getExpandedNodeIds,
            support._internal_obj if support is not None else None,
            utils.to_int32(baseNodeId),
            utils.to_int32(istage),
            sectorsScoping._internal_obj if sectorsScoping is not None else None,
        )

    @staticmethod
    def METHOD_NAME(support, baseElementId, istage, sectorsScoping):
        return CyclicSupportCAPI._run(
            capi.dll.CyclicSupport_getExpandedElementIds,
            support._internal_obj if support is not None else None,
            utils.to_int32(baseElementId),
            utils.to_int32(istage),
            sectorsScoping._internal_obj if sectorsScoping is not None else None,
        )

    @staticmethod
    def cyclic_support_get_cs(support):
        return CyclicSupportCAPI._run(
            capi.dll.CyclicSupport_getCS,
            support._internal_obj if support is not None else None,
        )

    @staticmethod
    def cyclic_support_get_low_high_map(support, istage):
        return CyclicSupportCAPI._run(
            capi.dll.CyclicSupport_getLowHighMap,
            support._internal_obj if support is not None else None,
            utils.to_int32(istage),
        )

    @staticmethod
    def cyclic_support_get_high_low_map(support, istage):
        return CyclicSupportCAPI._run(
            capi.dll.CyclicSupport_getHighLowMap,
            support._internal_obj if support is not None else None,
            utils.to_int32(istage),
        )
6,616 | clean data | import json
from typing import List, cast
import pandas as pd
from owid.catalog import Dataset, Table
from owid.catalog.utils import underscore_table
from structlog import get_logger
from etl.data_helpers import geo
from etl.helpers import PathFinder
from etl.paths import DATA_DIR
log = get_logger()
# naming conventions
paths = PathFinder(__file__)
def run(dest_dir: str) -> None:
    """Build the garden `unodc` dataset: load the meadow table, filter and
    harmonize countries, clean/reshape the data, and save with updated
    metadata into *dest_dir*."""
    log.info("unodc.start")

    # read dataset from meadow
    meadow_dataset = Dataset(DATA_DIR / "meadow/homicide/2023-01-04/unodc")
    meadow_table = meadow_dataset["unodc"]
    df = pd.DataFrame(meadow_table)

    log.info("unodc.exclude_countries")
    df = exclude_countries(df)
    log.info("unodc.harmonize_countries")
    df = harmonize_countries(df)
    df = METHOD_NAME(df)

    # create new dataset with the same metadata as meadow
    garden_dataset = Dataset.create_empty(dest_dir, metadata=meadow_dataset.metadata)

    # create new table with the same metadata as meadow and add it to dataset
    garden_table = underscore_table(Table(df, short_name=meadow_table.metadata.short_name))
    garden_dataset.add(garden_table)

    # update metadata from yaml file
    garden_dataset.update_metadata(paths.metadata_path)
    garden_dataset.save()

    log.info("unodc.end")
def load_excluded_countries() -> List[str]:
    """Load the list of country names to exclude from the JSON side-file."""
    with open(paths.excluded_countries_path, "r") as handle:
        excluded = json.load(handle)
    assert isinstance(excluded, list)
    return excluded
def exclude_countries(df: pd.DataFrame) -> pd.DataFrame:
    """Drop rows whose country appears in the excluded-countries list."""
    excluded = load_excluded_countries()
    keep_mask = ~df.country.isin(excluded)
    return cast(pd.DataFrame, df.loc[keep_mask])
def harmonize_countries(df: pd.DataFrame) -> pd.DataFrame:
    """Map raw country names to canonical OWID names, failing loudly when any
    raw name is neither mapped nor excluded."""
    raw_names = df["country"]
    df = geo.harmonize_countries(df=df, countries_file=str(paths.country_mapping_path))
    missing_countries = set(raw_names[df.country.isnull()])
    if any(missing_countries):
        raise RuntimeError(
            "The following raw country names have not been harmonized. "
            f"Please: (a) edit {paths.country_mapping_path} to include these country "
            f"names; or (b) add them to {paths.excluded_countries_path}."
            f"Raw country names: {missing_countries}"
        )
    return df
def METHOD_NAME(df: pd.DataFrame) -> pd.DataFrame:
    """Reshape the raw UNODC rows: split into by-mechanism and totals tables,
    pivot each to wide format, merge them on country/year, and restore the
    legacy column names for the overall totals."""
    work = df.copy(deep=True)

    # Splitting the data into that which has the totals and that which is
    # disaggregated by mechanism.
    by_mechanism = create_mechanism_df(work[work["dimension"] == "by mechanisms"])
    totals = create_total_df(work[work["dimension"] == "Total"])

    merged = pd.merge(by_mechanism, totals, how="outer", on=["country", "year"])

    # Reconciling the variable names with previous aggregated version
    merged = merged.rename(
        columns={
            "Both sexes_All ages_Rate per 100,000 population": "Rate per 100,000 population",
            "Both sexes_All ages_Counts": "Counts",
        }
    )
    return merged
def pivot_and_format_df(df, drop_columns, pivot_index, pivot_values, pivot_columns):
    """Drop *drop_columns*, pivot *df* wide on the requested disaggregations,
    and flatten the resulting MultiIndex columns into `"_"`-joined names.

    The first column level (the values name) is discarded before joining.
    """
    pivoted = (
        df.drop(columns=drop_columns)
        .pivot(index=pivot_index, values=pivot_values, columns=pivot_columns)
    )
    # Make the columns nice: strip the values level, then join the rest.
    pivoted.columns = pivoted.columns.droplevel(0)
    pivoted.columns = pivoted.columns.map("_".join)
    return pivoted.reset_index()
def create_total_df(df_tot: pd.DataFrame) -> pd.DataFrame:
    """Build the total-homicides table, wide by sex/age/unit of measurement.

    Rows with unknown sex are dropped (they duplicate the totals), and the
    opaque "Total" buckets are renamed before they become column names.
    """
    totals = df_tot.copy(deep=True)

    # There are some duplicates when sex is unknown so remove those rows.
    totals = totals[totals["sex"] != "Unknown"]

    # Make it more obvious what total age and total sex means.
    totals["age"] = totals["age"].map({"Total": "All ages"}, na_action="ignore").fillna(totals["age"])
    totals["sex"] = totals["sex"].map({"Total": "Both sexes"}, na_action="ignore").fillna(totals["sex"])

    return pivot_and_format_df(
        totals,
        drop_columns=["region", "subregion", "indicator", "dimension", "category", "source"],
        pivot_index=["country", "year"],
        pivot_values=["value"],
        pivot_columns=["sex", "age", "unit_of_measurement"],
    )
def create_mechanism_df(df_mech: pd.DataFrame) -> pd.DataFrame:
    """Build the homicides-by-mechanism table, wide by mechanism and unit of
    measurement, with the verbose source category labels shortened."""
    mechanisms = df_mech.copy(deep=True)

    relabel = {
        "Firearms or explosives - firearms": "Firearms",
        "Another weapon - sharp object": "Sharp object",
    }
    mechanisms["category"] = mechanisms["category"].map(relabel).fillna(mechanisms["category"])

    # Make the table wider so we have a column for each mechanism.
    return pivot_and_format_df(
        mechanisms,
        drop_columns=["region", "subregion", "indicator", "dimension", "source", "sex", "age"],
        pivot_index=["country", "year"],
        pivot_values=["value"],
        pivot_columns=["category", "unit_of_measurement"],
    )
6,617 | test get columns | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import object
import json
import os
import uuid
from django.urls import reverse
from nose.tools import assert_true, assert_equal
from desktop.lib.django_test_util import make_logged_in_client
from librdbms import conf as rdbms_conf
from librdbms.server import dbms
class MockRdbms(object):
    """Stub database layer returning fixed database/table names, so the
    views can be exercised without any real RDBMS connection."""

    _DATABASES = ['db1', 'db2']
    _TABLES = ['table1', 'table2']

    def get_databases(self):
        return list(self._DATABASES)

    def get_tables(self, database):
        # The database name is irrelevant for the stub.
        return list(self._TABLES)
class TestMockedRdbms(object):
    """Exercise the rdbms views against the stubbed database layer."""

    def setUp(self):
        self.client = make_logged_in_client()
        # Monkey-patch the accessor so no real database is contacted.
        self._original_dbms_get = dbms.get
        dbms.get = lambda a, b: MockRdbms()

    def tearDown(self):
        # Restore the real accessor.
        dbms.get = self._original_dbms_get

    def test_basic_flow(self):
        response = self.client.get("/rdbms/")
        assert_true(b'DB Query' in response.content, response.content)

    def test_config_error(self):
        self.finish = rdbms_conf.DATABASES.set_for_testing({})

        response = self.client.get("/rdbms/")
        assert_true(b'There are currently no databases configured.' in response.content)

        response = self.client.get("/rdbms/execute/")
        assert_true(b'There are currently no databases configured.' in response.content)

        self.finish()
class TestSQLiteRdbmsBase(object):
    """Shared fixture: a throwaway SQLite database registered as the
    'sqlitee' server for the duration of each test."""

    @classmethod
    def setup_class(cls):
        cls.database = '/tmp/%s.db' % uuid.uuid4()
        cls.prefillDatabase()

    @classmethod
    def teardown_class(cls):
        os.remove(cls.database)

    def setUp(self):
        self.client = make_logged_in_client()
        self.finish = rdbms_conf.DATABASES.set_for_testing({
            'sqlitee': {
                'name': self.database,
                'engine': 'sqlite'
            }
        })

    def tearDown(self):
        self.finish()

    @classmethod
    def prefillDatabase(cls):
        import sqlite3
        conn = sqlite3.connect(cls.database)
        conn.execute("CREATE TABLE test1 (date text, trans text, symbol text, qty real, price real)")
        conn.execute("INSERT INTO test1 VALUES ('2006-01-05','BUY','RHAT',100,35.14)")
        conn.commit()
        conn.close()
class TestAPI(TestSQLiteRdbmsBase):
    """End-to-end checks of the rdbms JSON API against the SQLite fixture."""

    def test_get_servers(self):
        response = self.client.get(reverse('rdbms:api_servers'))
        response_dict = json.loads(response.content)
        assert_true('sqlitee' in response_dict['servers'], response_dict)

    def test_get_databases(self):
        response = self.client.get(reverse('rdbms:api_databases', args=['sqlitee']))
        response_dict = json.loads(response.content)
        assert_true(self.database in response_dict['databases'], response_dict)

    def test_get_tables(self):
        response = self.client.get(reverse('rdbms:api_tables', args=['sqlitee', self.database]))
        response_dict = json.loads(response.content)
        assert_true('test1' in response_dict['tables'], response_dict)

    def METHOD_NAME(self):
        response = self.client.get(reverse('rdbms:api_columns', args=['sqlitee', self.database, 'test1']))
        response_dict = json.loads(response.content)
        assert_true('date' in response_dict['columns'], response_dict)
        assert_true('trans' in response_dict['columns'], response_dict)
        assert_true('symbol' in response_dict['columns'], response_dict)
        assert_true('qty' in response_dict['columns'], response_dict)
        assert_true('price' in response_dict['columns'], response_dict)

    def test_execute_query(self):
        data = {
            'server': 'sqlitee',
            'database': self.database,
            'query': 'SELECT * FROM test1'
        }
        response = self.client.post(reverse('rdbms:api_execute_query'), data, follow=True)
        # (fixed) removed leftover debug code that imported `traceback` and
        # printed the whole call stack on every run.
        response_dict = json.loads(response.content)
        assert_equal(1, len(response_dict['results']['rows']), response_dict)

    def test_explain_query(self):
        data = {
            'server': 'sqlitee',
            'database': self.database,
            'query': 'SELECT * FROM test1'
        }
        response = self.client.post(reverse('rdbms:api_explain_query'), data, follow=True)
        response_dict = json.loads(response.content)
        assert_true(len(response_dict['results']['rows']) > 0, response_dict)

    def test_options(self):
        finish = rdbms_conf.DATABASES['sqlitee'].OPTIONS.set_for_testing({'nonsensical': None})
        try:
            self.client.get(reverse('rdbms:api_tables', args=['sqlitee', self.database]))
        except TypeError as e:
            assert_true('nonsensical' in str(e), e)
        finish()
6,618 | system data | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetSkusNestedResourceTypeFirstResult',
'AwaitableGetSkusNestedResourceTypeFirstResult',
'get_skus_nested_resource_type_first',
'get_skus_nested_resource_type_first_output',
]
@pulumi.output_type
class GetSkusNestedResourceTypeFirstResult:
    # NOTE: generated Pulumi output type mirroring the Azure SKU resource
    # response; do not hand-edit field semantics.
    def __init__(__self__, id=None, name=None, properties=None, METHOD_NAME=None, type=None):
        # Each argument is shape-checked before being stored via pulumi.set:
        # id/name/type must be strings, properties/system_data dicts.
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if properties and not isinstance(properties, dict):
            raise TypeError("Expected argument 'properties' to be a dict")
        pulumi.set(__self__, "properties", properties)
        if METHOD_NAME and not isinstance(METHOD_NAME, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", METHOD_NAME)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def properties(self) -> 'outputs.SkuResourceResponseProperties':
        # SKU-specific properties of the resource.
        return pulumi.get(self, "properties")

    @property
    @pulumi.getter(name="systemData")
    def METHOD_NAME(self) -> 'outputs.SystemDataResponse':
        """
        Metadata pertaining to creation and last modification of the resource.
        """
        return pulumi.get(self, "system_data")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
class AwaitableGetSkusNestedResourceTypeFirstResult(GetSkusNestedResourceTypeFirstResult):
    """Awaitable wrapper so the invoke result can be used with ``await``.

    ``__await__`` never actually suspends: the ``if False: yield`` only makes
    the method a generator, and it immediately returns a plain result copy.
    """
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetSkusNestedResourceTypeFirstResult(
            id=self.id,
            name=self.name,
            properties=self.properties,
            METHOD_NAME=self.METHOD_NAME,
            type=self.type)
def get_skus_nested_resource_type_first(nested_resource_type_first: Optional[str] = None,
                                        provider_namespace: Optional[str] = None,
                                        resource_type: Optional[str] = None,
                                        sku: Optional[str] = None,
                                        opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSkusNestedResourceTypeFirstResult:
    """
    Gets the sku details for the given resource type and sku name.


    :param str nested_resource_type_first: The first child resource type.
    :param str provider_namespace: The name of the resource provider hosted within ProviderHub.
    :param str resource_type: The resource type.
    :param str sku: The SKU.
    """
    # Build the invoke payload as one literal; keys use the wire (camelCase)
    # names expected by the azure-native provider.
    __args__ = {
        'nestedResourceTypeFirst': nested_resource_type_first,
        'providerNamespace': provider_namespace,
        'resourceType': resource_type,
        'sku': sku,
    }
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:providerhub/v20210901preview:getSkusNestedResourceTypeFirst', __args__, opts=opts, typ=GetSkusNestedResourceTypeFirstResult).value

    # Re-wrap the raw invoke result in the awaitable result type.
    return AwaitableGetSkusNestedResourceTypeFirstResult(
        id=pulumi.get(__ret__, 'id'),
        name=pulumi.get(__ret__, 'name'),
        properties=pulumi.get(__ret__, 'properties'),
        METHOD_NAME=pulumi.get(__ret__, 'system_data'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_skus_nested_resource_type_first)
def get_skus_nested_resource_type_first_output(nested_resource_type_first: Optional[pulumi.Input[str]] = None,
                                               provider_namespace: Optional[pulumi.Input[str]] = None,
                                               resource_type: Optional[pulumi.Input[str]] = None,
                                               sku: Optional[pulumi.Input[str]] = None,
                                               opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSkusNestedResourceTypeFirstResult]:
    """
    Gets the sku details for the given resource type and sku name.

    Output-typed variant: the ``lift_output_func`` decorator supplies the
    implementation by lifting :func:`get_skus_nested_resource_type_first`
    over ``pulumi.Input`` arguments, so the body is intentionally ``...``.

    :param str nested_resource_type_first: The first child resource type.
    :param str provider_namespace: The name of the resource provider hosted within ProviderHub.
    :param str resource_type: The resource type.
    :param str sku: The SKU.
    """
    ...
6,619 | fuse modules | # Copyright (c) Alibaba, Inc. and its affiliates.
import os
import sys
import tempfile
from typing import Dict, Optional, Tuple
import torch
import torch.nn as nn
from modelscope.metainfo import Models
from modelscope.models import TorchModel
from modelscope.models.base import Tensor
from modelscope.models.builder import MODELS
from modelscope.utils.audio.audio_utils import update_conf
from modelscope.utils.constant import Tasks
from .cmvn import GlobalCMVN, load_kaldi_cmvn
from .fsmn import FSMN
@MODELS.register_module(
    Tasks.keyword_spotting,
    module_name=Models.speech_kws_fsmn_char_ctc_nearfield)
class FSMNDecorator(TorchModel):
    r""" A decorator of FSMN for integrating into modelscope framework """

    def __init__(self,
                 model_dir: str,
                 cmvn_file: str = None,
                 backbone: dict = None,
                 input_dim: int = 400,
                 output_dim: int = 2599,
                 training: Optional[bool] = False,
                 *args,
                 **kwargs):
        """initialize the fsmn model from the `model_dir` path.

        Args:
            model_dir (str): the model path.
            cmvn_file (str): cmvn file
            backbone (dict): params related to backbone
            input_dim (int): input dimension of network
            output_dim (int): output dimension of network
            training (bool): training or inference mode
        """
        super().__init__(model_dir, *args, **kwargs)
        self.model = None
        self.model_cfg = None

        # Training mode materializes the full torch network; inference mode
        # only keeps a config dict, which ``forward`` returns so the runtime
        # can build/load the model elsewhere.
        if training:
            self.model = self.init_model(cmvn_file, backbone, input_dim,
                                         output_dim)
        else:
            self.model_cfg = {
                'model_workspace': model_dir,
                'config_path': os.path.join(model_dir, 'config.yaml')
            }

    def __del__(self):
        # NOTE(review): nothing in this class assigns ``self.tmp_dir``; the
        # hasattr guard suggests callers or subclasses may attach a
        # TemporaryDirectory — confirm before removing.
        if hasattr(self, 'tmp_dir'):
            self.tmp_dir.cleanup()

    def forward(self, input) -> Dict[str, Tensor]:
        """
        Args:
            input (torch.Tensor): Input tensor (B, T, D)

        Returns:
            The network output in training mode; the stored config dict when
            no model was built or ``input`` is None (inference mode).
        """
        if self.model is not None and input is not None:
            return self.model.forward(input)
        else:
            return self.model_cfg

    def init_model(self, cmvn_file, backbone, input_dim, output_dim):
        # Optional CMVN stage built from Kaldi-format statistics.
        if cmvn_file is not None:
            mean, istd = load_kaldi_cmvn(cmvn_file)
            global_cmvn = GlobalCMVN(
                torch.from_numpy(mean).float(),
                torch.from_numpy(istd).float(),
            )
        else:
            global_cmvn = None

        hidden_dim = 128
        preprocessing = None
        # Unpack the backbone hyper-parameters from the config dict.
        input_affine_dim = backbone['input_affine_dim']
        num_layers = backbone['num_layers']
        linear_dim = backbone['linear_dim']
        proj_dim = backbone['proj_dim']
        left_order = backbone['left_order']
        right_order = backbone['right_order']
        left_stride = backbone['left_stride']
        right_stride = backbone['right_stride']
        output_affine_dim = backbone['output_affine_dim']

        backbone = FSMN(input_dim, input_affine_dim, num_layers, linear_dim,
                        proj_dim, left_order, right_order, left_stride,
                        right_stride, output_affine_dim, output_dim)
        # Classifier/activation are folded into the FSMN output here, so the
        # wrapper stages stay disabled.
        classifier = None
        activation = None

        kws_model = KWSModel(input_dim, output_dim, hidden_dim, global_cmvn,
                             preprocessing, backbone, classifier, activation)
        return kws_model
class KWSModel(nn.Module):
    """Keyword-spotting network assembled from optional stages.

    Pipeline: ``global_cmvn -> preprocessing -> backbone -> classifier ->
    activation``; any stage left as ``None`` is skipped.  The backbone is the
    only mandatory stage and also carries the streaming cache.

    Activation choice by use case:
        nn.Sigmoid for wakeup word
        nn.Identity for speech command dataset
    """

    def __init__(
        self,
        idim: int,
        odim: int,
        hdim: int,
        global_cmvn: Optional[nn.Module],
        preprocessing: Optional[nn.Module],
        backbone: nn.Module,
        classifier: nn.Module,
        activation: nn.Module,
    ):
        """
        Args:
            idim (int): input dimension of network
            odim (int): output dimension of network
            hdim (int): hidden dimension of network
            global_cmvn (nn.Module): cmvn for input feature, (idim, idim)
            preprocessing (nn.Module): feature dimension projection, (idim, hdim)
            backbone (nn.Module): feature extractor of the whole network, (hdim, hdim)
            classifier (nn.Module): output layer of the KWS model, (hdim, odim)
            activation (nn.Module): nn.Identity for training, nn.Sigmoid for inference
        """
        super().__init__()
        self.idim = idim
        self.odim = odim
        self.hdim = hdim
        self.global_cmvn = global_cmvn
        self.preprocessing = preprocessing
        self.backbone = backbone
        self.classifier = classifier
        self.activation = activation

    def to_kaldi_net(self):
        """Export the backbone in Kaldi nnet format (delegated)."""
        return self.backbone.to_kaldi_net()

    def to_pytorch_net(self, kaldi_file):
        """Load backbone weights from a Kaldi nnet file (delegated)."""
        return self.backbone.to_pytorch_net(kaldi_file)

    def forward(
        self,
        x: torch.Tensor,
        in_cache: torch.Tensor = torch.zeros(0, 0, 0, dtype=torch.float)
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Run the stage pipeline; returns (output, updated backbone cache)."""
        feats = x
        for stage in (self.global_cmvn, self.preprocessing):
            if stage is not None:
                feats = stage(feats)
        feats, out_cache = self.backbone(feats, in_cache)
        for stage in (self.classifier, self.activation):
            if stage is not None:
                feats = stage(feats)
        return feats, out_cache

    def METHOD_NAME(self):
        """Fuse the fusable submodules (preprocessing if present, then backbone)."""
        if self.preprocessing is not None:
            self.preprocessing.METHOD_NAME()
        self.backbone.METHOD_NAME()
6,620 | embed input | '''build label embedding model
'''
import math
import pgl
import paddle.fluid as F
import paddle.fluid.layers as L
from pgl.utils import paddle_helper
from module.model_unimp import graph_transformer, linear, attn_appnp
class Arxiv_baseline_model():
    """Baseline graph-transformer model for ogbn-arxiv (Paddle static graph).

    Builds the whole forward graph at construction time; intermediate
    activations are appended to ``self.checkpoints`` (presumably for
    recompute/gradient-checkpointing — TODO confirm against the trainer).
    """

    def __init__(self, gw, hidden_size, num_heads, dropout, num_layers):
        '''Arxiv_baseline_model

        Args:
            gw: pgl graph wrapper exposing structure and node features.
            hidden_size: width of each intermediate transformer layer.
            num_heads: attention heads per layer.
            dropout: feature dropout probability between layers.
            num_layers: total number of graph_transformer layers.
        '''
        self.gw=gw
        self.hidden_size=hidden_size
        self.num_heads= num_heads
        self.dropout= dropout
        self.num_layers=num_layers
        self.out_size=40        # ogbn-arxiv has 40 classes
        self.embed_size=128
        self.checkpoints=[]
        self.build_model()

    def METHOD_NAME(self, feature):
        # Layer-normalize the raw node features (no learned scale/shift).
        lay_norm_attr = F.ParamAttr(initializer=F.initializer.ConstantInitializer(value=1))
        lay_norm_bias = F.ParamAttr(initializer=F.initializer.ConstantInitializer(value=0))
        feature = L.layer_norm(feature, name='layer_norm_feature_input',
                               param_attr=lay_norm_attr,
                               bias_attr=lay_norm_bias,
                               scale=False,
                               shift=False)
        return feature

    def build_model(self):
        # Stack (num_layers - 1) hidden transformer layers, then a final
        # layer that projects straight to class logits (no relu/layer_norm).
        feature_batch = self.METHOD_NAME(self.gw.node_feat['feat'])
        feature_batch = L.dropout(feature_batch, dropout_prob=self.dropout,
                                  dropout_implementation='upscale_in_train')

        for i in range(self.num_layers - 1):
            feature_batch = graph_transformer(str(i), self.gw, feature_batch,
                                              hidden_size=self.hidden_size,
                                              num_heads=self.num_heads,
                                              concat=True, skip_feat=True,
                                              layer_norm=True, relu=True, gate=True)
            if self.dropout > 0:
                feature_batch = L.dropout(feature_batch, dropout_prob=self.dropout,
                                          dropout_implementation='upscale_in_train')
            self.checkpoints.append(feature_batch)

        feature_batch = graph_transformer(str(self.num_layers - 1), self.gw, feature_batch,
                                          hidden_size=self.out_size,
                                          num_heads=self.num_heads,
                                          concat=False, skip_feat=True,
                                          layer_norm=False, relu=False, gate=True)
        self.checkpoints.append(feature_batch)
        self.out_feat = feature_batch

    def train_program(self,):
        # Softmax cross-entropy computed over the training-index subset only.
        label = F.data(name="label", shape=[None, 1], dtype="int64")
        train_idx = F.data(name='train_idx', shape=[None], dtype="int64")
        prediction = L.gather(self.out_feat, train_idx, overwrite=False)
        label = L.gather(label, train_idx, overwrite=False)
        cost = L.softmax_with_cross_entropy(logits=prediction, label=label)
        avg_cost = L.mean(cost)
        self.avg_cost = avg_cost
class Arxiv_label_embedding_model():
    """Label-embedding (UniMP-style) graph transformer for ogbn-arxiv.

    Like the baseline, but the labels of the nodes selected by ``label_idx``
    are embedded and added to those nodes' features before the transformer
    stack, and the final logits are propagated with attention-weighted APPNP.
    """

    def __init__(self, gw, hidden_size, num_heads, dropout, num_layers):
        '''Arxiv_label_embedding_model

        Args:
            gw: pgl graph wrapper exposing structure and node features.
            hidden_size: width of each intermediate transformer layer.
            num_heads: attention heads per layer.
            dropout: feature dropout probability between layers.
            num_layers: total number of graph_transformer layers.
        '''
        self.gw = gw
        self.hidden_size = hidden_size
        self.num_heads = num_heads
        self.dropout = dropout
        self.num_layers = num_layers
        self.out_size = 40       # number of arxiv classes
        self.embed_size = 128    # label-embedding width (= feature width)
        self.checkpoints = []
        self.build_model()

    def label_embed_input(self, feature):
        # Gather the labels of the nodes in ``label_idx``, embed them, and
        # add the embedding to those nodes' normalized features; other nodes
        # keep their original features via scatter-overwrite.
        label = F.data(name="label", shape=[None, 1], dtype="int64")
        label_idx = F.data(name='label_idx', shape=[None], dtype="int64")
        label = L.reshape(label, shape=[-1])
        label = L.gather(label, label_idx, overwrite=False)

        lay_norm_attr = F.ParamAttr(initializer=F.initializer.ConstantInitializer(value=1))
        lay_norm_bias = F.ParamAttr(initializer=F.initializer.ConstantInitializer(value=0))
        feature = L.layer_norm(feature, name='layer_norm_feature_input1',
                               param_attr=lay_norm_attr,
                               bias_attr=lay_norm_bias,
                               scale=False,
                               shift=False)

        embed_attr = F.ParamAttr(initializer=F.initializer.NormalInitializer(loc=0.0, scale=1.0))
        embed = F.embedding(input=label, size=(self.out_size, self.embed_size), param_attr=embed_attr )
        lay_norm_attr = F.ParamAttr(initializer=F.initializer.ConstantInitializer(value=1))
        lay_norm_bias = F.ParamAttr(initializer=F.initializer.ConstantInitializer(value=0))
        embed = L.layer_norm(embed, name='layer_norm_feature_input2',
                             param_attr=lay_norm_attr,
                             bias_attr=lay_norm_bias,
                             scale=False,
                             shift=False)
        embed = L.relu(embed)

        feature_label = L.gather(feature, label_idx, overwrite=False)
        feature_label = feature_label + embed
        feature = L.scatter(feature, label_idx, feature_label, overwrite=True)
        return feature

    def build_model(self):
        label_feature = self.label_embed_input(self.gw.node_feat['feat'])
        feature_batch = L.dropout(label_feature, dropout_prob=0.1,
                                  dropout_implementation='upscale_in_train')

        for i in range(self.num_layers - 1):
            # These layers also return recompute checkpoints (cks).
            feature_batch, _, cks = graph_transformer(str(i), self.gw, feature_batch,
                                                      hidden_size=self.hidden_size,
                                                      num_heads=self.num_heads,
                                                      attn_drop=True,
                                                      concat=True, skip_feat=True,
                                                      layer_norm=True, relu=True, gate=True)
            if self.dropout > 0:
                feature_batch = L.dropout(feature_batch, dropout_prob=self.dropout,
                                          dropout_implementation='upscale_in_train')
            self.checkpoints = self.checkpoints + cks

        # Final layer emits class logits plus the attention used for the
        # APPNP-style propagation (alpha=0.2 over 10 hops).
        feature_batch, attn, cks = graph_transformer(str(self.num_layers - 1), self.gw, feature_batch,
                                                     hidden_size=self.out_size,
                                                     num_heads=self.num_heads+1,
                                                     concat=False, skip_feat=True,
                                                     layer_norm=False, relu=False, gate=True)
        self.checkpoints.append(feature_batch)
        feature_batch = attn_appnp(self.gw, feature_batch, attn, alpha=0.2, k_hop=10)
        self.checkpoints.append(feature_batch)
        self.out_feat = feature_batch

    def train_program(self,):
        # Softmax cross-entropy computed over the training-index subset only.
        label = F.data(name="label", shape=[None, 1], dtype="int64")
        train_idx = F.data(name='train_idx', shape=[None], dtype="int64")
        prediction = L.gather(self.out_feat, train_idx, overwrite=False)
        label = L.gather(label, train_idx, overwrite=False)
        cost = L.softmax_with_cross_entropy(logits=prediction, label=label)
        avg_cost = L.mean(cost)
        self.avg_cost = avg_cost
|
6,621 | method | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "redisenterprise operation-status show",
)
class Show(AAZCommand):
    """Get the status of operation.

    :example: Get the status of an operation
        az redisenterprise operation-status show --operation-id "testoperationid" --location "West US"
    """

    # Command metadata generated by aaz-dev-tools: API version plus the ARM
    # resource path template backing this command.
    _aaz_info = {
        "version": "2023-03-01-preview",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/providers/microsoft.cache/locations/{}/operationsstatus/{}", "2023-03-01-preview"],
        ]
    }

    def _handler(self, command_args):
        # Standard AAZ flow: parse args, run the HTTP operation(s), shape output.
        super()._handler(command_args)
        self._execute_operations()
        return self._output()

    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        # The argument schema is built once and cached on the class.
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""

        _args_schema = cls._args_schema
        _args_schema.location = AAZResourceLocationArg(
            required=True,
            id_part="name",
        )
        _args_schema.operation_id = AAZStrArg(
            options=["-n", "--name", "--operation-id"],
            help="The ID of an ongoing async operation.",
            required=True,
            id_part="child_name_1",
            fmt=AAZStrArgFormat(
                min_length=1,
            ),
        )
        return cls._args_schema

    def _execute_operations(self):
        self.pre_operations()
        self.OperationsStatusGet(ctx=self.ctx)()
        self.post_operations()

    # Extension hooks: logic registered here runs before/after the HTTP
    # operation.
    @register_callback
    def pre_operations(self):
        pass

    @register_callback
    def post_operations(self):
        pass

    def _output(self, *args, **kwargs):
        result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
        return result

    class OperationsStatusGet(AAZHttpOperation):
        """GET .../operationsStatus/{operationId} and store the body in ctx."""

        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            if session.http_response.status_code in [200]:
                return self.on_200(session)
            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/providers/Microsoft.Cache/locations/{location}/operationsStatus/{operationId}",
                **self.url_parameters
            )

        @property
        def METHOD_NAME(self):
            # HTTP verb for this operation.
            return "GET"

        @property
        def error_format(self):
            return "MgmtErrorFormat"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "location", self.ctx.args.location,
                    required=True,
                ),
                **self.serialize_url_param(
                    "operationId", self.ctx.args.operation_id,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2023-03-01-preview",
                    required=True,
                ),
            }
            return parameters

        @property
        def header_parameters(self):
            parameters = {
                **self.serialize_header_param(
                    "Accept", "application/json",
                ),
            }
            return parameters

        def on_200(self, session):
            # Stash the deserialized body so _output() can read it later.
            data = self.deserialize_http_content(session)
            self.ctx.set_var(
                "instance",
                data,
                schema_builder=self._build_schema_on_200
            )

        _schema_on_200 = None

        @classmethod
        def _build_schema_on_200(cls):
            # Response schema is built lazily and cached on the class.
            if cls._schema_on_200 is not None:
                return cls._schema_on_200

            cls._schema_on_200 = AAZObjectType()

            _schema_on_200 = cls._schema_on_200
            _schema_on_200.end_time = AAZStrType(
                serialized_name="endTime",
            )
            _schema_on_200.error = AAZObjectType()
            _ShowHelper._build_schema_error_response_read(_schema_on_200.error)
            _schema_on_200.id = AAZStrType()
            _schema_on_200.name = AAZStrType()
            _schema_on_200.start_time = AAZStrType(
                serialized_name="startTime",
            )
            _schema_on_200.status = AAZStrType()

            return cls._schema_on_200
class _ShowHelper:
    """Helper class for Show"""

    _schema_error_detail_read = None

    @classmethod
    def _build_schema_error_detail_read(cls, _schema):
        # The ARM error-detail schema is recursive (``details`` is a list of
        # error details), so it is built once, cached on the class, and its
        # fields are re-applied to each caller-supplied ``_schema``.
        if cls._schema_error_detail_read is not None:
            _schema.additional_info = cls._schema_error_detail_read.additional_info
            _schema.code = cls._schema_error_detail_read.code
            _schema.details = cls._schema_error_detail_read.details
            _schema.message = cls._schema_error_detail_read.message
            _schema.target = cls._schema_error_detail_read.target
            return

        cls._schema_error_detail_read = _schema_error_detail_read = AAZObjectType()

        error_detail_read = _schema_error_detail_read
        error_detail_read.additional_info = AAZListType(
            serialized_name="additionalInfo",
            flags={"read_only": True},
        )
        error_detail_read.code = AAZStrType(
            flags={"read_only": True},
        )
        error_detail_read.details = AAZListType(
            flags={"read_only": True},
        )
        error_detail_read.message = AAZStrType(
            flags={"read_only": True},
        )
        error_detail_read.target = AAZStrType(
            flags={"read_only": True},
        )

        additional_info = _schema_error_detail_read.additional_info
        additional_info.Element = AAZObjectType()

        _element = _schema_error_detail_read.additional_info.Element
        _element.type = AAZStrType(
            flags={"read_only": True},
        )

        # Recursive step: each detail element uses the same schema.
        details = _schema_error_detail_read.details
        details.Element = AAZObjectType()
        cls._build_schema_error_detail_read(details.Element)

        _schema.additional_info = cls._schema_error_detail_read.additional_info
        _schema.code = cls._schema_error_detail_read.code
        _schema.details = cls._schema_error_detail_read.details
        _schema.message = cls._schema_error_detail_read.message
        _schema.target = cls._schema_error_detail_read.target

    _schema_error_response_read = None

    @classmethod
    def _build_schema_error_response_read(cls, _schema):
        # Wrapper schema: an error response is an object with one ``error``
        # field of the (cached) error-detail schema above.
        if cls._schema_error_response_read is not None:
            _schema.error = cls._schema_error_response_read.error
            return

        cls._schema_error_response_read = _schema_error_response_read = AAZObjectType()

        error_response_read = _schema_error_response_read
        error_response_read.error = AAZObjectType()
        cls._build_schema_error_detail_read(error_response_read.error)

        _schema.error = cls._schema_error_response_read.error
__all__ = ["Show"] |
6,622 | diff config | # Copyright 2016-2018, Pulumi Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import base64
from concurrent import futures
import sys
import time
import dill
import grpc
from google.protobuf import empty_pb2
from pulumi.runtime import proto, rpc
from pulumi.runtime.proto import provider_pb2_grpc, ResourceProviderServicer
from pulumi.dynamic import ResourceProvider
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
PROVIDER_KEY = "__provider"
# _MAX_RPC_MESSAGE_SIZE raises the gRPC Max Message size from `4194304` (4mb) to `419430400` (400mb)
_MAX_RPC_MESSAGE_SIZE = 1024 * 1024 * 400
_GRPC_CHANNEL_OPTIONS = [("grpc.max_receive_message_length", _MAX_RPC_MESSAGE_SIZE)]
def get_provider(props) -> ResourceProvider:
    """Rehydrate the pickled ResourceProvider stored under PROVIDER_KEY."""
    return dill.loads(base64.b64decode(props[PROVIDER_KEY]))
class DynamicResourceProviderServicer(ResourceProviderServicer):
    """gRPC servicer backing Pulumi dynamic providers.

    Every RPC deserializes the resource properties, rehydrates the user's
    pickled :class:`ResourceProvider` (stored under ``PROVIDER_KEY``) via
    :func:`get_provider`, and delegates to the matching provider method.
    """

    def __init__(self):
        pass

    @staticmethod
    def _serialize(props):
        """Serialize ``props`` to protobuf on a private, short-lived event loop.

        gRPC invokes each handler on a worker thread, so a fresh loop per call
        avoids sharing loop state across threads; the ``finally`` guarantees
        the loop is closed even when serialization raises (previously it
        leaked on error).
        """
        loop = asyncio.new_event_loop()
        try:
            return loop.run_until_complete(rpc.serialize_properties(props, {}))
        finally:
            loop.close()

    @staticmethod
    def _provider_for(olds, news):
        """Pick the provider from ``news`` unless it is still unknown
        (e.g. during preview), in which case fall back to ``olds``."""
        if news[PROVIDER_KEY] == rpc.UNKNOWN:
            return get_provider(olds)
        return get_provider(news)

    def CheckConfig(self, request, context):
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("CheckConfig is not implemented by the dynamic provider")
        raise NotImplementedError(
            "CheckConfig is not implemented by the dynamic provider"
        )

    def METHOD_NAME(self, request, context):
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("DiffConfig is not implemented by the dynamic provider")
        raise NotImplementedError(
            "DiffConfig is not implemented by the dynamic provider"
        )

    def Invoke(self, request, context):
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Invoke is not implemented by the dynamic provider")
        raise NotImplementedError(f"unknown function {request.token}")

    def Diff(self, request, context):
        """Map the user provider's DiffResult onto the proto DiffResponse."""
        olds = rpc.deserialize_properties(request.olds, True)
        news = rpc.deserialize_properties(request.news, True)
        provider = self._provider_for(olds, news)

        result = provider.diff(request.id, olds, news)  # pylint: disable=no-member
        fields = {}
        # Tri-state change flag: None means the provider could not tell.
        if result.changes is None:
            fields["changes"] = proto.DiffResponse.DIFF_UNKNOWN  # pylint: disable=no-member
        elif result.changes:
            fields["changes"] = proto.DiffResponse.DIFF_SOME  # pylint: disable=no-member
        else:
            fields["changes"] = proto.DiffResponse.DIFF_NONE  # pylint: disable=no-member
        if result.replaces is not None:
            fields["replaces"] = result.replaces
        if result.delete_before_replace is not None:
            fields["deleteBeforeReplace"] = result.delete_before_replace
        return proto.DiffResponse(**fields)

    def Create(self, request, context):
        """Create the resource; the provider travels with the outputs."""
        props = rpc.deserialize_properties(request.properties)
        provider = get_provider(props)
        result = provider.create(props)  # pylint: disable=no-member
        outs = result.outs if result.outs is not None else {}
        outs[PROVIDER_KEY] = props[PROVIDER_KEY]
        return proto.CreateResponse(id=result.id, properties=self._serialize(outs))

    def Read(self, request, context):
        """Refresh the resource's live state."""
        props = rpc.deserialize_properties(request.properties)
        provider = get_provider(props)
        result = provider.read(request.id, props)  # pylint: disable=no-member
        # Bug fix: tolerate providers whose read() returns no outputs,
        # mirroring Create/Update (previously ``outs[PROVIDER_KEY]`` raised a
        # TypeError when result.outs was None).
        outs = result.outs if result.outs is not None else {}
        outs[PROVIDER_KEY] = props[PROVIDER_KEY]
        return proto.ReadResponse(id=result.id, properties=self._serialize(outs))

    def Update(self, request, context):
        """Apply an in-place update to the resource."""
        olds = rpc.deserialize_properties(request.olds)
        news = rpc.deserialize_properties(request.news)
        provider = get_provider(news)
        result = provider.update(request.id, olds, news)  # pylint: disable=no-member
        outs = result.outs if result.outs is not None else {}
        outs[PROVIDER_KEY] = news[PROVIDER_KEY]
        return proto.UpdateResponse(properties=self._serialize(outs))

    def Delete(self, request, context):
        props = rpc.deserialize_properties(request.properties)
        provider = get_provider(props)
        provider.delete(request.id, props)  # pylint: disable=no-member
        return empty_pb2.Empty()

    def Cancel(self, request, context):
        return empty_pb2.Empty()

    def Check(self, request, context):
        """Validate inputs and report per-property failures."""
        olds = rpc.deserialize_properties(request.olds, True)
        news = rpc.deserialize_properties(request.news, True)
        provider = self._provider_for(olds, news)

        result = provider.check(olds, news)  # pylint: disable=no-member
        inputs = result.inputs
        inputs[PROVIDER_KEY] = news[PROVIDER_KEY]
        failures = [
            proto.CheckFailure(property=f.property, reason=f.reason)
            for f in result.failures
        ]
        return proto.CheckResponse(inputs=self._serialize(inputs), failures=failures)

    def Configure(self, request, context):
        return proto.ConfigureResponse(acceptSecrets=False)

    def GetPluginInfo(self, request, context):
        return proto.PluginInfo(version="0.1.0")

    def GetSchema(self, request, context):
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("GetSchema is not implemented by the dynamic provider")
        raise NotImplementedError(
            "GetSchema is not implemented by the dynamic provider"
        )
def main():
    """Start the dynamic-provider gRPC server and print its port.

    The Pulumi engine reads the chosen port from stdout; the process then
    serves until interrupted.
    """
    servicer = DynamicResourceProviderServicer()
    server = grpc.server(
        futures.ThreadPoolExecutor(max_workers=4),  # pylint: disable=consider-using-with
        options=_GRPC_CHANNEL_OPTIONS,
    )
    provider_pb2_grpc.add_ResourceProviderServicer_to_server(servicer, server)
    port = server.add_insecure_port(address="127.0.0.1:0")
    server.start()
    # The engine parses this line to discover the ephemeral port.
    sys.stdout.buffer.write(f"{port}\n".encode())
    try:
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        server.stop(0)


main()
6,623 | via url | from datetime import timedelta
from urllib.parse import quote
import requests
from lms.services.exceptions import ExternalRequestError, SerializableError
from lms.services.http import HTTPService
from lms.services.jstor._article_metadata import ArticleMetadata
from lms.services.jwt import JWTService
from lms.views.helpers import METHOD_NAME
class ArticleNotFound(SerializableError):
    """Raised when JSTOR reports no article for the given id or DOI."""

    def __init__(self, article_id):
        super().__init__(message=f"Article '{article_id}' not found")
class JSTORService:
    """An interface for dealing with JSTOR documents."""

    DEFAULT_DOI_PREFIX = "10.2307"
    """Used when no DOI prefix can be found."""

    # pylint: disable=too-many-arguments
    def __init__(self, api_url, secret, enabled, site_code, headers=None):
        """
        Initialise the JSTOR service.

        :param api_url: JSTOR API url
        :param secret: Secret for authenticating with JSTOR
        :param enabled: Whether JSTOR is enabled on this instance
        :param site_code: The site code to use to identify the organization
        :param headers: Additional headers to pass onto JSTOR when making
            requests
        """
        self._api_url = api_url
        self._secret = secret
        self._enabled = enabled
        self._site_code = site_code

        self._http = HTTPService()
        # NOTE(review): this replaces the session's headers wholesale; when
        # ``headers`` is None the session ends up with no default headers —
        # confirm HTTPService tolerates that.
        self._http.session.headers = headers

    @property
    def enabled(self) -> bool:
        """Get whether this instance is configured for JSTOR."""
        # All three settings must be present for the integration to be usable.
        return bool(self._enabled and self._api_url and self._site_code)

    def METHOD_NAME(self, request, document_url):
        """
        Get a VIA url for a document.

        :param request: Pyramid request
        :param document_url: The URL to annotate
        :return: A URL for Via configured to launch the requested document
        :raises ExternalRequestError: If we get a value which doesn't look like
            a public URL from JSTOR
        """
        # Get a signed S3 URL for the given JSTOR URL.
        s3_url = self._api_request(
            "/pdf/{doi}", doi=document_url.replace("jstor://", "")
        ).text

        if not s3_url.startswith("https://"):
            raise ExternalRequestError(
                f"Expected to get an S3 URL but got: '{s3_url}' instead"
            )

        return METHOD_NAME(
            request,
            document_url=s3_url,
            content_type="pdf",
            # Show content partner banner in client for JSTOR.
            options={"via.client.contentPartner": "jstor"},
        )

    def get_article_metadata(self, article_id: str) -> dict:
        """
        Fetch metadata about a JSTOR article.

        :param article_id: A JSTOR article ID or DOI
        :raise ArticleNotFound: If the article cannot be found
        :raise ExternalRequestError: For any unexpected errors
        """
        try:
            response = self._api_request("/metadata/{doi}", doi=article_id)
        except ExternalRequestError as err:
            # We can distinguish between a 404 when the URL is wrong, and when
            # the URL is correct but the article can't be found by carefully
            # inspecting the response. There's no documentation of this, it's
            # all from experimentation.
            resp = err.response
            if (
                resp.status_code == 404
                and resp.headers.get("Content-Type", "").startswith("application/json")
                and resp.text == "null"
            ):
                raise ArticleNotFound(article_id) from err

            raise

        return ArticleMetadata.from_response(response).as_dict()

    def thumbnail(self, article_id: str) -> str:
        """
        Fetch a thumbnail image for an article.

        Returns a `data:` URI with base64-encoded data which can be used as the
        source for an `<img>` element.

        :param article_id: A JSTOR article ID or DOI
        :raise ExternalRequestError: If the response doesn't look like a valid
            `data:` URI
        :raise ArticleNotFound: If the article cannot be found
        """
        try:
            data_uri = self._api_request(
                "/thumbnail/{doi}",
                doi=article_id,
                params={
                    # `offset` specifies the page number. The default value of 0
                    # returns the thumbnail of the last page. Setting it to 1
                    # returns the first page.
                    "offset": 1,
                    # The frontend displays the image with a width of ~140px,
                    # so 280px has enough resolution for a 2x device pixel ratio.
                    # The height will be adjusted to maintain the aspect ratio.
                    "width": 280,
                },
            ).text
        except ExternalRequestError as err:
            # We can distinguish between a 404 when the URL is wrong, and when
            # the URL is correct but the article can't be found by carefully
            # inspecting the response. There's no documentation of this, it's
            # all from experimentation.
            resp = err.response
            if resp.status_code == 404 and resp.headers.get(
                "Content-Type", ""
            ).startswith("text/plain"):
                raise ArticleNotFound(article_id) from err

            raise

        if not data_uri.startswith("data:"):
            raise ExternalRequestError(
                f"Expected to get data URI but got '{data_uri}' instead"
            )

        return data_uri

    def _api_request(self, path_template, doi, params=None) -> requests.Response:
        """
        Call the JSTOR API with a URL based on an article id.

        See the JSTOR API's `/docs` endpoint for details.
        """
        # Bare article ids get the default DOI prefix prepended.
        if "/" not in doi:
            doi = f"{self.DEFAULT_DOI_PREFIX}/{doi}"

        # The slash separating prefix and suffix is kept unescaped on purpose.
        url = self._api_url.rstrip("/") + path_template.format(doi=quote(doi, safe="/"))

        # Requests are authenticated with a short-lived JWT carrying the
        # organization's site code.
        token = JWTService.encode_with_secret(
            {"site_code": self._site_code},
            secret=self._secret,
            lifetime=timedelta(hours=1),
        )

        return self._http.get(
            url=url, headers={"Authorization": f"Bearer {token}"}, params=params
        )
6,624 | test multivariate normal size types | import sys
import pytest
from numpy.testing import (
assert_, assert_array_equal, assert_raises,
)
import numpy as np
from numpy import random
class TestRegression:
    """Regression tests for numpy's legacy random API (`numpy.random` /
    `RandomState`).

    Many of these tests seed the generator and compare against hard-coded
    variates, so the exact sequence and order of RNG calls is part of the
    contract being tested.
    """

    def test_VonMises_range(self):
        # Make sure generated random variables are in [-pi, pi].
        # Regression test for ticket #986.
        for mu in np.linspace(-7., 7., 5):
            r = random.vonmises(mu, 1, 50)
            assert_(np.all(r > -np.pi) and np.all(r <= np.pi))

    def test_hypergeometric_range(self):
        # Test for ticket #921
        assert_(np.all(random.hypergeometric(3, 18, 11, size=10) < 4))
        assert_(np.all(random.hypergeometric(18, 3, 11, size=10) > 0))
        # Test for ticket #5623
        args = [
            (2**20 - 2, 2**20 - 2, 2**20 - 2),  # Check for 32-bit systems
        ]
        is_64bits = sys.maxsize > 2**32
        if is_64bits and sys.platform != 'win32':
            # Check for 64-bit systems
            args.append((2**40 - 2, 2**40 - 2, 2**40 - 2))
        for arg in args:
            assert_(random.hypergeometric(*arg) > 0)

    def test_logseries_convergence(self):
        # Test for ticket #923
        N = 1000
        random.seed(0)
        rvsn = random.logseries(0.8, size=N)
        # these two frequency counts should be close to theoretical
        # numbers with this large sample
        # theoretical large N result is 0.49706795
        freq = np.sum(rvsn == 1) / N
        msg = f'Frequency was {freq:f}, should be > 0.45'
        assert_(freq > 0.45, msg)
        # theoretical large N result is 0.19882718
        freq = np.sum(rvsn == 2) / N
        msg = f'Frequency was {freq:f}, should be < 0.23'
        assert_(freq < 0.23, msg)

    def test_shuffle_mixed_dimension(self):
        # Test for trac ticket #2074
        for t in [[1, 2, 3, None],
                  [(1, 1), (2, 2), (3, 3), None],
                  [1, (2, 2), (3, 3), None],
                  [(1, 1), 2, 3, None]]:
            random.seed(12345)
            shuffled = list(t)
            random.shuffle(shuffled)
            # The expected permutation is fixed by the seed above.
            expected = np.array([t[0], t[3], t[1], t[2]], dtype=object)
            assert_array_equal(np.array(shuffled, dtype=object), expected)

    def test_call_within_randomstate(self):
        # Check that custom RandomState does not call into global state
        m = random.RandomState()
        res = np.array([0, 8, 7, 2, 1, 9, 4, 7, 0, 3])
        for i in range(3):
            random.seed(i)
            m.seed(4321)
            # If m.state is not honored, the result will change
            assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res)

    def METHOD_NAME(self):
        # Test for multivariate_normal issue with 'size' argument.
        # Check that the multivariate_normal size argument can be a
        # numpy integer.
        random.multivariate_normal([0], [[0]], size=1)
        random.multivariate_normal([0], [[0]], size=np.int_(1))
        random.multivariate_normal([0], [[0]], size=np.int64(1))

    def test_beta_small_parameters(self):
        # Test that beta with small a and b parameters does not produce
        # NaNs due to roundoff errors causing 0 / 0, gh-5851
        random.seed(1234567890)
        x = random.beta(0.0001, 0.0001, size=100)
        assert_(not np.any(np.isnan(x)), 'Nans in random.beta')

    def test_choice_sum_of_probs_tolerance(self):
        # The sum of probs should be 1.0 with some tolerance.
        # For low precision dtypes the tolerance was too tight.
        # See numpy github issue 6123.
        random.seed(1234)
        a = [1, 2, 3]
        counts = [4, 4, 2]
        for dt in np.float16, np.float32, np.float64:
            probs = np.array(counts, dtype=dt) / sum(counts)
            c = random.choice(a, p=probs)
            assert_(c in a)
            # Probabilities that clearly do not sum to 1 must still raise.
            assert_raises(ValueError, random.choice, a, p=probs*0.9)

    def test_shuffle_of_array_of_different_length_strings(self):
        # Test that permuting an array of different length strings
        # will not cause a segfault on garbage collection
        # Tests gh-7710
        random.seed(1234)
        a = np.array(['a', 'a' * 1000])
        for _ in range(100):
            random.shuffle(a)
        # Force Garbage Collection - should not segfault.
        import gc
        gc.collect()

    def test_shuffle_of_array_of_objects(self):
        # Test that permuting an array of objects will not cause
        # a segfault on garbage collection.
        # See gh-7719
        random.seed(1234)
        a = np.array([np.arange(1), np.arange(4)], dtype=object)
        for _ in range(1000):
            random.shuffle(a)
        # Force Garbage Collection - should not segfault.
        import gc
        gc.collect()

    def test_permutation_subclass(self):
        # ndarray subclasses must survive permutation without corruption.
        class N(np.ndarray):
            pass
        random.seed(1)
        orig = np.arange(3).view(N)
        perm = random.permutation(orig)
        assert_array_equal(perm, np.array([0, 2, 1]))
        assert_array_equal(orig, np.arange(3).view(N))

        # Objects exposing __array__ must also be permutable, leaving the
        # underlying buffer untouched.
        class M:
            a = np.arange(5)

            def __array__(self):
                return self.a
        random.seed(1)
        m = M()
        perm = random.permutation(m)
        assert_array_equal(perm, np.array([2, 1, 4, 0, 3]))
        assert_array_equal(m.__array__(), np.arange(5))

    def test_warns_byteorder(self):
        # GH 13159
        other_byteord_dt = '<i4' if sys.byteorder == 'big' else '>i4'
        with pytest.deprecated_call(match='non-native byteorder is not'):
            random.randint(0, 200, size=10, dtype=other_byteord_dt)

    def test_named_argument_initialization(self):
        # GH 13669
        rs1 = np.random.RandomState(123456789)
        rs2 = np.random.RandomState(seed=123456789)
        assert rs1.randint(0, 100) == rs2.randint(0, 100)

    def test_choice_retun_dtype(self):
        # GH 9867
        c = np.random.choice(10, p=[.1]*10, size=2)
        assert c.dtype == np.dtype(int)
        c = np.random.choice(10, p=[.1]*10, replace=False, size=2)
        assert c.dtype == np.dtype(int)
        c = np.random.choice(10, size=2)
        assert c.dtype == np.dtype(int)
        c = np.random.choice(10, replace=False, size=2)
        assert c.dtype == np.dtype(int)

    @pytest.mark.skipif(np.iinfo('l').max < 2**32,
                        reason='Cannot test with 32-bit C long')
    def test_randint_117(self):
        # GH 14189
        random.seed(0)
        expected = np.array([2357136044, 2546248239, 3071714933, 3626093760,
                             2588848963, 3684848379, 2340255427, 3638918503,
                             1819583497, 2678185683], dtype='int64')
        actual = random.randint(2**32, size=10)
        assert_array_equal(actual, expected)

    def test_p_zero_stream(self):
        # Regression test for gh-14522. Ensure that future versions
        # generate the same variates as version 1.16.
        np.random.seed(12345)
        assert_array_equal(random.binomial(1, [0, 0.25, 0.5, 0.75, 1]),
                           [0, 0, 0, 1, 1])

    def test_n_zero_stream(self):
        # Regression test for gh-14522. Ensure that future versions
        # generate the same variates as version 1.16.
        np.random.seed(8675309)
        expected = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                             [3, 4, 2, 3, 3, 1, 5, 3, 1, 3]])
        assert_array_equal(random.binomial([[0], [10]], 0.25, size=(2, 10)),
                           expected)
def test_multinomial_empty():
    # gh-20483: an empty pvals sequence must be accepted and produce an
    # empty trailing axis rather than raising.
    empty_draw = random.multinomial(10, [])
    assert empty_draw.shape == (0,)
    shaped_draw = random.multinomial(3, [], size=(7, 5, 3))
    assert shaped_draw.shape == (7, 5, 3, 0)
def test_multinomial_1d_pval():
    # gh-20483
    # A scalar (0-d) pvals argument must be rejected with a TypeError
    # rather than being silently broadcast.
    with pytest.raises(TypeError, match="pvals must be a 1-d"):
        random.multinomial(10, 0.3)
6,625 | test decompress | #!/usr/bin/env python
#
# Project: Bitshuffle-LZ4 decompression in OpenCL
# https://github.com/silx-kit/silx
#
# Copyright (C) 2022-2023 European Synchrotron Radiation Facility,
# Grenoble, France
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""
Test suite for byte-offset decompression
"""
__authors__ = ["Jérôme Kieffer"]
__contact__ = "jerome.kieffer@esrf.eu"
__license__ = "MIT"
__copyright__ = "2022 European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "07/11/2022"
import struct
import numpy
import pytest
try:
import bitshuffle
except ImportError:
bitshuffle = None
from silx.opencl.common import ocl, pyopencl
from silx.opencl.codec.bitshuffle_lz4 import BitshuffleLz4
# (dtype, shape) pairs exercised by every parametrized test below.
# NOTE(review): the odd, non-round sizes look deliberately chosen to catch
# alignment/padding assumptions in the decompression kernels — not documented,
# confirm before changing them.
TESTCASES = (  # dtype, shape
    ("uint64", (103, 503)),
    ("int64", (101, 509)),
    ("uint32", (229, 659)),
    ("int32", (233, 653)),
    ("uint16", (743, 647)),
    ("int16", (751, 643)),
    ("uint8", (157, 1373)),
    ("int8", (163, 1367)),
)
@pytest.mark.skipif(
    not ocl or not pyopencl or bitshuffle is None,
    reason="PyOpenCl or bitshuffle is missing"
)
class TestBitshuffleLz4:
    """Test pyopencl bishuffle+LZ4 decompression"""

    @staticmethod
    def _create_test_data(shape, lam=100, dtype="uint32"):
        """Create test (image, compressed stream) pair.

        :param shape: Shape of test image
        :param lam: Expectation of interval argument for numpy.random.poisson
        :param dtype: numpy dtype name for the reference image
        :return: (reference image array, compressed stream)
        """
        ref = numpy.random.poisson(lam, size=shape).astype(dtype)
        # Stream layout: 8-byte big-endian uncompressed byte count, a 4-byte
        # field (zero here — presumably the block-size header of the
        # bitshuffle stream format; confirm against the bitshuffle spec),
        # then the LZ4-compressed payload.
        raw = struct.pack(">Q", ref.nbytes) + b"\x00"*4 + bitshuffle.compress_lz4(ref).tobytes()
        return ref, raw

    @pytest.mark.parametrize("dtype,shape", TESTCASES)
    def METHOD_NAME(self, dtype, shape):
        """
        Tests the byte offset decompression on GPU with various configuration
        """
        ref, raw = self._create_test_data(shape=shape, dtype=dtype)
        bs = BitshuffleLz4(len(raw), numpy.prod(shape), dtype=dtype)
        res = bs.decompress(raw).get()
        assert numpy.array_equal(res, ref.ravel()), "Checks decompression works"

    @pytest.mark.parametrize("dtype,shape", TESTCASES)
    def test_decompress_from_buffer(self, dtype, shape):
        """Test reading compressed data from pyopencl Buffer"""
        ref, raw = self._create_test_data(shape=shape, dtype=dtype)
        # NOTE(review): 0 is passed as the compressed size here (unlike the
        # test above) — presumably the codec sizes itself from the device
        # buffer; confirm in BitshuffleLz4.
        bs = BitshuffleLz4(0, numpy.prod(shape), dtype=dtype)
        buffer = pyopencl.Buffer(
            bs.ctx,
            flags=pyopencl.mem_flags.COPY_HOST_PTR | pyopencl.mem_flags.READ_ONLY,
            hostbuf=raw,
        )
        res = bs.decompress(buffer).get()
        assert numpy.array_equal(res, ref.ravel()), "Checks decompression works"

    @pytest.mark.parametrize("dtype,shape", TESTCASES)
    def test_decompress_from_array(self, dtype, shape):
        """Test reading compressed data from pyopencl Array"""
        ref, raw = self._create_test_data(shape=shape, dtype=dtype)
        bs = BitshuffleLz4(0, numpy.prod(shape), dtype=dtype)
        array = pyopencl.array.to_device(
            bs.queue,
            numpy.frombuffer(raw, dtype=numpy.uint8),
            array_queue=bs.queue,
        )
        res = bs.decompress(array).get()
        assert numpy.array_equal(res, ref.ravel()), "Checks decompression works"
6,626 | create child files | # coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: datalake_samples_directory_async.py
DESCRIPTION:
This sample demonstrates create directory, rename directory, get directory properties, delete directory etc.
USAGE:
python datalake_samples_directory_async.py
Set the environment variables with your own values before running the sample:
1) STORAGE_ACCOUNT_NAME - the storage account name
2) STORAGE_ACCOUNT_KEY - the storage account key
"""
import asyncio
import os
import random
import uuid
from azure.core.exceptions import ResourceExistsError
from azure.storage.filedatalake.aio import (
DataLakeServiceClient,
)
async def directory_sample(filesystem_client):
    """Demonstrate directory operations: create, populate, rename,
    get properties, and delete.

    The ``[START ...]``/``[END ...]`` comment markers are anchors used to
    extract these snippets into the published documentation — keep them
    wrapped around the exact statements they label.

    :param filesystem_client: an async FileSystemClient for a filesystem
        that this sample may freely modify
    """
    # create a parent directory
    dir_name = "testdir"
    print("Creating a directory named '{}'.".format(dir_name))
    # Create directory from file system client
    await filesystem_client.create_directory(dir_name)
    directory_client = filesystem_client.get_directory_client(dir_name)
    try:
        # Create the existing directory again will throw exception
        # [START create_directory]
        await directory_client.create_directory()
        # [END create_directory]
    except ResourceExistsError:
        # Expected: the directory was just created above.
        pass
    # populate the directory with some child files
    await METHOD_NAME(directory_client, 35)
    # rename the directory
    # [START rename_directory]
    new_dir_name = "testdir2"
    print("Renaming the directory named '{}' to '{}'.".format(dir_name, new_dir_name))
    # rename_directory expects "<filesystem>/<new name>".
    new_directory = await directory_client\
        .rename_directory(new_name=directory_client.file_system_name + '/' + new_dir_name)
    # [END rename_directory]
    # display the properties of the new directory to make sure it was renamed successfully
    # [START get_directory_properties]
    props = await new_directory.get_directory_properties()
    # [END get_directory_properties]
    print("Properties of the new directory named '{}' are: {}.".format(new_dir_name, props))
    # remove the newly renamed directory
    print("Removing the directory named '{}'.".format(new_dir_name))
    # [START delete_directory]
    await new_directory.delete_directory()
    # [END delete_directory]
async def METHOD_NAME(directory_client, num_child_files):
    """Create ``num_child_files`` empty files under ``directory_client``,
    running the creations concurrently so the sample stays fast.

    :param directory_client: async DataLakeDirectoryClient to populate
    :param num_child_files: number of files to create (may be 0)
    """
    async def create_file():
        # Random, collision-free name for each child file.
        file_name = str(uuid.uuid4()).replace('-', '')
        file_client = directory_client.get_file_client(file_name)
        await file_client.create_file()

    # asyncio.gather (unlike asyncio.wait) propagates the first failure
    # instead of silently dropping task exceptions, and it accepts an empty
    # argument list when num_child_files == 0.
    await asyncio.gather(*(create_file() for _ in range(num_child_files)))
    print("Created {} files under the directory '{}'.".format(num_child_files, directory_client.path_name))
async def main():
    """Entry point: build a DataLake service client from environment
    credentials, create a throw-away filesystem, run the directory sample
    against it, and always clean the filesystem up afterwards."""
    name = os.getenv('STORAGE_ACCOUNT_NAME', "")
    key = os.getenv('STORAGE_ACCOUNT_KEY', "")
    # Assemble the account endpoint and the service client.
    endpoint = "{}://{}.dfs.core.windows.net".format("https", name)
    service = DataLakeServiceClient(account_url=endpoint, credential=key)
    async with service:
        # Random filesystem name so repeated runs do not collide.
        filesystem_name = "testfs{}".format(random.randint(1, 1000))
        print("Generating a test filesystem named '{}'.".format(filesystem_name))
        fs_client = await service.create_file_system(file_system=filesystem_name)
        try:
            # Run the actual demonstration code.
            await directory_sample(fs_client)
        finally:
            # Always remove the demo filesystem, even if the sample failed.
            await fs_client.delete_file_system()
if __name__ == '__main__':
asyncio.run(main()) |
6,627 | patch | # Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import re
from spack.package import *
class RocprofilerDev(CMakePackage):
    """ROCPROFILER library for AMD HSA runtime API extension support"""

    homepage = "https://github.com/ROCm-Developer-Tools/rocprofiler"
    git = "https://github.com/ROCm-Developer-Tools/rocprofiler.git"
    url = "https://github.com/ROCm-Developer-Tools/rocprofiler/archive/refs/tags/rocm-5.4.3.tar.gz"
    tags = ["rocm"]

    maintainers("srekolam", "renjithravindrankannath")
    # Shared-library name used by determine_version() for external detection.
    libraries = ["librocprofiler64"]

    version("5.4.3", sha256="86c3f43ee6cb9808796a21409c853cc8fd496578b9eef4de67ca77830229cac1")
    version("5.4.0", sha256="0322cbe5d1d3182e616f472da31f0707ad6040833c38c28f2b39381a85210f43")
    version("5.3.3", sha256="07ee28f3420a07fc9d45910e78ad7961b388109cfc0e74cfdf2666789e6af171")
    version("5.3.0", sha256="b0905a329dc1c97a362b951f3f8ef5da9d171cabb001ed4253bd59a2742e7d39")
    version("5.2.3", sha256="4ed22e86633ab177eed85fed8994fcb71017c4c4774998e4d3fc36b6c0a15eac")
    version("5.2.1", sha256="c6768ec428590aadfb0e7ef6e22b8dc5ac8ed97babeb56db07f2d5d41cd122e2")
    version("5.2.0", sha256="1f4db27b56ef1863d4c9e1d96bac9117d66be45156d0637cfe4fd38cae61a23a")
    version("5.1.3", sha256="eca7be451c7bf000fd9c75683e7f5dfbed32dbb385b5ac685d2251ee8c3abc96")
    version("5.1.0", sha256="4a1c6ed887b0159392406af8796508df2794353a4c3aacc801116044fb4a10a5")
    version(
        "5.0.2",
        sha256="48f58c3c16dd45fead2086f89a175f74636e81bc2437e30bb6e9361b1083e71d",
        deprecated=True,
    )
    version(
        "5.0.0",
        sha256="2ed521f400e4aafd17405c2f9ad2fb3b906a982d3767b233122d9c2964c3245f",
        deprecated=True,
    )
    version(
        "4.5.2",
        sha256="baa59826f8fb984993c03d05e2e3cdf0b830b08f8056b18ba206dfbaa367aca9",
        deprecated=True,
    )
    version(
        "4.5.0",
        sha256="9b47b086d28fc831dbe0f83ec7e4640057b97edc961f2f050a0968633f32a06b",
        deprecated=True,
    )
    version(
        "4.3.1",
        sha256="c6f5fa192c9cdb32553d24ed5c847107d312042e39fa3dd17c83e237c9542a2d",
        deprecated=True,
    )
    version(
        "4.3.0",
        sha256="3b876a0e601d2c6ae56ddf2a6027afe45b3533f4445b0c2da748d020b6b00cf2",
        deprecated=True,
    )
    version(
        "4.2.0",
        sha256="c5888eda1404010f88219055778cfeb00d9c21901e172709708720008b1af80f",
        deprecated=True,
    )
    version(
        "4.1.0",
        sha256="2eead5707016da606d636b97f3af1c98cb471da78659067d5a77d4a2aa43ef4c",
        deprecated=True,
    )
    version(
        "4.0.0",
        sha256="e9960940d1ec925814a0e55ee31f5fc2fb23fa839d1c6a909f72dd83f657fb25",
        deprecated=True,
    )
    version(
        "3.10.0",
        sha256="fbf5ce9fbc13ba2b3f9489838e00b54885aba92336f055e8b03fef3e3347071e",
        deprecated=True,
    )
    version(
        "3.9.0",
        sha256="f07ddd9bf2f86550c8d243f887e9bde9d4f2ceec81ecc6393012aaf2a45999e8",
        deprecated=True,
    )
    version(
        "3.8.0",
        sha256="38ad3ac20f60f3290ce750c34f0aad442354b1d0a56b81167a018e44ecdf7fff",
        deprecated=True,
    )
    version(
        "3.7.0",
        sha256="d3f03bf850cbd86ca9dfe6e6cc6f559d8083b0f3ea4711d8260b232cb6fdd1cc",
        deprecated=True,
    )
    version(
        "3.5.0",
        sha256="c42548dd467b7138be94ad68c715254eb56a9d3b670ccf993c43cd4d43659937",
        deprecated=True,
    )

    depends_on("cmake@3:", type="build")
    # Each rocprofiler release is pinned to the matching ROCm component
    # versions below.
    for ver in [
        "3.5.0",
        "3.7.0",
        "3.8.0",
        "3.9.0",
        "3.10.0",
        "4.0.0",
        "4.1.0",
        "4.2.0",
        "4.3.0",
        "4.3.1",
        "4.5.0",
        "4.5.2",
        "5.0.0",
        "5.0.2",
        "5.1.0",
        "5.1.3",
        "5.2.0",
        "5.2.1",
        "5.2.3",
        "5.3.0",
        "5.3.3",
        "5.4.0",
        "5.4.3",
    ]:
        depends_on("hsakmt-roct@" + ver, when="@" + ver)
        depends_on("hsa-rocr-dev@" + ver, when="@" + ver)
        depends_on("rocminfo@" + ver, when="@" + ver)
        depends_on("roctracer-dev-api@" + ver, when="@" + ver)
    depends_on("numactl", type="link", when="@4.3.1")

    # See https://github.com/ROCm-Developer-Tools/rocprofiler/pull/50
    # NOTE(review): METHOD_NAME is used both as the class-level `patch`
    # directive here and as the post-patch hook defined below.
    METHOD_NAME("fix-includes.patch")
    METHOD_NAME("0001-Continue-build-in-absence-of-aql-profile-lib.patch", when="@5.3:")

    def METHOD_NAME(self):
        # Add the hsakmt-roct include directory to the test build's include
        # path (string=True: treat the pattern as a literal, not a regex).
        filter_file(
            "${HSA_RUNTIME_LIB_PATH}/../include",
            "${HSA_RUNTIME_LIB_PATH}/../include ${HSA_KMT_LIB_PATH}/..\
            /include",
            "test/CMakeLists.txt",
            string=True,
        )

    @classmethod
    def determine_version(cls, lib):
        """Extract "X.Y.Z" from a library filename of the form
        lib*.so.<major>.<minor>.<XYYZZ>, or return None if it does not match."""
        match = re.search(r"lib\S*\.so\.\d+\.\d+\.(\d)(\d\d)(\d\d)", lib)
        if match:
            ver = "{0}.{1}.{2}".format(
                int(match.group(1)), int(match.group(2)), int(match.group(3))
            )
        else:
            ver = None
        return ver

    def cmake_args(self):
        # Point the build at the roctracer API headers and the ROCm root.
        return [
            self.define(
                "PROF_API_HEADER_PATH", self.spec["roctracer-dev-api"].prefix.roctracer.include.ext
            ),
            self.define("ROCM_ROOT_DIR", self.spec["hsakmt-roct"].prefix.include),
        ]
6,628 | test description prev | from datetime import datetime
from os.path import dirname, join
import pytest
from city_scrapers_core.constants import COMMISSION, PASSED
from city_scrapers_core.utils import file_response
from freezegun import freeze_time
from scrapy.settings import Settings
from city_scrapers.spiders.chi_board_elections import ChiBoardElectionsSpider
# Module-level fixture: the saved upcoming-meetings page, parsed once at
# import time so every test below can assert against the same results.
test_response = file_response(
    join(dirname(__file__), "files", "chi_board_elections.html"),
    url="https://app.chicagoelections.com/pages/en/board-meetings.aspx",
)
spider = ChiBoardElectionsSpider()
spider.settings = Settings(values={"CITY_SCRAPERS_ARCHIVE": False})
# Freeze "now" so meeting-status computations (e.g. passed/upcoming) are
# deterministic regardless of when the tests actually run.
freezer = freeze_time("2018-11-30")
freezer.start()
parsed_items = [item for item in spider._next_meeting(test_response)]
freezer.stop()
# Assertions about the single upcoming meeting parsed into `parsed_items`.
def test_title():
    assert parsed_items[0]["title"] == "Electoral Board"


def test_description():
    assert parsed_items[0]["description"] == ""


def test_start():
    assert parsed_items[0]["start"] == datetime(2018, 11, 27, 9, 30)


def test_end():
    assert parsed_items[0]["end"] is None


def test_id():
    assert parsed_items[0]["id"] == "chi_board_elections/201811270930/x/electoral_board"


def test_status():
    assert parsed_items[0]["status"] == PASSED


def test_location():
    assert parsed_items[0]["location"] == {
        "address": "8th Floor Office, 69 W. Washington St. Chicago, IL 60602",
        "name": "Cook County Administration Building",
    }


def test_source():
    assert (
        parsed_items[0]["source"]
        == "https://app.chicagoelections.com/pages/en/board-meetings.aspx"
    )


def test_links():
    assert parsed_items[0]["links"] == [
        {
            "title": "Agenda",
            "href": "https://app.chicagoelections.com/documents/general/Standard-Board-Meeting-Agenda.pdf?date=20181127",  # noqa
        }
    ]


@pytest.mark.parametrize("item", parsed_items)
def test_all_day(item):
    assert item["all_day"] is False


@pytest.mark.parametrize("item", parsed_items)
def test_classification(item):
    assert item["classification"] is COMMISSION
# Previous meetings on different page: second fixture parsed from the saved
# minutes-and-videos page, with the same frozen clock for determinism.
prev_url = "https://app.chicagoelections.com/pages/en/meeting-minutes-and-videos.aspx"
test_response_prev = file_response(
    join(dirname(__file__), "files", "chi_board_elections_prev.html"), url=prev_url
)
freezer.start()
parsed_items_prev = [item for item in spider._prev_meetings(test_response_prev)]
freezer.stop()
# Assertions about the historical meetings parsed into `parsed_items_prev`.
def test_count():
    assert len(parsed_items_prev) == 12


def test_title_prev():
    assert parsed_items_prev[0]["title"] == "Electoral Board"


def METHOD_NAME():
    assert parsed_items_prev[0]["description"] == ""


def test_start_prev():
    assert parsed_items_prev[0]["start"] == datetime(2018, 11, 27, 9, 30)


def test_end_prev():
    assert parsed_items_prev[0]["end"] is None


def test_id_prev():
    assert (
        parsed_items_prev[0]["id"]
        == "chi_board_elections/201811270930/x/electoral_board"
    )


def test_status_prev():
    assert parsed_items_prev[0]["status"] == PASSED


def test_location_prev():
    assert parsed_items_prev[0]["location"] == {
        "address": "8th Floor Office, 69 W. Washington St. Chicago, IL 60602",
        "name": "Cook County Administration Building",
    }


def test_source_prev():
    assert (
        parsed_items_prev[0]["source"]
        == "https://app.chicagoelections.com/pages/en/meeting-minutes-and-videos.aspx"
    )


def test_links_prev():
    # Item 4 is the first one with both minutes and a video link.
    assert parsed_items_prev[4]["links"] == [
        {
            "title": "Minutes",
            "href": "https://app.chicagoelections.com/documents/general/BoardMeetingMinutes-2018-10-30.pdf",  # noqa
        },
        {"title": "Video", "href": "https://youtu.be/AKFNigWEkc0"},
    ]


@pytest.mark.parametrize("item", parsed_items_prev)
def test_all_day_prev(item):
    assert item["all_day"] is False


@pytest.mark.parametrize("item", parsed_items_prev)
def test_classification_prev(item):
    assert item["classification"] == COMMISSION
6,629 | get next | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._child_resources_operations import build_list_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ChildResourcesOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.resourcehealth.v2015_01_01.aio.ResourceHealthMgmtClient`'s
        :attr:`child_resources` attribute.
    """

    # AutoRest-generated operations class (see the file header); regenerate
    # rather than hand-editing the request/paging plumbing below.
    models = _models

    def __init__(self, *args, **kwargs) -> None:
        # The client injects (client, config, serializer, deserializer)
        # either positionally or by keyword.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace
    def list(
        self, resource_uri: str, filter: Optional[str] = None, expand: Optional[str] = None, **kwargs: Any
    ) -> AsyncIterable["_models.AvailabilityStatus"]:
        """Lists the all the children and its current health status for a parent resource. Use the
        nextLink property in the response to get the next page of children current health.

        :param resource_uri: The fully qualified ID of the resource, including the resource name and
         resource type. Currently the API only support not nested parent resource type:
         /subscriptions/{subscriptionId}/resourceGroups/{resource-group-name}/providers/{resource-provider-name}/{resource-type}/{resource-name}.
         Required.
        :type resource_uri: str
        :param filter: The filter to apply on the operation. For more information please see
         https://docs.microsoft.com/en-us/rest/api/apimanagement/apis?redirectedfrom=MSDN. Default value
         is None.
        :type filter: str
        :param expand: Setting $expand=recommendedactions in url query expands the recommendedactions
         in the response. Default value is None.
        :type expand: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either AvailabilityStatus or the result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resourcehealth.v2015_01_01.models.AvailabilityStatus]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2015-01-01"))
        cls: ClsType[_models.AvailabilityStatusListResult] = kwargs.pop("cls", None)

        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page uses the templated URL; subsequent pages replay the
            # service-provided nextLink with this client's api-version.
            if not next_link:
                request = build_list_request(
                    resource_uri=resource_uri,
                    filter=filter,
                    expand=expand,
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and hand (next_link, items) to the pager.
            deserialized = self._deserialize("AvailabilityStatusListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def METHOD_NAME(next_link=None):
            # Fetch one page, mapping HTTP errors to typed exceptions.
            request = prepare_request(next_link)

            _stream = False
            pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(METHOD_NAME, extract_data)

    list.metadata = {"url": "/{resourceUri}/providers/Microsoft.ResourceHealth/childResources"}
6,630 | pre process coco resnet34 tf | """
dataset related classes and methods
"""
# pylint: disable=unused-argument,missing-docstring
import logging
import sys
import time
import cv2
import numpy as np
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("dataset")
class Item():
    """A single work item: an image, its ground-truth label, its dataset
    index, and the wall-clock time it was created (for latency accounting)."""

    def __init__(self, label, img, idx):
        self.start = time.time()  # creation timestamp
        self.label = label        # expected label for this sample
        self.img = img            # preprocessed image data
        self.idx = idx            # index of the sample in the dataset
def usleep(sec):
    """Sleep for `sec` seconds (may be fractional).

    On Windows a kernel waitable timer is used because time.sleep()'s
    resolution there is too coarse for short waits; elsewhere this simply
    defers to time.sleep().
    """
    if sys.platform == 'win32':
        # on windows time.sleep() doesn't work too well
        import ctypes
        kernel32 = ctypes.windll.kernel32
        timer = kernel32.CreateWaitableTimerA(ctypes.c_void_p(), True, ctypes.c_void_p())
        # 10 * 1,000,000 ticks per second => 100 ns units; the negative sign
        # makes the due time relative rather than absolute.
        delay = ctypes.c_longlong(int(-1 * (10 * 1000000 * sec)))
        kernel32.SetWaitableTimer(timer, ctypes.byref(delay), 0, ctypes.c_void_p(), ctypes.c_void_p(), False)
        # 0xffffffff == INFINITE: block until the timer fires.
        kernel32.WaitForSingleObject(timer, 0xffffffff)
    else:
        time.sleep(sec)
class Dataset():
    """Abstract base class for benchmark datasets.

    Holds the item/label lists plus an in-memory cache of loaded samples.
    Subclasses must implement preprocess(), get_list(), get_item() and
    get_item_loc().
    """

    def __init__(self):
        self.arrival = None             # optional arrival-time trace
        self.image_list = []            # all item identifiers
        self.label_list = []            # labels aligned with image_list
        self.image_list_inmemory = {}   # sample id -> loaded data
        self.last_loaded = -1           # timestamp of the last load

    def preprocess(self, use_cache=True):
        """Preprocess the raw dataset; must be provided by subclasses."""
        raise NotImplementedError("Dataset:preprocess")

    def get_item_count(self):
        """Return the number of items in the dataset."""
        return len(self.image_list)

    def get_list(self):
        raise NotImplementedError("Dataset:get_list")

    def load_query_samples(self, sample_list):
        """Replace the in-memory cache with the given samples."""
        self.image_list_inmemory = {
            sample: self.get_item(sample)[0] for sample in sample_list
        }
        self.last_loaded = time.time()

    def unload_query_samples(self, sample_list):
        """Drop the given samples from the cache; drop everything if the
        list is empty or None."""
        if not sample_list:
            self.image_list_inmemory = {}
            return
        for sample in sample_list:
            self.image_list_inmemory.pop(sample, None)

    def get_samples(self, id_list):
        """Return (stacked data, labels) for the cached sample ids."""
        data = np.array([self.image_list_inmemory[sid] for sid in id_list])
        return data, self.label_list[id_list]

    def get_item_loc(self, id):
        raise NotImplementedError("Dataset:get_item_loc")
#
# Post processing
#
class PostProcessCommon:
    """Post-processor for models whose first output already holds label ids.

    Adds a constant offset to each prediction (e.g. to drop a background
    class) and keeps a running accuracy tally.
    """

    def __init__(self, offset=0):
        self.offset = offset  # constant added to every predicted label
        self.good = 0         # correct predictions seen so far
        self.total = 0        # total predictions seen so far

    def __call__(self, results, ids, expected=None, result_dict=None):
        """Shift the batch's predictions, tally accuracy, and return them
        as a list of single-element lists."""
        batch = results[0]
        processed = []
        for idx, prediction in enumerate(batch):
            shifted = prediction + self.offset
            processed.append([shifted])
            if shifted == expected[idx]:
                self.good += 1
        self.total += len(batch)
        return processed

    def add_results(self, results):
        # Nothing to accumulate per batch for this post-processor.
        pass

    def start(self):
        """Reset the accuracy counters at the start of a run."""
        self.good = 0
        self.total = 0

    def finalize(self, results, ds=False, output_dir=None):
        """Write the accumulated counters into the results dict."""
        results["good"] = self.good
        results["total"] = self.total
class PostProcessArgMax:
    """Post-processor for models that emit per-class scores: takes the
    argmax over axis 1, applies a constant offset, and tallies accuracy."""

    def __init__(self, offset=0):
        self.offset = offset  # constant added to every argmax label
        self.good = 0         # correct predictions seen so far
        self.total = 0        # total predictions seen so far

    def __call__(self, results, ids, expected=None, result_dict=None):
        """Reduce scores to labels, tally accuracy, return [[label], ...]."""
        predictions = np.argmax(results[0], axis=1)
        processed = []
        for idx, prediction in enumerate(predictions):
            label = prediction + self.offset
            processed.append([label])
            if label == expected[idx]:
                self.good += 1
        self.total += predictions.shape[0]
        return processed

    def add_results(self, results):
        # Nothing to accumulate per batch for this post-processor.
        pass

    def start(self):
        """Reset the accuracy counters at the start of a run."""
        self.good = 0
        self.total = 0

    def finalize(self, results, ds=False, output_dir=None):
        """Write the accumulated counters into the results dict."""
        results["good"] = self.good
        results["total"] = self.total
#
# pre-processing
#
def center_crop(img, out_height, out_width):
    """Crop the central out_height x out_width window from an HWC image."""
    height, width, _ = img.shape
    # int() truncation on the half-sums reproduces the usual center-crop
    # rounding for odd differences.
    top = int((height - out_height) / 2)
    bottom = int((height + out_height) / 2)
    left = int((width - out_width) / 2)
    right = int((width + out_width) / 2)
    return img[top:bottom, left:right]
def resize_with_aspectratio(img, out_height, out_width, scale=87.5, inter_pol=cv2.INTER_LINEAR):
    """Aspect-ratio-preserving resize: the shorter side is sized so that a
    later center crop of (out_height, out_width) keeps `scale` percent of
    the resized image (the classic resize-then-crop inference step)."""
    height, width, _ = img.shape
    # Inflate the requested size by 100/scale before cropping.
    target_h = int(100. * out_height / scale)
    target_w = int(100. * out_width / scale)
    if height > width:
        # Portrait: fix the width, scale the height proportionally.
        resized_w = target_w
        resized_h = int(target_h * height / width)
    else:
        # Landscape/square: fix the height, scale the width proportionally.
        resized_h = target_h
        resized_w = int(target_w * width / height)
    # cv2.resize takes (width, height) order.
    return cv2.resize(img, (resized_w, resized_h), interpolation=inter_pol)
def pre_process_vgg(img, dims=None, need_transpose=False):
    """VGG/ResNet-style preprocessing: BGR->RGB, aspect-preserving resize,
    center crop to `dims`, float32 conversion, per-channel mean subtraction.

    :param img: BGR image as loaded by cv2
    :param dims: (height, width, channels) of the network input
    :param need_transpose: if True, return CHW instead of HWC
    """
    rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    out_height, out_width, _ = dims
    rgb = resize_with_aspectratio(rgb, out_height, out_width, inter_pol=cv2.INTER_AREA)
    rgb = center_crop(rgb, out_height, out_width)
    rgb = np.asarray(rgb, dtype='float32')
    # Subtract the per-channel means (RGB order).
    rgb -= np.array([123.68, 116.78, 103.94], dtype=np.float32)
    if need_transpose:
        # HWC -> CHW for frameworks that expect channels first.
        rgb = rgb.transpose([2, 0, 1])
    return rgb
def pre_process_mobilenet(img, dims=None, need_transpose=False):
    """MobileNet preprocessing: BGR->RGB, aspect-preserving resize, center
    crop to `dims`, then scale pixel values into [-1, 1].

    :param img: BGR image as loaded by cv2
    :param dims: (height, width, channels) of the network input
    :param need_transpose: if True, return CHW instead of HWC
    """
    rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    out_height, out_width, _ = dims
    rgb = resize_with_aspectratio(rgb, out_height, out_width, inter_pol=cv2.INTER_LINEAR)
    rgb = center_crop(rgb, out_height, out_width)
    rgb = np.asarray(rgb, dtype='float32')
    # Map [0, 255] -> [-1, 1].
    rgb /= 255.0
    rgb -= 0.5
    rgb *= 2
    if need_transpose:
        # HWC -> CHW for frameworks that expect channels first.
        rgb = rgb.transpose([2, 0, 1])
    return rgb
def maybe_resize(img, dims):
    """Convert `img` to a float32 RGB array and, when `dims` is given,
    resize it to the target height/width.

    :param img: image array (grayscale or BGR, as loaded by cv2)
    :param dims: (height, width, channels) target size, or None to keep
        the original size
    :return: float32 RGB image
    """
    img = np.array(img, dtype=np.float32)
    if len(img.shape) < 3 or img.shape[2] != 3:
        # some images might be grayscale
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # PEP 8: identity test against None instead of `!= None`.
    if dims is not None:
        im_height, im_width, _ = dims
        # cv2.resize takes (width, height) order.
        img = cv2.resize(img, (im_width, im_height), interpolation=cv2.INTER_LINEAR)
    return img
def pre_process_coco_mobilenet(img, dims=None, need_transpose=False):
    """COCO SSD-MobileNet preprocessing: resize to `dims` and keep raw
    uint8 pixel values (no normalization)."""
    resized = maybe_resize(img, dims)
    resized = np.asarray(resized, dtype=np.uint8)
    if need_transpose:
        # HWC -> CHW for frameworks that expect channels first.
        resized = resized.transpose([2, 0, 1])
    return resized
def pre_process_coco_pt_mobilenet(img, dims=None, need_transpose=False):
    """Preprocess for the PyTorch COCO MobileNet: resize, then scale pixel
    values from [0, 255] into [-1, 1]."""
    out = maybe_resize(img, dims)
    out = (out - 127.5) / 127.5
    if need_transpose:
        out = out.transpose([2, 0, 1])  # HWC -> CHW
    return out
def pre_process_coco_resnet34(img, dims=None, need_transpose=False):
    """Preprocess for COCO ResNet34: resize, scale to [0, 1] and normalize
    with the ImageNet per-channel mean/std."""
    out = maybe_resize(img, dims)
    mean = np.array([0.485, 0.456, 0.406], dtype=np.float32)
    std = np.array([0.229, 0.224, 0.225], dtype=np.float32)
    out = (out / 255. - mean) / std
    if need_transpose:
        out = out.transpose([2, 0, 1])  # HWC -> CHW
    return out
def METHOD_NAME(img, dims=None, need_transpose=False):
    """Preprocess for a COCO ResNet34 variant: resize and subtract the
    ImageNet RGB means on the 0-255 scale (no division by 255)."""
    img = maybe_resize(img, dims)
    mean = np.array([123.68, 116.78, 103.94], dtype=np.float32)  # ImageNet RGB means (0-255 scale)
    img = img - mean
    if need_transpose:
        img = img.transpose([2, 0, 1])  # HWC -> CHW
    return img
6,631 | register dataclass as pytree node | import dataclasses
from typing import Any, Dict, List, Optional, Tuple, Type
import torch
from torch._export import ExportedProgram
from torch.utils._pytree import (
_register_pytree_node,
Context,
DumpableContext,
FlattenFunc,
FromDumpableContextFn,
ToDumpableContextFn,
UnflattenFunc,
)
SERIALIZED_DATACLASS_TO_PYTHON_DATACLASS: Dict[str, Type[Any]] = {}
def METHOD_NAME(
    typ: Any,
    flatten_fn: Optional[FlattenFunc] = None,
    unflatten_fn: Optional[UnflattenFunc] = None,
    *,
    to_dumpable_context: Optional[ToDumpableContextFn] = None,
    from_dumpable_context: Optional[FromDumpableContextFn] = None,
    return_none_fields: bool = False,
) -> None:
    """Register a dataclass type as a pytree node.

    Default flatten/unflatten functions based on ``dataclasses.fields`` are
    installed unless custom ones are supplied. Fields whose value is None are
    skipped during flattening (and restored as None on unflatten) unless
    ``return_none_fields`` is set. ``to_dumpable_context`` and
    ``from_dumpable_context`` must be given together or not at all.

    Side effect: records the type in
    SERIALIZED_DATACLASS_TO_PYTHON_DATACLASS under its fully-qualified name
    so a dumped context can be resolved back to the class.
    """
    assert dataclasses.is_dataclass(
        typ
    ), f"Only dataclasses can be registered with this function: {typ}"
    serialized_type = f"{typ.__module__}.{typ.__name__}"
    SERIALIZED_DATACLASS_TO_PYTHON_DATACLASS[serialized_type] = typ
    def default_flatten_fn(obj: Any) -> Tuple[List[Any], Context]:
        # Split fields into flattened values and the names of the None fields
        # that were omitted; both name lists travel in the context.
        flattened = []
        flat_names = []
        none_names = []
        for f in dataclasses.fields(obj):
            name, val = f.name, getattr(obj, f.name)
            if val is not None or return_none_fields:
                flattened.append(val)
                flat_names.append(name)
            else:
                none_names.append(name)
        return flattened, (typ, flat_names, none_names)
    def default_unflatten_fn(values: List[Any], context: Context) -> Any:
        # Rebuild the dataclass: flattened values by name, omitted fields as None.
        typ, flat_names, none_names = context
        return typ(**dict(zip(flat_names, values)), **{k: None for k in none_names})
    def default_to_dumpable_context(context: Context) -> DumpableContext:
        # Replace the (non-serializable) class object with its qualified name.
        return (serialized_type, context[1], context[2])
    def default_from_dumpable_context(dumpable_context: DumpableContext) -> Context:
        return (
            SERIALIZED_DATACLASS_TO_PYTHON_DATACLASS[dumpable_context[0]],
            dumpable_context[1],
            dumpable_context[2],
        )
    flatten_fn = flatten_fn if flatten_fn is not None else default_flatten_fn
    unflatten_fn = unflatten_fn if unflatten_fn is not None else default_unflatten_fn
    # XOR: exactly one of the two context converters was supplied.
    if (to_dumpable_context is None) ^ (from_dumpable_context is None):
        raise ValueError(
            f"Both to_dumpable_context and from_dumpable_context for {typ} must "
            "be None or registered."
        )
    to_dumpable_context = (
        to_dumpable_context
        if to_dumpable_context is not None
        else default_to_dumpable_context
    )
    from_dumpable_context = (
        from_dumpable_context
        if from_dumpable_context is not None
        else default_from_dumpable_context
    )
    _register_pytree_node(
        typ,
        flatten_fn,
        unflatten_fn,
        to_dumpable_context=to_dumpable_context,
        from_dumpable_context=from_dumpable_context,
    )
def is_param(program: ExportedProgram, node: torch.fx.Node) -> bool:
"""
Checks if the given node is a parameter within the exported program
"""
return node.name in program.graph_signature.inputs_to_parameters
def get_param(
    program: ExportedProgram,
    node: torch.fx.Node,
) -> Optional[torch.nn.Parameter]:
    """Return the parameter bound to ``node`` in ``program``.

    Returns None when ``node`` is not a parameter input of the program.
    """
    if not is_param(program, node):
        return None
    param_name = program.graph_signature.inputs_to_parameters[node.name]
    return program.state_dict[param_name]
def is_buffer(program: ExportedProgram, node: torch.fx.Node) -> bool:
"""
Checks if the given node is a buffer within the exported program
"""
return node.name in program.graph_signature.inputs_to_buffers
def get_buffer(
    program: ExportedProgram,
    node: torch.fx.Node,
) -> Optional[torch.Tensor]:
    """Return the buffer bound to ``node`` in ``program``.

    Returns None when ``node`` is not a buffer input of the program.
    """
    if not is_buffer(program, node):
        return None
    buffer_name = program.graph_signature.inputs_to_buffers[node.name]
    return program.state_dict[buffer_name]
6,632 | test restore contour order warning | import pytest
from shutil import copy2, copytree
import os
from booleanOperations.booleanGlyph import BooleanGlyph
from defcon import Glyph
from afdko.checkoutlinesufo import remove_tiny_sub_paths
from afdko.fdkutils import (
get_temp_file_path,
get_temp_dir_path,
get_font_format,
)
from test_utils import (
get_input_path,
get_expected_path,
generate_ttx_dump,
)
from runner import main as runner
from differ import main as differ
TOOL = 'checkoutlinesufo'
CMD = ['-t', TOOL]
# -----
# Tests
# -----
def test_remove_tiny_sub_paths_large_contour():
    # A contour whose bounding box is above the minimum area must be kept,
    # so remove_tiny_sub_paths reports no messages.
    glyph = Glyph()
    pen = glyph.getPen()
    pen.moveTo((100, 100))
    pen.lineTo((200, 200))
    pen.lineTo((0, 100))
    pen.closePath()
    assert len(glyph[0]) == 3
    assert glyph.bounds == (0, 100, 200, 200)
    assert remove_tiny_sub_paths(BooleanGlyph(glyph), 25, []) == []
def test_remove_tiny_sub_paths_small_contour():
    # A contour smaller than the minimum area is reported for removal.
    glyph = Glyph()
    pen = glyph.getPen()
    pen.moveTo((1, 1))
    pen.lineTo((2, 2))
    pen.lineTo((0, 1))
    pen.closePath()
    assert len(glyph[0]) == 3
    assert glyph.bounds == (0, 1, 2, 2)
    expected_message = (
        'Contour 0 is too small: bounding box is less than minimum area. '
        'Start point: ((1, 1)).'
    )
    assert remove_tiny_sub_paths(BooleanGlyph(glyph), 25, []) == [expected_message]
@pytest.mark.parametrize('ufo_filename', ['ufo2.ufo', 'ufo3.ufo'])
@pytest.mark.parametrize('args, expct_label', [
    (['e', 'w', 'q'], 'dflt-layer.ufo'),
    (['e', 'q'], 'proc-layer.ufo'),
])
def test_remove_overlap_ufo(args, ufo_filename, expct_label):
    """Overlap removal on UFO2/UFO3 sources; per the parametrization, the
    'w' option maps to the default-layer expectation, its absence to the
    processed-layer one."""
    actual_path = get_temp_dir_path(ufo_filename)
    # Work on a copy since the tool modifies the UFO in place.
    copytree(get_input_path(ufo_filename), actual_path)
    runner(CMD + ['-f', actual_path, '-o'] + args)
    expct_filename = f'{ufo_filename[:-4]}-{expct_label}'
    expected_path = get_expected_path(expct_filename)
    assert differ([expected_path, actual_path])
@pytest.mark.parametrize('filename, diffmode', [
    ('font.pfa', []),
    ('font.pfb', ['-m', 'bin']),
    ('font.cff', ['-m', 'bin']),
])
def test_remove_overlap_type1_cff(filename, diffmode):
    """Overlap removal on Type1 (pfa/pfb) and bare CFF inputs; the binary
    formats are compared in binary diff mode ('-m bin')."""
    actual_path = get_temp_file_path()
    # Work on a copy since the tool modifies the font in place.
    copy2(get_input_path(filename), actual_path)
    runner(CMD + ['-f', actual_path, '-o', 'e', 'q'])
    expected_path = get_expected_path(filename)
    assert differ([expected_path, actual_path] + diffmode)
def test_remove_overlap_otf():
    # Overlap removal on an OTF; the resulting 'CFF ' table dump is compared
    # against the expected TTX, skipping the sfntVersion header line.
    temp_font = get_temp_file_path()
    copy2(get_input_path('font.otf'), temp_font)
    runner(CMD + ['-f', temp_font, '-o', 'e', 'q'])
    actual_ttx = generate_ttx_dump(temp_font, ['CFF '])
    expected_ttx = get_expected_path('font.ttx')
    assert differ([expected_ttx, actual_ttx, '-s', '<ttFont sfntVersion'])
def test_bug790():
    """
    Test case where the result of overlap removal resulted in coincident points
    at contour start. Previously caused a crash when attempting to set start
    point on the second point.
    """
    ufoname = 'bug790.ufo'
    actual_path = get_temp_dir_path(ufoname)
    # Work on a copy since the tool modifies the UFO in place.
    copytree(get_input_path(ufoname), actual_path)
    runner(CMD + ['-f', actual_path, '-o', 'e'])
    expected_path = get_expected_path(ufoname)
    assert differ([expected_path, actual_path])
@pytest.mark.parametrize('filename, diffmode', [
    ('cidfont.subset', ['-m', 'bin'])
])
def test_cidkeyed_remove_overlap(filename, diffmode):
    """Overlap removal on a CID-keyed font, compared in binary diff mode."""
    actual_path = get_temp_file_path()
    copy2(get_input_path(filename), actual_path)
    runner(CMD + ['-f', actual_path, '-o', 'e', 'q', '=no-overlap-checks'])
    expected_path = get_expected_path('cidfont.subset.checked')
    assert differ([expected_path, actual_path] + diffmode)
@pytest.mark.parametrize('input_font, expected_font', [
    ('ufo3.ufo', 'ufo3-proc-layer.ufo'),
    ('font.pfa', 'font.pfa'),
])
def test_output_file_option(input_font, expected_font):
    """
    Test the '-o' (output file) option.

    The output file must have the same font format as the input and match
    the expected result.
    """
    in_path = get_input_path(input_font)
    out_path = os.path.join(get_temp_dir_path(), input_font)
    expected_path = get_expected_path(expected_font)
    runner(CMD + ['-f', in_path, '-o', 'e', 'o', '_' + out_path])
    assert get_font_format(out_path) == get_font_format(in_path)
    assert differ([expected_path, out_path])
@pytest.mark.parametrize('input_font, expected_font', [
    ('contour-restore.ufo', 'contour-restore-ignored.ufo'),
])
def test_ignore_contour_order(input_font, expected_font):
    """
    Test the '--ignore-contour-order' option.
    """
    in_path = get_input_path(input_font)
    out_path = os.path.join(get_temp_dir_path(), input_font)
    expected_path = get_expected_path(expected_font)
    runner(CMD + ['-f', in_path, '-o', '=ignore-contour-order', '=all', 'e',
                  'q', 'o', '_' + out_path])
    assert get_font_format(out_path) == get_font_format(in_path)
    # '-r' passes a regex for '<point' lines to differ — presumably to relax
    # the comparison of point data; confirm against differ's docs.
    assert differ([expected_path, out_path, '-r', r'^\s*<point'])
@pytest.mark.parametrize('input_font, expected_font', [
    ('contour-restore.ufo', 'restore_contour_order_warned.ufo'),
])
def METHOD_NAME(input_font, expected_font):
    """
    Test the warning message outputted with
    unsuccessful restore_contour_order.
    """
    in_path = get_input_path(input_font)
    out_path = os.path.join(get_temp_dir_path(), input_font)
    expected_path = get_expected_path(expected_font)
    # With '-s', runner returns the path of the captured stderr output.
    stderr_path = runner(CMD + ['-s', '-f', in_path, '-o', "e", '=all',
                                'o', '_' + out_path])
    assert get_font_format(out_path) == get_font_format(in_path)
    assert differ([expected_path, out_path, '-r', r'^\s*<point'])
    with open(stderr_path, 'rb') as f:
        output = f.read()
    # The exact warning for the duplicated start point must be present.
    assert (b'Warning: duplicated start point on contour 3 at 616, 597 of '
            b'glyph cid51107.') in output
def test_regression_bug1315():
    """
    Test for unnecessary duplicated start point error messages.
    """
    in_path = get_input_path('bug1315.ufo')
    out_path = os.path.join(get_temp_dir_path(), 'bug1315.ufo')
    # With '-s', runner returns the path of the captured stderr output.
    stderr_path = runner(CMD + ['-s', '-f', in_path, '-o', "e", '=all',
                                'o', '_' + out_path])
    with open(stderr_path, 'rb') as f:
        output = f.read()
    # fix: removed the leftover debug `print(output)`; pytest already shows
    # the compared values when the assertion fails. Also fixed the docstring
    # typo ("uneccessary").
    assert (b'Warning: duplicated start point on contour ') not in output
6,633 | prepare request | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(**kwargs: Any) -> HttpRequest:
    """Build the GET request that lists Microsoft.ContainerService operations."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version: str = kwargs.pop("api_version", params.pop("api-version", "2021-09-01"))
    accept = headers.pop("Accept", "application/json")
    # URL
    url = kwargs.pop("template_url", "/providers/Microsoft.ContainerService/operations")
    # Query parameters
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    # Headers
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="GET", url=url, params=params, headers=headers, **kwargs)
class Operations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.
        Instead, you should access the following operations through
        :class:`~azure.mgmt.containerservice.v2021_09_01.ContainerServiceClient`'s
        :attr:`operations` attribute.
    """
    models = _models
    def __init__(self, *args, **kwargs):
        # Autogenerated plumbing: accept client/config/serializer/deserializer/
        # api_version either positionally or as keyword arguments.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
        self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version")
    @distributed_trace
    def list(self, **kwargs: Any) -> Iterable["_models.OperationValue"]:
        """Gets a list of operations.
        Gets a list of operations.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either OperationValue or the result of cls(response)
        :rtype:
        ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2021_09_01.models.OperationValue]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2021-09-01"))
        cls: ClsType[_models.OperationListResult] = kwargs.pop("cls", None)
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        def METHOD_NAME(next_link=None):
            # Build the first-page request from the operation metadata, or a
            # follow-up GET against the service-provided next link.
            if not next_link:
                request = build_list_request(
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        def extract_data(pipeline_response):
            # Deserialize one page and hand its items to the pager.
            deserialized = self._deserialize("OperationListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return None, iter(list_of_elem)
        def get_next(next_link=None):
            request = METHOD_NAME(next_link)
            _stream = False
            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(get_next, extract_data)
    list.metadata = {"url": "/providers/Microsoft.ContainerService/operations"}
6,634 | test function no runtime | from unittest import mock
from moto.core import DEFAULT_ACCOUNT_ID
from prowler.providers.aws.services.awslambda.awslambda_service import Function
AWS_REGION = "us-east-1"
class Test_awslambda_function_using_supported_runtimes:
    """Tests for the awslambda_function_using_supported_runtimes check."""

    # Runtimes the check must flag as obsolete. Declared once so the tests
    # below share a single source of truth instead of repeating the same
    # twelve-element literal in every mocked audit_config.
    OBSOLETE_RUNTIMES = [
        "python3.6",
        "python2.7",
        "nodejs4.3",
        "nodejs4.3-edge",
        "nodejs6.10",
        "nodejs",
        "nodejs8.10",
        "nodejs10.x",
        "dotnetcore1.0",
        "dotnetcore2.0",
        "dotnetcore2.1",
        "ruby2.5",
    ]

    def test_no_functions(self):
        """No findings are produced when there are no Lambda functions."""
        # NOTE(review): `mock.MagicMock` without parentheses (the class, not
        # an instance) follows this suite's existing convention; attributes
        # set on it are class-level, so each test reassigns them first.
        lambda_client = mock.MagicMock
        lambda_client.functions = {}
        with mock.patch(
            "prowler.providers.aws.services.awslambda.awslambda_service.Lambda",
            new=lambda_client,
        ):
            # Import inside the patch so the check binds to the mocked service.
            from prowler.providers.aws.services.awslambda.awslambda_function_using_supported_runtimes.awslambda_function_using_supported_runtimes import (
                awslambda_function_using_supported_runtimes,
            )

            check = awslambda_function_using_supported_runtimes()
            result = check.execute()
            assert len(result) == 0

    def test_function_obsolete_runtime(self):
        """A function on an obsolete runtime yields a FAIL finding."""
        lambda_client = mock.MagicMock
        function_name = "test-lambda"
        function_runtime = "nodejs4.3"
        function_arn = (
            f"arn:aws:lambda:{AWS_REGION}:{DEFAULT_ACCOUNT_ID}:function/{function_name}"
        )
        lambda_client.functions = {
            "function_name": Function(
                name=function_name,
                arn=function_arn,
                region=AWS_REGION,
                runtime=function_runtime,
            )
        }
        # Mock config
        lambda_client.audit_config = {
            "obsolete_lambda_runtimes": self.OBSOLETE_RUNTIMES
        }
        with mock.patch(
            "prowler.providers.aws.services.awslambda.awslambda_service.Lambda",
            new=lambda_client,
        ):
            # Test Check
            from prowler.providers.aws.services.awslambda.awslambda_function_using_supported_runtimes.awslambda_function_using_supported_runtimes import (
                awslambda_function_using_supported_runtimes,
            )

            check = awslambda_function_using_supported_runtimes()
            result = check.execute()
            assert len(result) == 1
            assert result[0].region == AWS_REGION
            assert result[0].resource_id == function_name
            assert result[0].resource_arn == function_arn
            assert result[0].status == "FAIL"
            assert (
                result[0].status_extended
                == f"Lambda function {function_name} is using {function_runtime} which is obsolete."
            )
            assert result[0].resource_tags == []

    def test_function_supported_runtime(self):
        """A function on a supported runtime yields a PASS finding."""
        lambda_client = mock.MagicMock
        function_name = "test-lambda"
        function_runtime = "python3.9"
        function_arn = (
            f"arn:aws:lambda:{AWS_REGION}:{DEFAULT_ACCOUNT_ID}:function/{function_name}"
        )
        lambda_client.functions = {
            "function_name": Function(
                name=function_name,
                arn=function_arn,
                region=AWS_REGION,
                runtime=function_runtime,
            )
        }
        # Mock config
        lambda_client.audit_config = {
            "obsolete_lambda_runtimes": self.OBSOLETE_RUNTIMES
        }
        with mock.patch(
            "prowler.providers.aws.services.awslambda.awslambda_service.Lambda",
            new=lambda_client,
        ):
            # Test Check
            from prowler.providers.aws.services.awslambda.awslambda_function_using_supported_runtimes.awslambda_function_using_supported_runtimes import (
                awslambda_function_using_supported_runtimes,
            )

            check = awslambda_function_using_supported_runtimes()
            result = check.execute()
            assert len(result) == 1
            assert result[0].region == AWS_REGION
            assert result[0].resource_id == function_name
            assert result[0].resource_arn == function_arn
            assert result[0].status == "PASS"
            assert (
                result[0].status_extended
                == f"Lambda function {function_name} is using {function_runtime} which is supported."
            )
            assert result[0].resource_tags == []

    def METHOD_NAME(self):
        """A function with no runtime set produces no findings."""
        lambda_client = mock.MagicMock
        function_name = "test-lambda"
        function_arn = (
            f"arn:aws:lambda:{AWS_REGION}:{DEFAULT_ACCOUNT_ID}:function/{function_name}"
        )
        lambda_client.functions = {
            "function_name": Function(
                name=function_name, arn=function_arn, region=AWS_REGION
            )
        }
        # Mock config
        lambda_client.audit_config = {
            "obsolete_lambda_runtimes": self.OBSOLETE_RUNTIMES
        }
        with mock.patch(
            "prowler.providers.aws.services.awslambda.awslambda_service.Lambda",
            new=lambda_client,
        ):
            # Test Check
            from prowler.providers.aws.services.awslambda.awslambda_function_using_supported_runtimes.awslambda_function_using_supported_runtimes import (
                awslambda_function_using_supported_runtimes,
            )

            check = awslambda_function_using_supported_runtimes()
            result = check.execute()
            assert len(result) == 0
6,635 | exponent | # Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import AbstractSet, Any, Dict, Union
import numpy as np
import sympy
import cirq
from cirq import value, _compat
from cirq.ops import raw_types
@value.value_equality
class QuantumFourierTransformGate(raw_types.Gate):
    r"""Switches from the computational basis to the frequency basis.
    This gate has the unitary
    $$
    \frac{1}{2^{n/2}}\sum_{x,y=0}^{2^n-1} \omega^{xy} |x\rangle\langle y|
    $$
    where
    $$
    \omega = e^{\frac{2\pi i}{2^n}}
    $$
    """
    def __init__(self, num_qubits: int, *, without_reverse: bool = False):
        """Inits QuantumFourierTransformGate.
        Args:
            num_qubits: The number of qubits the gate applies to.
            without_reverse: Whether or not to include the swaps at the end
                of the circuit decomposition that reverse the order of the
                qubits. These are technically necessary in order to perform the
                correct effect, but can almost always be optimized away by just
                performing later operations on different qubits.
        """
        self._num_qubits = num_qubits
        self._without_reverse = without_reverse
    def _json_dict_(self) -> Dict[str, Any]:
        # JSON serialization: mirror the constructor arguments.
        return {'num_qubits': self._num_qubits, 'without_reverse': self._without_reverse}
    def _value_equality_values_(self):
        return self._num_qubits, self._without_reverse
    def num_qubits(self) -> int:
        return self._num_qubits
    def _decompose_(self, qubits):
        # Standard textbook QFT circuit: a Hadamard on each qubit followed by
        # controlled phase rotations on the already-processed qubits.
        if len(qubits) == 0:
            return
        yield cirq.H(qubits[0])
        for i in range(1, len(qubits)):
            yield PhaseGradientGate(num_qubits=i, METHOD_NAME=0.5).on(*qubits[:i][::-1]).controlled_by(
                qubits[i]
            )
            yield cirq.H(qubits[i])
        # Final swaps reverse the qubit order unless explicitly disabled.
        if not self._without_reverse:
            for i in range(len(qubits) // 2):
                yield cirq.SWAP(qubits[i], qubits[-i - 1])
    def _has_unitary_(self):
        return True
    def __str__(self) -> str:
        return 'qft[norev]' if self._without_reverse else 'qft'
    def __repr__(self) -> str:
        return (
            'cirq.QuantumFourierTransformGate('
            f'num_qubits={self._num_qubits!r}, '
            f'without_reverse={self._without_reverse!r})'
        )
    def _circuit_diagram_info_(
        self, args: 'cirq.CircuitDiagramInfoArgs'
    ) -> 'cirq.CircuitDiagramInfo':
        # First wire shows the gate name; remaining wires show '#2', '#3', ...
        return cirq.CircuitDiagramInfo(
            wire_symbols=(str(self),) + tuple(f'#{k+1}' for k in range(1, self._num_qubits)),
            exponent_qubit_index=0,
        )
@value.value_equality
class PhaseGradientGate(raw_types.Gate):
    r"""Phases all computational basis states proportional to the integer value of the state.
    The gate `cirq.PhaseGradientGate(n, t)` has the unitary
    $$
    \sum_{x=0}^{2^n-1} \omega^x |x\rangle \langle x|
    $$
    where
    $$
    \omega=e^{2 \pi i/2^n}
    $$
    This gate makes up a portion of the quantum fourier transform.
    """
    def __init__(self, *, num_qubits: int, METHOD_NAME: Union[float, sympy.Basic]):
        self._num_qubits = num_qubits
        self._exponent = METHOD_NAME
    @property
    def METHOD_NAME(self) -> Union[float, sympy.Basic]:
        # The phase exponent `t`; may be symbolic (sympy) until resolved.
        return self._exponent
    def _json_dict_(self) -> Dict[str, Any]:
        return {'num_qubits': self._num_qubits, 'exponent': self.METHOD_NAME}
    def _value_equality_values_(self):
        return self._num_qubits, self.METHOD_NAME
    def num_qubits(self) -> int:
        return self._num_qubits
    def _decompose_(self, qubits):
        # Z rotation on each qubit, halving the angle per position.
        for i, q in enumerate(qubits):
            yield cirq.Z(q) ** (self.METHOD_NAME / 2**i)
    def _apply_unitary_(self, args: 'cirq.ApplyUnitaryArgs'):
        if isinstance(self.METHOD_NAME, sympy.Basic):
            return NotImplemented
        n = int(np.prod([args.target_tensor.shape[k] for k in args.axes], dtype=np.int64))
        for i in range(n):
            # 1j ** (4*x) == exp(2*pi*1j*x), so p == exp(2*pi*1j * i/n * exponent).
            p = 1j ** (4 * i / n * self.METHOD_NAME)
            args.target_tensor[args.subspace_index(big_endian_bits_int=i)] *= p
        return args.target_tensor
    def __pow__(self, power):
        new_exponent = cirq.mul(self.METHOD_NAME, power, NotImplemented)
        if new_exponent is NotImplemented:  # pragma: no cover
            return NotImplemented
        return PhaseGradientGate(num_qubits=self._num_qubits, METHOD_NAME=new_exponent)
    def _unitary_(self):
        if isinstance(self.METHOD_NAME, sympy.Basic):
            return NotImplemented
        size = 1 << self._num_qubits
        # Diagonal matrix of phases exp(2*pi*1j * i/size * exponent).
        return np.diag([1j ** (4 * i / size * self.METHOD_NAME) for i in range(size)])
    def _has_unitary_(self) -> bool:
        return not cirq.is_parameterized(self)
    def _is_parameterized_(self) -> bool:
        return cirq.is_parameterized(self.METHOD_NAME)
    def _parameter_names_(self) -> AbstractSet[str]:
        return cirq.parameter_names(self.METHOD_NAME)
    def _resolve_parameters_(
        self, resolver: 'cirq.ParamResolver', recursive: bool
    ) -> 'PhaseGradientGate':
        new_exponent = cirq.resolve_parameters(self.METHOD_NAME, resolver, recursive)
        if new_exponent is self.METHOD_NAME:
            return self
        return PhaseGradientGate(num_qubits=self._num_qubits, METHOD_NAME=new_exponent)
    def __str__(self) -> str:
        return f'Grad[{self._num_qubits}]' + (f'^{self.METHOD_NAME}' if self.METHOD_NAME != 1 else '')
    def __repr__(self) -> str:
        return (
            'cirq.PhaseGradientGate('
            f'num_qubits={self._num_qubits!r}, '
            f'exponent={_compat.proper_repr(self.METHOD_NAME)})'
        )
    def _circuit_diagram_info_(
        self, args: 'cirq.CircuitDiagramInfoArgs'
    ) -> 'cirq.CircuitDiagramInfo':
        return cirq.CircuitDiagramInfo(
            wire_symbols=('Grad',) + tuple(f'#{k+1}' for k in range(1, self._num_qubits)),
            METHOD_NAME=self.METHOD_NAME,
            exponent_qubit_index=0,
        )
def qft(
    *qubits: 'cirq.Qid', without_reverse: bool = False, inverse: bool = False
) -> 'cirq.Operation':
    """The quantum Fourier transform.
    Transforms a qubit register from the computational basis to the frequency
    basis.
    The inverse quantum Fourier transform is `cirq.qft(*qubits)**-1` or
    equivalently `cirq.inverse(cirq.qft(*qubits))`.
    Args:
        *qubits: The qubits to apply the qft to.
        without_reverse: When set, swap gates at the end of the qft are omitted.
            This reverses the qubit order relative to the standard qft effect,
            but makes the gate cheaper to apply.
        inverse: If set, the inverse qft is performed instead of the qft.
            Equivalent to calling `cirq.inverse` on the result, or raising it
            to the -1.
    Returns:
        A `cirq.Operation` applying the qft to the given qubits.
    """
    gate = QuantumFourierTransformGate(len(qubits), without_reverse=without_reverse)
    operation = gate.on(*qubits)
    return cirq.inverse(operation) if inverse else operation
6,636 | get json from cloud | import datetime
import typing
import weakref
from typing import Dict, Iterator, List, Optional, Sequence, Union
from robocorp_code.deps._deps_protocols import (
PyPiInfoTypedDict,
ReleaseData,
Versions,
VersionStr,
)
from robocorp_ls_core.robotframework_log import get_logger
log = get_logger(__name__)
# Interesting:
# https://github.com/python-poetry/poetry/blob/master/src/poetry/repositories/pypi_repository.py
class PackageData:
    """PyPI metadata for a single package: the raw ``info`` dict plus the
    parsed release entries keyed by version string."""
    def __init__(self, package_name: str, info: PyPiInfoTypedDict) -> None:
        self.package_name = package_name
        self._info: PyPiInfoTypedDict = info
        # Version string -> parsed release metadata.
        self._releases: Dict[str, ReleaseData] = {}
    def add_release(self, version_str: VersionStr, release_info: List[dict]) -> None:
        """
        Args:
            version_str: The version we have info on.
            release_info: For each release we may have a list of files available.
        """
        from robocorp_code.deps.pip_impl import pip_packaging_version
        version = pip_packaging_version.parse(version_str)
        # Use the first file entry that carries an upload time.
        upload_time: Optional[str] = None
        for dct in release_info:
            upload_time = dct.get("upload_time")
            if upload_time:
                break
        self._releases[version_str] = ReleaseData(version, version_str, upload_time)
    @property
    def latest_version(self) -> VersionStr:
        # The "version" reported in the package's info dict.
        return self._info["version"]
    def get_release_data(self, version: VersionStr) -> Optional[ReleaseData]:
        if not self._releases:
            return None
        try:
            return self._releases[version]
        except KeyError:
            return None
    def get_last_release_data(self) -> Optional[ReleaseData]:
        """
        Provides the last release data (if there's any release).
        """
        return self.get_release_data(self.latest_version)
    def iter_versions_released_after(
        self, after_datetime: Optional[datetime.datetime]
    ) -> Iterator[ReleaseData]:
        """
        Args:
            after_datetime: if none all releases (except pre-releases) will
            be provided.
        """
        last_release_data = self.get_last_release_data()
        latest_upload_datetime: Optional[datetime.datetime] = None
        if last_release_data and last_release_data.upload_time:
            latest_upload_datetime = datetime.datetime.strptime(
                last_release_data.upload_time, "%Y-%m-%dT%H:%M:%S"
            )
        for release_data in self._releases.values():
            if release_data.upload_time:
                upload_datetime = datetime.datetime.strptime(
                    release_data.upload_time, "%Y-%m-%dT%H:%M:%S"
                )
                if latest_upload_datetime:
                    # Hide pre-releases.
                    if upload_datetime > latest_upload_datetime:
                        continue
                if after_datetime is None or upload_datetime >= after_datetime:
                    yield release_data
    def iter_versions_newer_than(self, version: Versions) -> Iterator[ReleaseData]:
        # Yields every (non-pre-release) release whose parsed version is
        # strictly greater than the given one.
        for release_data in self.iter_versions_released_after(None):
            if release_data.version > version:
                yield release_data
    @property
    def info(self):
        # Raw PyPI "info" payload for the package.
        return self._info
class PyPiCloud:
    """Small client over the PyPI JSON API with in-memory caching of both the
    raw JSON responses and the parsed PackageData objects."""
    def __init__(
        self, get_base_urls_weak_method: Optional[weakref.WeakMethod] = None
    ) -> None:
        self._cached_package_data: Dict[str, PackageData] = {}
        # Raw JSON payloads keyed by URL (fix: annotated as dict, not PackageData).
        self._cached_cloud: Dict[str, dict] = {}
        self._last_base_urls: Sequence[str] = ()
        if get_base_urls_weak_method is None:
            # use pypi.org
            def get_weak():
                def get_pypi_url():
                    return ("https://pypi.org",)
                return get_pypi_url
            self.get_base_urls_weak_method = get_weak
        else:
            self.get_base_urls_weak_method = get_base_urls_weak_method
    def METHOD_NAME(self, url: str) -> Optional[dict]:
        """Fetch ``url`` as JSON, caching successful responses; returns None
        on any HTTP or network failure."""
        try:
            return typing.cast(dict, self._cached_cloud[url])
        except KeyError:
            pass
        import requests
        try:
            response = requests.get(url)
            if response.status_code == 200:
                self._cached_cloud[url] = response.json()
            else:
                log.info(
                    f"Unable to get url (as json): {url}. Status code: {response.status_code}"
                )
                return None
        except Exception as e:
            log.info(f"Unable to get url (as json): {url}. Error: {e}")
            return None
        return typing.cast(dict, self._cached_cloud[url])
    def get_package_data(self, package_name: str) -> Optional[PackageData]:
        """Return parsed package data from the first base URL that yields a
        valid payload, or None when none does."""
        get_base_urls = self.get_base_urls_weak_method()
        if get_base_urls is None:
            # The owner of the weak method was collected.
            return None
        base_urls = get_base_urls()
        if base_urls != self._last_base_urls:
            # We need to clear packages if urls changed.
            self._last_base_urls = base_urls
            self._cached_package_data.clear()
            self._cached_cloud.clear()
        try:
            return self._cached_package_data[package_name]
        except KeyError:
            pass
        for base_url in base_urls:
            if base_url.endswith("/"):
                base_url = base_url[:-1]
            data = self.METHOD_NAME(f"{base_url}/pypi/{package_name}/json")
            if not data:
                continue  # go to the next url
            try:
                releases = data["releases"]
            except KeyError:
                continue  # go to the next url
            try:
                info = data["info"]
            except KeyError:
                continue  # go to the next url
            package_data = PackageData(package_name, info)
            if releases and isinstance(releases, dict):
                for release_number, release_info in releases.items():
                    package_data.add_release(release_number, release_info)
            self._cached_package_data[package_name] = package_data
            return self._cached_package_data[package_name]
        # If there was no match, return.
        return None
    def get_versions_newer_than(
        self, package_name: str, version: Union[Versions, VersionStr]
    ) -> List[VersionStr]:
        """
        Args:
            package_name: The name of the package
            version: The minimum version (versions returned must be > than this one).
        Returns:
            A sorted list containing the versions > than the one passed (the last
            entry is the latest version).
        """
        if isinstance(version, VersionStr):
            from robocorp_code.deps.pip_impl import pip_packaging_version
            version = pip_packaging_version.parse(version)
        package_data = self.get_package_data(package_name)
        if package_data is None:
            return []
        return [
            x.version_str
            for x in sorted(package_data.iter_versions_newer_than(version))
        ]
6,637 | send weekly email | from datetime import timedelta
from zoneinfo import ZoneInfo
from django.conf import settings
from django.template.loader import render_to_string
from django.utils import timezone
from premailer import transform
from structlog import get_logger
from lego import celery_app
from lego.apps.events.constants import EVENT_TYPE_TRANSLATIONS
from lego.apps.events.models import Event
from lego.apps.joblistings.constants import JOB_TYPE_TRANSLATIONS
from lego.apps.joblistings.models import Joblisting
from lego.apps.notifications.constants import EMAIL, WEEKLY_MAIL
from lego.apps.notifications.models import NotificationSetting
from lego.apps.permissions.utils import get_permission_handler
from lego.apps.restricted.message_processor import MessageProcessor
from lego.apps.tags.models import Tag
from lego.apps.users.models import AbakusGroup
from lego.utils.tasks import AbakusTask
# Module-level structlog logger used by the tasks below.
log = get_logger()
def add_source_to_url(url):
    """Append the UTM query parameters that tag links as coming from the weekly mail."""
    return url + "?utm_source=WeeklyMail&utm_campaign=Email"
def create_weekly_mail(user):
    """Build the HTML body of the weekly mail for ``user``.

    Gathers three content sections: events activating within the next week
    that the user can join (or is admitted to), job listings created during
    the last seven days, and the most recent "weekly" article (if any).

    Returns the rendered HTML string, or ``None`` when all sections are
    empty so callers can skip sending.
    """
    week_number = timezone.now().isocalendar().week
    three_days_ago_timestamp = timezone.now() - timedelta(days=3)
    # NOTE(review): despite the name this is simply "7 days ago", not the
    # actual previous Sunday — confirm intent with the mail schedule.
    last_sunday_timestamp = timezone.now() - timedelta(days=7)
    weekly_tag = Tag.objects.filter(tag="weekly").first()
    # Guard against the "weekly" tag having been deleted so the lookup below
    # never crashes.
    todays_weekly = (
        weekly_tag.article_set.filter(created_at__gt=three_days_ago_timestamp).first()
        if weekly_tag
        else None
    )
    # Events whose registration pools activate within the next seven days.
    events_next_week = Event.objects.filter(
        pools__activation_date__gt=timezone.now(),
        pools__activation_date__lt=timezone.now() + timedelta(days=7),
    ).distinct()
    permission_handler = get_permission_handler(events_next_week.model)
    filtered_events = permission_handler.filter_queryset(user, events_next_week)
    # Keep only events the user could join or is already admitted to.
    filtered_events = filter(
        lambda event: event.get_possible_pools(user, True) or event.is_admitted(user),
        filtered_events,
    )
    joblistings_last_week = Joblisting.objects.filter(
        created_at__gt=last_sunday_timestamp, visible_from__lt=timezone.now()
    )
    # Flatten job listings into plain dicts for the template.
    joblistings = []
    for joblisting in joblistings_last_week:
        joblistings.append(
            {
                "id": joblisting.id,
                "company_name": joblisting.company.name,
                "type": JOB_TYPE_TRANSLATIONS[joblisting.job_type],
                "title": joblisting.title,
            }
        )
    # Flatten events (with per-pool activation times in Oslo local time).
    events = []
    for event in filtered_events:
        pools = []
        for pool in event.pools.all():
            pools.append(
                {
                    "name": pool.name,
                    "activation_date": pool.activation_date.astimezone(
                        ZoneInfo("Europe/Oslo")
                    ).strftime("%d.%m kl. %H:%M"),
                }
            )
        events.append(
            {
                "title": event.title,
                "id": event.id,
                "pools": pools,
                "start_time": event.start_time.astimezone(
                    ZoneInfo("Europe/Oslo")
                ).strftime("%d.%m kl %H:%M"),
                "url": add_source_to_url(event.get_absolute_url()),
                "type": EVENT_TYPE_TRANSLATIONS[event.event_type],
            }
        )
    html_body = render_to_string(
        "email/email/weekly_mail.html",
        {
            "week_number": week_number,
            "events": events,
            "todays_weekly": ""
            if todays_weekly is None
            else add_source_to_url(todays_weekly.get_absolute_url()),
            "joblistings": joblistings,
            "frontend_url": settings.FRONTEND_URL,
        },
    )
    # Only return a body when there is at least one non-empty section.
    if events or joblistings or todays_weekly:
        return html_body
    return None
@celery_app.task(serializer="json", bind=True, base=AbakusTask)
def METHOD_NAME(self, logger_context=None):
    """Celery task: render and mass-send the weekly mail to opted-in students."""
    self.setup_logger(logger_context)
    week_number = timezone.now().isocalendar().week
    # Send to all active students
    all_users = set(AbakusGroup.objects.get(name="Students").restricted_lookup()[0])
    recipients = []
    for user in all_users:
        if not user.email_lists_enabled:
            # Don't send emails to users that don't want mail.
            continue
        if EMAIL not in NotificationSetting.active_channels(user, WEEKLY_MAIL):
            # User has disabled the weekly-mail email channel.
            continue
        recipients.append(user)
    # Lazily build (subject, html_body, from, to) tuples; `transform` is
    # premailer's CSS inliner. Users with no content get a None body.
    datatuple = (
        (
            f"Ukesmail uke {week_number}",
            transform(html) if (html := create_weekly_mail(user)) is not None else None,
            settings.DEFAULT_FROM_EMAIL,
            [user.email],
        )
        for user in recipients
    )
    # Drop recipients whose weekly mail was empty this week.
    datatuple = tuple(tuppel for tuppel in datatuple if tuppel[1] is not None)
    if datatuple:
        MessageProcessor.send_mass_mail_html(datatuple)
6,638 | tear down | #!/usr/bin/env python
# Copyright 2020, New York University and the TUF contributors
# SPDX-License-Identifier: MIT OR Apache-2.0
""" Unit tests for 'examples' scripts.
"""
import glob
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
from typing import ClassVar, List
from tests import utils
class TestRepoExamples(unittest.TestCase):
    """Unit test class for 'manual_repo' scripts.

    Provides a '_run_example_script' method to run (exec) a script located in
    the 'manual_repo' directory.
    """

    # Absolute path to 'examples/manual_repo', resolved once in setUpClass().
    repo_examples_dir: ClassVar[Path]

    @classmethod
    def setUpClass(cls) -> None:
        """Locate the example dir."""
        base = Path(__file__).resolve().parents[1]
        cls.repo_examples_dir = base / "examples" / "manual_repo"

    def setUp(self) -> None:
        """Create and change into test dir.
        NOTE: Test scripts are expected to create dirs/files in new CWD."""
        self.original_cwd = os.getcwd()
        self.base_test_dir = os.path.realpath(tempfile.mkdtemp())
        os.chdir(self.base_test_dir)

    def METHOD_NAME(self) -> None:
        """Change back to original dir and remove test dir, which may contain
        dirs/files the test created at test-time CWD."""
        os.chdir(self.original_cwd)
        shutil.rmtree(self.base_test_dir)

    def _run_script_and_assert_files(
        self, script_name: str, filenames_created: List[str]
    ) -> None:
        """Run script in example dir and assert that it created the
        files corresponding to the passed filenames inside a 'tmp*' test dir at
        CWD."""
        script_path = str(self.repo_examples_dir / script_name)
        with open(script_path, "rb") as f:
            # Execute the example in-process; __file__ is injected because
            # the scripts may rely on it for path resolution.
            # pylint: disable=exec-used
            exec(
                compile(f.read(), script_path, "exec"),
                {"__file__": script_path},
            )
        # Each example is expected to create exactly one 'tmp*' dir in CWD.
        test_dirs = glob.glob("tmp*")
        self.assertTrue(
            len(test_dirs) == 1, f"expected 1 'tmp*' test dir, got {test_dirs}"
        )
        test_dir = test_dirs.pop()
        for name in filenames_created:
            metadata_path = Path(test_dir) / f"{name}"
            self.assertTrue(
                metadata_path.exists(), f"missing '{metadata_path}' file"
            )

    def test_basic_repo(self) -> None:
        """Run 'basic_repo.py' and assert creation of metadata files."""
        self._run_script_and_assert_files(
            "basic_repo.py",
            [
                "1.python-scripts.json",
                "1.root.json",
                "1.snapshot.json",
                "1.targets.json",
                "2.root.json",
                "2.snapshot.json",
                "2.targets.json",
                "timestamp.json",
            ],
        )

    def test_hashed_bin_delegation(self) -> None:
        """Run 'hashed_bin_delegation.py' and assert creation of metadata files."""
        # One metadata file per 8-prefix hash bin (00-07 .. f8-ff).
        self._run_script_and_assert_files(
            "hashed_bin_delegation.py",
            [
                "1.bins.json",
                "1.00-07.json",
                "1.08-0f.json",
                "1.10-17.json",
                "1.18-1f.json",
                "1.20-27.json",
                "1.28-2f.json",
                "1.30-37.json",
                "1.38-3f.json",
                "1.40-47.json",
                "1.48-4f.json",
                "1.50-57.json",
                "1.58-5f.json",
                "1.60-67.json",
                "1.68-6f.json",
                "1.70-77.json",
                "1.78-7f.json",
                "1.80-87.json",
                "1.88-8f.json",
                "1.90-97.json",
                "1.98-9f.json",
                "1.a0-a7.json",
                "1.a8-af.json",
                "1.b0-b7.json",
                "1.b8-bf.json",
                "1.c0-c7.json",
                "1.c8-cf.json",
                "1.d0-d7.json",
                "1.d8-df.json",
                "1.e0-e7.json",
                "1.e8-ef.json",
                "1.f0-f7.json",
                "1.f8-ff.json",
            ],
        )

    def test_succinct_hash_bin_delegation(self) -> None:
        """Run 'succinct_hash_bin_delegations.py' and assert creation of metadata files."""
        # One metadata file per succinct bin (delegated_bin-00 .. -1f).
        self._run_script_and_assert_files(
            "succinct_hash_bin_delegations.py",
            [
                "1.targets.json",
                "1.delegated_bin-00.json",
                "1.delegated_bin-01.json",
                "1.delegated_bin-02.json",
                "1.delegated_bin-03.json",
                "1.delegated_bin-04.json",
                "1.delegated_bin-05.json",
                "1.delegated_bin-06.json",
                "1.delegated_bin-07.json",
                "1.delegated_bin-08.json",
                "1.delegated_bin-09.json",
                "1.delegated_bin-0a.json",
                "1.delegated_bin-0b.json",
                "1.delegated_bin-0c.json",
                "1.delegated_bin-0d.json",
                "1.delegated_bin-0e.json",
                "1.delegated_bin-0f.json",
                "1.delegated_bin-10.json",
                "1.delegated_bin-11.json",
                "1.delegated_bin-12.json",
                "1.delegated_bin-13.json",
                "1.delegated_bin-14.json",
                "1.delegated_bin-15.json",
                "1.delegated_bin-16.json",
                "1.delegated_bin-17.json",
                "1.delegated_bin-18.json",
                "1.delegated_bin-19.json",
                "1.delegated_bin-1a.json",
                "1.delegated_bin-1b.json",
                "1.delegated_bin-1c.json",
                "1.delegated_bin-1d.json",
                "1.delegated_bin-1e.json",
                "1.delegated_bin-1f.json",
            ],
        )
if __name__ == "__main__":
    # Configure test logging from CLI args, then run the suite.
    utils.configure_test_logging(sys.argv)
    unittest.main()
6,639 | draw | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import wx
import matplotlib
from matplotlib import cbook
from . import wx_compat as wxc
from .backend_agg import FigureCanvasAgg
from .backend_wx import (
_BackendWx, _FigureCanvasWxBase, FigureFrameWx,
NavigationToolbar2Wx as NavigationToolbar2WxAgg)
class FigureFrameWxAgg(FigureFrameWx):
    def get_canvas(self, fig):
        # Factory hook used by FigureFrameWx to create the Agg-backed canvas.
        return FigureCanvasWxAgg(self, -1, fig)
class FigureCanvasWxAgg(FigureCanvasAgg, _FigureCanvasWxBase):
    """
    The FigureCanvas contains the figure and does event handling.

    In the wxPython backend, it is derived from wxPanel, and (usually)
    lives inside a frame instantiated by a FigureManagerWx. The parent
    window probably implements a wxSizer to control the displayed
    control size - but we give a hint as to our preferred minimum
    size.
    """

    def METHOD_NAME(self, drawDC=None):
        """
        Render the figure using agg.
        """
        FigureCanvasAgg.METHOD_NAME(self)
        # Convert the freshly rendered agg buffer into the wx bitmap that
        # gui_repaint blits onto the window.
        self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
        self._isDrawn = True
        self.gui_repaint(drawDC=drawDC, origin='WXAgg')

    def blit(self, bbox=None):
        """
        Transfer the region of the agg buffer defined by bbox to the display.
        If bbox is None, the entire buffer is transferred.
        """
        if bbox is None:
            self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
            self.gui_repaint()
            return
        # bbox.bounds is (left, bottom, width, height) in agg coordinates
        # (origin bottom-left); wx uses a top-left origin, hence the vertical
        # flip when computing y.  (Removed the unused `r = l + w` local.)
        l, b, w, h = bbox.bounds
        t = b + h
        x = int(l)
        y = int(self.bitmap.GetHeight() - t)
        srcBmp = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
        srcDC = wx.MemoryDC()
        srcDC.SelectObject(srcBmp)
        destDC = wx.MemoryDC()
        destDC.SelectObject(self.bitmap)
        destDC.Blit(x, y, int(w), int(h), srcDC, x, y)
        # Deselect the bitmaps before they go out of scope / get reused.
        destDC.SelectObject(wx.NullBitmap)
        srcDC.SelectObject(wx.NullBitmap)
        self.gui_repaint()

    filetypes = FigureCanvasAgg.filetypes
@cbook.deprecated("2.2", alternative="NavigationToolbar2WxAgg")
class Toolbar(NavigationToolbar2WxAgg):
    # Deprecated alias kept only for backwards compatibility.
    pass
# agg/wxPython image conversion functions (wxPython >= 2.8)
def _convert_agg_to_wx_image(agg, bbox):
    """
    Convert the region of the agg buffer bounded by bbox to a wx.Image.  If
    bbox is None, the entire buffer is converted.

    Note: agg must be a backend_agg.RendererAgg instance.
    """
    if bbox is None:
        # agg => rgb -> image
        image = wxc.EmptyImage(int(agg.width), int(agg.height))
        image.SetData(agg.tostring_rgb())
        return image
    else:
        # agg => rgba buffer -> bitmap => clipped bitmap => image
        return wx.ImageFromBitmap(_WX28_clipped_agg_as_bitmap(agg, bbox))
def _convert_agg_to_wx_bitmap(agg, bbox):
    """
    Convert the region of the agg buffer bounded by bbox to a wx.Bitmap.  If
    bbox is None, the entire buffer is converted.

    Note: agg must be a backend_agg.RendererAgg instance.
    """
    if bbox is not None:
        # agg => rgba buffer -> bitmap => clipped bitmap
        return _WX28_clipped_agg_as_bitmap(agg, bbox)
    # No clipping requested: wrap the whole rgba buffer in a bitmap.
    return wxc.BitmapFromBuffer(int(agg.width), int(agg.height),
                                agg.buffer_rgba())
def _WX28_clipped_agg_as_bitmap(agg, bbox):
    """
    Convert the region of the agg buffer bounded by bbox to a wx.Bitmap.

    Note: agg must be a backend_agg.RendererAgg instance.
    """
    # bbox.bounds is (left, bottom, width, height) in agg coordinates
    # (origin bottom-left).  (Removed the unused `r = l + width` local.)
    l, b, width, height = bbox.bounds
    t = b + height
    srcBmp = wxc.BitmapFromBuffer(int(agg.width), int(agg.height),
                                  agg.buffer_rgba())
    srcDC = wx.MemoryDC()
    srcDC.SelectObject(srcBmp)
    destBmp = wxc.EmptyBitmap(int(width), int(height))
    destDC = wx.MemoryDC()
    destDC.SelectObject(destBmp)
    # Flip vertically into wx's top-left origin and blit the clipped region.
    x = int(l)
    y = int(int(agg.height) - t)
    destDC.Blit(0, 0, int(width), int(height), srcDC, x, y)
    # Deselect bitmaps before returning so callers can use destBmp.
    srcDC.SelectObject(wx.NullBitmap)
    destDC.SelectObject(wx.NullBitmap)
    return destBmp
@_BackendWx.export
class _BackendWxAgg(_BackendWx):
    # Registers the Agg flavour of the wx backend with matplotlib.
    FigureCanvas = FigureCanvasWxAgg
    _frame_class = FigureFrameWxAgg
6,640 | test pseudoknot orders | # This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
import pytest
import json
import numpy as np
import pickle as pkl
import biotite.structure as struc
import biotite.structure.io as strucio
from os.path import join
from ..util import data_dir
@pytest.fixture
def nuc_sample_array():
    """
    Sample structure for pseudoknot detection.

    Loads the nucleic-acid structure from PDB entry 4P5J used by the
    tests below.
    """
    return strucio.load_structure(join(data_dir("structure"), "4p5j.cif"))
def test_pseudoknots(nuc_sample_array):
    """
    Check the output of :func:`pseudoknots()`.
    """
    # Known base pairs with pseudoknot-order = 1:
    pseudoknot_order_one = [{2, 74}, {58, 72}, {59, 71}, {60, 70}]
    # Known base pairs that can either be of order one or two
    pseudoknot_order_one_or_two = [{9, 48}, {10, 49}]
    # In each optimal solution half of the ambiguous pairs end up in order 1
    # and the other half in order 2.
    order_one_count = (
        len(pseudoknot_order_one) + (len(pseudoknot_order_one_or_two)/2)
    )
    order_two_count = len(pseudoknot_order_one_or_two)/2
    base_pairs = struc.base_pairs(nuc_sample_array)
    pseudoknot_order = struc.pseudoknots(base_pairs)
    # Sample structure should have two optimal solutions with default
    # scoring parameters
    assert len(pseudoknot_order) == 2
    for optimal_solution in pseudoknot_order:
        # Assert that the right number of pseudoknots is present for
        # each order
        assert len(base_pairs) == len(optimal_solution)
        assert np.count_nonzero(optimal_solution == 1) == order_one_count
        assert np.count_nonzero(optimal_solution == 2) == order_two_count
        assert np.max(optimal_solution) == 2
        # Assert that the each base pair has the right pseudoknot order
        for base_pair, order in zip(
            nuc_sample_array[base_pairs].res_id, optimal_solution
        ):
            if(order == 1):
                assert (
                    set(base_pair) in pseudoknot_order_one or
                    set(base_pair) in pseudoknot_order_one_or_two
                )
            elif (order == 2):
                assert (
                    set(base_pair) in pseudoknot_order_one_or_two
                )
def load_test(name):
    """
    Load sample base pair arrays and reference solutions from file.

    Returns a ``(basepairs, solutions)`` tuple, where ``solutions`` is a
    list of sets of sorted base-pair tuples for order-independent
    comparison.
    """
    # Base pairs as numpy array (input for `pseudoknots()`)
    with open(
        join(data_dir("structure"), "pseudoknots", f"{name}_knotted.json"),
        "r"
    ) as f:
        basepairs = np.array(json.load(f))
    # List of solutions (set of tuples)
    # NOTE(review): opened in binary mode while the file above uses text
    # mode; json.load() accepts both, but the inconsistency looks accidental.
    with open(
        join(data_dir("structure"), "pseudoknots", f"{name}_unknotted.json"),
        "rb"
    ) as f:
        solutions = json.load(f)
    for i, solution in enumerate(solutions):
        solutions[i] = set([tuple(pair) for pair in solution])
    return basepairs, solutions
@pytest.mark.parametrize("name", [f"test{x}" for x in range(21)])
def test_pseudoknot_removal(name):
    """
    Test the implementation of the dynamic programming algorithm
    referenced in :func:`pseudoknots()` against the original
    implementation.

    The reference solutions were created with the following tool:
    https://www.ibi.vu.nl/programs/k2nwww/

    The original purpose was to remove pseudoknots. Thus, the reference
    solutions contain unknotted basepairs.
    """
    # Get base pairs and reference solutions
    basepairs, reference_solutions = load_test(name)
    # Calculate solutions from the base pairs
    raw_solutions = struc.pseudoknots(basepairs, max_pseudoknot_order=0)
    # The number of solutions calculated
    solutions_count = 0
    # Verify that each solution is in the reference solutions
    for raw_solution in raw_solutions:
        solution = set()
        # Order -1 marks pairs removed as knotted; keep only order-0 pairs.
        for basepair, order in zip(basepairs, raw_solution):
            if order == -1:
                continue
            solution.add(tuple(sorted(basepair)))
        solutions_count += 1
        assert solution in reference_solutions
    # Verify that the number of solutions matches the reference
    assert len(reference_solutions) == solutions_count
@pytest.mark.parametrize("seed", range(10))
def METHOD_NAME(seed):
    """
    Generate a random set of basepairs. Assert that increasing
    pseudoknot orders contain less or equal base pairs. Furthermore,
    assert that each individual order only contains unknotted base
    pairs.
    """
    # Generate Random set of basepairs
    np.random.seed(seed)
    bases = range(100)
    basepairs = np.random.choice(bases, size=(20, 2), replace=False)
    # Get pseudoknot order for each basepair
    solutions = struc.pseudoknots(basepairs)
    # Iterate through the solutions
    for solution in solutions:
        # Number of base pairs in the previous order
        previous_order = -1
        for order in range(np.max(solution)+1):
            # Ensure that the base pairs of the same order are unknotted
            # (order 0 for every pair means "no pseudoknots among them").
            assert (struc.pseudoknots(basepairs[solution == order]) == 0).all()
            # Number of base pairs in the current order
            this_order = len(solution[solution == order])
            # Ensure that that higher orders contain less or equal base
            # pairs than lower orders
            if previous_order != -1:
                assert this_order <= previous_order
            previous_order = this_order
def test_empty_base_pairs():
    """
    Assert that an empty array of base pairs generates an empty array of
    pseudoknot orders.
    """
    # `pseudoknots()` returns one row per optimal solution; for no input
    # pairs that is a single empty solution, i.e. shape (1, 0).
    # (Fixed: the statement was truncated — the closing parenthesis of the
    # shape tuple and the call was missing, a SyntaxError.)
    assert struc.pseudoknots([]).shape == (1, 0)
6,641 | set up | import ctk
import qt
import slicer
from slicer.ScriptedLoadableModule import *
#
# WebEngine
#
class WebEngine(ScriptedLoadableModule):
    """Uses ScriptedLoadableModule base class, available at:
    https://github.com/Slicer/Slicer/blob/main/Base/Python/slicer/ScriptedLoadableModule.py
    """

    def __init__(self, parent):
        ScriptedLoadableModule.__init__(self, parent)
        # Module metadata displayed in Slicer's module selector.
        parent.title = "WebEngine"
        parent.categories = ["Testing.TestCases"]
        parent.dependencies = []
        parent.contributors = ["Steve Pieper (Isomics)"]
        parent.helpText = """
Module to test WebEngine.
"""
        parent.acknowledgementText = """
This file was originally developed by Steve Pieper and was partially funded by NSF grant 1759883
"""
#
# qWebEngineWidget
#
class WebEngineWidget(ScriptedLoadableModuleWidget):
    """Uses ScriptedLoadableModuleWidget base class, available at:
    https://github.com/Slicer/Slicer/blob/main/Base/Python/slicer/ScriptedLoadableModule.py
    """

    def __init__(self, parent):
        ScriptedLoadableModuleWidget.__init__(self, parent)
        self.webWidgets = []  # hold references so windows persist

    def setup(self):
        """Build the module GUI: one button per sample site plus a Close All button."""
        ScriptedLoadableModuleWidget.setup(self)
        # Instantiate and connect widgets ...
        # Collapsible button
        sitesCollapsibleButton = ctk.ctkCollapsibleButton()
        sitesCollapsibleButton.text = "Sample Sites"
        self.layout.addWidget(sitesCollapsibleButton)
        # Layout within the collapsible button
        sitesFormLayout = qt.QFormLayout(sitesCollapsibleButton)
        # site buttons
        buttons = []
        self.sites = [
            {
                "label": "Web Console", "url": "http://localhost:1337"
            },
            {
                "label": "Crowds Cure Cancer", "url": "http://cancer.crowds-cure.org"
            },
            {
                "label": "Slicer Home Page", "url": "https://slicer.org"
            },
            {
                "label": "MorphoSource", "url": "https://www.morphosource.org"
            },
            {
                "label": "Slicer SampleData", "url": "https://www.slicer.org/wiki/SampleData"
            },
            {
                "label": "SlicerMorph", "url": "https://slicermorph.github.io"
            },
        ]
        for site in self.sites:
            button = qt.QPushButton(site["label"])
            button.toolTip = "Open %s" % site["url"]
            sitesFormLayout.addWidget(button)
            # Bind `site` as a default argument so each button opens its own
            # site (avoids the late-binding closure pitfall).
            onClick = lambda click, site=site: self.onSiteButtonClicked(site)
            button.connect('clicked(bool)', onClick)
            buttons.append(button)
        button = qt.QPushButton("Close All")
        button.toolTip = "Close all the web views"
        button.connect('clicked(bool)', self.onCloseAll)
        self.layout.addWidget(button)
        # Add vertical spacer
        self.layout.addStretch(1)

    def onSiteButtonClicked(self, site):
        """Open `site` in a new top-level web widget, offset from the main window."""
        webWidget = slicer.qSlicerWebWidget()
        slicerGeometry = slicer.util.mainWindow().geometry
        webWidget.size = qt.QSize(1536, 1024)
        webWidget.pos = qt.QPoint(slicerGeometry.x() + 256, slicerGeometry.y() + 128)
        webWidget.url = site["url"]
        webWidget.show()
        self.webWidgets.append(webWidget)

    def onCloseAll(self):
        """Close every open web view and drop our references to them."""
        # Fixed: the previous implementation did `del widget` inside the
        # loop, which only unbinds the local loop variable and never closes
        # the windows.  Close each widget explicitly, then clear the list.
        for widget in self.webWidgets:
            widget.close()
        self.webWidgets = []
class WebEngineTest(ScriptedLoadableModuleTest):
    """
    This is the test case for your scripted module.
    Uses ScriptedLoadableModuleTest base class, available at:
    https://github.com/Slicer/Slicer/blob/main/Base/Python/slicer/ScriptedLoadableModule.py
    """

    def METHOD_NAME(self):
        """ Do whatever is needed to reset the state - typically a scene clear will be enough.
        """
        # Flags toggled by onEvalResult() once the JS round-trip completes.
        self.gotResponse = False
        self.gotCorrectResponse = False

    def runTest(self):
        """Run as few or as many tests as needed here.
        """
        self.METHOD_NAME()
        self.test_WebEngine1()

    def onEvalResult(self, js, result):
        # Slot for the widget's evalResult(QString,QString) signal: records
        # whether the expected expression evaluated to the expected value.
        if js == "valueFromSlicer;":
            self.delayDisplay("Got Slicer result back from JavaScript")
            self.gotResponse = True
            if result == "42":
                self.gotCorrectResponse = True
                self.delayDisplay("Got the expected result back from JavaScript")
        else:
            self.delayDisplay("Got a result back from JavaScript")
            print((js, result))

    def test_WebEngine1(self):
        """ Testing WebEngine
        """
        self.delayDisplay("Starting the test")
        webWidget = slicer.qSlicerWebWidget()
        webWidget.size = qt.QSize(1024, 512)
        webWidget.webView().url = qt.QUrl("")
        webWidget.show()
        self.delayDisplay('Showing widget')
        # Inject a DOM element to confirm evalJS reaches the page.
        webWidget.evalJS("""
            const paragraph = document.createElement('p');
            paragraph.innerText = 'Hello from Slicer!';
            document.body.appendChild(paragraph);
            """)
        self.delayDisplay('Slicer should be saying hello!')
        #
        # Test javascript evaluation + use of "evalResult()" signal
        #
        webWidget.connect("evalResult(QString,QString)", self.onEvalResult)
        self.delayDisplay('Slicer setting a javascript value')
        webWidget.evalJS("const valueFromSlicer = 42;")
        webWidget.evalJS("valueFromSlicer;")
        iteration = 0
        while not self.gotResponse and iteration < 3:
            # Specify an explicit delay to ensure async execution by the
            # webengine has completed.
            self.delayDisplay('Waiting for response...', msec=500)
            iteration += 1
        webWidget.disconnect("evalResult(QString,QString)", self.onEvalResult)
        if not self.gotResponse:
            raise RuntimeError("Never got response from evalJS")
        if not self.gotCorrectResponse:
            raise AssertionError("Did not get back expected result!")
        #
        # Test python evaluation from javascript
        #
        self.delayDisplay('Call a python method')
        # Pre-authorize JS-initiated python execution for this test.
        slicer.app.settings().setValue("WebEngine/AllowPythonExecution", ctk.ctkMessageBox.AcceptRole)
        webWidget.evalJS(r"""
            let pythonCode = "dialog = qt.QInputDialog(slicer.util.mainWindow())\n";
            pythonCode += "dialog.setLabelText('hello')\n";
            pythonCode += "dialog.open()\n";
            pythonCode += "qt.QTimer.singleShot(1000, dialog.close)\n";
            window.slicerPython.evalPython(pythonCode);
            """)
        self.delayDisplay('Test access to python via js', msec=500)
        # Clear any stale value before asking JS to set it.
        if hasattr(slicer.modules, 'slicerPythonValueFromJS'):
            del slicer.modules.slicerPythonValueFromJS
        webWidget.evalJS("""
            window.slicerPython.evalPython("slicer.modules.slicerPythonValueFromJS = 42");
            """)
        iteration = 0
        while iteration < 3 and not hasattr(slicer.modules, 'slicerPythonValueFromJS'):
            # Specify an explicit delay to ensure async execution by the
            # webengine has completed.
            self.delayDisplay('Waiting for python value from JS...', msec=500)
            iteration += 1
        if iteration >= 3:
            raise RuntimeError("Couldn't get python value back from JS")
        self.delayDisplay('Value of %d received via javascipt' % slicer.modules.slicerPythonValueFromJS)
        del slicer.modules.slicerPythonValueFromJS
        self.delayDisplay('Test passed!')
6,642 | cinemarv link generation | # -*- coding: UTF-8 -*-
# ----------------------
# Name: cinemarv_api - XPath and XSLT functions for the CinemaRV.com grabber
# Python Script
# Author: R.D. Vaughan
# Purpose: This python script is intended to perform a variety of utility functions
# for the conversion of data to the MNV standard RSS output format.
# See this link for the specifications:
# http://www.mythtv.org/wiki/MythNetvision_Grabber_Script_Format
#
# License:Creative Commons GNU GPL v2
# (http://creativecommons.org/licenses/GPL/2.0/)
#-------------------------------------
__title__ ="cinemarv_api - XPath and XSLT functions for the CinemaRV.com grabber"
__author__="R.D. Vaughan"
__purpose__='''
This python script is intended to perform a variety of utility functions
for the conversion of data to the MNV standard RSS output format.
See this link for the specifications:
http://www.mythtv.org/wiki/MythNetvision_Grabber_Script_Format
'''
__version__="v0.1.0"
# 0.1.0 Initial development
# Specify the class names that have XPath extension functions
__xpathClassList__ = ['xpathFunctions', ]
# Specify the XSLT extension class names. Each class is a stand-alone extension function
#__xsltExtentionList__ = ['xsltExtExample', ]
__xsltExtentionList__ = []
import os, sys, re, time, datetime, shutil, urllib.request, urllib.parse, urllib.error, string
from copy import deepcopy
import io
class OutStreamEncoder(object):
    """Wrap an output stream so Unicode text is written with a fixed encoding."""

    def __init__(self, outstream, encoding=None):
        # Default to the filesystem encoding when none is supplied.
        self.out = outstream
        self.encoding = encoding if encoding else sys.getfilesystemencoding()

    def write(self, obj):
        """Encode str objects with the configured encoding and write the bytes."""
        data = obj.encode(self.encoding) if isinstance(obj, str) else obj
        try:
            self.out.buffer.write(data)
        except OSError:
            # Ignore write failures (e.g. broken pipe), matching the
            # original best-effort behaviour.
            pass

    def __getattr__(self, attr):
        """Delegate every attribute other than write to the wrapped stream."""
        return getattr(self.out, attr)
# Re-wrap stdout/stderr so all text output is utf-8 encoded regardless of the
# terminal's locale (only when running with the standard text-mode streams).
if isinstance(sys.stdout, io.TextIOWrapper):
    sys.stdout = OutStreamEncoder(sys.stdout, 'utf8')
    sys.stderr = OutStreamEncoder(sys.stderr, 'utf8')
# lxml is a hard requirement; abort with a readable message instead of a
# traceback when it (or StringIO) cannot be imported.
try:
    from io import StringIO
    from lxml import etree
except Exception as e:
    sys.stderr.write('\n! Error - Importing the "lxml" and "StringIO" python libraries failed on error(%s)\n' % e)
    sys.exit(1)
# Check that the lxml library is current enough
# From the lxml documents it states: (http://codespeak.net/lxml/installation.html)
# "If you want to use XPath, do not use libxml2 2.6.27. We recommend libxml2 2.7.2 or later"
# Testing was performed with the Ubuntu 9.10 "python-lxml" version "2.1.5-1ubuntu2" repository package
version = ''
for digit in etree.LIBXML_VERSION:
version+=str(digit)+'.'
version = version[:-1]
if version < '2.7.2':
sys.stderr.write('''
! Error - The installed version of the "lxml" python library "libxml" version is too old.
At least "libxml" version 2.7.2 must be installed. Your version is (%s).
''' % version)
sys.exit(1)
class xpathFunctions(object):
    """XPath extension functions for the CinemaRV.com grabber.

    NOTE(review): these functions reference a module-level `common` object
    that is not defined in this module's visible scope; it is presumably
    injected by the MythNetvision grabber framework — confirm.
    """

    def __init__(self):
        # Names of the callables exposed to XPath as "mnvXpath:" functions.
        self.functList = ['cinemarvLinkGeneration', 'cinemarvIsCustomHTML', 'cinemarvCheckIfDBItem', ]
        self.TextTail = etree.XPath("string()")
        # Cross-call cache shared by cinemarvLinkGeneration() and
        # cinemarvIsCustomHTML().
        self.persistence = {}
    # end __init__()

    ######################################################################################################
    #
    # Start of XPath extension functions
    #
    ######################################################################################################

    def METHOD_NAME(self, context, *args):
        '''Generate a link for the CinemaRV.com site. A read of the item's web page is required to
        extract the flash video id.
        Call example: 'mnvXpath:cinemarvLinkGeneration(string(link))'
        return the url link
        '''
        webURL = args[0]
        # If this is for the download then just return what was found for the "link" element
        if 'cinemarvLinkGeneration' in self.persistence:
            if self.persistence['cinemarvLinkGeneration'] is not None:
                returnValue = self.persistence['cinemarvLinkGeneration']
                self.persistence['cinemarvLinkGeneration'] = None
                return returnValue
        else:
            # First call: compile the XPath expression and HTML parser once.
            self.persistence['cinemarvLinkGenerationVideoID'] = etree.XPath('//object[@id="flashObj"]//param[@name="flashVars"]/@value', namespaces=common.namespaces)
            self.persistence['cinemarvLinkGenerationParser'] = etree.HTMLParser()
        try:
            webPageElement = etree.parse(webURL, self.persistence['cinemarvLinkGenerationParser'])
        except Exception as errmsg:
            sys.stderr.write('!Warning: The web page URL(%s) could not be read, error(%s)\n' % (webURL, errmsg))
            return webURL
        if webPageElement is None:
            self.persistence['cinemarvLinkGeneration'] = webURL
            return webURL
        # Extract the flash video id ("videoId=...&") from the page.
        tmpVideoID = self.persistence['cinemarvLinkGenerationVideoID'](webPageElement)
        if not len(tmpVideoID):
            self.persistence['cinemarvLinkGeneration'] = webURL
            return webURL
        index = tmpVideoID[0].find('&')
        if index == -1:
            self.persistence['cinemarvLinkGeneration'] = webURL
            return webURL
        videocode = tmpVideoID[0][:index].replace('videoId=', '')
        self.persistence['cinemarvLinkGeneration'] = common.linkWebPage('dummycontext', 'cinemarv')+videocode
        return self.persistence['cinemarvLinkGeneration']
    # end cinemarvLinkGeneration()

    def cinemarvIsCustomHTML(self, context, *args):
        '''Check if the link is for a custom HTML
        Example call: mnvXpath:cinemarvIsCustomHTML(('dummy'))
        return True if the link does not starts with "http://"
        return False if the link starts with "http://"
        '''
        if self.persistence['cinemarvLinkGeneration'] is None:
            return False
        if self.persistence['cinemarvLinkGeneration'].startswith('http://'):
            return False
        else:
            return True
    # end cinemarvIsCustomHTML()

    def cinemarvCheckIfDBItem(self, context, *arg):
        '''Use a unique key value pairing to find out if the 'internetcontentarticles' table already
        has a matching item. This is done to save accessing the Internet when not required.
        Call example: 'mnvXpath:cinemarvCheckIfDBItem(.)'
        return True if a match was found
        return False if a match was not found
        '''
        return common.checkIfDBItem('dummy', {'feedtitle': 'Movie Trailers', 'title': arg[0].replace('Trailer', '').strip(), 'author': arg[1], 'description': arg[2]})
    # end cinemarvCheckIfDBItem()
######################################################################################################
#
# End of XPath extension functions
#
######################################################################################################
######################################################################################################
#
# Start of XSLT extension functions
#
######################################################################################################
######################################################################################################
#
# End of XSLT extension functions
#
###################################################################################################### |
6,643 | internal init | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = ['DeviceGroupArgs', 'DeviceGroup']
@pulumi.input_type
class DeviceGroupArgs:
    def __init__(__self__, *,
                 iot_defender_location: pulumi.Input[str],
                 device_group_name: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a DeviceGroup resource.
        :param pulumi.Input[str] iot_defender_location: Defender for IoT location
        :param pulumi.Input[str] device_group_name: Device group name
        """
        pulumi.set(__self__, "iot_defender_location", iot_defender_location)
        # Only register the optional input when it was actually supplied.
        if device_group_name is not None:
            pulumi.set(__self__, "device_group_name", device_group_name)

    @property
    @pulumi.getter(name="iotDefenderLocation")
    def iot_defender_location(self) -> pulumi.Input[str]:
        """
        Defender for IoT location
        """
        return pulumi.get(self, "iot_defender_location")

    @iot_defender_location.setter
    def iot_defender_location(self, value: pulumi.Input[str]):
        pulumi.set(self, "iot_defender_location", value)

    @property
    @pulumi.getter(name="deviceGroupName")
    def device_group_name(self) -> Optional[pulumi.Input[str]]:
        """
        Device group name
        """
        return pulumi.get(self, "device_group_name")

    @device_group_name.setter
    def device_group_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "device_group_name", value)
class DeviceGroup(pulumi.CustomResource):
    # Overload: keyword-argument ("property bag") construction.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 device_group_name: Optional[pulumi.Input[str]] = None,
                 iot_defender_location: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Device group
        Azure REST API version: 2021-02-01-preview. Prior API version in Azure Native 1.x: 2021-02-01-preview

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] device_group_name: Device group name
        :param pulumi.Input[str] iot_defender_location: Defender for IoT location
        """
        ...
    # Overload: construction from a DeviceGroupArgs object.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: DeviceGroupArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Device group
        Azure REST API version: 2021-02-01-preview. Prior API version in Azure Native 1.x: 2021-02-01-preview

        :param str resource_name: The name of the resource.
        :param DeviceGroupArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(DeviceGroupArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__.METHOD_NAME(resource_name, opts, **resource_args.__dict__)
else:
__self__.METHOD_NAME(resource_name, *args, **kwargs)
def METHOD_NAME(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
device_group_name: Optional[pulumi.Input[str]] = None,
iot_defender_location: Optional[pulumi.Input[str]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = DeviceGroupArgs.__new__(DeviceGroupArgs)
__props__.__dict__["device_group_name"] = device_group_name
if iot_defender_location is None and not opts.urn:
raise TypeError("Missing required property 'iot_defender_location'")
__props__.__dict__["iot_defender_location"] = iot_defender_location
__props__.__dict__["name"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-native:iotsecurity/v20210201preview:DeviceGroup")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(DeviceGroup, __self__).__init__(
'azure-native:iotsecurity:DeviceGroup',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'DeviceGroup':
"""
Get an existing DeviceGroup resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = DeviceGroupArgs.__new__(DeviceGroupArgs)
__props__.__dict__["name"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
return DeviceGroup(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
Azure Resource Manager metadata containing createdBy and modifiedBy information.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
|
6,644 | config options | from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
from conan.tools.build import check_min_cppstd, valid_min_cppstd
from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout
from conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get, replace_in_file, rm, rmdir
from conan.tools.scm import Version
import os
required_conan_version = ">=1.54.0"
class OpenSubdivConan(ConanFile):
    """Conan recipe for Pixar's OpenSubdiv subdivision-surface library."""
    name = "opensubdiv"
    license = "LicenseRef-LICENSE.txt"
    homepage = "https://github.com/PixarAnimationStudios/OpenSubdiv"
    url = "https://github.com/conan-io/conan-center-index"
    description = "An Open-Source subdivision surface library"
    topics = ("cgi", "vfx", "animation", "subdivision surface")

    package_type = "library"
    settings = "os", "arch", "compiler", "build_type"
    options = {
        "shared": [True, False],
        "fPIC": [True, False],
        "with_tbb": [True, False],
        "with_opengl": [True, False],
        "with_omp": [True, False],
        "with_cuda": [True, False],
        "with_clew": [True, False],
        "with_opencl": [True, False],
        "with_dx": [True, False],
        "with_metal": [True, False],
    }
    default_options = {
        "shared": False,
        "fPIC": True,
        "with_tbb": False,
        "with_opengl": False,
        "with_omp": False,
        "with_cuda": False,
        "with_clew": False,
        "with_opencl": False,
        "with_dx": False,
        "with_metal": False,
    }
    short_paths = True

    @property
    def _min_cppstd(self):
        # The Metal backend requires C++14; every other configuration builds with C++11.
        if self.options.get_safe("with_metal"):
            return "14"
        return "11"

    @property
    def _minimum_compilers_version(self):
        return {
            "Visual Studio": "15",
            "msvc": "191",
            "gcc": "5",
            "clang": "11",
            "apple-clang": "11.0",
        }

    def export_sources(self):
        export_conandata_patches(self)

    def config_options(self):
        # Standard Conan hook (must be named `config_options` for Conan to
        # invoke it): prune options that cannot apply to the target OS.
        if self.settings.os == "Windows":
            del self.options.fPIC
        else:
            del self.options.with_dx
        if self.settings.os != "Macos":
            del self.options.with_metal

    def configure(self):
        if self.options.shared:
            self.options.rm_safe("fPIC")

    def layout(self):
        cmake_layout(self, src_folder="src")

    def requirements(self):
        if self.options.with_tbb:
            self.requires("onetbb/2021.8.0")

    def validate(self):
        if self.settings.compiler.get_safe("cppstd"):
            check_min_cppstd(self, self._min_cppstd)
        min_version = self._minimum_compilers_version.get(str(self.settings.compiler), False)
        if min_version and Version(self.settings.compiler.version) < min_version:
            raise ConanInvalidConfiguration(
                f"{self.ref} requires C++{self._min_cppstd}, which your compiler does not support."
            )
        if self.options.shared and self.settings.os == "Windows":
            raise ConanInvalidConfiguration(f"{self.ref} shared not supported on Windows")

    def source(self):
        get(self, **self.conan_data["sources"][self.version], strip_root=True)

    @property
    def _osd_gpu_enabled(self):
        # True when any GPU compute/graphics backend is requested.
        return any(
            [
                self.options.with_opengl,
                self.options.with_opencl,
                self.options.with_cuda,
                self.options.get_safe("with_dx"),
                self.options.get_safe("with_metal"),
            ]
        )

    def generate(self):
        tc = CMakeToolchain(self)
        if not valid_min_cppstd(self, self._min_cppstd):
            tc.variables["CMAKE_CXX_STANDARD"] = self._min_cppstd
        tc.variables["NO_TBB"] = not self.options.with_tbb
        tc.variables["NO_OPENGL"] = not self.options.with_opengl
        tc.variables["BUILD_SHARED_LIBS"] = self.options.get_safe("shared")
        tc.variables["NO_OMP"] = not self.options.with_omp
        tc.variables["NO_CUDA"] = not self.options.with_cuda
        tc.variables["NO_DX"] = not self.options.get_safe("with_dx")
        tc.variables["NO_METAL"] = not self.options.get_safe("with_metal")
        tc.variables["NO_CLEW"] = not self.options.with_clew
        tc.variables["NO_OPENCL"] = not self.options.with_opencl
        tc.variables["NO_PTEX"] = True  # Note: PTEX is for examples only, but we skip them..
        tc.variables["NO_DOC"] = True
        tc.variables["NO_EXAMPLES"] = True
        tc.variables["NO_TUTORIALS"] = True
        tc.variables["NO_REGRESSION"] = True
        tc.variables["NO_TESTS"] = True
        tc.variables["NO_GLTESTS"] = True
        tc.variables["NO_MACOS_FRAMEWORK"] = True
        tc.generate()

    def _patch_sources(self):
        apply_conandata_patches(self)
        if self.settings.os == "Macos" and not self._osd_gpu_enabled:
            # Without any GPU backend the osd_gpu_obj target is never created.
            path = os.path.join(self.source_folder, "opensubdiv", "CMakeLists.txt")
            replace_in_file(self, path, "$<TARGET_OBJECTS:osd_gpu_obj>", "")
        # No warnings as errors
        replace_in_file(self, os.path.join(self.source_folder, "CMakeLists.txt"), "/WX", "")

    def build(self):
        self._patch_sources()
        cmake = CMake(self)
        cmake.configure()
        cmake.build()

    def package(self):
        copy(self, "LICENSE.txt", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
        cmake = CMake(self)
        cmake.install()
        rmdir(self, os.path.join(self.package_folder, "lib", "cmake"))
        if self.options.shared:
            rm(self, "*.a", os.path.join(self.package_folder, "lib"))

    def package_info(self):
        self.cpp_info.set_property("cmake_file_name", "OpenSubdiv")
        target_suffix = "" if self.options.shared else "_static"
        self.cpp_info.components["osdcpu"].set_property("cmake_target_name", f"OpenSubdiv::osdcpu{target_suffix}")
        self.cpp_info.components["osdcpu"].libs = ["osdCPU"]
        if self.options.with_tbb:
            self.cpp_info.components["osdcpu"].requires = ["onetbb::onetbb"]
        if self._osd_gpu_enabled:
            self.cpp_info.components["osdgpu"].set_property("cmake_target_name", f"OpenSubdiv::osdgpu{target_suffix}")
            self.cpp_info.components["osdgpu"].libs = ["osdGPU"]
            dl_required = self.options.with_opengl or self.options.with_opencl
            if self.settings.os in ["Linux", "FreeBSD"] and dl_required:
                self.cpp_info.components["osdgpu"].system_libs = ["dl"]
        # TODO: to remove in conan v2
        self.cpp_info.names["cmake_find_package"] = "OpenSubdiv"
        self.cpp_info.names["cmake_find_package_multi"] = "OpenSubdiv"
        self.cpp_info.components["osdcpu"].names["cmake_find_package"] = f"osdcpu{target_suffix}"
        self.cpp_info.components["osdcpu"].names["cmake_find_package_multi"] = f"osdcpu{target_suffix}"
        self.cpp_info.components["osdgpu"].names["cmake_find_package"] = f"osdgpu{target_suffix}"
        self.cpp_info.components["osdgpu"].names["cmake_find_package_multi"] = f"osdgpu{target_suffix}"
6,645 | test armed forces day | # python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <dr.prodigy.github@gmail.com> (c) 2017-2023
# ryanss <ryanssdev@icloud.com> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from holidays.countries.azerbaijan import Azerbaijan, AZ, AZE
from tests.common import TestCase
class TestAzerbaijan(TestCase):
    """Holiday-calendar tests for Azerbaijan (1990-2049)."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass(Azerbaijan, years=range(1990, 2050))

    def test_country_aliases(self):
        self.assertCountryAliases(Azerbaijan, AZ, AZE)

    def test_no_holidays(self):
        # Calendar support starts in 1990.
        self.assertNoHolidays(Azerbaijan(years=1989))

    def test_new_year(self):
        self.assertHoliday(f"{year}-01-01" for year in range(1990, 2050))
        self.assertHoliday(f"{year}-01-02" for year in range(2006, 2050))
        self.assertNoHoliday(f"{year}-01-02" for year in range(1990, 2006))

    def test_black_january(self):
        self.assertHoliday(f"{year}-01-20" for year in range(2000, 2050))
        self.assertNoHoliday(f"{year}-01-20" for year in range(1990, 2000))
        self.assertNoHolidayName("Black January", range(1990, 2000))

    def test_int_women_day(self):
        self.assertHoliday(f"{year}-03-08" for year in range(1990, 2050))

    def test_novruz(self):
        # Five-day Novruz observance, introduced in 2007.
        for year in range(2007, 2050):
            self.assertHoliday(
                f"{year}-03-20",
                f"{year}-03-21",
                f"{year}-03-22",
                f"{year}-03-23",
                f"{year}-03-24",
            )
        self.assertNoHolidayName("Novruz", range(1990, 2007))

    def test_victory_day_may(self):
        self.assertHoliday(f"{year}-05-09" for year in range(1990, 2050))

    def test_republic_day(self):
        # May 28 is a holiday from 1992; renamed in 2021.
        self.assertHoliday(f"{year}-05-28" for year in range(1992, 2050))
        self.assertNoHoliday(f"{year}-05-28" for year in range(1990, 1992))
        self.assertHolidayName("Republic Day", (f"{year}-05-28" for year in range(1992, 2021)))
        self.assertHolidayName("Independence Day", (f"{year}-05-28" for year in range(2021, 2050)))

    def test_salvation_day(self):
        self.assertHoliday(f"{year}-06-15" for year in range(1997, 2050))
        self.assertNoHoliday(f"{year}-06-15" for year in range(1990, 1997))

    def test_memorial_day(self):
        self.assertHoliday(f"{year}-09-27" for year in range(2021, 2050))
        self.assertNoHoliday(f"{year}-09-27" for year in range(1990, 2021))

    def test_armed_forces_day(self):
        # Renamed from the masked placeholder METHOD_NAME. Armed Forces Day
        # moved from Oct 9 to Jun 26 in 1998.
        self.assertHoliday(f"{year}-10-09" for year in range(1992, 1998))
        self.assertHoliday(f"{year}-06-26" for year in range(1998, 2050))
        self.assertNoHoliday(f"{year}-10-09" for year in range(1990, 1992))
        self.assertNoHoliday(f"{year}-06-26" for year in range(1990, 1998))

    def test_victory_day(self):
        self.assertHoliday(f"{year}-11-08" for year in range(2021, 2050))
        for year in range(1990, 2021):
            self.assertNotIn("Victory Day", Azerbaijan(years=year).get_list(f"{year}-11-08"))

    def test_independence_day(self):
        self.assertHoliday(f"{year}-10-18" for year in range(1990, 2006))
        self.assertNoHoliday(f"{year}-10-18" for year in range(2006, 2050))

    def test_flag_day(self):
        self.assertHoliday(f"{year}-11-09" for year in range(2010, 2050))
        self.assertNoHoliday(f"{year}-11-09" for year in range(1990, 2010))

    def test_int_solidarity_day(self):
        self.assertHoliday(f"{year}-12-31" for year in range(1993, 2050))
        self.assertNoHoliday(f"{year}-12-31" for year in range(1990, 1993))

    def test_hijri_based(self):
        # Ramazan Bayrami
        for dt in (
            "2020-05-24",
            "2020-05-25",
            "2021-05-13",
            "2021-05-14",
            "2022-05-02",
            "2022-05-03",
        ):
            self.assertIn("Ramazan Bayrami", self.holidays[dt])
        # Gurban Bayrami
        for dt in (
            "2006-01-10",
            "2006-01-11",
            "2006-12-31",
            "2020-07-31",
            "2020-08-01",
            "2020-08-03",
            "2021-07-20",
            "2021-07-21",
            "2022-07-09",
            "2022-07-10",
            "2022-07-11",
            "2022-07-12",
        ):
            self.assertIn("Gurban Bayrami", self.holidays[dt])

    def test_observed_days(self):
        observed_holidays = (
            "2020-03-09",
            "2020-03-25",
            "2020-03-26",
            "2020-05-11",
            "2020-05-26",
            "2020-08-03",
            "2021-01-04",
            "2021-03-25",
            "2021-03-26",
            "2021-05-10",
            "2021-06-28",
            "2022-01-03",
            "2022-01-04",
            "2022-03-25",
            "2022-05-30",
            "2022-06-27",
            "2022-07-11",
            "2022-07-12",
            # special cases
            "2007-01-03",
            "2072-01-05",
        )
        self.assertHoliday(observed_holidays)
        self.assertNoNonObservedHoliday(observed_holidays)

    def test_2020(self):
        self.assertHolidayDates(
            Azerbaijan(years=2020),
            "2020-01-01",
            "2020-01-02",
            "2020-01-20",
            "2020-03-08",
            "2020-03-09",
            "2020-03-20",
            "2020-03-21",
            "2020-03-22",
            "2020-03-23",
            "2020-03-24",
            "2020-03-25",
            "2020-03-26",
            "2020-05-09",
            "2020-05-11",
            "2020-05-24",
            "2020-05-25",
            "2020-05-26",
            "2020-05-28",
            "2020-06-15",
            "2020-06-26",
            "2020-07-31",
            "2020-08-01",
            "2020-08-03",
            "2020-11-09",
            "2020-12-31",
        )

    def test_2022(self):
        self.assertHolidayDates(
            Azerbaijan(years=2022),
            "2022-01-01",
            "2022-01-02",
            "2022-01-03",
            "2022-01-04",
            "2022-01-20",
            "2022-03-08",
            "2022-03-20",
            "2022-03-21",
            "2022-03-22",
            "2022-03-23",
            "2022-03-24",
            "2022-03-25",
            "2022-05-02",
            "2022-05-03",
            "2022-05-09",
            "2022-05-28",
            "2022-05-30",
            "2022-06-15",
            "2022-06-26",
            "2022-06-27",
            "2022-07-09",
            "2022-07-10",
            "2022-07-11",
            "2022-07-12",
            "2022-09-27",
            "2022-11-08",
            "2022-11-09",
            "2022-12-31",
        )
6,646 | query parameters | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "dynatrace monitor get-vm-host-payload",
)
class GetVmHostPayload(AAZCommand):
    """Return the payload that need to be passed in the request body for installing Dynatrace agent on a VM

    :example: Get-vm-host-payload
        az dynatrace monitor get-vm-host-payload -g rg --monitor-name monitor
    """

    _aaz_info = {
        "version": "2021-09-01",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/dynatrace.observability/monitors/{}/getvmhostpayload", "2021-09-01"],
        ]
    }

    def _handler(self, command_args):
        super()._handler(command_args)
        self._execute_operations()
        return self._output()

    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        # Cache the schema: it is identical for every invocation.
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""
        _args_schema = cls._args_schema
        _args_schema.monitor_name = AAZStrArg(
            options=["--monitor-name"],
            help="Monitor resource name",
            required=True,
        )
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        return cls._args_schema

    def _execute_operations(self):
        self.pre_operations()
        self.MonitorsGetVMHostPayload(ctx=self.ctx)()
        self.post_operations()

    @register_callback
    def pre_operations(self):
        pass

    @register_callback
    def post_operations(self):
        pass

    def _output(self, *args, **kwargs):
        result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
        return result

    class MonitorsGetVMHostPayload(AAZHttpOperation):
        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            if session.http_response.status_code in [200]:
                return self.on_200(session)
            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Dynatrace.Observability/monitors/{monitorName}/getVMHostPayload",
                **self.url_parameters
            )

        @property
        def method(self):
            return "POST"

        @property
        def error_format(self):
            return "MgmtErrorFormat"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "monitorName", self.ctx.args.monitor_name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            # Renamed from the masked placeholder METHOD_NAME: AAZHttpOperation
            # reads this property to build the request query string.
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2021-09-01",
                    required=True,
                ),
            }
            return parameters

        @property
        def header_parameters(self):
            parameters = {
                **self.serialize_header_param(
                    "Accept", "application/json",
                ),
            }
            return parameters

        def on_200(self, session):
            data = self.deserialize_http_content(session)
            self.ctx.set_var(
                "instance",
                data,
                schema_builder=self._build_schema_on_200
            )

        _schema_on_200 = None

        @classmethod
        def _build_schema_on_200(cls):
            # Cache the response schema like the args schema above.
            if cls._schema_on_200 is not None:
                return cls._schema_on_200

            cls._schema_on_200 = AAZObjectType()

            _schema_on_200 = cls._schema_on_200
            _schema_on_200.environment_id = AAZStrType(
                serialized_name="environmentId",
            )
            _schema_on_200.ingestion_key = AAZStrType(
                serialized_name="ingestionKey",
            )
            return cls._schema_on_200
6,647 | spec | """
* GTDynamics Copyright 2020, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* See LICENSE for the license information
*
* @file chain.py
* @brief Prototype Chain class for chain manipulators.
* @author Frank Dellaert
"""
from typing import Optional, Tuple
import gtdynamics as gtd
import numpy as np
from gtsam import Pose3
def compose(aSbj: Tuple[Pose3, np.ndarray], bSck: Tuple[Pose3, np.ndarray]):
    """Monoid operation for chains, i.e., pose,Jacobian pairs.

    Composes two (pose, Jacobian) pairs into one: the poses are chained and
    the first Jacobian is re-expressed in the new end-effector frame before
    the two Jacobians are concatenated column-wise.
    """
    aTb, bAj = aSbj
    bTc, cAk = bSck
    assert bAj.shape[0] == 6 and cAk.shape[0] == 6,\
        f"Jacobians should have 6 rows, shapes are {bAj.shape} and {cAk.shape}"
    # Compose poses:
    aTc = aTb.compose(bTc)
    # Adjoint the first Jacobian to the new end-effector frame C:
    c_Ad_b = bTc.inverse().AdjointMap()
    return aTc, np.hstack((c_Ad_b @ bAj, cAk))
class Chain():
    """Serial kinematic chain."""

    def __init__(self, sMb, axes: np.ndarray):
        """Create from end-effector at rest and Jacobian.

        Arguments:
            sMb: rest pose of "body" with respect to "spatial" frame
            axes: screw axes of all joints expressed in body frame
        """
        assert isinstance(sMb, Pose3)
        assert isinstance(axes, np.ndarray)
        self.sMb = sMb
        # Normalize a 1-D axis vector into a 6x1 column matrix.
        self.axes = np.expand_dims(axes, 1) if len(axes.shape) == 1 else axes

    @classmethod
    def compose(cls, *components):
        """Create from a variable number of other Chain instances."""
        # `compose` here resolves to the module-level monoid operation, not
        # this classmethod.
        spec = components[0].spec()
        for component in components[1:]:
            spec = compose(spec, component.spec())
        return cls(*spec)

    def spec(self):
        """Return end-effector at rest and Jacobian.

        Renamed from the masked placeholder METHOD_NAME: this is the
        (pose, Jacobian) pair that `compose` operates on.
        """
        return self.sMb, self.axes

    def __repr__(self):
        return f"Chain\n: {self.sMb}\n{np.round(self.axes,3)}\n"

    @classmethod
    def from_robot(cls, robot: gtd.Robot,
                   base_name: Optional[str] = None,
                   joint_range: Optional[Tuple[int, int]] = None):
        """Initialize from a robot.

        Arguments:
            robot: a GTDynamics Robot instance
            base_name: add offset for base link, if given
            joint_range: a range of joint indices (base 0)
        """
        # Get desired joints from robot instance.
        if joint_range is None:
            joint_range = 0, robot.numJoints()
        joints = robot.joints()[joint_range[0]:joint_range[1]]

        # Convert all joints into pose/Jacobian pairs.
        pairs = [cls(joint.pMc(), joint.cScrewAxis()) for joint in joints]

        if base_name is not None:
            # Create offset to first link parent.
            assert joint_range is None or joint_range[0] == 0, \
                "Cannot have base name if first joint is not 0"
            base_link = robot.link(base_name)
            sM0 = base_link.bMcom()
            offset = Chain(sM0, np.zeros((6, 0)))
            pairs = [offset] + pairs

        # Now, let compose do the work!
        return cls.compose(*pairs)

    def poe(self, q: np.ndarray,
            fTe: Optional[Pose3] = None,
            J: Optional[np.ndarray] = None):
        """ Perform forward kinematics given q, return Pose of end-effector.

        Arguments:
            q (np.ndarray): joint angles for all joints.
            fTe (optional): the end-effector pose with respect to final link.
            J (in/out): optionally, the manipulator Jacobian.
        Returns:
            jTe (Pose3)
        """
        # Check input.
        n = len(q)
        A = self.axes
        assert n == A.shape[1]

        # Calculate exponentials.
        exp = [Pose3.Expmap(A[:, j] * q[j]) for j in range(n)]

        if J is None:
            # Just do product.
            poe = self.sMb
            for T_j in exp:
                poe = poe.compose(T_j)
            return poe if fTe is None else poe.compose(fTe)
        else:
            # Compute FK + Jacobian with monoid compose.
            assert J.shape == (6, len(q)), f"Needs 6x{len(q)} J."
            Empty = np.zeros((6, 0))
            pair = self.sMb, Empty
            for j in range(n):
                pair = compose(pair, (exp[j], np.expand_dims(A[:, j], 1)))
            if fTe is not None:
                pair = compose(pair, (fTe, Empty))  # Adjoints Jacobian to E!
            poe, J[:, :] = pair
            return poe
6,648 | get mcas data connector | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetMCASDataConnectorResult',
'AwaitableGetMCASDataConnectorResult',
'get_mcas_data_connector',
'get_mcas_data_connector_output',
]
@pulumi.output_type
class GetMCASDataConnectorResult:
    """
    Represents MCAS (Microsoft Cloud App Security) data connector.
    """
    def __init__(__self__, data_types=None, etag=None, id=None, kind=None, name=None, system_data=None, tenant_id=None, type=None):
        # Defensive type checks: this constructor is invoked by the pulumi
        # runtime with values deserialized from the invoke response.
        if data_types and not isinstance(data_types, dict):
            raise TypeError("Expected argument 'data_types' to be a dict")
        pulumi.set(__self__, "data_types", data_types)
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if kind and not isinstance(kind, str):
            raise TypeError("Expected argument 'kind' to be a str")
        pulumi.set(__self__, "kind", kind)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if tenant_id and not isinstance(tenant_id, str):
            raise TypeError("Expected argument 'tenant_id' to be a str")
        pulumi.set(__self__, "tenant_id", tenant_id)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="dataTypes")
    def data_types(self) -> 'outputs.MCASDataConnectorDataTypesResponse':
        """
        The available data types for the connector.
        """
        return pulumi.get(self, "data_types")
    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """
        Etag of the azure resource
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Azure resource Id
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def kind(self) -> str:
        """
        The kind of the data connector
        Expected value is 'MicrosoftCloudAppSecurity'.
        """
        return pulumi.get(self, "kind")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Azure resource name
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> 'outputs.SystemDataResponse':
        """
        Azure Resource Manager metadata containing createdBy and modifiedBy information.
        """
        return pulumi.get(self, "system_data")
    @property
    @pulumi.getter(name="tenantId")
    def tenant_id(self) -> str:
        """
        The tenant id to connect to, and get the data from.
        """
        return pulumi.get(self, "tenant_id")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Azure resource type
        """
        return pulumi.get(self, "type")
class AwaitableGetMCASDataConnectorResult(GetMCASDataConnectorResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Makes the (already resolved) result awaitable: the unreachable
        # `yield` turns this method into a generator, which immediately
        # returns a plain copy of the result.
        if False:
            yield self
        return GetMCASDataConnectorResult(
            data_types=self.data_types,
            etag=self.etag,
            id=self.id,
            kind=self.kind,
            name=self.name,
            system_data=self.system_data,
            tenant_id=self.tenant_id,
            type=self.type)
def get_mcas_data_connector(data_connector_id: Optional[str] = None,
                            operational_insights_resource_provider: Optional[str] = None,
                            resource_group_name: Optional[str] = None,
                            workspace_name: Optional[str] = None,
                            opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetMCASDataConnectorResult:
    """
    Gets a data connector.

    :param str data_connector_id: Connector ID
    :param str operational_insights_resource_provider: The namespace of workspaces resource provider- Microsoft.OperationalInsights.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str workspace_name: The name of the workspace.
    """
    # Renamed from the masked placeholder METHOD_NAME to match the module's
    # __all__ export and the *_output companion below.
    __args__ = dict()
    __args__['dataConnectorId'] = data_connector_id
    __args__['operationalInsightsResourceProvider'] = operational_insights_resource_provider
    __args__['resourceGroupName'] = resource_group_name
    __args__['workspaceName'] = workspace_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:securityinsights/v20210301preview:getMCASDataConnector', __args__, opts=opts, typ=GetMCASDataConnectorResult).value

    return AwaitableGetMCASDataConnectorResult(
        data_types=pulumi.get(__ret__, 'data_types'),
        etag=pulumi.get(__ret__, 'etag'),
        id=pulumi.get(__ret__, 'id'),
        kind=pulumi.get(__ret__, 'kind'),
        name=pulumi.get(__ret__, 'name'),
        system_data=pulumi.get(__ret__, 'system_data'),
        tenant_id=pulumi.get(__ret__, 'tenant_id'),
        type=pulumi.get(__ret__, 'type'))


@_utilities.lift_output_func(get_mcas_data_connector)
def get_mcas_data_connector_output(data_connector_id: Optional[pulumi.Input[str]] = None,
                                   operational_insights_resource_provider: Optional[pulumi.Input[str]] = None,
                                   resource_group_name: Optional[pulumi.Input[str]] = None,
                                   workspace_name: Optional[pulumi.Input[str]] = None,
                                   opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetMCASDataConnectorResult]:
    """
    Gets a data connector.

    :param str data_connector_id: Connector ID
    :param str operational_insights_resource_provider: The namespace of workspaces resource provider- Microsoft.OperationalInsights.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str workspace_name: The name of the workspace.
    """
    ...
6,649 | test find executable not found none | from __future__ import annotations
import contextlib
import os.path
import shutil
import sys
import pytest
from pre_commit import parse_shebang
from pre_commit.envcontext import envcontext
from pre_commit.envcontext import Var
from pre_commit.util import make_executable
def _echo_exe() -> str:
    """Locate the `echo` binary on PATH, failing loudly if it is absent."""
    located = shutil.which('echo')
    assert located is not None
    return located
def test_file_doesnt_exist():
    """A nonexistent file parses to an empty command tuple."""
    assert () == parse_shebang.parse_filename('herp derp derp')
def test_simple_case(tmpdir):
    """An executable file with an env shebang parses to its interpreter."""
    script = tmpdir.join('f')
    script.write('#!/usr/bin/env echo')
    make_executable(script.strpath)
    assert ('echo',) == parse_shebang.parse_filename(script.strpath)
def test_find_executable_full_path():
    """An absolute path to an executable is returned unchanged."""
    assert sys.executable == parse_shebang.find_executable(sys.executable)
def test_find_executable_on_path():
    """A bare command name is resolved through PATH."""
    assert _echo_exe() == parse_shebang.find_executable('echo')
def test_find_executable_not_found_none():
    """find_executable returns None, not an error, for a missing executable."""
    # Renamed from the masked placeholder METHOD_NAME so pytest discovers it.
    assert parse_shebang.find_executable('not-a-real-executable') is None
def write_executable(shebang, filename='run'):
    """Create bin/<filename> in the cwd with the given shebang; return its path."""
    os.mkdir('bin')
    target = os.path.join('bin', filename)
    with open(target, 'w') as fh:
        fh.write(f'#!{shebang}')
    make_executable(target)
    return target
@contextlib.contextmanager
def bin_on_path():
    """Temporarily prepend the cwd's bin/ directory to PATH."""
    bin_dir = os.path.join(os.getcwd(), 'bin')
    with envcontext((('PATH', (bin_dir, os.pathsep, Var('PATH'))),)):
        yield
def test_find_executable_path_added(in_tmpdir):
    """An executable becomes discoverable once its directory joins PATH."""
    script = os.path.abspath(write_executable('/usr/bin/env sh'))
    assert parse_shebang.find_executable('run') is None
    with bin_on_path():
        assert script == parse_shebang.find_executable('run')
def test_find_executable_path_ext(in_tmpdir):
    """Windows exports PATHEXT as a list of extensions to automatically add
    to executables when doing PATH searching.
    """
    exe_path = os.path.abspath(
        write_executable('/usr/bin/env sh', filename='run.myext'),
    )
    path_only = {'PATH': os.path.dirname(exe_path)}
    with_pathext = dict(path_only, PATHEXT=os.pathsep.join(('.exe', '.myext')))
    # Without PATHEXT the bare name never matches.
    assert parse_shebang.find_executable('run') is None
    assert parse_shebang.find_executable('run', env=path_only) is None
    # The explicit extension always matches; the bare name needs PATHEXT.
    assert exe_path == parse_shebang.find_executable('run.myext', env=path_only)
    assert exe_path == parse_shebang.find_executable('run', env=with_pathext)
def test_normexe_does_not_exist():
    """normexe raises OSError for a name found nowhere on PATH."""
    with pytest.raises(OSError) as excinfo:
        parse_shebang.normexe('i-dont-exist-lol')
    message, = excinfo.value.args
    assert message == 'Executable `i-dont-exist-lol` not found'
def test_normexe_does_not_exist_sep():
    """normexe raises OSError for a missing relative path too."""
    with pytest.raises(OSError) as excinfo:
        parse_shebang.normexe('./i-dont-exist-lol')
    message, = excinfo.value.args
    assert message == 'Executable `./i-dont-exist-lol` not found'
@pytest.mark.xfail(sys.platform == 'win32', reason='posix only')
def test_normexe_not_executable(tmpdir):  # pragma: win32 no cover
    """normexe rejects an existing file without the executable bit."""
    tmpdir.join('exe').ensure()
    with tmpdir.as_cwd(), pytest.raises(OSError) as excinfo:
        parse_shebang.normexe('./exe')
    assert ('Executable `./exe` is not executable',) == excinfo.value.args
def test_normexe_is_a_directory(tmpdir):
    """normexe rejects a path that resolves to a directory."""
    with tmpdir.as_cwd():
        tmpdir.join('exe').ensure_dir()
        exe = os.path.join('.', 'exe')
        with pytest.raises(OSError) as excinfo:
            parse_shebang.normexe(exe)
        assert excinfo.value.args == (f'Executable `{exe}` is a directory',)
def test_normexe_already_full_path():
    """An already-absolute executable path passes through unchanged."""
    assert sys.executable == parse_shebang.normexe(sys.executable)
def test_normexe_gives_full_path():
    """A bare name normalizes to an absolute path containing a separator."""
    resolved = _echo_exe()
    assert parse_shebang.normexe('echo') == resolved
    assert os.sep in resolved
def test_normalize_cmd_trivial():
    """A command already starting with a full path is left alone."""
    cmd = (_echo_exe(), 'hi')
    assert cmd == parse_shebang.normalize_cmd(cmd)
def test_normalize_cmd_PATH():
    """A bare command name is resolved to its full path via PATH."""
    assert parse_shebang.normalize_cmd(('echo', '--version')) == (
        _echo_exe(), '--version',
    )
def test_normalize_cmd_shebang(in_tmpdir):
    """A script's shebang interpreter is prepended to the command."""
    interpreter = _echo_exe().replace(os.sep, '/')
    script = write_executable(interpreter)
    assert (interpreter, script) == parse_shebang.normalize_cmd((script,))
def test_normalize_cmd_PATH_shebang_full_path(in_tmpdir):
    """A PATH-resolved script with a full-path shebang is fully expanded."""
    interpreter = _echo_exe().replace(os.sep, '/')
    script = write_executable(interpreter)
    with bin_on_path():
        assert parse_shebang.normalize_cmd(('run',)) == (
            interpreter, os.path.abspath(script),
        )
def test_normalize_cmd_PATH_shebang_PATH(in_tmpdir):
    """An `env`-style shebang is itself resolved through PATH."""
    expected_interpreter = _echo_exe()
    script = write_executable('/usr/bin/env echo')
    with bin_on_path():
        assert parse_shebang.normalize_cmd(('run',)) == (
            expected_interpreter, os.path.abspath(script),
        )
6,650 | get psu presence | #
# psuutil.py
# Platform-specific PSU status interface for SONiC
#
import logging
import os.path
try:
from sonic_psu.psu_base import PsuBase
except ImportError as e:
raise ImportError(str(e) + "- required module not found")
class PsuUtil(PsuBase):
"""Platform-specific PSUutil class"""
HWMON_PATH = '/sys/class/hwmon/hwmon1/'
PSU1_PREFIX = 'power42_'
PSU2_PREFIX = 'power52_'
MAX_PSUS = 2
    def __init__(self):
        # Delegate to the SONiC platform PSU base class; no extra state needed.
        PsuBase.__init__(self)
# Get sysfs attribute
def get_attr_value(self, attr_path):
retval = 'ERR'
if (not os.path.isfile(attr_path)):
return retval
try:
with open(attr_path, 'r') as fd:
retval = fd.read()
except Exception:
logging.error("Unable to open ", attr_path, " file !")
retval = retval.rstrip('\r\n')
return retval
def get_attr_filename(self, index, attr):
if (index == 1):
attr_file = self.PSU1_PREFIX + attr
elif (index == 2):
attr_file = self.PSU2_PREFIX + attr
else:
logging.error("Invalid PSU number:", index)
return ''
return attr_file
def get_num_psus(self):
"""
Retrieves the number of PSUs available on the device
:return: An integer, the number of PSUs available on the device
"""
return self.MAX_PSUS
def get_psu_status(self, index):
"""
Retrieves the oprational status of power supply unit (PSU) defined
by index <index>
:param index: An integer, index of the PSU of which to query status
:return: Boolean, True if PSU is operating properly, False if PSU is\
faulty
"""
status = False
attr_filename = self.get_attr_filename(index, 'input')
if attr_filename == '':
return status
attr_path = self.HWMON_PATH + attr_filename
attr_value = self.get_attr_value(attr_path)
if (attr_value != 'ERR'):
attr_value = float(attr_value)
# Check PSU status
if (attr_value != 0.0):
status = True
return status
def METHOD_NAME(self, index):
"""
Retrieves the presence status of power supply unit (PSU) defined
by index <index>
:param index: An integer, index of the PSU of which to query status
:return: Boolean, True if PSU is plugged, False if not
"""
status = False
attr_filename = self.get_attr_filename(index, 'present')
if attr_filename == '':
return status
attr_path = self.HWMON_PATH + attr_filename
attr_value = self.get_attr_value(attr_path)
if (attr_value != 'ERR'):
attr_value = int(attr_value, 16)
# Check PSU status
if (attr_value == 1):
status = True
return status
def get_powergood_status(self, index):
status = False
attr_filename = self.get_attr_filename(index, 'input')
if attr_filename == '':
return status
attr_path = self.HWMON_PATH + attr_filename
attr_value = self.get_attr_value(attr_path)
if (attr_value != 'ERR'):
attr_value = float(attr_value)
# Check PSU status
if (attr_value != 0.0):
status = True
return status
def get_model(self, index):
attr_filename = self.get_attr_filename(index, 'model')
if attr_filename == '':
return None
attr_path = self.HWMON_PATH + attr_filename
attr_value = self.get_attr_value(attr_path)
if (attr_value != 'ERR'):
return attr_value.rstrip()
def get_mfr_id(self, index):
attr_filename = self.get_attr_filename(index, 'mfrid')
if attr_filename == '':
return None
attr_path = self.HWMON_PATH + attr_filename
attr_value = self.get_attr_value(attr_path)
if (attr_value != 'ERR'):
return attr_value.rstrip()
def get_serial(self, index):
attr_filename = self.get_attr_filename(index, 'sn')
if attr_filename == '':
return None
attr_path = self.HWMON_PATH + attr_filename
attr_value = self.get_attr_value(attr_path)
if (attr_value != 'ERR'):
return attr_value.rstrip()
def get_direction(self, index):
if (index == 1):
direction_file = 'fan40_direction'
elif (index == 2):
direction_file = 'fan50_direction'
else:
logging.error("Invalid PSU number:", index)
return None
direction = self.get_attr_value(self.HWMON_PATH + direction_file)
direction = direction.rstrip()
"""
1: FB 2: BF
Since the fan is at rear of the switch, FB means Exhaust; BF means Intake
"""
if direction == '2':
return "INTAKE"
else:
return "EXHAUST"
def get_output_voltage(self, index):
if (index == 1):
attr_file = 'in47_input'
elif (index == 2):
attr_file = 'in57_input'
else:
logging.error("Invalid PSU number:", index)
return 0.0
voltage = self.get_attr_value(self.HWMON_PATH + attr_file)
voltage = voltage.rstrip()
if (voltage != 'ERR'):
voltage, dummy = voltage.split('.', 1)
else:
return 0.0
return float(voltage)/1000
def get_output_current(self, index):
if (index == 1):
attr_file = 'curr39_input'
elif (index == 2):
attr_file = 'curr49_input'
else:
logging.error("Invalid PSU number:", index)
return 0.0
current = self.get_attr_value(self.HWMON_PATH + attr_file)
current = current.rstrip()
if (current != 'ERR'):
current, dummy = current.split('.',1)
else:
return 0.0
return float(current)/1000
def get_output_power(self, index):
attr_filename = self.get_attr_filename(index, 'input')
if attr_filename == '':
return 0.0
attr_path = self.HWMON_PATH + attr_filename
attr_value = self.get_attr_value(attr_path)
if (attr_value != 'ERR'):
attr_value = float(attr_value)
else:
return 0.0
return float(attr_value/1000)
def get_fan_rpm(self, index, fan_idx):
if (index == 1):
rpm_file = 'fan40_input'
elif (index == 2):
rpm_file = 'fan50_input'
else:
logging.error("Invalid PSU number:", index)
return 0
rpm = self.get_attr_value(self.HWMON_PATH + rpm_file)
rpm = rpm.rstrip()
if (rpm != 'ERR'):
rpm = float(rpm)
else:
return 0
return int(rpm) |
6,651 | serialize ndarray | # Copyright 2019 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections.abc as abc
from numbers import Integral, Number
from typing import List, Union
import numpy as np
__all__ = ["serialize_ndarray", "deserialize_ndarray",
"serialize_ndarrays", "deserialize_ndarrays",
"pack_samples", "unpack_samples",
]
def _replace_float_with_int(arr: Union[List[float], List[List]]):
"""Replace floats representing integers with ints in a list representing an array.
Take a list of floats, as produced by :meth:`numpy.ndarray.tolist` from an array
of floating types, and convert any ``float`` representing an integer value into
``int``.
This function assumes some uniformity of the list structure. For instance giving it
a list like ``[0.0, 0]`` or ``[0.0, [0.0]]`` will cause it to fail.
Acts on the list(s) in-place.
"""
if not len(arr):
# nothing to do when the list is empty
pass
elif isinstance(arr[0], List):
for subarr in arr:
_replace_float_with_int(subarr)
elif hasattr(arr[0], "is_integer"):
arr[:] = (int(a) if a.is_integer() else a for a in arr)
else:
raise ValueError("expected a (possibly nested) list of floats, "
f"received a (possibly nested) list of {type(arr[0])}")
def METHOD_NAME(arr, use_bytes=False, bytes_type=bytes):
    """Serialize a NumPy array.

    Args:
        arr (array-like):
            An array.

        use_bytes (bool, optional, default=False):
            If True, a compact representation of the biases as bytes is used.

        bytes_type (class, optional, default=bytes):
            If `use_bytes` is True, this class is used to wrap the bytes
            objects in the serialization. Useful for Python 2 using BSON
            encoding, which does not accept the raw `bytes` type;
            `bson.Binary` can be used instead.

    Returns:
        dict: A serializable object.
    """
    arr = np.asarray(arr)  # support array-like
    if use_bytes:
        payload = bytes_type(arr.tobytes(order='C'))
    else:
        payload = arr.tolist()
        # Floats that hold integral values round-trip more compactly as ints.
        if np.issubdtype(arr.dtype, np.floating):
            _replace_float_with_int(payload)
    return {'type': 'array',
            'data': payload,
            'data_type': arr.dtype.name,
            'shape': arr.shape,
            'use_bytes': bool(use_bytes)}
def deserialize_ndarray(obj):
    """Inverse a serialize_ndarray object.

    Args:
        obj (dict):
            As constructed by :func:`.serialize_ndarray`.

    Returns:
        :obj:`numpy.ndarray`
    """
    dtype = obj['data_type']
    if obj['use_bytes']:
        flat = np.frombuffer(obj['data'], dtype=dtype)
    else:
        flat = np.asarray(obj['data'], dtype=dtype)
    # reshape generally returns a view, which is fine for callers
    return flat.reshape(obj['shape'])
def serialize_ndarrays(obj, use_bytes=False, bytes_type=bytes):
    """Look through the object, serializing NumPy arrays.

    Developer note: this function was written for serializing info fields
    in the sample set and binary quadratic model objects. This is not a general
    serialization function.

    Notes:
        Lists and dicts are copies in the returned object. Does not attempt to
        only copy-on-write, even though that would be more performant.

        Does not check for recursive references.

    Fix: the recursive calls previously dropped ``use_bytes``/``bytes_type``,
    so arrays nested inside dicts or lists were always serialized with the
    defaults regardless of what the caller requested.  The keyword arguments
    are now threaded through the recursion.
    """
    if isinstance(obj, np.ndarray):
        return METHOD_NAME(obj, use_bytes=use_bytes, bytes_type=bytes_type)
    if isinstance(obj, abc.Mapping):
        return {serialize_ndarrays(key, use_bytes=use_bytes, bytes_type=bytes_type):
                serialize_ndarrays(val, use_bytes=use_bytes, bytes_type=bytes_type)
                for key, val in obj.items()}
    if isinstance(obj, abc.Sequence) and not isinstance(obj, str):
        return [serialize_ndarrays(v, use_bytes=use_bytes, bytes_type=bytes_type)
                for v in obj]
    # Normalize NumPy scalars (and other Number registrants) to builtins.
    if isinstance(obj, Integral):
        return int(obj)
    if isinstance(obj, Number):
        return float(obj)
    return obj
def deserialize_ndarrays(obj):
    """Inverse of dfs_serialize_ndarray."""
    if isinstance(obj, abc.Mapping):
        # A dict tagged with type == 'array' is a serialized ndarray.
        if obj.get('type', '') == 'array':
            return deserialize_ndarray(obj)
        return {key: deserialize_ndarrays(val) for key, val in obj.items()}
    if isinstance(obj, abc.Sequence) and not isinstance(obj, str):
        return [deserialize_ndarrays(item) for item in obj]
    return obj
def pack_samples(states):
    """Pack an array of 0/1 samples into uint32 words along the last axis.

    The last axis is zero-padded up to a multiple of 32 bits, the bits are
    grouped as (words, 4 bytes, 8 bits), and each 32-bit group is reversed
    before np.packbits so that the resulting uint32 view round-trips with
    unpack_samples below.  NOTE(review): the exact reversal is tied to the
    host byte order of the uint32 view — confirm on big-endian platforms.
    """
    # ensure that they are stored big-endian order
    if not states.size:
        # empty input: keep the shape, just retype
        return np.empty(states.shape, dtype=np.uint32)
    # number of padding bits needed to round the last axis up to 32
    pad_len = 31 - (states.shape[-1]+31) % 32
    # pad only the last axis
    pad_sizes = ((0, 0),)*(states.ndim - 1) + ((0, pad_len),)
    # view bits as (..., words, 4 bytes, 8 bits)
    shape = states.shape[:-1] + (-1, 4, 8)
    padded = np.pad(states, pad_sizes, "constant").reshape(shape)[..., ::-1]
    return np.packbits(padded).view(np.uint32).reshape(*(states.shape[:-1]+(-1,)))
def unpack_samples(packed, n, dtype=np.uint32):
    """Inverse of pack_samples: expand uint32 words back to `n` 0/1 samples.

    Mirrors the (words, 4 bytes, 8 bits) grouping and bit reversal used by
    pack_samples, then truncates the padding bits down to length ``n``.
    """
    if not packed.size:
        # empty input: produce an empty (rows, n) array of the requested dtype
        return np.empty((packed.shape[0], n), dtype=dtype)
    # view bits as (..., words, 4 bytes, 8 bits), matching pack_samples
    bytewise_shape = packed.shape[:-1] + (-1, 4, 8)
    unpacked = np.unpackbits(packed.view(np.uint8)).reshape(bytewise_shape)
    # undo the per-group bit reversal, then flatten the bit axes
    unpacked = unpacked[..., ::-1].reshape(packed.shape[:-1] + (-1,))
    # drop the zero padding introduced when packing
    return unpacked[..., :n].astype(dtype)
6,652 | preprocess | import mlflow.pyfunc
import random
import numpy as np
from sklearn import metrics
from sklearn.svm import SVC
from sklearn.feature_selection import SequentialFeatureSelector as sfs
from sklearn.preprocessing import normalize
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier as knc
def select_features(X, y):
    """
    Dimensional reduction of X using k-nearest neighbors and sequential feature selector.
    Final dimension set to three features.
    Params:
        X: Array which will be reduced in dimension (batch_size, n_features).
        y: Array of labels (batch_size,).
    Output: function that reduces dimension of array.
    """
    estimator = knc(n_neighbors=3)
    selector = sfs(estimator, n_features_to_select=3)
    reduced = selector.fit_transform(X, y)
    # Closure applying the fitted selector to new data.
    def transform(data):
        return selector.transform(data)
    return transform, reduced
def sort_database(X, y):
    """
    Random shuffle of training values with its respective labels.
    Params:
        X: Array of features.
        y: Array of labels.
    Output: Tuple (X_rand_sorted, y_rand_sorted).
    """
    # Shuffle a single index permutation and apply it to both arrays so
    # that each row keeps its label.
    order = list(range(len(y)))
    random.shuffle(order)
    return X[order], y[order]
def METHOD_NAME(X):
    """
    Preprocessing of features (no dimensional reduction) using principal component analysis.
    Params:
        X: Array of features.
    Output: Tuple (processed array of features function that reduces dimension of array).
    """
    n_features = X.shape[1]
    # Full-rank PCA on row-normalized data: rotation only, no reduction.
    pca = PCA(n_components=n_features)
    transformed = pca.fit_transform(normalize(X))
    # Closure applying the same normalization + rotation to new data.
    def transform(data):
        return pca.transform(normalize(data))
    return transformed, transform
class SVM_recommendation(mlflow.pyfunc.PythonModel):
    """SVC-based recommender wrapped as an MLflow pyfunc model.

    Pipeline: PCA preprocessing -> sequential feature selection -> SVC with
    probability estimates.  Fitted transforms are stored in ``self.transforms``
    and re-applied in ``predict``/``recommendation_order``.
    """
    def __init__(self, test=False, **params):
        # NOTE(review): this bare f-string expression is a no-op — an f-string
        # is not a constant, so it is NOT stored as the method docstring.
        f"""{SVC.__doc__}"""
        # probability=True is required for predict_proba in predict() below.
        params['probability'] = True
        self.svm = SVC(**params)
        self.transforms = []
        self.score = 0
        self.confusion_matrix = None
        if test:
            knn = knc(n_neighbors=3)
            # NOTE(review): this assigns `self.transform` (singular), which no
            # other method reads — predict() uses `self.transforms`. Also these
            # are unfitted estimators, not callables. Looks like dead/broken
            # test scaffolding — confirm intent before relying on test=True.
            self.transform = [PCA(n_components=3), sfs(knn, n_features_to_select=2)]
    def fit(self, X, y):
        """
        Train preprocess function, feature selection and Support Vector Machine model
        Params:
            X: Array of features.
            y: Array of labels.
        """
        assert X.shape[0] == y.shape[0], 'X and y must have same length'
        assert len(X.shape) == 2, 'X must be a two dimension vector'
        # METHOD_NAME returns (data, transform); select_features returns
        # (transform, data) — note the swapped orders.
        X, t1 = METHOD_NAME(X)
        t2, X = select_features(X, y)
        self.transforms = [t1, t2]
        self.svm.fit(X, y)
        pred = self.svm.predict(X)
        # Encode (label, prediction) pairs: z = y + 2*pred, assuming y in {0,1}.
        z = y + 2 * pred
        n = len(z)
        # NOTE(review): z==1 means y=1,pred=0 (a miss) and z==2 means
        # y=0,pred=1 (a false alarm) — the names below appear swapped
        # relative to the usual FP/FN convention; confirm before reuse.
        false_pos = np.count_nonzero(z == 1) / n
        false_neg = np.count_nonzero(z == 2) / n
        true_pos = np.count_nonzero(z == 3) / n
        true_neg = 1 - false_neg - false_pos - true_pos
        self.confusion_matrix = np.array([[true_neg, false_pos], [false_neg, true_pos]])
        # Training accuracy (fraction of correct predictions).
        self.score = true_pos + true_neg
    def predict(self, x):
        """
        Transform and prediction of input features and sorting of each by probability
        Params:
            X: Array of features.
        Output: prediction probability for True (1).
        """
        # Apply the fitted transforms in training order before predicting.
        for t in self.transforms:
            x = t(x)
        return self.svm.predict_proba(x)[:, 1]
    def recommendation_order(self, x):
        """
        Transform and prediction of input features and sorting of each by probability
        Params:
            X: Array of features.
        Output: Tuple (sorted_features, predictions).
        """
        for t in self.transforms:
            x = t(x)
        pred = self.svm.predict_proba(x)
        # Indices sorted by descending probability of the positive class.
        return sorted(range(len(pred)), key=lambda k: pred[k][1], reverse=True), pred
    def plots(self):
        """
        Returns the plots in a dict format.
        {
            'confusion_matrix': confusion matrix figure,
        }
        """
        display = metrics.ConfusionMatrixDisplay(confusion_matrix=self.confusion_matrix, display_labels=[False, True])
        return {'confusion_matrix': display.plot().figure_}
6,653 | clear | # Copyright (c) Lawrence Livermore National Security, LLC and other VisIt
# Project developers. See the top-level LICENSE file for dates and other
# details. No copyright assignment is required to contribute to VisIt.
"""
file: property_tree.py
author: Cyrus Harrison <cyrush@llnl.gov>
created: 4/15/2010
description:
(almost too) flexible property tree class.
"""
class PropertyTree(object):
    """(Almost too) flexible property tree.

    A node is either an interior "tree" (its ``_value`` is a dict mapping
    child names to PropertyTree nodes) or a leaf "node" (its ``_value`` holds
    an arbitrary payload).  Properties are addressed with '/'-separated
    paths, e.g. ``tree["a/b/c"] = 1``.  Attribute access forwards to item
    access, so ``tree.a.b.c == 1`` as well.  ``lock()`` freezes the shape:
    accessing a missing path on a locked tree raises AttributeError instead
    of creating it.

    Idiom cleanup: ``not x is None`` comparisons replaced with
    ``x is not None`` and redundant ``list(...keys())`` copies dropped where
    iteration does not mutate; behavior is unchanged.
    """

    def __init__(self, ptype="tree", init=None):
        # These three assignments route through __setattr__'s special-case
        # branch and land directly in __dict__.
        self._type = ptype
        self._locked = False
        if self._type == "tree":
            self._value = {}
            if init is not None:
                self.update(init)
        else:
            self._value = init

    def update(self, pval):
        """Merge properties from a {path: value} dict or another PropertyTree."""
        if isinstance(pval, dict):
            items = pval.items()
        else:
            items = pval.properties().items()
        for path, value in items:
            self.add_property(path, value)

    def METHOD_NAME(self):
        """Reset to an empty, unlocked state (empty dict for trees, None for leaves)."""
        self._locked = False
        if self._type == "tree":
            self._value = {}
        else:
            self._value = None

    def properties(self):
        """Return a flat {path: leaf-value} dict for every leaf below this node."""
        res = {}
        if self._type == "tree":
            for name, child in list(self._value.items()):
                if child._type == "tree":
                    for sub_path, value in child.properties().items():
                        res[name + "/" + sub_path] = value
                else:
                    res[name] = child._value
        return res

    def children(self):
        """Return {name: child PropertyTree} for direct children, sorted by name."""
        res = {}
        if self._type == "tree":
            for name in sorted(self._value.keys()):
                res[name] = self._value[name]
        return res

    def add_property(self, path, value=None):
        """Create the property at *path*, building intermediate trees as needed.

        With ``value=None`` an empty subtree is created; otherwise a leaf.
        """
        head, sep, rest = path.partition("/")
        if sep and head:
            subtree = self._value.get(head)
            if subtree is None:
                subtree = PropertyTree()
                self._value[head] = subtree
            subtree.add_property(rest, value)
        elif value is None:
            self._value[path] = PropertyTree()
        else:
            self._value[path] = PropertyTree("node", value)

    def has_property(self, path):
        """Return True when *path* exists below this node."""
        return self.fetch_property(path) is not None

    def remove_property(self, path):
        """Delete the node at *path*; silently ignores a missing final component."""
        head, sep, rest = path.partition("/")
        if sep and head:
            self._value[head].remove_property(rest)
        elif path in self._value:
            del self._value[path]

    def lock(self):
        """Recursively forbid creation of new properties via item/attr access."""
        self._locked = True
        if self._type == "tree":
            for child in self._value.values():
                child.lock()

    def unlock(self):
        """Recursively re-allow creation of new properties."""
        self._locked = False
        if self._type == "tree":
            for child in self._value.values():
                child.unlock()

    def __getitem__(self, path):
        # Auto-create missing paths unless locked (then AttributeError,
        # which also makes attribute access behave like a missing attr).
        node = self.fetch_property(path)
        if node is None:
            if self._locked:
                raise AttributeError(path)
            self.add_property(path)
            node = self.fetch_property(path)
        if node._type == "tree":
            return node
        return node._value

    def __setitem__(self, path, obj):
        node = self.fetch_property(path)
        if node is None:
            if self._locked:
                raise AttributeError(path)
            self.add_property(path, obj)
        else:
            # Overwriting turns any existing node (even a subtree) into a leaf.
            node._type = "node"
            node._value = obj

    def fetch_property(self, path):
        """Return the PropertyTree node at *path*, or None when absent."""
        head, sep, rest = path.partition("/")
        if sep and head:
            # .keys() kept deliberately: probing through a leaf raises
            # AttributeError, matching the historical behavior.
            if head in self._value.keys():
                return self._value[head].fetch_property(rest)
            return None
        if path in self._value.keys():
            return self._value[path]
        return None

    def __str__(self):
        return self.__gen_string("")

    def __gen_string(self, path):
        # Depth-first dump: one "path type:value" line per leaf.
        res = ""
        if self._type == "tree":
            for name in list(self._value.keys()):
                res += self._value[name].__gen_string(path + name + "/")
        else:
            res = path + "%s:%s\n" % (self._type, str(self._value))
        return res

    def __getattr__(self, name):
        # Keep dunder lookups (pickling, copy, repr machinery) honest.
        if name.startswith("__") and name.endswith("__"):
            raise AttributeError(name)
        try:
            return self.__getitem__(name)
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self, name, obj):
        # The three internal slots bypass the tree; everything else is a property.
        if name in ("_value", "_type", "_locked"):
            self.__dict__[name] = obj
        else:
            self.__setitem__(name, obj)
|
6,654 | export empty string split | # Copyright (c) ONNX Project Contributors
#
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import onnx
from onnx.backend.test.case.base import Base
from onnx.backend.test.case.node import expect
class StringSplit(Base):
    """ONNX backend test cases for the ``StringSplit`` operator.

    Each export_* method builds a StringSplit node, concrete string inputs,
    and the expected (substrings, length) outputs, then registers the case
    through the project's ``expect`` harness.
    """
    @staticmethod
    def export_basic() -> None:
        # Split on an explicit single-character delimiter.
        node = onnx.helper.make_node(
            "StringSplit",
            inputs=["x"],
            outputs=["substrings", "length"],
            delimiter=".",
            maxsplit=None,
        )
        x = np.array(["abc.com", "def.net"]).astype(object)
        substrings = np.array([["abc", "com"], ["def", "net"]]).astype(object)
        length = np.array([2, 2], dtype=np.int64)
        expect(
            node,
            inputs=[x],
            outputs=[substrings, length],
            name="test_string_split_basic",
        )
    @staticmethod
    def export_maxsplit() -> None:
        # At most 2 splits per element; shorter rows are padded with "".
        node = onnx.helper.make_node(
            "StringSplit",
            inputs=["x"],
            outputs=["substrings", "length"],
            maxsplit=2,
        )
        x = np.array(
            [["hello world", "def.net"], ["o n n x", "the quick brown fox"]]
        ).astype(object)
        substrings = np.array(
            [
                [["hello", "world", ""], ["def.net", "", ""]],
                [["o", "n", "n x"], ["the", "quick", "brown fox"]],
            ]
        ).astype(object)
        length = np.array([[2, 1], [3, 3]], np.int64)
        expect(
            node,
            inputs=[x],
            outputs=[substrings, length],
            name="test_string_split_maxsplit",
        )
    @staticmethod
    def export_consecutive_delimiters() -> None:
        # Adjacent delimiters yield empty-string substrings (no collapsing).
        node = onnx.helper.make_node(
            "StringSplit",
            inputs=["x"],
            outputs=["substrings", "length"],
            delimiter="-",
            maxsplit=None,
        )
        x = np.array(["o-n-n--x-", "o-n----nx"]).astype(object)
        substrings = np.array(
            [["o", "n", "n", "", "x", ""], ["o", "n", "", "", "", "nx"]]
        ).astype(object)
        length = np.array([6, 6], dtype=np.int64)
        expect(
            node,
            inputs=[x],
            outputs=[substrings, length],
            name="test_string_split_consecutive_delimiters",
        )
    @staticmethod
    def export_empty_string_delimiter() -> None:
        # Both "" and None delimiters mean whitespace splitting; the two
        # registered cases share the same inputs and expected outputs.
        for delimiter, test_name in (
            ("", "test_string_split_empty_string_delimiter"),
            (None, "test_string_split_no_delimiter"),
        ):
            node = onnx.helper.make_node(
                "StringSplit",
                inputs=["x"],
                outputs=["substrings", "length"],
                delimiter=delimiter,
                maxsplit=None,
            )
            x = np.array(
                ["hello world !", "  hello   world !", " hello world ! "]
            ).astype(object)
            substrings = np.array(
                [
                    ["hello", "world", "!"],
                    ["hello", "world", "!"],
                    ["hello", "world", "!"],
                ]
            ).astype(object)
            length = np.array([3, 3, 3], dtype=np.int64)
            expect(
                node,
                inputs=[x],
                outputs=[substrings, length],
                name=test_name,
            )
    @staticmethod
    def METHOD_NAME() -> None:
        # Empty input tensor: output rank is still 2, with a dynamic second dim.
        node = onnx.helper.make_node(
            "StringSplit",
            inputs=["x"],
            outputs=["substrings", "length"],
            delimiter=None,
            maxsplit=None,
        )
        x = np.array([]).astype(object)
        substrings = np.array([]).astype(object).reshape(0, 0)
        length = np.array([], dtype=np.int64)
        expect(
            node,
            inputs=[x],
            outputs=[substrings, length],
            name="test_string_split_empty_tensor",
            output_type_protos=[
                onnx.helper.make_tensor_type_proto(onnx.TensorProto.STRING, (0, None)),
                None,
            ],
        )
6,655 | get pairs | # CLIP
# Adapted from https://github.com/openai/CLIP.
# Originally MIT License, Copyright (c) 2021 OpenAI.
import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
@lru_cache()
def default_bpe():
    """Absolute path of the BPE vocabulary archive bundled next to this module."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, 'bpe_simple_vocab_16e6.txt.gz')
@lru_cache()
def bytes_to_unicode():
    """
    Returns list of utf-8 byte and a corresponding list of unicode strings.
    The reversible bpe codes work on unicode strings.
    This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
    When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a signficant percentage of your normal, say, 32K bpe vocab.
    To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
    And avoids mapping to whitespace/control characters the bpe code barfs on.
    """
    # Printable bytes map to themselves.
    printable = (list(range(ord('!'), ord('~') + 1))
                 + list(range(ord('¡'), ord('¬') + 1))
                 + list(range(ord('®'), ord('ÿ') + 1)))
    byte_values = printable[:]
    char_codes = printable[:]
    # Remaining bytes are assigned fresh code points starting at 256.
    offset = 0
    for b in range(2**8):
        if b not in byte_values:
            byte_values.append(b)
            char_codes.append(2**8 + offset)
            offset += 1
    return dict(zip(byte_values, (chr(c) for c in char_codes)))
def METHOD_NAME(word):
    """Return set of symbol pairs in a word.

    Word is represented as tuple of symbols (symbols being variable-length
    strings).  Each pair is a tuple of two adjacent symbols.  Handles words
    with fewer than two symbols (including the empty word) by returning an
    empty set.
    """
    # zip over the word and its one-step shift yields all adjacent pairs;
    # it is naturally empty for words of length 0 or 1.
    return set(zip(word, word[1:]))
def basic_clean(text):
    """Repair mojibake with ftfy, undo (possibly double) HTML escaping, and trim."""
    fixed = ftfy.fix_text(text)
    # unescape twice to handle doubly-escaped entities like &amp;amp;
    return html.unescape(html.unescape(fixed)).strip()
def whitespace_clean(text):
    """Collapse every whitespace run to a single space and trim the ends."""
    return re.sub(r'\s+', ' ', text).strip()
class SimpleTokenizer(object):
    """CLIP's byte-level BPE tokenizer.

    Text is lower-cased and cleaned, regex-split into word-ish chunks, each
    chunk is mapped byte-by-byte through bytes_to_unicode, and the learned
    BPE merges are applied greedily by rank.  End-of-word is marked '</w>'.
    """
    def __init__(self, bpe_path: str = default_bpe()):
        # byte <-> unicode tables make the BPE alphabet free of control chars.
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        # First line of the merges file is a header; tail is truncated to the
        # vocabulary budget (49152 minus 256 base chars minus 2 specials).
        merges = gzip.open(bpe_path).read().decode('utf-8').split('\n')
        merges = merges[1:49152 - 256 - 2 + 1]
        merges = [tuple(merge.split()) for merge in merges]
        # Vocab: base chars, their '</w>' variants, merged pairs, specials.
        vocab = list(bytes_to_unicode().values())
        vocab = vocab + [v + '</w>' for v in vocab]
        for merge in merges:
            vocab.append(''.join(merge))
        vocab.extend(['<|startoftext|>', '<|endoftext|>'])
        self.encoder = dict(zip(vocab, range(len(vocab))))
        self.decoder = {v: k for k, v in self.encoder.items()}
        # Lower rank == earlier (higher-priority) merge.
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        # Memoizes bpe(); specials map to themselves.
        self.cache = {
            '<|startoftext|>': '<|startoftext|>',
            '<|endoftext|>': '<|endoftext|>'
        }
        # \p{L}/\p{N} require the third-party `regex` module (imported as re).
        self.pat = re.compile(
            r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
            re.IGNORECASE)
    def bpe(self, token):
        """Apply BPE merges to one token; returns space-joined subword symbols."""
        if token in self.cache:
            return self.cache[token]
        # Mark the last character as word-final before merging.
        word = tuple(token[:-1]) + (token[-1] + '</w>', )
        pairs = METHOD_NAME(word)
        if not pairs:
            return token + '</w>'
        while True:
            # Merge the best-ranked adjacent pair until none remain in ranks.
            bigram = min(
                pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            # NOTE(review): error_list is filled but never read afterwards —
            # it only serves to swallow the ValueError from index().
            error_list = []
            while i < len(word):
                try:
                    # Copy symbols up to the next occurrence of `first`.
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except Exception as err:
                    new_word.extend(word[i:])
                    error_list.append(err)
                    break
                if word[i] == first and i < len(word) - 1 and word[
                        i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = METHOD_NAME(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word
    def encode(self, text):
        """Clean, lower-case and tokenize *text* into a list of BPE token ids."""
        bpe_tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            # Map raw UTF-8 bytes into the BPE-safe unicode alphabet.
            token = ''.join(self.byte_encoder[b]
                            for b in token.encode('utf-8'))
            bpe_tokens.extend(self.encoder[bpe_token]
                              for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens
    def decode(self, tokens):
        """Invert encode(): token ids -> unicode symbols -> bytes -> text."""
        text = ''.join([self.decoder[token] for token in tokens])
        text = bytearray([self.byte_decoder[c] for c in text]).decode(
            'utf-8', errors='replace').replace('</w>', ' ')
        return text
6,656 | fix2scalar | '''calculating anova and verifying with NIST test data
compares my implementations, stats.f_oneway and anova using statsmodels.OLS
'''
from statsmodels.compat.python import lmap
import os
import numpy as np
from scipy import stats
from statsmodels.tools.tools import add_constant
from statsmodels.regression.linear_model import OLS
from .try_ols_anova import data2dummy
filenameli = ['SiRstv.dat', 'SmLs01.dat', 'SmLs02.dat', 'SmLs03.dat', 'AtmWtAg.dat',
'SmLs04.dat', 'SmLs05.dat', 'SmLs06.dat', 'SmLs07.dat', 'SmLs08.dat',
'SmLs09.dat']
##filename = 'SmLs03.dat' #'SiRstv.dat' #'SmLs09.dat'#, 'AtmWtAg.dat' #'SmLs07.dat'
##path = __file__
##print(locals().keys()
###print(path
def getnist(filename):
    """Load a NIST StRD ANOVA dataset and its certified results.

    Parameters
    ----------
    filename : str
        Name of a ``.dat`` file in the package's ``data`` directory.

    Returns
    -------
    tuple
        ``(y, x, certified_values, certified_lines, categories)`` where
        ``y`` are integer group labels, ``x`` the observations,
        ``certified_values`` is ``array([f, prob, R2, resstd])`` parsed
        from the certification block, ``certified_lines`` the raw split
        lines of that block, and ``categories`` the unique group labels.

    Fix: removed the unused local ``data`` (the split data lines were
    computed from the file content but never used — the numeric data is
    read again via ``np.loadtxt``).
    """
    here = os.path.dirname(__file__)
    fname = os.path.abspath(os.path.join(here, 'data', filename))
    with open(fname, 'r', encoding="utf-8") as fd:
        content = fd.read().split('\n')
    # Certified values live on lines 40-47 of the NIST file format.
    certified = [line.split() for line in content[40:48] if line]
    dataf = np.loadtxt(fname, skiprows=60)
    y, x = dataf.T
    y = y.astype(int)
    caty = np.unique(y)
    f = float(certified[0][-1])
    R2 = float(certified[2][-1])
    resstd = float(certified[4][-1])
    dfbn = int(certified[0][-4])
    dfwn = int(certified[1][-3])  # dfbn->dfwn is this correct
    prob = stats.f.sf(f, dfbn, dfwn)
    return y, x, np.array([f, prob, R2, resstd]), certified, caty
def anova_oneway(y, x, seq=0):
    """One-way ANOVA written to reproduce the NIST certified values.

    Returns ``(f, prob, R2, resstd)`` as plain scalars.  NOTE(review): per
    the __main__ block below, ``y`` holds the group labels and ``x`` the
    observations — the opposite of the usual (data, groups) convention;
    ``seq`` is unused. Confirm with try_catdata.groupsstats_dummy.
    """
    # new version to match NIST
    # no generalization or checking of arguments, tested only for 1d
    yrvs = y[:,np.newaxis] #- min(y)
    #subtracting mean increases numerical accuracy for NIST test data sets
    xrvs = x[:,np.newaxis] - x.mean() #for 1d#- 1e12 trick for 'SmLs09.dat'
    from .try_catdata import groupsstats_dummy
    # per-group means/variances, within-group deviations, and group counts
    meang, varg, xdevmeangr, countg = groupsstats_dummy(yrvs[:, :1],
                                                        xrvs[:, :1])
    # TODO: the following does not work as replacement
    # from .try_catdata import groupsstats_dummy, groupstatsbin
    # gcount, gmean , meanarr, withinvar, withinvararr = groupstatsbin(y, x)
    # within- and between-group sums of squares
    sswn = np.dot(xdevmeangr.T,xdevmeangr)
    ssbn = np.dot((meang-xrvs.mean())**2, countg.T)
    nobs = yrvs.shape[0]
    ncat = meang.shape[1]
    dfbn = ncat - 1
    dfwn = nobs - ncat
    # mean squares and the F statistic
    msb = ssbn/float(dfbn)
    msw = sswn/float(dfwn)
    f = msb/msw
    prob = stats.f.sf(f,dfbn,dfwn)
    R2 = (ssbn/(sswn+ssbn)) #R-squared
    resstd = np.sqrt(msw) #residual standard deviation
    #print(f, prob
    def METHOD_NAME(z): # return number
        # collapse a (1, 1) array (from the dot products above) to a scalar
        if np.shape(z) == (1, 1):
            return z[0, 0]
        else:
            return z
    f, prob, R2, resstd = lmap(METHOD_NAME, (f, prob, R2, resstd))
    return f, prob, R2, resstd
def anova_ols(y, x):
    """One-way ANOVA via OLS on category dummies; returns (f, prob, R2, resstd)."""
    design = add_constant(data2dummy(x), prepend=False)
    fit = OLS(y, design).fit()
    return fit.fvalue, fit.f_pvalue, fit.rsquared, np.sqrt(fit.mse_resid)
# Script mode: cross-check three ANOVA implementations against the NIST
# certified values for every bundled dataset.
if __name__ == '__main__':
    print('\n using new ANOVA anova_oneway')
    print('f, prob, R2, resstd')
    for fn in filenameli:
        print(fn)
        y, x, cert, certified, caty = getnist(fn)
        res = anova_oneway(y, x)
        # TODO: figure out why these results are less accurate/precise
        #       than others
        # dataset-specific relative tolerances for the hard cases
        rtol = {
            "SmLs08.dat": .027,
            "SmLs07.dat": 1.7e-3,
            "SmLs09.dat": 1e-4
        }.get(fn, 1e-7)
        np.testing.assert_allclose(np.array(res), cert, rtol=rtol)

    print('\n using stats ANOVA f_oneway')
    for fn in filenameli:
        print(fn)
        y, x, cert, certified, caty = getnist(fn)
        # scipy wants one sample array per group
        xlist = [x[y==ii] for ii in caty]
        res = stats.f_oneway(*xlist)
        # print the deviation from the certified (f, prob) values
        print(np.array(res) - cert[:2])

    print('\n using statsmodels.OLS')
    print('f, prob, R2, resstd')
    for fn in filenameli[:]:
        print(fn)
        y, x, cert, certified, caty = getnist(fn)
        # NOTE(review): arguments are (x, y) here but (y, x) above — the
        # label/data order differs between implementations; confirm.
        res = anova_ols(x, y)
        print(np.array(res) - cert)
6,657 | draw buttons ext | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
from bpy.props import StringProperty, BoolProperty, IntProperty
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import levelsOflist, multi_socket
# Map of Sverchok socket bl_idname -> human-readable data-type label used in
# the generated viewer text; unknown socket types fall back to "DATA".
socket_types = {
    "SvVerticesSocket": "VERTICES",
    "SvStringsSocket": "EDGES/POLYGONS/OTHERS",
    "SvMatrixSocket": "MATRICES",
    "SvObjectSocket": "OBJECTS"
}
# Trailer appended to every generated text block (leading newline intended).
footer = """
**************************************************
The End """
def makeframe(nTree):
    '''
    Making frame to show text to user. appears in left corner
    Todo - make more organized layout with button making
    lines in up and between Frame and nodes and text of user and layout name

    Creates (at most one) NodeFrame labelled 'Sverchok_viewer' in the given
    node tree and binds it to the 'Sverchok_viewer' text datablock, which is
    assumed to exist already (do_text creates it before calling this).
    '''
    # labls = [n.label for n in nTree.nodes]
    if any('Sverchok_viewer' == n.label for n in nTree.nodes):
        # frame already present: nothing to do
        return
    else:
        a = nTree.nodes.new('NodeFrame')
        a.width = 800
        a.height = 1500
        # place the frame just left of the leftmost node, top-aligned
        locx = [n.location[0] for n in nTree.nodes]
        locy = [n.location[1] for n in nTree.nodes]
        mx, my = min(locx), max(locy)
        a.location[0] = mx - a.width - 10
        a.location[1] = my
        a.text = bpy.data.texts['Sverchok_viewer']
        a.label = 'Sverchok_viewer'
        a.shrink = False
        a.use_custom_color = True
        # this trick allows us to negative color, so user accept it as grey!!!
        color = [1 - i for i in bpy.context.preferences.themes['Default'].node_editor.space.back[:]]
        a.color[:] = color
def readFORviewer_sockets_data(data, dept, le, num_lines):
    """Format nested socket data as indented text, recursing while the
    nesting depth is above 2 and printing at most num_lines values per list.

    `le` (length) is only reported at the top call; recursive calls pass False.
    """
    header = ''
    body = ''
    remaining_depth = dept - 1
    if le:
        header += '(' + str(le) + ') object(s)'
        del le
    if remaining_depth > 1:
        # still nested: emit one "=i= (len)" header per sub-object and recurse
        for index, item in enumerate(data):
            header += '\n' + '=' + str(index) + '= (' + str(len(item)) + ')'
            header += str(readFORviewer_sockets_data(item, remaining_depth, False, num_lines))
    else:
        # flat enough: print values, truncated to num_lines entries
        for count, value in enumerate(data):
            body += '\n' + str(value)
            if count >= num_lines - 1:
                break
    return header + body
def readFORviewer_sockets_data_small(data, dept, le, num_lines):
    """Like readFORviewer_sockets_data but for shallow data: recurses while
    the remaining depth is above 0 (instead of above 1)."""
    header = ''
    body = ''
    remaining_depth = dept - 1
    if le:
        header += '(' + str(le) + ') object(s)'
        del le
    if remaining_depth > 0:
        for index, item in enumerate(data):
            header += '\n' + '=' + str(index) + '= (' + str(len(item)) + ')'
            header += str(readFORviewer_sockets_data_small(item, remaining_depth, False, num_lines))
    else:
        for count, value in enumerate(data):
            body += '\n' + str(value)
            if count >= num_lines - 1:
                break
    return header + body
def do_text(node, out_string):
    """Write *out_string* (plus the module footer) into the 'Sverchok_viewer'
    text datablock, creating the datablock and its display frame as needed."""
    if not 'Sverchok_viewer' in bpy.data.texts:
        bpy.data.texts.new('Sverchok_viewer')
    string_to_write = 'node name: ' + node.name + out_string + footer
    datablock = bpy.data.texts['Sverchok_viewer']
    # replace previous content wholesale
    datablock.clear()
    datablock.from_string(string_to_write)
    if node.frame:
        # adding a frame if it doesn't exist, will create a depsgraph update
        makeframe(node.id_data)
def prep_text(node, num_lines):
    """ main preparation function for text

    Walks the node's linked input sockets, formats each socket's data
    (at most num_lines values per list) and hands the combined report to
    do_text for display.
    """
    outs = ''
    inputs = node.inputs
    for socket in inputs:
        if socket.is_linked and socket.other:
            label = socket.other.node.label
            if label:
                label = '; node ' + label.upper()
            name = socket.name.upper()
            # human-readable type label; unknown sockets become "DATA"
            data_type = socket_types.get(socket.other.bl_idname, "DATA")
            itype = f'\n\nSocket {name}{label}; type {data_type}: \n'
            eva = socket.sv_get()
            deptl = levelsOflist(eva)
            # deep data goes through the ">1"-recursion variant, shallow
            # data through the "small" variant; empty/unknown prints None
            if deptl and deptl > 2:
                a = readFORviewer_sockets_data(eva, deptl, len(eva), num_lines)
            elif deptl:
                a = readFORviewer_sockets_data_small(eva, deptl, len(eva), num_lines)
            else:
                a = 'None'
            outs += itype+str(a)+'\n'
    do_text(node, outs)
# Operator triggered by the node's "V I E W" button; the class docstring
# below is shown by Blender as the operator tooltip, so it is kept as-is.
class SverchokViewerMK1(bpy.types.Operator):
    """Sverchok viewerMK1"""
    bl_idname = "node.sverchok_viewer_buttonmk1"
    bl_label = "Sverchok viewer.mk1"
    bl_icon = 'TEXT'
    # bl_options = {'INTERNAL', 'UNDO'}

    # target node is identified by tree + node name, passed in by the button
    nodename: StringProperty(name='nodename')
    treename: StringProperty(name='treename')
    # number of data lines to render per list
    lines: IntProperty(name='lines', description='lines count for operate on',default=1000)

    def execute(self, context):
        # resolve the node and regenerate the viewer text for it
        node = bpy.data.node_groups[self.treename].nodes[self.nodename]
        num_lines = self.lines
        prep_text(node, num_lines)
        return {'FINISHED'}
# NOTE: Sverchok parses this class docstring (Triggers/Tooltip lines) for its
# node search, so the docstring text is left unchanged.
class ViewerNodeTextMK3(SverchCustomTreeNode, bpy.types.Node):
    """
    Triggers: Viewer Node text MK3
    Tooltip: Inspecting data from sockets in terms
    of levels and structure by types
    multisocket lets you insert many outputs
    """
    bl_idname = 'ViewerNodeTextMK3'
    bl_label = 'Viewer Text MK3'
    bl_icon = 'FILE_TEXT'

    # regenerate the text on every process() call when enabled
    autoupdate: BoolProperty(name='update', default=False)
    # create/keep the on-canvas NodeFrame showing the text
    frame: BoolProperty(name='frame', default=True)
    lines: IntProperty(name='lines', description='lines count to show', default=1000, min=1, max=2000)

    # multi sockets variables
    newsock: BoolProperty(name='newsock', default=False)
    base_name = 'data'
    multi_socket_type = 'SvStringsSocket'

    def sv_init(self, context):
        # start with a single generic input; multi_socket() grows more
        self.inputs.new('SvStringsSocket', 'data0')

    def METHOD_NAME(self, context, layout):
        # extended (N-panel) UI: expose the line-count limit
        row = layout.row()
        row.prop(self,'lines',text='lines')

    def draw_buttons(self, context, layout):
        # main UI: big VIEW button wired to the operator, plus toggles
        row = layout.row()
        row.scale_y = 4.0
        do_text = row.operator('node.sverchok_viewer_buttonmk1', text='V I E W')
        do_text.nodename = self.name
        do_text.treename = self.id_data.name
        do_text.lines = self.lines
        col = layout.column(align=True)
        col.prop(self, "autoupdate", text="autoupdate")
        col.prop(self, "frame", text="frame")

    def sv_update(self):
        # this function auto extends the number of input sockets once a socket is linked.
        multi_socket(self, min=1)
        # we want socket types to match the input
        for socket in self.inputs:
            if socket.is_linked and socket.links:
                if socket.other:
                    if not socket.bl_idname == socket.other.bl_idname:
                        socket.replace_socket(socket.other.bl_idname)

    def process(self):
        # only refresh automatically when the user opted in
        if not self.autoupdate:
            pass
        else:
            prep_text(self, self.lines)
def register():
    """Register the viewer operator and the node class with Blender."""
    # The operator is registered first so the node's VIEW button can find it.
    bpy.utils.register_class(SverchokViewerMK1)
    bpy.utils.register_class(ViewerNodeTextMK3)
def unregister():
    """Unregister the classes in reverse registration order."""
    bpy.utils.unregister_class(ViewerNodeTextMK3)
    bpy.utils.unregister_class(SverchokViewerMK1)
6,658 | create node iterator | # Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import logging
from typing import (
Any, Iterator, Optional, Union,
)
from amundsen_common.utils.atlas import AtlasCommonParams, AtlasDashboardTypes
from amundsen_rds.models import RDSModel
from amundsen_rds.models.dashboard import DashboardChart as RDSDashboardChart
from databuilder.models.atlas_entity import AtlasEntity
from databuilder.models.atlas_relationship import AtlasRelationship
from databuilder.models.atlas_serializable import AtlasSerializable
from databuilder.models.dashboard.dashboard_query import DashboardQuery
from databuilder.models.graph_node import GraphNode
from databuilder.models.graph_relationship import GraphRelationship
from databuilder.models.graph_serializable import GraphSerializable
from databuilder.models.table_serializable import TableSerializable
from databuilder.serializers.atlas_serializer import (
add_entity_relationship, get_entity_attrs, get_entity_relationships,
)
from databuilder.utils.atlas import AtlasSerializedEntityOperation
LOGGER = logging.getLogger(__name__)
class DashboardChart(GraphSerializable, TableSerializable, AtlasSerializable):
    """
    A model that encapsulate Dashboard's charts.

    Serializable to three backends: a graph node/relationship (Neo4j-style),
    an RDS record, and an Atlas entity. Each create_next_* method drains a
    one-shot iterator built in __init__.
    """
    DASHBOARD_CHART_LABEL = 'Chart'
    DASHBOARD_CHART_KEY_FORMAT = '{product}_dashboard://{cluster}.{dashboard_group_id}/' \
                                 '{dashboard_id}/query/{query_id}/chart/{chart_id}'
    CHART_RELATION_TYPE = 'HAS_CHART'
    CHART_REVERSE_RELATION_TYPE = 'CHART_OF'

    def __init__(self,
                 dashboard_group_id: Optional[str],
                 dashboard_id: Optional[str],
                 query_id: str,
                 chart_id: str,
                 chart_name: Optional[str] = None,
                 chart_type: Optional[str] = None,
                 chart_url: Optional[str] = None,
                 product: Optional[str] = '',
                 cluster: str = 'gold',
                 **kwargs: Any
                 ) -> None:
        self._dashboard_group_id = dashboard_group_id
        self._dashboard_id = dashboard_id
        self._query_id = query_id
        # Fall back to the chart name when no explicit chart id is given.
        self._chart_id = chart_id if chart_id else chart_name
        self._chart_name = chart_name
        self._chart_type = chart_type
        self._chart_url = chart_url
        self._product = product
        self._cluster = cluster
        # Renamed from a generator placeholder to _create_node_iterator,
        # matching the sibling _create_relation_iterator/_create_record_iterator.
        self._node_iterator = self._create_node_iterator()
        self._relation_iterator = self._create_relation_iterator()
        self._record_iterator = self._create_record_iterator()
        self._atlas_entity_iterator = self._create_next_atlas_entity()

    def create_next_node(self) -> Union[GraphNode, None]:
        """Return the next graph node, or None once exhausted."""
        try:
            return next(self._node_iterator)
        except StopIteration:
            return None

    def _create_node_iterator(self) -> Iterator[GraphNode]:
        """Yield the single Chart graph node; optional fields are only
        attached when a value was supplied."""
        node_attributes = {
            'id': self._chart_id
        }
        if self._chart_name:
            node_attributes['name'] = self._chart_name
        if self._chart_type:
            node_attributes['type'] = self._chart_type
        if self._chart_url:
            node_attributes['url'] = self._chart_url

        node = GraphNode(
            key=self._get_chart_node_key(),
            label=DashboardChart.DASHBOARD_CHART_LABEL,
            attributes=node_attributes
        )
        yield node

    def create_next_relation(self) -> Union[GraphRelationship, None]:
        """Return the next graph relationship, or None once exhausted."""
        try:
            return next(self._relation_iterator)
        except StopIteration:
            return None

    def _create_relation_iterator(self) -> Iterator[GraphRelationship]:
        """Yield the single Query -> Chart relationship."""
        relationship = GraphRelationship(
            start_label=DashboardQuery.DASHBOARD_QUERY_LABEL,
            start_key=DashboardQuery.DASHBOARD_QUERY_KEY_FORMAT.format(
                product=self._product,
                cluster=self._cluster,
                dashboard_group_id=self._dashboard_group_id,
                dashboard_id=self._dashboard_id,
                query_id=self._query_id
            ),
            end_label=DashboardChart.DASHBOARD_CHART_LABEL,
            end_key=self._get_chart_node_key(),
            type=DashboardChart.CHART_RELATION_TYPE,
            reverse_type=DashboardChart.CHART_REVERSE_RELATION_TYPE,
            attributes={}
        )
        yield relationship

    def _get_chart_node_key(self) -> str:
        """Build this chart's unique key from product/cluster/group/dashboard/query/chart ids."""
        return DashboardChart.DASHBOARD_CHART_KEY_FORMAT.format(
            product=self._product,
            cluster=self._cluster,
            dashboard_group_id=self._dashboard_group_id,
            dashboard_id=self._dashboard_id,
            query_id=self._query_id,
            chart_id=self._chart_id
        )

    def create_next_record(self) -> Union[RDSModel, None]:
        """Return the next RDS record, or None once exhausted."""
        try:
            return next(self._record_iterator)
        except StopIteration:
            return None

    def _create_record_iterator(self) -> Iterator[RDSModel]:
        """Yield the single RDS chart record keyed by the chart node key."""
        record = RDSDashboardChart(
            rk=self._get_chart_node_key(),
            id=self._chart_id,
            query_rk=DashboardQuery.DASHBOARD_QUERY_KEY_FORMAT.format(
                product=self._product,
                cluster=self._cluster,
                dashboard_group_id=self._dashboard_group_id,
                dashboard_id=self._dashboard_id,
                query_id=self._query_id
            )
        )
        if self._chart_name:
            record.name = self._chart_name
        if self._chart_type:
            record.type = self._chart_type
        if self._chart_url:
            record.url = self._chart_url
        yield record

    def create_next_atlas_entity(self) -> Union[AtlasEntity, None]:
        """Return the next Atlas entity, or None once exhausted."""
        try:
            return next(self._atlas_entity_iterator)
        except StopIteration:
            return None

    def create_next_atlas_relation(self) -> Union[AtlasRelationship, None]:
        # Charts carry their relationship inline on the entity, so there is
        # no separate Atlas relationship to emit.
        return None

    def _create_next_atlas_entity(self) -> Iterator[AtlasEntity]:
        """Yield the single Atlas chart entity with an inline 'query' relationship."""
        attrs_mapping = [
            (AtlasCommonParams.qualified_name, self._get_chart_node_key()),
            ('name', self._chart_name),
            ('type', self._chart_type),
            ('url', self._chart_url)
        ]

        chart_entity_attrs = get_entity_attrs(attrs_mapping)

        relationship_list = []  # type: ignore

        add_entity_relationship(
            relationship_list,
            'query',
            AtlasDashboardTypes.query,
            DashboardQuery.DASHBOARD_QUERY_KEY_FORMAT.format(
                product=self._product,
                cluster=self._cluster,
                dashboard_group_id=self._dashboard_group_id,
                dashboard_id=self._dashboard_id,
                query_id=self._query_id
            )
        )

        chart_entity = AtlasEntity(
            typeName=AtlasDashboardTypes.chart,
            operation=AtlasSerializedEntityOperation.CREATE,
            attributes=chart_entity_attrs,
            relationships=get_entity_relationships(relationship_list)
        )

        yield chart_entity

    def __repr__(self) -> str:
        return f'DashboardChart({self._dashboard_group_id!r}, {self._dashboard_id!r}, ' \
               f'{self._query_id!r}, {self._chart_id!r}, {self._chart_name!r}, {self._chart_type!r}, ' \
               f'{self._chart_url!r}, {self._product!r}, {self._cluster!r})'
6,659 | create propname | from propfind import PROPFIND
from xml.dom import minidom
domimpl = minidom.getDOMImplementation()
from utils import get_parenturi
class REPORT(PROPFIND):
    """WebDAV REPORT method handler.

    Works like PROPFIND, but restricts the URIs included in the
    multistatus response to those matched by the <filter> element parsed
    from the request body (self.filter).
    """

    def __init__(self, uri, dataclass, depth, body):
        PROPFIND.__init__(self, uri, dataclass, depth, body)
        doc = minidom.parseString(body)
        # Root element of the request body, used as the child filter.
        self.filter = doc.documentElement

    def create_propname(self):
        """ create a multistatus response for the prop names """
        # NOTE(review): renamed from a placeholder; presumably overrides the
        # PROPFIND hook of the same name so propname reports are filtered too.
        dc = self._dataclass
        # create the document generator
        doc = domimpl.createDocument(None, "multistatus", None)
        ms = doc.documentElement
        ms.setAttribute("xmlns:D", "DAV:")
        ms.tagName = 'D:multistatus'
        if self._depth == "0":
            # Only the request URI itself, and only if the filter matches it.
            if self._uri in self._dataclass.get_childs(get_parenturi(self._uri),
                                                       self.filter):
                pnames = dc.get_propnames(self._uri)
                re = self.mk_propname_response(self._uri, pnames, doc)
                ms.appendChild(re)
        elif self._depth == "1":
            # The request URI plus its direct (filtered) children.
            if self._uri in self._dataclass.get_childs(get_parenturi(self._uri),
                                                       self.filter):
                pnames = dc.get_propnames(self._uri)
                re = self.mk_propname_response(self._uri, pnames, doc)
                ms.appendChild(re)
            for newuri in dc.get_childs(self._uri, self.filter):
                pnames = dc.get_propnames(newuri)
                re = self.mk_propname_response(newuri, pnames, doc)
                ms.appendChild(re)
        elif self._depth == 'infinity':
            # Breadth-unbounded walk over the whole subtree.
            uri_list = [self._uri]
            while uri_list:
                uri = uri_list.pop()
                if uri in self._dataclass.get_childs(get_parenturi(uri),
                                                     self.filter):
                    pnames = dc.get_propnames(uri)
                    re = self.mk_propname_response(uri, pnames, doc)
                    ms.appendChild(re)
                uri_childs = self._dataclass.get_childs(uri)
                if uri_childs:
                    uri_list.extend(uri_childs)
        return doc.toxml(encoding="utf-8")

    def create_prop(self):
        """ handle a <prop> request

        This will

        1. set up the <multistatus>-Framework

        2. read the property values for each URI
           (which is dependant on the Depth header)
           This is done by the get_propvalues() method.

        3. For each URI call the append_result() method
           to append the actual <result>-Tag to the result
           document.

        We differ between "good" properties, which have been
        assigned a value by the interface class and "bad"
        properties, which resulted in an error, either 404
        (Not Found) or 403 (Forbidden).
        """
        # create the document generator
        doc = domimpl.createDocument(None, "multistatus", None)
        ms = doc.documentElement
        ms.setAttribute("xmlns:D", "DAV:")
        ms.tagName = 'D:multistatus'
        if self._depth == "0":
            if self._uri in self._dataclass.get_childs(get_parenturi(self._uri),
                                                       self.filter):
                gp, bp = self.get_propvalues(self._uri)
                res = self.mk_prop_response(self._uri, gp, bp, doc)
                ms.appendChild(res)
        elif self._depth == "1":
            if self._uri in self._dataclass.get_childs(get_parenturi(self._uri),
                                                       self.filter):
                gp, bp = self.get_propvalues(self._uri)
                res = self.mk_prop_response(self._uri, gp, bp, doc)
                ms.appendChild(res)
            for newuri in self._dataclass.get_childs(self._uri, self.filter):
                gp, bp = self.get_propvalues(newuri)
                res = self.mk_prop_response(newuri, gp, bp, doc)
                ms.appendChild(res)
        elif self._depth == 'infinity':
            uri_list = [self._uri]
            while uri_list:
                uri = uri_list.pop()
                if uri in self._dataclass.get_childs(get_parenturi(uri),
                                                     self.filter):
                    gp, bp = self.get_propvalues(uri)
                    res = self.mk_prop_response(uri, gp, bp, doc)
                    ms.appendChild(res)
                uri_childs = self._dataclass.get_childs(uri)
                if uri_childs:
                    uri_list.extend(uri_childs)
        return doc.toxml(encoding="utf-8")
|
6,660 | get pch suffix | # Copyright 2019 The meson development team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
"""Abstractions for the Elbrus family of compilers."""
import os
import typing as T
import subprocess
import re
from .gnu import GnuLikeCompiler
from .gnu import gnu_optimization_args
from ...mesonlib import Popen_safe, OptionKey
if T.TYPE_CHECKING:
from ...environment import Environment
from ...coredata import KeyedOptionDictType
class ElbrusCompiler(GnuLikeCompiler):
    # Elbrus compiler is nearly like GCC, but does not support
    # PCH, LTO, sanitizers and color output as of version 1.21.x.
    id = 'lcc'

    def __init__(self) -> None:
        super().__init__()
        self.base_options = {OptionKey(o) for o in ['b_pgo', 'b_coverage', 'b_ndebug', 'b_staticpic', 'b_lundef', 'b_asneeded']}
        default_warn_args = ['-Wall']
        self.warn_args = {'0': [],
                          '1': default_warn_args,
                          '2': default_warn_args + ['-Wextra'],
                          '3': default_warn_args + ['-Wextra', '-Wpedantic'],
                          'everything': default_warn_args + ['-Wextra', '-Wpedantic']}

    # FIXME: use _build_wrapper to call this so that linker flags from the env
    # get applied
    def get_library_dirs(self, env: 'Environment', elf_class: T.Optional[int] = None) -> T.List[str]:
        """Parse `lcc --print-search-dirs` for the library search path."""
        os_env = os.environ.copy()
        os_env['LC_ALL'] = 'C'
        stdo = Popen_safe(self.get_exelist(ccache=False) + ['--print-search-dirs'], env=os_env)[1]
        for line in stdo.split('\n'):
            if line.startswith('libraries:'):
                # lcc does not include '=' in --print-search-dirs output. Also it could show nonexistent dirs.
                libstr = line.split(' ', 1)[1]
                return [os.path.realpath(p) for p in libstr.split(':') if os.path.exists(p)]
        return []

    def get_program_dirs(self, env: 'Environment') -> T.List[str]:
        """Parse `lcc --print-search-dirs` for the program search path."""
        os_env = os.environ.copy()
        os_env['LC_ALL'] = 'C'
        stdo = Popen_safe(self.get_exelist(ccache=False) + ['--print-search-dirs'], env=os_env)[1]
        for line in stdo.split('\n'):
            if line.startswith('programs:'):
                # lcc does not include '=' in --print-search-dirs output.
                libstr = line.split(' ', 1)[1]
                return [os.path.realpath(p) for p in libstr.split(':')]
        return []

    def get_default_include_dirs(self) -> T.List[str]:
        """Extract the built-in include dirs from the verbose preprocessor run."""
        os_env = os.environ.copy()
        os_env['LC_ALL'] = 'C'
        p = subprocess.Popen(self.get_exelist(ccache=False) + ['-xc', '-E', '-v', '-'], env=os_env, stdin=subprocess.DEVNULL, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stderr = p.stderr.read().decode('utf-8', errors='replace')
        includes: T.List[str] = []
        for line in stderr.split('\n'):
            if line.lstrip().startswith('--sys_include'):
                # Strip the flag prefix and a trailing line-continuation backslash.
                includes.append(re.sub(r'\s*\\$', '', re.sub(r'^\s*--sys_include\s*', '', line)))
        return includes

    def get_optimization_args(self, optimization_level: str) -> T.List[str]:
        return gnu_optimization_args[optimization_level]

    def get_prelink_args(self, prelink_name: str, obj_list: T.List[str]) -> T.List[str]:
        return ['-r', '-nodefaultlibs', '-nostartfiles', '-o', prelink_name] + obj_list

    def get_pch_suffix(self) -> str:
        # Renamed from a placeholder to the standard meson compiler hook name.
        # Actually it's not supported for now, but probably will be supported in future
        return 'pch'

    def get_option_compile_args(self, options: 'KeyedOptionDictType') -> T.List[str]:
        """Translate the 'std' user option into a -std= flag."""
        args: T.List[str] = []
        std = options[OptionKey('std', lang=self.language, machine=self.for_machine)]
        if std.value != 'none':
            args.append('-std=' + std.value)
        return args

    def openmp_flags(self) -> T.List[str]:
        return ['-fopenmp']
6,661 | get secret output | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetSecretResult',
'AwaitableGetSecretResult',
'get_secret',
'get_secret_output',
]
@pulumi.output_type
class GetSecretResult:
    """
    Secret represents a secret.
    """
    # Generated code (see file header): each argument is type-checked and then
    # stored through pulumi.set so the @pulumi.getter properties can read it.
    def __init__(__self__, id=None, name=None, secret_resources=None, system_data=None, type=None):
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if secret_resources and not isinstance(secret_resources, str):
            raise TypeError("Expected argument 'secret_resources' to be a str")
        pulumi.set(__self__, "secret_resources", secret_resources)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="secretResources")
    def secret_resources(self) -> Optional[str]:
        """
        The Secrets Resources.
        """
        return pulumi.get(self, "secret_resources")

    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> 'outputs.SystemDataResponse':
        """
        Azure Resource Manager metadata containing createdBy and modifiedBy information.
        """
        return pulumi.get(self, "system_data")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
class AwaitableGetSecretResult(GetSecretResult):
    """Awaitable variant of GetSecretResult: `await`-ing it yields a plain
    GetSecretResult with the same field values."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable yield makes this a generator, as __await__ requires.
        if False:
            yield self
        return GetSecretResult(
            id=self.id,
            name=self.name,
            secret_resources=self.secret_resources,
            system_data=self.system_data,
            type=self.type)
def get_secret(child_resource_name: Optional[str] = None,
               resource_group_name: Optional[str] = None,
               resource_name: Optional[str] = None,
               opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSecretResult:
    """
    The operation returns properties of a Secret.


    :param str child_resource_name: The name of the Secret resource.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str resource_name: The name of the OpenShift cluster resource.
    """
    __args__ = dict()
    __args__['childResourceName'] = child_resource_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['resourceName'] = resource_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # Synchronously invoke the provider function and unpack its outputs.
    __ret__ = pulumi.runtime.invoke('azure-native:redhatopenshift/v20220904:getSecret', __args__, opts=opts, typ=GetSecretResult).value

    return AwaitableGetSecretResult(
        id=pulumi.get(__ret__, 'id'),
        name=pulumi.get(__ret__, 'name'),
        secret_resources=pulumi.get(__ret__, 'secret_resources'),
        system_data=pulumi.get(__ret__, 'system_data'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_secret)
def get_secret_output(child_resource_name: Optional[pulumi.Input[str]] = None,
                      resource_group_name: Optional[pulumi.Input[str]] = None,
                      resource_name: Optional[pulumi.Input[str]] = None,
                      opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSecretResult]:
    """
    The operation returns properties of a Secret.


    :param str child_resource_name: The name of the Secret resource.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str resource_name: The name of the OpenShift cluster resource.
    """
    # Renamed from a generator placeholder to match the name exported in
    # __all__ ('get_secret_output'). The lifted wrapper supplies the body.
    ...
6,662 | transfer direction | #!/usr/bin/env python3
#
# Cross Platform and Multi Architecture Advanced Binary Emulation Framework
#
import ctypes
from qiling.hw.peripheral import QlPeripheral
from qiling.hw.const.stm32f4xx_dma import DMA, DMA_SxCR
class Stream(ctypes.Structure):
    """Register block of one DMA stream, plus helpers to emulate a single
    data transfer step."""
    _fields_ = [
        ('CR'  , ctypes.c_uint32), # DMA stream x configuration register
        ('NDTR', ctypes.c_uint32), # DMA stream x number of data register
        ('PAR' , ctypes.c_uint32), # DMA stream x peripheral address register
        ('M0AR', ctypes.c_uint32), # DMA stream x memory 0 address register
        ('M1AR', ctypes.c_uint32), # DMA stream x memory 1 address register
        ('FCR' , ctypes.c_uint32), # DMA stream x FIFO control register
    ]

    def enable(self):
        """Non-zero when the stream's EN bit is set."""
        return self.CR & DMA_SxCR.EN

    def transfer_direction(self):
        # Renamed from a placeholder: returns the CR DIR field
        # (e.g. DMA.MEMORY_TO_PERIPH), consumed by step().
        return self.CR & DMA_SxCR.DIR

    def transfer_peripheral_size(self):
        """Peripheral-side transfer width in bytes (None for reserved encodings)."""
        PSIZE = self.CR & DMA_SxCR.PSIZE
        if PSIZE == DMA.PDATAALIGN_BYTE:
            return 1
        if PSIZE == DMA.PDATAALIGN_HALFWORD:
            return 2
        if PSIZE == DMA.PDATAALIGN_WORD:
            return 4

    def transfer_memory_size(self):
        """Memory-side transfer width in bytes (None for reserved encodings)."""
        MSIZE = self.CR & DMA_SxCR.MSIZE
        if MSIZE == DMA.MDATAALIGN_BYTE:
            return 1
        if MSIZE == DMA.MDATAALIGN_HALFWORD:
            return 2
        if MSIZE == DMA.MDATAALIGN_WORD:
            return 4

    def step(self, mem):
        """Move one data item and update counters/addresses.

        Returns True exactly when this step finished the transfer
        (NDTR reached 0), in which case EN is cleared.
        """
        if self.NDTR == 0:
            return

        dir_flag = self.transfer_direction() == DMA.MEMORY_TO_PERIPH
        psize = self.transfer_peripheral_size()
        msize = self.transfer_memory_size()

        src, dst = (self.M0AR, self.PAR) if dir_flag else (self.PAR, self.M0AR)
        src_size, dst_size = (msize, psize) if dir_flag else (psize, msize)

        # Pad/truncate to the destination width. NOTE(review): ljust pads with
        # b' ' -- presumably zero padding was intended; confirm before changing.
        data = bytes(mem.read(src, src_size)).ljust(dst_size)[:dst_size]
        mem.write(dst, data)

        self.NDTR -= 1
        if self.CR & DMA_SxCR.MINC:
            self.M0AR += msize
        if self.CR & DMA_SxCR.PINC:
            self.PAR += psize

        if self.NDTR == 0:
            self.CR &= ~DMA_SxCR.EN
            return True
class STM32F4xxDma(QlPeripheral):
    class Type(ctypes.Structure):
        """ the structure available in :
            stm32f413xx.h
            stm32f407xx.h
            stm32f469xx.h
            stm32f446xx.h
            stm32f427xx.h
            stm32f401xc.h
            stm32f415xx.h
            stm32f412cx.h
            stm32f410rx.h
            stm32f410tx.h
            stm32f439xx.h
            stm32f412vx.h
            stm32f417xx.h
            stm32f479xx.h
            stm32f429xx.h
            stm32f412rx.h
            stm32f423xx.h
            stm32f437xx.h
            stm32f412zx.h
            stm32f401xe.h
            stm32f410cx.h
            stm32f405xx.h
            stm32f411xe.h
        """
        _fields_ = [
            ('LISR' , ctypes.c_uint32), # DMA low interrupt status register,     Address offset: 0x00
            ('HISR' , ctypes.c_uint32), # DMA high interrupt status register,    Address offset: 0x04
            ('LIFCR', ctypes.c_uint32), # DMA low interrupt flag clear register, Address offset: 0x08
            ('HIFCR', ctypes.c_uint32), # DMA high interrupt flag clear register, Address offset: 0x0C
            ('stream', Stream * 8),     # 8 stream register blocks
        ]

    def __init__(
        self, ql, label,
        stream0_intn=None,
        stream1_intn=None,
        stream2_intn=None,
        stream3_intn=None,
        stream4_intn=None,
        stream5_intn=None,
        stream6_intn=None,
        stream7_intn=None
    ):
        """One interrupt number may be supplied per stream; None disables
        interrupt delivery for that stream."""
        super().__init__(ql, label)

        self.instance = self.struct()
        self.intn = [
            stream0_intn,
            stream1_intn,
            stream2_intn,
            stream3_intn,
            stream4_intn,
            stream5_intn,
            stream6_intn,
            stream7_intn,
        ]

    @QlPeripheral.monitor(width=15)
    def read(self, offset: int, size: int) -> int:
        """Raw little-endian read out of the register structure."""
        buf = ctypes.create_string_buffer(size)
        ctypes.memmove(buf, ctypes.addressof(self.instance) + offset, size)
        return int.from_bytes(buf.raw, byteorder='little')

    @QlPeripheral.monitor(width=15)
    def write(self, offset: int, size: int, value: int):
        # LIFCR/HIFCR are write-1-to-clear: writing a bit clears the matching
        # status bit instead of being stored.
        if offset == self.struct.LIFCR.offset:
            self.instance.LISR &= ~value
        elif offset == self.struct.HIFCR.offset:
            self.instance.HISR &= ~value
        elif offset > self.struct.HIFCR.offset:
            # Plain stores only for the stream registers past HIFCR;
            # LISR/HISR themselves stay read-only from the guest side.
            data = (value).to_bytes(size, byteorder='little')
            ctypes.memmove(ctypes.addressof(self.instance) + offset, data, size)

    def transfer_complete(self, id):
        """Raise the transfer-complete flag for stream *id* and pend its IRQ."""
        # TCIF bit positions for the four streams of each status register.
        tc_bits = [5, 11, 21, 27]
        # Fixed off-by-one: streams 4-7 live in HISR (the old `id > 4` test
        # sent stream 4 into the LISR branch, where tc_bits[4] raises
        # IndexError).
        if id >= 4:
            self.instance.HISR |= 1 << tc_bits[id - 4]
        else:
            self.instance.LISR |= 1 << tc_bits[id]

        if self.intn[id] is not None:
            self.ql.hw.nvic.set_pending(self.intn[id])

    def step(self):
        """Advance every enabled stream by one transfer item."""
        for id, stream in enumerate(self.instance.stream):
            if not stream.enable():
                continue

            if stream.step(self.ql.mem):
                self.transfer_complete(id)
6,663 | init viewer | from .. import pinocchio_pywrap as pin
from ..shortcuts import buildModelsFromUrdf, createDatas
import time
import numpy as np
import os.path as osp
try:
import imageio
IMAGEIO_SUPPORT = True
except ImportError:
IMAGEIO_SUPPORT = False
class BaseVisualizer(object):
    """Pinocchio visualizers are employed to easily display a model at a given configuration.
    BaseVisualizer is not meant to be directly employed, but only to provide a uniform interface and a few common methods.
    New visualizers should extend this class and override its methods as needed.
    """

    # Set by create_video_ctx()/VideoContext while recording; None otherwise.
    _video_writer = None

    def __init__(
        self,
        model=pin.Model(),
        collision_model=None,
        visual_model=None,
        copy_models=False,
        data=None,
        collision_data=None,
        visual_data=None,
    ):
        """Construct a display from the given model, collision model, and visual model.

        If copy_models is True, the models are copied. Otherwise, they are simply kept as a reference.

        NOTE(review): the `model=pin.Model()` default is evaluated once at class
        definition, so all default-constructed visualizers share one Model
        instance -- kept for interface compatibility, but worth confirming.
        """
        if copy_models:
            self.model = model.copy()
            self.collision_model = collision_model.copy()
            self.visual_model = visual_model.copy()
        else:
            self.model = model
            self.collision_model = collision_model
            self.visual_model = visual_model

        if data is None:
            self.data = self.model.createData()
        else:
            self.data = data

        if collision_data is None and self.collision_model is not None:
            self.collision_data = self.collision_model.createData()
        else:
            self.collision_data = collision_data

        if visual_data is None and self.visual_model is not None:
            self.visual_data = self.visual_model.createData()
        else:
            self.visual_data = visual_data

    def rebuildData(self):
        """Re-build the data objects. Needed if the models were modified.
        Warning: this will delete any information stored in all data objects."""
        self.data, self.collision_data, self.visual_data = createDatas(
            self.model, self.collision_model, self.visual_model
        )

    def getViewerNodeName(self, geometry_object, geometry_type):
        """Return the name of the geometry object inside the viewer."""
        pass

    def initViewer(self, *args, **kwargs):
        """Init the viewer by loading the gui and creating a window."""
        # Renamed from a placeholder to the camelCase hook name used by the
        # sibling loadViewerModel().
        pass

    def loadViewerModel(self, *args, **kwargs):
        """Create the scene displaying the robot meshes in the viewer"""
        pass

    def reload(self, new_geometry_object, geometry_type=None):
        """Reload a geometry_object given by its type"""
        pass

    def clean(self):
        """Delete all the objects from the whole scene"""
        pass

    def display(self, q=None):
        """Display the robot at configuration q or refresh the rendering
        from the current placements contained in data by placing all the bodies in the viewer."""
        pass

    def displayCollisions(self, visibility):
        """Set whether to display collision objects or not."""
        pass

    def displayVisuals(self, visibility):
        """Set whether to display visual objects or not."""
        raise NotImplementedError()

    def setBackgroundColor(self, *args, **kwargs):
        """Set the visualizer background color."""
        raise NotImplementedError()

    def setCameraTarget(self, target):
        """Set the camera target."""
        raise NotImplementedError()

    def setCameraPosition(self, position):
        """Set the camera's 3D position."""
        raise NotImplementedError()

    def setCameraZoom(self, zoom):
        """Set camera zoom value."""
        raise NotImplementedError()

    def setCameraPose(self, pose=np.eye(4)):
        """Set camera 6D pose using a 4x4 matrix."""
        raise NotImplementedError()

    def captureImage(self, w=None, h=None):
        """Captures an image from the viewer and returns an RGB array."""
        pass

    def disableCameraControl(self):
        raise NotImplementedError()

    def enableCameraControl(self):
        raise NotImplementedError()

    def drawFrameVelocities(self, *args, **kwargs):
        """Draw current frame velocities."""
        raise NotImplementedError()

    def sleep(self, dt):
        time.sleep(dt)

    def has_video_writer(self):
        """True while a video recording context is active."""
        return self._video_writer is not None

    def play(self, q_trajectory, dt=None, callback=None, capture=False, **kwargs):
        """Play a trajectory with given time step. Optionally capture RGB images and returns them."""
        nsteps = len(q_trajectory)
        if not capture:
            # An active video writer implies capture.
            capture = self.has_video_writer()

        imgs = []
        for i in range(nsteps):
            t0 = time.time()
            self.display(q_trajectory[i])
            if callback is not None:
                callback(i, **kwargs)
            if capture:
                img_arr = self.captureImage()
                if not self.has_video_writer():
                    imgs.append(img_arr)
                else:
                    self._video_writer.append_data(img_arr)
            t1 = time.time()
            elapsed_time = t1 - t0
            # Pad each frame so the playback rate matches dt.
            if dt is not None and elapsed_time < dt:
                self.sleep(dt - elapsed_time)
        if capture and not self.has_video_writer():
            return imgs

    def create_video_ctx(self, filename=None, fps=30, directory=None, **kwargs):
        """Create a video recording context, generating the output filename if necessary.

        Code inspired from https://github.com/petrikvladimir/RoboMeshCat.
        """
        if not IMAGEIO_SUPPORT:
            import warnings
            import contextlib

            warnings.warn("Video context cannot be created because imageio is not available.", UserWarning)
            return contextlib.nullcontext()
        if filename is None:
            if directory is None:
                from tempfile import gettempdir

                directory = gettempdir()
            f_fmt = "%Y%m%d_%H%M%S"
            ext = "mp4"
            filename = time.strftime("{}.{}".format(f_fmt, ext))
            filename = osp.join(directory, filename)
        return VideoContext(self, fps, filename)
class VideoContext:
    """Context manager that attaches an imageio video writer to a visualizer
    for the duration of a `with` block (consumed by BaseVisualizer.play)."""
    def __init__(self, viz, fps, filename, **kwargs):
        # Extra kwargs are forwarded to imageio.get_writer.
        self.viz = viz
        self.vid_writer = imageio.get_writer(filename, fps=fps, **kwargs)

    def __enter__(self):
        print("[Entering video recording context]")
        self.viz._video_writer = self.vid_writer

    def __exit__(self, *args):
        # Close the file and detach the writer from the visualizer.
        self.vid_writer.close()
        self.viz._video_writer = None
__all__ = ["BaseVisualizer"] |
6,664 | way | #-*- coding: utf-8 -*-
###########################################################################
## ##
## Copyrights Frédéric Rodrigo 2019 ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###########################################################################
import dateutil.parser
from . import config
from .osm_pbf_parser import osm_pbf_parser
from .OsmState import OsmState
import subprocess
from .OsmReader import OsmReader, dummylog
try: # osmium still optional for now
import osmium # type: ignore
have_osmium = True
except:
have_osmium = False
class OsmPbfReader(OsmReader, osm_pbf_parser.Visitor):
    """Reads an OSM .pbf extract and forwards its objects to an output sink.

    Acts as the visitor for osm_pbf_parser: node()/way()/relation() are the
    parser callbacks; each re-shapes the raw object into a dict for the
    output object passed to CopyTo().
    """

    def log(self, txt):
        self._logger.log(txt)

    # NOTE(review): `logger = dummylog()` is a mutable default shared by all
    # instances -- kept for interface compatibility.
    def __init__(self, pbf_file, logger = dummylog(), state_file = None):
        osm_pbf_parser.Visitor.__init__(self)
        self._pbf_file = pbf_file
        self._state_file = state_file
        self._logger = logger
        self._got_error = False

    def set_filter_since_timestamp(self, since_timestamp):
        self.set_since_timestamp(int(since_timestamp.timestamp()) if since_timestamp else 0)

    def timestamp(self):
        """Best-effort extract of the data timestamp (naive datetime).

        Tries, in order: the pbf replication header (osmium), the state
        file, osmconvert metadata, and finally the max timestamp in the
        data. Returns None when all attempts fail.
        """
        if have_osmium:
            try:
                osmobject = osmium.io.Reader(self._pbf_file)
                return dateutil.parser.isoparse(osmobject.header().get('osmosis_replication_timestamp')).replace(tzinfo=None)
            except:
                pass

        if self._state_file:
            osm_state = OsmState(self._state_file)
            return osm_state.timestamp()
        else:
            try:
                # Try to get timestamp from metadata
                res = subprocess.check_output([config.bin_osmconvert, self._pbf_file, '--out-timestamp']).decode('utf-8')
                d = dateutil.parser.parse(res).replace(tzinfo=None)
                if not d:
                    raise ValueError()
                return d
            except:
                pass

            try:
                # Compute max timestamp from data
                res = subprocess.check_output('{} {} --out-statistics | grep "timestamp max"'.format(config.bin_osmconvert, self._pbf_file), shell=True).decode('utf-8')
                s = res.split(' ')[2]
                return dateutil.parser.parse(s).replace(tzinfo=None)
            except:
                return

    def CopyTo(self, output):
        """Stream every object of the pbf file into *output*."""
        self._output = output
        osm_pbf_parser.read_osm_pbf(self._pbf_file, self)

    def node(self, osmid, lon, lat, tags):
        data = {
            'id': osmid,
            'lon': lon,
            'lat': lat,
            'tag': tags,
            #'version'
            #'timestamp'
            #'uid'
        }
        self._output.NodeCreate(data)

    def way(self, osmid, tags, refs):
        # Renamed from a placeholder: this is the Visitor callback for OSM
        # ways, completing the node()/way()/relation() trio.
        data = {
            'id': osmid,
            'tag': tags,
            'nd': refs,
            #'version'
            #'timestamp'
            #'uid'
        }
        self._output.WayCreate(data)

    def relation(self, osmid, tags, ref):
        data = {
            'id': osmid,
            'tag': tags,
            #'version'
            #'timestamp'
            #'uid'
            'member': ref,
        }
        self._output.RelationCreate(data)
###########################################################################
import unittest
class MockCountObjects:
    """Counting sink implementing the reader output interface: every
    *Create call increments the matching counter and ignores its payload."""

    def __init__(self):
        self.num_nodes = self.num_ways = self.num_rels = 0

    def NodeCreate(self, data):
        """Record one node."""
        self.num_nodes = self.num_nodes + 1

    def WayCreate(self, data):
        """Record one way."""
        self.num_ways = self.num_ways + 1

    def RelationCreate(self, data):
        """Record one relation."""
        self.num_rels = self.num_rels + 1
class Test(unittest.TestCase):
    # These tests require the fixture files under tests/ and, for some
    # timestamp paths, the external osmconvert binary.

    def test_copy_all(self):
        # Timestamp comes from the .state.txt file when one is supplied.
        i1 = OsmPbfReader("tests/saint_barthelemy.osm.pbf", state_file = "tests/saint_barthelemy.state.txt")
        o1 = MockCountObjects()
        i1.CopyTo(o1)
        self.assertEqual(o1.num_nodes, 83) # only nodes with tags are reported
        self.assertEqual(o1.num_ways, 625)
        self.assertEqual(o1.num_rels, 16)
        self.assertEqual(i1.timestamp(), dateutil.parser.parse("2015-03-25T19:05:08Z").replace(tzinfo=None))

    def test_copy_all_no_state_txt(self):
        # Without a state file the timestamp is derived from the data itself.
        i1 = OsmPbfReader("tests/saint_barthelemy.osm.pbf")
        o1 = MockCountObjects()
        i1.CopyTo(o1)
        self.assertEqual(o1.num_nodes, 83) # only nodes with tags are reported
        self.assertEqual(o1.num_ways, 625)
        self.assertEqual(o1.num_rels, 16)
        self.assertEqual(i1.timestamp(), dateutil.parser.parse("2014-01-15T19:05:08Z").replace(tzinfo=None))

    def test_copy_all_pbf_timestamp(self):
        # This fixture carries a replication timestamp in its pbf header.
        i1 = OsmPbfReader("tests/gibraltar.osm.pbf")
        o1 = MockCountObjects()
        i1.CopyTo(o1)
        self.assertEqual(o1.num_nodes, 850) # only nodes with tags are reported
        self.assertEqual(o1.num_ways, 3833)
        self.assertEqual(o1.num_rels, 55)
        self.assertEqual(i1.timestamp(), dateutil.parser.parse("2017-09-03T23:40:03Z").replace(tzinfo=None))
6,665 | do save | from __future__ import print_function
import json, sys, cmd
import cPickle as pickle
from verifiable_base import VerifiableBase
from verifiable_log import VerifiableLog
from verifiable_map import VerifiableMap, recalc_tree_hash
# Example general purpose verifiable database
# Mutation opertions append to its log
# Its verifiable map then calls the callback (_apply_operation) to change the view.
class VerifiableDatabase(VerifiableBase):
    """Verifiable key/value store: mutations append to a log, and the
    verifiable map replays them via _apply_operation to build the view."""
    def __init__(self):
        VerifiableBase.__init__(self, VerifiableLog())
    # Private, called back by the underlying map when the log sequences entries
    def _apply_operation(self, idx, operation, map):
        decoded = json.loads(operation)
        kind = decoded['operation']
        if kind == 'set':
            map.put(str(decoded['key']), str(decoded['value']))
        elif kind == 'delete':
            # deletion is modelled as storing the empty string
            map.put(str(decoded['key']), '')
    def set(self, key, value):
        """Record a 'set' mutation in the log."""
        entry = {'operation': 'set', 'key': key, 'value': value}
        self._log.append(json.dumps(entry))
    def delete(self, key):
        """Record a 'delete' mutation in the log."""
        entry = {'operation': 'delete', 'key': key}
        self._log.append(json.dumps(entry))
    def get(self, key, tree_size):
        """Return (value, proof) for key at tree_size; value is None when absent."""
        raw, proof = VerifiableBase.get(self, str(key), tree_size)
        return (str(raw) if len(raw) else None), proof
# Test right val is returned and inclusion proof checks out
# Assert the right value is returned and that its inclusion proof verifies
def test(db, query, tree_size, exp_val):
    value, proof = db.get(query, tree_size)
    assert value == exp_val
    root = db.get_tree_head(tree_size)['sha256_root_hash']
    assert recalc_tree_hash(query, str(value) if value else '', proof) == root
class ReplCmd(cmd.Cmd):
    """Interactive shell for poking at a VerifiableDatabase.

    Commands: new, sth, set, get, del, save, load, dump, log.
    """
    def __init__(self):
        cmd.Cmd.__init__(self)
        self.prompt = '> '
        # start every session with a fresh, empty database
        self.do_new()
    def do_sth(self, arg):
        try:
            if not len(arg.strip()):
                seq = None
            else:
                seq = int(arg)
        except ValueError:
            # narrowed from a bare except: only the int() parse can fail here
            self.help_sth()
            return
        print(self.db.get_tree_head(seq))
    def help_sth(self):
        print('sth <integer> - Updates tree to sequence number and print STH. Leave blank for latest.')
    def do_new(self, arg=''):
        self.db = VerifiableDatabase()
    def help_new(self):
        print('new - creates a new database, called by default upon launch')
    def METHOD_NAME(self, arg):
        arg = arg.strip()
        if not len(arg):
            self.help_save()
            return
        # context manager fixes the leaked handle of file(arg, 'wb') and
        # works on Python 3 where the file() builtin no longer exists
        with open(arg, 'wb') as f:
            pickle.dump(self.db, f)
    def help_save(self):
        print('save <path> - save state to a path')
    def do_load(self, arg):
        arg = arg.strip()
        if not len(arg):
            self.help_load()
            return
        with open(arg, 'rb') as f:
            self.db = pickle.load(f)
    def help_load(self):
        print('load <path> - load state from path')
    def do_set(self, arg):
        try:
            n, v = arg.split(' ')
        except ValueError:
            self.help_set()
            return
        n = n.strip()
        v = v.strip()
        self.db.set(n, v)
        self.do_get(n)
    def help_set(self):
        print('set <key> <value> - set key (string) to the specified value (string)')
    def do_get(self, arg):
        try:
            n, v = arg.split(' ')
            n = n.strip()
            v = v.strip()
        except ValueError:
            # no explicit tree size given: default to the latest
            n = arg.strip()
            v = self.db.get_tree_head(None)['tree_size']
        try:
            v = int(v)
        except ValueError:
            self.help_get()
            return
        try:
            val, proof = self.db.get(n, v)
        except ValueError:
            print('Tree size does not exist.')
            return
        print('Value: ', val)
        print('Proof: ', proof)
        print('Map hash: ', self.db.get_tree_head(v)['sha256_root_hash'])
        print('Log hash: ', self.db.get_tree_head(v)['log_tree_head']['sha256_root_hash'])
        print('Tree size: ', self.db.get_tree_head(v)['tree_size'])
    def help_get(self):
        print('get <key> <integer> - get value as of this sequence number. Leave blank for latest.')
    def do_del(self, arg):
        n = arg.strip()
        self.db.delete(n)
        self.do_get(n)
    def help_del(self):
        print('del <key> - delete key (string) from database')
    def do_dump(self, arg=''):
        try:
            if not len(arg.strip()):
                seq = None
            else:
                seq = int(arg)
        except ValueError:
            self.help_dump()
            return
        print('Tree:')
        self.db.debug_dump(seq)
    def help_dump(self):
        print('dump <integer> - dump the tree as of this sequence number. Leave blank for latest.')
    def do_log(self, arg=''):
        for i, x in enumerate(self.db.get_log_entries(0, self.db.get_tree_head()['tree_size'] - 1)):
            print(i, x)
    def help_log(self):
        print('log - dump all ops')
# Smoke test at import time: build a small history and verify value +
# inclusion proof at every historical tree size.
db = VerifiableDatabase()
db.set('foo', 'bar')
db.set('foo', 'baz')
db.delete('foo')
db.set('foo', 'bar')
db.get_tree_head()
test(db, 'foo', 0, None)  # size 0 predates any operation
test(db, 'foo', 1, 'bar')
test(db, 'foo', 2, 'baz')
test(db, 'foo', 3, None)  # deleted at sequence 3
test(db, 'foo', 4, 'bar')
if __name__ == '__main__':
    ReplCmd().cmdloop('Type "help" to get started.')
6,666 | test untrusted get partial voting right | import numpy as np
import pytest
from vouch.voting_rights import OVER_TRUST_BIAS, OVER_TRUST_SCALE, compute_voting_rights
def test_empty_input():
    """No users at all: the result must be an empty array."""
    result = compute_voting_rights(np.array([]), np.array([]))
    np.testing.assert_array_equal(result, [])
def test_everyone_trusted():
    """Fully trusted, unpenalized users keep a full voting right of 1."""
    scores = np.array([1, 1, 1, 1])
    penalties = np.ones(shape=4)
    np.testing.assert_array_equal(compute_voting_rights(scores, penalties), [1, 1, 1, 1])
def test_everyone_trusted_some_penalized():
    """Privacy penalties scale the voting right of trusted users directly."""
    scores = np.array([1, 1, 1, 1])
    penalties = np.array([0.5, 0.5, 1, 1])
    np.testing.assert_array_equal(compute_voting_rights(scores, penalties), [0.5, 0.5, 1, 1])
def test_untrusted_less_than_bias_get_full_voting_right():
    """With few untrusted users, the over-trust bias grants everyone a full right."""
    scores = np.array([0, 0.5, 0.5, 1, 1])
    result = compute_voting_rights(scores, np.ones(shape=5))
    np.testing.assert_array_equal(result, [1, 1, 1, 1, 1])
def test_untrusted_and_penalized_less_than_bias_get_penalized_voting_right():
    """Below the bias threshold, the penalty alone determines the right."""
    scores = np.array([0, 0.5, 0.5, 1, 1])
    penalties = np.array([0.7, 0.7, 0.7, 1, 1])
    result = compute_voting_rights(scores, penalties)
    np.testing.assert_array_equal(result, [0.7, 0.7, 0.7, 1, 1])
# Simple cases where every trust score is exactly 0 or 1.
@pytest.mark.parametrize(
    "n_trusted, n_non_trusted",
    [
        (0, 2),
        (0, 100),
        (1, 1),
        (1, 50),
        (2, 3),
        (100, 50),
        (1000, 1000),
        (1000, 3),
    ],
)
def METHOD_NAME(n_trusted, n_non_trusted):
    """Untrusted users share the over-trust budget, capped at a full right."""
    budget = OVER_TRUST_BIAS + n_trusted * OVER_TRUST_SCALE
    expected_partial_right = min(budget / n_non_trusted, 1)
    trust_scores = np.array([0] * n_non_trusted + [1] * n_trusted)
    penalties = np.ones(shape=n_non_trusted + n_trusted)
    expected = [expected_partial_right] * n_non_trusted + [1] * n_trusted
    np.testing.assert_array_equal(compute_voting_rights(trust_scores, penalties), expected)
@pytest.mark.parametrize(
    "n_random_users",
    [
        1,
        8,
        32,
        128,
        512,
        1024,
    ],
)
def test_random_input_voting_right_more_than_trust_score(n_random_users):
    """A user's voting right is never below their trust score."""
    scores = np.random.random(size=(n_random_users,))
    rights = compute_voting_rights(scores, np.ones(shape=n_random_users))
    assert all(right >= score for right, score in zip(rights, scores))
@pytest.mark.parametrize(
    "n_random_users",
    [
        1,
        8,
        32,
        128,
        512,
        1024,
    ],
)
def test_total_over_trust_less_than_expected(n_random_users):
    """Total granted over-trust stays within bias + scale * total trust."""
    scores = np.random.random(size=(n_random_users,))
    rights = compute_voting_rights(scores, np.ones(shape=n_random_users))
    granted = (rights - scores).sum()
    allowed = OVER_TRUST_BIAS + scores.sum() * OVER_TRUST_SCALE
    assert granted < allowed or np.isclose(granted, allowed)
@pytest.mark.parametrize(
    "n_random_users",
    [
        1,
        8,
        32,
        128,
        512,
        1024,
    ],
)
def test_total_over_trust_less_than_expected_with_random_penalizations(n_random_users):
    """Over-trust budget holds when penalties scale the base voting rights."""
    scores = np.random.random(size=(n_random_users,))
    penalties = np.random.random(size=(n_random_users,))
    rights = compute_voting_rights(scores, penalties)
    granted = (rights - scores * penalties).sum()
    allowed = OVER_TRUST_BIAS + scores.sum() * OVER_TRUST_SCALE
    assert granted < allowed or np.isclose(granted, allowed)
@pytest.mark.parametrize(
    "n_random_users",
    [
        1,
        8,
        32,
        128,
        512,
        1024,
    ],
)
def test_min_voting_right_more_than_min_trust(n_random_users):
    """The least-trusted user still ends up strictly above their trust score."""
    scores = np.random.random(size=(n_random_users,))
    rights = compute_voting_rights(scores, np.ones(shape=n_random_users))
    assert rights.min() > scores.min()
6,667 | test skeletonize wrong dim | import numpy as np
import scipy.ndimage as ndi
from skimage import io, draw
from skimage.data import binary_blobs
from skimage.util import img_as_ubyte
from skimage.morphology import skeletonize, skeletonize_3d
from skimage._shared import testing
from skimage._shared.testing import assert_equal, assert_, parametrize, fetch
# basic behavior tests (mostly copied over from 2D skeletonize)
def METHOD_NAME():
    """skeletonize(method='lee') must reject 1-D and 4-D inputs."""
    for bad_shape in (5, (5, 5, 5, 5)):
        im = np.zeros(bad_shape, dtype=np.uint8)
        with testing.raises(ValueError):
            skeletonize(im, method='lee')
def test_skeletonize_1D_old_api():
    # corner case of a shape (1, N) image, via the deprecated 3D entry point
    image = np.ones((5, 1), dtype=np.uint8)
    assert_equal(skeletonize_3d(image), image)
def test_skeletonize_1D():
    # corner case of a shape (1, N) image: skeleton is the image itself
    image = np.ones((5, 1), dtype=np.uint8)
    assert_equal(skeletonize(image, method='lee'), image)
def test_skeletonize_no_foreground():
    """An all-background image skeletonizes to itself."""
    image = np.zeros((5, 5), dtype=np.uint8)
    assert_equal(skeletonize(image, method='lee'), image)
def test_skeletonize_all_foreground():
    """A solid rectangle thins to its horizontal center line."""
    image = np.ones((3, 4), dtype=np.uint8)
    expected = np.array([[0, 0, 0, 0],
                         [1, 1, 1, 1],
                         [0, 0, 0, 0]], dtype=np.uint8)
    assert_equal(skeletonize(image, method='lee'), expected)
def test_skeletonize_single_point():
    """A single foreground pixel is already a skeleton."""
    image = np.zeros((5, 5), dtype=np.uint8)
    image[3, 3] = 1
    assert_equal(skeletonize(image, method='lee'), image)
def test_skeletonize_already_thinned():
    """A one-pixel-wide curve must be returned unchanged."""
    image = np.zeros((5, 5), dtype=np.uint8)
    image[3, 1:-1] = 1
    image[2, -1] = 1
    image[4, 0] = 1
    assert_equal(skeletonize(image, method='lee'), image)
def test_dtype_conv():
    """Float (and non-contiguous) input: output is uint8, input untouched,
    intensity range preserved."""
    img = np.random.random((16, 16))[::2, ::2]  # non-contiguous view
    img[img < 0.5] = 0
    snapshot = img.copy()
    res = skeletonize(img, method='lee')
    assert_equal(res.dtype, np.uint8)
    assert_equal(img, snapshot)  # operation does not clobber the original
    assert_equal(res.max(), img_as_ubyte(img).max())
@parametrize("img", [
    np.ones((8, 8), dtype=float), np.ones((4, 8, 8), dtype=float)
])
def test_input_with_warning(img):
    """Float 2D/3D inputs must leave the original array unmodified."""
    check_input(img)
@parametrize("img", [
    np.ones((8, 8), dtype=np.uint8), np.ones((4, 8, 8), dtype=np.uint8),
    np.ones((8, 8), dtype=bool), np.ones((4, 8, 8), dtype=bool)
])
def test_input_without_warning(img):
    """uint8/bool 2D/3D inputs must leave the original array unmodified."""
    check_input(img)
def check_input(img):
    """Helper: assert skeletonize does not modify its input in place."""
    snapshot = img.copy()
    skeletonize(img, method='lee')
    assert_equal(img, snapshot)
def test_skeletonize_num_neighbors():
    """Skeletons must be strictly one pixel wide (no 2x2 foreground blocks)."""
    # an empty image
    image = np.zeros((300, 300))
    # foreground object 1: a thick U-shaped frame
    image[10:-10, 10:100] = 1
    image[-100:-10, 10:-10] = 1
    image[10:-10, -100:-10] = 1
    # foreground object 2: two thick crossing diagonal bands
    rs, cs = draw.line(250, 150, 10, 280)
    for i in range(10):
        image[rs + i, cs] = 1
    rs, cs = draw.line(10, 150, 250, 280)
    for i in range(20):
        image[rs + i, cs] = 1
    # foreground object 3: an annulus (ring) of radii 20..30
    ir, ic = np.indices(image.shape)
    circle1 = (ic - 135)**2 + (ir - 150)**2 < 30**2
    circle2 = (ic - 135)**2 + (ir - 150)**2 < 20**2
    image[circle1] = 1
    image[circle2] = 0
    result = skeletonize(image, method='lee')
    # there should never be a 2x2 block of foreground pixels in a skeleton:
    # correlating with a 2x2 kernel of ones yields 4 exactly at such blocks
    mask = np.array([[1, 1],
                     [1, 1]], np.uint8)
    blocks = ndi.correlate(result, mask, mode='constant')
    assert_(not np.any(blocks == 4))
def test_two_hole_image():
    # test a simple 2D image against FIJI
    # img_o: input blob with two holes; img_f: reference skeleton from FIJI
    img_o = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0],
                      [0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
                      [0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0],
                      [0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0],
                      [0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0],
                      [0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0],
                      [0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0],
                      [0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
                      [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
                      [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
                      [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
                     dtype=np.uint8)
    img_f = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0],
                      [0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0],
                      [0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0],
                      [0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0],
                      [0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0],
                      [0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0],
                      [0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0],
                      [0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
                     dtype=np.uint8)
    res = skeletonize(img_o, method='lee')
    assert_equal(res, img_f)
def test_3d_vs_fiji():
    """Skeleton of seeded 3D blobs matches the FIJI reference skeleton
    (Plugins > Skeleton > Skeletonize)."""
    img = binary_blobs(32, 0.05, n_dim=3, rng=1234)
    # trim to a non-cubic volume, then scale to 0/255 uint8
    img = img[:-2, ...].astype(np.uint8) * 255
    expected = io.imread(fetch("data/_blobs_3d_fiji_skeleton.tif"))
    assert_equal(skeletonize(img), expected)
6,668 | submit task | # SPDX-License-Identifier: GPL-3.0-or-later
# SPDX-FileCopyrightText: Copyright contributors to the OpenScanHub project.
import os
import sys
from xmlrpc.client import Fault
from kobo.shortcuts import random_string
import osh.client
from osh.client.commands.common import (add_analyzers_option,
add_comment_option,
add_comp_warnings_option,
add_config_option,
add_csmock_args_option,
add_custom_model_option,
add_download_results_option,
add_email_to_option,
add_install_to_chroot_option,
add_nowait_option, add_nvr_option,
add_priority_option,
add_profile_option,
add_task_id_file_option)
from osh.client.commands.shortcuts import (check_analyzers, fetch_results,
handle_perm_denied, upload_file,
verify_koji_build, verify_mock,
verify_scan_profile_exists)
from osh.client.conf import get_conf
class Diff_Build(osh.client.OshCommand):
    """analyze a SRPM without and with patches, return diff"""
    enabled = True
    admin = False  # admin type account required
    def options(self):
        """Register all command-line options for the diff-build command."""
        # specify command usage
        # normalized name contains a lower-case class name with underscores
        # converted to dashes
        self.parser.usage = "%%prog %s [options] <args>" % self.normalized_name
        self.parser.epilog = "User configuration file is located at: \
~/.config/osh/client.conf"
        add_config_option(self.parser)
        add_download_results_option(self.parser)
        add_comp_warnings_option(self.parser)
        add_analyzers_option(self.parser)
        add_profile_option(self.parser)
        add_csmock_args_option(self.parser)
        add_comment_option(self.parser)
        add_task_id_file_option(self.parser)
        add_nowait_option(self.parser)
        add_email_to_option(self.parser)
        add_priority_option(self.parser)
        add_nvr_option(self.parser)
        add_custom_model_option(self.parser)
        add_install_to_chroot_option(self.parser)
    def validate_results_store_file(self):
        """Abort with a parser error unless --results-dir (if given) is an
        existing directory path."""
        if self.results_store_file:
            if isinstance(self.results_store_file, str):
                if not os.path.isdir(self.results_store_file):
                    self.parser.error("Path (%s) for storing results doesn't \
exist." % self.results_store_file)
            else:
                self.parser.error("Invalid path to store results.")
    def run(self, *args, **kwargs):  # noqa: C901
        """Validate inputs, upload artifacts, submit the diff-build task to
        the hub and optionally wait for it and download results."""
        local_conf = get_conf(self.conf)
        # optparser output is passed via *args (args) and **kwargs (opts)
        config = kwargs.pop("config", None)
        email_to = kwargs.pop("email_to", [])
        comment = kwargs.pop("comment")
        nowait = kwargs.pop("nowait")
        task_id_file = kwargs.pop("task_id_file")
        priority = kwargs.pop("priority")
        nvr = kwargs.pop("nvr")
        self.results_store_file = kwargs.pop("results_dir", None)
        warn_level = kwargs.pop('warn_level', '0')
        analyzers = kwargs.pop('analyzers', '')
        profile = kwargs.pop('profile', None)
        csmock_args = kwargs.pop('csmock_args', None)
        cov_custom_model = kwargs.pop('cov_custom_model', None)
        tarball_build_script = kwargs.pop('tarball_build_script', None)
        packages_to_install = kwargs.pop('install_to_chroot', None)
        if len(args) != 1:
            self.parser.error("please specify exactly one SRPM")
        if nvr:
            # self.srpm contains NVR if --nvr is used!
            self.srpm = args[0]
        else:
            self.srpm = os.path.abspath(os.path.expanduser(args[0]))
        self.validate_results_store_file()
        if nvr:
            # get build from koji
            koji_profiles = self.conf.get('KOJI_PROFILES', 'brew,koji')
            result = verify_koji_build(self.srpm, koji_profiles)
            if result is not None:
                self.parser.error(result)
        elif tarball_build_script:
            # we are analyzing tarball with build script
            if not os.path.exists(self.srpm):
                self.parser.error("Tarball does not exist.")
        if not config:
            # fall back to the mock config from the user/system configuration
            config = local_conf.get_default_mockconfig()
            if not config:
                self.parser.error("You haven't specified mock config, there \
is not even one in your user configuration file \
(~/.config/osh/client.conf) nor in system configuration file \
(/etc/osh/client.conf)")
        # non-negative priority
        if priority is not None and priority < 0:
            self.parser.error("Priority must be a non-negative number!")
        # login to the hub
        self.connect_to_hub(kwargs)
        result = verify_mock(config, self.hub)
        if result is not None:
            self.parser.error(result)
        # options setting
        options = {
            "comment": comment,
            "mock_config": config
        }
        if email_to:
            options["email_to"] = email_to
        if priority is not None:
            options["priority"] = priority
        if warn_level:
            options['warning_level'] = warn_level
        if analyzers:
            try:
                check_analyzers(self.hub, analyzers)
            except RuntimeError as ex:
                self.parser.error(str(ex))
            options['analyzers'] = analyzers
        if profile:
            result = verify_scan_profile_exists(self.hub, profile)
            if result is not None:
                self.parser.error(result)
            options['profile'] = profile
        if nvr:
            options["brew_build"] = self.srpm
            options["srpm_name"] = self.srpm
        else:
            # upload the local SRPM/tarball into a random hub-side directory
            target_dir = random_string(32)
            upload_id, err_code, err_msg = upload_file(self.hub, self.srpm,
                                                       target_dir, self.parser)
            options["upload_id"] = upload_id
        if csmock_args:
            options['csmock_args'] = csmock_args
        if cov_custom_model:
            # custom Coverity model is uploaded separately from the SRPM
            target_dir = random_string(32)
            upload_model_id, err_code, err_msg = upload_file(self.hub,
                                                             cov_custom_model,
                                                             target_dir,
                                                             self.parser)
            options["upload_model_id"] = upload_model_id
        if packages_to_install:
            options['install_to_chroot'] = packages_to_install
        if tarball_build_script:
            options['tarball_build_script'] = tarball_build_script
        task_id = self.METHOD_NAME(config, comment, options)
        self.write_task_id_file(task_id, task_id_file)
        task_url = self.hub.client.task_url(task_id)
        print("Task info: %s" % task_url)
        if not nowait:
            from kobo.client.task_watcher import TaskWatcher
            TaskWatcher.watch_tasks(self.hub, [task_id])
            # store results if user requested this
            if self.results_store_file is not None and \
                    not fetch_results(self.hub, self.results_store_file, task_id):
                sys.exit(1)
    def METHOD_NAME(self, config, comment, options):
        """Submit the diff-build task via XML-RPC; returns the new task id.

        Permission errors from the hub are turned into a parser error.
        """
        try:
            return self.hub.scan.diff_build(config, comment, options)
        except Fault as e:
            handle_perm_denied(e, self.parser)
6,669 | test mi constant in function | # Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
import typing
import unittest
import onnx
import onnx.parser
import onnx.shape_inference
class TestModelInference(unittest.TestCase):
def _check(self, model_text: str, *expected: int):
"""Check that the model inference infers the expected types for outputs.
Restricted to the simple case of tensor types, so expected types specify
only the element type (ints corresponding to onnx.TensorProto.DataType).
"""
model = onnx.parser.parse_model(model_text)
inferred = onnx.shape_inference.infer_shapes(model)
outputs = inferred.graph.output
for output, expected_elem_type in zip(outputs, expected):
inferred_type = output.type
self.assertTrue(inferred_type.HasField("tensor_type"))
tensor_type = inferred_type.tensor_type
self.assertTrue(tensor_type.HasField("elem_type"))
elem_type = tensor_type.elem_type
self.assertEqual(elem_type, expected_elem_type)
def test_mi_basic(self):
"""Test that model inference infers model output type."""
model = """
<
ir_version: 7,
opset_import: [ "" : 17]
>
agraph (float[N] x) => (y)
{
y = Cast<to=6> (x)
}
"""
self._check(model, onnx.TensorProto.INT32)
def test_mi_function(self):
"""Test use of functions."""
model = """
<
ir_version: 7,
opset_import: [ "" : 17, "local" : 1]
>
agraph (float[N] x) => (y)
{
y = local.cast(x)
}
<
opset_import: [ "" : 17 ],
domain: "local"
>
cast (x) => (y)
{
y = Cast<to=6> (x)
}
"""
self._check(model, onnx.TensorProto.INT32)
def test_mi_function_attr(self):
"""Test use of functions with attribute parameters."""
model = """
<
ir_version: 7,
opset_import: [ "" : 17, "local" : 1]
>
agraph (float[N] x) => (y)
{
y = local.cast<target=6>(x)
}
<
opset_import: [ "" : 17 ],
domain: "local"
>
cast<target>(x) => (y)
{
y = Cast<to:int = @target> (x)
}
"""
self._check(model, onnx.TensorProto.INT32)
def test_mi_function_subgraph_attr(self):
"""Test use of function attributes within subgraphs."""
model = """
<
ir_version: 7,
opset_import: [ "" : 17, "local" : 1]
>
agraph (float[N] x, bool flag) => (y)
{
y = local.cast<target=6>(x, flag)
}
<
opset_import: [ "" : 17 ],
domain: "local"
>
cast<target>(x, flag) => (y)
{
y = If (flag) <
then_branch = g1 () => (z_then) { z_then = Cast<to:int = @target> (x) },
else_branch = g2 () => (z_else) { z_else = Cast<to:int = @target> (x) }
>
}
"""
self._check(model, onnx.TensorProto.INT32)
def test_mi_function_multiple_calls(self):
"""Test use of multiple invocation of functions."""
model = """
<
ir_version: 7,
opset_import: [ "" : 17, "local" : 1]
>
agraph (float[N] x, bool flag) => (y, z)
{
y = local.cast<target=6>(x, flag)
z = local.cast<target=7>(x, flag)
}
<
opset_import: [ "" : 17 ],
domain: "local"
>
cast<target>(x, flag) => (y)
{
y = If (flag) <
then_branch = g1 () => (z_then) { z_then = Cast<to:int = @target> (x) },
else_branch = g2 () => (z_else) { z_else = Cast<to:int = @target> (x) }
>
}
"""
self._check(model, onnx.TensorProto.INT32, onnx.TensorProto.INT64)
def _check_shape(self, model_text: str, *expected: typing.Sequence[int]):
"""Check that the model inference infers the expected shapes for outputs.
Restricted to the simple case of tensor type outputs with completely
known shapes.
"""
model = onnx.parser.parse_model(model_text)
inferred = onnx.shape_inference.infer_shapes(model, True, True, True)
outputs = inferred.graph.output
for output, expected_shape in zip(outputs, expected):
inferred_type = output.type
self.assertTrue(inferred_type.HasField("tensor_type"))
tensor_type = inferred_type.tensor_type
self.assertTrue(tensor_type.HasField("shape"))
inferred_shape = tensor_type.shape
self.assertEqual(len(inferred_shape.dim), len(expected_shape))
for inferred_dim, expected_dim in zip(inferred_shape.dim, expected_shape):
self.assertTrue(inferred_dim.HasField("dim_value"))
self.assertEqual(inferred_dim.dim_value, expected_dim)
def test_mi_constant(self):
model = """
<
ir_version: 7,
opset_import: [ "" : 17]
>
mymodel (float[4, 8, 16] x) => (y) {
shape = Constant<value_ints=[8,4,16]>()
y = Reshape(x, shape)
}
"""
self._check_shape(model, [8, 4, 16])
def test_mi_constant_2(self):
model = """
<
ir_version: 7,
opset_import: [ "" : 17]
>
mymodel (float[4, 8, 16] x) => (y) {
shape = Constant<value_ints=[4,2,8]>()
two = Constant<value_int=2>()
shape2 = Mul(shape, two)
y = Reshape(x, shape2)
}
"""
self._check_shape(model, [8, 4, 16])
def METHOD_NAME(self):
model = """
<
ir_version: 7,
opset_import: [ "" : 17, "local" : 1]
>
main (float x) => (y, z) {
y, z = local.expand(x)
}
<
opset_import: [ "" : 17 ],
domain: "local"
>
expand (x) => (y, z) {
shape1 = Constant<value = int64[2] {4,4}>()
shape2 = Constant<value = int64[3] {8,8,8}>()
z = Expand (x, shape2)
y = Expand (x, shape1)
}
"""
self._check_shape(model, [4, 4], [8, 8, 8])
def test_mi_function_default_attr(self):
"""Test use of default values of function attributes."""
model = """
<ir_version: 7, opset_import: [ "" : 17, "local" : 1]>
agraph (float[N] x) => (y, z)
{
y = local.cast <target=6> (x) # casts to INT32 type (encoding value 6)
z = local.cast (x) # uses default-attribute value of 1 (FLOAT type)
}
<opset_import: [ "" : 17 ], domain: "local">
cast <target: int = 1> (x) => (y)
{
y = Cast <to:int = @target> (x)
}
"""
self._check(model, onnx.TensorProto.INT32, onnx.TensorProto.FLOAT)
if __name__ == "__main__":
unittest.main() |
6,670 | remove header footer from key | import logging
import os
import stat
from tempfile import NamedTemporaryFile
from ocs_ci.framework import config
from ocs_ci.ocs import constants
from ocs_ci.ocs.exceptions import CommandFailed, ConfigurationError
from ocs_ci.utility.utils import download_file, exec_cmd
logger = logging.getLogger(__name__)
def generate_onboarding_token() -> str:
    """
    Generate Onboarding token for consumer cluster via following steps:
    1. Download ticketgen.sh script from:
    https://raw.githubusercontent.com/jarrpa/ocs-operator/ticketgen/hack/ticketgen/ticketgen.sh
    2. Save private key from AUTH["managed_service"]["private_key"] to
    temporary file.
    3. Run ticketgen.sh script to generate Onboarding token.
    Raises:
        CommandFailed: In case the script ticketgen.sh fails.
        ConfigurationError: when AUTH["managed_service"]["private_key"] not is not defined
    Returns:
        string: Onboarding token
    """
    logger.debug("Generate onboarding token for ODF to ODF deployment")
    ticketgen_script_path = os.path.join(constants.DATA_DIR, "ticketgen.sh")
    # download ticketgen.sh script
    logger.debug("Download and prepare ticketgen.sh script")
    download_file(
        "https://raw.githubusercontent.com/jarrpa/ocs-operator/ticketgen/hack/ticketgen/ticketgen.sh",
        ticketgen_script_path,
    )
    # add execute permission to the ticketgen.sh script
    current_file_permissions = os.stat(ticketgen_script_path)
    os.chmod(
        ticketgen_script_path,
        current_file_permissions.st_mode | stat.S_IEXEC,
    )
    # save private key to temp file
    logger.debug("Prepare temporary file with private key")
    private_key = config.AUTH.get("managed_service", {}).get("private_key", "")
    if not private_key:
        raise ConfigurationError(
            "Private key for Managed Service not defined.\n"
            "Expected following configuration in auth.yaml file:\n"
            "managed_service:\n"
            '  private_key: "..."\n'
            '  public_key: "..."'
        )
    # delete=True: the key file vanishes as soon as the with-block exits;
    # flush() makes the content visible to the subprocess before it runs
    with NamedTemporaryFile(
        mode="w", prefix="private", suffix=".pem", delete=True
    ) as key_file:
        key_file.write(private_key)
        key_file.flush()
        logger.debug("Generate Onboarding token")
        ticketgen_result = exec_cmd(f"{ticketgen_script_path} {key_file.name}")
        ticketgen_output = ticketgen_result.stdout.decode()
        # any stderr output is treated as failure of the script
        if ticketgen_result.stderr:
            raise CommandFailed(
                f"Script ticketgen.sh failed to generate Onboarding token:\n"
                f"command: '{' '.join(ticketgen_result.args)}'\n"
                f"stderr: {ticketgen_result.stderr.decode()}\n"
                f"stdout: {ticketgen_output}"
            )
        return ticketgen_output
def METHOD_NAME(key):
    """
    Strip a PEM-style header line (e.g. -----BEGIN RSA PRIVATE KEY-----)
    and footer line (e.g. -----END RSA PRIVATE KEY-----) from ``key`` and
    join what remains into a single line.

    Returns:
        string: one line key string without header and footer
    """
    lines = key.strip().split("\n")
    if "-----BEGIN" in lines[0]:
        del lines[0]
    if "-----END" in lines[-1]:
        del lines[-1]
    return "".join(lines)
def get_storage_provider_endpoint(cluster):
    """
    Get get_storage_provider_endpoint
    Args:
        cluster (str): cluster name (currently unused — see TODO below)
    Returns:
        str: value of storage provider endpoint
    """
    # TODO: p2 task to implement below functionality
    # Use multicluster implementation to use
    # kubeconfig as per cluster name and
    # extract value of storage_provider_endpoint
    # handle invalid cluster name in implementation
    # validate Return String storage provider endpoint:
    # 1. raise Error if storage_provider_endpoint is
    # not found in cluster yaml
    # 2. warning if storage cluster is not ready
    # and storage_provider_endpoint is available in
    # storagecluster yaml .
    # For now use hardcoded value from config with key
    # storage_provider_endpoint:
    return config.DEPLOYMENT.get("storage_provider_endpoint", "")
6,671 | customer | from enum import Enum, auto
from typing import Dict, Optional
from aws_lambda_powertools.utilities.data_classes.common import DictWrapper
class ConnectContactFlowChannel(Enum):
    """Channel through which the contact reached the contact center."""
    VOICE = auto()
    CHAT = auto()
class ConnectContactFlowEndpointType(Enum):
    """Type of a contact endpoint address (currently only phone numbers)."""
    TELEPHONE_NUMBER = auto()
class ConnectContactFlowInitiationMethod(Enum):
    """How the contact was initiated (Amazon Connect InitiationMethod)."""
    INBOUND = auto()
    OUTBOUND = auto()
    TRANSFER = auto()
    CALLBACK = auto()
    API = auto()
class ConnectContactFlowEndpoint(DictWrapper):
    """Address/type pair describing one endpoint of the contact."""
    @property
    def address(self) -> str:
        """The phone number."""
        return self["Address"]
    @property
    def endpoint_type(self) -> ConnectContactFlowEndpointType:
        """The endpoint type."""
        # maps the raw "Type" string (e.g. "TELEPHONE_NUMBER") to the enum by name
        return ConnectContactFlowEndpointType[self["Type"]]
class ConnectContactFlowQueue(DictWrapper):
    """Queue (ARN + name) associated with the contact."""
    @property
    def arn(self) -> str:
        """The unique queue ARN."""
        return self["ARN"]
    @property
    def name(self) -> str:
        """The queue name."""
        return self["Name"]
class ConnectContactFlowMediaStreamAudio(DictWrapper):
    """Kinesis Video Streams metadata for the customer audio stream.

    NOTE(review): properties are typed Optional, presumably the keys are
    present with null values when live streaming is inactive — confirm.
    """
    @property
    def start_fragment_number(self) -> Optional[str]:
        """The number that identifies the Kinesis Video Streams fragment, in the stream used for Live media streaming,
        in which the customer audio stream started.
        """
        return self["StartFragmentNumber"]
    @property
    def start_timestamp(self) -> Optional[str]:
        """When the customer audio stream started."""
        return self["StartTimestamp"]
    @property
    def stream_arn(self) -> Optional[str]:
        """The ARN of the Kinesis Video stream used for Live media streaming that includes the customer data to
        reference.
        """
        return self["StreamARN"]
class ConnectContactFlowMediaStreamCustomer(DictWrapper):
    """Media streams originating from the customer side of the contact."""
    @property
    def audio(self) -> ConnectContactFlowMediaStreamAudio:
        """Audio stream metadata for the customer."""
        return ConnectContactFlowMediaStreamAudio(self["Audio"])
class ConnectContactFlowMediaStreams(DictWrapper):
    """Top-level media streams container of the contact flow event."""
    @property
    def METHOD_NAME(self) -> ConnectContactFlowMediaStreamCustomer:
        """Customer-side media streams."""
        return ConnectContactFlowMediaStreamCustomer(self["Customer"])
class ConnectContactFlowData(DictWrapper):
    """Typed view over the "ContactData" payload of a contact flow event."""
    @property
    def attributes(self) -> Dict[str, str]:
        """These are attributes that have been previously associated with a contact,
        such as when using a Set contact attributes block in a contact flow.
        This map may be empty if there aren't any saved attributes.
        """
        return self["Attributes"]
    @property
    def channel(self) -> ConnectContactFlowChannel:
        """The method used to contact your contact center."""
        # raw string (e.g. "VOICE") is mapped onto the enum by member name
        return ConnectContactFlowChannel[self["Channel"]]
    @property
    def contact_id(self) -> str:
        """The unique identifier of the contact."""
        return self["ContactId"]
    @property
    def customer_endpoint(self) -> Optional[ConnectContactFlowEndpoint]:
        """Contains the customer’s address (number) and type of address."""
        if self["CustomerEndpoint"] is not None:
            return ConnectContactFlowEndpoint(self["CustomerEndpoint"])
        return None
    @property
    def initial_contact_id(self) -> str:
        """The unique identifier for the contact associated with the first interaction between the customer and your
        contact center. Use the initial contact ID to track contacts between contact flows.
        """
        return self["InitialContactId"]
    @property
    def initiation_method(self) -> ConnectContactFlowInitiationMethod:
        """How the contact was initiated."""
        return ConnectContactFlowInitiationMethod[self["InitiationMethod"]]
    @property
    def instance_arn(self) -> str:
        """The ARN for your Amazon Connect instance."""
        return self["InstanceARN"]
    @property
    def previous_contact_id(self) -> str:
        """The unique identifier for the contact before it was transferred.
        Use the previous contact ID to trace contacts between contact flows.
        """
        return self["PreviousContactId"]
    @property
    def queue(self) -> Optional[ConnectContactFlowQueue]:
        """The current queue."""
        if self["Queue"] is not None:
            return ConnectContactFlowQueue(self["Queue"])
        return None
    @property
    def system_endpoint(self) -> Optional[ConnectContactFlowEndpoint]:
        """Contains the address (number) the customer dialed to call your contact center and type of address."""
        if self["SystemEndpoint"] is not None:
            return ConnectContactFlowEndpoint(self["SystemEndpoint"])
        return None
    @property
    def media_streams(self) -> ConnectContactFlowMediaStreams:
        """Live media streams metadata for the contact."""
        return ConnectContactFlowMediaStreams(self["MediaStreams"])
class ConnectContactFlowEvent(DictWrapper):
    """Amazon Connect contact flow event.

    Documentation:
    -------------
    - https://docs.aws.amazon.com/connect/latest/adminguide/connect-lambda-functions.html
    """

    @property
    def contact_data(self) -> ConnectContactFlowData:
        """Contact data; always passed by Amazon Connect, some fields optional."""
        return ConnectContactFlowData(self["Details"]["ContactData"])

    @property
    def parameters(self) -> Dict[str, str]:
        """Call-specific parameters defined when the Lambda function was created."""
        return self["Details"]["Parameters"]
#
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 Raphael Michel and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
# This file is based on an earlier version of pretix which was released under the Apache License 2.0. The full text of
# the Apache License 2.0 can be obtained at <http://www.apache.org/licenses/LICENSE-2.0>.
#
# This file may have since been changed and any changes are released under the terms of AGPLv3 as described above. A
# full history of changes and contributors is available at <https://github.com/pretix/pretix>.
#
# This file contains Apache-licensed contributions copyrighted by: Benjamin Hättasch, Tobias Kunze
#
# Unless required by applicable law or agreed to in writing, software distributed under the Apache License 2.0 is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under the License.
from collections import OrderedDict
from django.dispatch import receiver
from django.utils.timezone import get_current_timezone
from django.utils.translation import gettext as _, gettext_lazy, pgettext_lazy
from pretix.base.settings import PERSON_NAME_SCHEMES
from ..exporter import ListExporter, OrganizerLevelExportMixin
from ..signals import register_multievent_data_exporters
class CustomerListExporter(OrganizerLevelExportMixin, ListExporter):
    """Organizer-level spreadsheet export of all registered customer accounts."""
    identifier = 'customerlist'
    verbose_name = gettext_lazy('Customer accounts')
    organizer_required_permission = 'can_manage_customers'
    category = pgettext_lazy('export_category', 'Customer accounts')
    description = gettext_lazy('Download a spreadsheet of all currently registered customer accounts.')

    @property
    def additional_form_fields(self):
        # This export takes no extra form options.
        return OrderedDict([])

    def METHOD_NAME(self, form_data):
        """Yield the header row, then one row per customer account."""
        customers = self.organizer.customers.prefetch_related('provider')
        name_scheme = PERSON_NAME_SCHEMES[self.organizer.settings.name_scheme]
        # Per-part name columns are only emitted for multi-part name schemes.
        split_names = bool(name_scheme) and len(name_scheme['fields']) > 1
        headers = [
            _('Customer ID'),
            _('SSO provider'),
            _('External identifier'),
            _('E-mail'),
            _('Phone number'),
            _('Full name'),
        ]
        if split_names:
            headers += [_('Name') + ': ' + str(label) for k, label, w in name_scheme['fields']]
        headers += [
            _('Account active'),
            _('Verified email address'),
            _('Last login'),
            _('Registration date'),
            _('Language'),
            _('Notes'),
        ]
        yield headers
        tz = get_current_timezone()
        for customer in customers:
            row = [
                customer.identifier,
                customer.provider.name if customer.provider else None,
                customer.external_identifier,
                customer.email or '',
                customer.phone or '',
                customer.name,
            ]
            if split_names:
                row += [customer.name_parts.get(k, '') for k, label, w in name_scheme['fields']]
            row += [
                _('Yes') if customer.is_active else _('No'),
                _('Yes') if customer.is_verified else _('No'),
                customer.last_login.astimezone(tz).date().strftime('%Y-%m-%d') if customer.last_login else '',
                customer.date_joined.astimezone(tz).date().strftime('%Y-%m-%d') if customer.date_joined else '',
                customer.get_locale_display(),
                customer.notes or '',
            ]
            yield row

    def get_filename(self):
        """File name prefix: ``<organizer slug>_customers``."""
        return '{}_customers'.format(self.organizer.slug)
@receiver(register_multievent_data_exporters, dispatch_uid="multiexporter_customerlist")
def register_multievent_i_customerlist_exporter(sender, **kwargs):
    """Signal receiver registering the customer list exporter for multi-event exports."""
    # Fix: removed a stray trailing "|" token that broke the file's syntax.
    return CustomerListExporter
import os
import re
import sys
import textwrap
import time
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
from . import resource_suite
from .. import lib
from .. import paths
from ..core_file import temporary_core_file
from ..configuration import IrodsConfig
class Test_Quotas(resource_suite.ResourceBase, unittest.TestCase):
    """Integration tests for iRODS quota enforcement (iquota, iadmin suq/sgq).

    Fix: removed a stray trailing "|" token on the last line that broke the
    file's syntax; the icommand sequences themselves are unchanged.
    """

    plugin_name = IrodsConfig().default_rule_engine_plugin
    class_name = 'Test_Quotas'

    def setUp(self):
        super(Test_Quotas, self).setUp()

    def tearDown(self):
        super(Test_Quotas, self).tearDown()

    def test_iquota__3044(self):
        # Rule text (per rule-engine plugin) that switches the resource quota
        # policy on for the duration of the test.
        pep_map = {
            'irods_rule_engine_plugin-irods_rule_language': textwrap.dedent('''
                acRescQuotaPolicy {
                    msiSetRescQuotaPolicy("on");
                }
            '''),
            'irods_rule_engine_plugin-python': textwrap.dedent('''
                def acRescQuotaPolicy(rule_args, callback, rei):
                    callback.msiSetRescQuotaPolicy('on')
            ''')
        }
        filename_1 = 'test_iquota__3044_1'
        filename_2 = 'test_iquota__3044_2'
        with temporary_core_file() as core:
            core.add_rule(pep_map[self.plugin_name])
            for quotatype in [['sgq', 'public']]: # group
                for quotaresc in [self.testresc, 'total']: # resc and total
                    cmd = 'iadmin {0} {1} {2} 10000000'.format(quotatype[0], quotatype[1], quotaresc) # set high quota
                    self.admin.assert_icommand(cmd.split())
                    cmd = 'iquota'
                    self.admin.assert_icommand(cmd.split(), 'STDOUT_SINGLELINE', 'Nearing quota') # not over yet
                    lib.make_file(filename_1, 1024, contents='arbitrary')
                    cmd = 'iput -R {0} {1}'.format(self.testresc, filename_1) # should succeed
                    self.admin.assert_icommand(cmd.split())
                    cmd = 'iadmin cu' # calculate, update db
                    self.admin.assert_icommand(cmd.split())
                    cmd = 'iquota'
                    self.admin.assert_icommand(cmd.split(), 'STDOUT_SINGLELINE', 'Nearing quota') # not over yet
                    cmd = 'iadmin {0} {1} {2} 40'.format(quotatype[0], quotatype[1], quotaresc) # set low quota
                    self.admin.assert_icommand(cmd.split())
                    cmd = 'iquota'
                    self.admin.assert_icommand(cmd.split(), 'STDOUT_SINGLELINE', 'OVER QUOTA') # confirm it's over
                    lib.make_file(filename_2, 1024, contents='arbitrary')
                    cmd = 'iput -R {0} {1}'.format(self.testresc, filename_2) # should fail
                    self.admin.assert_icommand(cmd.split(), 'STDERR_SINGLELINE', 'SYS_RESC_QUOTA_EXCEEDED')
                    cmd = 'istream write nopes'
                    self.admin.assert_icommand(cmd.split(), 'STDERR', 'Error: Cannot open data object.', input='some data')
                    cmd = 'iadmin {0} {1} {2} 0'.format(quotatype[0], quotatype[1], quotaresc) # remove quota
                    self.admin.assert_icommand(cmd.split())
                    cmd = 'iadmin cu' # update db
                    self.admin.assert_icommand(cmd.split())
                    cmd = 'iput -R {0} {1}'.format(self.testresc, filename_2) # should succeed again
                    self.admin.assert_icommand(cmd.split())
                    cmd = 'irm -rf {0}'.format(filename_1) # clean up
                    self.admin.assert_icommand(cmd.split())
                    cmd = 'irm -rf {0}'.format(filename_2) # clean up
                    self.admin.assert_icommand(cmd.split())

    def test_iquota_empty__3048(self):
        # Both quota subcommands must print usage information when invoked
        # without arguments instead of crashing.
        cmd = 'iadmin suq' # no arguments
        self.admin.assert_icommand(cmd.split(), 'STDERR_SINGLELINE', 'ERROR: missing username parameter') # usage information
        cmd = 'iadmin sgq' # no arguments
        self.admin.assert_icommand(cmd.split(), 'STDERR_SINGLELINE', 'ERROR: missing group name parameter') # usage information

    def METHOD_NAME(self):
        # Setting a *user* quota on a *group* name must be rejected.
        self.admin.assert_icommand(['igroupadmin', 'mkgroup', 'test_group_3507'])
        try:
            # Attempt to set user quota passing in the name of a group; should fail
            self.admin.assert_icommand(['iadmin', 'suq', 'test_group_3507', 'demoResc', '0'], 'STDERR_SINGLELINE', 'CAT_INVALID_USER')
        finally:
            self.admin.assert_icommand(['iadmin', 'rmgroup', 'test_group_3507'])
import warnings
import rules
from rules import predicates as rules_predicates
from adhocracy4.organisations.predicates import is_initiator
from adhocracy4.organisations.predicates import is_org_member
from adhocracy4.phases import predicates as phase_predicates
from adhocracy4.projects.predicates import is_live
from adhocracy4.projects.predicates import is_moderator
from adhocracy4.projects.predicates import is_prj_group_member
from adhocracy4.projects.predicates import is_project_member
from adhocracy4.projects.predicates import is_public
from adhocracy4.projects.predicates import is_semipublic
# Predicates testing roles
@rules.predicate
def is_context_initiator(user, item):
    """Return True if the user initiates the item's project (or the item itself)."""
    if not item:
        return False
    target = item.project if hasattr(item, "project") else item
    return is_initiator(user, target)
@rules.predicate
def METHOD_NAME(user, item):
    """Return True if the user moderates the item's project (or the item itself)."""
    if not item:
        return False
    target = item.project if hasattr(item, "project") else item
    return is_moderator(user, target)
@rules.predicate
def is_context_group_member(user, item):
    """Return True if the user is a project group member of the item's project.

    Accepts either a project itself or an object with a ``project`` attribute.
    """
    if item:
        if hasattr(item, "project"):
            return is_prj_group_member(user, item.project)
        else:
            return is_prj_group_member(user, item)
    # Fix: previously fell through and implicitly returned None for a falsy
    # item; return False explicitly, consistent with the sibling predicates.
    return False
@rules.predicate
def is_context_member(user, item):
    """Return True if the user is a project participant or an org member.

    In public projects every registered user is a participant; in private or
    semi-public projects only invited participants count.
    """
    if not item:
        return False
    project = item.project
    return is_project_member(user, project) | is_org_member(user, project.organisation)
@rules.predicate
def is_owner(user, item):
    """Return True if the user created the item."""
    return bool(item) and item.creator == user
# Predicates testing context
@rules.predicate
def is_public_context(user, item):
    """Return True if the item's project is public or semi-public."""
    if not item:
        return False
    return is_public(user, item.project) | is_semipublic(user, item.project)
@rules.predicate
def is_live_context(user, item):
    """Return True if the item's project is live (published)."""
    return is_live(user, item.project) if item else False
# Predicates testing if user is allowed to do sth. in project
@rules.predicate
def is_allowed_moderate_project(user, item):
    """Return True if the user may moderate the project of the item.

    Superusers, moderators, initiators and project group members qualify.
    """
    if not item:
        return False
    return (
        rules_predicates.is_superuser(user)
        | METHOD_NAME(user, item)
        | is_context_initiator(user, item)
        | is_context_group_member(user, item)
    )
@rules.predicate
def is_allowed_crud_project(user, item):
    """Return True if the user may create/change/delete the item's project."""
    if not item:
        return False
    return (
        rules_predicates.is_superuser(user)
        | is_context_initiator(user, item)
        | is_context_group_member(user, item)
    )
@rules.predicate
def is_project_admin(user, item):
    """Return if user is allowed to moderate project.

    Attention: This method is _deprecated_ as it was named confusingly.
    Now either use is_allowed_moderate_project or is_allowed_crud_project
    """
    # Emits a DeprecationWarning on every call; the message text is part of
    # the observable behaviour and must stay as-is.
    warnings.warn(
        "is_project_admin is deprecated; use is_allowed_moderate_project.",
        DeprecationWarning,
    )
    return is_allowed_moderate_project(user, item)
# Predicates testing if user is allowed to do that on the item
# in the current phase; bringing together all info
@rules.predicate
def is_allowed_view_item(user, item):
    """Return True if the user may view the item.

    Moderators always may; everyone else needs the project to be live and to
    be either a participant or covered by the project's public visibility.
    """
    if not item:
        return False
    may_participate = is_context_member(user, item) | is_public_context(user, item)
    return is_allowed_moderate_project(user, item) | (
        may_participate & is_live_context(user, item)
    )
def is_allowed_add_item(item_class):
    """Build a ``rules`` predicate checking if a user may add an ``item_class`` item.

    The returned predicate is evaluated against a module.
    """
    @rules.predicate
    def _add_item(user, module):
        # Moderators may always add; other users must be participants of a
        # live project whose active phase allows adding item_class.
        if module:
            return is_allowed_moderate_project(user, module) | (
                is_context_member(user, module)
                & is_live_context(user, module)
                & phase_predicates.phase_allows_add(item_class)(user, module)
            )
        return False
    return _add_item
@rules.predicate
def is_allowed_rate_item(user, item):
    """Return True if the user may rate the item in the current phase."""
    if not item:
        return False
    return is_allowed_moderate_project(user, item) | (
        is_context_member(user, item)
        & is_live_context(user, item)
        & phase_predicates.phase_allows_rate(user, item)
    )
@rules.predicate
def is_allowed_comment_item(user, item):
    """Return True if the user may comment on the item in the current phase."""
    if not item:
        return False
    return is_allowed_moderate_project(user, item) | (
        is_context_member(user, item)
        & is_live_context(user, item)
        & phase_predicates.phase_allows_comment(user, item)
    )
@rules.predicate
def is_allowed_change_item(user, item):
    """Return True if the owner may change the item in the current phase."""
    if not item:
        return False
    return is_allowed_moderate_project(user, item) | (
        is_context_member(user, item)
        & is_live_context(user, item)
        & is_owner(user, item)
        & phase_predicates.phase_allows_change(user, item)
    )
def module_is_between_phases(past_phase_type, future_phase_type, module):
    """Return True if the module currently sits between two given phase types.

    True when no phase is active, a phase of ``past_phase_type`` lies in the
    past, and a phase of ``future_phase_type`` lies in the future.

    Fix: removed a stray trailing "|" token that broke the file's syntax.
    """
    return (
        module.phases.active_phases().count() == 0
        and past_phase_type in [phase.type for phase in module.phases.past_phases()]
        and future_phase_type in [phase.type for phase in module.phases.future_phases()]
    )
#!/usr/bin/env python3
"""This script creates build_components.bzl.new with new contents based on the
current source tree. It should be used to regularly update the version of
build_components.bzl file that is present in git."""
import argparse
import os
import subprocess
import sys
def METHOD_NAME(label):
    """Sort key for Bazel labels: compare lexicographically by path segment."""
    return label.split("/")
def _is_full_package_library(one_label):
package, short_name = one_label.split(":")
if package.endswith("/" + short_name):
return package
else:
return None
def _bazel_query(args):
    """Run ``bazel query`` with the given args; return its non-empty output lines."""
    raw = subprocess.check_output(["bazel", "query"] + args).decode('utf8')
    return [line for line in raw.split('\n') if line]
def _find_libdrake_components():
    """Return the sorted labels of all cc_library targets to bundle into libdrake.

    Whole drake_cc_package_library packages are listed once by package path;
    any remaining stand-alone cc_library targets not reachable from those are
    listed individually.
    """
    # This forms the set of cc_library targets that will be installed.
    # TODO(russt/eric-cousineau/jwnimmer): Remove any examples from
    # libdrake.so, pending resolution of #9648.
    components_query = """
kind("cc_library", visible("//tools/install/libdrake:libdrake.so", "//..."))
except(attr("testonly", "1", "//..."))
except("//:*")
except("//bindings/pydrake/...")
except(
"//examples/..." except(set(
"//examples/acrobot/..."
"//examples/compass_gait/..."
"//examples/manipulation_station/..."
"//examples/pendulum/..."
"//examples/quadrotor/..."
"//examples/rimless_wheel/..."
"//examples/van_der_pol/..."
))
)
except("//lcmtypes/...")
except("//tools/install/...")
except("//tools/performance/...")
except(attr(tags, "exclude_from_libdrake", //...))
"""
    # First, find the drake_cc_package_library targets within that query.
    package_libs = []
    for label in _bazel_query([
            'attr(tags, "{}", {})'.format(
                "drake_cc_package_library", components_query)]):
        new_label = _is_full_package_library(label)
        assert new_label
        package_libs.append(new_label)
    # Then, find any remaining cc_library targets that are not part of a
    # drake_cc_package_library.
    misc_libs = _bazel_query([
        components_query + " ".join([
            "except deps({}, 1)".format(x)
            for x in package_libs
        ])])
    # Sort the result for consistency.
    return sorted(package_libs + misc_libs, key=METHOD_NAME)
def main():
    """Regenerate build_components.bzl in place (or write to --output)."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-o", "--output", type=argparse.FileType("w"), default=None,
        help="Output to a given file, instead of `build_components.bzl`.")
    args = parser.parse_args()
    mydir = os.path.abspath(os.path.dirname(sys.argv[0]))
    original_basename = "build_components.bzl"
    original_name = os.path.join(mydir, original_basename)
    # Read the original version.
    with open(original_name, "r") as original:
        original_lines = original.readlines()
    # Extract the header (group 0) and footer (group 2), discarding the list of
    # components in between (group 1).
    header_lines = []
    footer_lines = []
    current_group = 0
    for one_line in original_lines:
        if current_group == 0:
            header_lines.append(one_line)
            if one_line == "LIBDRAKE_COMPONENTS = [\n":
                current_group = 1
            if one_line.startswith(' "//'):
                # A component line before the list opener means the file does
                # not have the structure we expect; bail out.
                raise RuntimeError("Could not find header", header_lines)
        elif current_group == 1:
            if one_line == "]\n":
                footer_lines.append(one_line)
                current_group = 2
        elif current_group == 2:
            footer_lines.append(one_line)
        else:
            current_group = 999
    if current_group != 2:
        raise RuntimeError("Could not find header and footer", current_group)
    # Compute the new contents.
    component_labels = _find_libdrake_components()
    # Write the new version.
    new = args.output
    if new is None:
        new = open(original_name, "w")
    with new:
        for one_line in header_lines:
            new.write(one_line)
        for one_label in component_labels:
            # Labels containing '#' are emitted verbatim; others are quoted.
            if '#' in one_label:
                line = ' ' + one_label
            else:
                line = ' "{}",'.format(one_label)
            if ":" in one_label:
                line += ' # unpackaged'
            new.write(line)
            if len(line) > 79:
                new.write(' # noqa')
            new.write('\n')
        for one_line in footer_lines:
            new.write(one_line)
    # Done.
    return 0
# Fix: removed a stray trailing "|" token that broke the file's syntax.
if __name__ == '__main__':
    main()
# Copyright 2004-2022 Davide Alberani <da@erlug.linux.it>
# 2008-2018 H. Turgut Uyar <uyar@tekir.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
This module provides the classes (and the instances) that are used to parse
the results of a search for a given title.
For example, when searching for the title "the passion", the parsed page
would be:
http://www.imdb.com/find?q=the+passion&s=tt
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from imdb.utils import analyze_title, re_m_kind
from .piculet import Path, Rule, Rules, reducers
from .utils import DOMParserBase, analyze_imdbid
def process_title(tdict):
    """Turn one parsed search hit into an (imdbID, title-info, akas, cover) tuple."""
    movie_id = analyze_imdbid(tdict.get('link'))
    title = tdict.get('title', '').strip()
    kind = (tdict.get('kind') or '').strip()
    # Discard kind values that the title parser would not recognize.
    if not re_m_kind.match('(%s)' % kind):
        kind = ''
    year = (tdict.get('year') or '').strip()
    # Append year first, then kind, matching "Title (year) (kind)".
    for extra in (year, kind):
        if extra:
            title += ' (%s)' % extra
    title_info = analyze_title(title) if title else {}
    return movie_id, title_info, tdict.get('akas'), tdict.get('cover url')
class DOMHTMLSearchMovieParser(DOMParserBase):
    """A parser for the title search page."""
    # Piculet extraction rules: one dict per search-result <li>, transformed
    # by process_title into an (imdbID, title-info, akas, cover) tuple.
    rules = [
        Rule(
            key='data',
            extractor=Rules(
                foreach='//li[contains(@class, "find-title-result")]',
                rules=[
                    Rule(
                        key='link',
                        extractor=Path('.//a[@class="ipc-metadata-list-summary-item__t"]/@href',
                                       reduce=reducers.first)
                    ),
                    Rule(
                        key='title',
                        extractor=Path('.//a[@class="ipc-metadata-list-summary-item__t"]/text()')
                    ),
                    Rule(
                        key='year',
                        extractor=Path('.//span[@class="ipc-metadata-list-summary-item__li"]/text()',
                                       reduce=reducers.first)
                    ),
                    Rule(
                        key='kind',
                        extractor=Path('(.//span[@class="ipc-metadata-list-summary-item__li"])[2]/text()')
                    ),
                    Rule(
                        key='cover url',
                        extractor=Path('.//img[@class="ipc-image"]/@src')
                    )
                ],
                transform=process_title
            )
        )
    ]
    def _init(self):
        self.url = ''
        self.img_type = 'cover url'
    def _reset(self):
        self.url = ''
    def postprocess_data(self, data):
        """Trim to the requested number of results; fold akas/cover into the info dict."""
        if 'data' not in data:
            return {'data': []}
        results = getattr(self, 'results', None)
        if results is not None:
            data['data'][:] = data['data'][:results]
        # Horrible hack to support AKAs.
        data['data'] = [x for x in data['data'] if x[0] and x[1]]
        if data and data['data'] and len(data['data'][0]) == 4 and isinstance(data['data'][0], tuple):
            for idx, datum in enumerate(data['data']):
                if not isinstance(datum, tuple):
                    continue
                # NOTE(review): after the filter above both datum[0] and
                # datum[1] are truthy, so this condition (which parses as
                # `(not datum[0]) and datum[1]`) appears always False — dead
                # code; confirm intent before removing.
                if not datum[0] and datum[1]:
                    continue
                if datum[2] is not None:
                    akas = [aka[1:-1] for aka in datum[2]] # remove the quotes
                    datum[1]['akas'] = akas
                if datum[3] is not None:
                    datum[1][self.img_type] = datum[3]
                data['data'][idx] = (datum[0], datum[1])
        return data
    def METHOD_NAME(self, data):
        # Reference post-processing is a no-op for search results.
        return data
# Parser registry consumed by the imdb.parser.http package:
# name -> ((parser classes,), extra attributes or None).
# Fix: removed a stray trailing "|" token that broke the file's syntax.
_OBJECTS = {
    'search_movie_parser': ((DOMHTMLSearchMovieParser,), None)
}
from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
from conan.tools.build import check_min_cppstd, valid_min_cppstd
from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout
from conan.tools.files import apply_conandata_patches, export_conandata_patches, copy, get, rmdir
from conan.tools.scm import Version
import os
required_conan_version = ">=1.54.0"
class CAFConan(ConanFile):
    """Conan recipe for CAF, the C++ Actor Framework.

    Fix: removed a stray trailing "|" token on the last line that broke the
    file's syntax; the recipe logic is unchanged.
    """
    name = "caf"
    description = "An open source implementation of the Actor Model in C++"
    url = "https://github.com/conan-io/conan-center-index"
    homepage = "https://github.com/actor-framework/actor-framework"
    topics = "actor-framework", "actor-model", "pattern-matching", "actors"
    license = "BSD-3-Clause", "BSL-1.0"
    package_type = "library"
    settings = "os", "arch", "compiler", "build_type"
    options = {
        "shared": [True, False],
        "fPIC": [True, False],
        "log_level": ["error", "warning", "info", "debug", "trace", "quiet"],
        "with_openssl": [True, False],
    }
    default_options = {
        "shared": False,
        "fPIC": True,
        "log_level": "quiet",
        "with_openssl": True,
    }

    @property
    def _min_cppstd(self):
        # CAF requires at least C++17.
        return "17"

    @property
    def _minimum_compilers_version(self):
        # Minimum compiler versions known to build CAF with C++17.
        return {
            "Visual Studio": "16",
            "msvc": "192",
            "gcc": "7",
            "clang": "6", # Should be 5 but clang 5 has a bug that breaks compiling CAF
            # see https://github.com/actor-framework/actor-framework/issues/1226
            "apple-clang": "10",
        }

    def METHOD_NAME(self):
        # Ship the conandata patches together with the exported recipe.
        export_conandata_patches(self)

    def config_options(self):
        if self.settings.os == "Windows":
            del self.options.fPIC

    def configure(self):
        # fPIC is meaningless for shared builds.
        if self.options.shared:
            self.options.rm_safe("fPIC")

    def layout(self):
        cmake_layout(self, src_folder="src")

    def requirements(self):
        if self.options.with_openssl:
            self.requires("openssl/[>=1.1 <4]")

    def validate(self):
        if self.settings.compiler.get_safe("cppstd"):
            check_min_cppstd(self, self._min_cppstd)
        minimum_version = self._minimum_compilers_version.get(str(self.settings.compiler), False)
        if minimum_version and Version(self.settings.compiler.version) < minimum_version:
            raise ConanInvalidConfiguration(
                f"{self.ref} requires C++{self._min_cppstd}, which your compiler does not support."
            )
        if self.settings.compiler == "apple-clang" and Version(self.settings.compiler.version) > "10.0" and \
                self.settings.arch == "x86":
            raise ConanInvalidConfiguration("clang >= 11.0 does not support x86")
        if self.options.shared and self.settings.os == "Windows":
            raise ConanInvalidConfiguration("Shared libraries are not supported on Windows")
        if self.options.with_openssl and self.settings.os == "Windows" and self.settings.arch == "x86":
            raise ConanInvalidConfiguration("OpenSSL is not supported for Windows x86")

    def source(self):
        get(self, **self.conan_data["sources"][self.version], strip_root=True)

    def generate(self):
        tc = CMakeToolchain(self)
        if not valid_min_cppstd(self, self._min_cppstd):
            tc.variables["CMAKE_CXX_STANDARD"] = self._min_cppstd
        tc.variables["CAF_ENABLE_OPENSSL_MODULE"] = self.options.with_openssl
        tc.variables["CAF_ENABLE_EXAMPLES"] = False
        tc.variables["CAF_ENABLE_TOOLS"] = False
        tc.variables["CAF_ENABLE_TESTING"] = False
        tc.variables["CAF_LOG_LEVEL"] = self.options.log_level.value.upper()
        tc.generate()
        deps = CMakeDeps(self)
        deps.generate()

    def build(self):
        apply_conandata_patches(self)
        cmake = CMake(self)
        cmake.configure()
        cmake.build()

    def package(self):
        copy(self, "LICENSE*", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
        cmake = CMake(self)
        cmake.install()
        rmdir(self, os.path.join(self.package_folder, "lib", "cmake"))

    def package_info(self):
        self.cpp_info.set_property("cmake_file_name", "CAF")
        self.cpp_info.components["caf_core"].set_property("cmake_target_name", "CAF::core")
        self.cpp_info.components["caf_core"].libs = ["caf_core"]
        if self.settings.os == "Windows":
            self.cpp_info.components["caf_core"].system_libs = ["iphlpapi"]
        elif self.settings.os in ["Linux", "FreeBSD"]:
            self.cpp_info.components["caf_core"].system_libs = ["pthread", "m"]
        self.cpp_info.components["caf_io"].set_property("cmake_target_name", "CAF::io")
        self.cpp_info.components["caf_io"].libs = ["caf_io"]
        self.cpp_info.components["caf_io"].requires = ["caf_core"]
        if self.settings.os == "Windows":
            self.cpp_info.components["caf_io"].system_libs = ["ws2_32"]
        if self.options.with_openssl:
            self.cpp_info.components["caf_openssl"].set_property("cmake_target_name", "CAF::openssl")
            self.cpp_info.components["caf_openssl"].libs = ["caf_openssl"]
            self.cpp_info.components["caf_openssl"].requires = ["caf_io", "openssl::openssl"]
        # TODO: to remove in conan v2 once cmake_find_package* generators removed
        self.cpp_info.names["cmake_find_package"] = "CAF"
        self.cpp_info.names["cmake_find_package_multi"] = "CAF"
        self.cpp_info.components["caf_core"].names["cmake_find_package"] = "core"
        self.cpp_info.components["caf_core"].names["cmake_find_package_multi"] = "core"
        self.cpp_info.components["caf_io"].names["cmake_find_package"] = "io"
        self.cpp_info.components["caf_io"].names["cmake_find_package_multi"] = "io"
        if self.options.with_openssl:
            self.cpp_info.components["caf_openssl"].names["cmake_find_package"] = "openssl"
            self.cpp_info.components["caf_openssl"].names["cmake_find_package_multi"] = "openssl"
# Copyright (C) 2007 Giampaolo Rodola' <g.rodola@gmail.com>.
# Use of this source code is governed by MIT license that can be
# found in the LICENSE file.
import logging
import os
import sys
import warnings
try:
from StringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
import pyftpdlib
import pyftpdlib.__main__
from pyftpdlib._compat import PY3
from pyftpdlib._compat import super
from pyftpdlib.servers import FTPServer
from pyftpdlib.test import VERBOSITY
from pyftpdlib.test import PyftpdlibTestCase
from pyftpdlib.test import mock
from pyftpdlib.test import safe_rmpath
from pyftpdlib.test import unittest
class TestCommandLineParser(PyftpdlibTestCase):
    """Test command line parser."""
    # Saved originals so each test can restore process-global state.
    SYSARGV = sys.argv
    STDERR = sys.stderr
    def setUp(self):
        super().setUp()
        # Replace the real server class so that main() returns immediately
        # instead of blocking in serve_forever().
        class DummyFTPServer(FTPServer):
            """An overridden version of FTPServer class which forces
            serve_forever() to return immediately.
            """
            def serve_forever(self, *args, **kwargs):
                return
        if PY3:
            import io
            self.devnull = io.StringIO()
        else:
            self.devnull = BytesIO()
        sys.argv = self.SYSARGV[:]
        sys.stderr = self.STDERR
        self.original_ftpserver_class = FTPServer
        pyftpdlib.__main__.FTPServer = DummyFTPServer
    def tearDown(self):
        self.devnull.close()
        sys.argv = self.SYSARGV[:]
        sys.stderr = self.STDERR
        pyftpdlib.servers.FTPServer = self.original_ftpserver_class
        super().tearDown()
    def METHOD_NAME(self):
        # Valid address option, then the option without its argument.
        sys.argv += ["-i", "localhost", "-p", "0"]
        pyftpdlib.__main__.main()
        sys.argv = self.SYSARGV[:]
        # no argument
        sys.argv += ["-a"]
        sys.stderr = self.devnull
        self.assertRaises(SystemExit, pyftpdlib.__main__.main)
    def test_p_option(self):
        sys.argv += ["-p", "0"]
        pyftpdlib.__main__.main()
        # no argument
        sys.argv = self.SYSARGV[:]
        sys.argv += ["-p"]
        sys.stderr = self.devnull
        self.assertRaises(SystemExit, pyftpdlib.__main__.main)
        # invalid argument
        sys.argv += ["-p foo"]
        self.assertRaises(SystemExit, pyftpdlib.__main__.main)
    def test_w_option(self):
        # -w (write access for anonymous) must raise a RuntimeWarning.
        sys.argv += ["-w", "-p", "0"]
        with warnings.catch_warnings():
            warnings.filterwarnings("error")
            self.assertRaises(RuntimeWarning, pyftpdlib.__main__.main)
        # unexpected argument
        sys.argv = self.SYSARGV[:]
        sys.argv += ["-w foo"]
        sys.stderr = self.devnull
        self.assertRaises(SystemExit, pyftpdlib.__main__.main)
    def test_d_option(self):
        dirname = self.get_testfn()
        os.mkdir(dirname)
        sys.argv += ["-d", dirname, "-p", "0"]
        pyftpdlib.__main__.main()
        # without argument
        sys.argv = self.SYSARGV[:]
        sys.argv += ["-d"]
        sys.stderr = self.devnull
        self.assertRaises(SystemExit, pyftpdlib.__main__.main)
        # no such directory
        sys.argv = self.SYSARGV[:]
        sys.argv += ["-d %s" % dirname]
        safe_rmpath(dirname)
        self.assertRaises(ValueError, pyftpdlib.__main__.main)
    def test_r_option(self):
        sys.argv += ["-r 60000-61000", "-p", "0"]
        pyftpdlib.__main__.main()
        # without arg
        sys.argv = self.SYSARGV[:]
        sys.argv += ["-r"]
        sys.stderr = self.devnull
        self.assertRaises(SystemExit, pyftpdlib.__main__.main)
        # wrong arg
        sys.argv = self.SYSARGV[:]
        sys.argv += ["-r yyy-zzz"]
        self.assertRaises(SystemExit, pyftpdlib.__main__.main)
    def test_v_option(self):
        # -v prints the version and exits.
        sys.argv += ["-v"]
        self.assertRaises(SystemExit, pyftpdlib.__main__.main)
        # unexpected argument
        sys.argv = self.SYSARGV[:]
        sys.argv += ["-v foo"]
        sys.stderr = self.devnull
        self.assertRaises(SystemExit, pyftpdlib.__main__.main)
    def test_D_option(self):
        # -D must configure DEBUG-level logging.
        with mock.patch('pyftpdlib.__main__.config_logging') as fun:
            sys.argv += ["-D", "-p 0"]
            pyftpdlib.__main__.main()
            fun.assert_called_once_with(level=logging.DEBUG)
        # unexpected argument
        sys.argv = self.SYSARGV[:]
        sys.argv += ["-V foo"]
        sys.stderr = self.devnull
        self.assertRaises(SystemExit, pyftpdlib.__main__.main)
# Fix: removed a stray trailing "|" token that broke the file's syntax.
if __name__ == '__main__':
    unittest.main(verbosity=VERBOSITY)
from test import test_support as support
# If we end up with a significant number of tests that don't require
# threading, this test module should be split. Right now we skip
# them all if we don't have threading.
threading = support.import_module('threading')
from contextlib import contextmanager
import imaplib
import os.path
import SocketServer
import time
from test_support import reap_threads, verbose, transient_internet
import unittest
try:
import ssl
except ImportError:
ssl = None
CERTFILE = None
class TestImaplib(unittest.TestCase):

    def test_that_Time2Internaldate_returns_a_result(self):
        # Only successful conversion can be checked: the exact value depends
        # on the timezone of the machine running the test.
        timevalues = [
            2000000000,
            2000000000.0,
            time.localtime(2000000000),
            '"18-May-2033 05:33:20 +0200"',
        ]
        for value in timevalues:
            imaplib.Time2Internaldate(value)
# When the ssl module is available, provide a TLS-wrapping TCP server and the
# real IMAP4_SSL class; otherwise stub both so tests can be skipped cleanly.
if ssl:
    class SecureTCPServer(SocketServer.TCPServer):
        # Wrap each accepted connection in TLS using the module-level CERTFILE.
        def get_request(self):
            newsocket, fromaddr = self.socket.accept()
            connstream = ssl.wrap_socket(newsocket,
                server_side=True,
                certfile=CERTFILE)
            return connstream, fromaddr
    IMAP4_SSL = imaplib.IMAP4_SSL
else:
    class SecureTCPServer:
        pass
    IMAP4_SSL = None
class SimpleIMAPHandler(SocketServer.StreamRequestHandler):
    """Minimal server-side IMAP4 handler used by the networked tests."""
    timeout = 1
    def _send(self, message):
        if verbose: print "SENT:", message.strip()
        self.wfile.write(message)
    def handle(self):
        # Send a welcome message.
        self._send('* OK IMAP4rev1\r\n')
        while 1:
            # Gather up input until we receive a line terminator or we timeout.
            # Accumulate read(1) because it's simpler to handle the differences
            # between naked sockets and SSL sockets.
            line = ''
            while 1:
                try:
                    part = self.rfile.read(1)
                    if part == '':
                        # Naked sockets return empty strings..
                        return
                    line += part
                except IOError:
                    # ..but SSLSockets raise exceptions.
                    return
                if line.endswith('\r\n'):
                    break
            if verbose: print 'GOT:', line.strip()
            # Dispatch "TAG COMMAND ARGS..." to a cmd_<COMMAND> method when
            # one exists; otherwise answer BAD.
            splitline = line.split()
            tag = splitline[0]
            cmd = splitline[1]
            args = splitline[2:]
            if hasattr(self, 'cmd_%s' % (cmd,)):
                getattr(self, 'cmd_%s' % (cmd,))(tag, args)
            else:
                self._send('%s BAD %s unknown\r\n' % (tag, cmd))
    def cmd_CAPABILITY(self, tag, args):
        self._send('* CAPABILITY IMAP4rev1\r\n')
        self._send('%s OK CAPABILITY completed\r\n' % (tag,))
class BaseThreadedNetworkedTests(unittest.TestCase):
    """Shared machinery for tests that run a local IMAP server (plain or
    SSL, chosen via server_class/imap_class) in a background thread."""
    def make_server(self, addr, hdlr):
        # Subclass so a handler error tears the server down and propagates
        # instead of being swallowed by the default handle_error().
        class MyServer(self.server_class):
            def handle_error(self, request, client_address):
                self.close_request(request)
                self.server_close()
                raise
        if verbose: print "creating server"
        server = MyServer(addr, hdlr)
        self.assertEqual(server.server_address, server.socket.getsockname())
        if verbose:
            print "server created"
            print "ADDR =", addr
            print "CLASS =", self.server_class
            print "HDLR =", server.RequestHandlerClass
        t = threading.Thread(
            name='%s serving' % self.server_class,
            target=server.serve_forever,
            # Short poll interval to make the test finish quickly.
            # Time between requests is short enough that we won't wake
            # up spuriously too many times.
            kwargs={'poll_interval':0.01})
        t.daemon = True  # In case this function raises.
        t.start()
        if verbose: print "server running"
        return server, t
    def reap_server(self, server, thread):
        if verbose: print "waiting for server"
        server.shutdown()
        thread.join()
        if verbose: print "done"
    @contextmanager
    def reaped_server(self, hdlr):
        # Context manager guaranteeing the server thread is shut down.
        server, thread = self.make_server((support.HOST, 0), hdlr)
        try:
            yield server
        finally:
            self.reap_server(server, thread)
    @reap_threads
    def test_connect(self):
        with self.reaped_server(SimpleIMAPHandler) as server:
            client = self.imap_class(*server.server_address)
            client.shutdown()
    @reap_threads
    def METHOD_NAME(self):
        # A truncated greeting must make the client constructor abort.
        class EOFHandler(SocketServer.StreamRequestHandler):
            def handle(self):
                # EOF without sending a complete welcome message.
                self.wfile.write('* OK')
        with self.reaped_server(EOFHandler) as server:
            self.assertRaises(imaplib.IMAP4.abort,
                              self.imap_class, *server.server_address)
class ThreadedNetworkedTests(BaseThreadedNetworkedTests):
    """Threaded-server tests over a plain TCP connection."""
    server_class = SocketServer.TCPServer
    imap_class = imaplib.IMAP4
@unittest.skipUnless(ssl, "SSL not available")
class ThreadedNetworkedTestsSSL(BaseThreadedNetworkedTests):
    """Threaded-server tests over an SSL connection."""
    server_class = SecureTCPServer
    imap_class = IMAP4_SSL
class RemoteIMAPTest(unittest.TestCase):
    """Tests against a real public IMAP server; only run when the
    'network' resource is enabled (see test_main)."""
    host = 'cyrus.andrew.cmu.edu'
    port = 143
    username = 'anonymous'
    password = 'pass'
    imap_class = imaplib.IMAP4
    def setUp(self):
        with transient_internet(self.host):
            self.server = self.imap_class(self.host, self.port)
    def tearDown(self):
        if self.server is not None:
            self.server.logout()
    def test_logincapa(self):
        # Plain-text login is expected to be disabled on the cleartext port.
        self.assertTrue('LOGINDISABLED' in self.server.capabilities)
    def test_anonlogin(self):
        self.assertTrue('AUTH=ANONYMOUS' in self.server.capabilities)
        rs = self.server.login(self.username, self.password)
        self.assertEqual(rs[0], 'OK')
    def test_logout(self):
        rs = self.server.logout()
        # Prevent tearDown() from logging out a second time.
        self.server = None
        self.assertEqual(rs[0], 'BYE')
@unittest.skipUnless(ssl, "SSL not available")
class RemoteIMAP_SSLTest(RemoteIMAPTest):
    """Same remote-server tests over IMAPS (port 993)."""
    port = 993
    imap_class = IMAP4_SSL
    def test_logincapa(self):
        # Over SSL, plain login must be permitted.
        self.assertFalse('LOGINDISABLED' in self.server.capabilities)
        self.assertTrue('AUTH=PLAIN' in self.server.capabilities)
def test_main():
    """Run the local tests, plus the networked ones when the 'network'
    resource is enabled."""
    tests = [TestImaplib]
    if support.is_resource_enabled('network'):
        if ssl:
            # The threaded SSL tests need the bundled test certificate.
            global CERTFILE
            CERTFILE = os.path.join(os.path.dirname(__file__) or os.curdir,
                                    "keycert.pem")
            if not os.path.exists(CERTFILE):
                raise support.TestFailed("Can't read certificate files!")
        tests.extend([
            ThreadedNetworkedTests, ThreadedNetworkedTestsSSL,
            RemoteIMAPTest, RemoteIMAP_SSLTest,
        ])
    support.run_unittest(*tests)
if __name__ == "__main__":
    # Running the file directly opts in to the networked tests.
    support.use_resources = ['network']
    test_main()
6,680 | get values | import logging
import threading
from typing import List
from control import data
from modules import ripple_control_receiver
from modules.utils import ModuleUpdateCompletedContext
from modules.common.abstract_device import AbstractDevice
from modules.common.component_type import ComponentType, type_to_topic_mapping
from modules.common.store import update_values
from modules.common.utils.component_parser import get_component_obj_by_id
from helpermodules.utils import thread_handler
log = logging.getLogger(__name__)
class Loadvars:
    """Fetches current values from all configured modules and propagates
    them through the counter hierarchy, publishing as it goes."""
    def __init__(self) -> None:
        # Waited on by ModuleUpdateCompletedContext below to know when all
        # published module values have been received.
        self.event_module_update_completed = threading.Event()
    def METHOD_NAME(self) -> None:
        """Collect values from all modules, then aggregate and publish them
        level by level through the counter hierarchy."""
        topic = "openWB/set/system/device/module_update_completed"
        try:
            not_finished_threads = self._set_values()
            levels = data.data.counter_all_data.get_list_of_elements_per_level()
            # Process the hierarchy bottom-up.
            levels.reverse()
            for level in levels:
                with ModuleUpdateCompletedContext(self.event_module_update_completed, topic):
                    self._update_values_of_level(level, not_finished_threads)
            data.data.copy_module_data()
            with ModuleUpdateCompletedContext(self.event_module_update_completed, topic):
                thread_handler(self._get_general(), data.data.general_data.data.control_interval/3)
            with ModuleUpdateCompletedContext(self.event_module_update_completed, topic):
                data.data.pv_all_data.calc_power_for_all_components()
                data.data.bat_all_data.calc_power_for_all_components()
            if self.event_module_update_completed.wait(data.data.general_data.data.control_interval/2) is False:
                log.error("Daten wurden noch nicht vollständig empfangen. Timeout abgelaufen, fortsetzen der Regelung.")
        except Exception:
            log.exception("Fehler im loadvars-Modul")
    def _set_values(self) -> List[str]:
        """Start one thread per device/chargepoint to fetch fresh values.

        Returns the names of the threads that did not finish within the
        timeout (a third of the control interval).
        """
        modules_threads: List[threading.Thread] = []
        for item in data.data.system_data.values():
            try:
                if isinstance(item, AbstractDevice):
                    modules_threads.append(threading.Thread(target=item.update, args=(),
                                                            name=f"device{item.device_config.id}"))
            except Exception:
                log.exception(f"Fehler im loadvars-Modul bei Element {item}")
        for cp in data.data.cp_data.values():
            try:
                modules_threads.append(threading.Thread(target=cp.chargepoint_module.METHOD_NAME,
                                       args=(), name=f"set values cp{cp.chargepoint_module.config.id}"))
            except Exception:
                log.exception(f"Fehler im loadvars-Modul bei Element {cp.num}")
        return thread_handler(modules_threads, data.data.general_data.data.control_interval/3)
    def _update_values_of_level(self, elements, not_finished_threads: List[str]) -> None:
        """Threads to combine values of one hierarchy level with each other
        where necessary and to publish them."""
        modules_threads: List[threading.Thread] = []
        for element in elements:
            try:
                if element["type"] == ComponentType.CHARGEPOINT.value:
                    chargepoint = data.data.cp_data[f'{type_to_topic_mapping(element["type"])}{element["id"]}']
                    if self.thread_without_set_value(modules_threads, not_finished_threads) is False:
                        modules_threads.append(threading.Thread(
                            target=update_values,
                            args=(chargepoint.chargepoint_module,),
                            name=f"update values cp{chargepoint.chargepoint_module.config.id}"))
                else:
                    component = get_component_obj_by_id(element["id"], not_finished_threads)
                    if component is None:
                        continue
                    modules_threads.append(threading.Thread(target=update_values, args=(
                        component,), name=f"component{component.component_config.id}"))
            except Exception:
                log.exception(f"Fehler im loadvars-Modul bei Element {element}")
        thread_handler(modules_threads, data.data.general_data.data.control_interval/3)
    def thread_without_set_value(self,
                                 modules_threads: List[threading.Thread],
                                 not_finished_threads: List[str]) -> bool:
        """Return True when an unfinished thread name matches a thread
        already queued in modules_threads.

        NOTE(review): not_finished_threads carries "set values cp..." names
        while modules_threads holds "update values cp..." names, so a match
        appears impossible here — verify the intended comparison.
        """
        for t in not_finished_threads:
            for module_thread in modules_threads:
                if t == module_thread.name:
                    return True
        return False
    def _get_general(self) -> List[threading.Thread]:
        """Build the list of general (non-device) value-fetching threads."""
        threads = []  # type: List[threading.Thread]
        try:
            # On the first run an exception is always raised here because the
            # data still has to be copied into the data module.
            if data.data.general_data.data.ripple_control_receiver.configured:
                threads.append(threading.Thread(target=ripple_control_receiver.read, args=(), name="get general"))
        except Exception:
            log.exception("Fehler im loadvars-Modul")
        finally:
            # NOTE: returning from finally also swallows any re-raised error.
            return threads
6,681 | wait | """Wait for the specified tests to finish, printing progress reports along
the way."""
import os
import time
from typing import List
from pavilion import cmd_utils
from pavilion import status_utils
from pavilion.output import fprint
from pavilion.status_file import STATES
from pavilion.test_run import TestRun
from .base_classes import Command
def check_pgid(pgid):
    """Checks if pgid still exists. Returns false if pgid does not exist."""
    # os.kill() interprets a negative PID as a process-group ID, and
    # signal 0 performs an existence/permission check without actually
    # delivering a signal.
    target = -abs(pgid)
    try:
        os.kill(target, 0)
    except OSError:
        # The process group is gone (or inaccessible).
        return False
    return True
class WaitCommand(Command):
    """A command to wait for test completion."""
    def __init__(self):
        super().__init__('wait', 'Wait for the specified test or series to '
                         'complete or fail and return the status.',
                         short_help="Wait for statuses of tests.")
    OUT_SILENT = 'silent'
    OUT_SUMMARY = 'summary'
    def _setup_arguments(self, parser):
        """Register the wait command's arguments."""
        parser.add_argument(
            '-t', '--timeout', action='store',
            help='Maximum time to wait for results in seconds. Default is to '
                 'wait indefinitely.'
        )
        parser.add_argument(
            'tests', nargs='*', action='store',
            help='The name(s) of the tests to check. These may be any mix of '
                 'test IDs and series IDs. If no value is provided, the most '
                 'recent series submitted by this user is checked.'
        )
        group = parser.add_mutually_exclusive_group()
        group.add_argument(
            '-s', '--silent',
            action='store_const', dest='out_mode', const=self.OUT_SILENT,
            help="No periodic status output."
        )
        group.add_argument(
            '--summary',
            action='store_const', dest='out_mode', const=self.OUT_SUMMARY,
            help="Prints a summary of the status."
        )
    def run(self, pav_cfg, args):
        """Wait for the requested tests to complete.

        :returns: 0 always; a timeout simply ends the wait.
        """
        # get start time
        start_time = time.time()
        tests = cmd_utils.get_tests_by_id(pav_cfg, args.tests, self.errfile)
        # determine timeout time, if there is one
        end_time = None
        if args.timeout is not None:
            end_time = start_time + float(args.timeout)
        self.METHOD_NAME(pav_cfg, tests, end_time, args.out_mode)
        return 0
    STATUS_UPDATE_PERIOD = 5  # seconds
    def METHOD_NAME(self, pav_cfg, tests: List[TestRun],
                    end_time: float, out_mode: str) -> None:
        """Wait on each of the given tests to complete, printing a status
        message periodically according to *out_mode*.

        :param end_time: Absolute timestamp to stop waiting at, or None to
            wait indefinitely.
        """
        done_tests = []
        all_tests = list(tests)
        all_tests.sort(key=lambda t: t.full_id)
        tests = list(tests)
        status_time = time.time() + self.STATUS_UPDATE_PERIOD
        # NOTE(review): this loop polls continuously without sleeping
        # between iterations.
        while tests and (end_time is None or time.time() < end_time):
            # Partition completed vs. still-running tests. (The previous
            # code called tests.remove() while iterating over the same
            # list, which skips the element after every completed test.)
            still_running = []
            for test in tests:
                if test.complete:
                    done_tests.append(test)
                else:
                    still_running.append(test)
            tests = still_running
            # print status every STATUS_UPDATE_PERIOD seconds
            if time.time() > status_time:
                status_time = time.time() + self.STATUS_UPDATE_PERIOD
                stats = status_utils.get_statuses(pav_cfg, all_tests)
                stats_out = []
                if out_mode == self.OUT_SILENT:
                    pass
                elif out_mode == self.OUT_SUMMARY:
                    # Aggregate a "state: count" summary on a single line.
                    states = {}
                    for test_state in stats:
                        if test_state['state'] not in states.keys():
                            states[test_state['state']] = 1
                        else:
                            states[test_state['state']] += 1
                    status_counts = []
                    for state, count in states.items():
                        status_counts.append(state + ': ' + str(count))
                    fprint(self.outfile, ' | '.join(status_counts), width=None, end='\r')
                else:
                    # Default mode: one timestamped line per test.
                    for test_state in stats:
                        stat = [str(time.ctime(time.time())), ':',
                                'test #',
                                str(test_state['test_id']),
                                test_state['name'],
                                test_state['state'],
                                test_state['note'],
                                "\n"]
                        stats_out.append(' '.join(stat))
                    fprint(self.outfile, ''.join(map(str, stats_out)), width=None)
        # NOTE(review): the final report covers only the tests that were
        # still incomplete when the loop exited; confirm whether all_tests
        # was intended here.
        final_stats = status_utils.get_statuses(pav_cfg, tests)
        fprint(self.outfile, '\n')
        status_utils.print_status(final_stats, self.outfile)
6,682 | test integrate ro i errors | # ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
from __future__ import absolute_import, division, print_function
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_raises
from skbeam.core.spectroscopy import align_and_scale, integrate_ROI, integrate_ROI_spectrum
def synthetic_data(E, E0, sigma, alpha, k, beta):
    """
    Return synthetic data of the form

    d = alpha * e ** (-(E - E0)**2 / (2 * sigma ** 2)) + beta * (1 + sin(k * E))

    (docstring corrected to match the implementation: the oscillation
    term carries a constant offset of 1.)

    Parameters
    ----------
    E : ndarray
        The energies to compute values at
    E0 : float
        Location of the peak
    sigma : float
        Width of the peak
    alpha : float
        Height of peak
    k : float
        Frequency of oscillations
    beta : float
        Magnitude of oscillations
    """
    return alpha * np.exp(-((E - E0) ** 2) / (2 * sigma**2)) + beta * (1 + np.sin(k * E))
def test_align_and_scale_smoketest():
    """Smoke test: align_and_scale runs on shifted/scaled synthetic spectra
    without raising; the results themselves are not validated."""
    # does nothing but call the function
    # make data
    E = np.linspace(0, 50, 1000)
    # this is not efficient for large lists, but quick and dirty
    e_list = []
    c_list = []
    for j in range(25, 35, 2):
        e_list.append(E)
        c_list.append(synthetic_data(E, j + j / 100, j / 10, 1000, 2 * np.pi * 6 / 50, 60))
    # call the function
    e_cor_list, c_cor_list = align_and_scale(e_list, c_list)
def METHOD_NAME():
    """Exercise the input-validation errors raised by integrate_ROI."""
    E = np.arange(100)
    C = np.ones_like(E)
    # limits out of order
    assert_raises(ValueError, integrate_ROI, E, C, [32, 1], [2, 10])
    # bottom out of range
    assert_raises(ValueError, integrate_ROI, E, C, -1, 2)
    # top out of range
    assert_raises(ValueError, integrate_ROI, E, C, 2, 110)
    # different length limits
    assert_raises(
        ValueError,
        integrate_ROI,
        E,
        C,
        [32, 1],
        [2, 10, 32],
    )
    # independent variable (x_value_array) not increasing monotonically
    assert_raises(ValueError, integrate_ROI, C, C, 2, 10)
    # outliers present in x_value_array which violate monotonic requirement
    E[2] = 50
    E[50] = 2
    assert_raises(ValueError, integrate_ROI, E, C, 2, 60)
def test_integrate_ROI_compute():
    """Check trapezoidal ROI integration of a flat spectrum, including
    multiple regions at once."""
    E = np.arange(100)
    C = np.ones_like(E)
    assert_array_almost_equal(integrate_ROI(E, C, 5.5, 6.5), 1)
    assert_array_almost_equal(integrate_ROI(E, C, 5.5, 11.5), 6)
    assert_array_almost_equal(integrate_ROI(E, C, [5.5, 17], [11.5, 23]), 12)
def test_integrate_ROI_spectrum_compute():
    """Check bin-edge ROI integration (101 edges for 100 counts)."""
    C = np.ones(100)
    E = np.arange(101)
    assert_array_almost_equal(integrate_ROI_spectrum(E, C, 5, 6), 1)
    assert_array_almost_equal(integrate_ROI_spectrum(E, C, 5, 11), 6)
    assert_array_almost_equal(integrate_ROI_spectrum(E, C, [5, 17], [11, 23]), 12)
def test_integrate_ROI_reverse_input():
    """Integrating a reversed (monotonically decreasing) x array must give
    the same result as the forward ordering."""
    E = np.arange(100)
    C = E[::-1]
    E_rev = E[::-1]
    C_rev = C[::-1]
    assert_array_almost_equal(
        integrate_ROI(E_rev, C_rev, [5.5, 17], [11.5, 23]), integrate_ROI(E, C, [5.5, 17], [11.5, 23])
    )
6,683 | call | #!/bin/true
#
# util.py - part of autospec
# Copyright (C) 2015 Intel Corporation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import hashlib
import os
import re
import shlex
import subprocess
import sys
# Translation dictionary ("name=alternate" lines), loaded once at import
# time. A context manager closes the file handle, which the previous code
# left open.
dictionary_filename = os.path.dirname(__file__) + "/translate.dic"
with open(dictionary_filename, 'r') as _dictionary_file:
    dictionary = [line.strip() for line in _dictionary_file]
# Lazily-initialized list of $PATH entries (see binary_in_path).
os_paths = None
def METHOD_NAME(command, logfile=None, check=True, **kwargs):
    """Subprocess.call convenience wrapper.

    Args:
        command: Command line to run; tokenized with shlex.split().
        logfile: Optional path; when given, stdout and stderr are written
            to this file instead of being inherited.
        check: When True, raise subprocess.CalledProcessError on a
            non-zero exit status.
        **kwargs: Extra keyword arguments for subprocess.call().

    Returns:
        The command's exit status.
    """
    full_args = {
        "args": shlex.split(command),
        "universal_newlines": True,
    }
    full_args.update(kwargs)
    if logfile:
        # Context manager so the log file is closed even when the
        # subprocess call raises (the previous code leaked the handle).
        with open(logfile, "w") as log_f:
            full_args["stdout"] = log_f
            full_args["stderr"] = subprocess.STDOUT
            returncode = subprocess.call(**full_args)
    else:
        returncode = subprocess.call(**full_args)
    if check and returncode != 0:
        raise subprocess.CalledProcessError(returncode, full_args["args"], None)
    return returncode
def _file_write(self, s):
s = s.strip()
if not s.endswith("\n"):
s += "\n"
self.write(s)
def translate(package):
    """Convert terms to their alternate definition.

    Looks *package* up in the module-level translation dictionary
    ("name=alternate" lines); returns the alternate when present,
    otherwise the package name unchanged.
    """
    prefix = package + "="
    for entry in dictionary:
        if entry.startswith(prefix):
            return entry.split("=")[1]
    return package
def do_regex(patterns, re_str):
    """Search *re_str* against each pattern in turn; return the first
    match object found, or None when nothing matches."""
    for pattern in patterns:
        found = re.search(pattern, re_str)
        if found:
            return found
    return None
def get_contents(filename):
    """Return the raw bytes of *filename*.

    (The trailing ``return None`` after the with-block in the previous
    version was unreachable and has been removed.)
    """
    with open(filename, "rb") as f:
        return f.read()
def get_sha1sum(filename):
    """Return the hex SHA-1 digest of *filename*'s contents."""
    digest = hashlib.sha1(get_contents(filename))
    return digest.hexdigest()
def _supports_color():
# FIXME: check terminfo instead
return sys.stdout.isatty()
def _print_message(message, level, color=None):
prefix = level
if color and _supports_color():
# FIXME: use terminfo instead
if color == 'red':
params = '31;1'
elif color == 'green':
params = '32;1'
elif color == 'yellow':
params = '33;1'
elif color == 'blue':
params = '34;1'
prefix = f'\033[{params}m{level}\033[0m'
print(f'[{prefix}] {message}')
def print_error(message):
    """Print error, color coded for TTYs."""
    _print_message(message, 'ERROR', 'red')
def print_fatal(message):
    """Print fatal error, color coded for TTYs."""
    _print_message(message, 'FATAL', 'red')
def print_warning(message):
    """Print warning, color coded for TTYs."""
    # NOTE(review): warnings use red, same as errors; yellow may have been
    # intended — confirm before changing.
    _print_message(message, 'WARNING', 'red')
def print_info(message):
    """Print informational message, color coded for TTYs."""
    _print_message(message, 'INFO', 'yellow')
def print_success(message):
    """Print success message, color coded for TTYs."""
    _print_message(message, 'SUCCESS', 'green')
def binary_in_path(binary):
    """Determine if the given binary exists in the provided filesystem paths.

    The $PATH entries are read once and cached in the module-level
    os_paths list.
    """
    global os_paths
    if not os_paths:
        os_paths = os.getenv("PATH", default="/usr/bin:/bin").split(os.pathsep)
    return any(os.path.exists(os.path.join(path, binary)) for path in os_paths)
def write_out(filename, content, mode="w"):
    """File.write convenience wrapper.

    Opens *filename* via open_auto() (UTF-8 with surrogateescape) and
    writes *content* to it.
    """
    with open_auto(filename, mode) as require_f:
        require_f.write(content)
def open_auto(*args, **kwargs):
    """Open a file with UTF-8 encoding.

    Open file with UTF-8 encoding and "surrogate" escape characters that are
    not valid UTF-8 to avoid data corruption.
    """
    # 'encoding' and 'errors' are fourth and fifth positional arguments, so
    # restrict the args tuple to (file, mode, buffering) at most
    assert len(args) <= 3
    assert 'encoding' not in kwargs
    assert 'errors' not in kwargs
    kwargs.update(encoding="utf-8", errors="surrogateescape")
    return open(*args, **kwargs)
6,684 | log update type | # SPDX-License-Identifier: MPL-2.0
# Copyright (C) 2021 - 2023 Gemeente Amsterdam
import pytz
from django.conf import settings
from django.utils import timezone
from signals.apps.feedback.models import Feedback
from signals.apps.history.models import Log
from signals.apps.questionnaires.models import Questionnaire, Session
from signals.apps.signals.models import (
CategoryAssignment,
Location,
Note,
Priority,
Signal,
SignalDepartments,
SignalUser,
Status
)
from signals.apps.signals.models import Type as _Type
class SignalLogService:
    """Creates history Log entries for the various mutations a Signal can
    undergo (status, location, category, priority, assignments, feedback,
    external reactions)."""
    @staticmethod
    def log_create_initial(signal: Signal) -> None:
        """Log a Signal's creation plus the initial state of all its
        sub-objects (location, status, category, priority, type)."""
        if signal.is_child:
            # We cannot create a GenericRelation on the Signal model because the naming will clash with the ForeignKey
            # `_signal` defined on the Log model. So for now Log rules for a specific Signal are created as seen here:
            Log.objects.create(
                action=Log.ACTION_CREATE,
                extra=signal.id,
                object=signal,
                created_by=None,
                created_at=signal.created_at,
                _signal=signal.parent,
            )
        SignalLogService.log_update_location(signal.location)
        SignalLogService.log_update_status(signal.status)
        SignalLogService.log_update_category_assignment(signal.category_assignment)
        SignalLogService.log_update_priority(signal.priority)
        SignalLogService.METHOD_NAME(signal.type_assignment)
    @staticmethod
    def log_create_note(note: Note) -> None:
        """Log the creation of a Note on its Signal."""
        if not isinstance(note, Note):
            return
        note.history_log.create(
            action=Log.ACTION_CREATE,
            description=note.text,
            extra='Notitie toegevoegd',
            created_by=note.created_by,
            created_at=note.created_at,
            _signal=note._signal,
        )
    @staticmethod
    def log_update_category_assignment(category_assignment: CategoryAssignment) -> None:
        """Log a category change; on the first assignment also log the
        category's service-level-objective handling message."""
        if not isinstance(category_assignment, CategoryAssignment):
            return
        if category_assignment.category.slo.exists() and category_assignment._signal.categories.count() == 1:
            category_assignment.category.slo.first().history_log.create(
                action=Log.ACTION_UPDATE,
                description=category_assignment.stored_handling_message,
                created_by=category_assignment.created_by,
                created_at=category_assignment.created_at,
                _signal=category_assignment._signal,
            )
        category_assignment.history_log.create(
            action=Log.ACTION_UPDATE,
            extra=category_assignment.category.name,
            created_by=category_assignment.created_by,
            created_at=category_assignment.created_at,
            _signal=category_assignment._signal,
        )
    @staticmethod
    def log_update_location(location: Location) -> None:
        """Log a location change on the Signal."""
        if not isinstance(location, Location):
            return
        location.history_log.create(
            action=Log.ACTION_UPDATE,
            extra='Locatie gewijzigd',
            created_by=location.created_by,
            created_at=location.created_at,
            _signal=location._signal,
        )
    @staticmethod
    def log_update_priority(priority: Priority) -> None:
        """Log a priority change on the Signal."""
        if not isinstance(priority, Priority):
            return
        priority.history_log.create(
            action=Log.ACTION_UPDATE,
            extra=priority.priority,
            created_by=priority.created_by,
            created_at=priority.created_at,
            _signal=priority._signal,
        )
    @staticmethod
    def log_update_status(status: Status) -> None:
        """Log a status transition on the Signal."""
        if not isinstance(status, Status):
            return
        status.history_log.create(
            action=Log.ACTION_UPDATE,
            description=status.text,
            extra=status.state,
            created_by=status.created_by,
            created_at=status.created_at,
            _signal=status._signal,
        )
    @staticmethod
    def METHOD_NAME(_type: _Type) -> None:
        """Log a type change on the Signal."""
        if not isinstance(_type, _Type):
            return
        _type.history_log.create(
            action=Log.ACTION_UPDATE,
            extra=_type.name,
            created_by=_type.created_by,
            created_at=_type.created_at,
            _signal=_type._signal,
        )
    @staticmethod
    def log_update_user_assignment(user_assignment: SignalUser) -> None:
        """Log (re)assignment of a user to the Signal; extra is None when
        the assignment was cleared."""
        if not isinstance(user_assignment, SignalUser):
            return
        log_extra = user_assignment.user.email if user_assignment.user else None
        user_assignment.history_log.create(
            action=Log.ACTION_UPDATE,
            extra=log_extra,
            created_by=user_assignment.created_by,
            created_at=user_assignment.created_at,
            _signal=user_assignment._signal,
        )
    @staticmethod
    def log_update_signal_departments(signal_departments: SignalDepartments) -> None:
        """Log a department-routing change; extra is the comma-separated
        list of department codes."""
        if not isinstance(signal_departments, SignalDepartments):
            return
        extra = ', '.join(signal_departments.departments.values_list('code', flat=True))
        signal_departments.history_log.create(
            action=Log.ACTION_UPDATE,
            extra=extra,
            description=None,
            created_by=signal_departments.created_by,
            created_at=signal_departments.created_at,
            _signal=signal_departments._signal,
        )
    @staticmethod
    def log_receive_feedback(feedback: Feedback) -> None:
        """Log submitted reporter feedback; ignores unsubmitted feedback."""
        if not isinstance(feedback, Feedback):
            return
        if feedback.submitted_at is None:
            # No feedback was submitted, so we don't log anything
            return
        feedback.history_log.create(
            action=Log.ACTION_CREATE,
            extra='Feedback ontvangen',
            description=feedback.get_description(),
            created_by=None,
            created_at=feedback.submitted_at,
            _signal=feedback._signal,
        )
    @staticmethod
    def log_external_reaction_received(session: Session, reaction: str) -> None:
        """Log a reaction received from an external collaborator via a
        forward-to-external questionnaire session (only once frozen)."""
        if not isinstance(session, Session):
            return
        if session.questionnaire.flow != Questionnaire.FORWARD_TO_EXTERNAL:
            return
        if not session.frozen:
            return
        tz = pytz.timezone(settings.TIME_ZONE)
        when = session._signal_status.created_at.astimezone(tz).strftime('%d-%m-%Y %H:%M')
        description = f'Toelichting externe behandelaar op vraag van {when} {reaction}'
        session.history_log.create(
            action=Log.ACTION_RECEIVE,
            description=description,
            created_by=None,
            created_at=timezone.now(),
            _signal=session._signal,
        )
    @staticmethod
    def log_external_reaction_not_received(session: Session) -> None:
        """Log that no external reaction arrived before the session is
        invalidated (called just before invalidation)."""
        if not isinstance(session, Session):
            return
        if session.questionnaire.flow != Questionnaire.FORWARD_TO_EXTERNAL:
            return
        # Log is created just before invalidating the session, hence check that it is not yet invalidated.
        if session.frozen or session.invalidated:
            return
        external_user = session._signal_status.email_override
        when = session._signal_status.created_at.strftime('%d-%m-%Y %H:%M')
        description = f'Geen toelichting ontvangen van behandelaar {external_user} op vraag van {when}'
        session.history_log.create(
            action=Log.ACTION_NOT_RECEIVED,
            description=description,
            created_by=None,
            created_at=timezone.now(),
            _signal=session._signal,
        )
6,685 | test topk | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for vision package"""
import sys
import numpy as np
import pytest
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import te, topi
# Per-target (compute, schedule) implementation tables used by the tests.
_sort_implement = {
    "generic": (topi.sort, topi.generic.schedule_sort),
    "gpu": (topi.cuda.sort, topi.cuda.schedule_sort),
}
_argsort_implement = {
    "generic": (topi.argsort, topi.generic.schedule_argsort),
    "gpu": (topi.cuda.argsort, topi.cuda.schedule_argsort),
}
_topk_implement = {
    "generic": (topi.topk, topi.generic.schedule_topk),
    "gpu": (topi.cuda.topk, topi.cuda.schedule_topk),
}
# Parametrized fixtures shared by the sort/argsort/topk tests below.
axis = tvm.testing.parameter(0, -1, 1)
is_ascend = tvm.testing.parameter(True, False, ids=["is_ascend", "not_ascend"])
dtype = tvm.testing.parameter("int64", "float32")
topk = tvm.testing.parameter(0, 1, 5)
topk_ret_type = tvm.testing.parameter("values", "indices", "both")
def test_sort(target, dev, axis, is_ascend):
    """Compare topi sort against numpy.sort for the parametrized axis/order."""
    np.random.seed(0)
    dshape = (20, 100)
    data_dtype = "float32"
    data = te.placeholder(dshape, name="data", dtype=data_dtype)
    # A shuffled permutation guarantees all values are distinct.
    perm = np.arange(dshape[0] * dshape[1], dtype=data_dtype)
    np.random.shuffle(perm)
    np_data = perm.reshape(dshape)
    if is_ascend:
        np_sort = np.sort(np_data, axis=axis)
    else:
        np_sort = -np.sort(-np_data, axis=axis)
    if axis == 0:
        np_sort = np_sort[: dshape[axis], :]
    else:
        np_sort = np_sort[:, : dshape[axis]]
    with tvm.target.Target(target):
        fcompute, fschedule = tvm.topi.testing.dispatch(target, _sort_implement)
        out = fcompute(data, axis=axis, is_ascend=is_ascend)
        s = fschedule(out)
        tvm_data = tvm.nd.array(np_data, dev)
        tvm_out = tvm.nd.array(np.zeros(dshape, dtype=data_dtype), dev)
        f = tvm.build(s, [data, out], target)
        f(tvm_data, tvm_out)
        # NOTE(review): rtol=1e0 is an extremely loose tolerance for an
        # exact sort of distinct values; confirm whether exact equality
        # was intended.
        tvm.testing.assert_allclose(tvm_out.numpy(), np_sort, rtol=1e0)
def test_argsort(target, dev, axis, is_ascend):
    """Compare topi argsort against numpy.argsort for the parametrized
    axis/order."""
    # Seed the RNG for determinism, matching test_sort and the topk test;
    # previously this test shuffled with an unseeded global RNG.
    np.random.seed(0)
    dshape = (20, 100)
    data_dtype = "float32"
    data = te.placeholder(dshape, name="data", dtype=data_dtype)
    # A shuffled permutation guarantees all values are distinct, so the
    # argsort result is unique.
    perm = np.arange(dshape[0] * dshape[1], dtype=data_dtype)
    np.random.shuffle(perm)
    np_data = perm.reshape(dshape)
    if is_ascend:
        np_indices = np.argsort(np_data, axis=axis)
    else:
        np_indices = np.argsort(-np_data, axis=axis)
    if axis == 0:
        np_indices = np_indices[: dshape[axis], :]
    else:
        np_indices = np_indices[:, : dshape[axis]]
    with tvm.target.Target(target):
        fcompute, fschedule = tvm.topi.testing.dispatch(target, _argsort_implement)
        out = fcompute(data, axis=axis, is_ascend=is_ascend)
        s = fschedule(out)
        tvm_data = tvm.nd.array(np_data, dev)
        tvm_out = tvm.nd.array(np.zeros(dshape, dtype=data_dtype), dev)
        f = tvm.build(s, [data, out], target)
        f(tvm_data, tvm_out)
        tvm.testing.assert_allclose(tvm_out.numpy(), np_indices.astype(data_dtype), rtol=1e0)
def METHOD_NAME(target, dev, topk, axis, topk_ret_type, is_ascend, dtype):
    """Compare topi topk against a NumPy reference for the parametrized
    k / axis / return-type / order / index-dtype combinations."""
    np.random.seed(0)
    shape = (20, 100)
    data_dtype = "float32"
    data = te.placeholder(shape, name="data", dtype=data_dtype)
    np_data = np.random.uniform(size=shape).astype(data_dtype)
    if is_ascend:
        np_indices = np.argsort(np_data, axis=axis)
    else:
        np_indices = np.argsort(-np_data, axis=axis)
    # topk == 0 means "keep everything along the axis".
    kk = topk if topk >= 1 else shape[axis]
    if axis == 0:
        np_indices = np_indices[:kk, :]
        np_values = np.zeros(np_indices.shape).astype(data_dtype)
        for i in range(shape[1]):
            np_values[:, i] = np_data[np_indices[:, i], i]
    else:
        np_indices = np_indices[:, :kk]
        np_values = np.zeros(np_indices.shape).astype(data_dtype)
        for i in range(shape[0]):
            np_values[i, :] = np_data[i, np_indices[i, :]]
    np_indices = np_indices.astype(dtype)
    with tvm.target.Target(target):
        fcompute, fschedule = tvm.topi.testing.dispatch(target, _topk_implement)
        outs = fcompute(data, topk, axis, topk_ret_type, is_ascend, dtype)
        # fcompute returns a single tensor for "values"/"indices" and a
        # list for "both"; normalize to a list.
        outs = outs if isinstance(outs, list) else [outs]
        s = fschedule(outs)
        tvm_data = tvm.nd.array(np_data, dev)
        tvm_res = []
        for t in outs:
            tvm_res.append(tvm.nd.empty(t.shape, dtype=t.dtype, device=dev))
        f = tvm.build(s, [data] + outs, target)
        f(tvm_data, *tvm_res)
        if topk_ret_type == "both":
            tvm.testing.assert_allclose(tvm_res[0].numpy(), np_values)
            tvm.testing.assert_allclose(tvm_res[1].numpy(), np_indices)
        elif topk_ret_type == "values":
            tvm.testing.assert_allclose(tvm_res[0].numpy(), np_values)
        else:
            tvm.testing.assert_allclose(tvm_res[0].numpy(), np_indices)
if __name__ == "__main__":
    tvm.testing.main()
6,686 | test get versions no json | # -*- coding: utf-8 -*-
#
# Copyright © 2017 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2, or (at your option) any later
# version. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details. You
# should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Any Red Hat trademarks that are incorporated in the source
# code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission
# of Red Hat, Inc.
"""
Tests for the :mod:`anitya.lib.backends.crates` module.
"""
from __future__ import unicode_literals
import unittest
import mock
from anitya.db import models
from anitya.lib.backends import crates
from anitya.lib.exceptions import AnityaPluginException
from anitya.tests.base import DatabaseTestCase
class CratesBackendTests(DatabaseTestCase):
    """Tests for the crates.io backend (:mod:`anitya.lib.backends.crates`)."""

    def setUp(self):
        """Set up the environment, run before every test."""
        super().setUp()
        self.create_project()

    def create_project(self):
        """Create some basic projects to work with."""
        # project1 exists on crates.io; project2 deliberately does not, so
        # lookups against it exercise the error path.
        project1 = models.Project(
            name="itoa", homepage="https://crates.io/crates/itoa", backend="crates.io"
        )
        project2 = models.Project(
            name="pleasedontmakethisprojectitllbreakmytests",
            homepage="https://crates.io/crates/somenonsensehomepage",
            backend="crates.io",
        )
        self.session.add(project1)
        self.session.add(project2)
        self.session.commit()

    def test_get_version(self):
        """Test the get_version function of the crates backend."""
        project = models.Project.by_id(self.session, 1)
        self.assertEqual("0.2.1", crates.CratesBackend.get_version(project))

    def test_get_version_missing(self):
        """Assert an exception is raised if a project doesn't exist and get_version is called"""
        project = models.Project.get(self.session, 2)
        self.assertRaises(
            AnityaPluginException, crates.CratesBackend.get_version, project
        )

    def test_get_version_url(self):
        """Assert that correct url is returned."""
        project = models.Project(
            name="test", homepage="http://example.org", backend="crates.io"
        )
        exp = "https://crates.io/api/v1/crates/test/versions"
        obs = crates.CratesBackend.get_version_url(project)
        self.assertEqual(obs, exp)

    def test_get_versions(self):
        """Test the get_versions function of the crates backend."""
        expected_versions = ["0.2.1", "0.2.0", "0.1.1", "0.1.0"]
        project = models.Project.by_id(self.session, 1)
        self.assertEqual(expected_versions, crates.CratesBackend.get_versions(project))

    def test_get_ordered_versions(self):
        """Test the get_ordered_versions function of the crates backend."""
        expected_versions = ["0.2.1", "0.2.0", "0.1.1", "0.1.0"]
        project = models.Project.by_id(self.session, 1)
        self.assertEqual(
            expected_versions, crates.CratesBackend.get_ordered_versions(project)
        )

    @mock.patch("anitya.lib.backends.crates.CratesBackend.call_url")
    def test_get_versions_no_json(self, mock_call_url):
        """Assert we handle getting non-JSON responses gracefully"""
        # Renamed from the METHOD_NAME placeholder: without a ``test_`` prefix
        # unittest never discovered or ran this test.
        mock_call_url.return_value.json.side_effect = ValueError
        project = models.Project.by_id(self.session, 1)
        with self.assertRaises(AnityaPluginException) as context_manager:
            crates.CratesBackend._get_versions(project)  # pylint: disable=W0212
        self.assertIn("Failed to decode JSON", str(context_manager.exception))

    def test_get_versions_not_modified(self):
        """Assert that not modified response is handled correctly"""
        pid = 1
        project = models.Project.get(self.session, pid)
        exp_url = "https://crates.io/api/v1/crates/itoa/versions"
        with mock.patch("anitya.lib.backends.BaseBackend.call_url") as m_call:
            m_call.return_value = mock.Mock(status_code=304)
            versions = crates.CratesBackend.get_versions(project)
            m_call.assert_called_with(exp_url, last_change=None)
            self.assertEqual(versions, [])
if __name__ == "__main__":
    # verbosity=2 prints each test name as it runs when invoked directly.
    unittest.main(verbosity=2)
6,687 | completions response | from unittest import mock
import pytest
from fastapi import HTTPException
from fastapi.encoders import jsonable_encoder
from pydantic import ValidationError
from mlflow.gateway.config import RouteConfig
from mlflow.gateway.constants import MLFLOW_AI_GATEWAY_ANTHROPIC_MAXIMUM_MAX_TOKENS
from mlflow.gateway.providers.anthropic import AnthropicProvider
from mlflow.gateway.schemas import chat, completions, embeddings
from tests.gateway.tools import MockAsyncResponse
def completions_response():
    """Canned raw Anthropic completions API response used to mock the HTTP layer.

    Mirrors the wire format of Anthropic's legacy ``/v1/complete`` endpoint:
    generated text in ``completion`` plus stop/trace metadata and the response
    headers the provider inspects.
    """
    return {
        "completion": "Here is a basic overview of how a car works:\n\n1. The engine. "
        "The engine is the power source that makes the car move.",
        "stop_reason": "max_tokens",
        "model": "claude-instant-1.1",
        "truncated": False,
        "stop": None,
        "log_id": "dee173f87ddf1357da639dee3c38d833",
        "exception": None,
        "headers": {"Content-Type": "application/json"},
    }


# Backward-compatible alias: other tests in this module still call METHOD_NAME().
METHOD_NAME = completions_response
def completions_config():
    """Minimal route configuration for an Anthropic llm/v1/completions route."""
    anthropic_model = {
        "provider": "anthropic",
        "name": "claude-instant-1",
        "config": {"anthropic_api_key": "key"},
    }
    return {
        "name": "completions",
        "route_type": "llm/v1/completions",
        "model": anthropic_model,
    }
def parsed_completions_response():
    """Expected gateway-normalized payload for the mocked Anthropic reply."""
    generated_text = (
        "Here is a basic overview of how a car works:\n\n1. The engine. "
        "The engine is the power source that makes the car move."
    )
    candidate = {
        "text": generated_text,
        "metadata": {"finish_reason": "length"},
    }
    response_metadata = {
        "model": "claude-instant-1.1",
        "route_type": "llm/v1/completions",
        "input_tokens": None,
        "output_tokens": None,
        "total_tokens": None,
    }
    return {"candidates": [candidate], "metadata": response_metadata}
@pytest.mark.asyncio
async def test_completions():
    # Happy path: the provider forwards the prompt to Anthropic (mocked at the
    # aiohttp layer) and normalizes the reply into the gateway schema.
    resp = METHOD_NAME()
    config = completions_config()
    with mock.patch(
        "aiohttp.ClientSession.post", return_value=MockAsyncResponse(resp)
    ) as mock_post:
        provider = AnthropicProvider(RouteConfig(**config))
        payload = {"prompt": "How does a car work?", "max_tokens": 200}
        response = await provider.completions(completions.RequestPayload(**payload))
        assert jsonable_encoder(response) == parsed_completions_response()
        mock_post.assert_called_once()


@pytest.mark.asyncio
async def test_completions_with_default_max_tokens():
    # Same as test_completions but omits max_tokens so the provider's default
    # is exercised; the normalized output must be unchanged.
    resp = METHOD_NAME()
    config = completions_config()
    with mock.patch(
        "aiohttp.ClientSession.post", return_value=MockAsyncResponse(resp)
    ) as mock_post:
        provider = AnthropicProvider(RouteConfig(**config))
        payload = {"prompt": "How does a car work?"}
        response = await provider.completions(completions.RequestPayload(**payload))
        assert jsonable_encoder(response) == parsed_completions_response()
        mock_post.assert_called_once()


@pytest.mark.asyncio
async def test_completions_throws_with_invalid_max_tokens_too_large():
    # max_tokens above the gateway's Anthropic cap must be rejected with a 422.
    config = completions_config()
    provider = AnthropicProvider(RouteConfig(**config))
    payload = {"prompt": "Would Fozzie or Kermet win in a fight?", "max_tokens": 1000001}
    with pytest.raises(HTTPException, match=r".*") as e:
        await provider.completions(completions.RequestPayload(**payload))
    assert (
        "Invalid value for max_tokens: cannot exceed "
        f"{MLFLOW_AI_GATEWAY_ANTHROPIC_MAXIMUM_MAX_TOKENS}" in e.value.detail
    )
    assert e.value.status_code == 422


@pytest.mark.asyncio
async def test_completions_throws_with_unsupported_candidate_count():
    # Anthropic only supports a single candidate; candidate_count != 1 -> 422.
    config = completions_config()
    provider = AnthropicProvider(RouteConfig(**config))
    payload = {
        "prompt": "Would Fozzie or Kermet win in a fight?",
        "candidate_count": 5,
        "max_tokens": 10,
    }
    with pytest.raises(HTTPException, match=r".*") as e:
        await provider.completions(completions.RequestPayload(**payload))
    assert "'candidate_count' must be '1' for the Anthropic provider" in e.value.detail
    assert e.value.status_code == 422


@pytest.mark.asyncio
async def test_completions_throws_with_top_p_defined():
    # temperature and top_p are mutually exclusive for this provider -> 422.
    config = completions_config()
    provider = AnthropicProvider(RouteConfig(**config))
    payload = {"prompt": "Would Fozzie or Kermet win in a fight?", "max_tokens": 500, "top_p": 0.6}
    with pytest.raises(HTTPException, match=r".*") as e:
        await provider.completions(completions.RequestPayload(**payload))
    assert "Cannot set both 'temperature' and 'top_p' parameters. Please" in e.value.detail
    assert e.value.status_code == 422


@pytest.mark.asyncio
async def test_completions_throws_with_stream_set_to_true():
    # Streaming responses are not supported through this provider -> 422.
    config = completions_config()
    provider = AnthropicProvider(RouteConfig(**config))
    payload = {
        "prompt": "Could the Millennium Falcon fight a Borg Cube and win?",
        "max_tokens": 5000,
        "stream": "true",
    }
    with pytest.raises(HTTPException, match=r".*") as e:
        await provider.completions(completions.RequestPayload(**payload))
    assert "Setting the 'stream' parameter to 'true' is not supported" in e.value.detail
    assert e.value.status_code == 422
def chat_config():
    """Minimal route configuration for an Anthropic llm/v1/chat route."""
    anthropic_model = {
        "provider": "anthropic",
        "name": "claude-instant-1",
        "config": {"anthropic_api_key": "key"},
    }
    return {"name": "chat", "route_type": "llm/v1/chat", "model": anthropic_model}
@pytest.mark.asyncio
async def test_chat_is_not_supported_for_anthropic():
    # The Anthropic provider exposes no chat route; calling it must 404.
    config = chat_config()
    provider = AnthropicProvider(RouteConfig(**config))
    payload = {
        "messages": [{"role": "user", "content": "Claude, can you chat with me? I'm lonely."}]
    }
    with pytest.raises(HTTPException, match=r".*") as e:
        await provider.chat(chat.RequestPayload(**payload))
    assert "The chat route is not available for Anthropic models" in e.value.detail
    assert e.value.status_code == 404
def embedding_config():
    """Minimal route configuration for an Anthropic llm/v1/embeddings route."""
    anthropic_model = {
        "provider": "anthropic",
        "name": "claude-1.3-100k",
        "config": {"anthropic_api_key": "key"},
    }
    return {
        "name": "embeddings",
        "route_type": "llm/v1/embeddings",
        "model": anthropic_model,
    }
@pytest.mark.asyncio
async def test_embeddings_are_not_supported_for_anthropic():
    # The Anthropic provider exposes no embeddings route; calling it must 404.
    config = embedding_config()
    provider = AnthropicProvider(RouteConfig(**config))
    payload = {"text": "give me that sweet, sweet vector, please."}
    with pytest.raises(HTTPException, match=r".*") as e:
        await provider.embeddings(embeddings.RequestPayload(**payload))
    assert "The embeddings route is not available for Anthropic models" in e.value.detail
    assert e.value.status_code == 404


@pytest.mark.asyncio
async def test_param_model_is_not_permitted():
    # The target model is fixed by the route config; a per-request 'model'
    # override must be rejected with a 422.
    config = completions_config()
    provider = AnthropicProvider(RouteConfig(**config))
    payload = {
        "prompt": "This should fail",
        "max_tokens": 5000,
        "model": "something-else",
    }
    with pytest.raises(HTTPException, match=r".*") as e:
        await provider.completions(completions.RequestPayload(**payload))
    assert "The parameter 'model' is not permitted" in e.value.detail
    assert e.value.status_code == 422


@pytest.mark.parametrize("prompt", [{"set1", "set2"}, ["list1"], [1], ["list1", "list2"], [1, 2]])
@pytest.mark.asyncio
async def test_completions_throws_if_prompt_contains_non_string(prompt):
    # Pydantic schema validation rejects non-string prompts before any HTTP call.
    config = completions_config()
    provider = AnthropicProvider(RouteConfig(**config))
    payload = {"prompt": prompt}
    with pytest.raises(ValidationError, match=r"prompt"):
        await provider.completions(completions.RequestPayload(**payload))
6,688 | validate | import os
from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
from conan.tools.build import check_min_cppstd
from conan.tools.files import copy, get
from conan.tools.layout import basic_layout
from conan.tools.scm import Version
required_conan_version = ">=1.52.0"
class SerdeppConan(ConanFile):
    name = "serdepp"
    description = "c++ serialize and deserialize adaptor library like rust serde.rs"
    license = "MIT"
    url = "https://github.com/conan-io/conan-center-index"
    homepage = "https://github.com/injae/serdepp"
    topics = ("yaml", "toml", "serialization", "json", "reflection", "header-only")
    package_type = "header-library"
    settings = "os", "arch", "compiler", "build_type"
    # Each option toggles one optional serialization-backend adaptor header.
    options = {
        "with_nlohmann_json": [True, False],
        "with_rapidjson": [True, False],
        "with_fmt": [True, False],
        "with_toml11": [True, False],
        "with_yamlcpp": [True, False],
    }
    default_options = {
        "with_nlohmann_json": True,
        "with_rapidjson": True,
        "with_fmt": True,
        "with_toml11": True,
        "with_yamlcpp": True,
    }
    no_copy_source = True

    @property
    def _min_cppstd(self):
        # serdepp relies on C++17 features (if constexpr, structured bindings).
        return 17

    @property
    def _compilers_minimum_version(self):
        # Oldest compiler versions with usable C++17 support.
        return {
            "gcc": "7",
            "Visual Studio": "17",
            "clang": "5",
            "apple-clang": "10",
        }

    def layout(self):
        basic_layout(self, src_folder="src")

    def requirements(self):
        self.requires("nameof/0.10.3")
        self.requires("magic_enum/0.9.3")
        if self.options.with_toml11:
            self.requires("toml11/3.7.1")
        if self.options.with_yamlcpp:
            self.requires("yaml-cpp/0.8.0")
        if self.options.with_rapidjson:
            self.requires("rapidjson/1.1.0")
        if self.options.with_fmt:
            self.requires("fmt/10.1.0")
        if self.options.with_nlohmann_json:
            self.requires("nlohmann_json/3.11.2")

    def package_id(self):
        # Header-only: the package is independent of settings/options.
        self.info.clear()

    def validate(self):
        """Reject configurations whose compiler cannot build C++17.

        Renamed from the METHOD_NAME placeholder: Conan invokes the hook by the
        name ``validate``, so with the placeholder name this check never ran.
        """
        compiler = self.settings.compiler
        if compiler.get_safe("cppstd"):
            check_min_cppstd(self, self._min_cppstd)
        minimum_version = self._compilers_minimum_version.get(str(compiler), False)
        if not minimum_version:
            self.output.warning(f"{self.name} requires C++17. Your compiler is unknown. Assuming it supports C++17.")
        elif Version(compiler.version) < minimum_version:
            raise ConanInvalidConfiguration(f"{self.name} requires a compiler that supports at least C++17")

    def source(self):
        get(self, **self.conan_data["sources"][self.version], strip_root=True)

    def package(self):
        # Shorthands for source/package paths.
        s = lambda x: os.path.join(self.source_folder, x)
        p = lambda x: os.path.join(self.package_folder, x)
        copy(self, "LICENSE", dst=os.path.join(self.package_folder, "licenses"), src=self.source_folder)
        include = os.path.join("include", "serdepp")
        copy(self, "*.hpp", dst=p(include), src=s(include))
        attribute = os.path.join(include, "attribute")
        copy(self, "*.hpp", dst=p(attribute), src=s(attribute))
        # Adaptor headers are copied selectively based on the enabled options.
        adaptor = os.path.join(include, "adaptor")
        copy(self, "reflection.hpp", dst=p(adaptor), src=s(adaptor))
        copy(self, "sstream.hpp", dst=p(adaptor), src=s(adaptor))
        if self.options.with_toml11:
            copy(self, "toml11.hpp", dst=p(adaptor), src=s(adaptor))
        if self.options.with_yamlcpp:
            copy(self, "yaml-cpp.hpp", dst=p(adaptor), src=s(adaptor))
        if self.options.with_rapidjson:
            copy(self, "rapidjson.hpp", dst=p(adaptor), src=s(adaptor))
        if self.options.with_fmt:
            copy(self, "fmt.hpp", dst=p(adaptor), src=s(adaptor))
        if self.options.with_nlohmann_json:
            copy(self, "nlohmann_json.hpp", dst=p(adaptor), src=s(adaptor))

    def package_info(self):
        # Header-only package: no binaries or libraries to expose.
        self.cpp_info.bindirs = []
        self.cpp_info.libdirs = []
6,689 | test variance | # Copyright 2023 ParaDiGMS authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    """Unit tests for DDIMParallelScheduler (ParaDiGMS parallel sampling)."""

    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        # Baseline config; individual tests override fields via kwargs.
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        # Run a complete 10-step deterministic denoising loop and return the
        # final sample, used by the numeric regression tests below.
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample
        return sample

    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        # Renamed from the METHOD_NAME placeholder: without a ``test_`` prefix
        # the test runner never discovered or ran this test.
        # Checks _get_variance at both widely- and narrowly-spaced timestep pairs.
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        # batch_step_no_noise processes a flattened batch of (sample, timestep)
        # pairs in one call; expected sums are regression values.
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)
        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)
        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))
        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
6,690 | test run inference | #
# Copyright (c) 2018-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import numpy as np
from constants import MODEL_SERVICE, ERROR_SHAPE, TARGET_DEVICE_GPU, TARGET_DEVICE_HDDL, NOT_TO_BE_REPORTED_IF_SKIPPED, \
TARGET_DEVICE_MYRIAD
from config import skip_nginx_test
from conftest import devices_not_supported_for_test
from model.models_information import Resnet, ResnetBS4, ResnetBS8, ResnetS3
from utils.grpc import create_channel, infer, get_model_metadata, \
model_metadata_response, get_model_status
import logging
from utils.models_utils import ModelVersionState, ErrorCode, \
ERROR_MESSAGE # noqa
from utils.rest import get_predict_url, get_metadata_url, get_status_url, infer_rest, \
get_model_metadata_response_rest, get_model_status_response_rest
logger = logging.getLogger(__name__)
@pytest.mark.skipif(skip_nginx_test, reason=NOT_TO_BE_REPORTED_IF_SKIPPED)
@devices_not_supported_for_test([TARGET_DEVICE_MYRIAD, TARGET_DEVICE_HDDL, TARGET_DEVICE_GPU])
class TestMultiModelInference:
    """End-to-end checks against a multi-model OVMS server over gRPC and REST."""

    def test_run_inference(self):
        # Renamed from the METHOD_NAME placeholder: without a ``test_`` prefix
        # pytest never collected or ran this test.
        _, ports = start_server_multi_model
        # Connect to grpc service
        stub = create_channel(port=ports["grpc_port"])
        for model in [Resnet, ResnetBS4, ResnetBS8, ResnetS3]:
            input_data = np.ones(model.input_shape, model.dtype)
            logger.info("Starting inference using {} model".format(model.name))
            output = infer(input_data, input_tensor=model.input_name,
                           grpc_stub=stub,
                           model_spec_name=model.name,
                           model_spec_version=None,
                           output_tensors=[model.output_name])
            logger.info("Output shape: {} for model {} ".format(output[model.output_name].shape, model.name))
            assert_msg = "{} for model {}".format(ERROR_SHAPE, model.name)
            assert output[model.output_name].shape == model.output_shape, assert_msg

    def test_get_model_metadata(self, start_server_multi_model):
        _, ports = start_server_multi_model
        # Connect to grpc service
        stub = create_channel(port=ports["grpc_port"])
        for model in [Resnet, ResnetBS4, ResnetBS8, ResnetS3]:
            logger.info("Getting info about {} model".format(model.name))
            expected_input_metadata = {model.input_name: {'dtype': 1, 'shape': list(model.input_shape)}}
            expected_output_metadata = {model.output_name: {'dtype': 1, 'shape': list(model.output_shape)}}
            request = get_model_metadata(model_name=model.name)
            response = stub.GetModelMetadata(request, 10)
            input_metadata, output_metadata = model_metadata_response(response=response)
            logger.info("Input metadata: {}".format(input_metadata))
            logger.info("Output metadata: {}".format(output_metadata))
            assert response.model_spec.name == model.name
            assert expected_input_metadata == input_metadata
            assert expected_output_metadata == output_metadata

    def test_get_model_status(self, start_server_multi_model):
        _, ports = start_server_multi_model
        stub = create_channel(port=ports["grpc_port"], service=MODEL_SERVICE)
        for model in [Resnet, ResnetBS4, ResnetBS8, ResnetS3]:
            request = get_model_status(model_name=model.name, version=1)
            response = stub.GetModelStatus(request, 10)
            versions_statuses = response.model_version_status
            version_status = versions_statuses[0]
            assert version_status.version == 1
            assert version_status.state == ModelVersionState.AVAILABLE
            assert version_status.status.error_code == ErrorCode.OK
            assert version_status.status.error_message == ERROR_MESSAGE[
                ModelVersionState.AVAILABLE][ErrorCode.OK]

    def test_run_inference_rest(self, start_server_multi_model):
        _, ports = start_server_multi_model
        for model in [Resnet, ResnetBS4, ResnetBS8, ResnetS3]:
            input_data = np.ones(model.input_shape, model.dtype)
            logger.info("Starting inference using {} model".format(model.name))
            rest_url = get_predict_url(model=model.name, port=ports["rest_port"])
            output = infer_rest(input_data, input_tensor=model.input_name, rest_url=rest_url,
                                output_tensors=[model.output_name],
                                request_format=model.rest_request_format)
            logger.info("Output shape: {}".format(output[model.output_name].shape))
            assert output[model.output_name].shape == model.output_shape, ERROR_SHAPE

    def test_get_model_metadata_rest(self, start_server_multi_model):
        _, ports = start_server_multi_model
        for model in [Resnet, ResnetBS4]:
            logger.info("Getting info about {} model".format(model.name))
            expected_input_metadata = {model.input_name: {'dtype': 1, 'shape': list(model.input_shape)}}
            expected_output_metadata = {model.output_name: {'dtype': 1, 'shape': list(model.output_shape)}}
            rest_url = get_metadata_url(model=model.name, port=ports["rest_port"])
            response = get_model_metadata_response_rest(rest_url)
            input_metadata, output_metadata = model_metadata_response(response=response)
            logger.info("Input metadata: {}".format(input_metadata))
            logger.info("Output metadata: {}".format(output_metadata))
            assert response.model_spec.name == model.name
            assert expected_input_metadata == input_metadata
            assert expected_output_metadata == output_metadata

    def test_get_model_status_rest(self, start_server_multi_model):
        _, ports = start_server_multi_model
        for model in [Resnet, ResnetBS4]:
            rest_url = get_status_url(model=model.name, port=ports["rest_port"])
            response = get_model_status_response_rest(rest_url)
            versions_statuses = response.model_version_status
            version_status = versions_statuses[0]
            assert version_status.version == 1
            assert version_status.state == ModelVersionState.AVAILABLE
            assert version_status.status.error_code == ErrorCode.OK
            assert version_status.status.error_message == ERROR_MESSAGE[
                ModelVersionState.AVAILABLE][ErrorCode.OK]
6,691 | do file | '''Ships alb logs from s3 to es'''
import os
import re
import gzip
import logging
import hashlib
import geohash
import urllib.parse
import boto3
from geoip import geolite2
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk
from elasticsearch.serializer import JSONSerializer
print('Loading function')
s3 = boto3.client('s3')
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
es_logger = logging.getLogger('elasticsearch')
es_logger.setLevel(logging.DEBUG)
class SetEncoder(JSONSerializer):
    # Extends the elasticsearch-py JSON serializer so frozenset values
    # (produced e.g. by geoip lookups) serialize as JSON arrays.
    def default(self, obj):
        if isinstance(obj, frozenset):
            return list(obj)
        # Fall back to the base serializer for everything else.
        return JSONSerializer.default(self, obj)
def parse_int(thing):
    """Best-effort int conversion: return ``int(thing)``, or None on failure.

    Used for ALB log fields that may be '-' or empty when no value applies.
    """
    try:
        return int(thing)
    except (TypeError, ValueError):
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt and
        # genuine programming errors are no longer silently swallowed.
        return None
# (field name, parser) pairs in the exact column order of an ALB access-log
# entry; index n of this tuple corresponds to regex capture group n+1 below.
fields = (
    ("type", str),
    ("time", str),
    ("elb", str),
    ("client_ip", str),
    ("client_port", parse_int),
    ("target_ip", str),
    ("target_port", parse_int),
    ("request_processing_time", float),
    ("target_processing_time", float),
    ("response_processing_time", float),
    ("elb_status_code", parse_int),
    ("target_status_code", str),
    ("received_bytes", parse_int),
    ("sent_bytes", parse_int),
    ("request_verb", str),
    # Split the request URL into its scheme/netloc/path/query/fragment parts.
    ("request_url", lambda a: urllib.parse.urlsplit(a)._asdict()),
    ("request_proto", str),
    ("user_agent", str),
    ("ssl_cipher", str),
    ("ssl_protocol", str),
    ("target_group_arn", str),
    ("trace_id", str),
    ("domain_name", str),
    ("chosen_cert_arn", str),
    ("matched_rule_priority", str),
    ("request_creation_time", str),
    ("actions_executed", str),
    ("redirect_url", str),
    ("lambda_error_reason", str),
    ("target_port_list", str),
    ("target_status_code_list", str),
    ("classification", str),
    ("classification_reason", str),
)
# One capture group per field above; matches a single ALB access-log line.
REGEX = r'([^ ]*) ([^ ]*) ([^ ]*) ([^ ]*):([0-9]*) ([^ ]*)[:-]([0-9]*) ([-.0-9]*) ([-.0-9]*) ([-.0-9]*) (|[-0-9]*) (-|[-0-9]*) ([-0-9]*) ([-0-9]*) \"([^ ]*) (.*) (- |[^ ]*)\" \"([^\"]*)\" ([A-Z0-9-_]+) ([A-Za-z0-9.-]*) ([^ ]*) \"([^\"]*)\" \"([^\"]*)\" \"([^\"]*)\" ([-.0-9]*) ([^ ]*) \"([^\"]*)\" \"([^\"]*)\" \"([^ ]*)\" \"([^\s]+?)\" \"([^\s]+)\" \"([^ ]*)\" \"([^ ]*)\"'
# Compiled once at module load; reused for every log line.
MATCHER = re.compile(REGEX)
# Maps installer download paths to the OS the requester presumably runs.
AFFINITY = {
    '/api/latest/fleet/download_installer/pkg': 'mac',
    '/api/latest/fleet/download_installer/msi': 'windows',
    '/api/latest/fleet/download_installer/deb': 'linux',
    '/api/latest/fleet/download_installer/rpm': 'linux',
}
# Applied in order to each parsed entry; later enrichers may read fields
# added by earlier ones (geohash reads 'geoip').
ENRICHERS = [
    lambda a: {'geoip': geolite2.lookup(a['client_ip']).to_dict() if geolite2.lookup(a['client_ip']) is not None else None},
    lambda a: {'geohash': geohash.encode(*a['geoip']['location']) if a['geoip'] is not None else None},
    lambda a: {'os_affinity': AFFINITY[a['request_url']['path']] if a['request_url']['path'] in AFFINITY else None},
]
def do_file(bucket, key):
    """Parse one gzipped ALB log object from S3 and bulk-index it into ES.

    Args:
        bucket: S3 bucket name holding the log object.
        key: S3 object key of the gzipped ALB access log.

    Raises:
        ValueError: if a line does not match the ALB log regex.
    """
    search = Elasticsearch([os.environ['ES_URL']], serializer=SetEncoder())
    out = []
    response = s3.get_object(Bucket=bucket, Key=key)
    with gzip.GzipFile(fileobj=response["Body"]) as handle:
        for line in handle:
            line = line.decode('utf8')
            match = MATCHER.match(line)
            if not match:
                # Fixed: `raise line` raised a str, which is itself a TypeError
                # in Python 3 ("exceptions must derive from BaseException").
                raise ValueError("unparseable ALB log line: " + line)
            thing = {i[0]: i[1](match.group(n+1)) for n, i in enumerate(fields)}
            thing['_index'] = 'sandbox-prod'
            # Content-derived id keeps re-processing the same object idempotent.
            thing['_id'] = hashlib.sha256(line.encode('utf8')).hexdigest()
            if thing['elb_status_code'] == 200:
                for enricher in ENRICHERS:
                    thing.update(enricher(thing))
            out.append(thing)
    logger.debug(f"Sending {len(out)} items to {os.environ['ES_URL']}")
    bulk(search, out, chunk_size=100)


# Backward-compatible alias: lambda_handler still calls METHOD_NAME().
METHOD_NAME = do_file
def lambda_handler(event, _):
    '''Main function: AWS Lambda entry point for S3 ObjectCreated events.

    Extracts the bucket and (URL-decoded) key of the uploaded log object from
    the first S3 event record and ships its contents to Elasticsearch.
    '''
    #print("Received event: " + json.dumps(event, indent=2))
    # Get the object from the event and show its content type
    logger.debug(event)
    bucket = event['Records'][0]['s3']['bucket']['name']
    # S3 event keys are URL-encoded (spaces arrive as '+').
    key = urllib.parse.unquote_plus(event['Records'][0]['s3']['object']['key'], encoding='utf-8')
    METHOD_NAME(bucket, key)
6,692 | from config | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from ppdet.core.workspace import register, create
from .meta_arch import BaseArch
import random
import paddle
import paddle.nn.functional as F
import paddle.distributed as dist
__all__ = ['YOLOX']
@register
class YOLOX(BaseArch):
    """
    YOLOX network, see https://arxiv.org/abs/2107.08430

    Args:
        backbone (nn.Layer): backbone instance
        neck (nn.Layer): neck instance
        head (nn.Layer): head instance
        for_mot (bool): whether used for MOT or not
        input_size (list[int]): initial scale, will be reset by self._preprocess()
        size_stride (int): stride of the size range
        size_range (list[int]): multi-scale range for training
        random_interval (int): interval of iter to change self._input_size
    """

    __category__ = 'architecture'

    def __init__(self,
                 backbone='CSPDarkNet',
                 neck='YOLOCSPPAN',
                 head='YOLOXHead',
                 for_mot=False,
                 input_size=[640, 640],
                 size_stride=32,
                 size_range=[15, 25],
                 random_interval=10):
        super(YOLOX, self).__init__()
        self.backbone = backbone
        self.neck = neck
        self.head = head
        self.for_mot = for_mot
        self.input_size = input_size
        self._input_size = paddle.to_tensor(input_size)
        self.size_stride = size_stride
        self.size_range = size_range
        self.random_interval = random_interval
        self._step = 0

    @classmethod
    def from_config(cls, cfg, *args, **kwargs):
        """Build sub-modules from a config dict; ppdet's `create()` calls this
        hook by name, so the METHOD_NAME placeholder broke config-driven
        instantiation — renamed to the expected `from_config`."""
        # backbone
        backbone = create(cfg['backbone'])
        # fpn
        kwargs = {'input_shape': backbone.out_shape}
        neck = create(cfg['neck'], **kwargs)
        # head
        kwargs = {'input_shape': neck.out_shape}
        head = create(cfg['head'], **kwargs)
        return {
            'backbone': backbone,
            'neck': neck,
            "head": head,
        }

    def _forward(self):
        if self.training:
            self._preprocess()
        body_feats = self.backbone(self.inputs)
        neck_feats = self.neck(body_feats, self.for_mot)
        if self.training:
            yolox_losses = self.head(neck_feats, self.inputs)
            yolox_losses.update({'size': self._input_size[0]})
            return yolox_losses
        else:
            head_outs = self.head(neck_feats)
            bbox, bbox_num = self.head.post_process(
                head_outs, self.inputs['im_shape'], self.inputs['scale_factor'])
            return {'bbox': bbox, 'bbox_num': bbox_num}

    def get_loss(self):
        return self._forward()

    def get_pred(self):
        return self._forward()

    def _preprocess(self):
        # YOLOX multi-scale training, interpolate resize before inputs of the network.
        self._get_size()
        scale_y = self._input_size[0] / self.input_size[0]
        scale_x = self._input_size[1] / self.input_size[1]
        if scale_x != 1 or scale_y != 1:
            self.inputs['image'] = F.interpolate(
                self.inputs['image'],
                size=self._input_size,
                mode='bilinear',
                align_corners=False)
            gt_bboxes = self.inputs['gt_bbox']
            for i in range(len(gt_bboxes)):
                if len(gt_bboxes[i]) > 0:
                    # Scale ground-truth boxes to match the resized image.
                    gt_bboxes[i][:, 0::2] = gt_bboxes[i][:, 0::2] * scale_x
                    gt_bboxes[i][:, 1::2] = gt_bboxes[i][:, 1::2] * scale_y
            self.inputs['gt_bbox'] = gt_bboxes

    def _get_size(self):
        # random_interval = 10 as default, every 10 iters to change self._input_size
        image_ratio = self.input_size[1] * 1.0 / self.input_size[0]
        if self._step % self.random_interval == 0:
            size_factor = random.randint(*self.size_range)
            size = [
                self.size_stride * size_factor,
                self.size_stride * int(size_factor * image_ratio)
            ]
            self._input_size = paddle.to_tensor(size)
        self._step += 1
from tests.base import ApiDBTestCase
from zou.app.services import projects_service
class PermissionTestCase(ApiDBTestCase):
    """Role-based access-control tests covering admin, manager and CG-artist
    permissions on project and person endpoints."""

    def setUp(self):
        super(PermissionTestCase, self).setUp()
        self.generate_fixture_user_cg_artist()
        self.user_cg_artist_id = self.user_cg_artist["id"]
        self.generate_fixture_user_manager()
        self.generate_fixture_project_status()
        self.generate_fixture_project()
        # Plain-id handle reused by most tests below.
        self.project_id = self.project.id

    def tearDown(self):
        self.log_out()

    def test_admin_can_create_project(self):
        self.log_in(self.user["email"])
        data = {"name": "Cosmos Landromat 2"}
        self.post("data/projects/", data, 201)

    def test_admin_can_edit_project(self):
        # NOTE(review): only logs in — the edit request appears to be
        # missing from this test body; confirm intent.
        self.log_in(self.user["email"])

    def test_admin_can_read_project(self):
        # NOTE(review): only logs in — the read request appears to be
        # missing from this test body; confirm intent.
        self.log_in(self.user["email"])

    def test_cg_artist_cannot_create_project(self):
        self.log_in_cg_artist()
        data = {"name": "Cosmos Landromat 2"}
        self.post("data/projects/", data, 403)

    def test_cg_artist_cannot_edit_project(self):
        self.log_in_cg_artist()
        data = {"name": "Cosmos Landromat 2 edited"}
        self.put("data/projects/%s" % self.project_id, data, 403)

    def test_cg_artist_can_read_open_projects(self):
        self.log_in_cg_artist()
        self.get("data/projects/open")

    def test_cg_artist_can_read_project_task_types(self):
        # Attach one task type to the project, then check a team member
        # sees exactly that one through the project task-types route.
        self.generate_fixture_department()
        self.generate_fixture_task_type()
        task_type_id = self.task_type_concept.id
        projects_service.add_task_type_setting(
            self.project_id, task_type_id, 1
        )
        self.log_in_cg_artist()
        user_id = str(self.user_cg_artist["id"])
        projects_service.add_team_member(self.project_id, user_id)
        result = self.get("data/projects/%s/task-types" % self.project_id, 200)
        self.assertIsInstance(result, list)
        self.assertEqual(len(result), 1)
        self.assertEqual(result[0]["id"], str(task_type_id))

    def test_cg_artist_can_read_project_task_statuses(self):
        self.log_in_cg_artist()
        user_id = str(self.user_cg_artist["id"])
        projects_service.add_team_member(self.project_id, user_id)
        self.get(
            "data/projects/%s/settings/task-status" % self.project_id, 200
        )

    def test_manager_cannot_create_person(self):
        self.log_in_manager()
        data = {
            "first_name": "John",
            "last_name": "Doe",
            "email": "john.doe@gmail.com",
        }
        self.post("data/persons/new", data, 403)

    def test_admin_can_create_person(self):
        self.log_in_admin()
        data = {
            "first_name": "John",
            "last_name": "Doe",
            "email": "john.doe@gmail.com",
        }
        self.post("data/persons/new", data, 201)

    def test_manager_cannot_update_admin(self):
        self.log_in_manager()
        data = {"email": "john.doe2@gmail.com"}
        self.put("data/persons/%s" % self.user["id"], data, 403)

    def METHOD_NAME(self):
        # Managers may read a person but must not escalate their role.
        self.log_in_manager()
        data = {"role": "admin"}
        self.put("data/persons/%s" % self.user_cg_artist_id, data, 403)
        self.get("data/persons/%s" % self.user_cg_artist_id)

    def test_admin_can_update_admin(self):
        self.log_in_admin()
        data = {"first_name": "Super admin"}
        self.put("data/persons/%s" % self.user["id"], data, 200)

    def test_manager_cannot_delete_admin(self):
        self.log_in_manager()
        self.delete("data/persons/%s" % self.user["id"], 403)

    def test_user_projects(self):
        # Project listings for a CG artist are empty until the user is
        # added to a project team; then open/all listings include it.
        self.generate_fixture_project_standard()
        self.generate_fixture_project_closed_status()
        self.generate_fixture_project_closed()
        self.generate_fixture_asset_type()
        self.generate_fixture_asset()
        self.log_in_cg_artist()
        user_id = str(self.user_cg_artist["id"])
        projects = self.get("data/projects")
        self.assertEqual(len(projects), 0)
        projects_service.add_team_member(self.project_id, user_id)
        projects = self.get("data/projects")
        self.assertEqual(len(projects), 1)
        projects = self.get("data/projects/all")
        self.assertEqual(len(projects), 1)
        projects = self.get("data/projects/open")
        self.assertEqual(len(projects), 1)

    def test_is_in_team(self):
        # Asset access flips from 403 to 200 once the user joins the team.
        self.generate_fixture_asset_type()
        self.generate_fixture_asset()
        asset_id = self.asset.id
        self.log_in_cg_artist()
        self.get("data/assets/%s" % asset_id, 403)
        projects_service.add_team_member(
            self.project_id, self.user_cg_artist["id"]
        )
        self.get("data/assets/%s" % asset_id, 200)
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "eventhubs cluster available-region",
)
class AvailableRegion(AAZCommand):
    """List the quantity of available pre-provisioned Event Hubs Clusters, indexed by Azure region.
    """

    # AAZ framework metadata: API version and the ARM resource path read.
    _aaz_info = {
        "version": "2023-01-01-preview",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/providers/microsoft.eventhub/availableclusterregions", "2023-01-01-preview"],
        ]
    }

    def _handler(self, command_args):
        # Standard AAZ flow: parse args, run the HTTP operation, shape output.
        super()._handler(command_args)
        self._execute_operations()
        return self._output()

    _args_schema = None

    @classmethod
    def METHOD_NAME(cls, *args, **kwargs):
        # Build and memoize the argument schema; this command adds no
        # arguments of its own beyond the inherited ones.
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super().METHOD_NAME(*args, **kwargs)
        # define Arg Group ""
        return cls._args_schema

    def _execute_operations(self):
        self.pre_operations()
        self.ClustersListAvailableClusterRegion(ctx=self.ctx)()
        self.post_operations()

    @register_callback
    def pre_operations(self):
        # Extension hook: runs before the HTTP operation.
        pass

    @register_callback
    def post_operations(self):
        # Extension hook: runs after the HTTP operation.
        pass

    def _output(self, *args, **kwargs):
        result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
        return result

    class ClustersListAvailableClusterRegion(AAZHttpOperation):
        """GET availableClusterRegions and store the response in the context."""

        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            if session.http_response.status_code in [200]:
                return self.on_200(session)
            # Any non-200 status is surfaced as a management-plane error.
            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/providers/Microsoft.EventHub/availableClusterRegions",
                **self.url_parameters
            )

        @property
        def method(self):
            return "GET"

        @property
        def error_format(self):
            return "MgmtErrorFormat"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2023-01-01-preview",
                    required=True,
                ),
            }
            return parameters

        @property
        def header_parameters(self):
            parameters = {
                **self.serialize_header_param(
                    "Accept", "application/json",
                ),
            }
            return parameters

        def on_200(self, session):
            # Deserialize the body into ctx.vars.instance with the schema below.
            data = self.deserialize_http_content(session)
            self.ctx.set_var(
                "instance",
                data,
                schema_builder=self._build_schema_on_200
            )

        _schema_on_200 = None

        @classmethod
        def _build_schema_on_200(cls):
            # Response schema: an object with "value": [{"location": str}, ...].
            if cls._schema_on_200 is not None:
                return cls._schema_on_200
            cls._schema_on_200 = AAZObjectType()
            _schema_on_200 = cls._schema_on_200
            _schema_on_200.value = AAZListType()
            value = cls._schema_on_200.value
            value.Element = AAZObjectType()
            _element = cls._schema_on_200.value.Element
            _element.location = AAZStrType()
            return cls._schema_on_200
class _AvailableRegionHelper:
    """Helper class for AvailableRegion"""


# Public API of this generated module.
__all__ = ["AvailableRegion"]
# This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
A widget that provides a scheme selector, with New and Remove buttons.
"""
from PyQt5.QtCore import QDir, QSettings, pyqtSignal, Qt
from PyQt5.QtWidgets import (
QComboBox, QHBoxLayout, QInputDialog, QLabel, QPushButton, QWidget,
QAction, QMenu, QFileDialog)
import app
import icons
import os
class SchemeSelector(QWidget):
    """A scheme selector widget: a combobox listing schemes plus a menu
    with Add/Remove/Rename/Import/Export actions.

    Signals:
        currentChanged: the selected scheme changed.
        changed: the scheme list or selection changed in a way that
            needs saving via saveSettings().
    """

    currentChanged = pyqtSignal()
    changed = pyqtSignal()

    def __init__(self, parent=None):
        super().__init__(parent)
        # Schemes marked for deletion; actually removed from QSettings in
        # saveSettings().  Initialized here — not only in loadSettings() —
        # so addScheme()/slotRemove() can no longer raise AttributeError
        # when called before the first loadSettings().
        self._schemesToRemove = set()
        layout = QHBoxLayout()
        layout.setContentsMargins(0, 0, 0, 0)
        self.setLayout(layout)
        self.label = QLabel()
        self.scheme = QComboBox()
        self.menuButton = QPushButton(flat=True)
        menu = QMenu(self.menuButton)
        self.menuButton.setMenu(menu)
        layout.addWidget(self.label)
        layout.addWidget(self.scheme)
        layout.addWidget(self.menuButton)
        layout.addStretch(1)

        # Small helper to create a menu action bound to a slot.
        def act(slot, icon=None):
            a = QAction(self, triggered=slot)
            self.addAction(a)
            if icon:
                a.setIcon(icons.get(icon))
            return a

        # add action
        a = self.addAction_ = act(self.METHOD_NAME, 'list-add')
        menu.addAction(a)
        # remove action
        a = self.removeAction = act(self.slotRemove, 'list-remove')
        menu.addAction(a)
        # rename action
        a = self.renameAction = act(self.slotRename, 'document-edit')
        menu.addAction(a)
        menu.addSeparator()
        # import action
        a = self.importAction = act(self.slotImport, 'document-open')
        menu.addAction(a)
        # export action
        a = self.exportAction = act(self.slotExport, 'document-save-as')
        menu.addAction(a)
        self.scheme.currentIndexChanged.connect(self.slotSchemeChanged)
        app.translateUI(self)

    def translateUI(self):
        """(Re)apply translated UI texts; called by the app framework."""
        self.label.setText(_("Scheme:"))
        self.menuButton.setText(_("&Menu"))
        self.addAction_.setText(_("&Add..."))
        self.removeAction.setText(_("&Remove"))
        self.renameAction.setText(_("Re&name..."))
        self.importAction.setText(_("&Import..."))
        self.exportAction.setText(_("&Export..."))

    def slotSchemeChanged(self, index):
        """Called when the Scheme combobox is changed by the user."""
        self.disableDefault(self.scheme.itemData(index) == 'default')
        self.currentChanged.emit()
        self.changed.emit()

    def disableDefault(self, val):
        """Disable Remove/Rename when the built-in default scheme is active."""
        self.removeAction.setDisabled(val)
        self.renameAction.setDisabled(val)

    def schemes(self):
        """Returns the list with internal names of currently available schemes."""
        return [self.scheme.itemData(i) for i in range(self.scheme.count())]

    def currentScheme(self):
        """Returns the internal name of the currently selected scheme"""
        return self.scheme.itemData(self.scheme.currentIndex())

    def insertSchemeItem(self, name, scheme):
        """Insert (name, scheme) keeping items after the default sorted by name."""
        # Start at 1: index 0 is always the default scheme.
        for i in range(1, self.scheme.count()):
            n = self.scheme.itemText(i)
            if n.lower() > name.lower():
                self.scheme.insertItem(i, name, scheme)
                break
        else:
            self.scheme.addItem(name, scheme)

    def addScheme(self, name):
        """Add a new scheme named *name*, select it, and return its key.

        Keys 'user1', 'user2', ... are assigned, skipping keys still in
        use or pending removal so a key is never silently reused.
        """
        num, key = 1, 'user1'
        while key in self.schemes() or key in self._schemesToRemove:
            num += 1
            key = f'user{num}'
        self.insertSchemeItem(name, key)
        self.scheme.setCurrentIndex(self.scheme.findData(key))
        return key

    def METHOD_NAME(self):
        """Menu handler: prompt for a name and add a new scheme."""
        name, ok = QInputDialog.getText(self,
            app.caption(_("Add Scheme")),
            _("Please enter a name for the new scheme:"))
        if ok:
            self.addScheme(name)

    def slotRemove(self):
        """Menu handler: mark the current scheme for removal on save."""
        index = self.scheme.currentIndex()
        scheme = self.scheme.itemData(index)
        if scheme == 'default':
            return # default can not be removed
        self._schemesToRemove.add(scheme)
        self.scheme.removeItem(index)

    def slotRename(self):
        """Menu handler: rename the current scheme (key stays the same)."""
        index = self.scheme.currentIndex()
        name = self.scheme.itemText(index)
        scheme = self.scheme.itemData(index)
        newName, ok = QInputDialog.getText(self, _("Rename"), _("New name:"), text=name)
        if ok:
            # Re-insert under the new name to keep the sort order; block
            # signals so the intermediate removal does not emit changes.
            self.scheme.blockSignals(True)
            self.scheme.removeItem(index)
            self.insertSchemeItem(newName, scheme)
            self.scheme.setCurrentIndex(self.scheme.findData(scheme))
            self.scheme.blockSignals(False)
            self.changed.emit()

    def slotImport(self):
        """Menu handler: pick an XML file and delegate import to the parent."""
        filetypes = "{} (*.xml);;{} (*)".format(_("XML Files"), _("All Files"))
        caption = app.caption(_("dialog title", "Import color theme"))
        filename = QFileDialog.getOpenFileName(self, caption, QDir.homePath(), filetypes)[0]
        if filename:
            self.parent().import_(filename)

    def slotExport(self):
        """Menu handler: pick a target file and delegate export to the parent."""
        name = self.scheme.currentText()
        filetypes = "{} (*.xml);;{} (*)".format(_("XML Files"), _("All Files"))
        caption = app.caption(_("dialog title",
            "Export {name}").format(name=name))
        path = os.path.join(QDir.homePath(), name+'.xml')
        filename = QFileDialog.getSaveFileName(self, caption, path, filetypes)[0]
        if filename:
            if os.path.splitext(filename)[1] != '.xml':
                filename += '.xml'
            self.parent().export(name, filename)

    def loadSettings(self, currentKey, namesGroup):
        """Populate the combobox from QSettings.

        *currentKey* holds the selected scheme key; *namesGroup* maps
        scheme keys to display names.
        """
        # don't mark schemes for removal anymore
        self._schemesToRemove = set()
        s = QSettings()
        cur = s.value(currentKey, "default", str)
        # load the names for the shortcut schemes
        s.beginGroup(namesGroup)
        block = self.scheme.blockSignals(True)
        self.scheme.clear()
        self.scheme.addItem(_("Default"), "default")
        lst = [(s.value(key, key, str), key) for key in s.childKeys()]
        for name, key in sorted(lst, key=lambda f: f[0].lower()):
            self.scheme.addItem(name, key)
        # find out index
        index = self.scheme.findData(cur)
        self.disableDefault(cur == 'default')
        self.scheme.setCurrentIndex(index)
        self.scheme.blockSignals(block)
        self.currentChanged.emit()

    def saveSettings(self, currentKey, namesGroup, removePrefix=None):
        """Write names, pending removals and the current selection to QSettings."""
        # first save new scheme names
        s = QSettings()
        s.beginGroup(namesGroup)
        for i in range(self.scheme.count()):
            if self.scheme.itemData(i) != 'default':
                s.setValue(self.scheme.itemData(i), self.scheme.itemText(i))
        for scheme in self._schemesToRemove:
            s.remove(scheme)
        s.endGroup()
        if removePrefix:
            # Also drop the scheme's own settings subtree.
            for scheme in self._schemesToRemove:
                s.remove(f"{removePrefix}/{scheme}")
        # then save current
        scheme = self.currentScheme()
        s.setValue(currentKey, scheme)
        # clean up
        self._schemesToRemove = set()
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensor framework-agnostic utilities for manipulating nested structures."""
from typing import Sequence, List, TypeVar, Any
import numpy as np
import tree
ElementType = TypeVar('ElementType')
def METHOD_NAME(func, *structure):
    """Apply *func* leaf-wise across structures, skipping tree's error checks.

    All structures must share the same nesting; the last one is used as
    the template when the result structure is rebuilt.
    """
    flattened = [tree.flatten(s) for s in structure]
    mapped = [func(*leaves) for leaves in zip(*flattened)]
    return tree.unflatten_as(structure[-1], mapped)
def fast_map_structure_with_path(func, *structure):
    """Faster map_structure_with_path implementation.

    *func* receives (path, leaf_of_structure_0, leaf_of_structure_1, ...)
    for every leaf position; the last structure serves as the template
    for rebuilding the result.
    """
    head = tree.flatten_with_path(structure[0])
    if len(structure) > 1:
        tails = (tree.flatten(s) for s in structure[1:])
        # Each head entry is a (path, leaf) tuple; append the matching
        # leaves from the remaining structures.
        entries = [h + tuple(t) for h, *t in zip(head, *tails)]
    else:
        entries = head
    return tree.unflatten_as(structure[-1], [func(*e) for e in entries])
def stack_sequence_fields(sequence: Sequence[ElementType]) -> ElementType:
    """Stack a sequence of identically-nested objects leaf-wise.

    Given N structures with the same nesting, returns one structure whose
    i-th leaf is a numpy array stacking the i-th leaves of every input,
    with the new (batch) dimension leading.  For example, a list of
    per-step dicts like ``{'action': ..., 'reward': ...}`` becomes a
    single dict of batched arrays.

    Args:
      sequence: a non-empty list of identically nested objects.

    Returns:
      A nested object whose leaves are stacked numpy arrays.

    Raises:
      ValueError: If `sequence` is an empty sequence.
    """
    if not sequence:
        raise ValueError('Input sequence must not be empty')

    def _stack(*leaves):
        return np.stack(leaves)

    def _as_object_array(*leaves):
        # Fallback for ragged leaves, keeping the old asarray behaviour.
        return np.asarray(leaves, dtype=object)

    try:
        return METHOD_NAME(_stack, *sequence)
    except ValueError:
        return METHOD_NAME(_as_object_array, *sequence)
def unstack_sequence_fields(struct: ElementType,
                            batch_size: int) -> List[ElementType]:
    """Split a structure of batched arrays into a list of structures.

    This is effectively the inverse of `stack_sequence_fields`: the i-th
    output is `struct` with every leaf replaced by its i-th slice along
    the leading dimension.

    Args:
      struct: An (arbitrarily nested) structure of arrays.
      batch_size: The static length of each leaf's leading dimension.

    Returns:
      A list of `batch_size` structures shaped like `struct`.
    """
    result = []
    for index in range(batch_size):
        result.append(tree.map_structure(lambda leaf: leaf[index], struct))
    return result
def broadcast_structures(*args: Any) -> Any:
    """Give all *args the same nested structure.

    Any nested arguments must already share one structure.  Each
    non-nested argument is replaced by a structure of references to it
    matching that shape.  If no argument is nested, the inputs are
    returned unchanged.

    Example:
      broadcast_structures(('a', 'b'), 'c') -> (('a', 'b'), ('c', 'c'))

    Args:
      *args: A Sequence of nested or non-nested items.

    Returns:
      `*args`, except with all items sharing the same nest structure.
    """
    if not args:
        return

    reference = next((arg for arg in args if tree.is_nested(arg)), None)
    if reference is None:
        # Nothing is nested, so broadcasting would be a no-op.
        return args

    def _like_reference(value):
        if not tree.is_nested(value):
            return tree.map_structure(lambda _: value, reference)
        # check_types=True so the constructed trees don't depend on which
        # nested argument happened to be chosen as the reference.
        tree.assert_same_structure(value, reference, check_types=True)
        return value

    return tuple(_like_reference(arg) for arg in args)
def tree_map(f):
    """Transforms `f` into a tree-mapped version.

    The returned callable applies `f` to corresponding leaves of its
    (identically nested) arguments and rebuilds the structure.
    """
    def mapped_f(*structures):
        return tree.map_structure(f, *structures)
    return mapped_f
from typing import TYPE_CHECKING, Optional, Union
from PySide6.QtCore import QSize, Qt
from PySide6.QtGui import QFont, QStandardItem, QStandardItemModel
from PySide6.QtWidgets import QHBoxLayout, QHeaderView, QLabel, QTreeView, QTreeWidget, QVBoxLayout
from angrmanagement.config import Conf
from angrmanagement.logic.debugger import DebuggerWatcher
from angrmanagement.logic.debugger.bintrace import BintraceDebugger
from .view import BaseView
if TYPE_CHECKING:
from angr.knowledge_plugins import Function
try:
from bintrace import TraceEvent
except ImportError:
TraceEvent = "TraceEvent"
class CallTreeModel(QStandardItemModel):
    """
    Model for the call tree.
    """

    Headers = ["Function"]

    def hasChildren(self, index):
        # Lazily-populated items report their own expandability so the
        # view shows an expansion arrow before children are loaded.
        item: Optional[CallTreeItem] = self.itemFromIndex(index)
        if not isinstance(item, CallTreeItem):
            return super().hasChildren(index)
        return item.expandable

    def headerData(self, section, orientation, role):  # pylint:disable=unused-argument
        if role == Qt.DisplayRole and section < len(self.Headers):
            return self.Headers[section]
        return None
class CallTreeItem(QStandardItem):
    """
    Item in call tree representing a function.
    """

    def __init__(self, function, event):
        if isinstance(function, int):
            label = hex(function)
        else:
            label = function.name
        super().__init__(label)
        self.function: Union[int, Function] = function  # callee, or raw address
        self.event: TraceEvent = event  # trace event associated with the call
        self.populated: bool = False  # have children been loaded yet?
        self.expandable: bool = True  # whether the view shows an expand arrow
class CallExplorerView(BaseView):
    """
    Call Explorer view: shows the call tree for the debugger's current
    function and lets the user replay the trace to call sites.
    """

    def __init__(self, workspace, instance, default_docking_position, *args, **kwargs):
        super().__init__("call_explorer", workspace, instance, default_docking_position, *args, **kwargs)
        # Function whose calls are currently displayed; used to avoid
        # rebuilding the tree when the debugger stays in the same function.
        self._last_updated_func: Optional[Union[int, Function]] = None
        # Guards against re-entrant updates while we drive the debugger.
        self._inhibit_update: bool = False
        self.base_caption = "Call Explorer"
        self._tree: Optional[QTreeWidget] = None
        self._init_widgets()
        self.reload()
        self.width_hint = 500
        self.height_hint = 400
        self.updateGeometry()
        self._dbg_manager = instance.debugger_mgr
        self._dbg_watcher = DebuggerWatcher(self._on_debugger_state_updated, self._dbg_manager.debugger)
        # Populate immediately from the current debugger state.
        self._on_debugger_state_updated()

    @staticmethod
    def minimumSizeHint(*args, **kwargs):  # pylint:disable=unused-argument
        return QSize(200, 200)

    def _init_widgets(self):
        """Build the label + tree layout and wire up tree signals."""
        vlayout = QVBoxLayout()
        vlayout.setSpacing(0)
        vlayout.setContentsMargins(0, 0, 0, 0)
        self._top_level_function_level = QLabel()
        self._reset_function_label()
        hlayout = QHBoxLayout()
        hlayout.addWidget(self._top_level_function_level)
        hlayout.setContentsMargins(3, 3, 3, 3)
        vlayout.addLayout(hlayout)
        self._tree = QTreeView(self)
        self._model = CallTreeModel(self._tree)
        self._tree.setModel(self._model)
        self._tree.setFont(QFont(Conf.disasm_font))
        header = self._tree.header()
        header.setSectionResizeMode(QHeaderView.ResizeToContents)
        self._tree.expanded.connect(self._on_item_expanded)
        self._tree.clicked.connect(self._on_item_clicked)
        self._tree.doubleClicked.connect(self._on_item_double_clicked)
        vlayout.addWidget(self._tree)
        self.setLayout(vlayout)

    #
    # Events
    #

    def METHOD_NAME(self, event):
        # Stop watching debugger state before the view is destroyed.
        self._dbg_watcher.shutdown()
        super().METHOD_NAME(event)

    def _on_item_clicked(self, index):
        """
        Highlights the corresponding call site.
        """
        item = self._model.itemFromIndex(index)
        # Do not try to update on a single click. Allow user to browse through the call tree
        original_inhibit = self._inhibit_update
        self._inhibit_update = True
        # Replay up to just before call
        dbg = self.instance.debugger_mgr.debugger
        dbg.replay_to_event(dbg._btrace.get_prev_exec_event(item.event, vcpu=dbg._trace_dbg.vcpu))
        self._inhibit_update = original_inhibit

    def _on_item_double_clicked(self, index):
        """
        Navigates into the call.
        """
        item = self._model.itemFromIndex(index)
        # Replay after the jump, jumping into the called function
        # FIXME: Doesn't consider proper selected debugger, assumes bintrace
        dbg = self.instance.debugger_mgr.debugger
        dbg.replay_to_event(dbg._btrace.get_next_exec_event(item.event, vcpu=dbg._trace_dbg.vcpu))

    def _on_item_expanded(self, index):
        """
        Descend into call tree for this node.
        """
        expanding_item = self._model.itemFromIndex(index)
        if not expanding_item.populated:
            dbg = self.instance.debugger_mgr.debugger
            if dbg.am_none:
                return
            # Lazily fetch callees only on first expansion.
            called = dbg.get_called_functions(expanding_item.event)
            for func_or_addr, event in called:
                expanding_item.appendRow(CallTreeItem(func_or_addr, event))
            expanding_item.expandable = len(called) > 0
            expanding_item.populated = True

    def _on_debugger_state_updated(self):
        """
        Update current call state.
        """
        if self._inhibit_update:
            return
        dbg = self._dbg_watcher.debugger
        if isinstance(dbg.am_obj, BintraceDebugger):
            func = dbg.get_current_function()
            if func is not None:
                func = func[0]
        else:
            func = None
        # Skip the rebuild when we are still in the same function.
        if func is self._last_updated_func:
            return
        self._model.clear()
        self._last_updated_func = func
        if func is not None and isinstance(dbg.am_obj, BintraceDebugger):
            self._top_level_function_level.setText(f"Current function: {func.name}")
            for func, event in dbg.get_called_functions():
                self._model.appendRow(CallTreeItem(func, event))
        else:
            self._reset_function_label()

    def _reset_function_label(self):
        self._top_level_function_level.setText("Current function: Unknown")
from __future__ import absolute_import, division, print_function
# Storage methods
def no_storage(stream):
    """Storage policy that keeps nothing: hand *stream* back unchanged."""
    return stream
def METHOD_NAME(stream):
    """Storage policy that buffers the whole stream in memory.

    Reads *stream* to exhaustion and returns an in-memory, seekable
    file-like object holding the same bytes.  Uses ``io.BytesIO`` instead
    of the Python-2-only ``cStringIO`` so this works on Python 3 as well
    (the rest of this module already targets both via ``six``).
    """
    import io
    return io.BytesIO(stream.read())
class named_storage(object):
    """Storage policy that copies the stream into a caller-chosen file.

    The file is written once, then reopened for reading and returned.
    """

    def __init__(self, filename, binary=True):
        self.filename = filename
        # Appended to "w"/"r" to select binary mode.
        self.mode_suffix = "b" if binary else ""

    def __call__(self, stream):
        import shutil
        write_mode = "w" + self.mode_suffix
        read_mode = "r" + self.mode_suffix
        with open(self.filename, write_mode) as destination:
            shutil.copyfileobj(stream, destination)
        return open(self.filename, read_mode)
class persistent_storage(object):
    """Storage policy that derives the target file name from the stream's URL.

    *namer* maps a URL to a file path.  The stream is assumed to expose a
    ``url`` attribute (as urllib responses do — confirm for other sources).
    """

    def __init__(self, namer, binary=True):
        self.namer = namer
        self.mode_suffix = "b" if binary else ""

    def __call__(self, stream):
        target = self.namer(stream.url)
        import shutil
        with open(target, "w" + self.mode_suffix) as destination:
            shutil.copyfileobj(stream, destination)
        return open(target, "r" + self.mode_suffix)
class temporary_storage(object):
    """Storage policy that spools the stream into an anonymous temp file."""

    def __init__(self, binary):
        self.mode = "w+b" if binary else "w+"

    def __call__(self, stream):
        import tempfile
        import shutil
        spool = tempfile.TemporaryFile(self.mode)
        shutil.copyfileobj(stream, spool)
        # Rewind so the caller can read from the start.
        spool.seek(0)
        return spool
# Utility
class coupled_stream(object):
    """
    Couples associated streams so that they could be closed explicitly.

    Reads are delegated to *primary*; ``close()`` also closes every stream
    in *auxiliaries* (e.g. the storage backing a gzip wrapper).
    """

    def __init__(self, primary, auxiliaries):
        self.primary = primary
        self.auxiliaries = auxiliaries

    def close(self):
        self.primary.close()
        for stream in self.auxiliaries:
            stream.close()

    def read(self):
        return self.primary.read()

    def readline(self):
        return self.primary.readline()

    def readlines(self):
        return self.primary.readlines()

    def next(self):
        # Python 2 iterator protocol.
        return next(self.primary)

    # Python 3 iterator protocol.  Without this alias, iterating a
    # coupled_stream raised TypeError on Python 3 even though __iter__
    # returns self.
    __next__ = next

    def __iter__(self):
        return self

    def __repr__(self):
        return "<coupled primary:%r>" % self.primary
# Encodings
class encoding(object):
    """Base class for content encodings; subclasses define ``keyword``."""

    @classmethod
    def accept(cls, header):
        """Return True when the Content-Encoding *header* matches this class."""
        return cls.keyword == header
class identity_encoding(encoding):
    """Pass-through encoding; also accepts a missing Content-Encoding header."""

    keyword = "identity"

    def __init__(self, storage=no_storage):
        self.storage = storage

    def process(self, stream):
        return self.storage(stream=stream)

    @classmethod
    def accept(cls, header):
        # An absent header means "no encoding", i.e. identity.
        if not header:
            return True
        return super(identity_encoding, cls).accept(header=header)
class gzip_encoding(encoding):
    """Decodes gzip responses, buffering the raw bytes via *storage* first."""

    keyword = "gzip"

    def __init__(self, storage=METHOD_NAME):
        self.storage = storage

    def process(self, stream):
        import gzip
        backing = self.storage(stream=stream)
        # Couple the gzip reader with its backing store so that close()
        # releases both.
        return coupled_stream(
            primary=gzip.GzipFile(fileobj=backing),
            auxiliaries=[backing],
        )
class deflate_encoding_small(encoding):
    """Decodes deflate responses by inflating the whole payload at once.

    Suitable only for small bodies: the compressed payload and the inflated
    result are both held in memory simultaneously.
    """

    keyword = "deflate"

    def __init__(self, storage=no_storage):
        self.storage = storage

    def process(self, stream):
        import io
        import zlib
        backing = self.storage(stream=stream)
        data = zlib.decompress(backing.read())
        backing.close()
        # io.BytesIO replaces the Python-2-only cStringIO so this path also
        # works on Python 3 (zlib.decompress returns bytes).
        return io.BytesIO(data)
#Exceptions
class DownloadException(Exception):
    """
    Base class for all errors raised by the download helpers.
    """


class NotFound(DownloadException):
    """
    HTTP 404
    """


class NotAcceptable(DownloadException):
    """
    HTTP 406
    """


class ServerError(DownloadException):
    """
    HTTP 5XX
    """


class UnexpectedResponse(DownloadException):
    """
    Unexpected response from server
    """
def http_error_to_exception(error):
    """Map an HTTPError to the matching DownloadException instance."""
    code = error.code
    if code == 404:
        return NotFound()
    if code == 406:
        return NotAcceptable()
    if 500 <= code < 600:
        return ServerError()
    return UnexpectedResponse(str(error))
class urlopener(object):
    """
    Configurable version of openurl function
    """
    # NOTE(review): the mutable/instantiated default arguments are shared
    # across instances; harmless for the stateless encoding objects used
    # here, but worth knowing before adding state to an encoding.
    def __init__(self, identity = identity_encoding(), extras = [ gzip_encoding() ]):
        self.identity = identity
        # Maps encoding keyword -> handler; the keys double as the
        # Accept-encoding header we advertise.
        self.encoding_for = dict( ( ec.keyword, ec ) for ec in extras )

    def __call__(self, url, data = None):
        """Open *url* (POSTing *data* when given) and return a decoded stream.

        Advertises the configured encodings, then decodes the response with
        the handler matching the server's Content-Encoding header.  HTTP
        errors are translated into DownloadException subclasses.
        """
        from six.moves import urllib
        request = urllib.request.Request(
            url = url,
            data = data,
            headers = {
                "Accept-encoding": ", ".join( self.encoding_for ),
            },
        )
        try:
            stream = urllib.request.urlopen( request )
        except urllib.error.HTTPError as e:
            raise http_error_to_exception( error = e )
        used = stream.info().get( "Content-Encoding" )
        # Fall back to the identity handler for unknown/missing encodings;
        # its accept() then rejects anything truly unknown.
        encoding = self.encoding_for.get( used, self.identity )
        if not encoding.accept( header = used ):
            raise UnexpectedResponse("Unknown encoding: %s" % used)
        return encoding.process( stream = stream )


# Module-level convenience instance with the default encodings.
openurl = urlopener()
import pytest
from environments.models import Environment
from features.models import Feature
from organisations.models import Organisation, OrganisationRole
from projects.models import Project
from projects.tags.models import Tag
from users.models import FFAdminUser
@pytest.fixture()
def organisation_one(db):
return Organisation.objects.create(name="Test organisation 1")
@pytest.fixture()
def organisation_two(db):
return Organisation.objects.create(name="Test organisation 2")
@pytest.fixture()
def organisation_one_project_one(organisation_one):
return Project.objects.create(name="Test Project 1", organisation=organisation_one)
@pytest.fixture()
def organisation_one_project_two(organisation_one):
return Project.objects.create(name="Test Project 2", organisation=organisation_one)
@pytest.fixture()
def organisation_two_project_one(organisation_two):
return Project.objects.create(name="Test Project 1", organisation=organisation_two)
@pytest.fixture()
def organisation_two_project_two(organisation_two):
return Project.objects.create(name="Test Project 2", organisation=organisation_two)
@pytest.fixture()
def organisation_one_project_one_environment_one(organisation_one_project_one):
return Environment.objects.create(
name="Test Environment 1", project=organisation_one_project_one
)
@pytest.fixture()
def organisation_one_project_one_environment_two(organisation_one_project_one):
return Environment.objects.create(
name="Test Environment 2", project=organisation_one_project_one
)
@pytest.fixture()
def organisation_two_project_one_environment_one(organisation_two_project_one):
return Environment.objects.create(
name="Test Environment 1", project=organisation_two_project_one
)
@pytest.fixture()
def organisation_two_project_one_environment_two(organisation_two_project_one):
return Environment.objects.create(
name="Test Environment 2", project=organisation_two_project_one
)
@pytest.fixture()
def user_one():
return FFAdminUser.objects.create(email="test@example.com")
@pytest.fixture()
def organisation_one_user(user_one, organisation_one):
user_one.add_organisation(organisation_one)
return user_one
@pytest.fixture
def METHOD_NAME(organisation_one):
    """Admin user of organisation one.

    NOTE(review): the fixture name looks like an unexpanded
    ``METHOD_NAME`` placeholder — confirm against the tests that
    request this fixture before renaming.
    """
    admin = FFAdminUser.objects.create(email="org1_admin@example.com")
    admin.add_organisation(organisation_one, role=OrganisationRole.ADMIN)
    return admin
@pytest.fixture
def organisation_one_project_one_feature_one(organisation_one_project_one):
    """Feature "feature_1" in organisation one's project one."""
    feature = Feature.objects.create(
        name="feature_1",
        initial_value="feature_1_value",
        project=organisation_one_project_one,
    )
    return feature
@pytest.fixture
def dynamo_enabled_project(organisation):
    """Project with DynamoDB support switched on."""
    project = Project.objects.create(
        name="Dynamo enabled project",
        organisation=organisation,
        enable_dynamo_db=True,
    )
    return project
@pytest.fixture
def realtime_enabled_project(organisation_one):
    """Project (in organisation one) with realtime updates enabled."""
    project = Project.objects.create(
        name="Realtime enabled project",
        organisation=organisation_one,
        enable_realtime_updates=True,
    )
    return project
@pytest.fixture
def realtime_enabled_project_environment_one(realtime_enabled_project):
    """Environment 1 of the realtime-enabled project, with a fixed API key."""
    env = Environment.objects.create(
        name="Env 1 realtime",
        project=realtime_enabled_project,
        api_key="env-1-realtime-key",
    )
    return env
@pytest.fixture
def realtime_enabled_project_environment_two(realtime_enabled_project):
    """Environment 2 of the realtime-enabled project, with a fixed API key."""
    env = Environment.objects.create(
        name="Env 2 realtime",
        project=realtime_enabled_project,
        api_key="env-2-realtime-key",
    )
    return env
@pytest.fixture
def dynamo_enabled_project_environment_one(dynamo_enabled_project):
    """Environment 1 of the Dynamo-enabled project, with a fixed API key."""
    env = Environment.objects.create(
        name="Env 1",
        project=dynamo_enabled_project,
        api_key="env-1-key",
    )
    return env
@pytest.fixture
def dynamo_enabled_project_environment_two(dynamo_enabled_project):
    """Environment 2 of the Dynamo-enabled project, with a fixed API key."""
    env = Environment.objects.create(
        name="Env 2",
        project=dynamo_enabled_project,
        api_key="env-2-key",
    )
    return env
@pytest.fixture
def tag_one(project):
    """Tag "Test Tag" attached to the ``project`` fixture."""
    # NOTE(review): "#fffff" is five hex digits, not six — presumably
    # "#ffffff" was intended; kept byte-identical because tests may
    # assert the stored value.
    tag = Tag.objects.create(
        label="Test Tag",
        color="#fffff",
        description="Test Tag description",
        project=project,
    )
    return tag
@pytest.fixture
def tag_two(project):
    """Tag "Test Tag2" attached to the ``project`` fixture."""
    # NOTE(review): "#fffff" is five hex digits, not six — presumably
    # "#ffffff" was intended; kept byte-identical because tests may
    # assert the stored value.
    tag = Tag.objects.create(
        label="Test Tag2",
        color="#fffff",
        description="Test Tag2 description",
        project=project,
    )
    return tag
@pytest.fixture
def project_two(organisation: Organisation) -> Project:
    """A second project in the ``organisation`` fixture."""
    project = Project.objects.create(
        name="Test Project Two",
        organisation=organisation,
    )
    return project
@pytest.fixture
def environment_two(project: Project) -> Environment:
    """A second environment in the ``project`` fixture."""
    env = Environment.objects.create(
        name="Test Environment two",
        project=project,
    )
    return env
@pytest.fixture
def project_two_environment(project_two: Project) -> Environment:
    """An environment belonging to ``project_two``."""
    env = Environment.objects.create(
        name="Test Project two Environment",
        project=project_two,
    )
    return env
@pytest.fixture
def project_two_feature(project_two: Project) -> Feature:
    """Feature "project_two_feature" belonging to ``project_two``.

    Fix: removed a stray ``|`` after the closing paren and trailing
    non-Python extraction artifacts that made the module unparseable.
    """
    return Feature.objects.create(
        name="project_two_feature",
        project=project_two,
        initial_value="initial_value",
    )