id int64 0 300k | label stringlengths 1 74 ⌀ | text stringlengths 4k 8k |
|---|---|---|
4,500 | test validate input value real real | #!/usr/bin/env python
"""Perform unit-test on functions user for insert cli."""
import numbers
import pytest
from orion.core.cli.insert import _validate_input_value
from orion.core.io.space_builder import SpaceBuilder
@pytest.fixture()
def real_space():
    """Fixture: search space with one real dimension 'x' built from uniform(-10,20)."""
    return SpaceBuilder().build({"x": "uniform(-10,20)"})
@pytest.fixture()
def integer_space():
    """Fixture: search space with one integer dimension 'x' (uniform with discrete=True)."""
    return SpaceBuilder().build({"x": "uniform(-10,20,discrete=True)"})
@pytest.fixture()
def categorical_space():
    """Fixture: categorical space for 'x' mixing numeric and string choices."""
    return SpaceBuilder().build({"x": "choices([10.1,11,'12','string'])"})
def test_validate_input_value_real_real(real_space):
    """Test if real value passed to real space is validated properly.

    The placeholder name METHOD_NAME would not be collected by pytest;
    restored to the test name implied by its docstring and siblings.
    """
    namespace = "x"
    is_valid, casted_value = _validate_input_value("10.0", real_space, namespace)
    assert is_valid
    # A successfully validated real input must come back as a numeric type.
    assert isinstance(casted_value, numbers.Number)
def test_validate_input_value_real_integer(real_space):
    """An integer-formatted input given to a real space is accepted and cast to a number."""
    ok, value = _validate_input_value("10", real_space, "x")
    assert ok
    assert isinstance(value, numbers.Number)
def test_validate_input_value_real_string(real_space):
    """A non-numeric string given to a real space is rejected."""
    ok, _ = _validate_input_value("string", real_space, "x")
    assert not ok
def test_validate_input_value_real_out_of_bound(real_space):
    """Out-of-bound values are rejected whether written as float or int."""
    for raw in ("100.0", "100"):
        ok, _ = _validate_input_value(raw, real_space, "x")
        assert not ok
def test_validate_input_value_integer_real(integer_space):
    """A real-formatted input given to an integer space is accepted and cast to a number."""
    ok, value = _validate_input_value("10.0", integer_space, "x")
    assert ok
    assert isinstance(value, numbers.Number)
def test_validate_input_value_integer_integer(integer_space):
    """An integer input given to an integer space is accepted and cast to a number."""
    ok, value = _validate_input_value("10", integer_space, "x")
    assert ok
    assert isinstance(value, numbers.Number)
def test_validate_input_value_integer_string(integer_space):
    """A non-numeric string given to an integer space is rejected."""
    ok, _ = _validate_input_value("string", integer_space, "x")
    assert not ok
def test_validate_input_value_integer_out_of_bound(integer_space):
    """Out-of-bound values for an integer space are rejected in both notations."""
    for raw in ("100.0", "100"):
        ok, _ = _validate_input_value(raw, integer_space, "x")
        assert not ok
def test_validate_input_value_categorical_real_hit(categorical_space):
    """The real choice 10.1 is accepted and returned as a number."""
    ok, value = _validate_input_value("10.1", categorical_space, "x")
    assert ok
    assert isinstance(value, numbers.Number)
def test_validate_input_value_categorical_real_nohit(categorical_space):
    """Numeric inputs that are not among the categorical choices are rejected."""
    for raw in ("10", "10.0", "10.2"):
        ok, _ = _validate_input_value(raw, categorical_space, "x")
        assert not ok
def test_validate_input_value_categorical_integer_hit(categorical_space):
    """The integer choice 11 is accepted whether written as '11' or '11.0'."""
    for raw in ("11", "11.0"):
        ok, value = _validate_input_value(raw, categorical_space, "x")
        assert ok
        assert isinstance(value, numbers.Number)
def test_validate_input_value_categorical_integer_nohit(categorical_space):
    """An integer that is not one of the choices is rejected."""
    ok, _ = _validate_input_value("15", categorical_space, "x")
    assert not ok
def test_validate_input_value_categorical_string_number(categorical_space):
    """The quoted string '12' is a valid choice while the bare integer 12 is not."""
    # Integer 12 must not match the string choice '12'.
    ok, _ = _validate_input_value("12", categorical_space, "x")
    assert not ok
    # The explicitly quoted form matches and stays a string.
    ok, value = _validate_input_value("'12'", categorical_space, "x")
    assert ok
    assert isinstance(value, str)
def test_validate_input_value_categorical_string_value(categorical_space):
    """Only string values listed among the choices are accepted."""
    ok, _ = _validate_input_value("random", categorical_space, "x")
    assert not ok
    ok, value = _validate_input_value("string", categorical_space, "x")
    assert ok
    assert isinstance(value, str)
4,501 | test get queue | import os
import time
import uuid
import grpc
import pytest
from armada_client.client import ArmadaClient
from armada_client.k8s.io.api.core.v1 import generated_pb2 as core_v1
from armada_client.k8s.io.apimachinery.pkg.api.resource import (
generated_pb2 as api_resource,
)
from armada_client.typings import JobState
def submit_sleep_job(client):
    """Build a one-item job request list that runs `sleep 10s` in an alpine container.

    Resource requests equal limits and are kept tiny so the job can be
    scheduled quickly on any test cluster.  The returned list is suitable
    for `client.submit_jobs(job_request_items=...)`.
    """
    pod = core_v1.PodSpec(
        containers=[
            core_v1.Container(
                name="sleep",
                image="alpine:latest",
                args=["sleep", "10s"],
                resources=core_v1.ResourceRequirements(
                    requests={
                        "cpu": api_resource.Quantity(string="0.2"),
                        "memory": api_resource.Quantity(string="64Mi"),
                    },
                    limits={
                        "cpu": api_resource.Quantity(string="0.2"),
                        "memory": api_resource.Quantity(string="64Mi"),
                    },
                ),
            )
        ],
    )
    return [
        client.create_job_request_item(
            priority=0, namespace="personal-anonymous", pod_spec=pod
        )
    ]
def wait_for(client: ArmadaClient, queue, job_set_id=None):
    """
    Waits for a queue and optionally the job_set_id to be active.
    Ensures that following steps will not fail.

    Polls once per second for up to 20 attempts.  A NOT_FOUND response means
    the resource is not registered yet, so polling continues; any other gRPC
    error is propagated unchanged.

    Raises:
        grpc.RpcError: for any non-NOT_FOUND gRPC failure.
        Exception: if the resource is still absent after the timeout.
    """
    timeout = 20
    while True:
        try:
            # Probe the queue; raises NOT_FOUND until it becomes active.
            client.get_queue(name=queue)
            if job_set_id:
                events = client.get_job_events_stream(
                    queue=queue, job_set_id=job_set_id
                )
                # Reading a single event proves the job set is visible.
                for _ in events:
                    break
            return True
        except grpc.RpcError as e:
            code = e.code()
            if code != grpc.StatusCode.NOT_FOUND:
                # Bare `raise` preserves the original traceback (was `raise e`).
                raise
            timeout -= 1
            time.sleep(1)
            if timeout <= 0:
                raise Exception("Timeout")
@pytest.fixture(scope="session", name="client")
def no_auth_client() -> ArmadaClient:
    """Session-wide Armada client configured from ARMADA_* environment variables."""
    host = os.environ.get("ARMADA_SERVER", "localhost")
    port = os.environ.get("ARMADA_PORT", "50051")
    ssl_flag = os.environ.get("ARMADA_SSL", "false")
    target = f"{host}:{port}"
    if ssl_flag.lower() == "true":
        channel = grpc.secure_channel(target, grpc.ssl_channel_credentials())
    else:
        channel = grpc.insecure_channel(target)
    return ArmadaClient(channel=channel)
@pytest.fixture(scope="session", name="queue_name")
def get_queue():
    """Session-unique queue name so parallel test runs never collide."""
    unique = uuid.uuid1()
    return "queue-{}".format(unique)
@pytest.fixture(scope="session", autouse=True)
def create_queue(client: ArmadaClient, queue_name):
    """Create the shared test queue once per session and wait until it is active."""
    queue = client.create_queue_request(name=queue_name, priority_factor=1)
    client.create_queue(queue)
    # Block until the server reports the queue, so tests can rely on it.
    wait_for(client, queue=queue_name)
def test_batch_update_and_create_queues(client: ArmadaClient):
    """Batch-create two queues, then batch-update their priority factors."""
    # Need to separately create queue name so that it is not
    # automatically created by the fixture.
    queue_name1 = f"queue-{uuid.uuid1()}"
    queue_name2 = f"queue-{uuid.uuid1()}"
    queue1 = client.create_queue_request(name=queue_name1, priority_factor=1)
    queue2 = client.create_queue_request(name=queue_name2, priority_factor=1)
    client.create_queues([queue1, queue2])
    queue1 = client.get_queue(name=queue_name1)
    queue2 = client.get_queue(name=queue_name2)
    # Both queues were created with the same factor, so they must agree.
    assert queue1.priority_factor == queue2.priority_factor
    updated_queue1 = client.create_queue_request(name=queue_name1, priority_factor=2)
    updated_queue2 = client.create_queue_request(name=queue_name2, priority_factor=2)
    client.update_queues([updated_queue1, updated_queue2])
    queue1 = client.get_queue(name=queue_name1)
    queue2 = client.get_queue(name=queue_name2)
    # After the batch update both factors should again match.
    assert queue1.priority_factor == queue2.priority_factor
def test_get_queue(client: ArmadaClient, queue_name):
    """The queue created by the autouse fixture can be fetched by name.

    The placeholder METHOD_NAME would not be collected by pytest; restored
    to the name implied by its behavior and the surrounding tests.
    """
    queue = client.get_queue(name=queue_name)
    assert queue.name == queue_name
def test_get_queue_info(client: ArmadaClient, queue_name):
    """Queue info reports the expected name and no active job sets yet."""
    info = client.get_queue_info(name=queue_name)
    assert info.name == queue_name
    assert not info.active_job_sets
def test_submit_job_and_cancel_by_id(client: ArmadaClient, queue_name):
    """Submit a sleep job and cancel it via the id in the submit response."""
    job_set_name = f"set-{uuid.uuid1()}"
    jobs = client.submit_jobs(
        queue=queue_name,
        job_set_id=job_set_name,
        job_request_items=submit_sleep_job(client),
    )
    # Make sure the job set is visible before attempting the cancel.
    wait_for(client, queue=queue_name, job_set_id=job_set_name)
    cancelled_message = client.cancel_jobs(job_id=jobs.job_response_items[0].job_id)
    # The cancellation reply must echo back the id we asked to cancel.
    assert cancelled_message.cancelled_ids[0] == jobs.job_response_items[0].job_id
def test_submit_job_and_cancel_by_job_id(client: ArmadaClient, queue_name):
    """Submit a sleep job and cancel it via a locally stored job id.

    NOTE(review): near-duplicate of test_submit_job_and_cancel_by_id; the
    only difference is binding the id before waiting — presumably kept to
    cover both call shapes.  Confirm before consolidating.
    """
    job_set_name = f"set-{uuid.uuid1()}"
    jobs = client.submit_jobs(
        queue=queue_name,
        job_set_id=job_set_name,
        job_request_items=submit_sleep_job(client),
    )
    job_id = jobs.job_response_items[0].job_id
    wait_for(client, queue=queue_name, job_set_id=job_set_name)
    cancelled_message = client.cancel_jobs(job_id=job_id)
    assert cancelled_message.cancelled_ids[0] == job_id
def test_submit_job_and_cancelling_with_filter(client: ArmadaClient, queue_name):
    """Cancel a whole job set, restricted to RUNNING/PENDING jobs.

    Only checks that the call succeeds without raising; there is no
    assertion on the cancellation result.
    """
    job_set_name = f"set-{uuid.uuid4()}"
    client.submit_jobs(
        queue=queue_name,
        job_set_id=job_set_name,
        job_request_items=submit_sleep_job(client),
    )
    wait_for(client, queue=queue_name, job_set_id=job_set_name)
    client.cancel_jobset(
        queue=queue_name,
        job_set_id=job_set_name,
        filter_states=[JobState.RUNNING, JobState.PENDING],
    )
def test_get_job_events_stream(client: ArmadaClient, queue_name):
    """At least one event is observable on a freshly submitted job set."""
    job_set_name = f"set-{uuid.uuid1()}"
    client.submit_jobs(
        queue=queue_name,
        job_set_id=job_set_name,
        job_request_items=submit_sleep_job(client),
    )
    wait_for(client, queue=queue_name, job_set_id=job_set_name)
    stream = client.get_job_events_stream(
        queue=queue_name, job_set_id=job_set_name
    )
    # Avoiding fickle tests: pass as soon as any single event shows up.
    assert next(iter(stream), None) is not None
4,502 | test optimizer | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import unittest
import numpy as np
import oneflow
import oneflow as flow
import oneflow.unittest
@flow.unittest.skip_unless_1n1d()
class TestGraphOptimizer(flow.unittest.TestCase):
    """Tests that optimizers and LR schedulers attach correctly to nn.Graph.

    The first method was the masked placeholder METHOD_NAME, which unittest
    would never discover; restored to test_optimizer.
    """

    def test_optimizer(test_case):
        """Attach a single SGD + cosine LR scheduler to a graph and compile it."""

        class CustomModule(flow.nn.Module):
            def __init__(self):
                super().__init__()
                self.para0 = flow.nn.Parameter(flow.Tensor(10, 4))

            def forward(self, x):
                x = flow._C.matmul(x, self.para0)
                return x

        m = CustomModule()
        learning_rate = 0.1
        momentum = 0.2
        weight_decay = 0.7
        sgd0 = flow.optim.SGD(
            [
                {
                    "params": [m.para0],
                    "lr": learning_rate,
                    "momentum": momentum,
                    "weight_decay": weight_decay,
                }
            ]
        )
        # Scheduler is constructed but intentionally not passed to
        # add_optimizer here; the multi-optimizer test covers lr_sch.
        cosine_lr = flow.optim.lr_scheduler.CosineDecayLR(
            sgd0, decay_steps=100, alpha=0.1
        )

        class CustomGraph0(flow.nn.Graph):
            def __init__(self):
                super().__init__()
                self.m = m
                self.add_optimizer(sgd0)

            def build(self, x):
                out = self.m(x)
                out = out.mean()
                out.backward()
                return out

        g = CustomGraph0()
        x = flow.Tensor(4, 10)
        flow.nn.init.uniform_(x, a=-1.0, b=1.0)
        z = g._compile(x)
        print("repr(g): \n", repr(g))
        print("g.config.proto: \n", g.config.proto)
        print("graph proto: \n", g._graph_proto)

    def test_multi_optimizer_conf(test_case):
        """Two optimizers with distinct warm-up/cosine schedulers on one graph."""

        class CustomModule(flow.nn.Module):
            def __init__(self):
                super().__init__()
                self.para0 = flow.nn.Parameter(flow.Tensor(1, 4))
                self.para1 = flow.nn.Parameter(flow.Tensor(1, 4))
                self.para2 = flow.nn.Parameter(flow.Tensor(1, 4))
                # para2 deliberately excluded from gradient updates.
                self.para2.requires_grad_(False)
                self.para3 = flow.nn.Parameter(flow.Tensor(1, 4))
                self.para4 = flow.nn.Parameter(flow.Tensor(1, 4))

            def forward(self, x):
                x = flow._C.matmul(self.para0, x)
                y = flow._C.matmul(self.para3, x)
                return x, y

        m = CustomModule()
        learning_rate = 0.1
        momentum = 0.2
        sgd0 = flow.optim.SGD(
            [
                {
                    "params": [m.para0, m.para1, m.para2],
                    "lr": learning_rate,
                    "momentum": momentum,
                    "weight_decay": 0.3,
                }
            ]
        )
        sgd1 = flow.optim.SGD(
            [
                {
                    "params": [m.para3],
                    "lr": learning_rate,
                    "momentum": momentum,
                    "weight_decay": 0.4,
                },
                {
                    "params": [m.para4],
                    "lr": learning_rate,
                    "momentum": 0.9,
                    "weight_decay": 0.5,
                },
            ]
        )
        cosine_lr0 = flow.optim.lr_scheduler.CosineDecayLR(
            sgd0, decay_steps=10, alpha=0.01
        )
        constant_warmup_cosine_lr0 = flow.optim.lr_scheduler.WarmUpLR(
            cosine_lr0, warmup_factor=0.5, warmup_iters=5, warmup_method="constant"
        )
        cosine_lr1 = flow.optim.lr_scheduler.CosineDecayLR(
            sgd1, decay_steps=100, alpha=0.1
        )
        linear_warmup_cosine_lr1 = flow.optim.lr_scheduler.WarmUpLR(
            cosine_lr1, warmup_factor=0.5, warmup_iters=5, warmup_method="linear"
        )

        class CustomGraph0(flow.nn.Graph):
            def __init__(self):
                super().__init__()
                self.m = m
                self.add_optimizer(sgd0, lr_sch=constant_warmup_cosine_lr0)
                self.add_optimizer(sgd1, lr_sch=linear_warmup_cosine_lr1)

            def build(self, x):
                out0, out1 = self.m(x)
                out0.backward()
                out1.backward()
                return out0, out1

        g = CustomGraph0()
        x = flow.Tensor(4, 10)
        flow.nn.init.uniform_(x, a=-1.0, b=1.0)
        # Only generate the config; no full compile needed for this check.
        g._filter_states()
        g._generate_config_proto()
        print("repr(g): \n", repr(g))
        print("g.config.proto: \n", g.config.proto)

    @unittest.skip("skip for now, becase it failed 2 times in past week")
    def test_optimizer_with_clip_grad(test_case):
        """SGD with grad-clipping options compiles into the graph config."""

        class CustomModule(flow.nn.Module):
            def __init__(self):
                super().__init__()
                self.para0 = flow.nn.Parameter(flow.Tensor(10, 4))

            def forward(self, x):
                x = flow._C.matmul(x, self.para0)
                return x

        m = CustomModule()
        learning_rate = 0.1
        momentum = 0.2
        scale = 0.3
        weight_decay = 0.7
        clip_grad_max_norm = 1.0
        clip_grad_norm_type = 2.0
        sgd0 = flow.optim.SGD(
            [
                {
                    "params": [m.para0],
                    "lr": learning_rate,
                    "momentum": momentum,
                    "scale": scale,
                    "weight_decay": weight_decay,
                    "clip_grad_max_norm": clip_grad_max_norm,
                    "clip_grad_norm_type": clip_grad_norm_type,
                }
            ]
        )

        class CustomGraph0(flow.nn.Graph):
            def __init__(self):
                super().__init__()
                self.m = m
                self.add_optimizer(sgd0)

            def build(self, x):
                out = self.m(x)
                out = out.sum()
                out.backward()
                return out

        g = CustomGraph0()
        x = flow.Tensor(4, 10)
        flow.nn.init.uniform_(x, a=-1.0, b=1.0)
        z = g._compile(x)
        print("repr(g): \n", repr(g))
        print("g.config.proto: \n", g.config.proto)
        print("graph proto: \n", g._graph_proto)
# Standard unittest entry point; stray table artifact ("|") removed.
if __name__ == "__main__":
    unittest.main()
4,503 | test fill last interval | #!/usr/bin/env python3
###############################################################################
#
# Copyright 2016 - 2021, Thomas Lauf, Paul Beckingham, Federico Hernandez.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# https://www.opensource.org/licenses/mit-license.php
#
###############################################################################
import os
import sys
import unittest
# Ensure python finds the local simpletap module
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from basetest import Timew, TestCase
class TestFillHint(TestCase):
    """Behavior of the :fill hint on `track` and `start` commands."""

    def setUp(self):
        """Executed before each test in the class"""
        self.t = Timew()

    def test_unfilled_track_in_gap(self):
        """Add closed interval into a gap without fill"""
        self.t("track 20160709T050000Z - 20160709T060000Z one")
        self.t("track 20160709T090000Z - 20160709T100000Z three")
        # Without :fill the new interval keeps its given bounds untouched.
        code, out, err = self.t("track 20160709T070000Z - 20160709T080000Z two")
        self.assertNotIn('Backfilled to ', out)
        self.assertNotIn('Filled to ', out)
        j = self.t.export()
        self.assertEqual(len(j), 3)
        self.assertClosedInterval(j[0],
                                  expectedStart="20160709T050000Z",
                                  expectedEnd="20160709T060000Z",
                                  expectedTags=["one"])
        self.assertClosedInterval(j[1],
                                  expectedStart="20160709T070000Z",
                                  expectedEnd="20160709T080000Z",
                                  expectedTags=["two"])
        self.assertClosedInterval(j[2],
                                  expectedStart="20160709T090000Z",
                                  expectedEnd="20160709T100000Z",
                                  expectedTags=["three"])

    def test_filled_track_in_gap(self):
        """Add closed interval into a gap with fill"""
        self.t("track 20160709T050000Z - 20160709T060000Z one")
        self.t("track 20160709T090000Z - 20160709T100000Z three")
        # :fill expands the new interval to both neighbours' edges.
        code, out, err = self.t("track 20160709T070000Z - 20160709T080000Z two :fill")
        self.assertIn('Backfilled to ', out)
        self.assertIn('Filled to ', out)
        j = self.t.export()
        self.assertEqual(len(j), 3)
        self.assertClosedInterval(j[0],
                                  expectedStart="20160709T050000Z",
                                  expectedEnd="20160709T060000Z",
                                  expectedTags=["one"])
        # Middle interval now stretches 06:00 - 09:00, consuming both gaps.
        self.assertClosedInterval(j[1],
                                  expectedStart="20160709T060000Z",
                                  expectedEnd="20160709T090000Z",
                                  expectedTags=["two"])
        self.assertClosedInterval(j[2],
                                  expectedStart="20160709T090000Z",
                                  expectedEnd="20160709T100000Z",
                                  expectedTags=["three"])

    def test_filled_start(self):
        """Add an open interval with fill"""
        self.t("track 20160710T100000Z - 20160710T110000Z one")
        # Only backfill applies to an open interval: its start is pulled
        # back to the previous interval's end; there is no end to fill.
        code, out, err = self.t("start 20160710T113000Z two :fill")
        self.assertIn('Backfilled to ', out)
        self.assertNotIn('Filled to ', out)
        j = self.t.export()
        self.assertEqual(len(j), 2)
        self.assertClosedInterval(j[0],
                                  expectedStart="20160710T100000Z",
                                  expectedEnd="20160710T110000Z",
                                  expectedTags=["one"])
        self.assertOpenInterval(j[1],
                                expectedStart="20160710T110000Z",
                                expectedTags=["two"])
class TestFillCommand(TestCase):
    """Tests around the (currently disabled) fill command and TI-75.

    The last method was the masked placeholder METHOD_NAME, which the
    runner would never discover; restored to test_fill_last_interval.
    """

    def setUp(self):
        """Executed before each test in the class"""
        self.t = Timew()

    # Kept disabled: documents the intended `fill` command behavior.
    # def test_fill_command(self):
    #     """Create gaps, then fill them"""
    #     self.t("track 20160724T090000 - 20160724T100000 foo")
    #     # 11:12 gap here.
    #     self.t("track 20160724T101112 - 20160724T101213 bar")
    #     # 47:47 gap here.
    #     self.t("track 20160724T110000 - 20160724T120000 baz")
    #
    #     # Eliminate gaps.
    #     code, out, err = self.t("fill @2")
    #     self.assertIn('Backfilled @2 to 2016-07-24T10:00:00', out)
    #     self.assertIn('Filled @2 to 2016-07-24T11:00:00', out)

    def test_fill_last_interval(self):
        """TI-75: The :fill hint not properly detecting the last interval"""
        self.t("track 20170805T0100 - 20170805T0200 tag1")
        self.t("track 20170805T0200 - 20170805T0300 tag2")
        # Gap 0300 - 0400
        self.t("start 20170805T0400 tag3")
        code, out, err = self.t("summary :ids")
        self.tap(out)
        self.tap(err)
        # No assertions beyond "does not crash": output is TAP-reported only.
        code, out, err = self.t("track :fill 20170805T0300 - 20170805T0330 tag4")
        self.tap(out)
        self.tap(err)
# TAP-style runner entry point; stray table artifact ("|") removed.
if __name__ == "__main__":
    from simpletap import TAPTestRunner
    unittest.main(testRunner=TAPTestRunner())
4,504 | update doc strings | # ___________________________________________________________________________
#
# Pyomo: Python Optimization Modeling Objects
# Copyright (c) 2008-2022
# National Technology and Engineering Solutions of Sandia, LLC
# Under the terms of Contract DE-NA0003525 with National Technology and
# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
# rights in this software.
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
#
# Test the path solver
#
import os
import pyomo.common.unittest as unittest
from pyomo.common.dependencies import yaml, yaml_available, yaml_load_args
from pyomo.common.fileutils import this_file_dir, PYOMO_ROOT_DIR
import pyomo.opt
import pyomo.scripting.pyomo_main as pyomo_main
from pyomo.scripting.util import cleanup
# Ensure plugins are registered
import pyomo.environ
# Directory of this test file and location of the MPEC examples it drives.
currdir = this_file_dir()
exdir = os.path.join(PYOMO_ROOT_DIR, 'examples', 'mpec')
# Solve tests only run when the PATH solver executable is available.
solvers = pyomo.opt.check_available_solvers('path')
class CommonTests:
    """Mixin driving the pyomo command line against MPEC examples with PATH.

    Changes from the original: the masked placeholder METHOD_NAME is
    restored to update_doc_strings (matching what it does), and
    getObjective now uses a context manager so the file handle cannot leak.
    """

    solve = True
    solver = 'path'

    def run_solver(self, *_args, **kwds):
        """Run `pyomo solve` (or `pyomo convert` when self.solve is False).

        Returns the pyomo_main output, or None when pyomo exits via
        SystemExit.  Any other exception is re-raised after clearing output.
        """
        if self.solve:
            args = ['solve']
            args.append('--solver=' + self.solver)
            args.append('--save-results=result.yml')
            args.append('--results-format=yaml')
            args.append('--solver-options="lemke_start=automatic output_options=yes"')
        else:
            args = ['convert']
            args.append('-c')
            args.append('--symbolic-solver-labels')
            args.append('--file-determinism=2')
        if False:
            # Debug switches: flip the guard above to enable verbose runs.
            args.append('--stream-solver')
            args.append('--tempdir=' + currdir)
            args.append('--keepfiles')
            args.append('--logging=debug')
        args = args + list(_args)
        os.chdir(currdir)
        print('***')
        print(' '.join(args))
        try:
            output = pyomo_main.main(args)
        except SystemExit:
            output = None
        except:
            # Clear output, then propagate the original exception.
            output = None
            raise
        cleanup()
        print('***')
        return output

    def referenceFile(self, problem, solver):
        """Path of the expected-results file for *problem* (solver unused)."""
        return os.path.join(currdir, problem + '.txt')

    def getObjective(self, fname):
        """Return the list of Objective dicts from a YAML results file."""
        with open(fname, 'r') as FILE:
            data = yaml.load(FILE, **yaml_load_args)
        solutions = data.get('Solution', [])
        ans = []
        for x in solutions:
            ans.append(x.get('Objective', {}))
        return ans

    def update_doc_strings(self):
        """Tag each test method's docstring with its own name (for reporting)."""
        for key in dir(self):
            if key.startswith('test'):
                getattr(self, key).__doc__ = " (%s)" % getattr(self, key).__name__

    def test_munson1a(self):
        self.problem = 'test_munson1a'
        self.run_solver(os.path.join(exdir, 'munson1a.py'))
        self.check('munson1a', self.solver)

    def test_munson1b(self):
        self.problem = 'test_munson1b'
        self.run_solver(os.path.join(exdir, 'munson1b.py'))
        self.check('munson1b', self.solver)

    def test_munson1c(self):
        self.problem = 'test_munson1c'
        self.run_solver(os.path.join(exdir, 'munson1c.py'))
        self.check('munson1c', self.solver)

    def test_munson1d(self):
        self.problem = 'test_munson1d'
        self.run_solver(os.path.join(exdir, 'munson1d.py'))
        self.check('munson1d', self.solver)

    def check(self, problem, solver):
        """Compare result.yml objectives against the reference file to 2 places."""
        refObj = self.getObjective(self.referenceFile(problem, solver))
        ansObj = self.getObjective(os.path.join(currdir, 'result.yml'))
        self.assertEqual(len(refObj), len(ansObj))
        for i in range(len(refObj)):
            self.assertEqual(len(refObj[i]), len(ansObj[i]))
            if isinstance(refObj[i], str):
                continue
            for key, val in refObj[i].items():
                self.assertAlmostEqual(
                    val['Value'], ansObj[i].get(key, None)['Value'], places=2
                )
@unittest.skipIf(not yaml_available, "YAML is not available")
@unittest.skipIf('path' not in solvers, "The 'path' executable is not available")
class Solve_PATH(unittest.TestCase, CommonTests):
    """Run the CommonTests suite with the real PATH solver.

    Uses the idiomatic `x not in y` (was `not x in y`).
    """

    def tearDown(self):
        # Remove the results file so runs never see a stale result.yml.
        if os.path.exists(os.path.join(currdir, 'result.yml')):
            os.remove(os.path.join(currdir, 'result.yml'))
# Standard unittest entry point; stray table artifact ("|") removed.
if __name__ == "__main__":
    unittest.main()
4,505 | test delete user member | # This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# Standard Library
import logging
# Django
from django.contrib.auth.models import User
from django.urls import reverse
# wger
from wger.core.tests.base_testcase import WgerTestCase
logger = logging.getLogger(__name__)
class DeleteUserTestCase(WgerTestCase):
    """
    Tests deleting the user account and all his data
    """

    def delete_user(self, fail=False):
        """
        Helper function

        Drives the delete flow: GET the confirmation page, POST a wrong
        password (expected to be rejected), then POST the real password.
        With fail=True the caller expects the whole flow to be refused
        (anonymous user) and the account to survive.
        """
        response = self.client.get(reverse('core:user:delete'))
        self.assertEqual(User.objects.filter(username='test').count(), 1)
        if fail:
            # Anonymous access redirects (e.g. to login).
            self.assertEqual(response.status_code, 302)
        else:
            self.assertEqual(response.status_code, 200)

        # Wrong user password
        if not fail:
            response = self.client.post(
                reverse('core:user:delete'),
                {'password': 'not the user password'},
            )
            # Form is re-rendered and the account is untouched.
            self.assertEqual(response.status_code, 200)
            self.assertEqual(User.objects.filter(username='test').count(), 1)

        # Correct user password
        response = self.client.post(reverse('core:user:delete'), {'password': 'testtest'})
        self.assertEqual(response.status_code, 302)
        if fail:
            self.assertEqual(User.objects.filter(username='test').count(), 1)
        else:
            # Successful deletion removes the account entirely.
            self.assertEqual(User.objects.filter(username='test').count(), 0)

    def test_delete_user_logged_in(self):
        """
        Tests deleting the own account as a logged in user
        """
        self.user_login('test')
        self.delete_user(fail=False)

    def test_delete_user_anonymous(self):
        """
        Tests deleting the own account as an anonymous user
        """
        self.delete_user(fail=True)
class DeleteUserByAdminTestCase(WgerTestCase):
    """
    Tests deleting a user account by a gym administrator

    One method was the masked placeholder METHOD_NAME, which the test
    runner would never discover; restored to test_delete_user_member to
    match its sibling test_delete_user_member2.
    """

    def delete_user(self, fail=False):
        """
        Helper function

        Drives the admin delete flow for user pk=2: GET the confirmation
        page, POST a wrong password, then POST the current user's real
        password.  fail=True means the logged-in user lacks permission and
        the target account must survive.
        """
        response = self.client.get(reverse('core:user:delete', kwargs={'user_pk': 2}))
        self.assertEqual(User.objects.filter(username='test').count(), 1)
        if fail:
            # Either redirected away or explicitly forbidden.
            self.assertIn(
                response.status_code, (302, 403),
                f'Unexpected status code for user {self.current_user}'
            )
        else:
            self.assertEqual(
                response.status_code, 200, f'Unexpected status code for user {self.current_user}'
            )

        # Wrong admin password
        if not fail:
            response = self.client.post(
                reverse('core:user:delete', kwargs={'user_pk': 2}), {'password': 'blargh'}
            )
            self.assertEqual(response.status_code, 200)
            self.assertEqual(User.objects.filter(username='test').count(), 1)

        # Correct user password
        response = self.client.post(
            reverse('core:user:delete', kwargs={'user_pk': 2}), {'password': self.current_password}
        )
        if fail:
            self.assertIn(response.status_code, (302, 403))
            self.assertEqual(User.objects.filter(username='test').count(), 1)
        else:
            self.assertEqual(response.status_code, 302)
            self.assertEqual(User.objects.filter(username='test').count(), 0)

    def test_delete_user_manager(self):
        """
        Tests deleting the user account as a gym manager
        """
        self.user_login('manager1')
        self.delete_user(fail=False)

    def test_delete_user_manager2(self):
        """
        Tests deleting the user account as a gym manager
        """
        self.user_login('manager2')
        self.delete_user(fail=False)

    def test_delete_user_general_manager(self):
        """
        Tests deleting the user account as a general manager
        """
        self.user_login('general_manager1')
        self.delete_user(fail=False)

    def test_delete_user_general_manager2(self):
        """
        Tests deleting the user account as a general manager
        """
        self.user_login('general_manager2')
        self.delete_user(fail=False)

    def test_delete_user(self):
        """
        Tests deleting the user account as a regular user
        """
        self.user_login('test')
        self.delete_user(fail=True)

    def test_delete_user_trainer(self):
        """
        Tests deleting the user account as a gym trainer
        """
        self.user_login('trainer1')
        self.delete_user(fail=True)

    def test_delete_user_trainer2(self):
        """
        Tests deleting the user account as a gym trainer
        """
        self.user_login('trainer4')
        self.delete_user(fail=True)

    def test_delete_user_trainer_other(self):
        """
        Tests deleting the user account as a gym trainer of another gym
        """
        self.user_login('trainer4')
        self.delete_user(fail=True)

    def test_delete_user_manager_other(self):
        """
        Tests deleting the user account as a gym manager of another gym
        """
        self.user_login('manager3')
        self.delete_user(fail=True)

    def test_delete_user_member(self):
        """
        Tests deleting the user account as a gym member
        """
        self.user_login('member1')
        self.delete_user(fail=True)

    def test_delete_user_member2(self):
        """
        Tests deleting the user account as a gym member
        """
        self.user_login('member4')
        self.delete_user(fail=True)

    def test_delete_user_anonymous(self):
        """
        Tests deleting the user account as an anonymous user
        """
        self.delete_user(fail=True)
4,506 | is delta set | from time import time
import pytz
from _datetime import datetime, timedelta
from typing import Optional, Tuple
from pydantic import BaseModel
from enum import Enum
from tracardi.service.time import parse_date, parse_date_delta
class DatetimeType(str, Enum):
    """Granularity units accepted for relative date deltas."""
    second = 'second'
    minute = 'minute'
    hour = 'hour'
    day = 'day'
    week = 'week'
    month = 'month'
    year = 'year'
class DateDeltaPayload(BaseModel):
    """A relative time offset: a signed magnitude plus a unit.

    Fix: a year was previously approximated as 356 days — an apparent typo
    for 365.  Months remain approximated as 31 days as before.
    """

    # value may be negative; the sign is applied by the caller
    # (see DatePayload._get_delta), so get_delta uses abs(value).
    value: int
    entity: DatetimeType

    def get_delta(self) -> Optional[timedelta]:
        """Return the unsigned timedelta for this payload, or None if the unit is unknown.

        Plural spellings are accepted too — presumably raw payloads may
        carry them even though DatetimeType only declares singular forms.
        """
        entity = self.entity
        value = abs(self.value)
        if entity in ['second', 'seconds']:
            return timedelta(seconds=value)
        elif entity in ['minute', 'minutes']:
            return timedelta(minutes=value)
        elif entity in ['hour', 'hours']:
            return timedelta(hours=value)
        elif entity in ['day', 'days']:
            return timedelta(days=value)
        elif entity in ['week', 'weeks']:
            return timedelta(weeks=value)
        elif entity in ['month', 'months']:
            # Rough approximation: a month counted as 31 days.
            return timedelta(days=value * 31)
        elif entity in ['year', 'years']:
            # Was 356 — corrected to the conventional 365-day year.
            return timedelta(days=value * 365)
        return None
class DatetimePayload(BaseModel):
    """An absolute calendar timestamp broken into components.

    Note the naming quirk: `date` holds the day of month.
    """

    second: Optional[int] = None
    minute: Optional[int] = None
    hour: Optional[int] = None
    date: Optional[int] = None
    month: Optional[int] = None
    year: Optional[int] = None
    meridiem: Optional[str] = None  # "AM"/"PM" as produced by strftime("%p")
    timeZone: int = 0  # offset used only for display in __str__

    @staticmethod
    def now() -> 'DatetimePayload':
        """Build a payload from the current UTC time."""
        now = datetime.utcnow()
        return DatetimePayload(year=now.year, month=now.month, date=now.day,
                               hour=now.hour, minute=now.minute, second=now.second,
                               meridiem=now.strftime("%p"))

    @staticmethod
    def build(date: datetime) -> 'DatetimePayload':
        """Build a payload from an existing datetime object."""
        return DatetimePayload(year=date.year, month=date.month, date=date.day,
                               hour=date.hour, minute=date.minute, second=date.second,
                               meridiem=date.strftime("%p"))

    def is_set(self) -> bool:
        """True when every component (including meridiem) is present."""
        return self.year is not None \
               and self.month is not None \
               and self.date is not None \
               and self.hour is not None \
               and self.minute is not None \
               and self.second is not None \
               and self.meridiem is not None

    def get_date(self) -> Optional[datetime]:
        """Return the naive datetime, or None when any component is missing.

        NOTE(review): `hour` is used as-is — meridiem and timeZone are not
        applied here, so `hour` is presumably already 24-hour; confirm.
        (Annotation fixed: was the invalid form `[datetime, None]`.)
        """
        if self.is_set():
            return datetime(year=self.year,
                            month=self.month,
                            day=self.date,
                            hour=self.hour,
                            minute=self.minute,
                            second=self.second)
        return None

    def __str__(self) -> str:
        # Display form: Y/M/D H:M:S AM/PM±ZZ (zero-padded offset).
        return "{}/{}/{} {}:{}:{} {}{}{:02d}".format(
            self.year,
            self.month,
            self.date,
            self.hour,
            self.minute,
            self.second,
            self.meridiem,
            "+" if self.timeZone >= 0 else "-",
            self.timeZone
        )
class DatePayload(BaseModel):
    """A point in time: absolute, relative (delta), or "now" (both None).

    Fixes: the masked placeholder METHOD_NAME (called at two sites in this
    class) is restored to is_delta_set, and _get_delta's return annotation
    referenced the undefined name `delta`, which raises NameError when the
    class body is evaluated; replaced with Tuple[timedelta, int].
    """

    delta: Optional[DateDeltaPayload] = None
    absolute: Optional[DatetimePayload] = None

    @staticmethod
    def create(string: str) -> 'DatePayload':
        """Parse *string* into a DatePayload.

        Accepts the literal 'now', an absolute date parseable by
        parse_date, or a relative expression parseable by parse_date_delta
        (resolved against the current time).  Raises ValueError otherwise.
        """
        if string == 'now':
            return DatePayload()
        date = parse_date(string)
        if date is not None:
            return DatePayload(
                absolute=DatetimePayload.build(date)
            )
        delta = parse_date_delta(string)
        if delta is not None:
            # Relative expressions are resolved immediately into an
            # absolute timestamp anchored at the current time.
            date = datetime.fromtimestamp(time() + delta)
            return DatePayload(
                absolute=DatetimePayload.build(date)
            )
        raise ValueError(f"Could not parse date {string}")

    def get_date(self) -> datetime:
        """Resolve to a concrete datetime, applying any signed delta."""
        if self.absolute is None:
            absolute_date = datetime.now()
        else:
            absolute_date = self.absolute.get_date()
        # If absolute date is None, then use now.
        if absolute_date is None:
            absolute_date = datetime.now()
        if self.is_delta_set():
            delta, sign = self._get_delta()
            return absolute_date + (sign * delta)
        return absolute_date

    def is_absolute(self) -> bool:
        """True when an absolute date is set and no delta applies."""
        return self.absolute is not None and not self.is_delta_set()

    def is_delta_set(self) -> bool:
        """True when a relative delta is attached to this payload."""
        return self.delta is not None

    def _get_delta(self) -> Tuple[timedelta, int]:
        """Return the unsigned delta together with its sign (+1 or -1)."""
        return self.delta.get_delta(), -1 if self.delta.value < 0 else 1
class DatetimeRangePayload(BaseModel):
    """A [minDate, maxDate) query range plus paging/filter options."""

    minDate: Optional[DatePayload] = None
    maxDate: Optional[DatePayload] = None
    where: Optional[str] = ""
    timeZone: Optional[str] = None
    start: Optional[int] = 0    # pagination offset
    limit: Optional[int] = 20   # pagination page size
    rand: Optional[float] = 0

    def get_dates(self) -> Tuple[datetime, datetime]:
        """Resolve both bounds to concrete datetimes.

        Missing/empty bounds default to now; a relative bound is anchored to
        the other bound's absolute date when one is available.

        Raises:
            ValueError: if the resolved range is inverted or empty.
        """
        if self._is_now(self.minDate):
            self.minDate = DatePayload(absolute=DatetimePayload.now())
        if self._is_now(self.maxDate):
            self.maxDate = DatePayload(absolute=DatetimePayload.now())
        # Set Anchor date: a purely relative bound inherits the other
        # bound's absolute date so both deltas share the same origin.
        if self._is_min_date_absolute() and not self._is_max_date_absolute():
            self.maxDate.absolute = self.minDate.absolute
        elif not self._is_min_date_absolute() and self._is_max_date_absolute():
            self.minDate.absolute = self.maxDate.absolute
        elif not self._is_min_date_absolute() and not self._is_max_date_absolute():
            self.minDate.absolute = DatetimePayload.now()
            self.maxDate.absolute = DatetimePayload.now()
        min_date = self.minDate.get_date()
        max_date = self.maxDate.get_date()
        # BUG FIX: condition simplified (was `>` or `==`) and the message no
        # longer claims the from-date is "earlier" when it is in fact later.
        if min_date >= max_date:
            raise ValueError(
                "Incorrect time range. From date `{}` is later than to date `{}` or dates are equal.".format(
                    min_date, max_date
                ))
        return min_date, max_date

    def _is_now(self, date: DatePayload):
        """True when *date* is missing or carries neither absolute nor delta."""
        return date is None or (date.absolute is None and date.delta is None)

    def _is_min_date_absolute(self):
        return self.minDate is None or self.minDate.is_absolute()

    def _is_max_date_absolute(self):
        return self.maxDate is None or self.maxDate.is_absolute()

    @staticmethod
    def convert_to_local_datetime(utc_datetime, timezone) -> Tuple[datetime, Optional[str]]:
        """Convert a UTC datetime into *timezone*; on an unknown timezone the
        input is returned unchanged with ``None`` as the resolved zone name."""
        try:
            local_tz = pytz.timezone(timezone)
            local_dt = utc_datetime.replace(tzinfo=pytz.utc).astimezone(local_tz)
            return local_tz.normalize(local_dt), timezone  # .normalize might be unnecessary
        except pytz.exceptions.UnknownTimeZoneError:
            # todo log error
            return utc_datetime, None
4,507 | fire and forget task | """ General utils
IMPORTANT: lowest level module
I order to avoid cyclic dependences, please
DO NOT IMPORT ANYTHING from .
"""
import asyncio
import logging
import os
from collections.abc import Awaitable, Coroutine, Iterable
from pathlib import Path
from typing import Any, Generator
import toolz
from pydantic import NonNegativeInt
# Module-wide logger; default sink for logged_gather and fire&forget errors.
_logger = logging.getLogger(__name__)
def is_production_environ() -> bool:
    """Best-effort check: is this code running inside a production container
    of one of the osparc-simcore services?

    WARNING: relies on the ``SC_BUILD_TARGET`` build convention, which is not
    constantly verified.
    """
    build_target = os.environ.get("SC_BUILD_TARGET")
    return build_target == "production"
def get_http_client_request_total_timeout() -> int | None:
return int(os.environ.get("HTTP_CLIENT_REQUEST_TOTAL_TIMEOUT", "20")) or None
def get_http_client_request_aiohttp_connect_timeout() -> int | None:
return int(os.environ.get("HTTP_CLIENT_REQUEST_AIOHTTP_CONNECT_TIMEOUT", 0)) or None
def get_http_client_request_aiohttp_sock_connect_timeout() -> int | None:
return (
int(os.environ.get("HTTP_CLIENT_REQUEST_AIOHTTP_SOCK_CONNECT_TIMEOUT", "5"))
or None
)
def is_osparc_repo_dir(path: Path) -> bool:
    """Heuristic: does *path* look like the osparc-simcore repository root?"""
    # TODO: implement with git cli
    required_dirs = {".github", "packages", "services"}
    present_dirs = {sub.name for sub in path.iterdir() if sub.is_dir()}
    return required_dirs.issubset(present_dirs)
def search_osparc_repo_dir(start: str | Path, max_iterations=8) -> Path | None:
    """Walk up from *start* looking for the repo root; ``None`` when not found
    within *max_iterations* parent hops.

    NOTE: assumes *start* is a path within the repo.
    """
    attempts_left = max(max_iterations, 1)
    candidate = Path(start)
    while attempts_left > 0 and not is_osparc_repo_dir(candidate):
        candidate = candidate.parent
        attempts_left -= 1
    return candidate if is_osparc_repo_dir(candidate) else None
# FUTURES
def METHOD_NAME(
    obj: Coroutine,
    *,
    task_suffix_name: str,
    fire_and_forget_tasks_collection: set[asyncio.Task],
) -> asyncio.Task:
    """Schedule *obj* as a background task and keep a strong reference to it.

    The task is added to *fire_and_forget_tasks_collection* (preventing it
    from being garbage collected mid-flight) and removes itself from the
    collection on completion. Cancellation and unexpected errors are logged.
    """
    new_task = asyncio.create_task(obj, name=f"fire_and_forget_task_{task_suffix_name}")
    fire_and_forget_tasks_collection.add(new_task)

    def _log_outcome(fut: asyncio.Future) -> None:
        try:
            fut.result()
        except asyncio.CancelledError:
            _logger.warning("%s spawned as fire&forget was cancelled", fut)
        except Exception:  # pylint: disable=broad-except
            _logger.exception("Error occurred while running task %s!", new_task.get_name())

    new_task.add_done_callback(_log_outcome)
    new_task.add_done_callback(fire_and_forget_tasks_collection.discard)
    return new_task
# // tasks
async def logged_gather(
    *tasks: Awaitable[Any],
    reraise: bool = True,
    log: logging.Logger = _logger,
    max_concurrency: int = 0,
) -> list[Any]:
    """Gather that runs ALL awaitables to completion, then logs every failure.

    Unlike plain ``asyncio.gather``, no awaitable is cancelled when a sibling
    fails: everything runs to the end, all errors are logged, and the first
    error (in the order the awaitables were passed) is raised if
    ``reraise=True`` — as ``asyncio.gather`` would do with
    ``return_exceptions=True``.

    WARNING: because execution does not stop at the first exception, later
    awaitables may still terminate with unhandled exceptions. To avoid this
    use directly ``asyncio.gather(*tasks, return_exceptions=True)``.

    :param reraise: reraises first exception (in order the tasks were passed) concurrent tasks, defaults to True
    :param log: passing the logger gives a chance to identify the origin of the gather call, defaults to current submodule's logger
    :param max_concurrency: >0 caps how many awaitables run at once
    :return: list of tasks results and errors e.g. [1, 2, ValueError("task3 went wrong"), 33, "foo"]
    """
    runnables: tuple | list = tasks
    if max_concurrency > 0:
        gate = asyncio.Semaphore(max_concurrency)

        async def _gated(awaitable: Awaitable[Any]) -> Any:
            async with gate:
                return await awaitable

        runnables = [_gated(t) for t in tasks]

    results: list[Any] = await asyncio.gather(*runnables, return_exceptions=True)

    first_error = None
    for index, outcome in enumerate(results):
        if isinstance(outcome, Exception):
            log.warning(
                "Error in %i-th concurrent task %s: %s",
                index + 1,
                str(tasks[index]),
                str(outcome),
            )
            if not first_error:
                first_error = outcome
    if reraise and first_error:
        # WARNING: Notice that ONLY THE FIRST exception is raised.
        # The rest is all logged above.
        raise first_error
    return results
def ensure_ends_with(input_string: str, char: str) -> str:
    """Return *input_string*, appending *char* when it is not already the suffix."""
    if input_string.endswith(char):
        return input_string
    return input_string + char
def partition_gen(
    input_list: Iterable, *, slice_size: NonNegativeInt
) -> Generator[tuple[Any, ...], None, None]:
    """Yield consecutive tuples of at most *slice_size* elements from *input_list*.

    Inputs:
        input_list= [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        slice_size = 5
    Outputs:
        [(1, 2, 3, 4, 5), (6, 7, 8, 9, 10), (11, 13, 13)] -> [(1, 2, 3, 4, 5), (6, 7, 8, 9, 10), (11, 12, 13)]

    An empty *input_list* yields a single empty tuple.
    """
    # IMPROVEMENT: chunking no longer needs the third-party `toolz` package;
    # itertools.islice does the same at C speed. Imported locally to keep this
    # lowest-level module's top-level imports untouched.
    from itertools import islice

    if not input_list:
        yield ()

    iterator = iter(input_list)
    while chunk := tuple(islice(iterator, slice_size)):
        yield chunk
4,508 | isfilestore | # Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from abc import ABC, abstractmethod
from urllib.parse import urlparse
from typing import Union, List, Dict, Tuple, BinaryIO, TextIO, Iterator
from ...utils import stringify_path
# Accepted path argument type: plain string or any os.PathLike (e.g. pathlib.Path).
path_type = Union[str, os.PathLike]
class FileSystem(ABC):
    """
    Abstract filesystem interface.

    Concrete backends implement the abstract methods below; the non-abstract
    helpers (``disk_usage``, ``rm``, ``mv``, path utilities) are built on top
    of them and can be inherited as-is.
    """
    @abstractmethod
    def cat(self, path: path_type) -> bytes:
        """
        Return contents of file as a bytes object
        Parameters
        ----------
        path : str or path-like
            File path to read content from.
        Returns
        -------
        contents : bytes
        """
    @abstractmethod
    def ls(self, path: path_type) -> List[path_type]:
        """
        Return list of file paths
        Parameters
        ----------
        path : str or path-like
            Directory to list.
        Returns
        -------
        paths : list
        """
    @abstractmethod
    def delete(self, path: path_type, recursive: bool = False):
        """
        Delete the indicated file or directory
        Parameters
        ----------
        path : str
        recursive : bool, default False
            If True, also delete child paths for directories
        """
    def disk_usage(self, path: path_type) -> int:
        """
        Compute bytes used by all contents under indicated path in file tree
        Parameters
        ----------
        path : string
            Can be a file path or directory
        Returns
        -------
        usage : int
        """
        path = stringify_path(path)
        path_info = self.stat(path)
        # A plain file: its own size is the answer.
        if path_info["type"] == "file":
            return path_info["size"]
        # A directory: sum the sizes of every file in the subtree.
        total = 0
        for root, directories, files in self.walk(path):
            for child_path in files:
                abspath = self.path_join(root, child_path)
                total += self.stat(abspath)["size"]
        return total
    def path_join(self, *args):
        """Join path components using this filesystem's separator."""
        return self.pathsep.join(args)
    def path_split(self, path):
        """
        Split a pathname. Returns tuple "(head, tail)" where "tail" is everything after the final slash. Either part
        may be empty.
        Parameters
        ----------
        path : string
            Can be a file path or directory
        Returns
        -------
        head, tail : pair of str
        """
        splits = path.rsplit(self.pathsep, 1)
        # No separator present: everything is the tail.
        # NOTE(review): returns a tuple here but a 2-element list below —
        # callers that unpack are fine, callers comparing types are not.
        if len(splits) == 1:
            return "", splits[0]
        else:
            return splits
    @abstractmethod
    def stat(self, path: path_type) -> Dict:
        """
        Information about a filesystem entry.
        Returns
        -------
        stat : dict
            Expected to contain at least "type" ("file"/"directory") and,
            for files, "size" (both are relied upon by ``disk_usage``).
        """
    def rm(self, path: path_type, recursive: bool = False):
        """
        Alias for FileSystem.delete
        """
        return self.delete(path, recursive=recursive)
    def mv(self, path, new_path):
        """
        Alias for FileSystem.rename
        """
        return self.rename(path, new_path)
    @abstractmethod
    def rename(self, path: path_type, new_path: path_type):
        """
        Rename file, like UNIX mv command
        Parameters
        ----------
        path : string
            Path to alter
        new_path : string
            Path to move to
        """
    @abstractmethod
    def mkdir(self, path: path_type, create_parents: bool = True):
        """
        Create a directory.
        Parameters
        ----------
        path : str
            Path to the directory.
        create_parents : bool, default True
            If the parent directories don't exists create them as well.
        """
    @abstractmethod
    def exists(self, path: path_type):
        """
        Return True if path exists.
        Parameters
        ----------
        path : str
            Path to check.
        """
    @abstractmethod
    def isdir(self, path: path_type) -> bool:
        """
        Return True if path is a directory.
        Parameters
        ----------
        path : str
            Path to check.
        """
    @abstractmethod
    def isfile(self, path: path_type) -> bool:
        """
        Return True if path is a file.
        Parameters
        ----------
        path : str
            Path to check.
        """
    @abstractmethod
    def METHOD_NAME(self) -> bool:
        """
        Returns True if this FileSystem is a unix-style file store with
        directories.
        """
    @abstractmethod
    def open(self, path: path_type, mode: str = "rb") -> Union[BinaryIO, TextIO]:
        """
        Open file for reading or writing.
        Parameters
        ----------
        path : str
            Path to open.
        mode : str, default "rb"
            Standard file mode string (e.g. "rb", "wb", "r", "w").
        """
    @abstractmethod
    def walk(self, path: path_type) -> Iterator[Tuple[str, List[str], List[str]]]:
        """
        Directory tree generator.
        Parameters
        ----------
        path : str
        Returns
        -------
        generator
            Yields (root, directories, files) triples, like ``os.walk``.
        """
    @abstractmethod
    def glob(self, path: path_type, recursive: bool = False) -> List[path_type]:
        """
        Return a list of paths matching a pathname pattern.
        Parameters
        ----------
        path : str
            Pattern may contain simple shell-style wildcards
        recursive : bool
            If recursive is true, the pattern '**' will match any files and
            zero or more directories and subdirectories.
        Returns
        -------
        paths : List
        """
    @property
    def pathsep(self) -> str:
        """Path separator used by this filesystem (defaults to "/")."""
        return "/"
    @staticmethod
    def parse_from_path(uri: str):
        """
        Extract connection options (host, port, user, password) from a URI.
        Parameters
        ----------
        uri : str
            e.g. "scheme://user:password@host:port/path"
        Returns
        -------
        options : dict
        """
        parsed_uri = urlparse(uri)
        options = dict()
        # netloc is "user:password@host:port"; strip credentials, then port.
        options["host"] = parsed_uri.netloc.rsplit("@", 1)[-1].rsplit(":", 1)[0]
        if parsed_uri.port:
            options["port"] = parsed_uri.port
        if parsed_uri.username:
            options["user"] = parsed_uri.username
        if parsed_uri.password:
            options["password"] = parsed_uri.password
        return options
    @classmethod
    def get_storage_options(cls, storage_options: Dict, uri: str) -> Dict:
        """
        Merge options parsed from *uri* into *storage_options* (mutates and
        returns the same dict).
        Parameters
        ----------
        storage_options : dict
        uri : str
        Returns
        -------
        storage_options : dict
        """
        options = cls.parse_from_path(uri)
        storage_options.update(options)
        return storage_options
4,509 | entropy hierarchical | import matplotlib.cm
import matplotlib.gridspec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from .entropy_sample import entropy_sample
from .optim_complexity_tolerance import complexity_tolerance
def METHOD_NAME(
    signal, scale="default", dimension=2, tolerance="sd", show=False, **kwargs
):
    """**Hierarchical Entropy (HEn)**
    Hierarchical Entropy (HEn) can be viewed as a generalization of the multiscale
    decomposition used in :func:`multiscale entropy <entropy_multiscale>`, and the Haar wavelet
    decomposition since it generates subtrees of the hierarchical tree. It preserves the strength of
    the multiscale decomposition with additional components of higher frequency in different
    scales. The hierarchical decomposition, unlike the wavelet decomposition, contains redundant
    components, which makes it sensitive to the dynamical richness of the time series.
    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    scale : int
        The maximum scale factor. Can only be a number or "default". Though it behaves a bit
        differently here, see :func:`complexity_multiscale` for details.
    dimension : int
        Embedding Dimension (*m*, sometimes referred to as *d* or *order*). See
        :func:`complexity_dimension` to estimate the optimal value for this parameter.
    tolerance : str or float
        Tolerance method or value passed to :func:`complexity_tolerance` (defaults to ``"sd"``).
    show : bool
        If True, plot the entropy values per scale and the hierarchical tree.
    **kwargs : optional
        Other keyword arguments (currently not used).
    Returns
    -------
    hen : float
        The Hierarchical Entropy (HEn) of the signal, quantified as the area under the
        curve of the per-scale entropy values, normalized by the number of scales.
    info : dict
        A dictionary containing additional information regarding the parameters used.
    See Also
    --------
    entropy_shannon, entropy_multiscale
    Examples
    ----------
    .. ipython:: python
      import neurokit2 as nk
      # Simulate a Signal
      signal = nk.signal_simulate(duration=5, frequency=[97, 98, 100], noise=0.05)
      # Compute Hierarchical Entropy (HEn)
      @savefig p_entropy_hierarchical1.png scale=100%
      hen, info = nk.entropy_hierarchical(signal, show=True, scale=5, dimension=3)
      @suppress
      plt.close()
    References
    ----------
    * Jiang, Y., Peng, C. K., & Xu, Y. (2011). Hierarchical entropy analysis for biological
      signals. Journal of Computational and Applied Mathematics, 236(5), 728-742.
    * Li, W., Shen, X., & Li, Y. (2019). A comparative study of multiscale sample entropy and
      hierarchical entropy and its application in feature extraction for ship-radiated noise.
      Entropy, 21(8), 793.
    """
    # Sanity checks
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )
    # Get max scale
    if isinstance(scale, str):
        N = int(2 ** np.floor(np.log2(len(signal))))
        # Max scale where N / (2 ** (scale - 1)) < 8
        scale = 1
        while N / (2 ** (scale - 1)) > 8 and scale < len(signal) / 2:
            scale += 1
    # Store parameters
    info = {
        "Scale": np.arange(1, scale + 1),
        "Dimension": dimension,
        "Tolerance": complexity_tolerance(
            signal,
            method=tolerance,
            dimension=dimension,
            show=False,
        )[0],
    }
    # TODO: Simplify this code, make it clearer and step by step, following the paper more closely
    Q, N = _hierarchical_decomposition(signal, scale=scale)
    # Sample entropy of each tree node; node T's valid length shrinks with its depth.
    HEns = np.zeros(len(Q))
    for T in range(len(Q)):
        Temp = Q[T, : int(N / (2 ** (int(np.log2(T + 1)))))]
        # This could be exposed to have different type of entropy estimators
        HEns[T], _ = entropy_sample(Temp, delay=1, dimension=dimension, tolerance=info["Tolerance"])
    # Average the node entropies level by level (level t has 2**t nodes).
    Sn = np.zeros(scale)
    for t in range(scale):
        vals = HEns[(2 ** t) - 1 : (2 ** (t + 1)) - 1]
        Sn[t] = np.mean(vals[np.isfinite(vals)])
    # The HEn index is quantified as the area under the curve (AUC),
    # which is like the sum normalized by the number of values. It's similar to the mean.
    hen = np.trapz(Sn[np.isfinite(Sn)]) / len(Sn[np.isfinite(Sn)])
    if show is True:
        # Color normalization values by extending beyond the range of the mean values
        colormin = np.min(Sn) - np.ptp(Sn) * 0.1
        colormax = np.max(Sn) + np.ptp(Sn) * 0.1
        plt.figure()
        G = matplotlib.gridspec.GridSpec(10, 1)
        # Top panel: mean entropy per scale.
        ax1 = plt.subplot(G[:2, :])
        ax1.plot(np.arange(1, scale + 1), Sn, color="black", zorder=0)
        ax1.scatter(
            np.arange(1, scale + 1),
            Sn,
            c=Sn,
            zorder=1,
            # Color map and color normalization values
            cmap="spring",
            vmin=colormin,
            vmax=colormax,
        )
        ax1.set_xticks(np.arange(1, scale + 1))
        ax1.set_xlabel("Scale Factor")
        ax1.set_ylabel("Entropy")
        ax1.set_title("Hierarchical Entropy")
        # Bottom panel: binary decomposition tree, one node per component,
        # annotated with its sample entropy.
        N = 2 ** (scale - 1)
        x = np.zeros(2 * N - 1, dtype=int)
        x[0] = N
        y = -1 * (scale - np.log2(np.arange(1, 2 * N)) // 1) + scale + 1
        for k in range(1, 2 * N):
            Q = int(np.log2(k) // 1)
            P = int((k) // 2) - 1
            if k > 1:
                # Odd children branch right of their parent, even ones left.
                if k % 2:
                    x[k - 1] = x[P] + N / (2 ** Q)
                else:
                    x[k - 1] = x[P] - N / (2 ** Q)
        Edges = np.vstack((np.repeat(np.arange(1, N), 2), np.arange(2, 2 * N))).transpose() - 1
        labx = ["".join(k) for k in np.round(HEns, 3).astype(str)]
        ax2 = plt.subplot(G[3:, :])
        for k in range(len(x) - 1):
            ax2.plot(x[Edges[k, :]], y[Edges[k, :]], color="black", zorder=0)
            ax2.annotate(labx[k], (x[k], y[k]), fontsize=8)
        ax2.scatter(
            x,
            y,
            c=HEns,
            zorder=1,
            # Color map and color normalization values
            cmap="spring",
            vmin=colormin,
            vmax=colormax,
        )
        ax2.annotate(labx[-1], (x[-1], y[-1]), fontsize=8)
        ax2.invert_yaxis()
        ax2.set_ylabel("Scale Factor")
    # return MSx, Sn, CI
    return hen, info
def _hierarchical_decomposition(signal, scale=3):
N = int(2 ** np.floor(np.log2(len(signal))))
if N / (2 ** (scale - 1)) < 8:
raise Exception(
"Signal length is too short to estimate entropy at the lowest"
" subtree. Consider reducing the value of scale."
)
Q = np.zeros(((2 ** scale) - 1, N))
Q[0, :] = signal[:N]
p = 1
for k in range(scale - 1):
for n in range(2 ** k):
Temp = Q[(2 ** k) + n - 1, :]
# 1. We define an averaging operator Q0. It is the the low frequency component.
Q[p, : N // 2] = (Temp[::2] + Temp[1::2]) / 2
# 2. We define a difference frequency component. It is the the high frequency component.
Q[p + 1, : N // 2] = (Temp[::2] - Temp[1::2]) / 2
p += 2
return Q, N |
4,510 | time elapsed | # Copyright The Lightning AI team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Timer ^^^^^"""
import logging
import time
from datetime import timedelta
from typing import Any, Dict, Optional, Union
import lightning.pytorch as pl
from lightning.pytorch.callbacks.callback import Callback
from lightning.pytorch.trainer.states import RunningStage
from lightning.pytorch.utilities import LightningEnum
from lightning.pytorch.utilities.exceptions import MisconfigurationException
from lightning.pytorch.utilities.rank_zero import rank_zero_info
# Module-level logger for the Timer callback.
log = logging.getLogger(__name__)
class Interval(LightningEnum):
    # Granularity at which the Timer checks whether the time budget ran out.
    step = "step"
    epoch = "epoch"
class Timer(Callback):
    """The Timer callback tracks the time spent in the training, validation, and test loops and interrupts the Trainer
    if the given time limit for the training loop is reached.
    Args:
        duration: A string in the format DD:HH:MM:SS (days, hours, minutes seconds), or a :class:`datetime.timedelta`,
            or a dict containing key-value compatible with :class:`~datetime.timedelta`.
        interval: Determines if the interruption happens on epoch level or mid-epoch.
            Can be either ``"epoch"`` or ``"step"``.
        verbose: Set this to ``False`` to suppress logging messages.
    Raises:
        MisconfigurationException:
            If ``interval`` is not one of the supported choices.
    Example::
        from lightning.pytorch import Trainer
        from lightning.pytorch.callbacks import Timer
        # stop training after 12 hours
        timer = Timer(duration="00:12:00:00")
        # or provide a datetime.timedelta
        from datetime import timedelta
        timer = Timer(duration=timedelta(weeks=1))
        # or provide a dictionary
        timer = Timer(duration=dict(weeks=4, days=2))
        # force training to stop after given time limit
        trainer = Trainer(callbacks=[timer])
        # query training/validation/test time (in seconds)
        timer.time_elapsed("train")
        timer.start_time("validate")
        timer.end_time("test")
    """
    def __init__(
        self,
        duration: Optional[Union[str, timedelta, Dict[str, int]]] = None,
        interval: str = Interval.step,
        verbose: bool = True,
    ) -> None:
        super().__init__()
        if isinstance(duration, str):
            # Parse "DD:HH:MM:SS" into a timedelta (expects exactly 4 fields).
            dhms = duration.strip().split(":")
            dhms = [int(i) for i in dhms]
            duration = timedelta(days=dhms[0], hours=dhms[1], minutes=dhms[2], seconds=dhms[3])
        if isinstance(duration, dict):
            duration = timedelta(**duration)
        if interval not in set(Interval):
            raise MisconfigurationException(
                f"Unsupported parameter value `Timer(interval={interval})`. Possible choices are:"
                f" {', '.join(set(Interval))}"
            )
        # Total training time budget in seconds; None means unlimited.
        self._duration = duration.total_seconds() if duration is not None else None
        self._interval = interval
        self._verbose = verbose
        # Monotonic-clock start/end timestamps, tracked per running stage.
        self._start_time: Dict[RunningStage, Optional[float]] = {stage: None for stage in RunningStage}
        self._end_time: Dict[RunningStage, Optional[float]] = {stage: None for stage in RunningStage}
        # Training time already elapsed before a checkpoint reload (see load_state_dict).
        self._offset = 0
    def start_time(self, stage: str = RunningStage.TRAINING) -> Optional[float]:
        """Return the start time of a particular stage (in seconds)"""
        stage = RunningStage(stage)
        return self._start_time[stage]
    def end_time(self, stage: str = RunningStage.TRAINING) -> Optional[float]:
        """Return the end time of a particular stage (in seconds)"""
        stage = RunningStage(stage)
        return self._end_time[stage]
    def METHOD_NAME(self, stage: str = RunningStage.TRAINING) -> float:
        """Return the time elapsed for a particular stage (in seconds)"""
        start = self.start_time(stage)
        end = self.end_time(stage)
        # Only the training stage accumulates time across checkpoint reloads.
        offset = self._offset if stage == RunningStage.TRAINING else 0
        if start is None:
            return offset
        if end is None:
            # Stage is still running: measure against the current clock.
            return time.monotonic() - start + offset
        return end - start + offset
    def time_remaining(self, stage: str = RunningStage.TRAINING) -> Optional[float]:
        """Return the time remaining for a particular stage (in seconds)"""
        if self._duration is not None:
            return self._duration - self.METHOD_NAME(stage)
        return None
    def on_train_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
        self._start_time[RunningStage.TRAINING] = time.monotonic()
    def on_train_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
        self._end_time[RunningStage.TRAINING] = time.monotonic()
    def on_validation_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
        self._start_time[RunningStage.VALIDATING] = time.monotonic()
    def on_validation_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
        self._end_time[RunningStage.VALIDATING] = time.monotonic()
    def on_test_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
        self._start_time[RunningStage.TESTING] = time.monotonic()
    def on_test_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
        self._end_time[RunningStage.TESTING] = time.monotonic()
    def on_fit_start(self, trainer: "pl.Trainer", *args: Any, **kwargs: Any) -> None:
        # this checks the time after the state is reloaded, regardless of the interval.
        # this is necessary in case we load a state whose timer is already depleted
        if self._duration is None:
            return
        self._check_time_remaining(trainer)
    def on_train_batch_end(self, trainer: "pl.Trainer", *args: Any, **kwargs: Any) -> None:
        if self._interval != Interval.step or self._duration is None:
            return
        self._check_time_remaining(trainer)
    def on_train_epoch_end(self, trainer: "pl.Trainer", *args: Any, **kwargs: Any) -> None:
        if self._interval != Interval.epoch or self._duration is None:
            return
        self._check_time_remaining(trainer)
    def state_dict(self) -> Dict[str, Any]:
        # Persist per-stage elapsed seconds so a reloaded Timer keeps counting down.
        return {"time_elapsed": {stage.value: self.METHOD_NAME(stage) for stage in RunningStage}}
    def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
        # Restore the previously elapsed TRAINING time as an offset; other
        # stages restart from zero.
        METHOD_NAME = state_dict.get("time_elapsed", {})
        self._offset = METHOD_NAME.get(RunningStage.TRAINING.value, 0)
    def _check_time_remaining(self, trainer: "pl.Trainer") -> None:
        assert self._duration is not None
        should_stop = self.METHOD_NAME() >= self._duration
        # Broadcast so all distributed ranks agree on stopping.
        should_stop = trainer.strategy.broadcast(should_stop)
        trainer.should_stop = trainer.should_stop or should_stop
        if should_stop and self._verbose:
            elapsed = timedelta(seconds=int(self.METHOD_NAME(RunningStage.TRAINING)))
            rank_zero_info(f"Time limit reached. Elapsed time is {elapsed}. Signaling Trainer to stop.")
4,511 | id | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
# Public API of this generated module.
__all__ = [
    'GetOfficeATPDataConnectorResult',
    'AwaitableGetOfficeATPDataConnectorResult',
    'get_office_atp_data_connector',
    'get_office_atp_data_connector_output',
]
@pulumi.output_type
class GetOfficeATPDataConnectorResult:
    """
    Represents OfficeATP (Office 365 Advanced Threat Protection) data connector.
    """
    def __init__(__self__, data_types=None, etag=None, METHOD_NAME=None, kind=None, name=None, system_data=None, tenant_id=None, type=None):
        # Guards against hand-construction with wrong raw types; values are
        # stored via pulumi.set so the @pulumi.getter properties below can
        # retrieve them from the output-type backing store.
        if data_types and not isinstance(data_types, dict):
            raise TypeError("Expected argument 'data_types' to be a dict")
        pulumi.set(__self__, "data_types", data_types)
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", METHOD_NAME)
        if kind and not isinstance(kind, str):
            raise TypeError("Expected argument 'kind' to be a str")
        pulumi.set(__self__, "kind", kind)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if tenant_id and not isinstance(tenant_id, str):
            raise TypeError("Expected argument 'tenant_id' to be a str")
        pulumi.set(__self__, "tenant_id", tenant_id)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="dataTypes")
    def data_types(self) -> Optional['outputs.AlertsDataTypeOfDataConnectorResponse']:
        """
        The available data types for the connector.
        """
        return pulumi.get(self, "data_types")
    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """
        Etag of the azure resource
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter
    def METHOD_NAME(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def kind(self) -> str:
        """
        The kind of the data connector
        Expected value is 'OfficeATP'.
        """
        return pulumi.get(self, "kind")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> 'outputs.SystemDataResponse':
        """
        Azure Resource Manager metadata containing createdBy and modifiedBy information.
        """
        return pulumi.get(self, "system_data")
    @property
    @pulumi.getter(name="tenantId")
    def tenant_id(self) -> str:
        """
        The tenant id to connect to, and get the data from.
        """
        return pulumi.get(self, "tenant_id")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
class AwaitableGetOfficeATPDataConnectorResult(GetOfficeATPDataConnectorResult):
    # Awaitable wrapper: the unreachable `yield` below only marks __await__ as
    # a generator function; awaiting immediately returns a plain result copy.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetOfficeATPDataConnectorResult(
            data_types=self.data_types,
            etag=self.etag,
            METHOD_NAME=self.METHOD_NAME,
            kind=self.kind,
            name=self.name,
            system_data=self.system_data,
            tenant_id=self.tenant_id,
            type=self.type)
def get_office_atp_data_connector(data_connector_id: Optional[str] = None,
                                  resource_group_name: Optional[str] = None,
                                  workspace_name: Optional[str] = None,
                                  opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetOfficeATPDataConnectorResult:
    """
    Gets a data connector.
    :param str data_connector_id: Connector ID
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str workspace_name: The name of the workspace.
    :param pulumi.InvokeOptions opts: Options controlling the invoke behavior.
    :return: The data connector, wrapped so it can also be awaited.
    """
    __args__ = dict()
    __args__['dataConnectorId'] = data_connector_id
    __args__['resourceGroupName'] = resource_group_name
    __args__['workspaceName'] = workspace_name
    # Merge caller-supplied options over the provider defaults before invoking.
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:securityinsights/v20230701preview:getOfficeATPDataConnector', __args__, opts=opts, typ=GetOfficeATPDataConnectorResult).value
    return AwaitableGetOfficeATPDataConnectorResult(
        data_types=pulumi.get(__ret__, 'data_types'),
        etag=pulumi.get(__ret__, 'etag'),
        METHOD_NAME=pulumi.get(__ret__, 'id'),
        kind=pulumi.get(__ret__, 'kind'),
        name=pulumi.get(__ret__, 'name'),
        system_data=pulumi.get(__ret__, 'system_data'),
        tenant_id=pulumi.get(__ret__, 'tenant_id'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_office_atp_data_connector)
def get_office_atp_data_connector_output(data_connector_id: Optional[pulumi.Input[str]] = None,
                                         resource_group_name: Optional[pulumi.Input[str]] = None,
                                         workspace_name: Optional[pulumi.Input[str]] = None,
                                         opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetOfficeATPDataConnectorResult]:
    """
    Gets a data connector.
    :param str data_connector_id: Connector ID
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str workspace_name: The name of the workspace.
    """
    # Body intentionally empty: lift_output_func wraps the plain invoke
    # above into this Output-returning variant.
    ...
4,512 | value | # coding=utf-8
# *** WARNING: this file was generated by pulumigen. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ... import meta as _meta
# Public API of this generated module.
__all__ = [
    'PriorityClass',
]
@pulumi.output_type
class PriorityClass(dict):
    """
    PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to the snake_case property names so dict-style
        # access can point users at the right getter.
        suggest = None
        if key == "apiVersion":
            suggest = "api_version"
        elif key == "globalDefault":
            suggest = "global_default"
        elif key == "preemptionPolicy":
            suggest = "preemption_policy"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in PriorityClass. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        PriorityClass.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        PriorityClass.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 value: int,
                 api_version: Optional[str] = None,
                 description: Optional[str] = None,
                 global_default: Optional[bool] = None,
                 kind: Optional[str] = None,
                 metadata: Optional['_meta.v1.outputs.ObjectMeta'] = None,
                 preemption_policy: Optional[str] = None):
        """
        PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.
        :param int value: value represents the integer value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.
        :param str api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
        :param str description: description is an arbitrary string that usually provides guidelines on when this priority class should be used.
        :param bool global_default: globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.
        :param str kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
        :param '_meta.v1.ObjectMetaArgs' metadata: Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
        :param str preemption_policy: preemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset.
        """
        pulumi.set(__self__, "value", value)
        if api_version is not None:
            # Codegen pins api_version/kind to the canonical GVK values
            # regardless of the argument contents.
            pulumi.set(__self__, "api_version", 'scheduling.k8s.io/v1')
        if description is not None:
            pulumi.set(__self__, "description", description)
        if global_default is not None:
            pulumi.set(__self__, "global_default", global_default)
        if kind is not None:
            pulumi.set(__self__, "kind", 'PriorityClass')
        if metadata is not None:
            pulumi.set(__self__, "metadata", metadata)
        if preemption_policy is not None:
            pulumi.set(__self__, "preemption_policy", preemption_policy)
    @property
    @pulumi.getter
    def value(self) -> int:
        """
        value represents the integer value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.
        """
        return pulumi.get(self, "value")
    @property
    @pulumi.getter(name="apiVersion")
    def api_version(self) -> Optional[str]:
        """
        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
        """
        return pulumi.get(self, "api_version")
    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        """
        description is an arbitrary string that usually provides guidelines on when this priority class should be used.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter(name="globalDefault")
    def global_default(self) -> Optional[bool]:
        """
        globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.
        """
        return pulumi.get(self, "global_default")
    @property
    @pulumi.getter
    def kind(self) -> Optional[str]:
        """
        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
        """
        return pulumi.get(self, "kind")
    @property
    @pulumi.getter
    def metadata(self) -> Optional['_meta.v1.outputs.ObjectMeta']:
        """
        Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
        """
        return pulumi.get(self, "metadata")
    @property
    @pulumi.getter(name="preemptionPolicy")
    def preemption_policy(self) -> Optional[str]:
        """
        preemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset.
        """
        return pulumi.get(self, "preemption_policy")
#!/usr/bin/env python3
"""
Script for initializing tests for queries related to glam-fenix-dev.
This only needs to be run once for the initial testing.
"""
import json
import shutil
import warnings
from collections import namedtuple
from multiprocessing import Pool
from pathlib import Path
import click
import yaml
from google.cloud import bigquery
from bigquery_etl.glam.utils import run
warnings.filterwarnings(
"ignore", "Your application has authenticated using end user credentials"
)
ROOT = Path(run("git rev-parse --show-toplevel"))
SQL_ROOT = ROOT / "sql" / "glam-fenix-dev" / "glam_etl"
TEST_ROOT = Path(__file__).parent
def list_queries(path):
    """List all of the queries in the glam_etl folder.

    Entries whose name contains ``__clients_daily`` are excluded.

    :param path: a ``pathlib.Path`` directory to scan (non-recursive).
    :return: list of matching paths.
    """
    # A comprehension already yields a list; the original wrapped it in a
    # redundant list() call.
    return [p for p in path.glob("*") if "__clients_daily" not in p.name]
def dryrun(sql):
    """Dry run a query and return the referenced tables.
    This does not capture views, but only the tables referenced by those
    views.
    """
    client = bigquery.Client(project="glam-fenix-dev")
    job = client.query(
        sql,
        job_config=bigquery.QueryJobConfig(
            dry_run=True,
            use_query_cache=False,
            # Placeholder values: a dry run only validates and plans the
            # query, so any syntactically valid parameter values suffice.
            query_parameters=[
                bigquery.ScalarQueryParameter("submission_date", "DATE", "2020-10-01"),
                bigquery.ScalarQueryParameter("min_sample_id", "INT64", 0),
                bigquery.ScalarQueryParameter("max_sample_id", "INT64", 99),
                bigquery.ScalarQueryParameter("sample_size", "INT64", 100),
            ],
        ),
    )
    return job.referenced_tables
def _dryrun(query_path):
    """Return the dry run results from a path.

    Helper for ``multiprocessing.Pool.map``: pairs each query path with its
    dry-run result so the caller can attribute referenced tables to a query.
    """
    return (query_path, dryrun(query_path.read_text()))
def calculate_dependencies(queries):
    """Create a list of dependencies between queries for bootstrapping schemas."""
    # Dry-run all queries in parallel; each worker yields (path, referenced_tables).
    with Pool(20) as p:
        dryrun_references = p.map(_dryrun, queries)
    res = []
    for query, refs in dryrun_references:
        for ref in refs:
            res.append(
                (
                    # "project:dataset.table" of the referenced (source) table
                    f"{ref.project}:{ref.dataset_id}.{ref.table_id}",
                    # "project:dataset.query_dir" derived from the query file path
                    f"{query.parent.parent.parent.name}:"
                    f"{query.parent.parent.name}.{query.parent.name}",
                )
            )
    # De-duplicate and return deterministically ordered edges.
    return sorted(set(res))
@click.group()
def bootstrap():
    """Script for initializing tests for queries related to glam-fenix-dev."""
    # Click group: subcommands attach themselves via @bootstrap.command().
    pass
@bootstrap.command()
@click.option("--output", default=TEST_ROOT / "dependencies.json")
def deps(output):
    """Create a dependency file with all links between queries and tables."""
    path = Path(output)
    # Dry-run every non-clients_daily query to discover which tables it reads.
    dependencies = calculate_dependencies(
        [p for p in SQL_ROOT.glob("**/*.sql") if "__clients_daily" not in p.name]
    )
    # Serialize each (source, target) edge as {"from": ..., "to": ...}.
    path.write_text(
        json.dumps([dict(zip(["from", "to"], dep)) for dep in dependencies], indent=2)
    )
@bootstrap.command()
@click.option(
    "--dependency-file",
    default=TEST_ROOT / "dependencies.json",
    type=click.Path(dir_okay=False, exists=True),
)
def mermaid(dependency_file):
    """Generate a mermaid diagram of dependencies.

    Reads the JSON edge list produced by the ``deps`` command and prints a
    ``graph LR`` mermaid document to stdout, grouping nodes into nested
    subgraphs by project, dataset and (for glam_etl) product.
    """
    dependencies = json.loads(Path(dependency_file).read_text())
    # project, dataset, product, query
    Node = namedtuple("node", ["qualified", "project", "dataset", "name", "product"])

    def parse(x):
        # x is "project:dataset.table"; glam_etl table names are
        # "<product>__<query>" so they can be split further.
        project, dataset_table = x.split(":")
        dataset, table = dataset_table.split(".")
        is_glam = dataset == "glam_etl"
        return Node(
            dataset_table if is_glam else x,
            project,
            dataset,
            table.split("__")[1] if is_glam else table,
            table.split("__")[0] if is_glam else None,
        )

    # build up mapping of nodes to parsed info; wildcard refs are resolved later
    nodes = {}
    for dep in dependencies:
        src = dep["from"]
        dst = dep["to"]
        for x in [src, dst]:
            if "*" not in x:
                nodes[x] = parse(x)
    print("graph LR")
    projects = set(n.project for n in nodes.values())
    for project in projects:
        print(f'subgraph "{project}"')
        datasets = set(n.dataset for n in nodes.values() if n.project == project)
        for dataset in datasets:
            print(f'subgraph "{dataset}"')
            if dataset == "glam_etl":
                # glam_etl gets one extra nesting level per product
                products = set(
                    n.product for n in nodes.values() if n.dataset == "glam_etl"
                )
                for product in products:
                    print(f'subgraph "{product}"')
                    for n in set(
                        n
                        for n in nodes.values()
                        if n.dataset == "glam_etl" and n.product == product
                    ):
                        print(f"{n.qualified}[{n.name}]")
                    print("end")
            else:
                for n in set(n for n in nodes.values() if n.dataset == dataset):
                    print(f"{n.qualified}[{n.name}]")
            print("end")
        print("end")
    # Emit the edges; a "*" source fans out to every node sharing its prefix.
    for dep in dependencies:
        src = dep["from"]
        dst = dep["to"]
        if "*" in src:
            for node in [
                v for k, v in nodes.items() if k.startswith(src.replace("*", ""))
            ]:
                print(f"{node.qualified} --> {nodes[dst].qualified}")
        else:
            print(f"{nodes[src].qualified} --> {nodes[dst].qualified}")
@bootstrap.command()
@click.option(
    "--dependency-file",
    default=TEST_ROOT / "dependencies.json",
    type=click.Path(dir_okay=False, exists=True),
)
def skeleton(dependency_file):
    """Generate the skeleton for minimal tests.

    Schemas are populated from the sql folder. Run the `deps` command to
    generate a file of dependencies between queries.
    """
    dependencies = json.loads(Path(dependency_file).read_text())
    for query in list_queries(SQL_ROOT):
        # only tests for org_mozilla_fenix_glam_nightly
        if "org_mozilla_fenix_glam_nightly" not in query.name:
            continue
        print(f"copying schemas for {query.name}")
        # views are also considered aggregates
        path = TEST_ROOT / query.name / "test_minimal"
        path.mkdir(parents=True, exist_ok=True)
        # set query parameters (same placeholder values used for dry runs)
        params = list(
            map(
                lambda x: dict(zip(["name", "type", "value"], x)),
                [
                    ("submission_date", "DATE", "2020-10-01"),
                    ("min_sample_id", "INT64", 0),
                    ("max_sample_id", "INT64", 99),
                    ("sample_size", "INT64", 100),
                ],
            )
        )
        with (path / "query_params.yaml").open("w") as fp:
            yaml.dump(params, fp)
        # now copy over the schemas of every table this query depends on
        query_deps = [x["from"] for x in dependencies if query.name in x["to"]]
        print(f"found dependencies: {json.dumps(query_deps, indent=2)}")
        for dep in query_deps:
            project = dep.split(":")[0]
            dataset = dep.split(":")[1].split(".")[0]
            table = dep.split(".")[1]
            for schema in (ROOT / "sql" / project / dataset).glob(f"{table}/schema.*"):
                print(f"copied dependency {schema}")
                shutil.copyfile(
                    schema,
                    path
                    / (
                        # glam_etl schemas keep a short name; everything else
                        # is fully qualified with project and dataset.
                        (
                            f"{project}.{dataset}."
                            if dataset != "glam_etl"
                            else f"{dataset}."
                        )
                        + f"{schema.parent.name}.{schema.name}"
                    ),
                )
if __name__ == "__main__":
    # Entry point: dispatch to the click command group.
    # (Fixed: a stray "|" character trailed the call.)
    bootstrap()
import os
import eth_utils
import rlp
import sys
sys.path.append("..")
from conflux.rpc import RpcClient
from test_framework.util import assert_equal, assert_raises_rpc_error, assert_greater_than, test_rpc_call_with_block_object
class TestGetBalance(RpcClient):
    """Tests for the cfx_getBalance RPC: address validation, epoch handling
    and balance changes across transactions and pivot-chain reorgs."""

    def test_genesis_account_balance(self):
        """The genesis account starts with a positive balance."""
        addr = self.GENESIS_ADDR
        balance = self.get_balance(addr)
        assert_greater_than(balance, 0)

    def test_address_not_exists(self):
        """An unknown address reports a zero balance."""
        addr = self.rand_addr()
        balance = self.get_balance(addr)
        assert_equal(0, balance)

    def test_address_empty(self):
        """Empty or bare-prefix addresses are rejected."""
        assert_raises_rpc_error(None, None, self.get_balance, "")
        assert_raises_rpc_error(None, None, self.get_balance, "0x")

    def test_address_too_short(self):
        """A truncated address is rejected."""
        addr = self.rand_addr()
        assert_raises_rpc_error(None, None, self.get_balance, addr[0:-2])

    def test_address_too_long(self):
        """An over-long address is rejected."""
        addr = self.rand_addr()
        assert_raises_rpc_error(None, None, self.node.cfx_getBalance, addr + "6")

    def test_address_lowercase(self):
        """Lowercase hex addresses are accepted."""
        addr = self.rand_addr()
        balance = self.get_balance(addr.lower())
        assert_equal(0, balance)

    def test_address_uppercase(self):
        """Uppercase hex addresses are accepted."""
        addr = self.rand_addr()
        balance = self.get_balance("0x" + addr[2:].upper())
        assert_equal(0, balance)

    def test_address_mixedcase(self):
        """Mixed-case hex addresses are accepted."""
        addr = self.rand_addr()
        addr = addr[0:-1].lower() + "A"
        balance = self.get_balance(addr)
        assert_equal(0, balance)

    def test_epoch_earliest(self):
        """The earliest epoch always reports the genesis balance, even after txs."""
        balance = self.get_balance(self.GENESIS_ADDR, self.EPOCH_EARLIEST)
        assert_equal(balance, self.GENESIS_ORIGIN_COIN)
        tx = self.new_tx()
        self.send_tx(tx, True)
        balance2 = self.get_balance(self.GENESIS_ADDR, self.EPOCH_EARLIEST)
        assert_equal(balance2, self.GENESIS_ORIGIN_COIN)

    def test_epoch_latest_state(self):
        """Default epoch equals the explicit latest-state epoch."""
        balance1 = self.get_balance(self.GENESIS_ADDR)
        balance2 = self.get_balance(self.GENESIS_ADDR, self.EPOCH_LATEST_STATE)
        assert_equal(balance1, balance2)

    def test_epoch_latest_mined(self):
        """Querying a mined-but-unexecuted epoch is an error."""
        assert_raises_rpc_error(None, None, self.get_balance, self.GENESIS_ADDR, self.EPOCH_LATEST_MINED)

    def test_epoch_num_too_large(self):
        """Epochs beyond the latest executed state are rejected."""
        mined_epoch = self.epoch_number()
        assert_raises_rpc_error(None, None, self.get_balance, self.GENESIS_ADDR, self.EPOCH_NUM(mined_epoch + 1))
        stated_epoch = self.epoch_number(self.EPOCH_LATEST_STATE)
        for num in range(stated_epoch + 1, mined_epoch):
            assert_raises_rpc_error(None, None, self.get_balance, self.GENESIS_ADDR, self.EPOCH_NUM(num))

    def test_balance_after_tx(self):
        """Sending a tx debits value plus the fee from the sender."""
        addr = self.GENESIS_ADDR
        original_balance = self.get_balance(addr)
        # send a tx to change balance
        tx = self.new_tx(value=789)
        self.send_tx(tx, True)
        # value + gas * price
        cost = 789 + self.DEFAULT_TX_FEE
        new_balance = self.get_balance(addr)
        assert_equal(original_balance - cost, new_balance)

    def test_pivot_chain_changed(self):
        """Balances follow the pivot chain across a reorg and back."""
        root = self.generate_block()
        original_epoch = self.epoch_number()
        original_balance = self.get_balance(self.GENESIS_ADDR)
        # generate a tx to change the balance
        tx = self.new_tx()
        self.send_tx(tx, True)
        num_blocks = self.epoch_number() - original_epoch
        changed_balance = self.get_balance(self.GENESIS_ADDR)
        assert_greater_than(original_balance, changed_balance)
        # pivot changed without above tx
        parent = root
        for _ in range(0, num_blocks + 1):
            parent = self.generate_block_with_parent(parent, [])
        assert_equal(self.best_block_hash(), parent)
        assert_equal(self.get_balance(self.GENESIS_ADDR), original_balance)
        # generate a block on new pivot chain and refer the previous block
        # that contains the above tx
        self.wait_for_receipt(tx.hash_hex())
        assert_equal(self.get_balance(self.GENESIS_ADDR), changed_balance)

    def test_balance_with_block_object(self):
        """get_balance honours an explicit block object parameter."""
        tx = self.new_tx(value=789)
        cost = 789 + self.DEFAULT_TX_FEE
        expected = self.get_balance(self.GENESIS_ADDR) - cost
        test_rpc_call_with_block_object(
            self,
            [tx],
            self.get_balance,
            lambda x: x == expected,
            [self.GENESIS_ADDR]
        )
# vs title tracking requires an extension
# https://marketplace.visualstudio.com/items?itemName=mayerwin.RenameVisualStudioWindowTitle
# https://github.com/mayerwin/vs-customize-window-title (VS 2022 support in releases)
# I currently configure the extension as below
# Document (no solution) open: [documentName] - [ideName]
# No document or solution open: [idleName]
# Solution in break mode: [documentName] - [parentPath]\[solutionName] (Debugging) - [ideName]
# Solution in design mode: [documentName] - [parentPath]\[solutionName] - [ideName]
# Solution in running mode: [documentName] - [parentPath]\[solutionName] (Running) - [ideName]
from talon import Context, Module, actions
# is_mac = app.platform == "mac"
ctx = Context()
mod = Module()
apps = mod.apps
apps.visual_studio = """
os: windows
and app.name: Microsoft Visual Studio 2022
os: windows
and app.name: Microsoft Visual Studio 2019
os: windows
and app.name: devenv.exe
"""
ctx.matches = r"""
app: visual_studio
"""
# NOTE(review): the import and Context below duplicate the ones above, and
# this second ctx.matches (adding "os: windows") replaces the first for all
# action classes registered on ctx in this file. Looks like a leftover from a
# merge — confirm which match string is intended.
from talon import Context, actions
ctx = Context()
ctx.matches = r"""
os: windows
app: visual_studio
"""
@ctx.action_class("app")
class AppActions:
    """Visual Studio key bindings for Talon's built-in app (tab) actions."""
    # talon app actions
    def tab_close():
        actions.key("ctrl-f4")
    def tab_next():
        actions.key("ctrl-tab")
    def tab_previous():
        actions.key("ctrl-shift-tab")
    def tab_reopen():
        # NOTE(review): chord meaning depends on the user's VS keybindings —
        # confirm ctrl-1 ctrl-r opens the intended "reopen closed tab" flow.
        actions.key("ctrl-1 ctrl-r enter")
@ctx.action_class("code")
class CodeActions:
    """Visual Studio key bindings for Talon's built-in code actions."""
    # talon code actions
    def toggle_comment():
        actions.key("ctrl-k ctrl-/")
@ctx.action_class("edit")
class EditActions:
    """Visual Studio key bindings for Talon's built-in edit actions."""
    # talon edit actions
    def indent_more():
        actions.key("tab")
    def indent_less():
        actions.key("shift-tab")
    def save_all():
        actions.key("ctrl-shift-s")
    def find(text: str):
        actions.key("ctrl-f")
        actions.insert(text)
    def line_swap_up():
        actions.key("alt-up")
    def line_swap_down():
        actions.key("alt-down")
    def line_clone():
        actions.key("ctrl-d")
    def jump_line(n: int):
        actions.key("ctrl-g")
        # give the Go To Line dialog time to open before typing the number
        actions.sleep("100ms")
        actions.insert(str(n))
        actions.key("enter")
@ctx.action_class("win")
class WinActions:
    """Derives window metadata; relies on the title format configured via the
    Rename Visual Studio Window Title extension (see header notes)."""
    def filename():
        # Title format is "[documentName] - ...", so the document name is
        # everything before the first dash.
        window_title = actions.win.title()
        candidate = window_title.split("-")[0].rstrip()
        # Only report something that looks like a file name (has an extension).
        return candidate if "." in candidate else ""
@ctx.action_class("user")
class UserActions:
    """Visual Studio implementations of user-defined Talon actions:
    snippets, find/replace, and multiple cursors.

    (Large blocks of commented-out legacy implementations were removed;
    see repository history if they are needed again.)
    """
    # snippet.py support beginHelp close
    def snippet_search(text: str):
        """Triggers snippet search"""
        actions.key("ctrl-k ctrl-x")
    # snippet.py support end

    # find_and_replace.py support begin
    def find(text: str):
        """Triggers find in current editor"""
        actions.key("ctrl-f")
        if text:
            actions.insert(text)
    def find_next():
        actions.key("enter")
    def find_previous():
        actions.key("shift-enter")
    def find_everywhere(text: str):
        """Triggers find across project"""
        actions.key("ctrl-shift-f")
        if text:
            actions.insert(text)
    def find_toggle_match_by_case():
        """Toggles find match by case sensitivity"""
        actions.key("alt-c")
    def find_toggle_match_by_word():
        """Toggles find match by whole words"""
        actions.key("alt-w")
    def find_toggle_match_by_regex():
        """Toggles find match by regex"""
        actions.key("alt-r")
    def replace(text: str):
        """Search and replaces in the active editor"""
        actions.key("ctrl-h")
        if text:
            actions.insert(text)
    def replace_everywhere(text: str):
        """Search and replaces in the entire project"""
        actions.key("ctrl-shift-h")
        if text:
            actions.insert(text)
    def replace_confirm():
        """Confirm replace at current position"""
        actions.key("alt-r")
    def replace_confirm_all():
        """Confirm replace all"""
        actions.key("alt-a")
    def select_previous_occurrence(text: str):
        # find backwards, then close the find bar leaving the match selected
        actions.edit.find(text)
        actions.key("shift-enter")
        actions.sleep("100ms")
        actions.key("esc")
    def select_next_occurrence(text: str):
        # find forwards, then close the find bar leaving the match selected
        actions.edit.find(text)
        actions.sleep("100ms")
        actions.key("esc")
    # find_and_replace.py support end

    # multiple_cursor.py support begin
    # note: visual studio has no explicit mode for multiple cursors; requires https://marketplace.visualstudio.com/items?itemName=VaclavNadrasky.MultiCaretBooster
    def multi_cursor_add_above():
        actions.key("shift-alt-up")
    def multi_cursor_add_below():
        actions.key("shift-alt-down")
    # action(user.multi_cursor_add_to_line_ends): does not exist :(
    def multi_cursor_disable():
        actions.key("escape")
    def multi_cursor_enable():
        # no explicit multi-cursor mode in VS, so enabling is a no-op
        actions.skip()
    def multi_cursor_select_all_occurrences():
        actions.key("shift-alt-;")
    def multi_cursor_select_fewer_occurrences():
        actions.key("shift-alt-k")
    def multi_cursor_select_more_occurrences():
        actions.key("shift-alt->")
@coroutine
# NOTE(review): this file is a set of API design sketches; Client, react,
# coroutine etc. are assumed to be imported elsewhere — confirm before running.
def main(reactor, session):
    # the session is joined and ready
    result = yield session.call('com.example.add2', 2, 3)
    print('result={}'.format(result))
    # as we exit, this signals we are done with the session! the session
    # can be recycled
if __name__ == '__main__':
    client = Client(main=main)
    react(client.run)
@coroutine
def setup(reactor, session):
    # the session is joined and ready also!
    def add2(a, b):
        return a + b
    yield session.register('com.example.add2', add2)
    print('procedure registered')
    # as we exit, this signals we are ready! the session must be kept.
if __name__ == '__main__':
    client = Client(setup=setup)
    react(client.run)
@coroutine
def client_main(reactor, client):
    """Low-level sketch: nest transport and session lifecycles by hand."""
    @coroutine
    def transport_main(reactor, transport):
        @coroutine
        def session_main(reactor, session):
            result = yield session.call('com.example.add2', 2, 3)
            print('result={}'.format(result))
        # returns when the session_main has finished (!), the session
        # calls leave() or the underlying transport closes
        yield transport.join(session_main, transport)
    # returns when the transport_main won't reconnect
    yield client.connect(transport_main)
if __name__ == '__main__':
    client = Client(client_main=client_main)
    react(client.run)
# caller variant: session_main only makes a call and returns
@coroutine
def session_main(reactor, session):
    result = yield session.call('com.example.add2', 2, 3)
    print('result={}'.format(result))
if __name__ == '__main__':
    client = Client(session_main=session_main)
    react(client.run)
# callee variant (shadows the definition above): registers a procedure and
# returns an unresolved future so the session is kept alive
@coroutine
def session_main(reactor, session):
    def add2(a, b):
        return a + b
    yield session.register('com.example.add2', add2)
    print('procedure registered')
    txaio.return_value(txaio.create_future())
if __name__ == '__main__':
    client = Client(session_main=session_main)
    react(client.run)
# The following main1 variants sketch the same program at successively
# lower-level hook points (on_join, on_open, on_create).
@coroutine
def main1(reactor, session, details):
    result = yield session.call('com.example.add2', 2, 3)
    print('result={}'.format(result))
    yield session.leave()
if __name__ == '__main__':
    # hooking into on_join is the highest-level API -
    # the user callback will fire with a joined session ready to use
    # both the transport auto-reconnection logic and the session creation
    # defaults in Client are reused
    client = Client(on_join=main1)
    react(client.run)
@coroutine
def main1(reactor, transport, details):
    # transport.join() yields a joined session object when successful
    session = yield transport.join(details.config.realm)
    # the session is joined and can be used
    result = yield session.call('com.example.add2', 2, 3)
    print('result={}'.format(result))
    yield session.leave()
if __name__ == '__main__':
    # hooking into on_connect is a medium-level API -
    # the user callback will fire with a connected transport which
    # can be used to create new sessions from. the auto-reconnection
    # logic in Client is reused. user code can reuse a transport while
    # joining/leaving multiple times. with a multiplexing capable transport,
    # user code may even create multiple concurrent sessions.
    client = Client(on_open=main1)
    react(client.run)
@coroutine
def main1(reactor, client, details):
    # client.open() yields a connected transport when successful
    transport = yield client.open()
    # create a session running over the transport
    session = yield transport.join(config.realm)
    result = yield session.call('com.example.add2', 2, 3)
    print('result={}'.format(result))
    yield session.leave()
    yield transport.close()
if __name__ == '__main__':
    # hooking into on_create is a low-level API - the user callback
    # will fire with a created client, and the user code can
    # control the whole transport and session creation, connection and
    # reconnection process.
    client = Client(on_create=main1)
    react(client.run)
@coroutine
def main1(reactor, client, config):
    transport = yield client.open()
    session = yield transport.join(config.realm)
    result = yield session.call('com.example.add2', 2, 3)
    print('result={}'.format(result))
    yield session.leave()
    yield transport.close()
if __name__ == '__main__':
    # hooking into on_create is a low-level API - the user callback
    # will fire with a created client, and the user code can
    # control the whole transport and session creation, connection and
    # reconnection process.
    client = Client(on_create=main1)
    react(client.run)
# NOTE(review): "with yield client.open()" below is not valid Python syntax;
# this variant is design pseudo-code illustrating explicit retry/backoff.
@coroutine
def main1(reactor, client, config):
    while True:
        delay = client.next_delay()
        if delay:
            yield sleep(delay)
        else:
            break
        try:
            # client.open() yields a connected WAMP transport
            with yield client.open() as transport:
                try:
                    with yield transport.join(config.realm) as session:
                        result = yield session.call('com.example.add2', 2, 3)
                        print('result={}'.format(result))
                except Exception as e:
                    pass
        except Exception as e:
            pass
if __name__ == '__main__':
    # hooking into on_create is a low-level API - the user callback
    # will fire with a created client, and the user code can
    # control the whole transport and session creation, connection and
    # reconnection process.
    client = Client(on_create=main1)
    react(client.run)
# Sketch of session freeze/resume across two transports.
@coroutine
def main2(reactor, connection):
    # create a new transport from the connection
    transport = yield connection.open()
    # create a new session running on the transport
    session = yield transport.join(connection.config.realm)
    # now register a procedure
    def add2(a, b):
        return a + b
    yield session.register('com.example.add2', add2)
    # and call the procedure
    result = yield session.call('com.example.add2', 2, 3)
    print('result={}'.format(result))
    # now leave the realm, which frees the underlying transport
    # but freeze the session
    yield session.leave(freeze=True)
    # .. sleep, but not too long, otherwise router finally kills the session.
    yield sleep(60)
    # create a second, new transport from the connection
    # this might be a 2nd TCP connection or a 2nd logical WAMP transport running
    # over a single, multiplexed connection
    transport2 = yield connection.open()
    # now resume the session on the new transport. using the session token mechanism,
    # the router will resume the session and deliver buffered events/calls to the
    # resumed session
    yield session.resume(transport2)
    # create a 2nd session running over the 1st transport
    session2 = transport.join(connection.config.realm)
    # call the procedure registered on the (resumed) session running on transport2
    result = yield session.call('com.example.add2', 2, 3)
    print('result={}'.format(result))
    # if the transport supports multiplexing, multiple session can run
    # concurrently over the underlying transport
    if transport.is_multiplexed:
        session3 = yield transport.join(connection.config.realm)
    # now finally leave sessions ..
    yield session.leave()
    yield session2.leave()
    # .. and close the transports
    yield transport.close()
    yield transport2.close()
if __name__ == '__main__':
    # unix-domain rawsocket transport with msgpack serialization
    transports = [
        {
            'type': 'rawsocket',
            'serializer': 'msgpack',
            'endpoint': {
                'type': 'unix',
                'path': '/tmp/cb1.sock'
            }
        }
    ]
    config = Config(realm='myrealm1')
    connection = Connection(main, transports=transports, config=config)
    react(connection.start)
from typing import Optional
from discord.ext import commands
from common.models.db.item import Item
from common.models.db.user import User
from bot.cogs.core.database import Database
from bot.utils.misc import calc_total_wealth
from bot.villager_bot import VillagerBotCluster
class Badges(commands.Cog):
    """Awards and updates per-user badges based on economy/game milestones."""

    def __init__(self, bot: VillagerBotCluster):
        self.bot = bot
        self.d = bot.d

    @property
    def db(self) -> Database:
        # The Database cog provides all persistence helpers used below.
        return self.bot.get_cog("Database")

    async def fetch_user_badges(self, user_id) -> dict:
        """Fetch the user's badge record and return it as a plain dict."""
        return dict(await self.db.fetch_user_badges(user_id))

    async def update_user_badges(self, user_id, **kwargs):
        """Persist badge changes; kwargs are badge-name -> new-value pairs."""
        await self.db.update_user_badges(user_id, **kwargs)

    def emojify_badges(self, user_badges: dict) -> str:
        """Render earned badges as a space-separated emoji string.

        Leveled badges store an int level whose emoji entry is a list
        indexed by (level - 1); boolean badges map to a single emoji.
        """
        emojis = []
        for badge, value in dict(user_badges).items():
            if not value:
                continue
            emoji_entry = self.d.emojis.badges[badge]
            if isinstance(emoji_entry, list):
                emojis.append(emoji_entry[value - 1])
            else:
                emojis.append(emoji_entry)
        return " ".join(emojis)

    async def update_badge_uncle_scrooge(
        self, user_id: int, db_user: Optional[User] = None, user_items: Optional[list[Item]] = None
    ) -> None:
        """Grant the one-time uncle_scrooge badge once total wealth exceeds 100k."""
        badges = await self.fetch_user_badges(user_id)
        if badges["uncle_scrooge"]:
            return
        # Callers may pass pre-fetched data to avoid duplicate queries.
        if user_items is None:
            user_items = await self.db.fetch_items(user_id)
        if db_user is None:
            db_user = await self.db.fetch_user(user_id)
        total_wealth = calc_total_wealth(db_user, user_items)
        if total_wealth > 100_000:
            await self.update_user_badges(user_id, uncle_scrooge=True)

    async def update_badge_collector(self, user_id: int, user_items: Optional[list[Item]] = None) -> None:
        """Upgrade the collector badge based on unique item count."""
        # Levels are:
        # I -> 16 unique items
        # II -> 32 ||
        # III -> 64 ||
        # IV -> 128 ||
        # V -> 256 ||
        badges = await self.fetch_user_badges(user_id)
        collector_level = badges["collector"]
        if collector_level == 5:
            return
        if user_items is None:
            user_items = await self.db.fetch_items(user_id)
        user_items_len = len(user_items)
        # Check the highest tier first so the user lands on the top tier they qualify for.
        if collector_level < 5 and user_items_len >= 256:
            await self.update_user_badges(user_id, collector=5)
        elif collector_level < 4 and user_items_len >= 128:
            await self.update_user_badges(user_id, collector=4)
        elif collector_level < 3 and user_items_len >= 64:
            await self.update_user_badges(user_id, collector=3)
        elif collector_level < 2 and user_items_len >= 32:
            await self.update_user_badges(user_id, collector=2)
        elif collector_level < 1 and user_items_len >= 16:
            await self.update_user_badges(user_id, collector=1)

    async def update_badge_beekeeper(self, user_id: int, bees: Optional[int] = None) -> None:
        """Upgrade the beekeeper badge based on the Jar Of Bees count."""
        # levels are:
        # I -> 100 bees
        # II -> 1_000 bees
        # III -> 100_000 bees
        badges = await self.fetch_user_badges(user_id)
        beekeeper_level = badges["beekeeper"]
        if beekeeper_level == 3:
            return
        if bees is None:
            bees = await self.db.fetch_item(user_id, "Jar Of Bees")
            if bees is None:
                bees = 0
            else:
                bees = bees.amount
        if beekeeper_level < 3 and bees >= 100_000:
            await self.update_user_badges(user_id, beekeeper=3)
        elif beekeeper_level < 2 and bees >= 1000:
            await self.update_user_badges(user_id, beekeeper=2)
        elif beekeeper_level < 1 and bees >= 100:
            await self.update_user_badges(user_id, beekeeper=1)

    async def update_badge_pillager(self, user_id: int, pillaged_emeralds: int) -> None:
        """Upgrade the pillager badge based on total emeralds stolen."""
        # levels are:
        # I -> 100 emeralds stolen
        # II -> 1_000 emeralds stolen
        # III -> 100_000 emeralds stolen
        badges = await self.fetch_user_badges(user_id)
        pillager_level = badges["pillager"]
        if pillager_level == 3:
            return
        if pillager_level < 3 and pillaged_emeralds >= 100_000:
            await self.update_user_badges(user_id, pillager=3)
        elif pillager_level < 2 and pillaged_emeralds >= 1_000:
            await self.update_user_badges(user_id, pillager=2)
        elif pillager_level < 1 and pillaged_emeralds >= 100:
            await self.update_user_badges(user_id, pillager=1)

    async def update_badge_murderer(self, user_id: int, murders: int) -> None:
        """Upgrade the murderer badge based on mobs killed."""
        # levels are:
        # I -> 100 mobs cruelly genocided
        # II -> 1_000 mobs cruelly genocided
        # III -> 10_000 mobs cruelly genocided
        badges = await self.fetch_user_badges(user_id)
        murderer_level = badges["murderer"]
        if murderer_level == 3:
            return
        if murderer_level < 3 and murders >= 10_000:
            await self.update_user_badges(user_id, murderer=3)
        elif murderer_level < 2 and murders >= 1_000:
            await self.update_user_badges(user_id, murderer=2)
        elif murderer_level < 1 and murders >= 100:
            await self.update_user_badges(user_id, murderer=1)

    async def update_badge_fisherman(self, user_id: int, fishies_fished: int) -> None:
        """Upgrade the fisherman badge based on fish caught."""
        # levels are:
        # I -> 100 fishies fished (cod)
        # II -> 1_000 fishies fished (tropical)
        # III -> 10_000 fishies fished (rainbow trout)
        # IV -> 100_000 fishies fished (emerald)
        badges = await self.fetch_user_badges(user_id)
        fisherman_level = badges["fisherman"]
        if fisherman_level == 4:
            return
        if fisherman_level < 4 and fishies_fished >= 100_000:
            await self.update_user_badges(user_id, fisherman=4)
        elif fisherman_level < 3 and fishies_fished >= 10_000:
            await self.update_user_badges(user_id, fisherman=3)
        elif fisherman_level < 2 and fishies_fished >= 1_000:
            await self.update_user_badges(user_id, fisherman=2)
        elif fisherman_level < 1 and fishies_fished >= 100:
            await self.update_user_badges(user_id, fisherman=1)

    async def update_badge_enthusiast(self, user_id: int, commands_ran: int) -> None:
        """Upgrade the enthusiast badge based on total commands run."""
        # levels are:
        # I -> 10_000 commands ran
        # II -> 1_000_000 commands ran
        # III -> 100_000_000 commands ran
        badges = await self.fetch_user_badges(user_id)
        enthusiast_level = badges["enthusiast"]
        # BUGFIX: the thresholds were previously inverted (level III was awarded
        # at >10_000 commands). Check the highest tier with the largest
        # threshold first, matching the other badge updaters above.
        if enthusiast_level < 3 and commands_ran >= 100_000_000:
            await self.update_user_badges(user_id, enthusiast=3)
        elif enthusiast_level < 2 and commands_ran >= 1_000_000:
            await self.update_user_badges(user_id, enthusiast=2)
        elif enthusiast_level < 1 and commands_ran >= 10_000:
            await self.update_user_badges(user_id, enthusiast=1)
async def setup(bot: VillagerBotCluster) -> None:
await bot.add_cog(Badges(bot)) |
4,518 | set up class | import dns
import os
import subprocess
import time
from authtests import AuthTest
from xfrserver.xfrserver import AXFRServer
zones = {
1: ["""
$ORIGIN example.""","""
@ 86400 SOA foo bar 1 2 3 4 5
@ 4242 NS ns1.example.
@ 4242 NS ns2.example.
ns1.example. 4242 A 192.0.2.1
ns2.example. 4242 A 192.0.2.2
"""],
2: ["""
$ORIGIN example.""","""
@ 86400 SOA foo bar 2 2 3 4 5
@ 4242 NS ns1.example.
@ 4242 NS ns2.example.
ns1.example. 4242 A 192.0.2.1
ns2.example. 4242 A 192.0.2.2
newrecord.example. 8484 A 192.0.2.42
"""],
3: ["""
$ORIGIN example.""","""
@ 86400 SOA foo bar 3 2 3 4 5""","""
@ 86400 SOA foo bar 2 2 3 4 5""","""
@ 86400 SOA foo bar 3 2 3 4 5""","""
@ 4242 NS ns3.example.
"""],
5: ["""
$ORIGIN example.""","""
@ 86400 SOA foo bar 5 2 3 4 5""","""
@ 86400 SOA foo bar 3 2 3 4 5""","""
@ 86400 SOA foo bar 4 2 3 4 5""","""
@ 86400 SOA foo bar 4 2 3 4 5""","""
@ 86400 SOA foo bar 5 2 3 4 5""","""
@ 4242 NS ns5.example.
"""],
8: ["""
$ORIGIN example.""","""
@ 86400 SOA foo bar 8 2 3 4 5""","""
@ 86400 SOA foo bar 5 2 3 4 5""","""
@ 86400 SOA foo bar 6 2 3 4 5""","""
@ 86400 SOA foo bar 6 2 3 4 5""","""
@ 86400 SOA foo bar 7 2 3 4 5""","""
@ 86400 SOA foo bar 7 2 3 4 5""","""
@ 86400 SOA foo bar 8 2 3 4 5""","""
"""]
}
xfrServerPort = 4244
xfrServer = AXFRServer(xfrServerPort, zones)
class TestIXFR(AuthTest):
_config_template = """
launch=gsqlite3 bind
gsqlite3-database=configs/auth/powerdns.sqlite
gsqlite3-dnssec
slave
slave-cycle-interval=1
query-cache-ttl=20
negquery-cache-ttl=60
"""
_zones = {}
global xfrServerPort
_xfrDone = 0
@classmethod
def METHOD_NAME(cls):
super(TestIXFR, cls).METHOD_NAME()
os.system("$PDNSUTIL --config-dir=configs/auth create-slave-zone example. 127.0.0.1:%s" % (xfrServerPort,))
os.system("$PDNSUTIL --config-dir=configs/auth set-meta example. IXFR 1")
def waitUntilCorrectSerialIsLoaded(self, serial, timeout=10):
global xfrServer
xfrServer.moveToSerial(serial)
attempts = 0
while attempts < timeout:
print('attempts=%s timeout=%s' % (attempts, timeout))
servedSerial = xfrServer.getServedSerial()
print('servedSerial=%s' % servedSerial)
if servedSerial > serial:
raise AssertionError("Expected serial %d, got %d" % (serial, servedSerial))
if servedSerial == serial:
self._xfrDone = self._xfrDone + 1
time.sleep(1)
return
attempts = attempts + 1
time.sleep(1)
raise AssertionError("Waited %d seconds for the serial to be updated to %d but the last served serial is still %d" % (timeout, serial, servedSerial))
def checkFullZone(self, serial, data=None):
global zones
# FIXME: 90% duplication from _getRecordsForSerial
zone = []
if not data:
data = zones[serial]
for i in dns.zone.from_text('\n'.join(data), relativize=False).iterate_rdatasets():
n, rds = i
rrs=dns.rrset.RRset(n, rds.rdclass, rds.rdtype)
rrs.update(rds)
zone.append(rrs)
expected =[[zone[0]], sorted(zone[1:], key=lambda rrset: (rrset.name, rrset.rdtype)), [zone[0]]] # AXFRs are SOA-wrapped
query = dns.message.make_query('example.', 'AXFR')
res = self.sendTCPQueryMultiResponse(query, count=len(expected))
answers = [r.answer for r in res]
answers[1].sort(key=lambda rrset: (rrset.name, rrset.rdtype))
self.assertEqual(answers, expected)
def checkIXFR(self, fromserial, toserial):
global zones, xfrServer
ixfr = []
soa1 = xfrServer._getSOAForSerial(fromserial)
soa2 = xfrServer._getSOAForSerial(toserial)
newrecord = [r for r in xfrServer._getRecordsForSerial(toserial) if r.name==dns.name.from_text('newrecord.example.')]
query = dns.message.make_query('example.', 'IXFR')
query.authority = [soa1]
expected = [[soa2], [soa1], [soa2], newrecord, [soa2]]
res = self.sendTCPQueryMultiResponse(query, count=len(expected))
answers = [r.answer for r in res]
# answers[1].sort(key=lambda rrset: (rrset.name, rrset.rdtype))
self.assertEqual(answers, expected)
# check the TTLs
answerPos = 0
for expectedAnswer in expected:
pos = 0
for rec in expectedAnswer:
self.assertEqual(rec.ttl, answers[answerPos][pos].ttl)
pos = pos + 1
answerPos = answerPos + 1
def test_a_XFR(self):
self.waitUntilCorrectSerialIsLoaded(1)
self.checkFullZone(1)
self.waitUntilCorrectSerialIsLoaded(2)
self.checkFullZone(2)
self.waitUntilCorrectSerialIsLoaded(3)
self.checkFullZone(3, data=["""
$ORIGIN example.""","""
@ 86400 SOA foo bar 3 2 3 4 5
@ 4242 NS ns1.example.
@ 4242 NS ns2.example.
@ 4242 NS ns3.example.
ns1.example. 4242 A 192.0.2.1
ns2.example. 4242 A 192.0.2.2
newrecord.example. 8484 A 192.0.2.42
"""])
self.waitUntilCorrectSerialIsLoaded(5)
self.checkFullZone(5, data=["""
$ORIGIN example.""","""
@ 86400 SOA foo bar 5 2 3 4 5
@ 4242 NS ns1.example.
@ 4242 NS ns2.example.
@ 4242 NS ns3.example.
@ 4242 NS ns5.example.
ns1.example. 4242 A 192.0.2.1
ns2.example. 4242 A 192.0.2.2
newrecord.example. 8484 A 192.0.2.42
"""])
# _b_ because we expect post-XFR testing state
def test_b_UDP_SOA_existing(self):
query = dns.message.make_query('example.', 'SOA')
expected = dns.message.make_response(query)
expected.answer.append(xfrServer._getSOAForSerial(5))
expected.flags |= dns.flags.AA
response = self.sendUDPQuery(query)
self.assertEqual(expected, response)
# check the TTLs
pos = 0
for rec in expected.answer:
self.assertEqual(rec.ttl, response.answer[pos].ttl)
pos = pos + 1
def test_b_UDP_SOA_not_loaded(self):
query = dns.message.make_query('example2.', 'SOA')
expected = dns.message.make_response(query)
expected.set_rcode(dns.rcode.REFUSED)
response = self.sendUDPQuery(query)
self.assertEqual(expected, response)
def test_b_UDP_SOA_not_configured(self):
query = dns.message.make_query('example3.', 'SOA')
expected = dns.message.make_response(query)
expected.set_rcode(dns.rcode.REFUSED)
response = self.sendUDPQuery(query)
self.assertEqual(expected, response)
def test_d_XFR(self):
self.waitUntilCorrectSerialIsLoaded(8)
self.checkFullZone(7, data=["""
$ORIGIN example.""","""
@ 86400 SOA foo bar 8 2 3 4 5
@ 4242 NS ns1.example.
@ 4242 NS ns2.example.
@ 4242 NS ns3.example.
@ 4242 NS ns5.example.
ns1.example. 4242 A 192.0.2.1
ns2.example. 4242 A 192.0.2.2
newrecord.example. 8484 A 192.0.2.42
"""])
ret = subprocess.check_output([os.environ['PDNSUTIL'],
'--config-dir=configs/auth',
'list-zone', 'example'], stderr=subprocess.STDOUT)
rets = ret.split(b'\n')
self.assertEqual(1, sum(b'SOA' in l for l in ret.split(b'\n'))) |
4,519 | num cells | #!/usr/bin/env python3
import arbor
import pandas
import seaborn
import matplotlib.pyplot as plt
# Construct chains of cells linked with gap junctions,
# Chains are connected by synapses.
# An event generator is attached to the first cell in the network.
#
# c --gj-- c --gj-- c --gj-- c --gj-- c
# |
# syn
# |
# c --gj-- c --gj-- c --gj-- c --gj-- c
#
# The individual cells consist of a soma and one dendrite
def make_cable_cell(gid):
# Build a segment tree
tree = arbor.segment_tree()
# Soma with radius 5 μm and length 2 * radius = 10 μm, (tag = 1)
s = tree.append(
arbor.mnpos, arbor.mpoint(-10, 0, 0, 5), arbor.mpoint(0, 0, 0, 5), tag=1
)
# Single dendrite with radius 2 μm and length 40 μm, (tag = 2)
tree.append(s, arbor.mpoint(0, 0, 0, 2), arbor.mpoint(40, 0, 0, 2), tag=2)
# Label dictionary for cell components
labels = arbor.label_dict(
{
# Mark location for synapse site at midpoint of dendrite (branch 0 soma + dendrite)
"synapse_site": "(location 0 0.6)",
# Gap junction site at connection point of soma and dendrite
"gj_site": "(location 0 0.2)",
# Label root of the tree
"root": "(root)",
}
).add_swc_tags()
# Paint dynamics onto the cell, hh on soma and passive properties on dendrite
decor = (
arbor.decor()
.paint('"soma"', arbor.density("hh"))
.paint('"dend"', arbor.density("pas"))
# Attach one synapse and gap junction each on their labeled sites
.place('"synapse_site"', arbor.synapse("expsyn"), "syn")
.place('"gj_site"', arbor.junction("gj"), "gj")
# Attach detector to cell root
.place('"root"', arbor.threshold_detector(-10), "detector")
)
return arbor.cable_cell(tree, decor, labels)
# Create a recipe that generates connected chains of cells
class chain_recipe(arbor.recipe):
def __init__(self, ncells_per_chain, nchains):
arbor.recipe.__init__(self)
self.nchains = nchains
self.ncells_per_chain = ncells_per_chain
self.props = arbor.neuron_cable_properties()
def METHOD_NAME(self):
return self.ncells_per_chain * self.nchains
def cell_description(self, gid):
return make_cable_cell(gid)
def cell_kind(self, gid):
return arbor.cell_kind.cable
# Create synapse connection between last cell of one chain and first cell of following chain
def connections_on(self, gid):
if (gid == 0) or (gid % self.ncells_per_chain > 0):
return []
else:
src = gid - 1
w = 0.05
d = 10
return [arbor.connection((src, "detector"), "syn", w, d)]
# Create gap junction connections between a cell within a chain and its neighbor(s)
def gap_junctions_on(self, gid):
conns = []
chain_begin = int(gid / self.ncells_per_chain) * self.ncells_per_chain
chain_end = chain_begin + self.ncells_per_chain
next_cell = gid + 1
prev_cell = gid - 1
if next_cell < chain_end:
conns.append(arbor.gap_junction_connection((gid + 1, "gj"), "gj", 0.015))
if prev_cell >= chain_begin:
conns.append(arbor.gap_junction_connection((gid - 1, "gj"), "gj", 0.015))
return conns
# Event generator at first cell
def event_generators(self, gid):
if gid == 0:
sched = arbor.explicit_schedule([1])
weight = 0.1
return [arbor.event_generator("syn", weight, sched)]
return []
# Place a probe at the root of each cell
def probes(self, gid):
return [arbor.cable_probe_membrane_voltage('"root"')]
def global_properties(self, kind):
return self.props
# Number of cells per chain
ncells_per_chain = 5
# Number of chains
nchains = 3
# Total number of cells
ncells = nchains * ncells_per_chain
# Instantiate recipe
recipe = chain_recipe(ncells_per_chain, nchains)
# Create a default simulation
sim = arbor.simulation(recipe)
# Set spike generators to record
sim.record(arbor.spike_recording.all)
# Sampler
handles = [sim.sample((gid, 0), arbor.regular_schedule(0.1)) for gid in range(ncells)]
# Run simulation for 100 ms
sim.run(100)
print("Simulation finished")
# Print spike times
print("spikes:")
for sp in sim.spikes():
print(" ", sp)
# Plot the results
print("Plotting results ...")
df_list = []
for gid in range(ncells):
samples, meta = sim.samples(handles[gid])[0]
df_list.append(
pandas.DataFrame(
{"t/ms": samples[:, 0], "U/mV": samples[:, 1], "Cell": f"cell {gid}"}
)
)
df = pandas.concat(df_list, ignore_index=True)
seaborn.relplot(data=df, kind="line", x="t/ms", y="U/mV", hue="Cell", errorbar=None)
plt.show() |
4,520 | harmonize countries | """
This code generates the garden step with the MPI dataset for both harmonized over time and
current margin estimates for the variables MPI, share of MPI poor and intensity of poverty.
"""
import pandas as pd
from owid.catalog import Dataset, Table
from structlog import get_logger
from etl.data_helpers import geo
from etl.helpers import PathFinder
from etl.paths import DATA_DIR
MEADOW_VERSION = "2022-12-13"
log = get_logger()
# naming conventions
N = PathFinder(__file__)
def run(dest_dir: str) -> None:
log.info("multidimensional_poverty_index.start")
# read dataset from meadow
ds_meadow = Dataset(DATA_DIR / f"meadow/ophi/{MEADOW_VERSION}/multidimensional_poverty_index")
tb_meadow = ds_meadow["multidimensional_poverty_index"]
df = pd.DataFrame(tb_meadow)
# %% [markdown]
# ### Note on `year`
#
# The way `year` is formatted – as a string variable often spanning two calendar years – won't work with our schema. We have to map the data to a single (integer) year.
#
# For now, arbitrarily, I take the first year in these cases and convert to integer.
# %%
# First year = first 4 characters of the year string
df["year"] = df["year"].str[:4].astype(int)
# %% [markdown]
# ## Multi-dimesional poverty measures
#
# At least initially, we will be primarily concerned with the three measures that relate to overall multi-dimensional poverty:
# - `Headcount ratio`: the share of population in multidimensional poverty
# - `Intensity`: a measure of the average depth of poverty (of the poor only – NB, not like the World Bank's poverty gap index)
# - `MPI`: the product of `Headcount ratio` and `Intensity`.
#
# These are multi-dimensional poverty measures – a weighted aggregation across many individual indicators.
# Here I prepare this data as I would for uploading to OWID grapher and visualize it – including both `hot` and `cme` data in the same file.
# %%
# Prep data for garden
# Modify variable names
df = df.replace({"M0": "mpi", "H": "share", "A": "intensity"})
# filter for main multi-dimensional pov measures
df = df[df["measure"].isin(["mpi", "share", "intensity"])].reset_index(drop=True)
# pivot to wide format
df = df.pivot_table(index=["country", "year"], columns=["flav", "measure", "area_lab"], values="b").reset_index()
# collapse multi-level index into single column names
df.columns = [" ".join(col).strip().replace(" ", "_") for col in df.columns.values]
# Format column names, making it all lowercase
df.columns = df.columns.str.lower() # type: ignore
log.info("multidimensional_poverty_index.harmonize_countries")
df = METHOD_NAME(df)
# create new dataset with the same metadata as meadow
ds_garden = Dataset.create_empty(dest_dir)
tb_garden = Table(df)
# update metadata from yaml file
ds_garden.metadata.update_from_yaml(N.metadata_path)
tb_garden.update_metadata_from_yaml(N.metadata_path, "multidimensional_poverty_index")
# For now the variable descriptions are stored as a list of strings, this transforms them into a single string
for col in tb_garden.columns:
if isinstance(tb_garden[col].metadata.description, list):
tb_garden[col].metadata.description = "\n".join(tb_garden[col].metadata.description)
ds_garden.add(tb_garden)
ds_garden.save()
log.info("multidimensional_poverty_index.end")
def METHOD_NAME(df: pd.DataFrame) -> pd.DataFrame:
unharmonized_countries = df["country"]
df = geo.METHOD_NAME(df=df, countries_file=str(N.country_mapping_path))
missing_countries = set(unharmonized_countries[df.country.isnull()])
if any(missing_countries):
raise RuntimeError(
"The following raw country names have not been harmonized. "
f"Please: (a) edit {N.country_mapping_path} to include these country "
f"names; or (b) add them to {N.excluded_countries_path}."
f"Raw country names: {missing_countries}"
)
return df |
4,521 | save | # -----------------------------------------------------------------------------
# Getting Things GNOME! - a personal organizer for the GNOME desktop
# Copyright (c) 2008-2015 - Lionel Dricot & Bertrand Rousseau
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
"""
Classes responsible for handling user configuration
"""
import configparser
import os
import re
import logging
from GTG.core.dirs import CONFIG_DIR
log = logging.getLogger(__name__)
DEFAULTS = {
'browser': {
"bg_color_enable": True,
"contents_preview_enable": False,
'tag_pane': True,
"sidebar_width": 120,
'collapsed_tasks': [],
'expanded_tags': [],
'view': 'default',
"opened_tasks": [],
'width': 400,
'height': 400,
'tasklist_sort_column': 5,
'tasklist_sort_order': 1,
"font_name": "",
'hour': "00",
'min': "00",
'autoclean': True,
'autoclean_days': 30,
'dark_mode': False,
'maximized': False,
},
'tag_editor': {
"custom_colors": [],
},
'plugins': {
"enabled": [],
"disabled": [],
},
'task': {
'position': [],
'size': [],
},
'backend': {}
}
def open_config_file(config_file):
""" Opens config file and makes additional checks
Creates config file if it doesn't exist and makes sure it is readable and
writable by user. That prevents surprise when user is not able to save
configuration when exiting the app.
"""
dirname = os.path.dirname(config_file)
if not os.path.exists(dirname):
os.makedirs(dirname)
if not os.path.exists(config_file):
open(config_file, "w").close()
if not os.access(config_file, os.R_OK | os.W_OK):
raise Exception("File " + config_file + " is a configuration file "
"for gtg, but it cannot be read or written. "
"Please check it")
config = configparser.ConfigParser(interpolation=None)
try:
config.read(config_file)
except configparser.Error as e:
log.warning("Problem with opening file %s: %s", config_file, e)
return config
class SectionConfig():
""" Configuration only for a section (system or a task) """
def __init__(self, section_name, section, defaults, save_function):
""" Initiatizes section config:
- section_name: name for writing error logs
- section: section of the config handled by this object
- defaults: dictionary of default values
- save_function: function to be called to save changes (this function
needs to save the whole config)
"""
self._section_name = section_name
self._section = section
self._defaults = defaults
self._save_function = save_function
def _getlist(self, option):
""" Parses string representation of list from configuration
List can't contain an empty value as those are skipped over,
e.g. "a, ,b" is parsed as ['a', 'b'].
Accepted formats:
- "('a', 'b'),('c','d','e')" => ["('a', 'b')", "('c','d','e')"]
- "a, b" => ['a', 'b']
"""
raw = self._section.get(option)
if not raw:
return None
# Match tuples in format "('string1', 'string2', ...)"
values = re.findall(r'\(.*?\)', raw)
if not values:
# It only normal list
values = raw.split(',')
return [item.strip() for item in values if item]
def _type_function(self, default_value):
""" Returns function that returns correct type of value """
default_type = type(default_value)
if default_type in (list, tuple):
return self._getlist
elif default_type == int:
return self._section.getint
elif default_type == bool:
return self._section.getboolean
else:
return self._section.get
def get(self, option):
""" Get option from configuration.
If the option is not specified in the configuration or is of invalid
type, return default value. If there is no default value,
None is returned
"""
default_value = self._defaults.get(option)
get_function = self._type_function(default_value)
try:
value = get_function(option)
except ValueError as error:
value = None
log.warning('Invalid configuration value "%s" for %s in %s: %s',
self._section.get(option), option, self._section_name,
error)
if value is None and default_value is None:
raise ValueError(
'No valid configuration value or default value was '
'found for %s in %s'.format(option, self._section_name))
elif value is None:
return default_value
else:
return value
def set(self, option, value):
if type(value) in (list, tuple):
value = ','.join(str(item) for item in value)
else:
value = str(value)
self._section[option] = value
# Immediately save the configuration
self.METHOD_NAME()
def METHOD_NAME(self):
self._save_function()
class CoreConfig():
""" Class holding configuration to all systems and tasks """
def __init__(self):
self._conf_path = os.path.join(CONFIG_DIR, 'gtg.conf')
self._conf = open_config_file(self._conf_path)
self._task_conf_path = os.path.join(CONFIG_DIR, 'tasks.conf')
self._task_conf = open_config_file(self._task_conf_path)
self._backends_conf_path = os.path.join(CONFIG_DIR, 'backends.conf')
self._backends_conf = open_config_file(self._backends_conf_path)
def save_gtg_config(self):
self._conf.write(open(self._conf_path, 'w'))
def save_task_config(self):
self._task_conf.write(open(self._task_conf_path, 'w'))
def save_backends_config(self):
self._backends_conf.write(open(self._backends_conf_path, 'w'))
def get_subconfig(self, name):
""" Returns configuration object for special section of config """
if name not in self._conf:
self._conf.add_section(name)
defaults = DEFAULTS.get(name, dict())
return SectionConfig(
name, self._conf[name], defaults, self.save_gtg_config)
def get_task_config(self, task_id):
if task_id not in self._task_conf:
self._task_conf.add_section(task_id)
return SectionConfig(
f'Task {task_id}',
self._task_conf[task_id],
DEFAULTS['task'],
self.save_task_config)
def get_all_backends(self):
return self._backends_conf.sections()
def get_backend_config(self, backend):
if backend not in self._backends_conf:
self._backends_conf.add_section(backend)
return SectionConfig(
f'Backend {backend}',
self._backends_conf[backend],
DEFAULTS['backend'],
self.save_backends_config) |
4,522 | close | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, TYPE_CHECKING
from azure.core.rest import HttpRequest, HttpResponse
from azure.mgmt.core import ARMPipelineClient
from . import models as _models
from .._serialization import Deserializer, Serializer
from ._configuration import MonitorManagementClientConfiguration
from .operations import ScheduledQueryRulesOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class MonitorManagementClient: # pylint: disable=client-accepts-api-version-keyword
"""Monitor Management Client.
:ivar scheduled_query_rules: ScheduledQueryRulesOperations operations
:vartype scheduled_query_rules:
azure.mgmt.monitor.v2018_04_16.operations.ScheduledQueryRulesOperations
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The ID of the target subscription. Required.
:type subscription_id: str
:param base_url: Service URL. Default value is "https://management.azure.com".
:type base_url: str
:keyword api_version: Api Version. Default value is "2018-04-16". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(
self,
credential: "TokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = MonitorManagementClientConfiguration(
credential=credential, subscription_id=subscription_id, **kwargs
)
self._client: ARMPipelineClient = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.scheduled_query_rules = ScheduledQueryRulesOperations(
self._client, self._config, self._serialize, self._deserialize
)
def _send_request(self, request: HttpRequest, **kwargs: Any) -> HttpResponse:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = client._send_request(request)
<HttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.HttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
def METHOD_NAME(self) -> None:
self._client.METHOD_NAME()
def __enter__(self) -> "MonitorManagementClient":
self._client.__enter__()
return self
def __exit__(self, *exc_details: Any) -> None:
self._client.__exit__(*exc_details) |
4,523 | forward | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import json
import logging
import os
import random
from collections import namedtuple
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
from genotypes import Genotype
from ops import PRIMITIVES
from nni.algorithms.nas.pytorch.cdarts.utils import *
def get_logger(file_path):
""" Make python logger """
logger = logging.getLogger('cdarts')
log_format = '%(asctime)s | %(message)s'
formatter = logging.Formatter(log_format, datefmt='%m/%d %I:%M:%S %p')
file_handler = logging.FileHandler(file_path)
file_handler.setFormatter(formatter)
# stream_handler = logging.StreamHandler()
# stream_handler.setFormatter(formatter)
logger.addHandler(file_handler)
# logger.addHandler(stream_handler)
logger.setLevel(logging.INFO)
return logger
class CyclicIterator:
def __init__(self, loader, sampler, distributed):
self.loader = loader
self.sampler = sampler
self.epoch = 0
self.distributed = distributed
self._next_epoch()
def _next_epoch(self):
if self.distributed:
self.sampler.set_epoch(self.epoch)
self.iterator = iter(self.loader)
self.epoch += 1
def __len__(self):
return len(self.loader)
def __iter__(self):
return self
def __next__(self):
try:
return next(self.iterator)
except StopIteration:
self._next_epoch()
return next(self.iterator)
class CrossEntropyLabelSmooth(nn.Module):
def __init__(self, num_classes, epsilon):
super(CrossEntropyLabelSmooth, self).__init__()
self.num_classes = num_classes
self.epsilon = epsilon
self.logsoftmax = nn.LogSoftmax(dim=1)
def METHOD_NAME(self, inputs, targets):
log_probs = self.logsoftmax(inputs)
targets = torch.zeros_like(log_probs).scatter_(1, targets.unsqueeze(1), 1)
targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes
loss = (-targets * log_probs).mean(0).sum()
return loss
def parse_results(results, n_nodes):
concat = range(2, 2 + n_nodes)
normal_gene = []
reduction_gene = []
for i in range(n_nodes):
normal_node = []
reduction_node = []
for j in range(2 + i):
normal_key = 'normal_n{}_p{}'.format(i + 2, j)
reduction_key = 'reduce_n{}_p{}'.format(i + 2, j)
normal_op = results[normal_key].cpu().numpy()
reduction_op = results[reduction_key].cpu().numpy()
if sum(normal_op == 1):
normal_index = np.argmax(normal_op)
normal_node.append((PRIMITIVES[normal_index], j))
if sum(reduction_op == 1):
reduction_index = np.argmax(reduction_op)
reduction_node.append((PRIMITIVES[reduction_index], j))
normal_gene.append(normal_node)
reduction_gene.append(reduction_node)
genotypes = Genotype(normal=normal_gene, normal_concat=concat,
reduce=reduction_gene, reduce_concat=concat)
return genotypes
def param_size(model, loss_fn, input_size):
"""
Compute parameter size in MB
"""
x = torch.rand([2] + input_size).cuda()
y, _ = model(x)
target = torch.randint(model.n_classes, size=[2]).cuda()
loss = loss_fn(y, target)
loss.backward()
n_params = sum(np.prod(v.size()) for k, v in model.named_parameters() if not k.startswith('aux_head') and v.grad is not None)
return n_params / 1e6
def encode_tensor(data, device):
if isinstance(data, list):
if all(map(lambda o: isinstance(o, bool), data)):
return torch.tensor(data, dtype=torch.bool, device=device) # pylint: disable=not-callable
else:
return torch.tensor(data, dtype=torch.float, device=device) # pylint: disable=not-callable
if isinstance(data, dict):
return {k: encode_tensor(v, device) for k, v in data.items()}
return data
def reset_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True |
4,524 | validate private key | # Copyright 2021 Collate
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Credentials helper module
"""
import base64
import json
import os
import tempfile
from typing import Dict, List, Optional
from cryptography.hazmat.primitives import serialization
from google import auth
from google.auth import impersonated_credentials
from metadata.generated.schema.security.credentials.gcpCredentials import (
GCPCredentials,
GcpCredentialsPath,
)
from metadata.generated.schema.security.credentials.gcpValues import (
GcpCredentialsValues,
)
from metadata.utils.logger import utils_logger
logger = utils_logger()
GOOGLE_CREDENTIALS = "GOOGLE_APPLICATION_CREDENTIALS"
GOOGLE_CLOUD_SCOPES = [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/drive",
]
class InvalidGcpConfigException(Exception):
"""
Raised when we have errors trying to set GCP credentials
"""
class InvalidPrivateKeyException(Exception):
"""
If the key cannot be serialised
"""
def METHOD_NAME(private_key: str) -> None:
"""
Make sure that a private key can be properly parsed
by cryptography backends
:param private_key: key to validate
"""
try:
serialization.load_pem_private_key(private_key.encode(), password=None)
except ValueError as err:
msg = f"Cannot serialise key: {err}"
raise InvalidPrivateKeyException(msg) from err
def create_credential_tmp_file(credentials: dict) -> str:
"""
Given a credentials' dict, store it in a tmp file
:param credentials: dictionary to store
:return: path to find the file
"""
with tempfile.NamedTemporaryFile(delete=False) as temp_file:
cred_json = json.dumps(credentials, indent=4, separators=(",", ": "))
temp_file.write(cred_json.encode())
# Get the path of the temporary file
temp_file_path = temp_file.name
# The temporary file will be automatically closed when exiting the "with" block,
# but we can explicitly close it here to free up resources immediately.
temp_file.close()
# Return the path of the temporary file
return temp_file_path
def build_google_credentials_dict(gcp_values: GcpCredentialsValues) -> Dict[str, str]:
"""
Given GcPCredentialsValues, build a dictionary as the JSON file
downloaded from GCP with the service_account
:param gcp_values: GCP credentials
:return: Dictionary with credentials
"""
private_key_str = gcp_values.privateKey.get_secret_value()
# adding the replace string here to escape line break if passed from env
private_key_str = private_key_str.replace("\\n", "\n")
METHOD_NAME(private_key_str)
return {
"type": gcp_values.type,
"project_id": gcp_values.projectId.__root__,
"private_key_id": gcp_values.privateKeyId,
"private_key": private_key_str,
"client_email": gcp_values.clientEmail,
"client_id": gcp_values.clientId,
"auth_uri": str(gcp_values.authUri),
"token_uri": str(gcp_values.tokenUri),
"auth_provider_x509_cert_url": str(gcp_values.authProviderX509CertUrl),
"client_x509_cert_url": str(gcp_values.clientX509CertUrl),
}
def set_google_credentials(gcp_credentials: GCPCredentials) -> None:
"""
Set GCP credentials environment variable
:param gcp_credentials: GCPCredentials
"""
if isinstance(gcp_credentials.gcpConfig, GcpCredentialsPath):
os.environ[GOOGLE_CREDENTIALS] = str(gcp_credentials.gcpConfig.__root__)
return
if gcp_credentials.gcpConfig.projectId is None:
logger.info(
"No credentials available, using the current environment permissions authenticated via gcloud SDK."
)
return
if isinstance(gcp_credentials.gcpConfig, GcpCredentialsValues):
if (
gcp_credentials.gcpConfig.projectId
and not gcp_credentials.gcpConfig.privateKey
):
logger.info(
"Overriding default projectid, using the current environment permissions authenticated via gcloud SDK."
)
return
credentials_dict = build_google_credentials_dict(gcp_credentials.gcpConfig)
tmp_credentials_file = create_credential_tmp_file(credentials=credentials_dict)
os.environ[GOOGLE_CREDENTIALS] = tmp_credentials_file
return
raise InvalidGcpConfigException(
f"Error trying to set GCP credentials with {gcp_credentials}."
" Check https://docs.open-metadata.org/connectors/database/bigquery "
)
def generate_http_basic_token(username, password):
"""
Generates a HTTP basic token from username and password
Returns a token string (not a byte)
"""
token = base64.b64encode(f"{username}:{password}".encode("utf-8")).decode("utf-8")
return token
def get_gcp_default_credentials(
quota_project_id: Optional[str] = None,
scopes: Optional[List[str]] = None,
) -> auth.credentials.Credentials:
"""Get the default credentials
Args:
quota_project_id: quota project ID
scopes: Google Cloud sscopes
"""
scopes = scopes or GOOGLE_CLOUD_SCOPES
credentials, _ = auth.default(quota_project_id=quota_project_id, scopes=scopes)
return credentials
def get_gcp_impersonate_credentials(
impersonate_service_account: str,
quoted_project_id: Optional[str] = None,
scopes: Optional[List[str]] = None,
lifetime: Optional[int] = 3600,
) -> impersonated_credentials.Credentials:
"""Get the credentials to impersonate"""
scopes = scopes or GOOGLE_CLOUD_SCOPES
source_credentials, _ = auth.default()
if quoted_project_id:
source_credentials, quoted_project_id = auth.default(
quota_project_id=quoted_project_id
)
return impersonated_credentials.Credentials(
source_credentials=source_credentials,
target_principal=impersonate_service_account,
target_scopes=scopes,
lifetime=lifetime,
) |
4,525 | xendir |
import struct
class XSVFParser(object):
def __init__(self):
self._handlers = {
0x00: self.XCOMPLETE ,
0x01: self.XTDOMASK ,
0x02: self.XSIR ,
0x03: self.XSDR ,
0x04: self.XRUNTEST ,
0x07: self.XREPEAT ,
0x08: self.XSDRSIZE ,
0x09: self.XSDRTDO ,
0x0a: self.XSETSDRMASKS,
0x0b: self.XSDRINC ,
0x0c: self.XSDRB ,
0x0d: self.XSDRC ,
0x0e: self.XSDRE ,
0x0f: self.XSDRTDOB ,
0x10: self.XSDRTDOC ,
0x11: self.XSDRTDOE ,
0x12: self.XSTATE ,
0x13: self.METHOD_NAME ,
0x14: self.XENDDR ,
0x15: self.XSIR2 ,
0x16: self.XCOMMENT ,
0x17: self.XWAIT ,
}
def tdomask(self):
return self._xtdomask
def read_byte(self):
return self.read_bytes(1)[0]
def read_bytes(self, n):
c = self._f.read(n)
if len(c) == n:
return c
else:
raise RuntimeError('unexpected end of file')
def read_bits(self, n):
length_bytes = (n + 7) >> 3
return self.read_bytes(length_bytes)
def read_u32(self):
return struct.unpack('>I', self.read_bytes(4))[0]
def parse(self, f, debug=False):
self._f = f
self._debug = debug
self._xcomplete = False
self._xenddr = None
self._xendir = None
self._xruntest = 0
self._xsdrsize = None
self._xtdomask = None
self._commands = []
while self._xcomplete == False:
self.read_instruction()
self._f = None
return self._commands
def read_instruction(self):
instruction_id = self.read_byte()
if instruction_id in self._handlers:
instruction_handler = self._handlers[instruction_id]
result = instruction_handler()
if result is not None:
self._commands.append(result)
else:
raise RuntimeError('unexpected instruction 0x%02x' % instruction_id)
def XCOMPLETE(self):
self._xcomplete = True
def XTDOMASK(self):
length_bits = self._xsdrsize
self._xtdomask = self.read_bits(length_bits)
def XSIR(self):
length_bits = self.read_byte()
tdi = self.read_bits(length_bits)
if self._debug:
print('XSIR tdi=%d:%s' % (length_bits, tdi.hex()))
return {
'type': 'xsir',
'tdi': {
'length': length_bits,
'data': tdi
},
}
def XSDR(self):
length_bits = self._xsdrsize
tdi = self.read_bits(length_bits)
if self._debug:
print('XSDR tdi=%d:%s' % (length_bits, tdi.hex()))
return {
'type': 'xsdr',
'tdi': {
'length': length_bits,
'data': tdi,
},
}
def XRUNTEST(self):
self._xruntest = self.read_u32()
if self._debug:
print('XRUNTEST number=%d' % self._xruntest)
def XREPEAT(self):
repeat = self.read_byte()
# print('XREPEAT times=%d' % repeat)
def XSDRSIZE(self):
self._xsdrsize = self.read_u32()
def XSDRTDO(self):
length_bits = self._xsdrsize
tdi = self.read_bits(length_bits)
tdo_mask = self._xtdomask
self._tdo_expected = (length_bits, self.read_bits(length_bits))
wait = self._xruntest
if wait == 0:
end_state = self._xenddr
else:
end_state = 1 # Run-Test/Idle
if self._debug:
print('XSDRTDO tdi=%d:%s tdo_mask=%d:%s tdo_expected=%d:%s end_state=%u wait=%u' % (
length_bits, tdi.hex(),
length_bits, tdo_mask.hex(),
self._tdo_expected[0], self._tdo_expected[1].hex(),
end_state,
wait,
))
return {
'type': 'xsdrtdo',
'tdi': {
'length': length_bits,
'data': tdi
},
'tdo_mask': {
'length': length_bits,
'data': tdo_mask,
},
'tdo_expected': {
'length': self._tdo_expected[0],
'data': self._tdo_expected[1],
},
'end_state': end_state,
'wait': wait,
}
def XSETSDRMASKS(self):
raise RuntimeError('unimplemented')
def XSDRINC(self):
raise RuntimeError('unimplemented')
def XSDRB(self):
raise RuntimeError('unimplemented')
def XSDRC(self):
raise RuntimeError('unimplemented')
def XSDRE(self):
raise RuntimeError('unimplemented')
def XSDRTDOB(self):
raise RuntimeError('unimplemented')
def XSDRTDOC(self):
raise RuntimeError('unimplemented')
def XSDRTDOE(self):
raise RuntimeError('unimplemented')
def XSTATE(self):
state = self.read_byte()
if self._debug:
print('XSTATE %u' % state)
return {
'type': 'xstate',
'state': state,
}
def METHOD_NAME(self):
self._xendir = self.read_byte()
def XENDDR(self):
self._xenddr = self.read_byte()
def XSIR2(self):
raise RuntimeError('unimplemented')
def XCOMMENT(self):
raise RuntimeError('unimplemented')
def XWAIT(self):
wait_state = self.read_byte()
end_state = self.read_byte()
wait_time = self.read_u32() |
4,526 | test touch | import os
from pathlib import Path
from tempfile import TemporaryDirectory
import types
import unittest
from unittest import mock
from pythonforandroid import util
class TestUtil(unittest.TestCase):
"""
An inherited class of `unittest.TestCase`to test the module
:mod:`~pythonforandroid.util`.
"""
@mock.patch("pythonforandroid.util.makedirs")
def test_ensure_dir(self, mock_makedirs):
"""
Basic test for method :meth:`~pythonforandroid.util.ensure_dir`. Here
we make sure that the mentioned method is called only once.
"""
util.ensure_dir("fake_directory")
mock_makedirs.assert_called_once_with("fake_directory")
@mock.patch("shutil.rmtree")
@mock.patch("pythonforandroid.util.mkdtemp")
def test_temp_directory(self, mock_mkdtemp, mock_shutil_rmtree):
"""
Basic test for method :meth:`~pythonforandroid.util.temp_directory`. We
perform this test by `mocking` the command `mkdtemp` and
`shutil.rmtree` and we make sure that those functions are called in the
proper place.
"""
mock_mkdtemp.return_value = "/temp/any_directory"
with util.temp_directory():
mock_mkdtemp.assert_called_once()
mock_shutil_rmtree.assert_not_called()
mock_shutil_rmtree.assert_called_once_with("/temp/any_directory")
@mock.patch("pythonforandroid.util.chdir")
def test_current_directory(self, moch_chdir):
"""
Basic test for method :meth:`~pythonforandroid.util.current_directory`.
We `mock` chdir and we check that the command is executed once we are
inside a python's `with` statement. Then we check that `chdir has been
called with the proper arguments inside this `with` statement and also
that, once we leave the `with` statement, is called again with the
current working path.
"""
chdir_dir = "/temp/any_directory"
# test chdir to existing directory
with util.current_directory(chdir_dir):
moch_chdir.assert_called_once_with("/temp/any_directory")
moch_chdir.assert_has_calls(
[mock.call("/temp/any_directory"), mock.call(os.getcwd())]
)
def test_current_directory_exception(self):
"""
Another test for method
:meth:`~pythonforandroid.util.current_directory`, but here we check
that using the method with a non-existing-directory raises an `OSError`
exception.
.. note:: test chdir to non-existing directory, should raise error,
for py3 the exception is FileNotFoundError and IOError for py2, to
avoid introduce conditions, we test with a more generic exception
"""
with self.assertRaises(OSError), util.current_directory(
"/fake/directory"
):
pass
@mock.patch("pythonforandroid.util.walk")
def test_walk_valid_filens(self, mock_walk):
"""
Test method :meth:`~pythonforandroid.util.walk_valid_filens`
In here we simulate the following directory structure:
/fake_dir
|-- README
|-- setup.py
|-- __pycache__
|-- |__
|__Lib
|-- abc.pyc
|-- abc.py
|__ ctypes
|-- util.pyc
|-- util.py
Then we execute the method in order to check that we got the expected
result, which should be:
.. code-block:: python
:emphasize-lines: 2-4
expected_result = {
"/fake_dir/README",
"/fake_dir/Lib/abc.pyc",
"/fake_dir/Lib/ctypes/util.pyc",
}
"""
simulated_walk_result = [
["/fake_dir", ["__pycache__", "Lib"], ["README", "setup.py"]],
["/fake_dir/Lib", ["ctypes"], ["abc.pyc", "abc.py"]],
["/fake_dir/Lib/ctypes", [], ["util.pyc", "util.py"]],
]
mock_walk.return_value = simulated_walk_result
file_ens = util.walk_valid_filens(
"/fake_dir", ["__pycache__"], ["*.py"]
)
self.assertIsInstance(file_ens, types.GeneratorType)
expected_result = {
"/fake_dir/README",
"/fake_dir/Lib/abc.pyc",
"/fake_dir/Lib/ctypes/util.pyc",
}
result = set(file_ens)
self.assertEqual(result, expected_result)
def test_util_exceptions(self):
"""
Test exceptions for a couple of methods:
- method :meth:`~pythonforandroid.util.BuildInterruptingException`
- method :meth:`~pythonforandroid.util.handle_build_exception`
Here we create an exception with method
:meth:`~pythonforandroid.util.BuildInterruptingException` and we run it
inside method :meth:`~pythonforandroid.util.handle_build_exception` to
make sure that it raises an `SystemExit`.
"""
exc = util.BuildInterruptingException(
"missing dependency xxx", instructions="pip install --user xxx"
)
with self.assertRaises(SystemExit):
util.handle_build_exception(exc)
def test_move(self):
with mock.patch(
"pythonforandroid.util.LOGGER"
) as m_logger, TemporaryDirectory() as base_dir:
new_path = Path(base_dir) / "new"
# Set up source
old_path = Path(base_dir) / "old"
with open(old_path, "w") as outfile:
outfile.write("Temporary content")
# Non existent source
with self.assertRaises(FileNotFoundError):
util.move(new_path, new_path)
m_logger.debug.assert_called()
m_logger.error.assert_not_called()
m_logger.reset_mock()
assert old_path.exists()
assert not new_path.exists()
# Successful move
util.move(old_path, new_path)
assert not old_path.exists()
assert new_path.exists()
m_logger.debug.assert_called()
m_logger.error.assert_not_called()
m_logger.reset_mock()
# Move over existing:
existing_path = Path(base_dir) / "existing"
existing_path.touch()
util.move(new_path, existing_path)
with open(existing_path, "r") as infile:
assert infile.read() == "Temporary content"
m_logger.debug.assert_called()
m_logger.error.assert_not_called()
m_logger.reset_mock()
def METHOD_NAME(self):
# Just checking the new file case.
# Assume the existing file timestamp case will work if this does.
with TemporaryDirectory() as base_dir:
new_file_path = Path(base_dir) / "new_file"
assert not new_file_path.exists()
util.touch(new_file_path)
assert new_file_path.exists() |
4,527 | debugger goto clipboard | # XXX - execute until line number/cursor
# XXX - more memory printing he thumping
from talon import Context, Module
mod = Module()
mod.tag("debugger", desc="Tag for enabling generic debugger commands")
ctx = Context()
ctx.matches = r"""
tag: debugger
"""
x86_registers = {
"air": "eax",
"bat": "ebx",
"cap": "ecx",
"drum": "edx",
"source": "esi",
"dest": "edi",
"stack": "esp",
"frame": "ebp",
"instruction": "eip",
}
x64_registers = {
# general purpose
"air": "rax",
"racks": "rax",
"bat": "rbx",
"cap": "rcx",
"drum": "rdx",
"source": "rsi",
"dest": "rdi",
"stack": "rsp",
"stack pointer": "rsp",
"frame": "rbp",
"frame pointer": "rbp",
"base": "rbp",
"base pointer": "rbp",
"eight": "r8",
"nine": "r9",
"ten": "r10",
"eleven": "r11",
"twelve": "r12",
"thirteen": "r13",
"fourteen": "r14",
"fifteen": "r15",
# pointers
"instruction": "rip",
"rip": "rip",
# segment
}
# XXX - pass by windbg to dump
windows_x64_register_parameters = ["rcx", "rdx", "r8", "r9"]
# XXX - make this dynamic
ctx.lists["self.registers"] = x64_registers
# assembly_languages = {
# "x86": x86_registers,
# "x64": x64_registers,
# }
mod.list("registers", desc="Main architecture register set")
@mod.capture(rule="{self.registers}")
def registers(m) -> str:
"Returns a register"
return m.registers
@mod.action_class
class Actions:
def debugger_step_into():
"""Step into an instruction in the debugger"""
def debugger_step_over():
"""Step over an instruction in the debugger"""
def debugger_step_line():
"""Step into a source line in the debugger"""
def debugger_step_over_line():
"""Step over a source line in the debugger"""
def debugger_step_out():
"""Step until function exit in the debugger"""
def debugger_continue():
"""Continue execution in the debugger"""
def debugger_restart():
"""Restart execution in the debugger"""
def debugger_start():
"""Start debugging"""
def debugger_stop():
"""Stop the debugger"""
def debugger_exit():
"""Exit the debugger"""
def debugger_detach():
"""Detach the debugger"""
def debugger_backtrace():
"""Print a back trace in the debugger"""
def debugger_get_register():
"""Print specific register in the debugger"""
def debugger_set_register():
"""Set specific register in the debugger"""
def debugger_show_registers():
"""Print the current registers in the debugger"""
def debugger_break_now():
"""Break into the debugger"""
def debugger_break_here():
"""Set a break on the current line"""
def debugger_show_breakpoints():
"""Print the current breakpoints in the debugger"""
def debugger_add_sw_breakpoint():
"""Add one software breakpoint in the debugger"""
def debugger_add_hw_breakpoint():
"""Add one hardware breakpoint in the debugger"""
def debugger_clear_all_breakpoints():
"""Clear all breakpoints in the debugger"""
def debugger_clear_breakpoint():
"""Clear one breakpoint in the debugger"""
def debugger_clear_breakpoint_id(number_small: int):
"""Clear one breakpoint id in the debugger"""
def debugger_disable_breakpoint_id(number_small: int):
"""Disable one breakpoint id in the debugger"""
def debugger_disable_breakpoint():
"""Disable one breakpoint in the debugger"""
def debugger_disable_all_breakpoints():
"""Disable all breakpoints in the debugger"""
def debugger_enable_breakpoint():
"""Enable one breakpoint in the debugger"""
def debugger_enable_breakpoint_id(number_small: int):
"""Enable one breakpoint id in the debugger"""
def debugger_enable_all_breakpoints():
"""Enable all breakpoints in the debugger"""
def debugger_disassemble():
"""Preps the disassemble command in the debugger"""
def debugger_disassemble_here():
"""Disassembles instructions at the current instruction pointer"""
def debugger_disassemble_clipboard():
"""Disassemble instructions at an address in the clipboard"""
def debugger_goto_address():
"""Jump to a specific address in the debugger"""
def METHOD_NAME():
"""Jump to a specific address stored in the clipboard"""
def debugger_goto_highlighted():
"""Jump to a specific highlighted address in the debugger"""
def debugger_dump_ascii_string():
"""Display as specific address as an ascii string in the debugger"""
def debugger_dump_unicode_string():
"""Display as specific address as an unicode string in the debugger"""
def debugger_dump_pointers():
"""Display as specific address as a list of pointers in the debugger"""
def debugger_inspect_type():
"""Inspect a specific data type in the debugger"""
def debugger_clear_line():
"""Clear unwanted data from the command line"""
def debugger_list_modules():
"""List the loaded modules in the debuggee memory space""" |
4,528 | need segmentation | import typing
from os.path import basename, dirname, isdir, isfile
from pathlib import Path
from qtpy.QtWidgets import QFileDialog
from PartSegCore.io_utils import LoadBase
if typing.TYPE_CHECKING: # pragma: no cover
from PartSeg.common_backend.base_settings import BaseSettings
class LoadProperty(typing.NamedTuple):
load_location: typing.List[typing.Union[str, Path]]
selected_filter: str
load_class: typing.Type[LoadBase]
IORegister = typing.Union[typing.Dict[str, type(LoadBase)], type(LoadBase), str, typing.List[type(LoadBase)]]
class IOMethodMock:
__new_style__ = False
def __init__(self, name: str):
self.name = name
def get_name(self) -> str:
return self.name
def get_name_with_suffix(self) -> str:
return self.get_name()
def get_short_name(self) -> str:
return self.get_name()
@staticmethod
def get_default_extension() -> str:
return ""
@staticmethod
def METHOD_NAME() -> bool:
return False
@staticmethod
def need_mask() -> bool:
return False
@staticmethod
def get_fields() -> list:
return []
@staticmethod
def number_of_files() -> int:
return 1
@staticmethod
def save(*args, **kwargs):
"""For keep compatibility with SaveBase"""
class LoadRegisterFileDialog(QFileDialog):
def __init__(
self,
io_register: IORegister,
caption,
parent=None,
):
if isinstance(io_register, str):
io_register = {io_register: IOMethodMock(io_register)}
if isinstance(io_register, list):
io_register = {x.get_name(): x for x in io_register}
if not isinstance(io_register, typing.MutableMapping):
io_register = {io_register.get_name(): io_register}
super().__init__(parent, caption)
self.io_register = {x.get_name_with_suffix(): x for x in io_register.values()}
self.setNameFilters(list(self.io_register.keys()))
class CustomLoadDialog(LoadRegisterFileDialog):
def __init__(
self,
load_register: IORegister,
parent=None,
caption="Load file",
history: typing.Optional[typing.List[str]] = None,
):
super().__init__(load_register, caption, parent)
self.setOption(QFileDialog.Option.DontUseNativeDialog, True)
self.setFileMode(QFileDialog.FileMode.ExistingFile)
self.setAcceptMode(QFileDialog.AcceptMode.AcceptOpen)
self.files_list = []
self.setWindowTitle("Open File")
if history is not None:
history = self.history() + history
self.setHistory(history)
def accept(self):
selected_files = [
x for x in self.selectedFiles() if self.fileMode == QFileDialog.FileMode.Directory or isfile(x)
]
if not selected_files:
return
if len(selected_files) == 1 and self.fileMode != QFileDialog.FileMode.Directory and isdir(selected_files[0]):
super().accept()
return
self.files_list.extend(selected_files)
chosen_class: LoadBase = self.io_register[self.selectedNameFilter()]
if len(self.files_list) < chosen_class.number_of_files():
self.setNameFilters([chosen_class.get_name()])
self.setWindowTitle("Open File for:" + ",".join(basename(x) for x in self.files_list))
self.selectFile(chosen_class.get_next_file(self.files_list))
else:
super().accept()
def get_result(self) -> LoadProperty:
chosen_class: typing.Type[LoadBase] = self.io_register[self.selectedNameFilter()]
return LoadProperty(self.files_list, self.selectedNameFilter(), chosen_class)
class PLoadDialog(CustomLoadDialog):
def __init__(
self,
load_register: typing.Union[typing.Dict[str, type(LoadBase)], type(LoadBase)],
*,
settings: "BaseSettings",
path: str,
default_directory: typing.Optional[str] = None,
filter_path="",
parent=None,
caption="Load file",
):
if default_directory is None:
default_directory = str(Path.home())
super().__init__(
load_register=load_register,
parent=parent,
caption=caption,
history=settings.get_path_history(),
)
self.settings = settings
self.path_in_dict = path
self.filter_path = filter_path
self.setDirectory(self.settings.get(path, default_directory))
if self.filter_path:
self.selectNameFilter(self.settings.get(self.filter_path, ""))
def accept(self):
super().accept()
if self.result() != QFileDialog.Accepted:
return
directory = dirname(self.selectedFiles()[0])
self.settings.add_path_history(directory)
self.settings.set(self.path_in_dict, directory)
if self.filter_path:
self.settings.set(self.filter_path, self.selectedNameFilter())
class SelectDirectoryDialog(QFileDialog):
def __init__(
self,
settings: "BaseSettings",
settings_path: typing.Union[str, typing.List[str]],
default_directory: typing.Optional[str] = None,
parent=None,
) -> None:
super().__init__(parent, "Select directory")
if default_directory is None:
default_directory = str(Path.home())
self.settings = settings
self.setFileMode(QFileDialog.FileMode.Directory)
self.setAcceptMode(QFileDialog.AcceptMode.AcceptOpen)
if isinstance(settings_path, list):
for path_ in reversed(settings_path):
default_directory = self.settings.get(path_, default_directory)
self.setDirectory(default_directory)
self.path_in_dict = settings_path[0]
else:
self.setDirectory(self.settings.get(settings_path, default_directory))
self.path_in_dict = settings_path
history = self.history() + settings.get_path_history()
self.setHistory(history)
def accept(self) -> None:
super().accept()
if self.result() != QFileDialog.DialogCode.Accepted:
return
directory = self.selectedFiles()[0]
self.settings.add_path_history(directory)
self.settings.set(self.path_in_dict, directory) |
4,529 | plot windows | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Plot area
=========
A custom QMdiArea where to add custom PlotWindows
"""
import numpy as np
from silx.utils.weakref import WeakList
from silx.gui import qt
from .plot1D import Plot1D
from .plot2D import Plot2D
class MdiSubWindow(qt.QMdiSubWindow):
def __init__(self, parent=None):
super(MdiSubWindow, self).__init__(parent=parent)
self.setAttribute(qt.Qt.WA_DeleteOnClose, True)
def closeEvent(self, event):
super(MdiSubWindow, self).closeEvent(event)
# Renumber the plot windows and emit the changed signal.
self.mdiArea().renumberPlotWindows()
self.mdiArea().changed.emit()
class PlotArea(qt.QMdiArea):
changed = qt.pyqtSignal()
def __init__(self, parent=None):
super(PlotArea, self).__init__(parent=parent)
#: Context menu
self.setContextMenuPolicy(qt.Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.showContextMenu)
#: Set the order of the subwindows returned by subWindowList.
self.setActivationOrder(qt.QMdiArea.CreationOrder)
self.setWindowTitle('PlotArea')
self.setMinimumSize(960, 960)
self.setGeometry(0, 0, 1280, 1024)
def getPlotWindow(self, index):
"""get the PlotWindow widget object for a given index"""
return self.subWindowList()[index].widget()
def METHOD_NAME(self):
widgets = WeakList()
for subWindow in self.subWindowList():
widgets.append(subWindow.widget())
return widgets
def showContextMenu(self, position):
menu = qt.QMenu('Plot Area Menu', self)
action = qt.QAction('Add Plot1D', self,
triggered=self.addPlot1D)
menu.addAction(action)
action = qt.QAction('Add Plot2D Window', self,
triggered=self.addPlot2D)
menu.addAction(action)
menu.addSeparator()
action = qt.QAction('Cascade Windows', self,
triggered=self.cascadeSubWindows)
menu.addAction(action)
action = qt.QAction('Tile Windows', self,
triggered=self.tileSubWindows)
menu.addAction(action)
menu.exec_(self.mapToGlobal(position))
def addPlot1D(self, title=None):
return self.addPlotWindow(plotType='1D', title=title)
def addPlot2D(self, title=None):
return self.addPlotWindow(plotType='2D', title=title)
def addPlotWindow(self, *args, plotType='1D', title=None):
"""add a plot window in the mdi Area
Parameters
----------
plotType : str
type of plot:
'1D' (= curves)
'2D' (= images),
"""
subWindow = MdiSubWindow(parent=self)
if plotType == '2D':
plotWindow = Plot2D(parent=subWindow, title=title)
else:
plotWindow = Plot1D(parent=subWindow, title=title)
plotWindow.setIndex(len(self.METHOD_NAME()))
subWindow.setWidget(plotWindow)
subWindow.show()
self.changed.emit()
return plotWindow
def renumberPlotWindows(self):
for index, plotWindow in enumerate(self.METHOD_NAME()):
plotWindow.setIndex(index)
class PlotAreaMainWindow(qt.QMainWindow):
def __init__(self, app=None, parent=None):
super(PlotAreaMainWindow, self).__init__(parent=parent)
self.app = app
self.plotArea = PlotArea()
self.setCentralWidget(self.plotArea)
# Add (empty) menu bar -> contents added later
self.menuBar = qt.QMenuBar()
self.setMenuBar(self.menuBar)
self.closeAction = qt.QAction(
"&Quit", self, shortcut="Ctrl+Q", triggered=self.onClose)
self._addMenuAction(self.menuBar, self.closeAction)
# Populate the menu bar with common actions and shortcuts
def _addMenuAction(self, menu, action, deferShortcut=False):
"""Add action to menu as well as self so that when the menu bar is
invisible, its actions are still available. If deferShortcut
is True, set the shortcut context to widget-only, where it
will avoid conflict with shortcuts already bound to the
widgets themselves.
"""
menu.addAction(action)
self.addAction(action)
if deferShortcut:
action.setShortcutContext(qt.Qt.WidgetShortcut)
else:
action.setShortcutContext(qt.Qt.ApplicationShortcut)
def onClose(self):
self.app.lastWindowClosed.connect(qt.pyqtSignal(quit()))
def main():
global app
app = qt.QApplication([])
# Create the ad hoc window containing a PlotWidget and associated tools
window = PlotAreaMainWindow(app)
window.setAttribute(qt.Qt.WA_DeleteOnClose)
window.setWindowTitle("PlotArea Main Window")
window.show()
# Add two plot windows to the plot area.
window.plotArea.addPlotWindow(plotType='1D')
window.plotArea.addPlotWindow(plotType='2D')
plot0 = window.plotArea.getPlotWindow(0)
plot1 = window.plotArea.getPlotWindow(1)
# Add an 1D data + 2D image to the plots
x0 = np.linspace(-10, 10, 200)
x1 = np.linspace(-10, 5, 150)
x = np.outer(x0, x1)
image = np.sin(x) / x
plot0.addCurve(x0, np.sin(x0)/x0, legend='test curve 0')
plot0.addCurve(x1, np.sin(x1)/x1+0.1, legend='test curve 1')
plot1.addImage(image)
app.exec_()
if __name__ == '__main__':
main() |
4,530 | handler | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"elastic-san delete",
is_preview=True,
confirmation="Are you sure you want to perform this operation?",
)
class Delete(AAZCommand):
"""Delete an Elastic SAN.
:example: Delete an Elastic SAN.
az elastic-san delete -g {rg} -n {san_name}
"""
_aaz_info = {
"version": "2022-12-01-preview",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.elasticsan/elasticsans/{}", "2022-12-01-preview"],
]
}
AZ_SUPPORT_NO_WAIT = True
def METHOD_NAME(self, command_args):
super().METHOD_NAME(command_args)
return self.build_lro_poller(self._execute_operations, None)
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.elastic_san_name = AAZStrArg(
options=["-n", "--name", "--elastic-san-name"],
help="The name of the ElasticSan.",
required=True,
id_part="name",
fmt=AAZStrArgFormat(
pattern="^[A-Za-z0-9]+((-|_)[a-z0-9A-Z]+)*$",
max_length=24,
min_length=3,
),
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
yield self.ElasticSansDelete(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
class ElasticSansDelete(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [202]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "location"},
path_format_arguments=self.url_parameters,
)
if session.http_response.status_code in [200]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "location"},
path_format_arguments=self.url_parameters,
)
if session.http_response.status_code in [204]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_204,
self.on_error,
lro_options={"final-state-via": "location"},
path_format_arguments=self.url_parameters,
)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ElasticSan/elasticSans/{elasticSanName}",
**self.url_parameters
)
@property
def method(self):
return "DELETE"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"elasticSanName", self.ctx.args.elastic_san_name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2022-12-01-preview",
required=True,
),
}
return parameters
def on_200(self, session):
pass
def on_204(self, session):
pass
class _DeleteHelper:
"""Helper class for Delete"""
__all__ = ["Delete"] |
4,531 | fixe d to numpy int64 | #!/usr/bin/env python
#
# Copyright (c) 2012-2023 Snowflake Computing Inc. All rights reserved.
#
from __future__ import annotations
import decimal
import time
from datetime import datetime, timedelta, tzinfo
from logging import getLogger
from typing import TYPE_CHECKING
import pytz
from pytz import UTC
from .constants import PARAMETER_TIMEZONE
from .converter import _generate_tzinfo_from_tzoffset
if TYPE_CHECKING:
from numpy import datetime64, float64, int64
try:
import numpy
except ImportError:
numpy = None
try:
import tzlocal
except ImportError:
tzlocal = None
ZERO_EPOCH = datetime.utcfromtimestamp(0)
logger = getLogger(__name__)
class ArrowConverterContext:
    """Python helper functions for arrow conversions.

    Windows timestamp functions are necessary because Windows cannot handle -ve
    timestamps. Putting the OS check into the non-windows function would
    probably take up more CPU cycles then just deciding this at compile time.
    """

    def __init__(
        self,
        session_parameters: dict[str, str | int | bool] | None = None,
    ) -> None:
        if session_parameters is None:
            session_parameters = {}
        # Session timezone name as sent by the server, or None when unset.
        self._timezone = (
            None
            if PARAMETER_TIMEZONE not in session_parameters
            else session_parameters[PARAMETER_TIMEZONE]
        )

    @property
    def timezone(self) -> str | None:
        """The session timezone name (None when the server sent none)."""
        return self._timezone

    @timezone.setter
    def timezone(self, tz) -> None:
        self._timezone = tz

    def _get_session_tz(self) -> tzinfo:
        """Get the session timezone or use the local computer's timezone."""
        try:
            tz = "UTC" if not self.timezone else self.timezone
            return pytz.timezone(tz)
        except pytz.exceptions.UnknownTimeZoneError:
            logger.warning("converting to tzinfo failed")
            if tzlocal is not None:
                return tzlocal.get_localzone()
            # BUGFIX: the previous fallback read `datetime.timezone.utc` off the
            # datetime *class* (always AttributeError, silently masked by an
            # except clause); use the stdlib timezone type directly.
            from datetime import timezone

            return timezone.utc

    def TIMESTAMP_TZ_to_python(
        self, epoch: int, microseconds: int, tz: int
    ) -> datetime:
        """Convert epoch seconds + microsecond fraction + offset to an aware datetime.

        `tz` arrives biased by +1440 minutes -- presumably to keep it
        non-negative on the wire; TODO confirm against the server protocol.
        """
        tzinfo = _generate_tzinfo_from_tzoffset(tz - 1440)
        return datetime.fromtimestamp(epoch, tz=tzinfo) + timedelta(
            microseconds=microseconds
        )

    def TIMESTAMP_TZ_to_python_windows(
        self, epoch: int, microseconds: int, tz: int
    ) -> datetime:
        # Windows variant: build from ZERO_EPOCH because fromtimestamp()
        # cannot handle negative epochs there.
        tzinfo = _generate_tzinfo_from_tzoffset(tz - 1440)
        t = ZERO_EPOCH + timedelta(seconds=epoch, microseconds=microseconds)
        if pytz.utc != tzinfo:
            t += tzinfo.utcoffset(t)
        return t.replace(tzinfo=tzinfo)

    def TIMESTAMP_NTZ_to_python(self, epoch: int, microseconds: int) -> datetime:
        """Convert epoch seconds + microsecond fraction to a naive datetime."""
        return datetime.utcfromtimestamp(epoch) + timedelta(microseconds=microseconds)

    def TIMESTAMP_NTZ_to_python_windows(
        self, epoch: int, microseconds: int
    ) -> datetime:
        return ZERO_EPOCH + timedelta(seconds=epoch, microseconds=microseconds)

    def TIMESTAMP_LTZ_to_python(self, epoch: int, microseconds: int) -> datetime:
        """Convert epoch seconds + microsecond fraction to the session timezone."""
        tzinfo = self._get_session_tz()
        return datetime.fromtimestamp(epoch, tz=tzinfo) + timedelta(
            microseconds=microseconds
        )

    def TIMESTAMP_LTZ_to_python_windows(self, epoch: int, microseconds: int):
        try:
            tzinfo = self._get_session_tz()
            ts = ZERO_EPOCH + timedelta(seconds=epoch, microseconds=microseconds)
            return pytz.utc.localize(ts, is_dst=False).astimezone(tzinfo)
        except OverflowError:
            # BUGFIX: the %s placeholder previously had no argument supplied.
            logger.debug(
                "OverflowError in converting from epoch time to "
                "timestamp_ltz: %s(ms). Falling back to use struct_time.",
                microseconds,
            )
            # NOTE: this fallback returns a time.struct_time, not a datetime.
            return time.localtime(microseconds)

    def REAL_to_numpy_float64(self, py_double: float) -> float64:
        return numpy.float64(py_double)

    def METHOD_NAME(self, py_long: int) -> int64:
        return numpy.int64(py_long)

    def FIXED_to_numpy_float64(self, py_long: int, scale: int) -> float64:
        # Go through Decimal so the scaling is exact before the float cast.
        return numpy.float64(decimal.Decimal(py_long).scaleb(-scale))

    def DATE_to_numpy_datetime64(self, py_days: int) -> datetime64:
        return numpy.datetime64(py_days, "D")

    def TIMESTAMP_NTZ_ONE_FIELD_to_numpy_datetime64(
        self, value: int, scale: int
    ) -> datetime64:
        # Single field encodes the whole timestamp at the given decimal scale.
        nanoseconds = int(decimal.Decimal(value).scaleb(9 - scale))
        return numpy.datetime64(nanoseconds, "ns")

    def TIMESTAMP_NTZ_TWO_FIELD_to_numpy_datetime64(
        self, epoch: int, fraction: int
    ) -> datetime64:
        # Two fields: whole seconds plus a separate nanosecond fraction.
        nanoseconds = int(decimal.Decimal(epoch).scaleb(9) + decimal.Decimal(fraction))
        return numpy.datetime64(nanoseconds, "ns")
4,532 | test create | # -----------------------------------------------------------------------------
# BSD 3-Clause License
#
# Copyright (c) 2022-2023, Science and Technology Facilities Council
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
# Author R. W. Ford, STFC Daresbury Lab
'''Module containing tests for the ScalarArgMetadata class.
'''
import pytest
from fparser.two import Fortran2003
from psyclone.domain.lfric.kernel import ScalarArgMetadata
def METHOD_NAME():
    '''Test that an instance of ScalarArgMetadata can be created
    successfully.
    '''
    metadata = ScalarArgMetadata("GH_REAL", "GH_READ")
    assert isinstance(metadata, ScalarArgMetadata)
    expected = ("gh_scalar", "gh_real", "gh_read")
    assert (metadata.form, metadata._datatype, metadata._access) == expected
def test_get_metadata():
    '''Test that the _get_metadata class method works as expected.'''
    tree = ScalarArgMetadata.create_fparser2(
        "arg_type(GH_SCALAR, GH_REAL, GH_READ)", Fortran2003.Part_Ref)
    datatype, access = ScalarArgMetadata._get_metadata(tree)
    assert (datatype, access) == ("GH_REAL", "GH_READ")
def test_get_datatype_access_metadata():
    '''Test that the get_datatype_access_metadata class method works as
    expected. Test that all relevant check and get methods are called
    by raising exceptions within them, as well as checking for valid
    input.
    '''
    # check_fparser2_arg: non-fparser2 input is rejected with TypeError.
    with pytest.raises(TypeError) as info:
        _ = ScalarArgMetadata._get_datatype_access_metadata("hello")
    assert ("Expected kernel metadata to be encoded as an fparser2 "
            "Part_Ref object but found type 'str' with value 'hello'."
            in str(info.value))
    # check_nargs: scalar metadata must have exactly 3 arguments.
    fparser2_tree = ScalarArgMetadata.create_fparser2(
        "arg_type(x)", Fortran2003.Part_Ref)
    with pytest.raises(ValueError) as info:
        _ = ScalarArgMetadata._get_datatype_access_metadata(fparser2_tree)
    assert ("Expected kernel metadata to have 3 arguments, but "
            "found 1 in 'arg_type(x)'." in str(info.value))
    # check_first_arg: the first metadata property must be 'gh_scalar'.
    fparser2_tree = ScalarArgMetadata.create_fparser2(
        "arg_type(GH_FIELD, GH_REAL, GH_READ)", Fortran2003.Part_Ref)
    with pytest.raises(ValueError) as info:
        _ = ScalarArgMetadata._get_datatype_access_metadata(fparser2_tree)
    assert ("Metadata for 'scalar' kernel arguments should have 'gh_scalar' "
            "as the first metadata property, but found 'GH_FIELD'."
            in str(info.value))
    # OK: valid metadata yields the datatype and access descriptors.
    fparser2_tree = ScalarArgMetadata.create_fparser2(
        "arg_type(GH_SCALAR, GH_REAL, GH_READ)", Fortran2003.Part_Ref)
    datatype, access = ScalarArgMetadata._get_datatype_access_metadata(
        fparser2_tree)
    assert datatype == "GH_REAL"
    assert access == "GH_READ"
def test_fortran_string():
    '''Test that the fortran_string method works as expected.'''
    # Round-trip: parsing then re-emitting reproduces the input exactly.
    expected = "arg_type(gh_scalar, gh_real, gh_read)"
    metadata = ScalarArgMetadata.create_from_fortran_string(expected)
    assert metadata.fortran_string() == expected
def test_check_datatype():
    '''Test the check_datatype method works as expected.'''
    # A recognised value passes silently.
    ScalarArgMetadata.check_datatype("GH_REAL")
    # Anything else raises with an explanatory message.
    with pytest.raises(ValueError) as err:
        ScalarArgMetadata.check_datatype("invalid")
    assert ("The 'datatype descriptor' metadata should be a recognised value "
            "(one of ['gh_real', 'gh_integer', 'gh_logical']) but found "
            "'invalid'." in str(err.value))
def test_check_access():
    '''Test the check_access method works as expected.'''
    # Recognised access descriptors pass silently.
    ScalarArgMetadata.check_access("GH_READ")
    ScalarArgMetadata.check_access("gh_sum")
    # Anything else raises with an explanatory message.
    with pytest.raises(ValueError) as err:
        ScalarArgMetadata.check_access("invalid")
    assert ("The 'access descriptor' metadata should be a recognised value "
            "(one of ['gh_read', 'gh_sum']) but found 'invalid'."
            in str(err.value))
4,533 | process path token | #! /usr/bin/env python
import argparse
import re
import sys
import yaml
# JSON-Pointer escape sequences (RFC 6901): '~0' encodes '~', '~1' encodes '/'.
RE_TILDE = re.compile('~0')
RE_SLASH = re.compile('~1')

parser = argparse.ArgumentParser()
parser.add_argument('orig_file', help='file to apply patch to')
parser.add_argument('patch_file', help='patch to apply')
parser.add_argument('-i',
                    '--index',
                    help='which root-level object to patch (default: 0)',
                    type=int,
                    default=0)
parser.add_argument('-p',
                    '--patch-index',
                    help=('which root-level object to use as'
                          ' the patch (default: 0)'),
                    type=int,
                    default=0)
parser.add_argument('-e',
                    '--environment',
                    # BUGFIX: user-visible help text read "replcae".
                    help=('replace {ENV} in the patch with'
                          ' the given value (default: staging)'),
                    default='staging')
def METHOD_NAME(tok):
    """Decode a JSON-Pointer reference token (RFC 6901).

    '~1' must be rewritten to '/' *before* '~0' -> '~', otherwise '~01'
    would incorrectly decode to '/'. Literal patterns need no regex.
    """
    return tok.replace('~1', '/').replace('~0', '~')


def apply_patch(obj, patch):
    """Apply a (subset of) RFC 6902 JSON Patch to *obj* in place.

    Supports 'add', 'remove' and 'replace'; 'copy', 'move' and 'test'
    are silently ignored. Operations missing 'op' or 'path' are skipped.
    """
    for p in patch:
        op = p.get('op', None)
        path = p.get('path', None)
        val = p.get('value', None)
        if not op:
            continue
        if not path:
            continue
        # Walk the pointer down to the parent container; the final token
        # becomes the key (dict) or index (list) to operate on.
        ptr = obj
        key = None
        if path:
            if path == '/':
                key = ''
            else:
                tokens = [METHOD_NAME(tok)
                          for tok in path.split('/')[1:]]
                tokens, key = tokens[:-1], tokens[-1]
                for t in tokens:
                    if isinstance(ptr, list):
                        t = int(t)
                    ptr = ptr[t]
        if op == 'add':
            if isinstance(ptr, list):
                if key == '-':
                    ptr.append(val)
                else:
                    ptr.insert(int(key), val)
            else:
                # NOTE(review): RFC 6902 'add' assigns the value; this merges
                # dicts instead -- presumably intentional for ConfigMaps.
                ptr[key].update(val)
        elif op == 'remove':
            if isinstance(ptr, list):
                if key == '-':
                    ptr.pop()
                else:
                    # BUGFIX: list.remove() deletes by *value*; a JSON-Pointer
                    # index must delete by *position*.
                    del ptr[int(key)]
            else:
                ptr.pop(key)
        elif op == 'replace':
            if isinstance(ptr, list):
                if key == '-':
                    ptr[-1] = val
                else:
                    ptr[int(key)] = val
            else:
                ptr[key] = val
        else:
            continue  # TODO(opadron): finish this if we ever start caring
                      # about copy, move, or test
def process_patch(patch, env):
    """Recursively substitute ``{ENV}`` with *env* in every string of *patch*."""
    if isinstance(patch, str):
        return patch.format(ENV=env)
    if isinstance(patch, list):
        return [process_patch(entry, env) for entry in patch]
    if isinstance(patch, dict):
        return {
            process_patch(key, env): process_patch(value, env)
            for key, value in patch.items()
        }
    return patch
def warn(*args):
    """Write *args* to stdout in bold yellow, preceded by a blank line."""
    write = sys.stdout.write
    write('\n')
    write('\x1b[1;33m')
    write(*args)
    write('\x1b[0m\n')
# MAIN ENTRY POINT
args = parser.parse_args()

# Read the patch document, from the given file or stdin by default.
f = sys.stdin
f_name = '<stdin>'
if args.patch_file:
    f = open(args.patch_file)
    f_name = args.patch_file
patch = None
with f:
    try:
        roots = list(yaml.full_load_all(f))
        patch = roots[args.patch_index]
    except yaml.scanner.ScannerError as e:
        warn('patch file failed to parse')
        sys.stdout.write('\x1b[1;31m')
        # Point the error at the real file name instead of the stream repr.
        e.problem_mark.name = f_name
        print(e)
        sys.stdout.write('\x1b[0m')

# some sanity checks
if patch['apiVersion'] != 'v1':
    raise ValueError('patch["apiVersion"] != "v1"')
if patch['kind'] != 'ConfigMap':
    raise ValueError('patch["kind"] != "ConfigMap"')

# The ConfigMap must opt in via the cd.spack.io/staged-resource annotation;
# a missing annotation or any "falsy" string disables patching.
annotation_missing = (
    (
        (
            (patch.get('metadata') or {})
            .get('annotations') or {}
        )
        .get('cd.spack.io/staged-resource', '0')
    ) in
    (None, 'false', '0', 'off', 'no', 'disabled'))

if annotation_missing:
    raise ValueError('patch annotation missing or disabled:'
                     ' cd.spack.io/staged-resource')

# The actual JSON-Patch document lives under .data.patch as a YAML string.
patch = patch.get('data', {}).get('patch', None)
if patch is None:
    raise ValueError('patch.data.patch missing or empty')
try:
    patch = yaml.full_load(patch)
except (yaml.parser.ParserError, yaml.scanner.ScannerError) as e:
    warn('patch data failed to parse')
    sys.stdout.write('\x1b[1;31m')
    print(e)
    sys.stdout.write('\x1b[0m')

# Load the target document to be patched.
orig_file = open(args.orig_file)
orig_file_name = args.orig_file
target = None
with orig_file:
    try:
        roots = list(yaml.full_load_all(orig_file))
        target = roots[args.index]
    except yaml.scanner.ScannerError as e:
        warn('file failed to parse')
        sys.stdout.write('\x1b[1;31m')
        e.problem_mark.name = orig_file_name
        print(e)
        sys.stdout.write('\x1b[0m')

# Substitute {ENV}, apply the patch in place, and emit the result to stdout.
patch = process_patch(patch, env=args.environment)
apply_patch(target, patch)
yaml.dump(target, sys.stdout)
4,534 | type | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetSignalRReplicaResult',
'AwaitableGetSignalRReplicaResult',
'get_signal_r_replica',
'get_signal_r_replica_output',
]
@pulumi.output_type
class GetSignalRReplicaResult:
    """
    A class represent a replica resource.
    """
    def __init__(__self__, id=None, location=None, name=None, provisioning_state=None, sku=None, system_data=None, tags=None, METHOD_NAME=None):
        # Generated by pulumi: each argument is type-checked and stored via
        # pulumi.set so the @pulumi.getter properties below can look it up.
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if sku and not isinstance(sku, dict):
            raise TypeError("Expected argument 'sku' to be a dict")
        pulumi.set(__self__, "sku", sku)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", METHOD_NAME)

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource. E.g. "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}"
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def location(self) -> str:
        """
        The geo-location where the resource lives
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        Provisioning state of the resource.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def sku(self) -> Optional['outputs.ResourceSkuResponse']:
        """
        The billing information of the resource.
        """
        return pulumi.get(self, "sku")

    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> 'outputs.SystemDataResponse':
        """
        Azure Resource Manager metadata containing createdBy and modifiedBy information.
        """
        return pulumi.get(self, "system_data")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def METHOD_NAME(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
class AwaitableGetSignalRReplicaResult(GetSignalRReplicaResult):
    # Awaitable wrapper: the value is already resolved, so __await__ returns
    # it immediately; the unreachable `yield` only makes this a generator.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetSignalRReplicaResult(
            id=self.id,
            location=self.location,
            name=self.name,
            provisioning_state=self.provisioning_state,
            sku=self.sku,
            system_data=self.system_data,
            tags=self.tags,
            METHOD_NAME=self.METHOD_NAME)
def get_signal_r_replica(replica_name: Optional[str] = None,
                         resource_group_name: Optional[str] = None,
                         resource_name: Optional[str] = None,
                         opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSignalRReplicaResult:
    """
    Get the replica and its properties.

    :param str replica_name: The name of the replica.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str resource_name: The name of the resource.
    """
    __args__ = dict()
    __args__['replicaName'] = replica_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['resourceName'] = resource_name
    # Merge caller options with provider defaults, then invoke the
    # Azure Native provider and unwrap its result into the output type.
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:signalrservice/v20230301preview:getSignalRReplica', __args__, opts=opts, typ=GetSignalRReplicaResult).value

    return AwaitableGetSignalRReplicaResult(
        id=pulumi.get(__ret__, 'id'),
        location=pulumi.get(__ret__, 'location'),
        name=pulumi.get(__ret__, 'name'),
        provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
        sku=pulumi.get(__ret__, 'sku'),
        system_data=pulumi.get(__ret__, 'system_data'),
        tags=pulumi.get(__ret__, 'tags'),
        METHOD_NAME=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_signal_r_replica)
def get_signal_r_replica_output(replica_name: Optional[pulumi.Input[str]] = None,
                                resource_group_name: Optional[pulumi.Input[str]] = None,
                                resource_name: Optional[pulumi.Input[str]] = None,
                                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSignalRReplicaResult]:
    """
    Get the replica and its properties.

    :param str replica_name: The name of the replica.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str resource_name: The name of the resource.
    """
    # Body intentionally empty: lift_output_func wraps the plain invoke
    # above to produce this Output-returning variant.
    ...
4,535 | sync marker | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""Read/Write Avro File Object Containers."""
import logging
import sys
from ..avro import avro_io_async
from ..avro import schema
from .datafile import DataFileException
from .datafile import MAGIC, SYNC_SIZE, META_SCHEMA, SCHEMA_KEY
PY3 = sys.version_info[0] == 3
logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
# Constants
# Codecs supported by container files:
VALID_CODECS = frozenset(['null'])
class AsyncDataFileReader(object):  # pylint: disable=too-many-instance-attributes
    """Read files written by DataFileWriter."""

    def __init__(self, reader, datum_reader, **kwargs):
        """Initializes a new data file reader.

        Args:
          reader: Open file to read from.
          datum_reader: Avro datum reader.
        """
        self._reader = reader
        self._raw_decoder = avro_io_async.AsyncBinaryDecoder(reader)
        # Optional separate stream holding the container header, used when
        # self._reader only carries a partial file (no header of its own).
        self._header_reader = kwargs.pop('header_reader', None)
        self._header_decoder = None if self._header_reader is None else \
            avro_io_async.AsyncBinaryDecoder(self._header_reader)
        self._datum_decoder = None  # Maybe reset at every block.
        self._datum_reader = datum_reader
        self.codec = "null"
        self._block_count = 0          # objects remaining in the current block
        self._cur_object_index = 0     # index of the next object within the block
        self._meta = None              # container metadata dict (set by _read_header)
        self._sync_marker = None       # 16-byte block separator (set by _read_header)

    async def init(self):
        """Asynchronous part of construction: read header, validate codec."""
        # In case self._reader only has partial content(without header).
        # seek(0, 0) to make sure read the (partial)content from beginning.
        await self._reader.seek(0, 0)

        # read the header: magic, meta, sync
        await self._read_header()

        # ensure codec is valid
        avro_codec_raw = self.get_meta('avro.codec')
        if avro_codec_raw is None:
            self.codec = "null"
        else:
            self.codec = avro_codec_raw.decode('utf-8')
        if self.codec not in VALID_CODECS:
            raise DataFileException('Unknown codec: %s.' % self.codec)

        # get ready to read
        self._block_count = 0

        # object_position is to support reading from current position in the future read,
        # no need to downloading from the beginning of avro.
        if hasattr(self._reader, 'object_position'):
            # NOTE(review): not awaited here, unlike the awaited calls in
            # __anext__ below -- confirm whether this is intentional.
            self.reader.track_object_position()

        # header_reader indicates reader only has partial content. The reader doesn't have block header,
        # so we read use the block count stored last time.
        # Also ChangeFeed only has codec==null, so use _raw_decoder is good.
        if self._header_reader is not None:
            self._datum_decoder = self._raw_decoder
        self.datum_reader.writer_schema = (
            schema.parse(self.get_meta(SCHEMA_KEY).decode('utf-8')))
        return self

    async def __aenter__(self):
        return self

    async def __aexit__(self, data_type, value, traceback):
        # Perform a close if there's no exception
        if data_type is None:
            self.close()

    def __aiter__(self):
        return self

    # read-only properties
    @property
    def reader(self):
        return self._reader

    @property
    def raw_decoder(self):
        return self._raw_decoder

    @property
    def datum_decoder(self):
        return self._datum_decoder

    @property
    def datum_reader(self):
        return self._datum_reader

    @property
    def METHOD_NAME(self):
        # 16-byte marker separating blocks, read from the file header.
        return self._sync_marker

    @property
    def meta(self):
        return self._meta

    # read/write properties
    @property
    def block_count(self):
        return self._block_count

    def get_meta(self, key):
        """Reports the value of a given metadata key.

        Args:
          key: Metadata key (string) to report the value of.
        Returns:
          Value associated to the metadata key, as bytes.
        """
        return self._meta.get(key)

    async def _read_header(self):
        """Parse magic, metadata and sync marker from the container header."""
        header_reader = self._header_reader if self._header_reader else self._reader
        header_decoder = self._header_decoder if self._header_decoder else self._raw_decoder

        # seek to the beginning of the file to get magic block
        await header_reader.seek(0, 0)

        # read header into a dict
        header = await self.datum_reader.read_data(META_SCHEMA, header_decoder)

        # check magic number
        if header.get('magic') != MAGIC:
            fail_msg = "Not an Avro data file: %s doesn't match %s." \
                       % (header.get('magic'), MAGIC)
            raise schema.AvroException(fail_msg)

        # set metadata
        self._meta = header['meta']

        # set sync marker
        self._sync_marker = header['sync']

    async def _read_block_header(self):
        """Read the object count for the next block and prime the decoder."""
        self._block_count = await self.raw_decoder.read_long()
        if self.codec == "null":
            # Skip a long; we don't need to use the length.
            await self.raw_decoder.skip_long()
            self._datum_decoder = self._raw_decoder
        else:
            raise DataFileException("Unknown codec: %r" % self.codec)

    async def _skip_sync(self):
        """
        Read the length of the sync marker; if it matches the sync marker,
        return True. Otherwise, seek back to where we started and return False.
        """
        proposed_sync_marker = await self.reader.read(SYNC_SIZE)
        if SYNC_SIZE > 0 and not proposed_sync_marker:
            # End of stream: terminate async iteration.
            raise StopAsyncIteration
        if proposed_sync_marker != self.METHOD_NAME:
            await self.reader.seek(-SYNC_SIZE, 1)

    async def __anext__(self):
        """Return the next datum in the file."""
        if self.block_count == 0:
            await self._skip_sync()

            # object_position is to support reading from current position in the future read,
            # no need to downloading from the beginning of avro file with this attr.
            if hasattr(self._reader, 'object_position'):
                await self.reader.track_object_position()
            self._cur_object_index = 0

            await self._read_block_header()

        datum = await self.datum_reader.read(self.datum_decoder)
        self._block_count -= 1
        self._cur_object_index += 1

        # object_position is to support reading from current position in the future read,
        # This will track the index of the next item to be read.
        # This will also track the offset before the next sync marker.
        if hasattr(self._reader, 'object_position'):
            if self.block_count == 0:
                # the next event to be read is at index 0 in the new chunk of blocks,
                await self.reader.track_object_position()
                await self.reader.set_object_index(0)
            else:
                await self.reader.set_object_index(self._cur_object_index)

        return datum

    def close(self):
        """Close this reader."""
        self.reader.close()
# Library-only module: refuse to run as a standalone script.
if __name__ == '__main__':
    raise Exception('Not a standalone module')
4,536 | test check existing external data | import json
import pytest
from asyncpg.connection import Connection
from gql.client import AsyncClientSession
from cdb.api.db.crud.external_data import (
get_all_external_datas_by_beneficiary_id_and_source,
get_last_external_data_by_beneficiary_id_and_source,
save_external_data_with_info,
)
from cdb.api.db.models.beneficiary import Beneficiary
from cdb.api.db.models.external_data import ExternalSource, format_external_data
from cdb.api.db.models.professional import Professional
from cdb.cdb_csv.models.csv_row import PrincipalCsvRow, get_sha256
from cdb.cdb_csv.pe import (
insert_external_data_for_beneficiary_and_professional,
map_principal_row,
save_external_data,
)
async def test_insert_external_data(
    db_connection: Connection,
    beneficiary_sophie_tifour: Beneficiary,
    pe_principal_csv_series,
):
    """Inserting external data (without a professional) stores a row whose
    info record points back at the beneficiary."""
    # Get the first row
    _, row = next(pe_principal_csv_series.iterrows())
    csv_row: PrincipalCsvRow = await map_principal_row(row)
    await insert_external_data_for_beneficiary_and_professional(
        db_connection,
        beneficiary_sophie_tifour,
        ExternalSource.PE_FTP,
        format_external_data(
            csv_row.dict(), {"beneficiary": beneficiary_sophie_tifour.dict()}
        ),
        "myhash",
        professional=None,
    )
    # The freshly inserted row must be the latest one for this beneficiary.
    external_data = await get_last_external_data_by_beneficiary_id_and_source(
        db_connection, beneficiary_sophie_tifour.id, ExternalSource.PE_FTP
    )
    assert external_data is not None
    assert external_data.info is not None
    assert external_data.info.beneficiary_id == beneficiary_sophie_tifour.id
async def METHOD_NAME(
    db_connection: Connection,
    beneficiary_sophie_tifour: Beneficiary,
    professional_pierre_chevalier: Professional,
    pe_principal_csv_series,
):
    """Saving external data twice keeps a full history: a second save with
    changed names adds a new row (with a new hash) instead of overwriting."""
    # No data exists for this beneficiary before the first save.
    external_data = await get_last_external_data_by_beneficiary_id_and_source(
        db_connection, beneficiary_sophie_tifour.id, ExternalSource.PE_FTP
    )
    assert external_data is None
    # Get the first row
    _, row = next(pe_principal_csv_series.iterrows())
    csv_row: PrincipalCsvRow = await map_principal_row(row)
    hash_result: str = await get_sha256(csv_row)
    external_data = await save_external_data(
        db_connection,
        beneficiary_sophie_tifour,
        csv_row,
        hash_result,
        professional=professional_pierre_chevalier,
    )
    # First save: parsed and source payloads reflect the original names.
    assert external_data is not None
    assert external_data.info is not None
    assert external_data.data["parsed"]["beneficiary"]["lastname"] == "Tifour"
    assert external_data.data["parsed"]["professional"]["lastname"] == "Chevalier"
    assert external_data.data["source"]["nom"] == "TIFOUR"
    datas = await get_all_external_datas_by_beneficiary_id_and_source(
        db_connection, beneficiary_sophie_tifour.id, ExternalSource.PE_FTP
    )
    assert len(datas) == 1
    # Mutate the names and save again: a second history row must appear.
    beneficiary_sophie_tifour.lastname = "Newname"
    csv_row.nom = "Newname"
    professional_pierre_chevalier.lastname = "Newlastname"
    hash_result: str = await get_sha256(csv_row)
    external_data = await save_external_data(
        db_connection,
        beneficiary_sophie_tifour,
        csv_row,
        hash_result,
        professional=professional_pierre_chevalier,
    )
    assert external_data is not None
    assert external_data.info is not None
    assert external_data.data["parsed"]["beneficiary"]["lastname"] == "Newname"
    assert external_data.data["parsed"]["professional"]["lastname"] == "Newlastname"
    assert external_data.data["source"]["nom"] == "Newname"
    # Pinned sha256 of the mutated csv_row payload.
    assert (
        external_data.hash
        == "8b816e2ff8b67d36c5432f0f1eb74c66cc4c50c616296969448639799ba02368"
    )
    datas = await get_all_external_datas_by_beneficiary_id_and_source(
        db_connection, beneficiary_sophie_tifour.id, ExternalSource.PE_FTP
    )
    assert len(datas) == 2
@pytest.mark.graphql
async def test_save_external_data_with_info(
    gql_admin_client: AsyncClientSession,
    db_connection: Connection,
    beneficiary_edwina_skinner: Beneficiary,
):
    """Saving via the GraphQL client persists the JSON payload, its hash and
    the link to the beneficiary, verified by reading the DB directly."""
    data = {"test": "choucroute"}
    external_data_id = await save_external_data_with_info(
        gql_admin_client,
        beneficiary_edwina_skinner.id,
        data,
        ExternalSource.PE_IO,
    )
    # Bypass the API and inspect the stored row directly.
    result = await db_connection.fetchrow(
        """
        SELECT beneficiary_id, data, source, hash
        FROM external_data
        LEFT JOIN external_data_info ON external_data_id = id
        WHERE id = $1
        """,
        external_data_id,
    )
    assert result
    assert result["beneficiary_id"] == beneficiary_edwina_skinner.id
    assert result["data"] == json.dumps(data)
    # Pinned sha256 of the serialized payload.
    assert (
        result["hash"]
        == "573836d0c6192bdebdaf246803617e7960a59f6c419dbc8e5dbc8912f85a4dd3"
    )
4,537 | configure parser | """Base provider module for all Lexicon providers"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
from re import Pattern
from typing import Any, Dict, List, Optional, Union
from lexicon.config import ConfigResolver, legacy_config_resolver
class Provider(ABC):
    """
    This is the abstract class for all lexicon Providers.
    It provides common functionality and ensures that all implemented
    Providers follow a standard ducktype.
    All standardized options will be provided here as defaults, but can be overwritten
    by environmental variables and cli arguments.

    Common options are:
    action
    domain
    type
    name
    content
    ttl
    priority
    identifier

    The provider_env_cli_options will also contain any Provider specific options:
    auth_username
    auth_token
    auth_password
    ...

    :param config: is a ConfigResolver object that contains all the options
    for this provider, merged from CLI and Env variables.
    """

    def __init__(self, config: Union[ConfigResolver, Dict]):
        if not isinstance(config, ConfigResolver):
            # If config is a plain dict, we are in a legacy situation.
            # To protect the Provider API, the legacy dict is handled in a
            # correctly defined ConfigResolver.
            # Also, there may be some situation where `provider` key is not set in the config.
            # It should not happen when Lexicon is called from Client, as it will set itself
            # this key. However, there were no automated logic if the Provider is used directly.
            # So we provide this logic here.
            if not config.get("provider_name") and not config.get("provider"):
                config[
                    "provider_name"
                ] = __name__  # Obviously we use the module name itself.
            self.config = legacy_config_resolver(config)
        else:
            self.config = config

        # Default ttl
        self.config.with_dict({"ttl": 3600})

        self.provider_name = self.config.resolve(
            "lexicon:provider_name"
        ) or self.config.resolve("lexicon:provider")
        self.domain = str(self.config.resolve("lexicon:domain"))
        self.domain_id = None

    # Provider API: instance methods

    @abstractmethod
    def authenticate(self) -> None:
        """
        Authenticate against provider,
        Make any requests required to get the domain's id for this provider,
        so it can be used in subsequent calls.
        Should throw AuthenticationError or requests.HTTPError if authentication fails for any reason,
        of if the domain does not exist.
        """

    def cleanup(self) -> None:
        """
        Clean any relevant resource before this provider instance is closed.
        """

    @abstractmethod
    def create_record(self, rtype: str, name: str, content: str) -> bool:
        """
        Create record. If record already exists with the same content, do nothing.
        """

    @abstractmethod
    def list_records(
        self,
        rtype: Optional[str] = None,
        name: Optional[str] = None,
        content: Optional[str] = None,
    ) -> List[Dict]:
        """
        List all records. Return an empty list if no records found
        type, name and content are used to filter records.
        If possible filter during the query, otherwise filter after response is received.
        """

    @abstractmethod
    def update_record(
        self,
        identifier: Optional[str] = None,
        rtype: Optional[str] = None,
        name: Optional[str] = None,
        content: Optional[str] = None,
    ) -> bool:
        """
        Update a record. Identifier must be specified.
        """

    @abstractmethod
    def delete_record(
        self,
        identifier: Optional[str] = None,
        rtype: Optional[str] = None,
        name: Optional[str] = None,
        content: Optional[str] = None,
    ) -> bool:
        """
        Delete an existing record.
        If record does not exist, do nothing.
        If an identifier is specified, use it, otherwise do a lookup using type, name and content.
        """

    # Provider API: static methods

    @staticmethod
    @abstractmethod
    def get_nameservers() -> Union[List[str], List[Pattern]]:
        """
        Return the list of nameservers for this DNS provider
        """

    @staticmethod
    @abstractmethod
    def METHOD_NAME(parser: ArgumentParser) -> None:
        """
        Configure the given parser for the provider needs
        (e.g. specific CLI flags for auth)
        """

    # Helpers
    def _request(
        self,
        action: str = "GET",
        url: str = "/",
        data: Optional[Dict] = None,
        query_params: Optional[Dict] = None,
    ) -> Any:
        """Execute an HTTP request against the DNS provider API"""
        raise NotImplementedError(
            "You must implement _request() to use _get()/_post()/_put()/_patch()/_delete() methods."
        )

    def _get(self, url: str = "/", query_params: Optional[Dict] = None) -> Any:
        return self._request("GET", url, query_params=query_params)

    def _post(
        self,
        url: str = "/",
        data: Optional[Dict] = None,
        query_params: Optional[Dict] = None,
    ) -> Any:
        return self._request("POST", url, data=data, query_params=query_params)

    def _put(
        self,
        url: str = "/",
        data: Optional[Dict] = None,
        query_params: Optional[Dict] = None,
    ) -> Any:
        return self._request("PUT", url, data=data, query_params=query_params)

    def _patch(
        self,
        url: str = "/",
        data: Optional[Dict] = None,
        query_params: Optional[Dict] = None,
    ) -> Any:
        return self._request("PATCH", url, data=data, query_params=query_params)

    def _delete(self, url: str = "/", query_params: Optional[Dict] = None) -> Any:
        return self._request("DELETE", url, query_params=query_params)

    def _is_in_domain(self, record_name: str) -> bool:
        """True when *record_name* equals the domain or ends with '.<domain>'."""
        # BUGFIX: a bare endswith(self.domain) false-matched names like
        # 'badexample.com' against domain 'example.com'; require an exact
        # match or a label boundary ('.') before the domain suffix.
        return record_name == self.domain or record_name.endswith(
            "." + self.domain
        )

    def _fqdn_name(self, record_name: str) -> str:
        """Return *record_name* as a fully qualified name with trailing dot."""
        # strip trailing period from fqdn if present
        record_name = record_name.rstrip(".")
        # check if the record_name is fully specified
        if not self._is_in_domain(record_name):
            record_name = f"{record_name}.{self.domain}"
        return f"{record_name}."  # return the fqdn name

    def _full_name(self, record_name: str) -> str:
        """Return *record_name* as a fully qualified name without trailing dot."""
        # strip trailing period from fqdn if present
        record_name = record_name.rstrip(".")
        # check if the record_name is fully specified
        if not self._is_in_domain(record_name):
            record_name = f"{record_name}.{self.domain}"
        return record_name

    def _relative_name(self, record_name: str) -> str:
        """Return *record_name* relative to the provider domain."""
        # strip trailing period from fqdn if present
        record_name = record_name.rstrip(".")
        # strip the domain suffix only on a label boundary (or exact match)
        if self._is_in_domain(record_name):
            record_name = record_name[: -len(self.domain)]
            record_name = record_name.rstrip(".")
        return record_name

    def _clean_TXT_record(self, record: Dict) -> Dict:
        if record["type"] == "TXT":
            # Some providers have quotes around the TXT records,
            # so we're going to remove those extra quotes
            record["content"] = record["content"][1:-1]
        return record

    def _get_lexicon_option(self, option: str) -> Any:
        return self.config.resolve(f"lexicon:{option}")

    def _get_provider_option(self, option: str) -> Any:
        return self.config.resolve(f"lexicon:{self.provider_name}:{option}")
4,538 | can be accepted | import rules
from django.db.models import Q
from pretalx.person.permissions import can_change_submissions, is_reviewer
from pretalx.submission.models import SubmissionStates
@rules.predicate
def has_submissions(user, obj):
    """True if *user* is a speaker on at least one submission of obj's event."""
    event = obj.event
    return event.submissions.filter(speakers__in=[user]).exists()
@rules.predicate
def is_speaker(user, obj):
    """True if *user* is a speaker of *obj* (or of ``obj.submission``)."""
    # Wrapper objects (e.g. reviews) expose their submission via .submission.
    obj = getattr(obj, "submission", obj)
    return obj and user in obj.speakers.all()
@rules.predicate
def can_be_withdrawn(user, obj):
    """True if the submission may transition to WITHDRAWN from its state."""
    return obj and SubmissionStates.WITHDRAWN in SubmissionStates.valid_next_states.get(
        obj.state, []
    )
@rules.predicate
def can_be_rejected(user, obj):
    """True if the submission may transition to REJECTED from its state."""
    return obj and SubmissionStates.REJECTED in SubmissionStates.valid_next_states.get(
        obj.state, []
    )
@rules.predicate
def METHOD_NAME(user, obj):
    """True if the submission may transition to ACCEPTED from its state."""
    return obj and SubmissionStates.ACCEPTED in SubmissionStates.valid_next_states.get(
        obj.state, []
    )
@rules.predicate
def can_be_confirmed(user, obj):
    """True if the submission may transition to CONFIRMED from its state."""
    return obj and SubmissionStates.CONFIRMED in SubmissionStates.valid_next_states.get(
        obj.state, []
    )
@rules.predicate
def can_be_canceled(user, obj):
    """True if the submission may transition to CANCELED from its state."""
    return obj and SubmissionStates.CANCELED in SubmissionStates.valid_next_states.get(
        obj.state, []
    )
@rules.predicate
def can_be_removed(user, obj):
    """True if the submission may transition to DELETED from its state."""
    return obj and SubmissionStates.DELETED in SubmissionStates.valid_next_states.get(
        obj.state, []
    )
@rules.predicate
def can_be_edited(user, obj):
    """True if the submission object exists and reports itself as editable."""
    return obj and obj.editable
@rules.predicate
def is_review_author(user, obj):
    """True if *user* is the author of the review *obj*."""
    return obj and obj.user == user
@rules.predicate
def can_be_reviewed(user, obj):
    """True if the (possibly wrapped) submission is currently reviewable."""
    if not obj:
        return False
    obj = getattr(obj, "submission", obj)
    # Reviewing requires an active review phase that allows it ...
    phase = obj.event.active_review_phase and obj.event.active_review_phase.can_review
    # ... and the submission still being in the SUBMITTED state.
    state = obj.state == SubmissionStates.SUBMITTED
    return bool(state and phase)
@rules.predicate
def can_view_reviews(user, obj):
    """True if *user* may see other users' reviews of *obj* in this phase."""
    phase = obj.event.active_review_phase
    if not phase:
        return False
    if phase.can_see_other_reviews == "always":
        return True
    # "after_review": visible only once the user submitted their own review.
    if phase.can_see_other_reviews == "after_review":
        return obj.reviews.filter(user=user).exists()
    return False
@rules.predicate
def can_view_all_reviews(user, obj):
    """True if the active review phase shows all reviews unconditionally."""
    phase = obj.event.active_review_phase
    if not phase:
        return False
    return phase.can_see_other_reviews == "always"
@rules.predicate
def has_reviewer_access(user, obj):
    """True if *user* may review *obj*: explicitly assigned, or covered by a
    reviewer team when the phase makes all proposals visible."""
    from pretalx.submission.models import Submission
    obj = getattr(obj, "submission", obj)
    if not isinstance(obj, Submission):
        # Guard against wiring this predicate to non-submission objects.
        raise Exception("Incorrect use of reviewer permissions")
    if user in obj.assigned_reviewers.all():
        return True
    phase = obj.event.active_review_phase
    if not phase:
        return False
    if phase.proposal_visibility == "all":
        # Reviewer team must cover this event and this track (or all tracks).
        return user.teams.filter(
            Q(Q(all_events=True) | Q(limit_events__in=[obj.event]))
            & Q(Q(limit_tracks__isnull=True) | Q(limit_tracks__in=[obj.track])),
            is_reviewer=True,
        ).exists()
    return False
@rules.predicate
def reviewer_can_change_submissions(user, obj):
    """True if the active review phase lets reviewers change submission state."""
    return (
        obj.event.active_review_phase
        and obj.event.active_review_phase.can_change_submission_state
    )
# Permission registrations: map permission names to predicate combinations.
# State transitions (accept/reject/confirm/...) additionally require the
# corresponding can_be_* predicate to hold.
rules.add_perm(
    "submission.accept_or_reject_submissions",
    can_change_submissions | (is_reviewer & reviewer_can_change_submissions),
)
rules.add_perm("submission.perform_actions", is_speaker)
rules.add_perm("submission.withdraw_submission", can_be_withdrawn & is_speaker)
rules.add_perm(
    "submission.reject_submission",
    can_be_rejected
    & (can_change_submissions | (is_reviewer & reviewer_can_change_submissions)),
)
rules.add_perm(
    "submission.accept_submission",
    METHOD_NAME
    & (can_change_submissions | (is_reviewer & reviewer_can_change_submissions)),
)
rules.add_perm(
    "submission.confirm_submission",
    can_be_confirmed & (is_speaker | can_change_submissions),
)
rules.add_perm(
    "submission.cancel_submission",
    can_be_canceled & (is_speaker | can_change_submissions),
)
rules.add_perm("submission.remove_submission", can_be_removed & can_change_submissions)
rules.add_perm(
    "submission.edit_submission", (can_be_edited & is_speaker) | can_change_submissions
)
rules.add_perm(
    "submission.view_submission",
    is_speaker | can_change_submissions | has_reviewer_access,
)
# Review-related permissions.
rules.add_perm("submission.review_submission", has_reviewer_access & can_be_reviewed)
rules.add_perm(
    "submission.edit_review", has_reviewer_access & can_be_reviewed & is_review_author
)
rules.add_perm("submission.view_reviews", has_reviewer_access | can_change_submissions)
rules.add_perm("submission.edit_speaker_list", is_speaker | can_change_submissions)
rules.add_perm(
    "submission.view_feedback",
    is_speaker | can_change_submissions | has_reviewer_access,
)
4,539 | run | """
Copyright (c) 2020-2023 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging as log
import time
from collections import OrderedDict
from itertools import chain, cycle
from threading import Thread
from .queue import AsyncQueue, Signal, StubQueue, VoidQueue, is_stop_signal
from .timer import TimerGroup, IncrementalTimer
class PipelineStep:
    """One stage of an AsyncPipeline.

    Items are pulled from ``input_queue``, transformed by :meth:`process`
    and pushed to ``output_queue``.  A step either runs in its own thread
    (:meth:`start`) or is driven synchronously by the pipeline.
    """
    def __init__(self):
        self.input_queue = None  # wired up by AsyncPipeline.add_step
        self.output_queue = VoidQueue()  # default sink until connected
        self.working = False  # True while the step is active
        self.timers = TimerGroup()
        self.total_time = IncrementalTimer()  # whole-iteration latency
        self.own_time = IncrementalTimer()  # time spent inside process() only
        self._start_t = None  # wall-clock start of the worker loop
        self._thread = None
    def process(self, item):
        """Transform one input item; subclasses must override."""
        raise NotImplementedError
    def end(self):
        """Hook invoked once after the processing loop finishes."""
        pass
    def setup(self):
        """Hook invoked once before the processing loop starts."""
        pass
    def start(self):
        """Spawn the worker thread running :meth:`METHOD_NAME`."""
        if self.input_queue is None or self.output_queue is None:
            raise Exception("No input or output queue")
        if self._thread is not None:
            raise Exception("Thread is already running")
        self._thread = Thread(target=self.METHOD_NAME)
        self._thread.start()
        # NOTE(review): 'working' becomes True only after the thread is
        # already running; a reader could briefly observe working == False.
        self.working = True
    def join(self):
        """Request a graceful stop (STOP signal) and wait for thread exit."""
        self.input_queue.put(Signal.STOP)
        self._thread.join()
        self._thread = None
        self.working = False
    def METHOD_NAME(self):
        """Worker loop: pull, process and forward items until a stop signal."""
        self._start_t = time.time()
        self.setup()
        self.total_time = IncrementalTimer()
        self.own_time = IncrementalTimer()
        while True:
            self.total_time.tick()
            item = self.input_queue.get()
            # A stop signal on the input ends the loop (it is forwarded on).
            if self._check_output(item):
                break
            self.own_time.tick()
            output = self.process(item)
            self.own_time.tock()
            # process() may itself emit a stop signal.
            if self._check_output(output):
                break
            self.total_time.tock()
            self.input_queue.task_done()
            self.output_queue.put(output)
        self.input_queue.close()
        self.end()
        self.working = False
    def _check_output(self, item):
        """Forward stop signals downstream; return True if *item* is one."""
        if is_stop_signal(item):
            self.output_queue.put(item)
            return True
        return False
class AsyncPipeline:
    """Chain of PipelineSteps connected by queues.

    Parallel steps run in their own threads; non-parallel ("sync") steps
    are driven round-robin on the calling thread by :meth:`_run_sync_steps`.
    """
    def __init__(self):
        self.steps = OrderedDict()  # steps running in their own threads
        self.sync_steps = OrderedDict()  # steps driven by the main thread
        self.async_step = []  # NOTE(review): appears unused within this class
        self._void_queue = VoidQueue()
        self._last_step = None
        self._last_parallel = False
    def add_step(self, name, new_pipeline_step, max_size=100, parallel=True):
        """Append a step, wiring its input to the previous step's output.

        An AsyncQueue is used whenever either neighbour runs in a thread;
        two adjacent synchronous steps share a StubQueue instead.
        """
        new_pipeline_step.output_queue = self._void_queue
        if self._last_step:
            if parallel or self._last_parallel:
                queue = AsyncQueue(maxsize=max_size)
            else:
                queue = StubQueue()
            self._last_step.output_queue = queue
            new_pipeline_step.input_queue = queue
        else:
            # The first step reads from the void queue.
            new_pipeline_step.input_queue = self._void_queue
        if parallel:
            self.steps[name] = new_pipeline_step
        else:
            self.sync_steps[name] = new_pipeline_step
        self._last_step = new_pipeline_step
        self._last_parallel = parallel
    def run(self):
        """Start all threaded steps, then drive the sync steps inline."""
        for step in self.steps.values():
            if not step.working:
                step.start()
        self._run_sync_steps()
    def close(self):
        """Send an immediate stop to every threaded step and join them."""
        for step in self.steps.values():
            step.input_queue.put(Signal.STOP_IMMEDIATELY)
        for step in self.steps.values():
            step.join()
    def print_statistics(self):
        """Log total/own timing statistics for every step."""
        log.info("Metrics report:")
        for name, step in chain(self.sync_steps.items(), self.steps.items(), ):
            log.info("\t{} total: {}".format(name, step.total_time))
            log.info("\t{} own: {}".format(name, step.own_time))
    def _run_sync_steps(self):
        """Run steps in main thread"""
        if not self.sync_steps:
            # No sync steps: busy-wait until the pipeline has drained.
            while not self._void_queue.finished:
                pass
            return
        for step in self.sync_steps.values():
            step.working = True
            step.setup()
        # Round-robin over the sync steps until a stop signal appears.
        for step in cycle(self.sync_steps.values()):
            step.total_time.tick()
            item = step.input_queue.get()
            if is_stop_signal(item):
                step.input_queue.close()
                step.output_queue.put(item)
                break
            step.own_time.tick()
            output = step.process(item)
            step.own_time.tock()
            if is_stop_signal(output):
                step.input_queue.close()
                step.output_queue.put(output)
                break
            step.total_time.tock()
            step.output_queue.put(output)
        for step in self.sync_steps.values():
            step.working = False
            step.end()
4,540 | tear down | import logging
from unittest import mock
from unittest.mock import MagicMock
from django.test import TestCase
from django.utils import timezone
from eth_account import Account
from gnosis.eth.ethereum_client import (
EthereumClient,
EthereumClientProvider,
EthereumNetwork,
)
from ...history.tests.utils import just_test_if_mainnet_node
from ...utils.redis import get_redis
from ..models import TokenList
from ..services import PriceService, PriceServiceProvider
from ..tasks import (
EthValueWithTimestamp,
calculate_token_eth_price_task,
fix_pool_tokens_task,
update_token_info_from_token_list_task,
)
from .factories import TokenFactory, TokenListFactory
from .mocks import token_list_mock
logger = logging.getLogger(__name__)
class TestTasks(TestCase):
    """Tests for the token Celery tasks: pool-token fixing, token/ETH price
    calculation (with caching and last-valid-price fallback) and token-list
    driven token info updates."""
    def setUp(self) -> None:
        # Fresh PriceService singleton and an empty redis for every test.
        PriceServiceProvider.del_singleton()
        get_redis().flushall()
    def METHOD_NAME(self) -> None:
        # Clean any redis state the test left behind.
        get_redis().flushall()
    @mock.patch(
        "safe_transaction_service.tokens.tasks.get_ethereum_network",
        return_value=EthereumNetwork.MAINNET,
    )
    def test_fix_pool_tokens_task(self, get_network_mock: MagicMock):
        """Task runs (fixing 0 tokens) on mainnet and is a no-op elsewhere."""
        self.assertEqual(fix_pool_tokens_task.delay().result, 0)
        get_network_mock.return_value = EthereumNetwork.RINKEBY
        self.assertIsNone(fix_pool_tokens_task.delay().result)
    @mock.patch.object(
        PriceService, "get_token_eth_value", autospec=True, return_value=4815
    )
    @mock.patch.object(timezone, "now", return_value=timezone.now())
    def test_calculate_token_eth_price_task(
        self, timezone_now_mock: MagicMock, get_token_eth_value_mock: MagicMock
    ):
        """Price task returns the mocked value and caches it by redis key."""
        random_token_address = Account.create().address
        random_redis_key = Account.create().address
        expected = EthValueWithTimestamp(
            get_token_eth_value_mock.return_value, timezone_now_mock.return_value
        )
        self.assertEqual(
            calculate_token_eth_price_task.delay(
                random_token_address, random_redis_key
            ).result,
            expected,
        )
        # Check caching works even if we change the token_address
        another_token_address = Account.create().address
        self.assertEqual(
            calculate_token_eth_price_task.delay(
                another_token_address, random_redis_key
            ).result,
            expected,
        )
        # Fire the task asynchronously too (no result assertion possible).
        with self.settings(CELERY_ALWAYS_EAGER=False):
            random_token_address = Account.create().address
            random_redis_key = Account.create().address
            calculate_token_eth_price_task.delay(random_token_address, random_redis_key)
    def test_calculate_token_eth_price_task_without_mock(self):
        """Integration test against a real mainnet node (skipped otherwise)."""
        mainnet_node_url = just_test_if_mainnet_node()
        EthereumClientProvider.instance = EthereumClient(mainnet_node_url)
        # Known mainnet tokens should have a positive ETH value.
        dai_address = "0x6B175474E89094C44Da98b954EedeAC495271d0F"
        random_redis_key = Account.create().address
        eth_value_with_timestamp = calculate_token_eth_price_task(
            dai_address, random_redis_key
        )
        self.assertGreater(eth_value_with_timestamp.eth_value, 0.0)
        pool_together_address = "0x334cBb5858417Aee161B53Ee0D5349cCF54514CF"
        random_redis_key = Account.create().address
        eth_value_with_timestamp = calculate_token_eth_price_task(
            pool_together_address, random_redis_key
        )
        self.assertGreater(eth_value_with_timestamp.eth_value, 0.0)
        # A random (non-existent) token has no price.
        random_token_address = Account.create().address
        random_redis_key = Account.create().address
        eth_value_with_timestamp = calculate_token_eth_price_task(
            random_token_address, random_redis_key
        )
        self.assertEqual(eth_value_with_timestamp.eth_value, 0.0)
        del EthereumClientProvider.instance
    @mock.patch.object(
        PriceService, "get_token_eth_value", autospec=True, return_value=4815
    )
    @mock.patch.object(
        PriceService, "get_token_usd_price", autospec=True, return_value=0.0
    )
    @mock.patch.object(timezone, "now", return_value=timezone.now())
    def test_return_last_valid_token_price(
        self,
        timezone_now_mock: MagicMock,
        get_token_usd_price: MagicMock,
        get_token_eth_value_mock: MagicMock,
    ):
        """When the fresh price drops to 0, the last valid price is reused."""
        random_token_address = Account.create().address
        random_redis_key = Account.create().address
        expected = EthValueWithTimestamp(
            get_token_eth_value_mock.return_value, timezone_now_mock.return_value
        )
        self.assertEqual(
            calculate_token_eth_price_task.delay(
                random_token_address, random_redis_key
            ).result,
            expected,
        )
        get_token_eth_value_mock.return_value = 0.0
        self.assertEqual(
            calculate_token_eth_price_task.delay(
                random_token_address, random_redis_key, True
            ).result,
            expected,
        )
    @mock.patch(
        "safe_transaction_service.tokens.tasks.get_ethereum_network",
        return_value=EthereumNetwork.MAINNET,
    )
    @mock.patch.object(
        TokenList, "get_tokens", autospec=True, return_value=token_list_mock["tokens"]
    )
    def test_update_token_info_from_token_list_task(
        self, get_tokens_mock: MagicMock, get_ethereum_network_mock: MagicMock
    ):
        """Only tokens present in the token list are updated; count returned."""
        TokenListFactory()
        # No tokens in database, so nothing is updated
        self.assertEqual(update_token_info_from_token_list_task.delay().result, 0)
        # Create random token, it won't be updated as it's not matching any token on the list
        TokenFactory()
        self.assertEqual(update_token_info_from_token_list_task.delay().result, 0)
        # Create a token in the list, it should be updated
        TokenFactory(address="0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2")
        self.assertEqual(update_token_info_from_token_list_task.delay().result, 1)
        # Create another token in the list, both should be updated
        TokenFactory(address="0x2260FAC5E5542a773Aa44fBCfeDf7C193bc2C599")
        self.assertEqual(update_token_info_from_token_list_task.delay().result, 2)
4,541 | compute load | # -*- coding: utf-8 -*-
#
from itertools import groupby
from common.utils import get_logger
from terminal.const import ComponentLoad
logger = get_logger(__name__)
class ComputeLoadUtil:
    """Translate a component's raw system stats into a ComponentLoad level."""
    # system status
    @staticmethod
    def _common_compute_system_status(value, thresholds):
        """Map *value* onto normal/high/critical via [low, mid, high] bounds."""
        if thresholds[0] <= value <= thresholds[1]:
            return ComponentLoad.normal.value
        elif thresholds[1] < value <= thresholds[2]:
            return ComponentLoad.high.value
        else:
            return ComponentLoad.critical.value
    @classmethod
    def _compute_system_stat_status(cls, stat):
        """Return {stat_key: load-level value} for cpu/memory/disk readings."""
        system_stat_thresholds_mapper = {
            'cpu_load': [0, 5, 20],
            'memory_used': [0, 85, 95],
            'disk_used': [0, 80, 99]
        }
        system_status = {}
        for stat_key, thresholds in system_stat_thresholds_mapper.items():
            stat_value = getattr(stat, stat_key)
            if stat_value is None:
                msg = 'stat: {}, stat_key: {}, stat_value: {}'
                logger.debug(msg.format(stat, stat_key, stat_value))
                # Treat a missing reading as zero load.
                stat_value = 0
            status = cls._common_compute_system_status(stat_value, thresholds)
            system_status[stat_key] = status
        return system_status
    @classmethod
    def METHOD_NAME(cls, stat):
        """Collapse per-stat statuses into one overall ComponentLoad level."""
        if not stat:
            return ComponentLoad.offline
        system_status_values = cls._compute_system_stat_status(stat).values()
        # NOTE(review): the dict holds enum *values* while the membership
        # checks below use enum members; this relies on ComponentLoad members
        # comparing equal to their values (e.g. a str-based enum) — confirm.
        if ComponentLoad.critical in system_status_values:
            return ComponentLoad.critical
        elif ComponentLoad.high in system_status_values:
            return ComponentLoad.high
        else:
            return ComponentLoad.normal
class TypedComponentsStatusMetricsUtil(object):
    """Group non-deleted Terminal components by type and build per-type
    status/session metrics."""
    def __init__(self):
        self.components = []
        self.grouped_components = []
        self.get_components()
    def get_components(self):
        """Load components ordered by type and group them by type."""
        from ..models import Terminal
        components = Terminal.objects.filter(is_deleted=False).order_by('type')
        # itertools.groupby requires input sorted by the grouping key;
        # order_by('type') above guarantees that.
        grouped_components = groupby(components, lambda c: c.type)
        grouped_components = [(i[0], list(i[1])) for i in grouped_components]
        self.grouped_components = grouped_components
        self.components = components
    def get_metrics(self):
        """Return one dict per component type: totals, active sessions and
        the component names bucketed by their current load level."""
        metrics = []
        for _tp, components in self.grouped_components:
            metric = {
                'total': 0,
                'type': _tp,
                'session_active': 0,
                ComponentLoad.high: [],
                ComponentLoad.normal: [],
                ComponentLoad.offline: [],
                ComponentLoad.critical: [],
            }
            for component in components:
                # Bucket the component name under its current load level.
                metric[component.load].append(component.name)
                metric['total'] += 1
                metric['session_active'] += component.get_online_session_count()
            metrics.append(metric)
        return metrics
class ComponentsPrometheusMetricsUtil(TypedComponentsStatusMetricsUtil):
    """Render the grouped component metrics as Prometheus exposition text."""
    def __init__(self):
        super().__init__()
        self.metrics = self.get_metrics()
    @staticmethod
    def convert_status_metrics(metrics):
        """Flatten one per-type metric dict into a status -> value mapping."""
        # NOTE(review): 'normal'/'high'/... index the ComponentLoad buckets
        # built in get_metrics(); this relies on enum members hashing equal
        # to their string values. Also, those buckets are *lists of names*
        # (rendered via %s below), while 'any' is an int count — confirm
        # whether counts were intended here.
        return {
            'any': metrics['total'],
            'normal': metrics['normal'],
            'high': metrics['high'],
            'critical': metrics['critical'],
            'offline': metrics['offline']
        }
    def get_component_status_metrics(self):
        """Build the per-type, per-status totals section."""
        prometheus_metrics = list()
        # Summary of component counts per status, per component type.
        prometheus_metrics.append('# JumpServer 各组件状态个数汇总')
        status_metric_text = 'jumpserver_components_status_total{component_type="%s", status="%s"} %s'
        for metric in self.metrics:
            tp = metric['type']
            prometheus_metrics.append(f'## 组件: {tp}')
            status_metrics = self.convert_status_metrics(metric)
            for status, value in status_metrics.items():
                metric_text = status_metric_text % (tp, status, value)
                prometheus_metrics.append(metric_text)
        return prometheus_metrics
    def get_component_session_metrics(self):
        """Build the per-type active-session totals section."""
        prometheus_metrics = list()
        # Summary of active session counts per component type.
        prometheus_metrics.append('# JumpServer 各组件在线会话数汇总')
        session_active_metric_text = 'jumpserver_components_session_active_total{component_type="%s"} %s'
        for metric in self.metrics:
            tp = metric['type']
            prometheus_metrics.append(f'## 组件: {tp}')
            metric_text = session_active_metric_text % (tp, metric['session_active'])
            prometheus_metrics.append(metric_text)
        return prometheus_metrics
    def get_component_stat_metrics(self):
        """Build per-node stat metrics, emitting both the new and the legacy
        (old_stats_key) metric names for each stat."""
        prometheus_metrics = list()
        # Per-node metrics for each component.
        prometheus_metrics.append('# JumpServer 各组件一些指标')
        state_metric_text = 'jumpserver_components_%s{component_type="%s", component="%s"} %s'
        stats_key = [
            'cpu_load', 'memory_used', 'disk_used', 'session_online'
        ]
        old_stats_key = [
            'system_cpu_load_1', 'system_memory_used_percent',
            'system_disk_used_percent', 'session_active_count'
        ]
        old_stats_key_mapper = dict(zip(stats_key, old_stats_key))
        for stat_key in stats_key:
            prometheus_metrics.append(f'## 指标: {stat_key}')
            for component in self.components:
                # Skip components that are offline or have no reported stats.
                if not component.is_alive:
                    continue
                component_stat = component.last_stat
                if not component_stat:
                    continue
                metric_text = state_metric_text % (
                    stat_key, component.type, component.name, getattr(component_stat, stat_key)
                )
                prometheus_metrics.append(metric_text)
                # Duplicate the value under the legacy metric name.
                old_stat_key = old_stats_key_mapper.get(stat_key)
                old_metric_text = state_metric_text % (
                    old_stat_key, component.type, component.name, getattr(component_stat, stat_key)
                )
                prometheus_metrics.append(old_metric_text)
        return prometheus_metrics
    def get_prometheus_metrics_text(self):
        """Concatenate all metric sections into one exposition document."""
        prometheus_metrics = list()
        for method in [
            self.get_component_status_metrics,
            self.get_component_session_metrics,
            self.get_component_stat_metrics
        ]:
            prometheus_metrics.extend(method())
            prometheus_metrics.append('\n')
        prometheus_metrics_text = '\n'.join(prometheus_metrics)
        return prometheus_metrics_text
4,542 | parse avi params | # Copyright 2021 VMware, Inc.
# SPDX-License-Identifier: Apache License 2.0
#!/usr/bin/python
import sys, os, json, traceback, re, time
from avi.sdk.avi_api import ApiSession
from oauth2client.client import GoogleCredentials
from googleapiclient import discovery
'''
This ControlScript is executed on the Avi Controller every time there is a
CC_IP_ATTACHED or a CC_IP_DETACHED event.
CC_IP_ATTACHED: Event is triggered when a VIP is attached to a SE
CC_IP_DETACHED: Event is triggered when a VIP is detached from a SE, usually
when a SE goes down or a scale in occurs
The goal of this script is to add a route to GCP with the destination as the
VIP and nextHopIp as the GCP instance IP on which the Avi SE is running after a
CC_IP_ATTACHED event. After a CC_IP_DETACHED event, the goal of the script is
to remove the corresponding route.
Script assumptions:
1) The Avi Controller GCP instance has scope=compute-rw to be able to modify
routes in GCP
2) 'description' field in the Avi Service Engine Group is configured as a
JSON encoded string containing GCP project, zone and network
Event details contain the Avi SE UUID and the VIP.
1) GET Avi SE object from UUID and extract Avi SE IP address (which is
the same as the GCP instance IP address) and Avi Service Engine Group link
2) GET Avi Service Engine Group object. The 'description' field in the
Service Engine Group is a JSON encoded string containing GCP project and
network URL. Extract project and network from the 'description' field
3) Extract all routes matching destRange as VIP from GCP
4) If event is CC_IP_DETACHED, remove matching route with
destRange as vip and nextHopIp as instance IP in the appr network
If event is CC_IP_ATTACHED and no matching route exists already, add a new
route with destRange as vip and nextHopIp as instance IP in appr network
'''
def METHOD_NAME(argv):
    """Parse the JSON-encoded alert parameters passed as the single CLI
    argument; return an empty dict when the argument count is wrong."""
    if len(argv) != 2:
        return {}
    _, payload = argv
    return json.loads(payload)
def create_avi_endpoint():
    """Open an admin-tenant ApiSession to the local Avi Controller using the
    API_TOKEN and USER environment variables for authentication."""
    api_token = os.environ.get('API_TOKEN')
    username = os.environ.get('USER')
    return ApiSession.get_session(
        "localhost", username, token=api_token, tenant='admin')
def google_compute():
    """Build a GCP Compute v1 service client with application-default creds."""
    return discovery.build(
        'compute', 'v1',
        credentials=GoogleCredentials.get_application_default())
def gcp_program_route(gcp, event_id, project, network, inst_ip, vip):
    """Add or remove the GCP route mapping *vip* to Avi SE instance *inst_ip*.

    Args:
        gcp: googleapiclient compute service handle.
        event_id: 'CC_IP_DETACHED' deletes matching routes; 'CC_IP_ATTACHED'
            inserts a new route; anything else is a no-op.
        project: GCP project id.
        network: GCP network URL the route belongs to.
        inst_ip: instance IP used as the route's nextHopIp.
        vip: destination range, e.g. '10.0.0.5/32'.
    """
    # List all routes for vip
    result = gcp.routes().list(project=project,
        filter='destRange eq %s' % vip).execute()
    if (('items' not in result or len(result['items']) == 0)
        and event_id == 'CC_IP_DETACHED'):
        print(('Project %s destRange %s route not found' %
               (project, vip)))
        return
    if event_id == 'CC_IP_DETACHED':
        # Remove route for vip nextHop instance
        for r in result['items']:
            if (r['network'] == network and r['destRange'] == vip and
                    r['nextHopIp'] == inst_ip):
                result = gcp.routes().delete(project=project,
                    route=r['name']).execute()
                print(('Route %s delete result %s' % (r['name'], str(result))))
                # Wait until done or retries exhausted
                if 'name' in result:
                    start = int(time.time())
                    for i in range(0, 20):
                        op_result = gcp.globalOperations().get(project=project,
                            operation=result['name']).execute()
                        print(('op_result %s' % str(op_result)))
                        if op_result['status'] == 'DONE':
                            # BUGFIX: inspect the fresh operation result for
                            # errors, not the stale delete response ('result').
                            if 'error' in op_result:
                                print(('WARNING: Route delete had errors '
                                       'result %s' % str(op_result)))
                            else:
                                print(('Route delete done result %s' %
                                       str(op_result)))
                            break
                        if int(time.time()) - start > 20:
                            print(('WARNING: Wait exhausted last op_result %s' %
                                   str(op_result)))
                            break
                        else:
                            time.sleep(1)
                else:
                    print('WARNING: Unable to obtain name of route delete '
                          'operation')
    elif event_id == 'CC_IP_ATTACHED':
        # Add routes to instance
        # Route names can just have - and alphanumeric chars
        rt_name = re.sub('[./]+', '-', 'route-%s-%s' % (inst_ip, vip))
        route = {'name': rt_name,
                 'destRange': vip, 'network': network,
                 'nextHopIp': inst_ip}
        result = gcp.routes().insert(project=project,
            body=route).execute()
        print(('Route VIP %s insert result %s' %
               (vip, str(result))))
def handle_cc_alert(session, gcp, script_parms):
    """Handle a CC_IP_ATTACHED/CC_IP_DETACHED alert: resolve the SE's
    instance IP and SE group (project/network), then program the GCP route.

    Args:
        session: authenticated ApiSession to the Avi Controller.
        gcp: googleapiclient compute service handle.
        script_parms: decoded alert payload containing the event list.
    """
    se_name = script_parms['obj_name']
    print(('Event Se %s %s' % (se_name, str(script_parms))))
    if len(script_parms['events']) == 0:
        print ('WARNING: No events in alert')
        return
    # GET SE object from Avi for instance IP address and SE Group link
    rsp = session.get('serviceengine?uuid=%s' %
        script_parms['events'][0]['event_details']['cc_ip_details']['se_vm_uuid'])
    if rsp.status_code in range(200, 299):
        se = json.loads(rsp.text)
        if se['count'] == 0 or len(se['results']) == 0:
            print(('WARNING: SE %s no results' %
                script_parms['events'][0]['event_details']['cc_ip_details']['se_vm_uuid']))
            return
        # The management IP is the /32, non-VIP vNIC address.
        inst_ip = next((v['ip']['ip_addr']['addr'] for v in
                        se['results'][0]['mgmt_vnic']['vnic_networks']
                        if v['ip']['mask'] == 32 and v['mode'] != 'VIP'), '')
        if not inst_ip:
            print(('WARNING: Unable to find IP with mask 32 SE %s' % str(se['results'][0])))
            return
        # GET SE Group object for GCP project, zones and network
        # https://localhost/api/serviceenginegroup/serviceenginegroup-99f78850-4d1f-4b7b-9027-311ad1f8c60e
        seg_ref_list = se['results'][0]['se_group_ref'].split('/api/')
        seg_rsp = session.get(seg_ref_list[1])
        if seg_rsp.status_code in range(200, 299):
            vip = '%s/32' % script_parms['events'][0]['event_details']['cc_ip_details']['ip']['addr']
            seg = json.loads(seg_rsp.text)
            # The SE group 'description' field is a JSON-encoded string
            # carrying the GCP project and network (see module docstring).
            descr = json.loads(seg.get('description', '{}'))
            project = descr.get('project', '')
            network = descr.get('network', '')
            if not project or not network:
                print(('WARNING: Project, Network is required descr %s' %
                       str(descr)))
                return
            gcp_program_route(gcp, script_parms['events'][0]['event_id'],
                              project, network, inst_ip, vip)
        else:
            print(('WARNING: Unable to retrieve SE Group %s status %d' %
                   (se['results'][0]['se_group_ref'], seg_rsp.status_code)))
            return
    else:
        print(('WARNING: Unable to retrieve SE %s' %
               script_parms['events'][0]['obj_uuid']))
# Script entry
if __name__ == "__main__":
    # Parse the alert payload, open an admin session to the local controller,
    # build a GCP client and program routes for the event.
    script_parms = METHOD_NAME(sys.argv)
    try:
        admin_session = create_avi_endpoint()
        gcp = google_compute()
        handle_cc_alert(admin_session, gcp, script_parms)
    except Exception:
        # ControlScripts must never raise on the controller; log and exit.
        print(('WARNING: Exception with Avi/Gcp route %s' %
            traceback.format_exc()))
4,543 | test01 create | import pytest
import drjit as dr
import mitsuba as mi
def METHOD_NAME(variant_scalar_rgb):
    """A unit cube loads, has 12 triangles and surface area 6 * (2*2) = 24."""
    s = mi.load_dict({"type" : "cube"})
    assert s is not None
    assert s.primitive_count() == 12
    assert dr.allclose(s.surface_area(), 24.0)
def test02_bbox(variant_scalar_rgb):
    """Scaling a cube by r yields a bbox [-r, r]^3 centered at the origin."""
    for r in [1, 2, 4]:
        s = mi.load_dict({
            "type" : "cube",
            "to_world" : mi.Transform4f.scale([r, r, r])
        })
        b = s.bbox()
        assert b.valid()
        assert dr.allclose(b.center(), [0, 0, 0])
        assert dr.allclose(b.min, [-r, -r, -r])
        assert dr.allclose(b.max, [r, r, r])
def test03_ray_intersect(variant_scalar_rgb):
    """Shoot +z rays at scaled cubes; check hit prediction, UV partials,
    normals and UV coordinates on the front (z = -scale.z) face."""
    for scale in [mi.Vector3f([1.0, 1.0, 1.0]), mi.Vector3f([2.0, 1.0, 1.0]),
                  mi.Vector3f([1.0, 2.0, 1.0])]:
        for coordsX in [-1.5, -0.9, -0.5, 0, 0.5, 0.9, 1.5]:
            for coordsY in [-1.5, -0.9, -0.5, 0, 0.5, 0.9, 1.5]:
                s = mi.load_dict({
                    "type" : "scene",
                    "foo" : {
                        "type" : 'cube',
                        "to_world": mi.Transform4f.scale(scale),
                    }
                })
                ray = mi.Ray3f(o=[coordsX, coordsY, -8], d=[0.0, 0.0, 1.0],
                               time=0.0, wavelengths=[])
                si_found = s.ray_test(ray)
                # A hit occurs iff the origin lies within the cube's XY extent.
                assert si_found == ((abs(coordsX) <= scale.x) and (abs(coordsY) <= scale.y))
                if si_found:
                    si = s.ray_intersect(ray)
                    ray_u = mi.Ray3f(ray)
                    ray_v = mi.Ray3f(ray)
                    eps = 1e-4
                    # Finite-difference check of the UV partial derivatives.
                    ray_u.o += si.dp_du * eps
                    ray_v.o += si.dp_dv * eps
                    si_u = s.ray_intersect(ray_u)
                    si_v = s.ray_intersect(ray_v)
                    if si_u.is_valid():
                        du = (si_u.uv - si.uv) / eps
                        assert dr.allclose(du, [1, 0], atol=2e-2)
                    if si_v.is_valid():
                        dv = (si_v.uv - si.uv) / eps
                        assert dr.allclose(dv, [0, 1], atol=2e-2)
                    # Check normal
                    assert si.n == mi.Vector3f([0,0,-1])
                    # Check UV
                    oo = (coordsY - (-scale.y)) / ((scale.y) - (-scale.y))
                    tt = (coordsX - (-scale.x)) / (scale.x - (-scale.x ))
                    assert dr.allclose(si.uv, mi.Vector2f([1.0-oo, tt]), atol=1e-5, rtol=1e-5)
def test04_ray_intersect_vec(variant_scalar_rgb):
    """Scalar and vectorized ray-intersection must agree (check_vectorization)."""
    from mitsuba.scalar_rgb.test.util import check_vectorization
    def kernel(o):
        scene = mi.load_dict({
            "type" : "scene",
            "foo" : {
                "type" : "cube",
                "to_world" : mi.ScalarTransform4f.scale((0.5, 0.5, 0.5))
            }
        })
        # Map [0, 1]^3 sample points to [-1, 1]^2 origins in front of the cube.
        o = 2.0 * o - 1.0
        o.z = -8.0
        t = scene.ray_intersect(mi.Ray3f(o, [0, 0, 1])).t
        dr.eval(t)
        return t
    check_vectorization(kernel, arg_dims = [3], atol=1e-5)
def test05_check_normals(variant_scalar_rgb):
    """Each cube face reports an outward normal; flip_normals inverts them."""
    s = mi.load_dict({
        "type" : "scene",
        "foo" : {
            "type" : 'cube'
        }
    })
    # (ray origin, ray direction, expected normal) for all six faces.
    for face in [
        (mi.Vector3f(0,0,-8), mi.Vector3f(0,0,1), mi.Vector3f(0,0,-1)),
        (mi.Vector3f(0,0,8), mi.Vector3f(0,0,-1), mi.Vector3f(0,0,1)),
        (mi.Vector3f(-8,0,0), mi.Vector3f(1,0,0), mi.Vector3f(-1,0,0)),
        (mi.Vector3f(8,0,0), mi.Vector3f(-1,0,0), mi.Vector3f(1,0,0)),
        (mi.Vector3f(0,-8,0), mi.Vector3f(0,1,0), mi.Vector3f(0,-1,0)),
        (mi.Vector3f(0,8,0), mi.Vector3f(0,-1,0), mi.Vector3f(0,1,0)),
    ]:
        ray = mi.Ray3f(o=face[0], d=face[1],
                       time=0.0, wavelengths=[])
        si = s.ray_intersect(ray)
        assert si.n == face[2]
    # Same scene with flipped normals: every normal points inward now.
    ss = mi.load_dict({
        "type" : "scene",
        "foo" : {
            "type" : 'cube',
            "flip_normals": True
        }
    })
    for face in [
        (mi.Vector3f(0,0,-8), mi.Vector3f(0,0,1), mi.Vector3f(0,0,1)),
        (mi.Vector3f(0,0,8), mi.Vector3f(0,0,-1), mi.Vector3f(0,0,-1)),
        (mi.Vector3f(-8,0,0), mi.Vector3f(1,0,0), mi.Vector3f(1,0,0)),
        (mi.Vector3f(8,0,0), mi.Vector3f(-1,0,0), mi.Vector3f(-1,0,0)),
        (mi.Vector3f(0,-8,0), mi.Vector3f(0,1,0), mi.Vector3f(0,1,0)),
        (mi.Vector3f(0,8,0), mi.Vector3f(0,-1,0), mi.Vector3f(0,-1,0)),
    ]:
        ray = mi.Ray3f(o=face[0], d=face[1],
                       time=0.0, wavelengths=[])
        si2 = ss.ray_intersect(ray)
        assert si2.n == face[2]
4,544 | check timestamps | #!/usr/bin/env python3
import h5py
import numpy as np
import pandas as pd
from pprint import pprint
from tqdm import tqdm
tqdm.pandas()
import matplotlib.pyplot as plt
plt.style.use('../../pygama/clint.mpl')
from pygama import DataGroup
import pygama.io.lh5 as lh5
def main():
    """
    sandbox code for exploring LH5 file groups, loading channel maps, etc.

    Entry point: currently just runs the DataGroup exploration example.
    """
    test_datagroup()
def test_datagroup():
    """
    current columns:
    ['unique_key', 'run', 'label', 'YYYYmmdd', 'hhmmss', 'rtp', 'daq_dir',
     'daq_file', 'cmap', 'runtype', 'raw_file', 'raw_path', 'dsp_file',
     'dsp_path', 'hit_file', 'hit_path', 'daq_size_GB', 'proc_group']

    Select one calib file from the LPGTA fileDB and inspect its timestamps.
    """
    dg = DataGroup('LPGTA.json', load=True)
    # Narrow the fileDB down to a single processing group of one run.
    query = "run==30 and rtp == 'calib' and proc_group==35"
    dg.fileDB.query(query, inplace=True)
    # dg.fileDB = dg.fileDB[-1:]
    # print(dg.fileDB.columns)
    # show what has been selected
    view_cols = ['run', 'label', 'YYYYmmdd', 'hhmmss', 'rtp', 'cmap', 'runtype',
                 'daq_size_GB', 'proc_group']
    # print(dg.fileDB[view_cols].to_string())
    raw_path, raw_file = dg.fileDB[['raw_path','raw_file']].iloc[0]
    f_raw = f'{dg.lh5_dir}/{raw_path}/{raw_file}'
    if "sysn" in f_raw:
        # LPGTA raw paths contain a '{sysn}' placeholder; fill it in.
        tmp = {'sysn' : 'geds'} # hack for lpgta
        f_raw = f_raw.format_map(tmp)
        # f_dsp = f_dsp.format_map(tmp)
    # check_lh5_groups(f_raw)
    # load_raw_data_example(f_raw)
    METHOD_NAME(f_raw)
def check_lh5_groups(f_lh5):
    """
    useful but verbose.
    open an LH5 file store and identify all groups, datatypes, etc.
    """
    def print_attrs(name, obj):
        # Visitor callback: print each group/dataset name and its attributes.
        print(name) # show group name only
        # show attributes (lh5 datatypes)
        for key, val in obj.attrs.items():
            print(f"    {key}: {val}")
    f = h5py.File(f_lh5, 'r')
    f.visititems(print_attrs)
def load_raw_data_example(f_raw):
    """
    make a plot of the timestamps in a particular channel.
    instead of accessing just the timestamp column, this is an example
    of accessing the entire raw file (including waveforms) with LH5.

    Parameters
    ----------
    f_raw : str
        Path to the raw LH5 file to read.
    """
    sto = lh5.Store()
    tb_name = 'g024/raw'
    n_rows = 100 # np.inf to read all
    # method 1: call load_nda to pull out only timestamp column (fast)
    # par_data = lh5.load_nda([f_raw], ['timestamp'], tb_name)
    # pprint(par_data)
    # print(par_data['timestamp'].shape)
    # exit()
    # method 2: read all data, just to give a longer example of what we can access
    # TODO: include an example of slicing/selecting rows with np.where
    # read non-wf cols (lh5 Arrays)
    data_raw, n_tot = sto.read_object(tb_name, f_raw, n_rows=n_rows)
    # declare output table (must specify n_rows for size)
    tb_raw = lh5.Table(size=n_tot)
    for col in data_raw.keys():
        # Waveform and tracelist are handled separately below.
        if col in ['waveform','tracelist']: continue
        # copy all values
        newcol = lh5.Array(data_raw[col].nda, attrs=data_raw[col].attrs)
        # copy a selection (using np.where)
        # newcol = lh5.Array(data_raw[col].nda[idx], attrs=data_raw[col].attrs)
        tb_raw.add_field(col, newcol)
    df_raw = tb_raw.get_dataframe()
    print(df_raw)
    # load waveform column (nested LH5 Table)
    data_wfs, n_tot = sto.read_object(tb_name+'/waveform', f_raw, n_rows=n_rows)
    tb_wfs = lh5.Table(size=n_tot)
    for col in data_wfs.keys():
        attrs = data_wfs[col].attrs
        if isinstance(data_wfs[col], lh5.ArrayOfEqualSizedArrays):
            # idk why i can't put the filtered array into the constructor
            aoesa = lh5.ArrayOfEqualSizedArrays(attrs=attrs, dims=[1,1])
            aoesa.nda = data_wfs[col].nda
            # aoesa.nda = data_wfs[col].nda[idx] # with np.where selection
            newcol = aoesa
        else:
            newcol = lh5.Array(data_wfs[col].nda, attrs=attrs)
            # newcol = lh5.Array(data_wfs[col].nda[idx], attrs=attrs) # selection
        tb_wfs.add_field(col, newcol)
    # NOTE(review): this re-adds the *last* loop column under the name
    # 'waveform' (read back below via tb_wfs['waveform']) — looks like a
    # leftover from the loop above; confirm it is intentional.
    tb_wfs.add_field('waveform', newcol)
    tb_wfs.attrs = data_raw.attrs
    # can write to file, to read back in for DSP, etc.
    # sto.write_object(tb_raw, grp_data, f_peak)
    print(tb_wfs)
    print(tb_wfs['waveform'].shape)
def METHOD_NAME(f_raw):
    """
    fc daq timestamps are in seconds, from beginning of file:
    https://github.com/legend-exp/pygama/blob/master/pygama/io/fcdaq.py#L27
    """
    # Pull just the timestamp column of channel g024 out of the raw file.
    timestamps = lh5.load_nda([f_raw], ['timestamp'], 'g024/raw')['timestamp']
    print(timestamps)
    print(timestamps.shape)
    print(f'first: {timestamps[0]} {min(timestamps)} last: {timestamps[-1]} {max(timestamps)}')
    runtime_min = timestamps[-1] / 60  # runtime in minutes
    entry_idx = np.arange(len(timestamps))
    plt.plot(entry_idx, timestamps, '.b', label=f'runtime: {runtime_min:.1f} min')
    plt.xlabel('entry', ha='right', x=1)
    plt.ylabel('timestamp', ha='right', y=1)
    plt.legend()
    plt.savefig('./plots/ts_check.png', dpi=100)
# Run the sandbox when executed as a script.
if __name__=='__main__':
    main()
|
4,545 | get employee from user | # Copyright (c) 2019, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from collections import Counter
import frappe
from frappe import _
from frappe.desk.form.assign_to import add as add_assignment
from frappe.model.document import Document
from frappe.share import add_docshare
from frappe.utils import get_url, getdate, now
from frappe.utils.verified_command import get_signed_params
class Appointment(Document):
    """Appointment booking document.

    Handles per-slot agent capacity, linking a booking to an existing
    Lead/Customer by e-mail, e-mail verification for guest bookings,
    agent auto-assignment, and calendar Event synchronisation.
    """

    def find_lead_by_email(self):
        """Return the name of a Lead matching this booking's e-mail, or None."""
        lead_list = frappe.get_list(
            "Lead", filters={"email_id": self.customer_email}, ignore_permissions=True
        )
        if lead_list:
            return lead_list[0].name
        return None

    def find_customer_by_email(self):
        """Return the name of a Customer matching this booking's e-mail, or None."""
        customer_list = frappe.get_list(
            "Customer", filters={"email_id": self.customer_email}, ignore_permissions=True
        )
        if customer_list:
            return customer_list[0].name
        return None

    def before_insert(self):
        """Enforce slot capacity and link an existing Customer/Lead by e-mail."""
        number_of_appointments_in_same_slot = frappe.db.count(
            "Appointment", filters={"scheduled_time": self.scheduled_time}
        )
        number_of_agents = frappe.db.get_single_value("Appointment Booking Settings", "number_of_agents")
        # A configured agent count of 0 means "no capacity limit"; otherwise
        # reject the booking once every agent's slot is taken.
        if not number_of_agents == 0:
            if number_of_appointments_in_same_slot >= number_of_agents:
                frappe.throw(_("Time slot is not available"))
        # Link lead
        if not self.party:
            lead = self.find_lead_by_email()
            customer = self.find_customer_by_email()
            # Prefer an existing Customer over a Lead.
            if customer:
                self.appointment_with = "Customer"
                self.party = customer
            else:
                self.appointment_with = "Lead"
                self.party = lead

    def after_insert(self):
        """Assign and schedule if linked; otherwise start e-mail verification."""
        if self.party:
            # Create Calendar event
            self.auto_assign()
            self.create_calendar_event()
        else:
            # Set status to unverified
            self.status = "Unverified"
            # Send email to confirm
            self.send_confirmation_email()

    def send_confirmation_email(self):
        """E-mail the customer a signed link to verify this appointment."""
        verify_url = self._get_verify_url()
        template = "confirm_appointment"
        args = {
            "link": verify_url,
            "site_url": frappe.utils.get_url(),
            "full_name": self.customer_name,
        }
        frappe.sendmail(
            recipients=[self.customer_email],
            template=template,
            args=args,
            subject=_("Appointment Confirmation"),
        )
        if frappe.session.user == "Guest":
            frappe.msgprint(_("Please check your email to confirm the appointment"))
        else:
            frappe.msgprint(
                _("Appointment was created. But no lead was found. Please check the email to confirm")
            )

    def on_change(self):
        """Keep the linked calendar Event's start time in sync."""
        # Sync Calendar
        if not self.calendar_event:
            return
        cal_event = frappe.get_doc("Event", self.calendar_event)
        cal_event.starts_on = self.scheduled_time
        cal_event.save(ignore_permissions=True)

    def set_verified(self, email):
        """Mark the appointment verified once the e-mailed link is clicked.

        Raises a frappe validation error if *email* does not match the
        booking's customer e-mail.
        """
        if not email == self.customer_email:
            frappe.throw(_("Email verification failed."))
        # Create new lead
        self.create_lead_and_link()
        # Remove unverified status
        self.status = "Open"
        # Create calender event
        self.auto_assign()
        self.create_calendar_event()
        self.save(ignore_permissions=True)
        # Explicit commit: this is invoked from the verification web request.
        frappe.db.commit()

    def create_lead_and_link(self):
        """Create a Lead from the booking details and link it, if not linked."""
        # Return if already linked
        if self.party:
            return
        lead = frappe.get_doc(
            {
                "doctype": "Lead",
                "lead_name": self.customer_name,
                "email_id": self.customer_email,
                "phone": self.customer_phone_number,
            }
        )
        if self.customer_details:
            lead.append(
                "notes",
                {
                    "note": self.customer_details,
                    "added_by": frappe.session.user,
                    "added_on": now(),
                },
            )
        lead.insert(ignore_permissions=True)
        # Link lead
        self.party = lead.name

    def auto_assign(self):
        """Assign an agent: reuse the latest Opportunity's assignee when
        present, otherwise pick the first available least-loaded agent."""
        existing_assignee = self.get_assignee_from_latest_opportunity()
        if existing_assignee:
            # If the latest opportunity is assigned to someone
            # Assign the appointment to the same
            self.assign_agent(existing_assignee)
            return
        # Already assigned; nothing to do.
        if self._assign:
            return
        available_agents = _get_agents_sorted_by_asc_workload(getdate(self.scheduled_time))
        # NOTE(review): each entry is expected to be an (agent, count) tuple
        # (indexed agent[0] below), yet the whole tuple is passed to
        # _check_agent_availability — confirm the intended element type.
        for agent in available_agents:
            if _check_agent_availability(agent, self.scheduled_time):
                self.assign_agent(agent[0])
                break

    def get_assignee_from_latest_opportunity(self):
        """Return the first assignee of the party's newest Opportunity, or None."""
        if not self.party:
            return None
        if not frappe.db.exists("Lead", self.party):
            return None
        # NOTE(review): variable-name typo ("opporutnities") kept as-is.
        opporutnities = frappe.get_list(
            "Opportunity",
            filters={
                "party_name": self.party,
            },
            ignore_permissions=True,
            order_by="creation desc",
        )
        if not opporutnities:
            return None
        latest_opportunity = frappe.get_doc("Opportunity", opporutnities[0].name)
        assignee = latest_opportunity._assign
        if not assignee:
            return None
        # _assign is stored as a JSON-encoded list of users; take the first.
        assignee = frappe.parse_json(assignee)[0]
        return assignee

    def create_calendar_event(self):
        """Create (at most once) a public calendar Event for this appointment."""
        if self.calendar_event:
            return
        appointment_event = frappe.get_doc(
            {
                "doctype": "Event",
                "subject": " ".join(["Appointment with", self.customer_name]),
                "starts_on": self.scheduled_time,
                "status": "Open",
                "type": "Public",
                "send_reminder": frappe.db.get_single_value("Appointment Booking Settings", "email_reminders"),
                "event_participants": [
                    dict(reference_doctype=self.appointment_with, reference_docname=self.party)
                ],
            }
        )
        # NOTE(review): self._assign is a JSON-encoded list string; it is passed
        # raw where a single user id appears to be expected — verify.
        employee = METHOD_NAME(self._assign)
        if employee:
            appointment_event.append(
                "event_participants", dict(reference_doctype="Employee", reference_docname=employee.name)
            )
        appointment_event.insert(ignore_permissions=True)
        self.calendar_event = appointment_event.name
        self.save(ignore_permissions=True)

    def _get_verify_url(self):
        """Build the signed verification URL sent to the customer."""
        verify_route = "/book_appointment/verify"
        params = {"email": self.customer_email, "appointment": self.name}
        return get_url(verify_route + "?" + get_signed_params(params))

    def assign_agent(self, agent):
        """Share the document with *agent* if needed, then assign it to them."""
        if not frappe.has_permission(doc=self, user=agent):
            add_docshare(self.doctype, self.name, agent, flags={"ignore_share_permission": True})
        add_assignment({"doctype": self.doctype, "name": self.name, "assign_to": [agent]})
def _get_agents_sorted_by_asc_workload(date):
    """Return (agent, count) tuples sorted by ascending workload on *date*.

    Callers index the result as ``agent[0]``, so the return type must always
    be a list of tuples.  The original returned the bare string list when no
    appointments existed, which made ``agent[0]`` yield the first *character*
    of the agent's e-mail instead of the e-mail itself.
    """
    appointments = frappe.get_all("Appointment", fields="*")
    agent_list = _get_agent_list_as_strings()
    # Seed every configured agent with a baseline count so agents without
    # any appointment still appear in the result.
    appointment_counter = Counter(agent_list)
    for appointment in appointments:
        assigned_to = frappe.parse_json(appointment._assign)
        if not assigned_to:
            continue
        if (assigned_to[0] in agent_list) and getdate(appointment.scheduled_time) == date:
            appointment_counter[assigned_to[0]] += 1
    # most_common() sorts by descending count; reverse for ascending workload.
    sorted_agent_list = appointment_counter.most_common()
    sorted_agent_list.reverse()
    return sorted_agent_list
def _get_agent_list_as_strings():
    """Return the configured booking agents as a list of user-id strings."""
    settings = frappe.get_doc("Appointment Booking Settings")
    return [agent.user for agent in settings.agent_list]
def _check_agent_availability(agent_email, scheduled_time):
    """Return True if *agent_email* has no appointment at *scheduled_time*."""
    same_slot_appointments = frappe.get_all(
        "Appointment", filters={"scheduled_time": scheduled_time}
    )
    # NOTE(review): ``_assign`` is typically a JSON-encoded list string; the
    # direct equality comparison reproduces the original behaviour — verify.
    return not any(
        appointment._assign == agent_email for appointment in same_slot_appointments
    )
def METHOD_NAME(user):
    """Return the Employee document linked to *user*, or None if none exists."""
    employee_docname = frappe.db.get_value("Employee", {"user_id": user})
    return frappe.get_doc("Employee", employee_docname) if employee_docname else None
4,546 | test full cec2 pipeline | """Regression Tests for CEC2"""
# pylint: disable=too-many-locals invalid-name
from __future__ import annotations
from typing import Final
import numpy as np
from cpuinfo import get_cpu_info
from omegaconf import OmegaConf
from scipy.io import wavfile
from clarity.data.scene_renderer_cec2 import SceneRenderer
from clarity.enhancer.compressor import Compressor
from clarity.enhancer.nalr import NALR
from clarity.evaluator.haspi import haspi_v2_be
from clarity.utils.audiogram import Audiogram, Listener
# Pass some random data through code and compare with reference output
# scene_renderer, enhancer, compressor, haspi
CPUINFO: Final = get_cpu_info()  # NOTE(review): not referenced in this chunk — confirm before removing

# Fix the RNG so the regression output is reproducible.
np.random.seed(0)

# Set up some scene to simulate
# It's been designed to get good code coverage but running quickly
# - Using three maskers - one from each noise type
# - Using a short target with reduce pre and post silence
# - Only generating 2 hearing aid channels
SCENE: Final = {
    "dataset": "train",
    "room": "R06001",
    "scene": "S06001",
    "target": {"name": "T010_G0N_02468", "time_start": 0, "time_end": 115894},
    "duration": 8820,
    "interferers": [
        {
            "position": 1,
            "time_start": 0,
            "time_end": 8820,
            "type": "noise",
            "name": "CIN_fan_014.wav",
            "offset": 5376,
        },
        {
            "position": 2,
            "time_start": 0,
            "time_end": 8820,
            "type": "speech",
            "name": "som_04766_05.wav",
            "offset": 40000,
        },
        {
            "position": 3,
            "time_start": 0,
            "time_end": 8820,
            "type": "music",
            "name": "1111967.low.mp3",
            "offset": 842553,
        },
    ],
    "SNR": 0.0,
    "listener": {
        "rotation": [
            {"sample": 100, "angle": 52.3628},
            {"sample": 400, "angle": 38.5256},
        ],
        "hrir_filename": ["VP_N6-ED", "VP_N6-BTE_fr"],
    },
}

# Test fixture file locations ('{type}' is filled per interferer type).
TEST_PATHS: Final = OmegaConf.create(
    {
        "hoairs": "tests/test_data/rooms/HOA_IRs",
        "hrirs": "tests/test_data/hrir/HRIRs_MAT",
        "scenes": "tests/test_data/clarity_data/train/scenes",
        "targets": "tests/test_data/targets",
        "interferers": "tests/test_data/interferers/{type}",
    }
)

TEST_METADATA: Final = OmegaConf.create(
    {
        "room_definitions": "tests/test_data/metadata/rooms.train.json",
        "scene_definitions": "",  # Scene definition file not needed for test
        "hrir_metadata": "tests/test_data/metadata/hrir_data.json",
    }
)

# Shared renderer used as the default for the pipeline test below.
SCENE_RENDERER: Final = SceneRenderer(
    TEST_PATHS,
    TEST_METADATA,
    ambisonic_order=6,
    equalise_loudness=True,
    reference_channel=1,
    channel_norms=[12.0, 3.0],
)
def METHOD_NAME(
    regtest,
    tmp_path,
    scene: dict | None = None,
    scene_renderer: SceneRenderer = SCENE_RENDERER,
) -> None:
    """Test full CEC2 pipeline.

    Renders a scene, enhances the binaural mix (NAL-R + compression), scores
    it with HASPI, and writes the score to the regression fixture so that any
    cross-release change is detected.
    """
    if scene is None:
        scene = SCENE
    # Render the scene to ambisonic signals, then down-mix to binaural wavs.
    target, interferers, anechoic, _head_turn = scene_renderer.generate_hoa_signals(
        scene
    )
    scene_renderer.generate_binaural_signals(
        scene, target, interferers, anechoic, str(tmp_path)
    )
    _, reference = wavfile.read(f"{tmp_path}/S06001_target_anechoic_CH1.wav")
    _, signal = wavfile.read(f"{tmp_path}/S06001_mix_CH1.wav")
    # Convert 16-bit PCM to floats in [-1, 1).
    reference = reference.astype(float) / 32768.0
    signal = signal.astype(float) / 32768.0
    # Truncate to 200 ms - i.e. just use part of the signals
    # to speed up the HASPI calculation a little
    signal = signal[:8820, :]
    reference = reference[:8820, :]
    # The data below doesn't really need to be meaningful.
    # The purpose of the test is not to see if the haspi score is reasonable
    # but just to check that the results do not change unexpectedly across releases.
    nalr_cfg = {"nfir": 220, "sample_rate": 44100}
    compressor_cfg = {
        "threshold": 0.35,
        "attenuation": 0.1,
        "attack": 50,
        "release": 1000,
        "rms_buffer_size": 0.064,
    }
    audiogram_l = np.array([45, 50, 60, 65, 60, 65, 70, 80])
    audiogram_r = np.array([45, 45, 60, 70, 60, 60, 80, 80])
    audiogram_cfs = np.array([250, 500, 1000, 2000, 3000, 4000, 6000, 8000])
    audiogram_left = Audiogram(levels=audiogram_l, frequencies=audiogram_cfs)
    audiogram_right = Audiogram(levels=audiogram_r, frequencies=audiogram_cfs)
    sample_rate = 44100
    # Enhance: NAL-R fitting per ear, then compression and tanh soft clipping.
    enhancer = NALR(**nalr_cfg)
    compressor = Compressor(**compressor_cfg)  # type: ignore
    nalr_fir, _ = enhancer.build(audiogram_left)
    out_l = enhancer.apply(nalr_fir, signal[:, 0])
    nalr_fir, _ = enhancer.build(audiogram_right)
    out_r = enhancer.apply(nalr_fir, signal[:, 1])
    out_l, _, _ = compressor.process(out_l)
    out_r, _, _ = compressor.process(out_r)
    enhanced_audio = np.stack([out_l, out_r], axis=1)
    enhanced_audio = np.tanh(enhanced_audio)
    listener = Listener(audiogram_left=audiogram_left, audiogram_right=audiogram_right)
    sii_enhanced = haspi_v2_be(
        reference_left=reference[:, 0],
        reference_right=reference[:, 1],
        processed_left=enhanced_audio[:, 0],
        processed_right=enhanced_audio[:, 1],
        sample_rate=sample_rate,
        listener=listener,
    )
    regtest.write(f"Enhanced audio HASPI score is {sii_enhanced:0.7f}\n")
    # Enhanced audio HASPI score is 0.2994066
4,547 | icon | """
iconfont provides a barebones system to get QIcons from icon fonts (like Font Awesome).
The inspiration and methodology come from the 'QtAwesome' module, which does exactly this, but
is a little too big, complicated, and flexible for PyDM's needs.
"""
import json
import os
import sys
from typing import Optional
from qtpy import QtGui, QtWidgets
from qtpy.QtCore import QPoint, QRect, Qt, qRound
from qtpy.QtGui import (QColor, QFont, QFontDatabase, QIcon, QIconEngine,
QPainter, QPixmap)
# Python 2 compatibility shim: 'unichr' does not exist on Python 3.
if sys.version_info[0] == 3:
    unichr = chr
class IconFont(object):
    """IconFont represents an icon font. Users will generally want
    to use IconFont.icon() to get a QIcon object for the character they want."""

    # Singleton instance, created lazily in __new__.
    __instance = None

    def __init__(self):
        # __init__ runs on every IconFont() call; configure the singleton
        # only on first construction.
        if self.__initialized:
            return
        self.font_file = "fontawesome.otf"  # specify these relative to this file.
        self.charmap_file = "fontawesome-charmap.json"
        self.font_name = None          # family name once loaded
        self.char_map = {}             # icon name -> glyph character
        self.loaded_fonts = {}         # "<ttf>|<charmap>" -> cached load result
        self.__initialized = True

    def __new__(cls, *args, **kwargs):
        # Classic singleton: reuse the single instance across the process.
        if cls.__instance is None:
            cls.__instance = object.__new__(IconFont)
            cls.__instance.__initialized = False
        return cls.__instance

    def load_font(self, ttf_filename, charmap_filename):
        """
        Load font from ``ttf_filename`` with a mapping defined in
        ``charmap_filename``.
        """
        def hook(obj):
            # The JSON charmap stores hex code points; convert to characters.
            result = {}
            for key in obj:
                result[key] = unichr(int(obj[key], 16))
            return result

        # Already loaded; nothing to do.
        if self.char_map:
            return
        cache_key = ttf_filename + "|" + charmap_filename
        ttf_fname = os.path.join(os.path.dirname(os.path.realpath(__file__)), ttf_filename)
        font_id = QFontDatabase.addApplicationFont(ttf_fname)
        if font_id >= 0:
            font_families = QFontDatabase.applicationFontFamilies(font_id)
        else:
            # addApplicationFont failed: fall back to a previous successful
            # load of the same font/charmap pair, if any.
            cache = self.loaded_fonts.get(cache_key, None)
            if cache is None:
                raise OSError("Could not load ttf file for icon font.")
            self.char_map = cache["char_map"]
            self.font_name = cache["font_name"]
            return
        self.font_name = font_families[0]
        filename = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), charmap_filename
        )
        with open(filename, 'r') as codes:
            self.char_map = json.load(codes, object_hook=hook)
        # Cache for the fallback path above.
        self.loaded_fonts[cache_key] = {
            "char_map": self.char_map,
            "font_name": self.font_name,
        }

    def get_char_for_name(self, name: str) -> str:
        """
        Get a character icon for the given name from the character map.
        Parameters
        ----------
        name : str
            The user-friendly icon name.
        Returns
        -------
        str
            The Qt-facing icon text to use with the font.
        Raises
        ------
        ValueError
            If *name* is not present in the character map.
        """
        if name in self.char_map:
            return self.char_map[name]
        raise ValueError("Invalid icon name for font.")

    def _load_font_if_needed(self) -> bool:
        """
        Load the configured font if a QApplication is available
        and the font was not already loaded.
        Returns
        -------
        bool
            Readiness indicator - return True if the font has been loaded.
        """
        # Fonts cannot be registered before a QApplication exists.
        if QtWidgets.QApplication.instance() is None:
            return False
        if not self.char_map:
            self.load_font(self.font_file, self.charmap_file)
        # If it was loaded correctly, the char map will be populated:
        return bool(self.char_map)

    def font(self, size: int) -> Optional[QtGui.QFont]:
        """
        Load the font at a given pixel size.
        Returns
        -------
        QtGui.QFont or None
            The font, if available. If a QApplication is not yet created,
            None will be returned.
        """
        if not self._load_font_if_needed():
            return None
        font = QFont(self.font_name)
        font.setPixelSize(int(size))
        return font

    def METHOD_NAME(self, name, color=None) -> Optional[QtGui.QIcon]:
        """
        Retrieve the icon given a name and color.
        Parameters
        ----------
        name : str
            The Icon string identifier.
            Icon strings can be found at: https://fontawesome.com/icons?d=gallery
        color : QColor, Optional
            The base color to use when constructing the Icon. Default is QColor(90, 90, 90).
        Returns
        -------
        QIcon
            The desired Icon. ``None`` if a QApplication is not yet available.
        """
        if not self._load_font_if_needed():
            return None
        char = self.get_char_for_name(name)
        engine = CharIconEngine(self, char, color)
        return QIcon(engine)
class CharIconEngine(QIconEngine):
    """Subclass of QIconEngine that is designed to draw characters from icon fonts."""

    def __init__(self, icon_font, char, color=None):
        super(CharIconEngine, self).__init__()
        self.icon_font = icon_font
        self.char = char
        if color is None:
            self._base_color = QColor(90, 90, 90)
        else:
            self._base_color = color
        # Lighter variant of the base color, used for the Disabled icon mode.
        self._disabled_color = QColor.fromHslF(self._base_color.hueF(), self._base_color.saturationF(),
                                               max(min(self._base_color.lightnessF() + 0.25, 1.0), 0.0))

    def paint(self, painter, rect, mode, state):
        """Draw the glyph centered in *rect* with the mode-appropriate color."""
        painter.save()
        if mode == QIcon.Disabled:
            color = self._disabled_color
        else:
            color = self._base_color
        painter.setPen(color)
        scale_factor = 1.0
        # Glyph slightly smaller than the cell so ascenders/descenders fit.
        draw_size = 0.875 * qRound(rect.height() * scale_factor)
        painter.setFont(self.icon_font.font(draw_size))
        painter.setOpacity(1.0)
        painter.drawText(rect, int(Qt.AlignCenter | Qt.AlignVCenter), self.char)
        painter.restore()

    def pixmap(self, size, mode, state):
        """Render the glyph into a transparent QPixmap of the given size."""
        pm = QPixmap(size)
        pm.fill(Qt.transparent)
        # Bug fix: the painter must be explicitly ended before the pixmap is
        # returned. The original relied on Python GC to destroy the temporary
        # QPainter, which is non-deterministic and leaves the pixmap with an
        # active painter ("QPaintDevice: Cannot destroy paint device that is
        # being painted" / corrupted rendering).
        painter = QPainter(pm)
        self.paint(painter, QRect(QPoint(0, 0), size), mode, state)
        painter.end()
        return pm
4,548 | cond | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the swig wrapper tf_optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.grappler import item as gitem
from tensorflow.python.grappler import tf_optimizer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class PyWrapOptimizeGraphTest(test.TestCase):
  """Exercises the tf_optimizer (Grappler) swig wrapper end to end."""

  @test_util.run_deprecated_v1
  def testBasic(self):
    """Make sure arguments can be passed correctly."""
    a = constant_op.constant(10, name='a')
    b = constant_op.constant(20, name='b')
    c = math_ops.add_n([a, b], name='c')
    d = math_ops.add_n([b, c], name='d')
    train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
    # Being a train_op will make 'd' to be added as a fetch node.
    train_op.append(d)
    mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph())
    config = config_pb2.ConfigProto()
    rewriter_config = config.graph_options.rewrite_options
    rewriter_config.optimizers.append('constfold')
    rewriter_config.min_graph_nodes = -1
    graph = tf_optimizer.OptimizeGraph(config, mg)
    # Constant folding should collapse everything into the single fetch node.
    self.assertEqual(len(graph.node), 1)
    self.assertItemsEqual([node.name for node in graph.node], ['d'])

  @test_util.run_v1_only('b/120545219')
  def testKeepNodes(self):
    """Nodes in collections or marked _grappler_do_not_remove must survive."""
    g = ops.Graph()
    with g.as_default():
      a1 = variables.VariableV1(
          1.0)  # Must be preserved since it's in the collection 'variables'.
      a2 = constant_op.constant(0, shape=[50, 50], name='keep')
      ops.add_to_collection('a2', a2)  # Explicitly add to collection.
      with g._attr_scope(
          {'_grappler_do_not_remove': attr_value_pb2.AttrValue(b=True)}):
        a3 = constant_op.constant(0, name='keep2')
      b = constant_op.constant(1, shape=[100, 10])
      c = constant_op.constant(0, shape=[10, 30])
      d = math_ops.matmul(b, c)
      ops.add_to_collection('train_op', d)  # d is the fetch node.
    # Optimize the graph.
    mg = meta_graph.create_meta_graph_def(graph=g)
    config = config_pb2.ConfigProto()
    rewriter_config = config.graph_options.rewrite_options
    rewriter_config.min_graph_nodes = -1
    optimized_graph = tf_optimizer.OptimizeGraph(config, mg)
    # Check that the nodes referenced in various collections have been preserved
    optimized_graph_nodes = [node.name for node in optimized_graph.node]
    expected_nodes = [
        d.op.name, a1.op.name, a2.op.name, a3.op.name, 'Variable/initial_value',
        'Variable/Assign'
    ]
    self.assertEqual(len(optimized_graph_nodes), len(expected_nodes))
    self.assertAllInSet(optimized_graph_nodes, expected_nodes)

  @test_util.run_v1_only('b/120545219')
  def testLoops(self):
    """While-loop graphs must survive optimization with shapes intact."""
    g = ops.Graph()
    with g.as_default():

      def METHOD_NAME(_, counter):
        # Loop condition: keep accumulating until 'end' is reached.
        return counter < end

      def _Body(buf, counter):
        # Append the counter value to the buffer each iteration.
        buf = array_ops.concat([buf, [counter]], 0)
        counter += 1
        return [buf, counter]

      start = array_ops.placeholder(shape=[], dtype=dtypes.int32)
      end = array_ops.placeholder(shape=[], dtype=dtypes.int32)
      init_buf = array_ops.zeros(shape=[0], dtype=dtypes.int32)
      loop_vars = [init_buf, start]
      shape_inv = [
          tensor_shape.TensorShape([None]),
          tensor_shape.TensorShape([])
      ]
      buf, _ = control_flow_ops.while_loop(METHOD_NAME, _Body, loop_vars, shape_inv)
      f = -array_ops.ones_like(buf, optimize=False)
      buf_shape = array_ops.shape(buf)
      f_shape = array_ops.shape(f)
      ops.add_to_collection('train_op', buf_shape)
      ops.add_to_collection('train_op', f_shape)
    # Optimize the graph.
    mg = meta_graph.create_meta_graph_def(graph=g)
    config = config_pb2.ConfigProto()
    rewriter_config = config.graph_options.rewrite_options
    rewriter_config.min_graph_nodes = -1
    optimized_graph = tf_optimizer.OptimizeGraph(config, mg)
    mg.graph_def.CopyFrom(optimized_graph)
    # Check that the nodes referenced in various collections have been preserved
    item = gitem.Item(mg)
    props = item.GetOpProperties()
    buf_prop = props[buf.op.name]
    f_prop = props[f.op.name]
    self.assertEqual(buf_prop, f_prop)
# Run the test suite when executed directly.
if __name__ == '__main__':
  test.main()
4,549 | set up | #!/usr/bin/env python3
# group: rw
#
# Test streaming with throttle nodes on top
#
# Copyright (C) 2022 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import asyncio
import os
from typing import List
import iotests
from iotests import qemu_img_create, qemu_io
# Backing-chain image size and paths used by the test (256 MiB).
image_size = 256 * 1024 * 1024
base_img = os.path.join(iotests.test_dir, 'base.img')
top_img = os.path.join(iotests.test_dir, 'top.img')
class TcgVM(iotests.VM):
    '''
    iotests.VM variant that runs under -accel tcg.  Simply calling
    iotests.VM.add_args('-accel', 'tcg') would not work: it would place
    -accel qtest before -accel tcg, and QEMU honours -accel options in
    the order they appear on the command line.
    '''
    @property
    def _base_args(self) -> List[str]:
        # Prepend so the tcg accelerator outranks qtest from the base class.
        inherited_args = super()._base_args
        return ['-accel', 'tcg'] + inherited_args
class TestStreamWithThrottle(iotests.QMPTestCase):
    """Stream below two stacked throttle filter nodes (regression, issue 1215)."""

    def METHOD_NAME(self) -> None:
        '''
        Create a simple backing chain between two images, write something to
        the base image. Attach them to the VM underneath two throttle nodes,
        one of which has actually no limits set, but the other does. Then put
        a virtio-blk device on top.
        This test configuration has been taken from
        https://gitlab.com/qemu-project/qemu/-/issues/1215
        '''
        qemu_img_create('-f', iotests.imgfmt, base_img, str(image_size))
        qemu_img_create('-f', iotests.imgfmt, '-b', base_img, '-F',
                        iotests.imgfmt, top_img, str(image_size))
        # Write something to stream
        qemu_io(base_img, '-c', f'write 0 {image_size}')
        # Node graph: throttled-node (limited) -> throttle (unlimited)
        #             -> unthrottled-node (format) -> file.
        blockdev = {
            'driver': 'throttle',
            'node-name': 'throttled-node',
            'throttle-group': 'thrgr-limited',
            'file': {
                'driver': 'throttle',
                'throttle-group': 'thrgr-unlimited',
                'file': {
                    'driver': iotests.imgfmt,
                    'node-name': 'unthrottled-node',
                    'file': {
                        'driver': 'file',
                        'filename': top_img
                    }
                }
            }
        }
        # Issue 1215 is not reproducible in qtest mode, which is why we need to
        # create an -accel tcg VM
        self.vm = TcgVM()
        self.vm.add_object('iothread,id=iothr0')
        self.vm.add_object('throttle-group,id=thrgr-unlimited')
        self.vm.add_object('throttle-group,id=thrgr-limited,'
                           'x-iops-total=10000,x-bps-total=104857600')
        self.vm.add_blockdev(self.vm.qmp_to_opts(blockdev))
        self.vm.add_device('virtio-blk,iothread=iothr0,drive=throttled-node')
        self.vm.launch()

    def tearDown(self) -> None:
        # Shut the VM down before removing its backing files.
        self.vm.shutdown()
        os.remove(top_img)
        os.remove(base_img)

    def test_stream(self) -> None:
        '''
        Do a simple stream beneath the two throttle nodes. Should complete
        with no problems.
        '''
        result = self.vm.qmp('block-stream',
                             job_id='stream',
                             device='unthrottled-node')
        self.assert_qmp(result, 'return', {})
        # Should succeed and not time out
        try:
            self.vm.run_job('stream')
        except asyncio.TimeoutError:
            # VM may be stuck, kill it before tearDown()
            self.vm.kill()
            raise
if __name__ == '__main__':
    # Must support backing images
    iotests.main(supported_fmts=['qcow', 'qcow2', 'qed'],
                 supported_protocols=['file'],
                 required_fmts=['throttle'])
4,550 | test timesteps | import torch
from diffusers import LMSDiscreteScheduler
from diffusers.utils.testing_utils import torch_device
from .test_schedulers import SchedulerCommonTest
class LMSDiscreteSchedulerTest(SchedulerCommonTest):
    """Regression tests for LMSDiscreteScheduler (configs and full denoise loops)."""

    scheduler_classes = (LMSDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Return a default scheduler config; **kwargs override individual keys."""
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def METHOD_NAME(self):
        # Sweep over training-timestep counts.
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        # Sweep over matched (beta_start, beta_end) pairs.
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 800]:
            self.check_over_forward(time_step=t)

    def test_full_loop_no_noise(self):
        """Full denoising loop; output statistics pinned against reference values."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        # Regression values; a change here means numerics drifted.
        assert abs(result_sum.item() - 1006.388) < 1e-2
        assert abs(result_mean.item() - 1.31) < 1e-3

    def test_full_loop_with_v_prediction(self):
        """Same loop with v-prediction parameterization."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 0.0017) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        """Full loop with timesteps placed on the accelerator device."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 1006.388) < 1e-2
        assert abs(result_mean.item() - 1.31) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        """Full loop on device using the Karras sigma schedule."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 3812.9927) < 2e-2
        assert abs(result_mean.item() - 4.9648) < 1e-3
4,551 | pre export optional cube region | # This file is part of Korman.
#
# Korman is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Korman is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Korman. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import bmesh
import bpy
import mathutils
from contextlib import contextmanager
from typing import *
from PyHSPlasma import *
def affine_parts(xform):
    """Decompose *xform* into 3ds-max-style affine parts (hsAffineParts)."""
    parts = hsAffineParts()
    parts.T = hsVector3(*xform.to_translation())
    parts.K = hsVector3(*xform.to_scale())
    # F flags a mirrored transform via the sign of the determinant.
    parts.F = 1.0 if xform.determinant() >= 0.0 else -1.0
    rotation = xform.to_quaternion()
    parts.Q = quaternion(rotation)
    rotation.normalize()
    # U holds the normalized copy of the same rotation.
    parts.U = quaternion(rotation)
    return parts
def color(blcolor, alpha=1.0):
    """Build an hsColorRGBA from a Blender Color plus an alpha channel."""
    r, g, b = blcolor.r, blcolor.g, blcolor.b
    return hsColorRGBA(r, g, b, alpha)
def matrix44(blmat):
    """Copy a 4x4 mathutils.Matrix into a new hsMatrix44."""
    hsmat = hsMatrix44()
    for row in range(4):
        for col in range(4):
            hsmat[row, col] = blmat[row][col]
    return hsmat
def quaternion(blquat):
    """Convert a mathutils.Quaternion to an hsQuat (x, y, z, w order)."""
    return hsQuat(*(getattr(blquat, axis) for axis in "xyzw"))
class BMeshObject:
    """Owns a freshly created Blender mesh object and exposes its geometry
    through a temporary BMesh via the context-manager protocol.

    Unknown attribute reads/writes fall through to the wrapped
    ``bpy.types.Object``, so instances can mostly be used where a Blender
    object is expected.
    """

    def __init__(self, name: str, managed: bool = True):
        # When managed, the object and mesh are unlinked/freed in __del__.
        self._managed = managed
        self._bmesh = None
        self._mesh = bpy.data.meshes.new(name)
        self._obj = bpy.data.objects.new(name, self._mesh)
        self._obj.draw_type = "WIRE"
        bpy.context.scene.objects.link(self._obj)

    def __del__(self):
        # NOTE(review): relies on bpy still being usable at GC time; callers
        # that need deterministic cleanup should use release() explicitly.
        if self._managed:
            bpy.context.scene.objects.unlink(self._obj)
            bpy.data.meshes.remove(self._mesh)

    def __enter__(self) -> bmesh.types.BMesh:
        # Hand out a BMesh populated from the wrapped mesh; edits are
        # written back to the mesh in __exit__.
        if self._mesh is not None:
            self._bmesh = bmesh.new()
            self._bmesh.from_mesh(self._mesh)
            return self._bmesh

    def __exit__(self, type, value, traceback):
        if self._bmesh is not None:
            self._bmesh.to_mesh(self._mesh)
            self._bmesh.free()
            self._bmesh = None

    def __getattr__(self, name: str) -> Any:
        # Fall back to the wrapped Blender object for unknown attributes.
        return getattr(self._obj, name)

    def __setattr__(self, name: str, value: Any) -> None:
        # NOTE: Calling `hasattr()` will trigger infinite recursion in __getattr__(), so
        # check the object dict itself for anything that we want on this instance.
        d = self.__dict__
        if name not in d:
            obj = d.get("_obj")
            if obj is not None:
                if hasattr(obj, name):
                    # Attribute belongs to the wrapped object; forward it.
                    setattr(obj, name, value)
                    return
        super().__setattr__(name, value)

    @property
    def object(self) -> bpy.types.Object:
        # The wrapped Blender object (still owned by this wrapper).
        return self._obj

    def release(self) -> bpy.types.Object:
        # Relinquish ownership: the caller becomes responsible for cleanup.
        self._managed = False
        return self._obj
def create_cube_region(name: str, size: float, owner_object: bpy.types.Object) -> bpy.types.Object:
    """Create a wireframe cube region of ``size`` centered on ``owner_object``."""
    region = BMeshObject(name)
    region.plasma_object.enabled = True
    region.plasma_object.page = owner_object.plasma_object.page
    region.hide_render = True
    # Move the cube from the region object's origin onto the owner's origin.
    offset = owner_object.matrix_world.translation - region.matrix_world.translation
    with region as bm:
        bmesh.ops.create_cube(bm, size=size)
        bmesh.ops.transform(
            bm,
            matrix=mathutils.Matrix.Translation(offset),
            space=region.matrix_world,
            verts=bm.verts,
        )
    return region.release()
@contextmanager
def METHOD_NAME(source, attr: str, name: str, size: float, owner_object: bpy.types.Object) -> Optional[bpy.types.Object]:
    """Context manager ensuring ``source.<attr>`` points at a cube region
    for the duration of the block.

    If the property is unset, a temporary cube region is created, assigned,
    and yielded; on exit the property is reset so no trace is left behind.
    If the property is already set, nothing is created and None is yielded.
    """
    if getattr(source, attr) is None:
        region_object = create_cube_region(name, size, owner_object)
        setattr(source, attr, region_object)
        try:
            yield region_object
        finally:
            # Undo the temporary assignment, restoring the property default.
            source.property_unset(attr)
    else:
        # contextlib.contextmanager requires for us to yield. Sad.
        yield
@contextmanager
def temporary_mesh_object(source : bpy.types.Object) -> bpy.types.Object:
    """Creates a temporary mesh object from a nonmesh object that will only exist for the duration
    of the context."""
    assert source.type != "MESH"
    # Evaluate the source with render-quality modifiers applied.
    obj = bpy.data.objects.new(source.name, source.to_mesh(bpy.context.scene, True, "RENDER"))
    obj.draw_type = "WIRE"
    # Mirror the source's placement in the scene graph.
    obj.parent = source.parent
    obj.matrix_local, obj.matrix_world = source.matrix_local, source.matrix_world
    bpy.context.scene.objects.link(obj)
    try:
        yield obj
    finally:
        bpy.data.objects.remove(obj)
def transform_mesh(mesh: bpy.types.Mesh, matrix: mathutils.Matrix):
    """Apply *matrix* to *mesh*, flipping normals for mirrored transforms.

    Blender displays negatively scaled geometry differently than it applies
    the scale (Ctrl+A) — the normals differ. We prefer to match the visual
    behavior, so a negative-determinant matrix also flips the mesh normals.
    """
    needs_flip = matrix.is_negative
    mesh.transform(matrix)
    if needs_flip:
        mesh.flip_normals()
4,552 | rand jac | """
A tool to make it easier to investigate coloring of jacobians with different sparsity structures.
"""
import sys
import numpy as np
from openmdao.utils.general_utils import printoptions
from openmdao.utils.coloring import _compute_coloring
class TotJacBuilder(object):
    """Builds boolean total-jacobian sparsity patterns for experimenting
    with simultaneous-derivative coloring."""

    def __init__(self, rows, cols):
        # J is a dense boolean mask; True marks a structurally nonzero entry.
        self.J = np.zeros((rows, cols), dtype=bool)
        # Populated by color(); holds the computed coloring (or None).
        self.coloring = None

    def add_random_points(self, npoints):
        """Turn on `npoints` randomly chosen currently-False entries of J."""
        nrows, ncols = self.J.shape
        count = 0
        zro = self.J == False
        # Put npoints Trues at the front of the empty slots, then shuffle so
        # the Trues land at random empty positions.
        flat = self.J[zro].flatten()
        flat[:npoints] = True
        np.random.shuffle(flat)
        self.J[zro] = flat

    def add_row(self, idx, density=1.0):
        """Fill row `idx` with a (possibly partially dense) row block."""
        self.add_block(self.create_row(density=density), idx, 0)

    def add_col(self, idx, density=1.0):
        """Fill column `idx` with a (possibly partially dense) column block."""
        self.add_block(self.create_col(density=density), 0, idx)

    def create_row(self, density=1.0):
        """Return a 1 x ncols boolean block with the given fill density."""
        return self.create_block((1, self.J.shape[1]), density=density)

    def create_col(self, density=1.0):
        """Return an nrows x 1 boolean block with the given fill density."""
        return self.create_block((self.J.shape[0], 1), density=density)

    def create_block(self, shape, density=1.0):
        """Return a boolean block of `shape` with ~`density` fraction True."""
        if density == 1.0:
            return np.ones(shape, dtype=bool)
        else:
            rows, cols = shape
            num = int((rows * cols) * density)
            vec = np.zeros(int(rows * cols), dtype=bool)
            vec[:num] = True
            np.random.shuffle(vec)
            return vec.reshape(shape)

    def add_block(self, block, start_row, start_col):
        """Paste `block` into J with its corner at (start_row, start_col)."""
        rows, cols = block.shape
        self.J[start_row:start_row + rows, start_col:start_col + cols] = block

    def add_block_diag(self, shapes, start_row, start_col, density=1.0):
        """Lay blocks of the given `shapes` along a diagonal starting at
        (start_row, start_col)."""
        row_idx = start_row
        col_idx = start_col
        for shape in shapes:
            self.add_block(self.create_block(shape, density=density), row_idx, col_idx)
            row_idx += shape[0]
            col_idx += shape[1]

    def color(self, mode='auto', fname=None):
        """Compute (and optionally save to `fname`) a coloring of J."""
        self.coloring = _compute_coloring(self.J, mode)
        if self.coloring is not None and fname is not None:
            self.coloring.save(fname)
        return self.coloring

    def show(self):
        """Print the coloring plus basic stats about J. Requires color()."""
        self.coloring.display_txt()
        # Max nonzeros in any row/column: worst case solves per direction.
        maxdeg_fwd = np.max(np.count_nonzero(self.J, axis=1))
        maxdeg_rev = np.max(np.count_nonzero(self.J, axis=0))
        print("Shape:", self.J.shape)
        print("Density:", np.count_nonzero(self.J) / self.J.size)
        print("Max degree (fwd, rev):", maxdeg_fwd, maxdeg_rev)
        self.coloring.summary()

    def shuffle_rows(self):
        """Randomly permute the rows of J in place."""
        np.random.shuffle(self.J)

    def density_info(self):
        """Return (density, max_row_density, n_dense_rows, max_col_density,
        n_dense_cols) describing J's fill pattern."""
        J = self.J
        density = np.count_nonzero(J) / J.size
        row_density = np.count_nonzero(J, axis=1) / J.shape[1]
        max_row_density = np.max(row_density)
        n_dense_rows = row_density[row_density == 1.0].size
        col_density = np.count_nonzero(J, axis=0) / J.shape[0]
        max_col_density = np.max(col_density)
        n_dense_cols = col_density[col_density == 1.0].size
        return density, max_row_density, n_dense_rows, max_col_density, n_dense_cols

    @staticmethod
    def make_blocks(num_blocks, min_shape, max_shape):
        """Generate `num_blocks` random block shapes within the given bounds.

        Returns (shapes, total_rows, total_cols).
        """
        shapes = []
        row_size = col_size = 0
        min_rows, min_cols = min_shape
        max_rows, max_cols = max_shape
        for b in range(num_blocks):
            nrows = np.random.randint(min_rows, max_rows + 1)
            ncols = np.random.randint(min_cols, max_cols + 1)
            shapes.append((nrows, ncols))
            row_size += nrows
            col_size += ncols
        return shapes, row_size, col_size

    @staticmethod
    def make_jac(n_dense_rows=0, row_density=1.0, n_dense_cols=0, col_density=1.0,
                 n_blocks=0, min_shape=(1,1), max_shape=(2,2), n_random_pts=0):
        """Build a TotJacBuilder with optional block-diagonal structure,
        dense rows/cols, and randomly scattered nonzeros."""
        if n_blocks > 0:
            shapes, nrows, ncols = TotJacBuilder.make_blocks(n_blocks, min_shape, max_shape)
            builder = TotJacBuilder(nrows + n_dense_rows, ncols + n_dense_cols)
            builder.add_block_diag(shapes, n_dense_rows, n_dense_cols)
        else:
            # No block structure requested: fall back to a fixed-size matrix.
            nrows, ncols = (100, 50)
            builder = TotJacBuilder(nrows, ncols)
        J = builder.J
        shape = J.shape
        # dense rows
        for row in range(n_dense_rows):
            builder.add_row(row, density=row_density)
        # dense cols
        for col in range(n_dense_cols):
            builder.add_col(col, density=col_density)
        builder.add_random_points(n_random_pts)
        return builder

    @staticmethod
    def eisenstat(n):
        """
        Return a builder containing an Eisenstat's example Jacobian of size n+1 x n.

        Should be colorable with n/2 + 2 colors using bidirectional coloring.
        The columns in Eisenstat's example are pairwise structurally nonorthogonal,
        so a fwd directional coloring would require n groups.
        """
        assert n >= 6, "Eisenstat's example must have n >= 6."
        assert n % 2 == 0, "Eisenstat's example must have even 'n'."
        # D1..D3 are identity blocks; B is dense with a zeroed diagonal.
        D1 = np.eye(n // 2, dtype=int)
        D2 = np.eye(n // 2, dtype=int)
        D3 = np.eye(n // 2, dtype=int)
        B = np.ones((n // 2, n // 2), dtype=int)
        idxs = np.arange(n // 2, dtype=int)
        B[idxs, idxs] = 0
        C = np.ones((1, n // 2), dtype=int)
        O = np.zeros((1, n // 2), dtype=int)
        # Assemble [D1 D2; C 0; D3 B] into the (n+1) x n pattern.
        A1 = np.hstack([D1, D2])
        A2 = np.vstack([np.hstack([C, O]), np.hstack([D3, B])])
        A = np.vstack([A1, A2])
        builder = TotJacBuilder(n + 1, n)
        builder.J[:, :] = A
        return builder
def METHOD_NAME():
    """Build a TotJacBuilder holding a randomly structured jacobian."""
    randint = np.random.randint
    # Lower bounds for the random block shapes; upper bounds extend them.
    min_rows = randint(1, 10)
    min_cols = randint(1, 10)
    return TotJacBuilder.make_jac(
        n_dense_rows=randint(5),
        row_density=np.random.rand(),
        n_dense_cols=randint(5),
        col_density=np.random.rand(),
        n_blocks=randint(3, 8),
        min_shape=(min_rows, min_cols),
        max_shape=(min_rows + randint(10), min_cols + randint(10)),
        n_random_pts=randint(15),
    )
if __name__ == '__main__':
    import argparse
    import pickle

    parser = argparse.ArgumentParser()
    parser.add_argument("-e", "--eisenstat",
                        help="Build an Eisenstat's example matrix of size n+1 x n.",
                        action="store", type=int, default=-1, dest="eisenstat")
    parser.add_argument("-m", "--mode", type=str, dest="mode",
                        help="Direction of coloring (default is auto). Only used with -e.",
                        default="auto")
    parser.add_argument('-s', '--save', dest="save", default=None,
                        help="Output file for jacobian so it can be reloaded and colored using"
                             " various methods for comparison.")
    parser.add_argument('-l', '--load', dest="load", default=None,
                        help="Input file for jacobian so it can be reloaded and colored using"
                             " various methods for comparison.")

    options = parser.parse_args()

    # Choose the jacobian source: a pickled builder, Eisenstat's example,
    # or a freshly generated random structure.
    if options.load is not None:
        with open(options.load, "rb") as f:
            builder = pickle.load(f)
    elif options.eisenstat > -1:
        builder = TotJacBuilder.eisenstat(options.eisenstat)
    else:  # just do a random matrix
        builder = METHOD_NAME()

    builder.color(options.mode)
    builder.show()

    # Optionally pickle the builder for later comparison runs.
    if options.save is not None:
        with open(options.save, "wb") as f:
            pickle.dump(builder, f)
4,553 | wrap | # Copyright (C) 2000-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Some functions have been taken and adjusted from the quodlibet
# source. Quodlibet is (C) 2004-2005 Joe Wreschnig, Michael Urman
# and licensed under the GNU General Public License version 2.
"""
Various string utility functions. Note that these functions are not
necessarily optimised for large strings, so use with care.
"""
import re
import textwrap
import os
import time
import locale
import pydoc
# some handy time constants
SECONDS_PER_MINUTE = 60
SECONDS_PER_HOUR = 60 * SECONDS_PER_MINUTE
SECONDS_PER_DAY = 24 * SECONDS_PER_HOUR
def ascii_safe(s):
    """Get ASCII string without raising encoding errors.

    Non-ASCII characters are silently dropped.

    @param s: the string to be encoded
    @type s: string or None
    @return: version of s containing only ASCII characters, or s unchanged
        if s is falsy (None or empty)
    @rtype: string or None
    """
    if not s:
        return s
    return s.encode('ascii', 'ignore').decode('ascii')
def unquote(s, matching=False):
    """Remove leading and ending single and double quotes.

    At most one quote character is stripped from each end. With
    matching=True, quotes are only removed when both ends carry the same
    quote character.

    @return: if s evaluates to False or is shorter than two characters,
        return s as is, else return string with stripped quotes
    @rtype: unquoted string, or s unchanged
    """
    quotes = "\"'"
    if not s or len(s) < 2:
        return s
    if matching:
        if s[0] == s[-1] and s[0] in quotes:
            return s[1:-1]
        return s
    if s[0] in quotes:
        s = s[1:]
    if s and s[-1] in quotes:
        s = s[:-1]
    return s
def _blank_line_sep(sep):
    # Two or more consecutive `sep` separators, optionally space-padded.
    return r"(?:{sep})(?:(?:{sep})\s*)+".format(sep=sep)


_para_mac = _blank_line_sep('\r')
_para_posix = _blank_line_sep('\n')
_para_win = _blank_line_sep('\r\n')
_para_ro = re.compile(f"{_para_mac}|{_para_posix}|{_para_win}")


def get_paragraphs(text):
    """Split *text* into paragraphs.

    A new paragraph is considered to start at a line which follows
    one or more blank lines (lines containing nothing or just spaces).
    The first line of the text also starts a paragraph.
    """
    return _para_ro.split(text) if text else []
def METHOD_NAME(text, width, **kwargs):
    """Re-wrap *text* so no line is longer than *width*.

    The text is returned unmodified when width <= 0 or text is falsy.
    See textwrap.wrap() for a list of supported kwargs.
    """
    if width <= 0 or not text:
        return text
    wrapped_lines = []
    for paragraph in get_paragraphs(text):
        # Collapse all internal whitespace before re-wrapping.
        normalized = " ".join(paragraph.strip().split())
        wrapped_lines.extend(textwrap.METHOD_NAME(normalized, width, **kwargs))
    return os.linesep.join(wrapped_lines)
def indent(text, indent_string=" "):
    """Prefix each line of *text* with *indent_string*."""
    prefixed = (indent_string + line for line in text.splitlines())
    return os.linesep.join(prefixed)
def paginate(text):
    """Print text in pages of lines.

    Delegates to pydoc's pager, which uses $PAGER when set and otherwise
    falls back to a builtin pager.
    """
    pydoc.pager(text)
def strsize(b, grouping=True):
    """Return human representation of bytes b. A negative number of bytes
    raises a value error.

    Units step at 1024; precision drops from two decimals to one once the
    value exceeds ten of the current unit.
    """
    if b < 0:
        raise ValueError("Invalid negative byte number")
    KIB = 1024
    MIB = 1024 * 1024
    GIB = 1024 * 1024 * 1024
    if b < KIB:
        return "%sB" % locale.format_string("%d", b, grouping)
    if b < 10 * KIB:
        return "%sKB" % locale.format_string("%d", b // KIB, grouping)
    if b < MIB:
        return "%sKB" % locale.format_string("%.2f", float(b) / KIB, grouping)
    if b < 10 * MIB:
        return "%sMB" % locale.format_string("%.2f", float(b) / MIB, grouping)
    if b < GIB:
        return "%sMB" % locale.format_string("%.1f", float(b) / MIB, grouping)
    if b < 10 * GIB:
        return "%sGB" % locale.format_string("%.2f", float(b) / GIB, grouping)
    return "%sGB" % locale.format_string("%.1f", float(b) / GIB, grouping)
def strtime(t, func=time.localtime):
    """Return ISO 8601 formatted time with a timezone suffix."""
    stamp = time.strftime("%Y-%m-%d %H:%M:%S", func(t))
    return stamp + strtimezone()
# from quodlibet
def strduration_long(duration, do_translate=True):
    """Turn a time value in seconds into x hours, x minutes, etc.

    @param duration: duration in seconds; negative values are rendered
        with a leading minus sign
    @param do_translate: when False, identity stand-ins replace the
        gettext translation functions
    @return: human readable duration string with at most two units
    """
    if do_translate:
        # use global translator functions
        global _, _n
    else:
        # do not translate
        def _(x): return x
        def _n(a, b, n): return a if n == 1 else b
    if duration < 0:
        duration = abs(duration)
        prefix = "-"
    else:
        prefix = ""
    # Sub-second durations are shown as fractional seconds.
    if duration < 1:
        return _("%(prefix)s%(duration).02f seconds") % {
            "prefix": prefix,
            "duration": duration,
        }
    # translation dummies: these calls only mark the strings so gettext
    # extraction tools pick them up; the results are discarded.
    _n("%d second", "%d seconds", 1)
    _n("%d minute", "%d minutes", 1)
    _n("%d hour", "%d hours", 1)
    _n("%d day", "%d days", 1)
    _n("%d year", "%d years", 1)
    # (divisor to the next-larger unit, singular, plural)
    cutoffs = [
        (60, "%d second", "%d seconds"),
        (60, "%d minute", "%d minutes"),
        (24, "%d hour", "%d hours"),
        (365, "%d day", "%d days"),
        (None, "%d year", "%d years"),
    ]
    time_str = []
    # Peel units off from smallest to largest; None marks the last unit.
    for divisor, single, plural in cutoffs:
        if duration < 1:
            break
        if divisor is None:
            duration, unit = 0, duration
        else:
            duration, unit = divmod(duration, divisor)
        if unit:
            time_str.append(_n(single, plural, unit) % unit)
    time_str.reverse()
    # Keep only the two most significant units (e.g. "2 days, 3 hours").
    if len(time_str) > 2:
        time_str.pop()
    return "{}{}".format(prefix, ", ".join(time_str))
def strtimezone():
    """Return timezone info, %z on some platforms, but not supported on all.

    The offset is rendered as signed whole hours (fractional-hour zones
    are truncated).
    """
    zone = time.altzone if time.daylight else time.timezone
    return "%+04d" % (-zone // SECONDS_PER_HOUR)
def stripurl(s):
    """Remove any lines from string after the first line.
    Also remove whitespace at start and end from given string."""
    if not s:
        return s
    first_line = s.splitlines()[0]
    return first_line.strip()
def limit(s, length=72):
    """If the length of the string exceeds the given limit, it will be cut
    off and three dots will be appended.

    @param s: the string to limit
    @type s: string
    @param length: maximum length
    @type length: non-negative integer
    @return: limited string, at most length+3 characters long
    """
    assert length >= 0, "length limit must be a non-negative integer"
    if not s or len(s) <= length:
        return s
    # A zero limit leaves nothing worth suffixing with dots.
    if length == 0:
        return ""
    return f"{s[:length]}..."
def strline(s):
    """Display string representation on one line."""
    one_line = s.replace("\n", "\\n")
    return strip_control_chars(f"`{one_line}'")
def format_feature_warning(**kwargs):
    """Format warning that a module could not be imported and that it should
    be installed for a certain URL.

    Expects ``module``, ``feature`` and ``url`` keyword arguments.
    """
    template = _(
        "Could not import %(module)s for %(feature)s."
        " Install %(module)s from %(url)s to use this feature."
    )
    return template % kwargs
def strip_control_chars(text):
    """Remove ASCII control characters from text.

    Strips the C0 control range plus DEL. The previous pattern started at
    \\x01 and therefore let NUL (\\x00) through; NUL is just as much a
    control character and is now removed too.

    @param text: string to clean, may be None or empty
    @return: text without control characters, or text unchanged if falsy
    """
    if text:
        return re.sub(r"[\x00-\x1F\x7F]", "", text)
    return text
4,554 | make screenshot | """
Copyright 2008-2011,2015 Free Software Foundation, Inc.
This file is part of GNU Radio
SPDX-License-Identifier: GPL-2.0-or-later
"""
from sys import platform
import os
import numbers
from gi.repository import GLib
import cairo
from .canvas.colors import FLOWGRAPH_BACKGROUND_COLOR
from . import Constants
def get_rotated_coordinate(coor, rotation):
    """
    Rotate the coordinate by the given rotation.

    Args:
        coor: the coordinate x, y tuple
        rotation: the angle in degrees (a multiple of 90)

    Returns:
        the rotated coordinates

    Raises:
        ValueError: if the normalized angle is not a possible rotation
    """
    # Normalize negative angles into [0, 360).
    rotation = (rotation + 360) % 360
    if rotation not in Constants.POSSIBLE_ROTATIONS:
        raise ValueError('unusable rotation angle "%s"' % str(rotation))
    # (cos, sin) pairs for the four axis-aligned rotations.
    trig = {0: (1, 0), 90: (0, 1), 180: (-1, 0), 270: (0, -1)}
    cos_r, sin_r = trig[rotation]
    x, y = coor
    return x * cos_r + y * sin_r, -x * sin_r + y * cos_r
def get_angle_from_coordinates(p1, p2):
    """
    Given two points, calculate the vector direction from point1 to point2, directions are multiples of 90 degrees.

    Args:
        p1: the (x1, y1) coordinate of point 1
        p2: the (x2, y2) coordinate of point 2

    Returns:
        the direction in degrees (0, 90, 180 or 270)
    """
    x1, y1 = p1
    x2, y2 = p2
    if y1 == y2:
        # horizontal move: 0 or 180
        return 0 if x2 > x1 else 180
    # vertical move: 270 or 90
    return 270 if y2 > y1 else 90
def align_to_grid(coor, mode=round):
    """Align a coordinate (or a single scalar) to the canvas grid.

    Args:
        coor: an iterable of coordinate values, or a single scalar
        mode: rounding function applied in grid units (round, floor, ...)

    Returns:
        a list of aligned values for an iterable input, otherwise a
        single aligned value
    """
    def align(value):
        return int(mode(value / (1.0 * Constants.CANVAS_GRID_SIZE)) * Constants.CANVAS_GRID_SIZE)
    try:
        return [align(c) for c in coor]
    except TypeError:
        # Not iterable: treat it as a single scalar coordinate.
        # (Removed the dead `x = coor` assignment that served no purpose.)
        return align(coor)
def num_to_str(num):
    """Display logic for numbers: engineering notation for real/complex values,
    plain str() for everything else."""

    def eng_notation(value, fmt='g'):
        """Convert a number to a string in engineering notation. E.g., 5e-9 -> 5n"""
        template = '{:' + fmt + '}{}'
        magnitude = abs(value)
        # SI prefixes for 1e9 down to 1e-15; ' ' means no prefix.
        prefixes = zip(range(9, -16, -3), 'GMk munpf')
        for exponent, symbol in prefixes:
            scale = 10 ** exponent
            if magnitude >= scale:
                return template.format(value / scale, symbol.strip())
        return template.format(value, '')

    if not isinstance(num, numbers.Complex):
        return str(num)
    num = complex(num)  # Cast to python complex
    if num == 0:
        return '0'
    pieces = []
    if num.real:
        pieces.append(eng_notation(num.real))
    if num.imag:
        # Force an explicit sign on the imaginary part when a real part precedes it.
        imag_fmt = '+g' if pieces else 'g'
        pieces.append(eng_notation(num.imag, imag_fmt) + 'j')
    return ''.join(pieces)
def encode(value):
    """Escape markup-sensitive characters in value via GLib for safe display."""
    return GLib.markup_escape_text(value)
def METHOD_NAME(flow_graph, file_path, transparent_bg=False):
    """Render `flow_graph` to an image file (.png, .pdf or .svg).

    The cairo surface type is chosen from the file extension; any other
    extension raises ValueError. Unless transparent_bg is set, the canvas
    background color is painted first. Does nothing when file_path is empty.
    """
    if not file_path:
        return
    x_min, y_min, x_max, y_max = flow_graph.get_extents()
    # One grid cell of padding on every side of the drawing.
    padding = Constants.CANVAS_GRID_SIZE
    width = x_max - x_min + 2 * padding
    height = y_max - y_min + 2 * padding
    if file_path.endswith('.png'):
        # ImageSurface is pixel-based, so dimensions need to be integers
        # We don't round up here, because our padding should allow for up
        # to half a pixel size in loss of image area without optically bad
        # effects
        psurf = cairo.ImageSurface(cairo.FORMAT_ARGB32,
                                   round(width),
                                   round(height))
    elif file_path.endswith('.pdf'):
        psurf = cairo.PDFSurface(file_path, width, height)
    elif file_path.endswith('.svg'):
        psurf = cairo.SVGSurface(file_path, width, height)
    else:
        raise ValueError('Unknown file format')
    cr = cairo.Context(psurf)
    if not transparent_bg:
        cr.set_source_rgba(*FLOWGRAPH_BACKGROUND_COLOR)
        cr.rectangle(0, 0, width, height)
        cr.fill()
    # Shift the origin so the flowgraph's extents land inside the padding.
    cr.translate(padding - x_min, padding - y_min)
    flow_graph.create_labels(cr)
    flow_graph.create_shapes()
    flow_graph.draw(cr)
    # PNG needs an explicit pixel dump; vector surfaces finish via show_page.
    if file_path.endswith('.png'):
        psurf.write_to_png(file_path)
    if file_path.endswith('.pdf') or file_path.endswith('.svg'):
        cr.show_page()
    psurf.finish()
def scale(coor, reverse=False):
    """Scale each component of a coordinate tuple by the DPI factor
    (or by its inverse when reverse is True)."""
    factor = 1 / Constants.DPI_SCALING if reverse else Constants.DPI_SCALING
    return tuple(int(value * factor) for value in coor)
def scale_scalar(coor, reverse=False):
    """Scale a single scalar by the DPI factor (or its inverse when reverse)."""
    factor = 1 / Constants.DPI_SCALING if reverse else Constants.DPI_SCALING
    return int(coor * factor)
def get_modifier_key(angle_brackets=False):
    """
    Get the modifier key based on platform.

    Args:
        angle_brackets: if return the modifier key with <> or not

    Returns:
        return the string with the modifier key
    """
    # macOS uses the Meta (Command) key; everything else uses Ctrl.
    key = "Meta" if platform == "darwin" else "Ctrl"
    return f"<{key}>" if angle_brackets else key
_nproc = None


def get_cmake_nproc():
    """Return the number of parallel cmake jobs for C++ flowgraphs.

    Uses roughly half the usable CPUs minus one, but at least 1. The value
    is computed once and cached in the module-level ``_nproc``.
    """
    global _nproc  # Cached result
    if _nproc:
        return _nproc
    try:
        # Respects CPU affinity masks (cgroups/taskset); Linux-only.
        # See https://docs.python.org/3.8/library/os.html#os.cpu_count
        _nproc = len(os.sched_getaffinity(0))
    except AttributeError:
        # sched_getaffinity is unavailable on this platform (e.g. macOS,
        # Windows). The previous bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit; only the missing attribute is expected.
        _nproc = os.cpu_count()
    if not _nproc:
        _nproc = 1
    _nproc = max(_nproc // 2 - 1, 1)
    return _nproc
4,555 | test ultradns wait for dns change | import unittest
from unittest.mock import patch, Mock
from flask import Flask
from lemur.plugins.lemur_acme import plugin, ultradns
from requests.models import Response
class TestUltradns(unittest.TestCase):
    """Unit tests for the UltraDNS ACME DNS-01 challenge helper functions."""

    @patch("lemur.plugins.lemur_acme.plugin.dns_provider_service")
    def setUp(self, mock_dns_provider_service):
        # Build the plugin/handler under test with a stubbed DNS provider
        # registered for two domains.
        self.ACMEIssuerPlugin = plugin.ACMEIssuerPlugin()
        self.acme = plugin.AcmeHandler()
        mock_dns_provider = Mock()
        mock_dns_provider.name = "cloudflare"
        mock_dns_provider.credentials = "{}"
        mock_dns_provider.provider_type = "cloudflare"
        self.acme.dns_providers_for_domain = {
            "www.test.com": [mock_dns_provider],
            "test.fakedomain.net": [mock_dns_provider],
        }
        # Creates a new Flask application for a test duration. In python 3.8, manual push of application context is
        # needed to run tests in dev environment without getting error 'Working outside of application context'.
        _app = Flask('lemur_test_acme')
        self.ctx = _app.app_context()
        assert self.ctx
        self.ctx.push()

    def tearDown(self):
        # Pop the Flask app context pushed in setUp.
        self.ctx.pop()

    @patch("lemur.plugins.lemur_acme.ultradns.requests")
    @patch("lemur.plugins.lemur_acme.ultradns.current_app")
    def test_ultradns_get_token(self, mock_current_app, mock_requests):
        # get_ultradns_token() should extract the token from the auth response.
        # ret_val = json.dumps({"access_token": "access"})
        the_response = Response()
        the_response._content = b'{"access_token": "access"}'
        mock_requests.post = Mock(return_value=the_response)
        mock_current_app.config.get = Mock(return_value="Test")
        result = ultradns.get_ultradns_token()
        self.assertTrue(len(result) > 0)

    @patch("lemur.plugins.lemur_acme.ultradns.current_app")
    def test_ultradns_create_txt_record(self, mock_current_app):
        # create_txt_record() should POST the record, log, and return the
        # (fqdn, token) change id.
        domain = "_acme_challenge.test.example.com"
        zone = "test.example.com"
        token = "ABCDEFGHIJ"
        account_number = "1234567890"
        change_id = (domain, token)
        ultradns.get_zone_name = Mock(return_value=zone)
        mock_current_app.logger.debug = Mock()
        ultradns._post = Mock()
        log_data = {
            "function": "create_txt_record",
            "fqdn": domain,
            "token": token,
            "message": "TXT record created"
        }
        result = ultradns.create_txt_record(domain, token, account_number)
        mock_current_app.logger.debug.assert_called_with(log_data)
        self.assertEqual(result, change_id)

    @patch("lemur.plugins.lemur_acme.ultradns.current_app")
    @patch("lemur.extensions.metrics")
    def test_ultradns_delete_txt_record(self, mock_metrics, mock_current_app):
        # delete_txt_record() should delete the matching rrSet without
        # emitting debug logs or metrics on the happy path.
        domain = "_acme_challenge.test.example.com"
        zone = "test.example.com"
        token = "ABCDEFGHIJ"
        account_number = "1234567890"
        change_id = (domain, token)
        mock_current_app.logger.debug = Mock()
        ultradns.get_zone_name = Mock(return_value=zone)
        ultradns._post = Mock()
        ultradns._get = Mock()
        ultradns._get.return_value = {'zoneName': 'test.example.com.com',
                                      'rrSets': [{'ownerName': '_acme-challenge.test.example.com.',
                                                  'rrtype': 'TXT (16)', 'ttl': 5, 'rdata': ['ABCDEFGHIJ']}],
                                      'queryInfo': {'sort': 'OWNER', 'reverse': False, 'limit': 100},
                                      'resultInfo': {'totalCount': 1, 'offset': 0, 'returnedCount': 1}}
        ultradns._delete = Mock()
        mock_metrics.send = Mock()
        ultradns.delete_txt_record(change_id, account_number, domain, token)
        mock_current_app.logger.debug.assert_not_called()
        mock_metrics.send.assert_not_called()

    @patch("lemur.plugins.lemur_acme.ultradns.current_app")
    @patch("lemur.extensions.metrics")
    def METHOD_NAME(self, mock_metrics, mock_current_app):
        # wait_for_dns_change() should log the propagated status once the
        # record is visible on the public nameserver.
        ultradns._has_dns_propagated = Mock(return_value=True)
        nameserver = "1.1.1.1"
        ultradns.get_authoritative_nameserver = Mock(return_value=nameserver)
        mock_metrics.send = Mock()
        domain = "_acme-challenge.test.example.com"
        token = "ABCDEFGHIJ"
        change_id = (domain, token)
        mock_current_app.logger.debug = Mock()
        ultradns.wait_for_dns_change(change_id)
        # mock_metrics.send.assert_not_called()
        log_data = {
            "function": "wait_for_dns_change",
            "fqdn": domain,
            "status": True,
            "message": "Record status on Public DNS"
        }
        mock_current_app.logger.debug.assert_called_with(log_data)

    def test_ultradns_get_zone_name(self):
        # get_zone_name() should pick the longest matching zone suffix.
        zones = ['example.com', 'test.example.com']
        zone = "test.example.com"
        domain = "_acme-challenge.test.example.com"
        account_number = "1234567890"
        ultradns.get_zones = Mock(return_value=zones)
        result = ultradns.get_zone_name(domain, account_number)
        self.assertEqual(result, zone)

    def test_ultradns_get_zones(self):
        # get_zones() should return only PRIMARY zones from the paginated
        # API response (the SECONDARY zone is filtered out).
        account_number = "1234567890"
        path = "a/b/c"
        zones = ['example.com', 'test.example.com']
        paginate_response = [{
            'properties': {
                'name': 'example.com.', 'accountName': 'example', 'type': 'PRIMARY',
                'dnssecStatus': 'UNSIGNED', 'status': 'ACTIVE', 'resourceRecordCount': 9,
                'lastModifiedDateTime': '2017-06-14T06:45Z'},
            'registrarInfo': {
                'nameServers': {'missing': ['example.ultradns.com.', 'example.ultradns.net.',
                                            'example.ultradns.biz.', 'example.ultradns.org.']}},
            'inherit': 'ALL'}, {
            'properties': {
                'name': 'test.example.com.', 'accountName': 'example', 'type': 'PRIMARY',
                'dnssecStatus': 'UNSIGNED', 'status': 'ACTIVE', 'resourceRecordCount': 9,
                'lastModifiedDateTime': '2017-06-14T06:45Z'},
            'registrarInfo': {
                'nameServers': {'missing': ['example.ultradns.com.', 'example.ultradns.net.',
                                            'example.ultradns.biz.', 'example.ultradns.org.']}},
            'inherit': 'ALL'}, {
            'properties': {
                'name': 'example2.com.', 'accountName': 'example', 'type': 'SECONDARY',
                'dnssecStatus': 'UNSIGNED', 'status': 'ACTIVE', 'resourceRecordCount': 9,
                'lastModifiedDateTime': '2017-06-14T06:45Z'},
            'registrarInfo': {
                'nameServers': {'missing': ['example.ultradns.com.', 'example.ultradns.net.',
                                            'example.ultradns.biz.', 'example.ultradns.org.']}},
            'inherit': 'ALL'}]
        ultradns._paginate = Mock(path, "zones")
        ultradns._paginate.side_effect = [[paginate_response]]
        result = ultradns.get_zones(account_number)
        self.assertEqual(result, zones)
4,556 | status | '''
System
======
The following methods allow for interaction into the Tenable Security Center
:sc-api:`System <System.htm>` API. These API calls are typically used to
understand timezones, system version, etc.
Methods available on ``sc.system``:
.. rst-class:: hide-signature
.. autoclass:: SystemAPI
:members:
'''
from .base import SCEndpoint
from io import BytesIO
import time
class SystemAPI(SCEndpoint):
    """Endpoint wrapper for the Tenable Security Center ``System`` API."""

    def details(self):
        '''
        Retrieves information about the Tenable Security Center instance. This method should
        only be called before authentication has occurred. As most of the
        information within this call already happens upon instantiation, there
        should be little need to call this manually.

        :sc-api:`system: get <System.htm#system_GET>`

        Returns:
            :obj:`dict`:
                The response dictionary

        Examples:
            >>> info = sc.system.details()
        '''
        return self._api.get('system').json()['response']

    def diagnostics(self, task=None, options=None, fobj=None):
        '''
        Generates and downloads a diagnostic file for the purpose of
        troubleshooting an ailing Tenable Security Center instance.

        :sc-api:`system: diagnostics-generate <System.htm#SystemRESTReference-/system/diagnostics/generate>`

        :sc-api:`system: diagnostics-download <System.htm#SystemRESTReference-/system/diagnostics/download>`

        Args:
            fobj (FileObject, optional):
                The file-like object to write the diagnostics file to. If
                nothing is specified, a BytesIO object will be returnbed with
                the file.
            options (list, optional):
                If performing a diagnostics generation, then which items
                should be bundled into the diagnostics file? Available options
                are ``all``, ``apacheLog``, ``configuration``, ``dependencies``,
                ``dirlist``, ``environment``, ``installLog``, ``logs``,
                ``sanitize``, ``scans``, ``serverConf``, ``setup``, ``sysinfo``,
                and ``upgradeLog``. If nothing is specified, it will default to
                ``['all']``.
            task (str, optional):
                Which task to perform. Available options are ``appStatus`` and
                ``diagnosticsFile``. If nothing is specified, it will default
                to ``diagnosticFile``.

        Returns:
            :obj:`FileObject`:
                A file-like object with the diagnostics file specified.

        Examples:
            >>> with open('diagnostics.tar.gz', 'wb') as fobj:
            ...     sc.system.diagnostics(fobj=fobj)
        '''
        payload = {
            'task': self._check('task', task, str,
                choices=['diagnosticsFile', 'appStatus'],
                default='diagnosticsFile'),
        }
        # The available choices for the options.
        opts = ['all', 'apacheLog', 'configuration', 'dependencies',
            'dirlist', 'environment', 'installLog', 'logs', 'sanitize', 'scans',
            'serverConf', 'setup', 'sysinfo']
        # we only want to add the options to the generation call if the task is
        # a diagnostics file.
        if payload['task'] == 'diagnosticsFile':
            payload['options'] = [self._check('option:item', o, str, choices=opts)
                for o in self._check('options', options, list, default=['all'])]

        # Snapshot the current status so we can detect when generation finishes.
        METHOD_NAME = self.METHOD_NAME()

        # Make the call to generate the disagnostics file.
        self._api.post('system/diagnostics/generate', json=payload)

        # We will sleep until the file has been generated. We will know when
        # the file is ready or download as the `diagnosticsGenerated` timestamp
        # will have been updated.
        while self.METHOD_NAME()['diagnosticsGenerated'] == METHOD_NAME['diagnosticsGenerated']:
            time.sleep(5)

        # Make the call to download the file.
        resp = self._api.post('system/diagnostics/download', stream=True)

        # if no file-like object was passed, then we will instantiate a BytesIO
        # object to push the file into.
        if not fobj:
            fobj = BytesIO()

        # Lets stream the file into the file-like object...
        for chunk in resp.iter_content(chunk_size=1024):
            if chunk:
                fobj.write(chunk)
        fobj.seek(0)
        resp.close()
        return fobj

    def current_locale(self):
        '''
        Retrieves the current system locale that Tenable Security Center has been set to.

        :sc-api:`system: locale <System.htm#SystemRESTReference-/system/locale>`

        Returns:
            :obj:`dict`:
                locale resource

        Examples:
            >>> sc.system.current_locale()
        '''
        return self._api.get('system/locale').json()['response']

    def list_locales(self):
        '''
        Retrieves the available system locales that Tenable Security Center can be set to.

        :sc-api:`system: locales <System.htm#SystemRESTReference-/system/locales>`

        Returns:
            :obj:`dict`:
                locales dictionary

        Examples:
            >>> sc.system.list_locales()
        '''
        return self._api.get('system/locales').json()['response']

    def set_locale(self, locale):
        '''
        Sets the system locale to be used. This requires an administrator to
        perform this task and will be a global change. The locale determines
        which pluginset language to use.

        :sc-api:`system: set-locale <System.htm#system_locale_PATCH>`

        Args:
            locale (str): The plugin locale name

        Returns:
            :obj:`str`:
                The new plugin locale.

        Examples:
            Set the system locale to Japanese:

            >>> sc.system.set_locale('ja')
        '''
        # NOTE(review): the response is not returned to the caller even though
        # the docstring advertises one — confirm intended behavior upstream.
        self._api.patch('system/locale', json={
            'PluginLocale': self._check('locale', locale, str)
        }).json()['response']

    def METHOD_NAME(self):
        '''
        Retrieves the current system status

        :sc-api:`system: diagnostics <System.htm#SystemRESTReference-/system/diagnostics>`

        Returns:
            :obj:`dict`:
                The status dictionary

        Examples:
            >>> status = sc.system.status()
        '''
        return self._api.get('system/diagnostics').json()['response']
4,557 | query user | """
Support for RallyDev
.. versionadded:: 2015.8.0
Requires a ``username`` and a ``password`` in ``/etc/salt/minion``:
.. code-block:: yaml
rallydev:
username: myuser@example.com
password: 123pass
"""
import logging
import salt.utils.http
import salt.utils.json
from salt.exceptions import SaltInvocationError
log = logging.getLogger(__name__)
def __virtual__():
    """
    Only load the module if a RallyDev username and password are configured.

    (The previous docstring claimed this checked for apache — a copy/paste
    error from another module.)
    """
    config = __opts__.get("rallydev", {})
    # Both credentials are required; report whichever one is missing first.
    for key in ("username", "password"):
        if not config.get(key, None):
            return (
                False,
                "The rallydev execution module failed to load: rallydev:{} not"
                " defined in config.".format(key),
            )
    return True
def _get_token():
    """
    Authenticate against RallyDev and return a security token, or None if
    the authorize call did not yield a decoded response.
    """
    creds = __opts__.get("rallydev", {})
    result = salt.utils.http.query(
        "https://rally1.rallydev.com/slm/webservice/v2.0/security/authorize",
        decode=True,
        decode_type="json",
        text=True,
        status=True,
        username=creds.get("username", None),
        password=creds.get("password", None),
        cookies=True,
        persist_session=True,
        opts=__opts__,
    )
    if "dict" not in result:
        return None
    return result["dict"]["OperationResult"]["SecurityToken"]
def _query(
    action=None, command=None, args=None, method="GET", header_dict=None, data=None
):
    """
    Make a web call to RallyDev.

    action
        Top-level API collection (e.g. ``task``, ``user``), appended to the
        base webservice URL.
    command
        Optional item identifier appended after ``action``.
    args
        Query-string parameters; the auth token is always added under ``key``.
    method
        HTTP verb. Non-POST requests get a JSON ``Accept`` header; DELETE
        responses are not JSON-decoded.
    header_dict
        Extra request headers; defaults to a JSON ``Content-type``.
    data
        Request body for write operations.

    Returns a two-item list: ``[status_code, decoded_body_or_error]``.
    """
    token = _get_token()
    username = __opts__.get("rallydev", {}).get("username", None)
    password = __opts__.get("rallydev", {}).get("password", None)
    path = "https://rally1.rallydev.com/slm/webservice/v2.0/"
    if action:
        path += action
    if command:
        path += "/{}".format(command)
    log.debug("RallyDev URL: %s", path)
    if not isinstance(args, dict):
        args = {}
    # NOTE(review): when args IS a dict this mutates the caller's dict by
    # injecting the token — confirm callers never reuse the passed dict.
    args["key"] = token
    if header_dict is None:
        header_dict = {"Content-type": "application/json"}
    if method != "POST":
        header_dict["Accept"] = "application/json"
    decode = True
    if method == "DELETE":
        decode = False
    # NOTE(review): unused local — candidate for removal.
    return_content = None
    result = salt.utils.http.query(
        path,
        method,
        params=args,
        data=data,
        header_dict=header_dict,
        decode=decode,
        decode_type="json",
        text=True,
        status=True,
        username=username,
        password=password,
        cookies=True,
        persist_session=True,
        opts=__opts__,
    )
    log.debug("RallyDev Response Status Code: %s", result["status"])
    if "error" in result:
        log.error(result["error"])
        return [result["status"], result["error"]]
    return [result["status"], result.get("dict", {})]
def list_items(name):
    """
    List items of a particular type

    CLI Examples:

    .. code-block:: bash

        salt myminion rallydev.list_<item name>s
        salt myminion rallydev.list_users
        salt myminion rallydev.list_artifacts
    """
    # _query returns [status, payload]; only the payload matters here.
    _status, result = _query(action=name)
    return result
def query_item(name, query_string, order="Rank"):
    """
    Query a type of record for one or more items. Requires a valid query string.
    See https://rally1.rallydev.com/slm/doc/webservice/introduction.jsp for
    information on query syntax.

    CLI Example:

    .. code-block:: bash

        salt myminion rallydev.query_<item name> <query string> [<order>]
        salt myminion rallydev.query_task '(Name contains github)'
        salt myminion rallydev.query_task '(Name contains reactor)' Rank
    """
    params = {"query": query_string, "order": order}
    _status, result = _query(action=name, args=params)
    return result
def show_item(name, id_):
    """
    Show an item

    CLI Example:

    .. code-block:: bash

        salt myminion rallydev.show_<item name> <item id>
    """
    _status, result = _query(action=name, command=id_)
    return result
def update_item(name, id_, field=None, value=None, postdata=None):
    """
    Update an item. Either a field and a value, or a chunk of POST data, may be
    used, but not both.

    CLI Example:

    .. code-block:: bash

        salt myminion rallydev.update_<item name> <item id> field=<field> value=<value>
        salt myminion rallydev.update_<item name> <item id> postdata=<post data>
    """
    # BUG FIX: compare against None rather than truthiness so falsy-but-valid
    # values (empty string, 0, False) can still be assigned to a field. The
    # old `if field and value:` check silently dropped them and raised the
    # "must be specified" error instead.
    if field is not None and value is not None:
        if postdata:
            raise SaltInvocationError(
                "Either a field and a value, or a chunk "
                "of POST data, may be specified, but not both."
            )
        postdata = {name.title(): {field: value}}
    if postdata is None:
        raise SaltInvocationError(
            "Either a field and a value, or a chunk of POST data must be specified."
        )
    status, result = _query(
        action=name,
        command=id_,
        method="POST",
        data=salt.utils.json.dumps(postdata),
    )
    return result
def show_artifact(id_):
    """
    Show an artifact

    CLI Example:

    .. code-block:: bash

        salt myminion rallydev.show_artifact <artifact id>
    """
    # Thin wrapper over the generic show_item() for the artifact collection.
    return show_item("artifact", id_)
def list_users():
    """
    List the users

    CLI Example:

    .. code-block:: bash

        salt myminion rallydev.list_users
    """
    # Thin wrapper over the generic list_items() for the user collection.
    return list_items("user")
def show_user(id_):
    """
    Show a user

    CLI Example:

    .. code-block:: bash

        salt myminion rallydev.show_user <user id>
    """
    # Thin wrapper over the generic show_item() for the user collection.
    return show_item("user", id_)
def update_user(id_, field, value):
    """
    Update a user

    CLI Example:

    .. code-block:: bash

        salt myminion rallydev.update_user <user id> <field> <new value>
    """
    # Thin wrapper over the generic update_item() for the user collection.
    return update_item("user", id_, field, value)
def METHOD_NAME(query_string, order="UserName"):
    """
    Query user records. See ``query_item`` for details on query-string syntax.

    CLI Example:

    .. code-block:: bash

        salt myminion rallydev.query_user '(Name contains Jo)'
    """
    return query_item("user", query_string, order)
4,558 | path | from io import BytesIO
from urllib.parse import urlencode
from sentry.models import EventAttachment, File
from sentry.testutils.cases import APITestCase
from sentry.testutils.silo import region_silo_test
@region_silo_test(stable=True)
class GroupEventAttachmentsTest(APITestCase):
    """API tests for the issue (group) event-attachments list endpoint."""

    def create_attachment(self, type=None, event_id=None, file_name="hello.png"):
        # Create a File plus an EventAttachment row pointing at it, bound to
        # this test case's default event/group unless an event_id is given.
        if type is None:
            type = "event.attachment"
        self.file = File.objects.create(name=file_name, type=type)
        self.file.putfile(BytesIO(b"File contents here"))
        self.attachment = EventAttachment.objects.create(
            event_id=event_id or self.event.event_id,
            project_id=self.event.project_id,
            group_id=self.group.id,
            file_id=self.file.id,
            type=self.file.type,
            name=file_name,
        )
        return self.attachment

    def METHOD_NAME(self, types=None, event_ids=None, screenshot=False):
        # Build the endpoint URL, encoding the optional filters as a query
        # string (repeated "types"/"event_id" params, flag-style "screenshot").
        METHOD_NAME = f"/api/0/issues/{self.group.id}/attachments/"
        query = [("types", t) for t in types or ()]
        query.extend([("event_id", id) for id in event_ids or ()])
        if screenshot:
            query.append(("screenshot", 1))
        if query:
            METHOD_NAME += "?" + urlencode(query)
        return METHOD_NAME

    def test_basic(self):
        # A single attachment is listed when the feature flag is enabled.
        self.login_as(user=self.user)
        attachment = self.create_attachment()
        with self.feature("organizations:event-attachments"):
            response = self.client.get(self.METHOD_NAME())
        assert response.status_code == 200, response.content
        assert len(response.data) == 1
        assert response.data[0]["id"] == str(attachment.id)

    def test_filter(self):
        # Filtering by attachment type returns only the matching row.
        self.login_as(user=self.user)
        self.create_attachment(type="event.attachment")
        attachment2 = self.create_attachment(type="event.minidump")
        with self.feature("organizations:event-attachments"):
            response = self.client.get(self.METHOD_NAME(types=["event.minidump"]))
        assert response.status_code == 200, response.content
        assert len(response.data) == 1
        assert response.data[0]["id"] == str(attachment2.id)

    def test_screenshot_filter(self):
        # "screenshot.png" matches the screenshot filter; "screenshot-not.png"
        # must not.
        self.login_as(user=self.user)
        attachment1 = self.create_attachment(type="event.attachment", file_name="screenshot.png")
        self.create_attachment(type="event.attachment", file_name="screenshot-not.png")
        with self.feature("organizations:event-attachments"):
            response = self.client.get(self.METHOD_NAME(screenshot=True))
        assert response.status_code == 200, response.content
        assert len(response.data) == 1
        assert response.data[0]["id"] == str(attachment1.id)

    def test_second_screenshot_filter(self):
        # NOTE(review): exact duplicate of test_screenshot_filter — probably
        # meant to cover a second screenshot file name; confirm intent and
        # either differentiate or drop it.
        self.login_as(user=self.user)
        attachment1 = self.create_attachment(type="event.attachment", file_name="screenshot.png")
        self.create_attachment(type="event.attachment", file_name="screenshot-not.png")
        with self.feature("organizations:event-attachments"):
            response = self.client.get(self.METHOD_NAME(screenshot=True))
        assert response.status_code == 200, response.content
        assert len(response.data) == 1
        assert response.data[0]["id"] == str(attachment1.id)

    def test_without_feature(self):
        # The endpoint 404s when the feature flag is disabled.
        self.login_as(user=self.user)
        self.create_attachment()
        with self.feature({"organizations:event-attachments": False}):
            response = self.client.get(self.METHOD_NAME())
        assert response.status_code == 404, response.content

    def test_event_id_filter(self):
        # Filtering by a single event_id excludes attachments of other events.
        self.login_as(user=self.user)
        attachment = self.create_attachment()
        self.create_attachment(event_id="b" * 32)
        with self.feature("organizations:event-attachments"):
            response = self.client.get(self.METHOD_NAME(event_ids=[attachment.event_id]))
        assert response.status_code == 200, response.content
        assert len(response.data) == 1
        assert response.data[0]["event_id"] == attachment.event_id

    def test_multi_event_id_filter(self):
        # Multiple event_id filters combine; the assertions expect the
        # later-created attachment to be returned first.
        self.login_as(user=self.user)
        attachment = self.create_attachment()
        attachment2 = self.create_attachment(event_id="b" * 32)
        self.create_attachment(event_id="c" * 32)
        with self.feature("organizations:event-attachments"):
            response = self.client.get(
                self.METHOD_NAME(event_ids=[attachment.event_id, attachment2.event_id])
            )
        assert response.status_code == 200, response.content
        assert len(response.data) == 2
        assert response.data[0]["event_id"] == attachment2.event_id
        assert response.data[1]["event_id"] == attachment.event_id
4,559 | get completed step | # Copyright 2022 The DLRover Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import time
from dlrover.python.common.log import default_logger as logger
from dlrover.python.master.shard.base_dataset_manager import (
DatasetManger,
DatasetShardCheckpoint,
DoingTask,
Task,
)
from dlrover.python.master.shard.dataset_splitter import (
Shard,
StreamingDatasetSplitter,
)
_MAX_TASK_RETRIES = 3
class StreamingDatasetManager(DatasetManger):
    """StreamingDatasetManager create tasks with shards in a dynamic dataset.

    Attributes:
        task_type: the type of computation task like "training",
            "evaluation" and "prediction".
        batch_size: the size of a batch.
        dataset_splitter: DatasetSplitter instance to split the dataset
            into shards.
    """

    def __init__(
        self,
        task_type,
        batch_size,
        dataset_splitter: StreamingDatasetSplitter,
    ):
        super(StreamingDatasetManager, self).__init__(
            task_type, batch_size, dataset_splitter
        )
        # Longest observed single-task duration in seconds; only updated here.
        self._max_task_completed_time = 0
        # Monotonically increasing id handed to newly created tasks.
        self._task_id = 0
        # Number of training steps covered by successfully completed tasks.
        self._completed_step = 0

    def get_task(self, node_type, node_id) -> Task:
        """Return the next Task for the given worker node."""
        if not self.todo and not self._dataset_splitter.epoch_finished():
            # Start a new epoch
            # num_epochs <= 0 indicates that the master will create data
            # shards infinitely. So, the worker can use the dataset like
            # `dataset.repeat()`.
            self._dataset_splitter.create_shards()
            shards = self._dataset_splitter.get_shards()
            self._create_todo_tasks(shards)
        if not self.todo:
            # No more tasks
            return Task.create_invalid_task()
        # NOTE(review): get_task_from_todo can return None when no queued
        # shard maps to this worker's partition — confirm that a match is
        # always guaranteed at this point.
        task: Task = self.get_task_from_todo(node_id)
        self.doing[task.task_id] = DoingTask(
            task, node_type, node_id, int(time.time())
        )
        logger.info(
            "Assign task %s of dataset %s to worker %s",
            task.task_id,
            self._dataset_splitter.dataset_name,
            node_id,
        )
        return task

    def get_epoch(self):
        """Return the epoch the splitter is currently serving."""
        return self._dataset_splitter.get_epoch()

    def completed(self):
        """True when the splitter is exhausted and nothing is queued or in flight."""
        return (
            self._dataset_splitter.epoch_finished()
            and not self.todo
            and not self.doing
        )

    def _create_todo_tasks(self, shards):
        """Wrap each shard in a Task with a fresh id and queue all of them."""
        tasks = []
        for shard in shards:
            task = Task(self._task_id, self._task_type, shard)
            tasks.append(task)
            self._task_id += 1
        logger.info(
            "todo.extend: %d tasks created for dataset = %s.",
            len(tasks),
            self._dataset_splitter.dataset_name,
        )
        self.todo.extend(tasks)

    def report_task_status(self, task_id, success):
        """Record the result of a finished task.

        Returns a ``(success, doing_task)`` tuple; ``doing_task`` is None
        when ``task_id`` is not currently in flight.
        """
        # BUG FIX: `self.doing.pop(task_id)` raised KeyError for unknown ids,
        # so the "Unknown task_id" warning below was unreachable and the code
        # would have crashed on `doing_task.start_time` anyway. Pop with a
        # default and bail out early instead.
        doing_task = self.doing.pop(task_id, None)
        if doing_task is None:
            logger.warning(
                "Unknown task_id: %d of dataset %s"
                % (task_id, self._dataset_splitter.dataset_name)
            )
            return False, None
        if not success:
            logger.warning(
                "Task %d of %s failed "
                % (task_id, self._dataset_splitter.dataset_name)
            )
            self.recover_task(doing_task.task)
        else:
            self._update_completed_step(doing_task.task)
            logger.info(
                "Task:%d completed, %d remaining tasks for Dataset %s",
                task_id,
                len(self.todo) + len(self.doing),
                self._dataset_splitter.dataset_name,
            )
        # Track the slowest task seen so far (for both success and failure).
        task_completed_time = time.time() - doing_task.start_time
        if task_completed_time > self._max_task_completed_time:
            self._max_task_completed_time = task_completed_time
        return success, doing_task

    def _update_completed_step(self, task: Task):
        """Advance the step counter by the number of batches in the task's shard."""
        record_count = task.shard.end - task.shard.start
        batch_count = math.ceil(record_count / self._batch_size)
        self._completed_step += batch_count

    def METHOD_NAME(self):
        """Return the number of steps covered by completed tasks."""
        return self._completed_step

    def recover_task(self, task):
        """Re-queue a failed task unless it has exhausted its retries."""
        if not self._check_exceed_max_task_retries(task):
            self.todo.append(task)

    def _check_exceed_max_task_retries(self, task: Task):
        """Bump the task's retry count; True when it exceeded _MAX_TASK_RETRIES."""
        task.retry_count += 1
        if task.retry_count > _MAX_TASK_RETRIES:
            logger.error(
                "A task %s of failed with %d retries "
                % (task.shard.name, _MAX_TASK_RETRIES)
            )
            return True
        return False

    def get_doing_tasks(self):
        """Return the mapping of in-flight task_id -> DoingTask."""
        return self.doing

    def get_task_from_todo(self, worker_id):
        """Pop the first queued task whose shard partition maps to this worker.

        NOTE(review): implicitly returns None when no queued shard matches
        the worker's partition.
        """
        partition_offset = self._dataset_splitter.get_partition_offset()
        partition_num = partition_offset.partition_num
        for task in self.todo:
            get_partition = partition_offset.get_partition_index_by_name
            shard_partition_index = get_partition(task.shard.name)
            if shard_partition_index == worker_id % partition_num:
                self.todo.remove(task)
                return task

    def checkpoint(self):
        """Serialize todo/doing shard ranges plus the splitter state."""
        todo_shards = []
        for task in self.todo:
            todo_shards.append([task.shard.start, task.shard.end])
        doing_shards = []
        for task_id in self.doing:
            task = self.doing[task_id].task
            doing_shards.append([task.shard.start, task.shard.end])
        splitter = self._dataset_splitter.to_checkpoint()
        return DatasetShardCheckpoint(
            dataset_name=self._dataset_splitter.dataset_name,
            todo=todo_shards,
            doing=doing_shards,
            epoch=self._dataset_splitter.epoch,
            splitter=splitter,
        )

    def restore_checkpoint(self, checkpoint: DatasetShardCheckpoint):
        """Restore the task manager from a checkpoint"""
        self._dataset_splitter = StreamingDatasetSplitter.from_checkpoint(
            checkpoint.splitter
        )
        self.todo = []
        # In-flight shards are simply re-queued alongside the pending ones.
        for shard_indices in checkpoint.doing + checkpoint.todo:
            shard = Shard(
                name=self._dataset_splitter.dataset_name,
                start=shard_indices[0],
                end=shard_indices[1],
            )
            # NOTE(review): Task's first argument is an integer task id in
            # _create_todo_tasks but the dataset name is passed here —
            # confirm against Task's constructor.
            self.todo.append(
                Task(
                    self._dataset_splitter.dataset_name,
                    self._task_type,
                    shard,
                )
            )
4,560 | build arguments schema | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "relay hyco authorization-rule keys renew",
)
class Renew(AAZCommand):
    """Regenerate keys of Authorization Rule for Relay Service Hybrid Connection.

    :example: Regenerate key of Relay Service Hybrid Connection.
        az relay hyco authorization-rule keys renew --resource-group myresourcegroup --namespace-name mynamespace --hybrid-connection-name myhyco --name myauthorule --key PrimaryKey
    """

    # Generated by aaz-dev-tools (see file header); prefer regenerating over
    # hand-editing this command class.
    _aaz_info = {
        "version": "2017-04-01",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.relay/namespaces/{}/hybridconnections/{}/authorizationrules/{}/regeneratekeys", "2017-04-01"],
        ]
    }

    def _handler(self, command_args):
        # Standard aaz command flow: parse args, run operations, shape output.
        super()._handler(command_args)
        self._execute_operations()
        return self._output()

    _args_schema = None

    @classmethod
    def METHOD_NAME(cls, *args, **kwargs):
        # Build the argument schema once and memoize it on the class.
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super().METHOD_NAME(*args, **kwargs)

        # define Arg Group ""
        _args_schema = cls._args_schema
        _args_schema.name = AAZStrArg(
            options=["-n", "--name"],
            help="Name of Hybrid Connection Authorization Rule.",
            required=True,
            id_part="child_name_2",
            fmt=AAZStrArgFormat(
                min_length=1,
            ),
        )
        _args_schema.hybrid_connection_name = AAZStrArg(
            options=["--hybrid-connection-name"],
            help="Name of Hybrid Connection.",
            required=True,
            id_part="child_name_1",
            fmt=AAZStrArgFormat(
                min_length=1,
            ),
        )
        _args_schema.namespace_name = AAZStrArg(
            options=["--namespace-name"],
            help="Name of Namespace.",
            required=True,
            id_part="name",
            fmt=AAZStrArgFormat(
                max_length=50,
                min_length=6,
            ),
        )
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        _args_schema.key_value = AAZStrArg(
            options=["--key-value"],
            help="Optional. If the key value is provided, this is set to key type, or autogenerated key value set for key type.",
        )
        _args_schema.key = AAZStrArg(
            options=["--key"],
            help="Specifies Primary or Secondary key needs to be reset.",
            required=True,
            enum={"PrimaryKey": "PrimaryKey", "SecondaryKey": "SecondaryKey"},
        )
        return cls._args_schema

    def _execute_operations(self):
        self.pre_operations()
        self.HybridConnectionsRegenerateKeys(ctx=self.ctx)()
        self.post_operations()

    @register_callback
    def pre_operations(self):
        pass

    @register_callback
    def post_operations(self):
        pass

    def _output(self, *args, **kwargs):
        result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
        return result

    class HybridConnectionsRegenerateKeys(AAZHttpOperation):
        # Single POST to the regenerateKeys management-plane endpoint.
        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            if session.http_response.status_code in [200]:
                return self.on_200(session)
            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Relay/namespaces/{namespaceName}/hybridConnections/{hybridConnectionName}/authorizationRules/{authorizationRuleName}/regenerateKeys",
                **self.url_parameters
            )

        @property
        def method(self):
            return "POST"

        @property
        def error_format(self):
            return "ODataV4Format"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "authorizationRuleName", self.ctx.args.name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "hybridConnectionName", self.ctx.args.hybrid_connection_name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "namespaceName", self.ctx.args.namespace_name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2017-04-01",
                    required=True,
                ),
            }
            return parameters

        @property
        def header_parameters(self):
            parameters = {
                **self.serialize_header_param(
                    "Content-Type", "application/json",
                ),
                **self.serialize_header_param(
                    "Accept", "application/json",
                ),
            }
            return parameters

        @property
        def content(self):
            # Request body: --key-value maps to "key", --key to "keyType".
            _content_value, _builder = self.new_content_builder(
                self.ctx.args,
                typ=AAZObjectType,
                typ_kwargs={"flags": {"required": True, "client_flatten": True}}
            )
            _builder.set_prop("key", AAZStrType, ".key_value")
            _builder.set_prop("keyType", AAZStrType, ".key", typ_kwargs={"flags": {"required": True}})
            return self.serialize_content(_content_value)

        def on_200(self, session):
            data = self.deserialize_http_content(session)
            self.ctx.set_var(
                "instance",
                data,
                schema_builder=self._build_schema_on_200
            )

        _schema_on_200 = None

        @classmethod
        def _build_schema_on_200(cls):
            # Response schema is built once and memoized on the class.
            if cls._schema_on_200 is not None:
                return cls._schema_on_200
            cls._schema_on_200 = AAZObjectType()
            _schema_on_200 = cls._schema_on_200
            _schema_on_200.key_name = AAZStrType(
                serialized_name="keyName",
            )
            _schema_on_200.primary_connection_string = AAZStrType(
                serialized_name="primaryConnectionString",
            )
            _schema_on_200.primary_key = AAZStrType(
                serialized_name="primaryKey",
            )
            _schema_on_200.secondary_connection_string = AAZStrType(
                serialized_name="secondaryConnectionString",
            )
            _schema_on_200.secondary_key = AAZStrType(
                serialized_name="secondaryKey",
            )
            return cls._schema_on_200
class _RenewHelper:
    """Helper class for Renew"""


# Only the command class is part of this generated module's public API.
__all__ = ["Renew"]
4,561 | init metrics | from json import JSONDecodeError
from requests.exceptions import ConnectionError, HTTPError, InvalidURL, Timeout
from datadog_checks.base import AgentCheck
class SonarrCheck(AgentCheck):
    """Agent check that collects series/season/episode metrics from a Sonarr server."""

    # This will be the prefix of every metric and service check the integration sends
    __NAMESPACE__ = "sonarr"

    def __init__(self, name, init_config, instances):
        super(SonarrCheck, self).__init__(name, init_config, instances)
        self.url = self.instance.get("url")
        self.http.options["headers"] = {"Authorization": self.instance.get("api_key")}
        # BUG FIX: copy the configured list before appending. Appending to the
        # list object stored inside `self.instance` mutated the shared
        # instance configuration in place.
        self.tags = list(self.instance.get("tags", []))
        self.tags.append(f"url:{self.url}")

    def check(self, _):
        """Collect all metrics from the Sonarr API and emit a service check."""
        metrics = self.METHOD_NAME()
        series = self._http_get("/api/v3/series")
        self._process_series(series, metrics)
        # Episodes are fetched per-series; shows without an id are skipped.
        for show in series:
            showID = show.get("id")
            if showID is not None:
                episodes = self._http_get(f"/api/v3/episode?seriesId={showID}")
                self._process_episodes(episodes, metrics)
        missing = self._http_get("/api/v3/wanted/missing")
        self._process_missing(missing, metrics)
        self._report_metrics(metrics)
        self.service_check("can_connect", AgentCheck.OK)

    def _http_get(self, endpoint):
        """Perform HTTP request against sonarr API endpoint.

        Emits a CRITICAL `can_connect` service check and re-raises on any
        transport or JSON-decoding failure.
        """
        try:
            full_url = self.url + endpoint
            response = self.http.get(full_url)
            response.raise_for_status()
            response_json = response.json()
        except Timeout as e:
            self.service_check(
                "can_connect",
                AgentCheck.CRITICAL,
                message="Request timeout: {}, {}".format(self.url, e),
            )
            raise
        except (HTTPError, InvalidURL, ConnectionError) as e:
            self.service_check(
                "can_connect",
                AgentCheck.CRITICAL,
                message="Request failed: {}, {}".format(self.url, e),
            )
            raise
        except JSONDecodeError as e:
            self.service_check(
                "can_connect",
                AgentCheck.CRITICAL,
                message="JSON Parse failed: {}, {}".format(self.url, e),
            )
            raise
        except ValueError as e:
            # JSONDecodeError is a ValueError subclass, so this branch only
            # sees other ValueErrors; keep it after the specific handler.
            self.service_check("can_connect", AgentCheck.CRITICAL, message=str(e))
            raise
        else:
            return response_json

    def METHOD_NAME(self):
        """Create and initialize a dictionnary to hold the gathered values
        of the metrics that will be emitted by the check"""
        return {
            # series metrics
            "series.file_size": 0,
            "series.total": 0,
            "series.downloaded": 0,
            "series.monitored": 0,
            "series.unmonitored": 0,
            # seasons metrics
            "seasons.total": 0,
            "seasons.downloaded": 0,
            "seasons.monitored": 0,
            "seasons.unmonitored": 0,
            # episodes metrics
            "episodes.total": 0,
            "episodes.downloaded": 0,
            "episodes.monitored": 0,
            "episodes.unmonitored": 0,
            "episodes.missing": 0,
        }

    def _process_series(self, series, metrics):
        """Compute metrics values from series response from the sonarr API"""
        metrics["series.total"] = len(series)
        for show in series:
            if show.get("monitored"):
                metrics["series.monitored"] += 1
            else:
                metrics["series.unmonitored"] += 1
            show_statistics = show.get("statistics", {})
            # A show counts as downloaded only when 100% of episodes are on disk.
            if show_statistics.get("percentOfEpisodes", 0) == 100.0:
                metrics["series.downloaded"] += 1
            metrics["seasons.total"] += show_statistics.get("seasonCount", 0)
            metrics["episodes.total"] += show_statistics.get("totalEpisodeCount", 0)
            metrics["episodes.downloaded"] += show_statistics.get("episodeFileCount", 0)
            metrics["series.file_size"] += show_statistics.get("sizeOnDisk", 0)
            for season in show.get("seasons", []):
                if season.get("monitored"):
                    metrics["seasons.monitored"] += 1
                else:
                    metrics["seasons.unmonitored"] += 1
                if season.get("statistics", {}).get("percentOfEpisodes", 0) == 100.0:
                    metrics["seasons.downloaded"] += 1

    def _process_episodes(self, episodes, metrics):
        """Compute metrics values from episodes response from the sonarr API"""
        for episode in episodes:
            if episode.get("monitored"):
                metrics["episodes.monitored"] += 1
            else:
                metrics["episodes.unmonitored"] += 1

    def _process_missing(self, missing, metrics):
        """Compute metrics values from wanted/missing response from the sonarr API"""
        metrics["episodes.missing"] = missing.get("totalRecords", 0)

    def _report_metrics(self, metrics):
        """Report every gathered value as a gauge with the configured tags."""
        for metric, value in metrics.items():
            self.gauge(metric, value, tags=self.tags)
4,562 | smalltalk rule0 | # Leo colorizer control file for smalltalk mode.
# This file is in the public domain.
# Properties for smalltalk mode.
properties = {
    "commentEnd": "\"",
    "commentStart": "\"",
    "indentCloseBrackets": "]",
    "indentOpenBrackets": "[",
    "lineUpClosingBracket": "true",
}

# Attributes dict for smalltalk_main ruleset.
smalltalk_main_attributes_dict = {
    "default": "null",
    "digit_re": "",
    "escape": "\\",
    "highlight_digits": "true",
    "ignore_case": "false",
    "no_word_sep": "",
}

# Dictionary of attributes dictionaries for smalltalk mode.
attributesDictDict = {
    "smalltalk_main": smalltalk_main_attributes_dict,
}

# Keywords dict for smalltalk_main ruleset.
# Maps each reserved word to its color category: class names -> literal2,
# nil/true/false -> keyword1, self/super -> keyword2, isNil/not -> keyword3.
smalltalk_main_keywords_dict = {
    "Array": "literal2",
    "Boolean": "literal2",
    "Character": "literal2",
    "Date": "literal2",
    "False": "literal2",
    "Integer": "literal2",
    "Object": "literal2",
    "Smalltalk": "literal2",
    "String": "literal2",
    "Symbol": "literal2",
    "Time": "literal2",
    "Transcript": "literal2",
    "True": "literal2",
    "false": "keyword1",
    "isNil": "keyword3",
    "nil": "keyword1",
    "not": "keyword3",
    "self": "keyword2",
    "super": "keyword2",
    "true": "keyword1",
}

# Dictionary of keywords dictionaries for smalltalk mode.
keywordsDictDict = {
    "smalltalk_main": smalltalk_main_keywords_dict,
}
# Rules for smalltalk_main ruleset.
def METHOD_NAME(colorer, s, i):
    """Match a single-quoted Smalltalk string literal and paint it literal1."""
    return colorer.match_span(
        s, i, kind="literal1", begin="'", end="'"
    )
# Each rule delegates to a single matcher on the colorer object.
def smalltalk_rule1(colorer, s, i):
    # Double-quoted span: a Smalltalk comment.
    return colorer.match_span(s, i, kind="comment1", begin="\"", end="\"")
# Operator sequences (assignment, comparison, arithmetic).
def smalltalk_rule2(colorer, s, i):
    return colorer.match_plain_seq(s, i, kind="operator", seq=":=")
def smalltalk_rule3(colorer, s, i):
    return colorer.match_plain_seq(s, i, kind="operator", seq="_")
def smalltalk_rule4(colorer, s, i):
    return colorer.match_plain_seq(s, i, kind="operator", seq="=")
def smalltalk_rule5(colorer, s, i):
    return colorer.match_plain_seq(s, i, kind="operator", seq="==")
def smalltalk_rule6(colorer, s, i):
    return colorer.match_plain_seq(s, i, kind="operator", seq=">")
def smalltalk_rule7(colorer, s, i):
    return colorer.match_plain_seq(s, i, kind="operator", seq="<")
def smalltalk_rule8(colorer, s, i):
    return colorer.match_plain_seq(s, i, kind="operator", seq=">=")
def smalltalk_rule9(colorer, s, i):
    return colorer.match_plain_seq(s, i, kind="operator", seq="<=")
def smalltalk_rule10(colorer, s, i):
    return colorer.match_plain_seq(s, i, kind="operator", seq="+")
def smalltalk_rule11(colorer, s, i):
    return colorer.match_plain_seq(s, i, kind="operator", seq="-")
def smalltalk_rule12(colorer, s, i):
    return colorer.match_plain_seq(s, i, kind="operator", seq="/")
def smalltalk_rule13(colorer, s, i):
    return colorer.match_plain_seq(s, i, kind="operator", seq="*")
# Mark-previous / mark-following rules for keyword-message selectors (":"),
# symbols ("#") and character literals ("$").
def smalltalk_rule14(colorer, s, i):
    return colorer.match_mark_previous(s, i, kind="keyword3", pattern=":",
        exclude_match=True)
def smalltalk_rule15(colorer, s, i):
    return colorer.match_mark_following(s, i, kind="label", pattern="#",
        exclude_match=True)
def smalltalk_rule16(colorer, s, i):
    return colorer.match_mark_following(s, i, kind="literal1", pattern="$",
        exclude_match=True)
# Fallback: keyword-table lookup (see smalltalk_main_keywords_dict above).
def smalltalk_rule17(colorer, s, i):
    return colorer.match_keywords(s, i)
# Rules dict for smalltalk_main ruleset.
# First-character dispatch table: maps the character under the scanner to
# the ordered list of rule functions that may start a match there.
rulesDict1 = {
    "\"": [smalltalk_rule1,],
    "#": [smalltalk_rule15,],
    "$": [smalltalk_rule16,],
    "'": [METHOD_NAME,],
    "*": [smalltalk_rule13,],
    "+": [smalltalk_rule10,],
    "-": [smalltalk_rule11,],
    "/": [smalltalk_rule12,],
    "0": [smalltalk_rule17,],
    "1": [smalltalk_rule17,],
    "2": [smalltalk_rule17,],
    "3": [smalltalk_rule17,],
    "4": [smalltalk_rule17,],
    "5": [smalltalk_rule17,],
    "6": [smalltalk_rule17,],
    "7": [smalltalk_rule17,],
    "8": [smalltalk_rule17,],
    "9": [smalltalk_rule17,],
    ":": [smalltalk_rule2, smalltalk_rule14,],
    "<": [smalltalk_rule7, smalltalk_rule9,],
    "=": [smalltalk_rule4, smalltalk_rule5,],
    ">": [smalltalk_rule6, smalltalk_rule8,],
    "@": [smalltalk_rule17,],
    "A": [smalltalk_rule17,],
    "B": [smalltalk_rule17,],
    "C": [smalltalk_rule17,],
    "D": [smalltalk_rule17,],
    "E": [smalltalk_rule17,],
    "F": [smalltalk_rule17,],
    "G": [smalltalk_rule17,],
    "H": [smalltalk_rule17,],
    "I": [smalltalk_rule17,],
    "J": [smalltalk_rule17,],
    "K": [smalltalk_rule17,],
    "L": [smalltalk_rule17,],
    "M": [smalltalk_rule17,],
    "N": [smalltalk_rule17,],
    "O": [smalltalk_rule17,],
    "P": [smalltalk_rule17,],
    "Q": [smalltalk_rule17,],
    "R": [smalltalk_rule17,],
    "S": [smalltalk_rule17,],
    "T": [smalltalk_rule17,],
    "U": [smalltalk_rule17,],
    "V": [smalltalk_rule17,],
    "W": [smalltalk_rule17,],
    "X": [smalltalk_rule17,],
    "Y": [smalltalk_rule17,],
    "Z": [smalltalk_rule17,],
    "_": [smalltalk_rule3,],
    "a": [smalltalk_rule17,],
    "b": [smalltalk_rule17,],
    "c": [smalltalk_rule17,],
    "d": [smalltalk_rule17,],
    "e": [smalltalk_rule17,],
    "f": [smalltalk_rule17,],
    "g": [smalltalk_rule17,],
    "h": [smalltalk_rule17,],
    "i": [smalltalk_rule17,],
    "j": [smalltalk_rule17,],
    "k": [smalltalk_rule17,],
    "l": [smalltalk_rule17,],
    "m": [smalltalk_rule17,],
    "n": [smalltalk_rule17,],
    "o": [smalltalk_rule17,],
    "p": [smalltalk_rule17,],
    "q": [smalltalk_rule17,],
    "r": [smalltalk_rule17,],
    "s": [smalltalk_rule17,],
    "t": [smalltalk_rule17,],
    "u": [smalltalk_rule17,],
    "v": [smalltalk_rule17,],
    "w": [smalltalk_rule17,],
    "x": [smalltalk_rule17,],
    "y": [smalltalk_rule17,],
    "z": [smalltalk_rule17,],
}

# x.rulesDictDict for smalltalk mode.
rulesDictDict = {
    "smalltalk_main": rulesDict1,
}

# Import dict for smalltalk mode (no imported rulesets).
importDict = {}
4,563 | run reindex | #
# Copyright 2017-2018 Government of Canada
# Public Services and Procurement Canada - buyandsell.gc.ca
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Importing this file causes the standard settings to be loaded
and a standard service manager to be created. This allows services
to be properly initialized before the webserver process has forked.
"""
import asyncio
import logging
import os
import requests
import threading
import django.db
from django.conf import settings
from wsgi import application
LOGGER = logging.getLogger(__name__)
def run_django_proc(proc, *args):
    """Run *proc(*args)*, always closing Django DB connections afterwards."""
    try:
        return proc(*args)
    finally:
        # Executor threads would otherwise accumulate per-thread database
        # connections that are never returned.
        django.db.connections.close_all()
def run_django(proc, *args) -> asyncio.Future:
    """Schedule a Django-touching callable on the default executor."""
    # NOTE(review): asyncio.get_event_loop() is deprecated outside a running
    # event loop on modern Python — confirm callers always invoke this from
    # within the loop, or switch to asyncio.get_running_loop().
    return asyncio.get_event_loop().run_in_executor(None, run_django_proc, proc, *args)
def METHOD_NAME():
    """Rebuild the search index via the ``update_index`` management command."""
    from django.core.management import call_command

    # Batch size is configurable through the environment; defaults to 500.
    batch_size = os.getenv("SOLR_BATCH_SIZE", 500)
    call_command(
        "update_index", "--max-retries=5", "--batch-size={}".format(batch_size)
    )
def run_migration():
    """Apply any outstanding Django database migrations.

    Imported lazily so Django's app registry is fully set up before the
    management machinery is touched.
    """
    from django.core.management import call_command
    call_command("migrate")
async def add_server_headers(request, response):
    """aiohttp ``on_response_prepare`` hook: tag responses with the serving host.

    Sets an ``X-Served-By`` header carrying $HOSTNAME, unless the variable is
    unset or the header was already supplied upstream.
    """
    host = os.environ.get("HOSTNAME")
    if not host:
        return
    if "X-Served-By" not in response.headers:
        response.headers["X-Served-By"] = host
# Global SolrQueue instance; created in init_app() and drained/stopped in
# on_app_shutdown(). Remains None until init_app() has run.
app_solrqueue = None
async def connect_agent():
    """Ensure the agent has a connection to itself.

    Self-presentation requests are sent over this connection later. On startup
    we look for an existing connection recorded under the configured alias and,
    if none exists, create an invitation and immediately receive it ourselves.
    """
    # placeholder
    LOGGER.info(">>>>> connect_agent() <<<<<")
    base = settings.AGENT_ADMIN_URL
    alias = settings.AGENT_SELF_CONNECTION_ALIAS
    headers = settings.ADMIN_REQUEST_HEADERS
    existing = requests.get(
        f"{base}/connections" + f"?alias={alias}",
        headers=headers,
    ).json()
    # We only need to form a self connection once
    if existing["results"]:
        return
    invitation = requests.post(
        f"{base}/connections/create-invitation" + f"?alias={alias}",
        headers=headers,
    ).json()
    requests.post(
        f"{base}/connections/receive-invitation",
        json=invitation["invitation"],
        headers=headers,
    )
async def on_app_startup(app):
    """aiohttp startup hook: kick off the agent self-connection in the background."""
    LOGGER.info(">>>>> on_startup() <<<<<")
    # Fire-and-forget; startup must not block on the agent admin API.
    asyncio.ensure_future(connect_agent())
async def on_app_cleanup(app):
    """aiohttp cleanup hook; currently only logs that cleanup ran."""
    # placeholder
    LOGGER.info(">>>>> on_cleanup() <<<<<")
async def on_app_shutdown(app):
    """
    Wait for indexing queue to drain on shutdown.
    """
    global app_solrqueue
    LOGGER.error(">>>>> on_shutdown() <<<<<")
    if not app_solrqueue:
        return
    # Poll once a second until the queue reports idle, then stop it cleanly.
    while app_solrqueue.isactive():
        LOGGER.error(">>>>> waiting ... %s <<<<<" % app_solrqueue.qsize())
        await asyncio.sleep(1)
    await app_solrqueue.app_stop()
    LOGGER.error(">>>>> completed <<<<<")
async def init_app(on_startup=None, on_cleanup=None, on_shutdown=None):
    """Build the aiohttp application that fronts the Django WSGI app.

    Every HTTP request is delegated to Django through aiohttp-wsgi; a SolrQueue
    is attached for background index updates, the optional lifecycle hooks are
    registered, and server-identification headers are enabled unless the
    DISABLE_SERVER_HEADERS environment variable says otherwise.
    """
    from aiohttp.web import Application
    from aiohttp_wsgi import WSGIHandler
    from vcr_server.utils.solrqueue import SolrQueue

    global app_solrqueue

    app = Application()
    # all requests forwarded to django
    app.router.add_route("*", "/{path_info:.*}", WSGIHandler(application))

    app_solrqueue = SolrQueue()
    app_solrqueue.setup(app=app)

    # Register whichever lifecycle hooks the caller supplied.
    for signal, hook in (
        (app.on_startup, on_startup),
        (app.on_cleanup, on_cleanup),
        (app.on_shutdown, on_shutdown),
    ):
        if hook:
            signal.append(hook)

    no_headers = os.environ.get("DISABLE_SERVER_HEADERS")
    if not no_headers or no_headers == "false":
        app.on_response_prepare.append(add_server_headers)
    return app
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    """Builds tiny random BERT configs and inputs for the shared Flax test mixin.

    NOTE(review): despite subclassing ``unittest.TestCase``, instances are used
    as a plain helper by ``FlaxBertModelTest`` (constructed with the outer test
    as *parent*), not collected as test cases themselves.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        # Store every knob verbatim; FlaxModelTesterMixin reads these
        # attributes directly by name.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return a random (config, input_ids, token_type_ids, attention_mask) tuple."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Repackage the inputs as the dict shape the common mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        """Variant for decoder tests: adds random encoder states/mask.

        Note the returned tuple intentionally drops token_type_ids.
        """
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Runs the shared Flax model test suite over the BERT head classes."""

    test_head_masking = True

    # One entry per head class. Fix: FlaxBertForQuestionAnswering was
    # previously listed twice, which made the shared mixin run every test
    # twice for that class.
    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def METHOD_NAME(self):
        """Smoke-test loading pretrained weights from the Hub.

        Only check this for base model, not necessary for all model classes.
        This will also help speed-up tests.
        """
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
#! /usr/bin/env python
#
# example_wireframe.py -- Example of a 3D plot with octahedron and wireframe
#
"""
Example of 3D plotting in Ginga
Plots an octahedron within a wireframe sphere.
Run with no parameters. Scroll to zoom in/out, click and drag to orbit.
Requirements: Qt5, OpenGL, numpy
"""
import sys
import numpy as np
from ginga import toolkit
toolkit.use('qt5')
from ginga.gw import Widgets # noqa
from ginga.qtw.ImageViewQt import CanvasView # noqa
from ginga.canvas.CanvasObject import get_canvas_types # noqa
from ginga.canvas import transform # noqa
from ginga.misc import log # noqa
class Viewer(object):
    """Minimal Qt window hosting an OpenGL-backed Ginga canvas viewer."""

    def __init__(self, app):
        super(Viewer, self).__init__()
        self.logger = app.logger
        self.dc = get_canvas_types()

        self.top = app.make_window(title="Simple Ginga 3D Viewer")

        # OpenGL rendering is required for the 3D primitives drawn later.
        vw = CanvasView(self.logger, render='opengl')
        vw.ui_set_active(True)
        self.vw = vw

        # quick hack to get 'u' to invoke hidden camera mode
        bm = vw.get_bindmap()
        bm.mode_map['u'] = bm.mode_map['mode_camera']

        bd = vw.get_bindings()
        bd.enable_all(True)

        # canvas that we will draw on
        canvas = self.dc.DrawingCanvas()
        canvas.set_surface(vw)
        self.canvas = canvas
        # add canvas to view
        private_canvas = vw.get_canvas()
        private_canvas.add(canvas)

        # little hack because we don't have a way yet to ask for this
        # variation of back end through ginga.toolkit
        ww = Widgets.wrap(vw.get_widget())

        # Layout: the viewer widget on top, a button row (with Quit) below.
        vbox = Widgets.VBox()
        vbox.set_border_width(2)
        vbox.set_spacing(1)
        vbox.add_widget(ww, stretch=1)
        hbox = Widgets.HBox()
        hbox.set_border_width(4)
        wquit = Widgets.Button("Quit")
        wquit.add_callback('activated', self.quit)
        hbox.add_widget(Widgets.Label(''), stretch=1)
        hbox.add_widget(wquit)
        vbox.add_widget(hbox)
        self.top.set_widget(vbox)

    def quit(self, w):
        """Quit-button callback: close the window and exit the process."""
        self.top.delete()
        sys.exit(0)
def plot_octahedron(viewer, r):
    """Add an octahedron of radius *r* to *viewer*'s canvas.

    The eight faces are drawn as filled polygons in progressively lighter
    shades of gray so the solid's orientation is easy to read.
    """
    # Six vertices on the unit sphere; each face below is a triangle of three.
    A = [0.17770898, 0.72315927, 0.66742804]
    B = [-0.65327074, -0.4196453, 0.63018661]
    C = [0.65382635, 0.42081934, -0.62882604]
    D = [-0.17907021, -0.72084723, -0.66956189]
    E = [-0.73452809, 0.5495376, -0.39809158]
    F = [0.73451554, -0.55094017, 0.39617148]
    faces = [
        [E, A, B], [E, B, D], [E, D, C], [E, C, A],
        [F, A, B], [F, B, D], [F, D, C], [F, C, A],
    ]
    for idx, face in enumerate(faces):
        scaled = [np.asarray(vertex) * r for vertex in face]
        fill = 'gray%d' % (idx * 10 + 5)
        viewer.canvas.add(viewer.dc.Polygon(scaled, color='yellow',
                                            fill=True, fillcolor=fill,
                                            fillalpha=0.4))
def METHOD_NAME(viewer, x, y, z, **kwargs):
    """Produce a compound object of paths implementing a wireframe.

    x, y, z are expected to be 2D arrays of points making up the mesh.
    """
    # TODO: something like this would make a great utility function
    # for ginga
    n, m = x.shape
    paths = []
    # One path per row of the mesh ...
    for i in range(n):
        row = np.asarray([(x[i][j], y[i][j], z[i][j]) for j in range(m)])
        paths.append(viewer.dc.Path(row, **kwargs))
    # ... and one per column, completing the criss-cross wireframe.
    for j in range(m):
        col = np.asarray([(x[i][j], y[i][j], z[i][j]) for i in range(n)])
        paths.append(viewer.dc.Path(col, **kwargs))
    return viewer.dc.CompoundObject(*paths)
def plot_sphere(viewer, r):
    """Overlay a translucent cyan wireframe sphere of radius *r* on *viewer*."""
    # Parametrize the sphere surface on a 30x30 (polar u, azimuthal v) grid.
    u = np.linspace(0, np.pi, 30)
    v = np.linspace(0, 2 * np.pi, 30)
    x = np.outer(np.sin(u), np.sin(v)) * r
    y = np.outer(np.sin(u), np.cos(v)) * r
    z = np.outer(np.cos(u), np.ones_like(v)) * r
    viewer.canvas.add(METHOD_NAME(viewer, x, y, z, color='cyan', alpha=0.3))
# --- Script entry point: build the viewer, draw the scene, run the GUI loop ---
logger = log.get_logger('example', level=20, log_stderr=True)
app = Widgets.Application(logger)
v = Viewer(app)
v.top.resize(512, 512)
v.top.show()
# put viewer in camera mode
bm = v.vw.get_bindmap()
bm.set_mode('camera', mode_type='locked')
# toggle 3D view
v.vw.renderer.mode3d = True
v.vw.update_widget()
# Radius shared by the octahedron and its enclosing wireframe sphere.
r = 100
plot_octahedron(v, r)
plot_sphere(v, r)
v.vw.zoom_fit()
# Blocks until the window is closed / Quit is pressed.
app.mainloop()
# END
from gaphas.canvas import instant_cairo_context
from gaphas.item import Item
from gaphor import UML
from gaphor.core.modeling import DrawContext
from gaphor.core.modeling.diagram import FALLBACK_STYLE
from gaphor.diagram.tests.fixtures import allow, connect, disconnect
from gaphor.UML.interactions.executionspecification import ExecutionSpecificationItem
from gaphor.UML.interactions.lifeline import LifelineItem
def test_execution_specification_implements_item_protocol(diagram):
    """A freshly created execution specification satisfies the gaphas Item protocol."""
    assert isinstance(diagram.create(ExecutionSpecificationItem), Item)
def create_lifeline_with_execution_specification(diagram, element_factory):
    """Helper: build a lifeline with a visible lifetime and connect a new
    execution specification to its lifetime port."""
    subject = element_factory.create(UML.Lifeline)
    lifeline = diagram.create(LifelineItem, subject=subject)
    lifeline.lifetime.visible = True
    exec_spec = diagram.create(ExecutionSpecificationItem)
    head = exec_spec.handles()[0]
    connect(exec_spec, head, lifeline, lifeline.lifetime.port)
    return lifeline, exec_spec
def test_draw_on_canvas(diagram):
    """Drawing an execution specification on a plain cairo context must not raise."""
    exec_spec = diagram.create(ExecutionSpecificationItem)
    context = DrawContext(
        cairo=instant_cairo_context(),
        style=FALLBACK_STYLE,
        selected=False,
        focused=False,
        hovered=False,
        dropzone=False,
    )
    exec_spec.draw(context)
def test_allow_execution_specification_to_lifeline(diagram):
    """Gluing an execution specification to a lifeline's lifetime is permitted."""
    lifeline = diagram.create(LifelineItem)
    lifeline.lifetime.visible = True
    exec_spec = diagram.create(ExecutionSpecificationItem)
    head = exec_spec.handles()[0]
    assert allow(exec_spec, head, lifeline, lifeline.lifetime.port)
def METHOD_NAME(diagram, element_factory):
    """Connecting to a lifeline gives the execution specification a subject
    whose occurrence specifications cover that lifeline."""
    lifeline, exec_spec = create_lifeline_with_execution_specification(
        diagram, element_factory
    )
    assert exec_spec.subject
    assert lifeline.subject
    subject = exec_spec.subject
    assert subject.start.covered is lifeline.subject
    occurrence = subject.executionOccurrenceSpecification[0]
    assert occurrence.covered is lifeline.subject
def test_disconnect_execution_specification_from_lifeline(diagram, element_factory):
    """Disconnecting clears the subject and deletes the related model elements."""
    lifeline, exec_spec = create_lifeline_with_execution_specification(
        diagram, element_factory
    )
    disconnect(exec_spec, exec_spec.handles()[0])
    assert lifeline.subject
    assert exec_spec.subject is None
    assert exec_spec.diagram
    # All model-level elements created by the connection must be gone.
    assert element_factory.lselect(UML.ExecutionSpecification) == []
    assert element_factory.lselect(UML.ExecutionOccurrenceSpecification) == []
def test_allow_execution_specification_to_execution_specification(diagram):
    """One execution specification may be glued onto another."""
    parent = diagram.create(ExecutionSpecificationItem)
    child = diagram.create(ExecutionSpecificationItem)
    assert allow(parent, parent.handles()[0], child, child.ports()[0])
def test_connect_execution_specification_to_execution_specification(
    diagram, element_factory
):
    """Two execution specifications connected without any lifeline get no subjects."""
    parent = diagram.create(ExecutionSpecificationItem)
    child = diagram.create(ExecutionSpecificationItem)
    connect(child, child.handles()[0], parent, parent.ports()[0])
    assert not parent.subject
    assert not child.subject
def test_connect_execution_specification_to_execution_specification_with_lifeline(
    diagram, element_factory
):
    """A child connected to a parent that already sits on a lifeline is itself
    covered by that lifeline."""
    lifeline, parent = create_lifeline_with_execution_specification(
        diagram, element_factory
    )
    child = diagram.create(ExecutionSpecificationItem)
    connect(child, child.handles()[0], parent, parent.ports()[0])
    assert child.subject
    assert lifeline.subject
    assert child.subject.start.covered is lifeline.subject
    occurrence = child.subject.executionOccurrenceSpecification[0]
    assert occurrence.covered is lifeline.subject
def test_connect_execution_specification_with_execution_specification_to_lifeline(
    diagram, element_factory
):
    """Connecting a parent that already carries a child onto a lifeline covers
    both execution specifications."""
    lifeline = diagram.create(
        LifelineItem, subject=element_factory.create(UML.Lifeline)
    )
    lifeline.lifetime.visible = True
    parent = diagram.create(ExecutionSpecificationItem)
    child = diagram.create(ExecutionSpecificationItem)
    # Build the child->parent chain first, then drop the chain on the lifeline.
    connect(child, child.handles()[0], parent, parent.ports()[0])
    connect(parent, parent.handles()[0], lifeline, lifeline.lifetime.port)
    assert parent.subject
    assert child.subject
    assert lifeline.subject
    assert parent.subject.start.covered is lifeline.subject
    assert child.subject.start.covered is lifeline.subject
    occurrence = child.subject.executionOccurrenceSpecification[0]
    assert occurrence.covered is lifeline.subject
def test_disconnect_execution_specification_with_execution_specification_from_lifeline(
    diagram, element_factory
):
    """Disconnecting the root execution specification clears subjects on the
    whole nested chain and deletes the model elements."""
    lifeline, parent = create_lifeline_with_execution_specification(
        diagram, element_factory
    )
    child = diagram.create(ExecutionSpecificationItem)
    grandchild = diagram.create(ExecutionSpecificationItem)
    connect(child, child.handles()[0], parent, parent.ports()[0])
    connect(grandchild, grandchild.handles()[0], child, child.ports()[0])
    assert child.parent is parent
    disconnect(parent, parent.handles()[0])
    assert lifeline.subject
    assert parent.subject is None
    assert child.subject is None
    assert grandchild.subject is None
    assert element_factory.lselect(UML.ExecutionSpecification) == []
    assert element_factory.lselect(UML.ExecutionOccurrenceSpecification) == []
def test_save_and_load(diagram, element_factory, saver, loader):
    """Round-trip a lifeline/execution-specification pair through save/load."""
    lifeline, exec_spec = create_lifeline_with_execution_specification(
        diagram, element_factory
    )
    diagram.update_now((lifeline, exec_spec))
    loader(saver())
    exec_specs = element_factory.lselect(
        lambda e: e.isKindOf(UML.ExecutionSpecification)
    )
    assert len(exec_specs) == 1
    occurrences = element_factory.lselect(
        lambda e: e.isKindOf(UML.ExecutionOccurrenceSpecification)
    )
    assert len(occurrences) == 2
    # The reloaded presentation must still be connected at its head handle.
    loaded_exec_spec = exec_specs[0].presentation[0]
    assert loaded_exec_spec.diagram.connections.get_connection(
        loaded_exec_spec.handles()[0]
    )
'''
Creates an svg-file with spawning regions of a species colored depending on the rarity.
'''
from typing import List
from ..common import SVGBoundaries
from .consts import POINT_RADIUS
from .intermediate_types import SpawnPoint, SpawnRectangle
from .species import get_rarity_for_spawn
# These CSS class names are also defined on the ARK Wiki (https://ark.gamepedia.com/MediaWiki:Common.css) and thus shouldn't be renamed here.
# Index order matters: a shape's rarity level (0 = very rare ... 5 = very
# common) is used directly as an index into this list by the SVG generators.
CSS_RARITY_CLASSES = [
    'spawningMap-very-rare', 'spawningMap-rare', 'spawningMap-very-uncommon', 'spawningMap-uncommon', 'spawningMap-common',
    'spawningMap-very-common'
]
def is_group_in_cave(path):
    """Return True if the spawn-group *path* names a cave or underwater-ground group."""
    return any(marker in path for marker in ('Cave', 'UnderwaterGround'))
def find_frequency_for_group(frequency_set, group_path) -> float:
    """Look up the spawn-entry frequency recorded for *group_path*.

    Args:
        frequency_set: Iterable of objects with ``path`` and ``frequency`` attributes.
        group_path: Spawn-group path to search for.

    Returns:
        The frequency of the first matching entry, or 0.0 when the group is
        absent. (Fix: previously returned the int ``0`` despite the ``float``
        annotation; callers compare against 0, which still holds.)
    """
    return next(
        (sef.frequency for sef in frequency_set if sef.path == group_path),
        0.0,
    )
def METHOD_NAME(bounds: SVGBoundaries, spawns, spawn_entries_frequencies, always_untameable):
    """Convert raw spawn data into drawable shapes, bucketed by rarity.

    Returns a pair ``(regions, points)``; each is a list of 6 lists (one per
    rarity level, matching CSS_RARITY_CLASSES order) holding SpawnRectangle /
    SpawnPoint objects in SVG pixel coordinates.
    """
    v_regions: List[List[SpawnRectangle]] = [[] for _ in range(6)]
    v_points: List[List[SpawnPoint]] = [[] for _ in range(6)]
    for s in spawns['spawns']:
        # Check if spawngroup exists for current species
        if not s.get('locations', None) or s.get('disabled', False) or s.get('minDesiredNumberOfNPC', -1) <= 0:
            continue
        frequency = find_frequency_for_group(spawn_entries_frequencies, s['spawnGroup'])
        if frequency == 0:
            continue
        rarity = get_rarity_for_spawn(s, frequency)
        if 'spawnLocations' in s:
            for region in s['spawnLocations']:
                # Add small border to avoid gaps
                # (lat/long -> pixel projection, padded by 3px on each side)
                x1 = round((region['start']['long'] - bounds.border_left) * bounds.size / bounds.coord_width) - 3
                x2 = round((region['end']['long'] - bounds.border_left) * bounds.size / bounds.coord_width) + 3
                y1 = round((region['start']['lat'] - bounds.border_top) * bounds.size / bounds.coord_height) - 3
                y2 = round((region['end']['lat'] - bounds.border_top) * bounds.size / bounds.coord_height) + 3
                # Clamp the values
                x1 = min(bounds.size, max(0, x1))
                x2 = min(bounds.size, max(0, x2))
                y1 = min(bounds.size, max(0, y1))
                y2 = min(bounds.size, max(0, y2))
                # Make sure the order is right (x1,y1 becomes the top-left corner)
                if x1 > x2:
                    x2, x1 = x1, x2
                if y1 > y2:
                    y2, y1 = y1, y2
                w = x2 - x1
                h = y2 - y1
                untameable = always_untameable or s['forceUntameable']
                # Skip if the volume does not cover a non-zero area
                if w == 0 or h == 0:
                    continue
                v_regions[rarity].append(SpawnRectangle(x1, y1, w, h, is_group_in_cave(s['spawnGroup']), untameable))
        if 'spawnPoints' in s:
            for point in s['spawnPoints']:
                # Add small border to avoid gaps
                x = round((point['long'] - bounds.border_left) * bounds.size / bounds.coord_width)
                y = round((point['lat'] - bounds.border_top) * bounds.size / bounds.coord_height)
                if x < 0 or y < 0 or x > bounds.size or y > bounds.size:
                    # Out of bounds, skip
                    continue
                x = min(bounds.size, max(0, x))
                y = min(bounds.size, max(0, y))
                untameable = always_untameable or s['forceUntameable']
                v_points[rarity].append(SpawnPoint(x, y, is_group_in_cave(s['spawnGroup']), untameable))
    return v_regions, v_points
def _generate_svg_spawn_regions(rarity_sets):
    """Render spawn rectangles as blurred, translucent SVG rects grouped by rarity class."""
    parts = ['\n<g filter="url(#blur)" opacity="0.7">']
    for rarity, regions in enumerate(rarity_sets):
        if not regions:
            continue
        parts.append(f'\n<g class="{CSS_RARITY_CLASSES[rarity]}">')
        parts.extend(
            f'\n<rect x="{region.x}" y="{region.y}" width="{region.w}" height="{region.h}" />'
            for region in regions
        )
        parts.append('\n</g>')
    parts.append('\n</g>')
    return ''.join(parts)
def _generate_svg_spawn_points(rarity_sets):
    """Render spawn points as outlined SVG circles grouped by rarity class."""
    parts = ['\n<g class="spawning-map-point" opacity="0.8">']
    for rarity, points in enumerate(rarity_sets):
        if not points:
            continue
        parts.append(f'\n<g class="{CSS_RARITY_CLASSES[rarity]}">')
        parts.extend(
            f'\n<circle cx="{point.x}" cy="{point.y}" r="{2 * POINT_RADIUS}" />'
            for point in points
        )
        parts.append('\n</g>')
    parts.append('\n</g>')
    return ''.join(parts)
def _generate_svg_untameables(rarity_sets):
    """Render a black-striped overlay over every untameable spawn rectangle.

    Returns an empty string when no region is flagged untameable.
    """
    rects = [r for regions in rarity_sets for r in regions if r.untameable]
    if not rects:
        return ''
    body = ''.join(
        f'\n<rect x="{r.x}" y="{r.y}" width="{r.w}" height="{r.h}"/>' for r in rects
    )
    return '\n<g fill="url(#pattern-untameable)" opacity="0.3">' + body + '\n</g>'
def _generate_svg_caves(rarity_sets):
    """Render an outline overlay over every cave spawn rectangle.

    Returns an empty string when no region lies inside a cave.
    """
    rects = [r for regions in rarity_sets for r in regions if r.cave]
    if not rects:
        return ''
    body = ''.join(
        f'\n<rect x="{r.x}" y="{r.y}" width="{r.w}" height="{r.h}"/>' for r in rects
    )
    return '\n<g filter="url(#groupStroke)" opacity="0.8">' + body + '\n</g>'
def generate_svg_map(bounds: SVGBoundaries, spawn_freqs, spawns, force_untameable):
    """Assemble the complete spawning-map SVG document as a string.

    Returns None when the species has no drawable spawn regions or points.
    The <defs> template below (blur filter, untameable stripe pattern, cave
    outline filter, rarity CSS) is referenced by id/class from the shape
    generators, so the two must stay in sync.
    """
    svg_output = f'''<?xml version="1.0" encoding="utf-8"?>
<svg xmlns="http://www.w3.org/2000/svg" width="{bounds.size}" height="{bounds.size}"
viewBox="0 0 {bounds.size} {bounds.size}" class="creatureMap" style="position:absolute;">
<defs>
<filter id="blur" x="-30%" y="-30%" width="160%" height="160%">
<feGaussianBlur stdDeviation="{round(bounds.size / 100)}" />
</filter>
<pattern id="pattern-untameable" width="10" height="10" patternTransform="rotate(135)" patternUnits="userSpaceOnUse">
<rect width="4" height="10" fill="black"></rect>
</pattern>
<filter id="groupStroke">
<feFlood result="outsideColor" flood-color="black"/>
<feMorphology in="SourceAlpha" operator="dilate" radius="2"/>
<feComposite result="strokeoutline1" in="outsideColor" operator="in"/>
<feComposite result="strokeoutline2" in="strokeoutline1" in2="SourceAlpha" operator="out"/>
<feGaussianBlur in="strokeoutline2" result="strokeblur" stdDeviation="1"/>
</filter>
<style>
.spawningMap-very-common {{ fill: #0F0; }}
.spawningMap-common {{ fill: #B2FF00; }}
.spawningMap-uncommon {{ fill: #FF0; }}
.spawningMap-very-uncommon {{ fill: #FC0; }}
.spawningMap-rare {{ fill: #F60; }}
.spawningMap-very-rare {{ fill: #F00; }}
.spawning-map-point {{ stroke:black; stroke-width:1; }}
</style>
</defs>
'''
    # Generate intermediate shape objects out of spawning data
    regions_by_rarity, points_by_rarity = METHOD_NAME(bounds, spawns, spawn_freqs, force_untameable)
    has_regions = sum(len(regions) for regions in regions_by_rarity) != 0
    has_points = sum(len(points) for points in points_by_rarity) != 0
    if not has_regions and not has_points:
        return None
    # Generate SVG rects and circles from the shapes
    if has_regions:
        svg_output += _generate_svg_spawn_regions(regions_by_rarity)
    if has_points:
        svg_output += _generate_svg_spawn_points(points_by_rarity)
    # Untameable stripes (without blur)
    svg_output += _generate_svg_untameables(regions_by_rarity)
    # Cave outlines
    svg_output += _generate_svg_caves(regions_by_rarity)
    # end of svg
    svg_output += '\n</svg>'
    return svg_output
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import errno
import json
import botocore.session
from awscli.customizations import sessionmanager
from awscli.testutils import mock, unittest
class TestSessionManager(unittest.TestCase):
    """Tests for the StartSessionCaller customization that shells out to the
    session-manager-plugin binary after calling ssm:StartSession."""

    def setUp(self):
        # Fake botocore session/client wired so create_client() yields a mock
        # SSM client carrying known region and endpoint metadata.
        self.session = mock.Mock(botocore.session.Session)
        self.client = mock.Mock()
        self.region = 'us-west-2'
        self.profile = 'testProfile'
        self.endpoint_url = 'testUrl'
        self.client.meta.region_name = self.region
        self.client.meta.endpoint_url = self.endpoint_url
        self.session.create_client.return_value = self.client
        self.session.profile = self.profile
        self.caller = sessionmanager.StartSessionCaller(self.session)

    def METHOD_NAME(self):
        """Errors raised by ssm:StartSession must propagate to the caller."""
        self.client.start_session.side_effect = Exception('some exception')
        params = {}
        with self.assertRaisesRegex(Exception, 'some exception'):
            self.caller.invoke('ssm', 'StartSession', params, mock.Mock())

    @mock.patch('awscli.customizations.sessionmanager.check_call')
    def test_start_session_success_scenario(self, mock_check_call):
        """Happy path: plugin is invoked with response, region, profile,
        original parameters and endpoint, and the return code is 0."""
        mock_check_call.return_value = 0

        start_session_params = {
            "Target": "i-123456789"
        }

        start_session_response = {
            "SessionId": "session-id",
            "TokenValue": "token-value",
            "StreamUrl": "stream-url"
        }
        self.client.start_session.return_value = start_session_response

        rc = self.caller.invoke('ssm', 'StartSession',
                                start_session_params, mock.Mock())
        self.assertEqual(rc, 0)
        self.client.start_session.assert_called_with(**start_session_params)

        # Second argv element is the JSON-encoded StartSession response;
        # decode it so the comparison is order-insensitive.
        mock_check_call_list = mock_check_call.call_args[0][0]
        mock_check_call_list[1] = json.loads(mock_check_call_list[1])
        self.assertEqual(
            mock_check_call_list,
            ['session-manager-plugin',
             start_session_response,
             self.region,
             'StartSession',
             self.profile,
             json.dumps(start_session_params),
             self.endpoint_url]
        )

    @mock.patch('awscli.customizations.sessionmanager.check_call')
    def test_start_session_when_check_call_fails(self, mock_check_call):
        """If the plugin binary is missing, the opened session must be
        terminated and a ValueError surfaced."""
        mock_check_call.side_effect = OSError(errno.ENOENT, 'some error')

        start_session_params = {
            "Target": "i-123456789"
        }

        start_session_response = {
            "SessionId": "session-id",
            "TokenValue": "token-value",
            "StreamUrl": "stream-url"
        }

        terminate_session_params = {
            "SessionId": "session-id"
        }
        self.client.start_session.return_value = start_session_response

        with self.assertRaises(ValueError):
            self.caller.invoke('ssm', 'StartSession',
                               start_session_params, mock.Mock())
            self.client.start_session.assert_called_with(
                **start_session_params)
            self.client.terminate_session.assert_called_with(
                **terminate_session_params)
            mock_check_call_list = mock_check_call.call_args[0][0]
            mock_check_call_list[1] = json.loads(mock_check_call_list[1])
            self.assertEqual(
                mock_check_call_list,
                ['session-manager-plugin',
                 start_session_response,
                 self.region,
                 'StartSession',
                 self.profile,
                 json.dumps(start_session_params),
                 self.endpoint_url]
            )

    @mock.patch('awscli.customizations.sessionmanager.check_call')
    def test_start_session_when_no_profile_is_passed(self, mock_check_call):
        """Without a configured profile the plugin receives an empty string
        in the profile argv slot."""
        self.session.profile = None
        mock_check_call.return_value = 0

        start_session_params = {
            "Target": "i-123456789"
        }

        start_session_response = {
            "SessionId": "session-id",
            "TokenValue": "token-value",
            "StreamUrl": "stream-url"
        }
        self.client.start_session.return_value = start_session_response

        rc = self.caller.invoke('ssm', 'StartSession',
                                start_session_params, mock.Mock())
        self.assertEqual(rc, 0)
        self.client.start_session.assert_called_with(**start_session_params)
        mock_check_call_list = mock_check_call.call_args[0][0]
        self.assertEqual(mock_check_call_list[4], '')
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'ListAppServicePlanHybridConnectionKeysResult',
'AwaitableListAppServicePlanHybridConnectionKeysResult',
'list_app_service_plan_hybrid_connection_keys',
'list_app_service_plan_hybrid_connection_keys_output',
]
# NOTE: this class is emitted by the Pulumi code generator ("do not edit by
# hand" applies to the whole file); keep any manual changes to comments only.
@pulumi.output_type
class ListAppServicePlanHybridConnectionKeysResult:
    """
    Hybrid Connection key contract. This has the send key name and value for a Hybrid Connection.
    """
    def __init__(__self__, id=None, kind=None, name=None, send_key_name=None, send_key_value=None, type=None):
        # Each field is type-checked then stored through pulumi.set so the
        # @pulumi.output_type machinery can retrieve it via pulumi.get.
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if kind and not isinstance(kind, str):
            raise TypeError("Expected argument 'kind' to be a str")
        pulumi.set(__self__, "kind", kind)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if send_key_name and not isinstance(send_key_name, str):
            raise TypeError("Expected argument 'send_key_name' to be a str")
        pulumi.set(__self__, "send_key_name", send_key_name)
        if send_key_value and not isinstance(send_key_value, str):
            raise TypeError("Expected argument 'send_key_value' to be a str")
        pulumi.set(__self__, "send_key_value", send_key_value)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource Id.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def kind(self) -> Optional[str]:
        """
        Kind of resource.
        """
        return pulumi.get(self, "kind")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource Name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="sendKeyName")
    def send_key_name(self) -> str:
        """
        The name of the send key.
        """
        return pulumi.get(self, "send_key_name")

    @property
    @pulumi.getter(name="sendKeyValue")
    def send_key_value(self) -> str:
        """
        The value of the send key.
        """
        return pulumi.get(self, "send_key_value")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
class AwaitableListAppServicePlanHybridConnectionKeysResult(ListAppServicePlanHybridConnectionKeysResult):
    """Awaitable wrapper so the invoke result can be used with ``await``.

    The ``__await__`` below never actually suspends (the ``if False: yield``
    merely marks the function as a generator); awaiting immediately yields a
    plain copy of the result.
    """
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return ListAppServicePlanHybridConnectionKeysResult(
            id=self.id,
            kind=self.kind,
            name=self.name,
            send_key_name=self.send_key_name,
            send_key_value=self.send_key_value,
            type=self.type)
def METHOD_NAME(name: Optional[str] = None,
                namespace_name: Optional[str] = None,
                relay_name: Optional[str] = None,
                resource_group_name: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListAppServicePlanHybridConnectionKeysResult:
    """
    Description for Get the send key name and value of a Hybrid Connection.


    :param str name: Name of the App Service plan.
    :param str namespace_name: The name of the Service Bus namespace.
    :param str relay_name: The name of the Service Bus relay.
    :param str resource_group_name: Name of the resource group to which the resource belongs.
    :return: An awaitable result carrying the send key name/value.
    """
    # Pack the arguments using the wire names expected by the azure-native
    # provider, then issue a blocking invoke.
    __args__ = dict()
    __args__['name'] = name
    __args__['namespaceName'] = namespace_name
    __args__['relayName'] = relay_name
    __args__['resourceGroupName'] = resource_group_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:web/v20220901:listAppServicePlanHybridConnectionKeys', __args__, opts=opts, typ=ListAppServicePlanHybridConnectionKeysResult).value

    return AwaitableListAppServicePlanHybridConnectionKeysResult(
        id=pulumi.get(__ret__, 'id'),
        kind=pulumi.get(__ret__, 'kind'),
        name=pulumi.get(__ret__, 'name'),
        send_key_name=pulumi.get(__ret__, 'send_key_name'),
        send_key_value=pulumi.get(__ret__, 'send_key_value'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(METHOD_NAME)
def list_app_service_plan_hybrid_connection_keys_output(name: Optional[pulumi.Input[str]] = None,
namespace_name: Optional[pulumi.Input[str]] = None,
relay_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[ListAppServicePlanHybridConnectionKeysResult]:
"""
Description for Get the send key name and value of a Hybrid Connection.
:param str name: Name of the App Service plan.
:param str namespace_name: The name of the Service Bus namespace.
:param str relay_name: The name of the Service Bus relay.
:param str resource_group_name: Name of the resource group to which the resource belongs.
"""
... |
4,570 | test invalid send analytic dxo | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import pytest
from nvflare.apis.analytix import AnalyticsDataType
from nvflare.apis.dxo import DXO, DataKind
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.tracking.tracker_types import LogWriterName, TrackConst
from nvflare.app_common.widgets.streaming import create_analytic_dxo, send_analytic_dxo
# Each tuple: (comp, dxo, fl_ctx, expected_error, expected_msg) — argument-type
# validation cases for send_analytic_dxo.
INVALID_TEST_CASES = [
    (list(), dict(), FLContext(), TypeError, f"expect comp to be an instance of FLComponent, but got {type(list())}"),
    (FLComponent(), dict(), FLContext(), TypeError, f"expect dxo to be an instance of DXO, but got {type(dict())}"),
    (
        FLComponent(),
        DXO(data={"k": "v"}, data_kind=DataKind.ANALYTIC),
        list(),
        TypeError,
        f"expect fl_ctx to be an instance of FLContext, but got {type(list())}",
    ),
]
# Each tuple: (tag, value, step, data_type, expected_error, expected_msg) —
# create_analytic_dxo must reject mismatched tag/value types per data_type.
INVALID_WRITE_TEST_CASES = [
    (
        list(),
        1.0,
        1,
        AnalyticsDataType.SCALAR,
        TypeError,
        f"expect tag to be an instance of str, but got {type(list())}",
    ),
    (
        "tag",
        list(),
        2,
        AnalyticsDataType.SCALAR,
        TypeError,
        f"expect 'tag' value to be an instance of float, but got '{type(list())}'",
    ),
    (
        list(),
        1.0,
        2,
        AnalyticsDataType.SCALARS,
        TypeError,
        f"expect tag to be an instance of str, but got {type(list())}",
    ),
    (
        "tag",
        1.0,
        3,
        AnalyticsDataType.SCALARS,
        TypeError,
        f"expect 'tag' value to be an instance of dict, but got '{type(1.0)}'",
    ),
    (list(), 1.0, 4, AnalyticsDataType.TEXT, TypeError, f"expect tag to be an instance of str, but got {type(list())}"),
    (
        "tag",
        1.0,
        5,
        AnalyticsDataType.TEXT,
        TypeError,
        f"expect 'tag' value to be an instance of str, but got '{type(1.0)}'",
    ),
    (
        list(),
        1.0,
        6,
        AnalyticsDataType.IMAGE,
        TypeError,
        f"expect tag to be an instance of str, but got {type(list())}",
    ),
]
class TestStreaming:
    """Validates argument-type checking in send_analytic_dxo / create_analytic_dxo."""

    @pytest.mark.parametrize("comp,dxo,fl_ctx,expected_error,expected_msg", INVALID_TEST_CASES)
    def METHOD_NAME(self, comp, dxo, fl_ctx, expected_error, expected_msg):
        # Every invalid (comp, dxo, fl_ctx) combination must raise the documented TypeError.
        with pytest.raises(expected_error, match=expected_msg):
            send_analytic_dxo(comp=comp, dxo=dxo, fl_ctx=fl_ctx)

    @pytest.mark.parametrize("tag,value,step, data_type,expected_error,expected_msg", INVALID_WRITE_TEST_CASES)
    def test_invalid_write_func(self, tag, value, step, data_type, expected_error, expected_msg):
        # create_analytic_dxo must reject tag/value types that do not match data_type.
        with pytest.raises(expected_error, match=expected_msg):
            create_analytic_dxo(tag=tag, value=value, data_type=data_type, step=step, writer=LogWriterName.TORCH_TB)
def mock_add(tag: str, value, data_type: AnalyticsDataType, global_step: Optional[int] = None, **kwargs):
    """Build an analytics DXO the way a writer's ``add()`` would.

    This mock covers writer behavior for MLflow and WandB too, but to keep the
    signature of the func we use writer=LogWriterName.TORCH_TB, which shows up
    in expected_dxo_meta.

    :param tag: track key for the DXO payload.
    :param value: tracked value (type depends on ``data_type``).
    :param data_type: kind of analytics data being recorded.
    :param global_step: optional step index; forwarded via kwargs when given.
    :raises TypeError: if ``global_step`` is provided but is not an int.
    :return: the DXO produced by create_analytic_dxo.
    """
    # NOTE: ``kwargs`` collected via ** is always a dict (possibly empty), so the
    # original ``kwargs = kwargs if kwargs else {}`` reassignment was a no-op
    # and has been removed.
    if global_step is not None:
        if not isinstance(global_step, int):
            raise TypeError(f"Expect global step to be an instance of int, but got {type(global_step)}")
        kwargs[TrackConst.GLOBAL_STEP_KEY] = global_step
    return create_analytic_dxo(tag=tag, value=value, data_type=data_type, writer=LogWriterName.TORCH_TB, **kwargs)
# Each tuple: (tag, value, data_type, global_step, kwargs,
#              expected_dxo_data_kind, expected_dxo_data, expected_dxo_meta).
ANALYTICS_SENDER_TEST_CASES = [
    (
        "text",
        "textsample",
        AnalyticsDataType.TEXT,
        None,
        {},
        "ANALYTIC",
        {"track_key": "text", "track_value": "textsample"},
        {"analytics_data_type": AnalyticsDataType.TEXT, "tracker_key": LogWriterName.TORCH_TB},
    ),
    (
        "text",
        "textsample",
        AnalyticsDataType.TEXT,
        2,
        {},
        "ANALYTIC",
        {"track_key": "text", "track_value": "textsample", "global_step": 2, "analytics_kwargs": {"global_step": 2}},
        {"analytics_data_type": AnalyticsDataType.TEXT, "tracker_key": LogWriterName.TORCH_TB},
    ),
    (
        "text",
        "textsample",
        AnalyticsDataType.TEXT,
        3,
        {"extra_arg": 4},
        "ANALYTIC",
        {
            "track_key": "text",
            "track_value": "textsample",
            "global_step": 3,
            "analytics_kwargs": {"global_step": 3, "extra_arg": 4},
        },
        {"analytics_data_type": AnalyticsDataType.TEXT, "tracker_key": LogWriterName.TORCH_TB},
    ),
    (
        "set_tag_key_tag_name",
        "tagvalue",
        AnalyticsDataType.TAG,
        None,
        {},
        "ANALYTIC",
        {"track_key": "set_tag_key_tag_name", "track_value": "tagvalue"},
        {"analytics_data_type": AnalyticsDataType.TAG, "tracker_key": LogWriterName.TORCH_TB},
    ),
    (
        "log_metric_key_name",
        2.4,
        AnalyticsDataType.METRIC,
        20,
        {},
        "ANALYTIC",
        {
            "track_key": "log_metric_key_name",
            "track_value": 2.4,
            "global_step": 20,
            "analytics_kwargs": {"global_step": 20},
        },
        {"analytics_data_type": AnalyticsDataType.METRIC, "tracker_key": LogWriterName.TORCH_TB},
    ),
    (  # for WandBWriter
        "metrics",
        {"train_loss": 2.4},
        AnalyticsDataType.METRICS,
        20,
        {},
        "ANALYTIC",
        {
            "track_key": "metrics",
            "track_value": {"train_loss": 2.4},
            "global_step": 20,
            "analytics_kwargs": {"global_step": 20},
        },
        {"analytics_data_type": AnalyticsDataType.METRICS, "tracker_key": LogWriterName.TORCH_TB},
    ),
]
# Each tuple: (tag, value, data_type, global_step, kwargs, expected_error, expected_msg).
# Supplying global_step both as a parameter and inside kwargs must raise TypeError.
INVALID_SENDER_TEST_CASES = [
    (
        "text",
        "textsample",
        AnalyticsDataType.TEXT,
        None,
        {"global_step": 3, "extra_arg": 4},
        TypeError,
        "got multiple values for keyword argument 'global_step'",
    ),
]
class TestAnalyticsSender:
    """Checks the DXO produced by writer-style ``add()`` calls (via mock_add)."""

    @pytest.mark.parametrize(
        "tag,value,data_type,global_step,kwargs,expected_dxo_data_kind,expected_dxo_data,expected_dxo_meta",
        ANALYTICS_SENDER_TEST_CASES,
    )
    def test_add(
        self, tag, value, data_type, global_step, kwargs, expected_dxo_data_kind, expected_dxo_data, expected_dxo_meta
    ):
        dxo = mock_add(tag=tag, value=value, data_type=data_type, global_step=global_step, **kwargs)
        # The DXO must carry the expected kind, payload, and tracker metadata.
        assert dxo.data_kind == expected_dxo_data_kind
        assert dxo.data == expected_dxo_data
        assert dxo.meta == expected_dxo_meta

    # Since global_step is already being set, it cannot also be in kwargs.
    @pytest.mark.parametrize(
        "tag,value,data_type,global_step,kwargs,expected_error,expected_msg",
        INVALID_SENDER_TEST_CASES,
    )
    def test_add_invalid(self, tag, value, data_type, global_step, kwargs, expected_error, expected_msg):
        with pytest.raises(expected_error, match=expected_msg):
            dxo = mock_add(tag=tag, value=value, data_type=data_type, global_step=global_step, **kwargs)
4,571 | get next | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._operations import build_list_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class Operations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.appplatform.v2023_09_01_preview.aio.AppPlatformManagementClient`'s
        :attr:`operations` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs) -> None:
        # Positional args arrive as (client, config, serializer, deserializer,
        # api_version); keyword fallbacks support both call styles.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
        self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version")

    @distributed_trace
    def list(self, **kwargs: Any) -> AsyncIterable["_models.OperationDetail"]:
        """Lists all of the available REST API operations of the Microsoft.AppPlatform provider.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either OperationDetail or the result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.appplatform.v2023_09_01_preview.models.OperationDetail]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        # Resolution order: explicit kwarg, query param, client default, service default.
        api_version: str = kwargs.pop(
            "api_version", _params.pop("api-version", self._api_version or "2023-09-01-preview")
        )
        cls: ClsType[_models.AvailableOperations] = kwargs.pop("cls", None)
        # Map well-known HTTP status codes onto typed azure-core exceptions.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page uses the templated list request; follow-up pages re-use
            # the server-provided next_link with the client's api-version injected.
            if not next_link:
                request = build_list_request(
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page; return (continuation token, items) for the pager.
            deserialized = self._deserialize("AvailableOperations", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def METHOD_NAME(next_link=None):
            # Execute one page request through the client pipeline.
            request = prepare_request(next_link)
            _stream = False
            pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response

        return AsyncItemPaged(METHOD_NAME, extract_data)

    list.metadata = {"url": "/providers/Microsoft.AppPlatform/operations"}
4,572 | test wait until event check false | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from unittest import mock
from twisted.internet import defer
from twisted.trial import unittest
from buildbot.mq import simple
from buildbot.test.fake import fakemaster
from buildbot.test.reactor import TestReactorMixin
from buildbot.test.util import interfaces
from buildbot.test.util import tuplematching
class Tests(interfaces.InterfaceTests):
    """Interface-level MQ tests shared by the fake and real implementations."""

    def setUp(self):
        # Concrete subclasses must create self.mq (and usually self.master).
        raise NotImplementedError

    def test_empty_produce(self):
        # Producing with no consumers must be a silent no-op.
        self.mq.produce(('a', 'b', 'c'), {"x": 1})
        # ..nothing happens

    def test_signature_produce(self):
        @self.assertArgSpecMatches(self.mq.produce)
        def produce(self, routingKey, data):
            pass

    def test_signature_startConsuming(self):
        @self.assertArgSpecMatches(self.mq.startConsuming)
        def startConsuming(self, callback, filter, persistent_name=None):
            pass

    @defer.inlineCallbacks
    def test_signature_stopConsuming(self):
        cons = yield self.mq.startConsuming(lambda: None, ('a',))

        @self.assertArgSpecMatches(cons.stopConsuming)
        def stopConsuming(self):
            pass

    def test_signature_waitUntilEvent(self):
        @self.assertArgSpecMatches(self.mq.waitUntilEvent)
        def waitUntilEvent(self, filter, check_callback):
            pass
class RealTests(tuplematching.TupleMatchingMixin, Tests):
    # tests that only "real" implementations will pass

    # called by the TupleMatchingMixin methods
    @defer.inlineCallbacks
    def do_test_match(self, routingKey, shouldMatch, filter):
        cb = mock.Mock()
        yield self.mq.startConsuming(cb, filter)
        self.mq.produce(routingKey, 'x')
        self.assertEqual(shouldMatch, cb.call_count == 1)
        if shouldMatch:
            cb.assert_called_once_with(routingKey, 'x')

    @defer.inlineCallbacks
    def test_stopConsuming(self):
        cb = mock.Mock()
        qref = yield self.mq.startConsuming(cb, ('abc',))
        self.mq.produce(('abc',), {"x": 1})
        qref.stopConsuming()
        self.mq.produce(('abc',), {"x": 1})
        # Only the message produced before stopConsuming is delivered.
        cb.assert_called_once_with(('abc',), {"x": 1})

    @defer.inlineCallbacks
    def test_stopConsuming_twice(self):
        cb = mock.Mock()
        qref = yield self.mq.startConsuming(cb, ('abc',))
        qref.stopConsuming()
        qref.stopConsuming()
        # ..nothing bad happens

    @defer.inlineCallbacks
    def test_non_persistent(self):
        cb = mock.Mock()
        qref = yield self.mq.startConsuming(cb, ('abc',))
        cb2 = mock.Mock()
        qref2 = yield self.mq.startConsuming(cb2, ('abc',))
        qref.stopConsuming()
        self.mq.produce(('abc',), '{}')
        # Re-subscribing without a persistent name must not replay the message.
        qref = yield self.mq.startConsuming(cb, ('abc',))
        qref.stopConsuming()
        qref2.stopConsuming()
        self.assertTrue(cb2.called)
        self.assertFalse(cb.called)

    @defer.inlineCallbacks
    def test_persistent(self):
        cb = mock.Mock()
        qref = yield self.mq.startConsuming(cb, ('abc',), persistent_name='ABC')
        qref.stopConsuming()
        self.mq.produce(('abc',), '{}')
        # A persistent consumer receives messages produced while unsubscribed.
        qref = yield self.mq.startConsuming(cb, ('abc',), persistent_name='ABC')
        qref.stopConsuming()
        self.assertTrue(cb.called)

    @defer.inlineCallbacks
    def METHOD_NAME(self):
        # waitUntilEvent with a false check_callback fires only on the next event.
        d = self.mq.waitUntilEvent(('abc',), lambda: False)
        self.assertEqual(d.called, False)
        self.mq.produce(('abc',), {"x": 1})
        self.assertEqual(d.called, True)
        res = yield d
        self.assertEqual(res, (('abc',), {"x": 1}))

    timeout = 3  # those tests should not run long
class TestFakeMQ(TestReactorMixin, unittest.TestCase, Tests):
    """Runs the interface tests against the fake (in-memory) MQ."""

    def setUp(self):
        self.setup_test_reactor()
        self.master = fakemaster.make_master(self, wantMq=True)
        self.mq = self.master.mq
        # The fake MQ normally verifies message schemas; disable that here so
        # the generic interface tests can use arbitrary payloads.
        self.mq.verifyMessages = False
class TestSimpleMQ(TestReactorMixin, unittest.TestCase, RealTests):
    """Runs the full (real-implementation) test suite against SimpleMQ."""

    @defer.inlineCallbacks
    def setUp(self):
        self.setup_test_reactor()
        self.master = fakemaster.make_master(self)
        self.mq = simple.SimpleMQ()
        # Attach the MQ as a service child so it starts/stops with the master.
        yield self.mq.setServiceParent(self.master)
4,573 | compute ik length height | # Copyright (c) Princeton University.
# This source code is licensed under the BSD 3-Clause license found in the LICENSE file in the root directory of this source tree.
# Authors: Alexander Raistrick
import logging
import bpy
import bpy_types
import mathutils
import numpy as np
from numpy.random import uniform as U, normal as N
import pdb
from assets.creatures import creature, creature_util as cutil
from util.math import clip_gaussian, randomspacing, lerp
from util import blender as butil
def METHOD_NAME(targets):
    """Return per-axis world-space bounds of the given IK target objects.

    :param targets: objects exposing ``matrix_world.translation`` (indexable x/y/z).
    :return: numpy array of shape (3, 2); rows are x/y/z, columns are (min, max).
    """
    positions = np.array(
        [[tgt.matrix_world.translation[axis] for axis in range(3)] for tgt in targets]
    )
    # Column-stack the per-axis minima and maxima into [min, max] rows.
    return np.stack((positions.min(axis=0), positions.max(axis=0)), axis=1)
def snap_iks_to_floor(targets, floor_bvh, minweight=0.7):
    """Project the creature's IK targets down onto the floor geometry.

    :param targets: IK target objects; roles are identified by substring of
        ``name`` ('foot', 'body', 'head', 'tail').
    :param floor_bvh: BVH tree of the floor mesh used for downward raycasts.
    :param minweight: blend weight pulling hips toward the lower of their two feet.
    """
    assert floor_bvh is not None
    bpy.context.view_layer.update()
    get_targets = lambda k: [t for t in targets if k in t.name]
    bounds = METHOD_NAME(targets)

    def find_floor_offset(t):
        # Cast a ray straight down from above the creature's top; None on miss.
        ray_origin = mathutils.Vector((t.matrix_world.translation.x, t.matrix_world.translation.y, bounds[2, 1]))
        location, normal, index, dist = floor_bvh.ray_cast(ray_origin, mathutils.Vector((0, 0, -1)))
        if location is None:
            return None
        return location - t.matrix_world.translation

    feet = get_targets('foot')
    feet_offsets = [find_floor_offset(f) for f in feet]
    if any(off is None for off in feet_offsets):
        logging.warning(f'snap_iks_to_floor found {feet_offsets=}, aborting snap operation')
        return
    # dont allow the pose diff to be too large (ie, prevent weird behavior at cliffs)
    for i, o in enumerate(feet_offsets):
        if o.length > bounds[2, 1] - bounds[2, 0]:
            logging.warning(f'snap_iks_to_floor ignoring too-long offset {o.length=}')
            feet_offsets[i] = mathutils.Vector()
    for f, fo, in zip(feet, feet_offsets):
        f.location += fo
    hips = get_targets('body')
    if len(feet) == len(hips) * 2:
        # hips seem coupled to pairs of feet, take that into consideration
        # TODO: Restructure to make detecting this more robust
        hip_offsets = []
        for i in range(len(feet) // 2):
            o1, o2 = feet_offsets[2*i], feet_offsets[2*i + 1]
            # NOTE(review): min()/max() compare mathutils.Vector objects directly;
            # presumably the intent is the smaller/larger offset by magnitude —
            # confirm Vector ordering semantics or compare by .length explicitly.
            hip_off = minweight * min(o1, o2) + (1 - minweight) * max(o1, o2)
            hip_offsets.append(hip_off)
        for h, ho in zip(hips, hip_offsets):
            h.location += ho
        for o in get_targets('head'):  # front-associated
            o.location += hip_offsets[-1]
        for o in get_targets('tail'):  # back associated
            o.location += hip_offsets[0]
    else:
        logging.warning(f'Couldnt establish feet-hip mapping')
        # Fallback: shift every non-foot target by the mean foot offset.
        off = mathutils.Vector(np.array(feet_offsets).mean(axis=0))
        for o in targets:
            if o in feet:
                continue
            o.location += off
def idle_body_noise_drivers(targets, foot_motion_chance=0.2, head_benddown=1.0, body_mag=1.0, wing_mag=1.0):
    """Attach procedural noise/sin drivers so an idle creature subtly moves."""
    # all magnitudes are determined as multiples of the creatures overall length/height/width
    bounds = METHOD_NAME(targets)
    ls = bounds[:, 1] - bounds[:, 0]
    # scalars for the whole creature
    freq_scalar = N(1, 0.15)
    mag_scalar = N(1, 0.15)

    def add_noise(t, k, axis, mag, freq, off=0, mode='noise', seeds=None):
        # Attach a driver expression of the form base + mag*(off + varying(frame)).
        d = t.driver_add(k, axis)
        p = getattr(t, k)[axis]
        if k == 'location':
            # Location magnitudes scale with the creature's size on that axis.
            mag *= ls[axis]
        freq = freq / bpy.context.scene.render.fps
        freq *= freq_scalar
        mag *= mag_scalar
        if mode == 'noise':
            s1, s2 = seeds if seeds is not None else U(0, 1000, 2)  # random offsets as 'seeds'
            varying = f'noise.noise(({freq:.6f}*frame, {s1:.2f}, {s2:.2f}))'
        elif mode == 'sin':
            varying = f'sin({freq:6f}*frame*2*pi)'
        else:
            raise ValueError(mode)
        d.driver.expression = f'{p:.4f}+{mag:.4f}*({off:.4f}+{varying})'

    get_targets = lambda k: [t for t in targets if k in t.name]
    for i, t in enumerate(get_targets('body')):
        add_noise(t, 'location', 0, mag=body_mag*0.025*N(1, 0.2), freq=0.25*N(1, 0.2))
        if i != 0:
            add_noise(t, 'location', 2, mag=body_mag*0.015*N(1, 0.2), freq=0.5*N(1, 0.2), mode='sin')
    for t in get_targets('foot'):
        if U() < foot_motion_chance:
            add_noise(t, 'location', 0, mag=0.07*N(1, 0.1), freq=U(0.2, 0.7))
            add_noise(t, 'location', 2, mag=0.04*N(1, 0.1), freq=U(0.2, 0.7))
    for t in get_targets('head'):
        headfreq = 0.4
        add_noise(t, 'location', 0, mag=0.07*N(1, 0.1), freq=headfreq, off=-0.5*head_benddown)
        add_noise(t, 'location', 1, mag=0.03*N(1, 0.1), freq=headfreq)
        add_noise(t, 'location', 2, mag=0.2*N(1, 0.1), freq=headfreq/2, off=-0.7*head_benddown)
        #add_noise(t, 'rotation_euler', 0, mag=0.4*N(1, 0.1), freq=U(0.1, 0.4))
        #add_noise(t, 'rotation_euler', 1, mag=0.4*N(1, 0.1), freq=U(0.1, 0.4))
    seeds = U(0, 1000, 2)  # synchronize wing motion a little bit
    for t in get_targets('wingtip'):
        add_noise(t, 'location', 0, mag=wing_mag*0.1*N(1, 0.1), freq=U(0.6, 4), seeds=seeds+N(0, 0.2, 2))
        add_noise(t, 'location', 2, mag=wing_mag*0.2*N(1, 0.1), freq=U(0.6, 4), seeds=seeds+N(0, 0.2, 2))
    for t in get_targets('tail'):
        for i in range(3):
            # NOTE(review): this adds the same axis-0 driver three times and reuses
            # `headfreq`, which is bound only inside the 'head' loop above (NameError
            # if there are no head targets). Looks like `axis=i` was intended — confirm.
            add_noise(t, 'location', 0, mag=0.07*N(1, 0.1), freq=headfreq, off=-0.5)
def head_look_around(targets):
pas |
4,574 | audio pipeline | #!/usr/bin/env/python3
"""This minimal example trains an autoencoder over speech features. The encoder
is a MLP that transforms the input into a lower-dimensional latent representation.
The decoder is another MLP that predicts the input features. The system is trained
with MSE. Given the tiny dataset, the expected behavior is to overfit the
training data (with a validation performance that stays high).
"""
import pathlib
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
class AutoBrain(sb.Brain):
    """Minimal autoencoder Brain: MLP encoder + MLP decoder trained with MSE."""

    def compute_forward(self, batch, stage):
        "Applying encoder and decoder to the input features"
        batch = batch.to(self.device)
        wavs, lens = batch.sig
        feats = self.hparams.compute_features(wavs)
        feats = self.modules.mean_var_norm(feats, lens)
        # Encoder: one linear layer plus nonlinearity into the latent space.
        encoded = self.modules.linear1(feats)
        encoded = self.hparams.activation(encoded)
        # Decoder: single linear layer reconstructing the input features.
        decoded = self.modules.linear2(encoded)
        return decoded

    def compute_objectives(self, predictions, batch, stage):
        "Given the network predictions and targets, compute the MSE loss."
        # Recompute the normalized features so the target matches the input.
        wavs, lens = batch.sig
        feats = self.hparams.compute_features(wavs)
        feats = self.modules.mean_var_norm(feats, lens)
        self.mse_metric.append(batch.id, predictions, feats, lens)
        return self.hparams.compute_cost(predictions, feats, lens)

    def on_stage_start(self, stage, epoch=None):
        "Gets called when a stage (either training, validation, test) starts."
        # Fresh loss tracker per stage so statistics do not leak across stages.
        self.mse_metric = self.hparams.loss_tracker()

    def on_stage_end(self, stage, stage_loss, epoch=None):
        """Gets called at the end of a stage; logs and prints the stage loss."""
        if self.hparams.use_tensorboard:
            if stage == sb.Stage.TRAIN:
                self.hparams.train_logger.log_stats(
                    {"Epoch": epoch},
                    train_stats={"loss": self.mse_metric.scores},
                )
            elif stage == sb.Stage.VALID:
                self.hparams.train_logger.log_stats(
                    {"Epoch": epoch},
                    valid_stats={"loss": self.mse_metric.scores},
                )
            if stage == sb.Stage.TEST:
                self.hparams.train_logger.log_stats(
                    {}, test_stats={"loss": self.mse_metric.scores}
                )
        if stage == sb.Stage.TRAIN:
            # Remember the train loss so it can be printed at validation time.
            self.train_loss = stage_loss
        if stage == sb.Stage.VALID:
            print("Completed epoch %d" % epoch)
            print("Train loss: %.3f" % self.train_loss)
            print("Valid loss: %.3f" % stage_loss)
        if stage == sb.Stage.TEST:
            print("Test loss: %.3f" % stage_loss)
def data_prep(data_folder):
    """Create the train/valid datasets and their data processing pipelines.

    :param data_folder: pathlib.Path to the sample audio data root.
    :return: (train_data, valid_data) DynamicItemDatasets producing 'id' and 'sig'.
    """
    # 1. Declarations:
    train_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=data_folder / "../annotation/ASR_train.json",
        replacements={"data_root": data_folder},
    )
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=data_folder / "../annotation/ASR_dev.json",
        replacements={"data_root": data_folder},
    )
    datasets = [train_data, valid_data]

    # 2. Define audio pipeline: load each wav file into a 'sig' tensor.
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def METHOD_NAME(wav):
        sig = sb.dataio.dataio.read_audio(wav)
        return sig

    sb.dataio.dataset.add_dynamic_item(datasets, METHOD_NAME)

    # 3. Set output:
    sb.dataio.dataset.set_output_keys(datasets, ["id", "sig"])
    return train_data, valid_data
def main(device="cpu"):
    """Train the autoencoder on the tiny ASR sample and verify it overfits.

    :param device: torch device string passed through to the Brain.
    """
    experiment_dir = pathlib.Path(__file__).resolve().parent
    hparams_file = experiment_dir / "hyperparams.yaml"
    data_folder = "../../samples/ASR"
    data_folder = (experiment_dir / data_folder).resolve()

    # Load model hyper parameters:
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin)

    # Dataset creation
    train_data, valid_data = data_prep(data_folder)

    # Trainer initialization
    auto_brain = AutoBrain(
        hparams["modules"],
        hparams["opt_class"],
        hparams,
        run_opts={"device": device},
    )

    # Training/validation loop
    auto_brain.fit(
        range(hparams["N_epochs"]),
        train_data,
        valid_data,
        train_loader_kwargs=hparams["dataloader_options"],
        valid_loader_kwargs=hparams["dataloader_options"],
    )
    # Evaluation is run separately (now just evaluating on valid data)
    auto_brain.evaluate(valid_data)

    # Check if model overfits for integration test
    assert auto_brain.train_loss < 0.08
# Allow running this example directly as a script (defaults to CPU).
if __name__ == "__main__":
    main()
def test_error(device):
    """Pytest entry point: run the full training example on ``device``."""
    main(device)
4,575 | get setup pipeline | import abc
import logging
from collections import namedtuple
from typing import Any, Mapping, Sequence
from django.utils.encoding import force_str
from django.views import View
from sentry.models import AuthIdentity, User
from sentry.pipeline import PipelineProvider
from .view import AuthView, ConfigureView
class MigratingIdentityId(namedtuple("MigratingIdentityId", ["id", "legacy_id"])):
    """
    MigratingIdentityId may be used in the ``id`` field of an identity
    dictionary to facilitate migrating user identities from one identifying id
    to another.

    Context - when google oauth was initially created, the auth_identity key was simply
    the provider email. This can cause issues if the customer changes their domain name,
    and now their email is different and they're locked out of their account.
    This logic updates their id to the provider id instead.

    NOTE: this should _only_ really be relevant for google oauth implementation
    """

    # namedtuple subclass: keep instances dict-less.
    __slots__ = ()

    def __str__(self) -> str:
        # Render as the current (new) identity id, not the legacy one.
        return force_str(self.id)
class Provider(PipelineProvider, abc.ABC):
    """
    A provider indicates how authentication should happen for a given service,
    including its configuration and basic identity management.
    """

    # Whether this provider is supplied by a partner integration.
    is_partner = False
    # All auth providers by default require the sso-basic feature
    required_feature = "organizations:sso-basic"

    def __init__(self, key: str, **config: Any) -> None:
        super().__init__()
        self._key = key
        self.config = config
        # Per-provider logger, namespaced under sentry.auth.
        self.logger = logging.getLogger(f"sentry.auth.{self.key}")

    @property
    def key(self) -> str:
        return self._key

    def get_configure_view(self) -> View:
        """
        Return the view which handles configuration (post-setup).
        """
        return ConfigureView.as_view()

    def get_auth_pipeline(self) -> Sequence[AuthView]:
        """
        Return a list of AuthView instances representing the authentication
        pipeline for this provider.
        """
        raise NotImplementedError

    def METHOD_NAME(self) -> Sequence[AuthView]:
        """
        Return a list of AuthView instances representing the initial setup
        pipeline for this provider.

        Defaults to the defined authentication pipeline.
        """
        return self.get_auth_pipeline()

    def get_pipeline_views(self) -> Sequence[AuthView]:
        return self.get_auth_pipeline()

    # TODO: state should be Mapping[str, Any]?
    # Must be reconciled with sentry.pipeline.base.Pipeline.fetch_state
    def build_config(self, state: Any) -> Mapping[str, Any]:
        """
        Return a mapping containing provider configuration.

        - ``state`` is the resulting data captured by the pipeline
        """
        raise NotImplementedError

    def build_identity(self, state: Mapping[str, Any]) -> Mapping[str, Any]:
        """
        Return a mapping containing the identity information.

        - ``state`` is the resulting data captured by the pipeline

        >>> {
        >>>     "id": "foo@example.com",
        >>>     "email": "foo@example.com",
        >>>     "name": "Foo Bar",
        >>>     "email_verified": True,
        >>> }

        The ``email`` and ``id`` keys are required, ``name`` is optional.

        The ``id`` may be passed in as a ``MigratingIdentityId`` should the
        id key be migrating from one value to another and have multiple
        lookup values.

        If the provider is trusted and the email address is verified by the
        provider, the ``email_verified`` attribute should be set to ``True``.

        If the identity can not be constructed an ``IdentityNotValid`` error
        should be raised.
        """
        raise NotImplementedError

    def update_identity(
        self, new_data: Mapping[str, Any], current_data: Mapping[str, Any]
    ) -> Mapping[str, Any]:
        """
        When re-authenticating with a provider, the identity data may need to
        be mutated based on the previous state. An example of this is Google,
        which will not return a `refresh_token` unless the user explicitly
        goes through an approval process.

        Return the new state which should be used for an identity.
        """
        return new_data

    def refresh_identity(self, auth_identity: AuthIdentity) -> None:
        """
        Updates the AuthIdentity with any changes from upstream. The primary
        example of a change would be signalling this identity is no longer
        valid.

        If the identity is no longer valid an ``IdentityNotValid`` error should
        be raised.
        """
        raise NotImplementedError

    def can_use_scim(self, organization_id: int, user: User) -> bool:
        """
        Controls whether or not a provider can have SCIM enabled to manage users.
        By default we have this on for all providers.
        """
        return True
4,576 | wait for master | import abc
from typing import Any, Dict, Iterable, List, Optional
import pkg_resources
from termcolor import colored
import determined
import determined.deploy
from determined.common.api import certs
from determined.deploy import healthcheck
from determined.deploy.aws import aws, constants
# CloudFormation template parameter keys shared by every deployment type;
# subclasses select from this list via ``template_parameter_keys``.
COMMON_TEMPLATE_PARAMETER_KEYS = [
    constants.cloudformation.ENABLE_CORS,
    constants.cloudformation.MASTER_TLS_CERT,
    constants.cloudformation.MASTER_TLS_KEY,
    constants.cloudformation.MASTER_CERT_NAME,
    constants.cloudformation.KEYPAIR,
    constants.cloudformation.MASTER_INSTANCE_TYPE,
    constants.cloudformation.AUX_AGENT_INSTANCE_TYPE,
    constants.cloudformation.COMPUTE_AGENT_INSTANCE_TYPE,
    constants.cloudformation.INBOUND_CIDR,
    constants.cloudformation.VERSION,
    constants.cloudformation.DB_PASSWORD,
    constants.cloudformation.MAX_IDLE_AGENT_PERIOD,
    constants.cloudformation.MAX_AGENT_STARTING_PERIOD,
    constants.cloudformation.MAX_AUX_CONTAINERS_PER_AGENT,
    constants.cloudformation.MAX_DYNAMIC_AGENTS,
    constants.cloudformation.SPOT_ENABLED,
    constants.cloudformation.SPOT_MAX_PRICE,
    constants.cloudformation.CPU_ENV_IMAGE,
    constants.cloudformation.GPU_ENV_IMAGE,
    constants.cloudformation.LOG_GROUP_PREFIX,
    constants.cloudformation.IMAGE_REPO_PREFIX,
    constants.cloudformation.MASTER_CONFIG_TEMPLATE,
    constants.cloudformation.AGENT_REATTACH_ENABLED,
    constants.cloudformation.AGENT_RECONNECT_ATTEMPTS,
    constants.cloudformation.AGENT_RECONNECT_BACKOFF,
    constants.cloudformation.AGENT_CONFIG_FILE_CONTENTS,
    constants.cloudformation.MASTER_IMAGE_NAME,
    constants.cloudformation.AGENT_IMAGE_NAME,
    constants.cloudformation.DOCKER_USER,
    constants.cloudformation.DOCKER_PASS,
    constants.cloudformation.NOTEBOOK_TIMEOUT,
]  # type: List[str]
class DeterminedDeployment(metaclass=abc.ABCMeta):
    """Base class for deploying Determined onto AWS via CloudFormation.

    Subclasses provide ``template`` (the packaged CloudFormation template
    file name) and ``template_parameter_keys`` (the parameter names that
    template accepts) and implement :meth:`deploy`.
    """

    template_parameter_keys = []  # type: List[str]
    template = None  # type: Optional[str]

    # Informational message templates shown to the user once the stack is up.
    master_info = "Configure the Determined CLI: " + colored(
        "export DET_MASTER={master_url}", "yellow"
    )
    ui_info = "View the Determined UI: " + colored("{master_url}", "blue")
    logs_info = "View Logs at: " + colored(
        "https://{region}.console.aws.amazon.com/cloudwatch/home?"
        "region={region}#logStream:group={log_group}",
        "blue",
    )
    ssh_info = "SSH to master Instance: " + colored(
        "ssh -i <pem-file> ubuntu@{master_ip}", "yellow"
    )

    def __init__(self, parameters: Dict[str, Any]) -> None:
        assert self.template is not None
        self.template_path = pkg_resources.resource_filename(
            constants.misc.TEMPLATE_PATH, self.template
        )
        self.parameters = parameters

    @abc.abstractmethod
    def deploy(self, no_prompt: bool, update_terminate_agents: bool) -> None:
        """Create or update the CloudFormation stack for this deployment."""
        pass

    def print(self) -> None:
        """Print the raw CloudFormation template to stdout."""
        with open(self.template_path) as f:
            print(f.read())

    def wait_for_master(self, timeout: int = 5 * 60) -> None:
        """Block until the deployed master's health endpoint responds.

        A no-verify certificate is used when the deployment was configured
        with a master TLS cert, since it may be self-signed.
        """
        cert = None
        if self.parameters[constants.cloudformation.MASTER_TLS_CERT]:
            cert = certs.Cert(noverify=True)
        master_url = self._get_master_url()
        return healthcheck.wait_for_master_url(master_url, timeout=timeout, cert=cert)

    def consolidate_parameters(self) -> List[Dict[str, Any]]:
        """Return the truthy, template-supported parameters in boto3 format."""
        return [
            {"ParameterKey": k, "ParameterValue": str(self.parameters[k])}
            for k in self.parameters.keys()
            if self.parameters[k] and k in self.template_parameter_keys
        ]

    def before_deploy_print(self) -> None:
        """Print a short summary of the deployment about to happen."""
        cluster_id = self.parameters[constants.cloudformation.CLUSTER_ID]
        aws_region = self.parameters[constants.cloudformation.BOTO3_SESSION].region_name
        # Fall back to the installed determined version when none was given.
        version = (
            self.parameters[constants.cloudformation.VERSION]
            if self.parameters[constants.cloudformation.VERSION]
            else determined.__version__
        )
        keypair = self.parameters[constants.cloudformation.KEYPAIR]
        print(f"Determined Version: {version}")
        print(f"Stack Name: {cluster_id}")
        print(f"AWS Region: {aws_region}")
        print(f"Keypair: {keypair}")

    @property
    def info_partials(self) -> Iterable[str]:
        """The post-deploy info message templates, in display order."""
        return (
            self.master_info,
            self.ui_info,
            self.logs_info,
            self.ssh_info,
        )

    def print_output_info(self, **kwargs: str) -> None:
        """Format and print the info partials with the given substitutions."""
        print("\n".join(self.info_partials).format(**kwargs))

    def _get_aws_output(self) -> Dict[str, str]:
        """Fetch the CloudFormation stack outputs for this deployment."""
        stack_name = self.parameters[constants.cloudformation.CLUSTER_ID]
        boto3_session = self.parameters[constants.cloudformation.BOTO3_SESSION]
        return aws.get_output(stack_name, boto3_session)

    def print_results(self) -> None:
        """Print connection and log information for the deployed stack."""
        output = self._get_aws_output()
        master_ip = output[constants.cloudformation.DET_ADDRESS]
        region = output[constants.cloudformation.REGION]
        log_group = output[constants.cloudformation.LOG_GROUP]
        master_url = self._get_master_url()
        self.print_output_info(
            master_ip=master_ip, master_url=master_url, region=region, log_group=log_group
        )

    def _get_master_url(self) -> str:
        """Build the master URL from the stack's address, port, and scheme."""
        output = self._get_aws_output()
        master_ip = output[constants.cloudformation.DET_ADDRESS]
        master_port = output[constants.cloudformation.MASTER_PORT]
        master_scheme = output[constants.cloudformation.MASTER_SCHEME]
        master_url = f"{master_scheme}://{master_ip}:{master_port}"
        return master_url
4,577 | test raises on bad vocab size | # Copyright 2021, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import parameterized
import tensorflow as tf
from tensorflow_federated.python.core.backends.native import execution_contexts
from tensorflow_federated.python.simulation.baselines import baseline_task
from tensorflow_federated.python.simulation.baselines import client_spec
from tensorflow_federated.python.simulation.baselines.stackoverflow import word_prediction_tasks
class WordPredictionTasksTest(tf.test.TestCase, parameterized.TestCase):
  """Tests for `word_prediction_tasks.create_word_prediction_task`."""

  def test_constructs_with_eval_client_spec(self):
    """A task builds when an explicit eval client spec is supplied."""
    train_client_spec = client_spec.ClientSpec(
        num_epochs=2, batch_size=10, max_elements=3, shuffle_buffer_size=5
    )
    eval_client_spec = client_spec.ClientSpec(
        num_epochs=1, batch_size=2, max_elements=5, shuffle_buffer_size=10
    )
    baseline_task_spec = word_prediction_tasks.create_word_prediction_task(
        train_client_spec,
        vocab_size=10,
        eval_client_spec=eval_client_spec,
        use_synthetic_data=True,
    )
    self.assertIsInstance(baseline_task_spec, baseline_task.BaselineTask)

  def test_constructs_without_eval_client_spec(self):
    """A task builds with only the train client spec."""
    train_client_spec = client_spec.ClientSpec(
        num_epochs=2, batch_size=10, max_elements=3, shuffle_buffer_size=5
    )
    baseline_task_spec = word_prediction_tasks.create_word_prediction_task(
        train_client_spec, vocab_size=10, use_synthetic_data=True
    )
    self.assertIsInstance(baseline_task_spec, baseline_task.BaselineTask)

  @parameterized.named_parameters(
      ('sequence_length1', 1),
      ('sequence_length8', 8),
      ('sequence_length10', 10),
      ('sequence_length50', 50),
  )
  def test_constructs_with_different_sequence_lengths(self, sequence_length):
    """Any positive sequence length yields a valid task."""
    train_client_spec = client_spec.ClientSpec(
        num_epochs=2, batch_size=10, max_elements=3, shuffle_buffer_size=5
    )
    baseline_task_spec = word_prediction_tasks.create_word_prediction_task(
        train_client_spec,
        vocab_size=10,
        sequence_length=sequence_length,
        use_synthetic_data=True,
    )
    self.assertIsInstance(baseline_task_spec, baseline_task.BaselineTask)

  @parameterized.named_parameters(
      ('sequence_length0', 0),
      ('sequence_length_minus1', -1),
      ('sequence_length_minus5', -5),
  )
  def test_raises_on_bad_sequence_lengths(self, sequence_length):
    """Non-positive sequence lengths are rejected."""
    train_client_spec = client_spec.ClientSpec(
        num_epochs=2, batch_size=10, max_elements=3, shuffle_buffer_size=5
    )
    with self.assertRaises(ValueError):
      word_prediction_tasks.create_word_prediction_task(
          train_client_spec,
          vocab_size=10,
          sequence_length=sequence_length,
          use_synthetic_data=True,
      )

  @parameterized.named_parameters(
      ('vocab_size1', 1),
      ('vocab_size8', 8),
      ('vocab_size10', 10),
      ('vocab_size50', 50),
  )
  def test_constructs_with_different_vocab_sizes(self, vocab_size):
    """Any positive vocabulary size yields a valid task."""
    train_client_spec = client_spec.ClientSpec(
        num_epochs=2, batch_size=10, max_elements=3, shuffle_buffer_size=5
    )
    baseline_task_spec = word_prediction_tasks.create_word_prediction_task(
        train_client_spec, vocab_size=vocab_size, use_synthetic_data=True
    )
    self.assertIsInstance(baseline_task_spec, baseline_task.BaselineTask)

  @parameterized.named_parameters(
      ('vocab_size0', 0),
      ('vocab_size_minus1', -1),
      ('vocab_size_minus5', -5),
  )
  def test_raises_on_bad_vocab_size(self, vocab_size):
    """Non-positive vocabulary sizes are rejected."""
    train_client_spec = client_spec.ClientSpec(
        num_epochs=2, batch_size=10, max_elements=3, shuffle_buffer_size=5
    )
    with self.assertRaises(ValueError):
      word_prediction_tasks.create_word_prediction_task(
          train_client_spec, vocab_size=vocab_size, use_synthetic_data=True
      )

  def test_model_is_compatible_with_preprocessed_data(self):
    """The task's model accepts a batch from its own centralized data."""
    train_client_spec = client_spec.ClientSpec(num_epochs=1, batch_size=10)
    baseline_task_spec = word_prediction_tasks.create_word_prediction_task(
        train_client_spec, use_synthetic_data=True
    )
    centralized_dataset = (
        baseline_task_spec.datasets.get_centralized_test_data()
    )
    sample_batch = next(iter(centralized_dataset))
    model = baseline_task_spec.model_fn()
    model.forward_pass(sample_batch)
# Standard TensorFlow test entry point; configure the TFF synchronous local
# CPP execution context before running the test main.
if __name__ == '__main__':
  execution_contexts.set_sync_local_cpp_execution_context()
  tf.test.main()
4,578 | test osarch detail api redirect | import urllib.parse
from django.contrib.auth.models import User
from django.test import TestCase
from dfirtrack_main.models import Osarch
class OsarchAPIViewTestCase(TestCase):
    """osarch API view tests

    Covers authentication, the allowed/forbidden HTTP methods, and the
    trailing-slash redirects of both the list and detail endpoints.
    """

    @classmethod
    def setUpTestData(cls):
        # create object
        Osarch.objects.create(osarch_name='osarch_1')
        # create user
        User.objects.create_user(
            username='testuser_osarch_api', password='baxmijIgjTfCzy9w8lrF'
        )

    def test_osarch_list_api_unauthorized(self):
        """unauthorized access is forbidden"""
        # get response
        response = self.client.get('/api/osarch/')
        # compare
        self.assertEqual(response.status_code, 401)

    def test_osarch_list_api_method_get(self):
        """GET is allowed"""
        # login testuser
        self.client.login(
            username='testuser_osarch_api', password='baxmijIgjTfCzy9w8lrF'
        )
        # get response
        response = self.client.get('/api/osarch/')
        # compare
        self.assertEqual(response.status_code, 200)

    def test_osarch_list_api_method_post(self):
        """POST is allowed"""
        # login testuser
        self.client.login(
            username='testuser_osarch_api', password='baxmijIgjTfCzy9w8lrF'
        )
        # create POST string
        poststring = {"osarch_name": "osarch_2"}
        # get response
        response = self.client.post('/api/osarch/', data=poststring)
        # compare
        self.assertEqual(response.status_code, 201)

    def test_osarch_list_api_redirect(self):
        """test redirect with appending slash"""
        # login testuser
        self.client.login(
            username='testuser_osarch_api', password='baxmijIgjTfCzy9w8lrF'
        )
        # create url
        destination = urllib.parse.quote('/api/osarch/', safe='/')
        # get response
        response = self.client.get('/api/osarch', follow=True)
        # compare
        self.assertRedirects(
            response, destination, status_code=301, target_status_code=200
        )

    def test_osarch_detail_api_unauthorized(self):
        """unauthorized access is forbidden"""
        # get object
        osarch_api_1 = Osarch.objects.get(osarch_name='osarch_1')
        # get response
        response = self.client.get('/api/osarch/' + str(osarch_api_1.osarch_id) + '/')
        # compare
        self.assertEqual(response.status_code, 401)

    def test_osarch_detail_api_method_get(self):
        """GET is allowed"""
        # get object
        osarch_api_1 = Osarch.objects.get(osarch_name='osarch_1')
        # login testuser
        self.client.login(
            username='testuser_osarch_api', password='baxmijIgjTfCzy9w8lrF'
        )
        # get response
        response = self.client.get('/api/osarch/' + str(osarch_api_1.osarch_id) + '/')
        # compare
        self.assertEqual(response.status_code, 200)

    def test_osarch_detail_api_method_delete(self):
        """DELETE is forbidden"""
        # get object
        osarch_api_1 = Osarch.objects.get(osarch_name='osarch_1')
        # login testuser
        self.client.login(
            username='testuser_osarch_api', password='baxmijIgjTfCzy9w8lrF'
        )
        # get response
        response = self.client.delete(
            '/api/osarch/' + str(osarch_api_1.osarch_id) + '/'
        )
        # compare
        self.assertEqual(response.status_code, 405)

    def test_osarch_detail_api_method_put(self):
        """PUT is allowed"""
        # get object
        osarch_api_1 = Osarch.objects.get(osarch_name='osarch_1')
        # login testuser
        self.client.login(
            username='testuser_osarch_api', password='baxmijIgjTfCzy9w8lrF'
        )
        # create url
        destination = urllib.parse.quote(
            '/api/osarch/' + str(osarch_api_1.osarch_id) + '/', safe='/'
        )
        # create PUT string
        putstring = {"osarch_name": "osarch_3"}
        # get response
        response = self.client.put(
            destination, data=putstring, content_type='application/json'
        )
        # compare
        self.assertEqual(response.status_code, 200)

    def test_osarch_detail_api_redirect(self):
        """test redirect with appending slash"""
        # get object
        osarch_api_1 = Osarch.objects.get(osarch_name='osarch_1')
        # login testuser
        self.client.login(
            username='testuser_osarch_api', password='baxmijIgjTfCzy9w8lrF'
        )
        # create url
        destination = urllib.parse.quote(
            '/api/osarch/' + str(osarch_api_1.osarch_id) + '/', safe='/'
        )
        # get response
        response = self.client.get(
            '/api/osarch/' + str(osarch_api_1.osarch_id), follow=True
        )
        # compare
        self.assertRedirects(
            response, destination, status_code=301, target_status_code=200
        )
4,579 | test mapping hostname and authority | import json
import pytest
from tests.utils import compile_with_cachecheck
@pytest.mark.compilertest
def test_mapping_host_star_error():
    """A Mapping whose ``host`` is exactly "*" must be rejected with an error
    and must not produce a route group."""
    test_yaml = """
---
apiVersion: getambassador.io/v3alpha1
kind: Mapping
metadata:
  name: bad-mapping
  namespace: default
spec:
  host: "*"
  prefix: /star/
  service: star
"""

    r = compile_with_cachecheck(test_yaml, errors_ok=True)

    ir = r["ir"]

    # print(json.dumps(ir.aconf.errors, sort_keys=True, indent=4))

    errors = ir.aconf.errors["bad-mapping.default.1"]

    assert len(errors) == 1, f"Expected 1 error but got {len(errors)}"
    assert errors[0]["ok"] == False
    assert errors[0]["error"] == "host exact-match * contains *, which cannot match anything."

    # The rejected Mapping must not have generated any group for its prefix.
    for g in ir.groups.values():
        assert g.prefix != "/star/"

    # print(json.dumps(ir.as_dict(), sort_keys=True, indent=4))
@pytest.mark.compilertest
def test_mapping_host_authority_star_error():
    """A Mapping whose ``:authority`` header match is exactly "*" must be
    rejected with an error and must not produce a route group."""
    test_yaml = """
---
apiVersion: getambassador.io/v3alpha1
kind: Mapping
metadata:
  name: bad-mapping
  namespace: default
spec:
  headers:
    ":authority": "*"
  prefix: /star/
  service: star
"""

    r = compile_with_cachecheck(test_yaml, errors_ok=True)

    ir = r["ir"]

    # print(json.dumps(ir.aconf.errors, sort_keys=True, indent=4))

    errors = ir.aconf.errors["bad-mapping.default.1"]

    assert len(errors) == 1, f"Expected 1 error but got {len(errors)}"
    assert errors[0]["ok"] == False
    assert (
        errors[0]["error"] == ":authority exact-match '*' contains *, which cannot match anything."
    )

    # The rejected Mapping must not have generated any group for its prefix.
    for g in ir.groups.values():
        assert g.prefix != "/star/"

    # print(json.dumps(ir.as_dict(), sort_keys=True, indent=4))
@pytest.mark.compilertest
def test_mapping_host_ok():
    """A Mapping with a plain ``host`` compiles without errors and its group
    carries that host."""
    test_yaml = """
---
apiVersion: getambassador.io/v3alpha1
kind: Mapping
metadata:
  name: good-host-mapping
  namespace: default
spec:
  host: foo.example.com
  prefix: /wanted_group/
  service: star
"""

    r = compile_with_cachecheck(test_yaml, errors_ok=True)

    ir = r["ir"]
    errors = ir.aconf.errors

    assert len(errors) == 0, "Expected no errors but got %s" % (
        json.dumps(errors, sort_keys=True, indent=4)
    )

    # Exactly one group must exist for the prefix, with the host applied.
    found = 0

    for g in ir.groups.values():
        if g.prefix == "/wanted_group/":
            assert g.host == "foo.example.com"
            found += 1

    assert found == 1, "Expected 1 /wanted_group/ prefix, got %d" % found

    # print(json.dumps(ir.as_dict(), sort_keys=True, indent=4))
@pytest.mark.compilertest
def test_mapping_host_authority_ok():
    """An ``:authority`` header match behaves like ``host``: the group
    carries it as its host."""
    test_yaml = """
---
apiVersion: getambassador.io/v3alpha1
kind: Mapping
metadata:
  name: good-host-mapping
  namespace: default
spec:
  headers:
    ":authority": foo.example.com
  prefix: /wanted_group/
  service: star
"""

    r = compile_with_cachecheck(test_yaml, errors_ok=True)

    ir = r["ir"]
    errors = ir.aconf.errors

    assert len(errors) == 0, "Expected no errors but got %s" % (
        json.dumps(errors, sort_keys=True, indent=4)
    )

    # Exactly one group must exist for the prefix, with the authority applied.
    found = 0

    for g in ir.groups.values():
        if g.prefix == "/wanted_group/":
            assert g.host == "foo.example.com"
            found += 1

    assert found == 1, "Expected 1 /wanted_group/ prefix, got %d" % found

    # print(json.dumps(ir.as_dict(), sort_keys=True, indent=4))
@pytest.mark.compilertest
def test_mapping_host_authority_and_host():
    """When both ``host`` and an ``:authority`` header match are set,
    ``host`` wins."""
    test_yaml = """
---
apiVersion: getambassador.io/v3alpha1
kind: Mapping
metadata:
  name: good-host-mapping
  namespace: default
spec:
  headers:
    ":authority": bar.example.com
  host: foo.example.com
  prefix: /wanted_group/
  service: star
"""

    r = compile_with_cachecheck(test_yaml, errors_ok=True)

    ir = r["ir"]
    errors = ir.aconf.errors

    assert len(errors) == 0, "Expected no errors but got %s" % (
        json.dumps(errors, sort_keys=True, indent=4)
    )

    # The group must carry the host (not the :authority value).
    found = 0

    for g in ir.groups.values():
        if g.prefix == "/wanted_group/":
            assert g.host == "foo.example.com"
            found += 1

    assert found == 1, "Expected 1 /wanted_group/ prefix, got %d" % found

    # print(json.dumps(ir.as_dict(), sort_keys=True, indent=4))
@pytest.mark.compilertest
def test_mapping_hostname_ok():
    """A glob ``hostname`` is accepted and becomes the group's host."""
    test_yaml = """
---
apiVersion: getambassador.io/v3alpha1
kind: Mapping
metadata:
  name: good-hostname-mapping
  namespace: default
spec:
  hostname: "*.example.com"
  prefix: /wanted_group/
  service: star
"""

    r = compile_with_cachecheck(test_yaml, errors_ok=True)

    ir = r["ir"]
    errors = ir.aconf.errors

    assert len(errors) == 0, "Expected no errors but got %s" % (
        json.dumps(errors, sort_keys=True, indent=4)
    )

    # Exactly one group must exist for the prefix, with the glob hostname.
    found = 0

    for g in ir.groups.values():
        if g.prefix == "/wanted_group/":
            assert g.host == "*.example.com"
            found += 1

    assert found == 1, "Expected 1 /wanted_group/ prefix, got %d" % found

    # print(json.dumps(ir.as_dict(), sort_keys=True, indent=4))
@pytest.mark.compilertest
def test_mapping_hostname_and_host():
    """When both ``hostname`` and ``host`` are set, ``hostname`` wins."""
    test_yaml = """
---
apiVersion: getambassador.io/v3alpha1
kind: Mapping
metadata:
  name: hostname-and-host-mapping
  namespace: default
spec:
  host: foo.example.com
  hostname: "*.example.com"
  prefix: /wanted_group/
  service: star
"""

    r = compile_with_cachecheck(test_yaml, errors_ok=True)

    ir = r["ir"]
    errors = ir.aconf.errors

    assert len(errors) == 0, "Expected no errors but got %s" % (
        json.dumps(errors, sort_keys=True, indent=4)
    )

    # The group must carry the hostname glob, not the host.
    found = 0

    for g in ir.groups.values():
        if g.prefix == "/wanted_group/":
            assert g.host == "*.example.com"
            found += 1

    assert found == 1, "Expected 1 /wanted_group/ prefix, got %d" % found

    # print(json.dumps(ir.as_dict(), sort_keys=True, indent=4))
@pytest.mark.compilertest
def test_mapping_hostname_and_authority():
    """When both ``hostname`` and an ``:authority`` header match are set,
    ``hostname`` wins."""
    test_yaml = """
---
apiVersion: getambassador.io/v3alpha1
kind: Mapping
metadata:
  name: hostname-and-host-mapping
  namespace: default
spec:
  headers:
    ":authority": foo.example.com
  hostname: "*.example.com"
  prefix: /wanted_group/
  service: star
"""

    r = compile_with_cachecheck(test_yaml, errors_ok=True)

    ir = r["ir"]
    errors = ir.aconf.errors

    assert len(errors) == 0, "Expected no errors but got %s" % (
        json.dumps(errors, sort_keys=True, indent=4)
    )

    # The group must carry the hostname glob, not the :authority value.
    found = 0

    for g in ir.groups.values():
        if g.prefix == "/wanted_group/":
            assert g.host == "*.example.com"
            found += 1

    assert found == 1, "Expected 1 /wanted_group/ prefix, got %d" % found

    # print(json.dumps(ir.as_dict(), sort_keys=True, indent=4))
@pytest.mark.compilertest
def test_mapping_hostname_and_host_and_authority():
    """When ``hostname``, ``host``, and an ``:authority`` header match are all
    set, ``hostname`` wins."""
    test_yaml = """
---
apiVersion: getambassador.io/v3alpha1
kind: Mapping
metadata:
  name: hostname-and-host-mapping
  namespace: default
spec:
  headers:
    ":authority": bar.example.com
  host: foo.example.com
  hostname: "*.example.com"
  prefix: /wanted_group/
  service: star
"""

    r = compile_with_cachecheck(test_yaml, errors_ok=True)

    ir = r["ir"]
    errors = ir.aconf.errors

    assert len(errors) == 0, "Expected no errors but got %s" % (
        json.dumps(errors, sort_keys=True, indent=4)
    )

    # The group must carry the hostname glob, overriding host and :authority.
    found = 0

    for g in ir.groups.values():
        if g.prefix == "/wanted_group/":
            assert g.host == "*.example.com"
            found += 1

    assert found == 1, "Expected 1 /wanted_group/ prefix, got %d" % found

    # print(json.dumps(ir.as_dict(), sort_keys=True, indent=4))
4,580 | to pkg tuple | '''
dnfdragora is a graphical package management tool based on libyui python bindings
License: GPLv3
Author: Angelo Naselli <anaselli@linux.it>
@package dnfdragora
'''
# NOTE part of this code is imported from yumex-dnf
import time
import configparser
import gettext
import locale
import logging
import logging.handlers
import os.path
import re
import subprocess
import sys
import re
import dnfdaemon.client
logger = logging.getLogger('dnfdragora.misc')
class QueueEmptyError(Exception):
    """Raised when an operation is attempted on an empty task queue."""

    # No custom __init__: the previous no-op override added nothing over
    # Exception's default behavior.


class TransactionBuildError(Exception):
    """Raised when building a dnf transaction fails.

    ``msgs`` holds the error messages reported by the backend.
    """

    def __init__(self, msgs):
        # Pass msgs to Exception so str(e) carries the backend messages
        # instead of being empty.
        super(TransactionBuildError, self).__init__(msgs)
        self.msgs = msgs


class TransactionSolveError(Exception):
    """Raised when depsolving a dnf transaction fails.

    ``msgs`` holds the error messages reported by the backend.
    """

    def __init__(self, msgs):
        super(TransactionSolveError, self).__init__(msgs)
        self.msgs = msgs
def dbus_dnfsystem(cmd):
    """Invoke method ``cmd`` on the org.baseurl.DnfSystem D-Bus system service.

    NOTE(review): ``cmd`` is interpolated into a shell command line, so only
    trusted method names must ever be passed here.
    """
    subprocess.call(
        '/usr/bin/dbus-send --system --print-reply '
        '--dest=org.baseurl.DnfSystem / org.baseurl.DnfSystem.%s' % cmd,
        shell=True)
def to_pkg_tuple(pkg_id):
    """Find the real package nevra & repoid from a package pkg_id.

    A pkg_id is a comma-separated string of the six fields
    (name, epoch, version, release, arch, repo_id).
    """
    (n, e, v, r, a, repo_id) = str(pkg_id).split(',')
    return (n, e, v, r, a, repo_id)


def list_to_string(pkg_list, first_delimitier, delimiter):
    """Create a string from a list of package names.

    The result starts with ``first_delimitier`` and every package name is
    followed by ``delimiter`` (including the last one).
    """
    return first_delimitier + ''.join(
        pkg_name + delimiter for pkg_name in pkg_list
    )


def pkg_id_to_full_name(pkg_id):
    """Return the human-readable 'n-e:v-r.a' name for a package pkg_id.

    The epoch is omitted when it is missing or '0'.
    """
    (n, e, v, r, a, repo_id) = to_pkg_tuple(pkg_id)
    if e and e != '0':
        return "%s-%s:%s-%s.%s" % (n, e, v, r, a)
    else:
        return "%s-%s-%s.%s" % (n, v, r, a)
#def color_floats(spec):
#rgba = Gdk.RGBA()
#rgba.parse(spec)
#return rgba.red, rgba.green, rgba.blue
#def get_color(spec):
#rgba = Gdk.RGBA()
#rgba.parse(spec)
#return rgba
#def rgb_to_hex(r, g, b):
#if isinstance(r, float):
#r *= 255
#g *= 255
#b *= 255
#return "#{0:02X}{1:02X}{2:02X}".format(int(r), int(g), int(b))
#def color_to_hex(color):
#return rgb_to_hex(color.red, color.green, color.blue)
def is_url(url):
    """Return the URL matches found at the start of ``url``.

    The result is a (possibly empty) list, so it is truthy exactly when
    ``url`` begins with an http/https URL.
    """
    pattern = (
        r'^http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+~]|'
        r'[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
    )
    return re.findall(pattern, url)
def format_block(block, indent):
    '''Format a block of text so all lines share the same indentation.

    The first line is emitted unchanged; every following line is prefixed
    with ``indent`` spaces, and each line ends with a newline.
    '''
    pad = " " * indent
    first, *rest = str(block).split('\n')
    out = first + "\n"
    for line in rest:
        out += pad + line + '\n'
    return out
def parse_dbus_error():
    '''Parse (name, message) values from the current DBus related exception.

    Returns an ("", "") pair when the active exception message does not
    look like a GDBus error.
    '''
    # Raw string avoids the invalid-escape-sequence warning the old
    # non-raw pattern ('[\\w\\.]') triggers on modern Python.
    DBUS_ERR_RE = re.compile(r'.*GDBus.Error:([\w.]*): (.*)$')
    # Avoid shadowing the builtin `type`; only the exception value is used.
    _exc_type, value, _traceback = sys.exc_info()
    res = DBUS_ERR_RE.match(str(value))
    if res:
        return res.groups()
    return "", ""
def ExceptionHandler(func):
    """
    Decorator that catches dnfdragora backend (dnfdaemon) errors.

    On a DaemonError the exception is handed to ``args[0]``'s
    ``exception_handler`` method, so the wrapped callable must be a method.
    """
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except dnfdaemon.client.DaemonError as err:
            instance = args[0]  # the object that owns the wrapped method
            instance.exception_handler(err)

    wrapper.__name__ = func.__name__
    wrapper.__doc__ = func.__doc__
    wrapper.__dict__.update(func.__dict__)
    return wrapper
def TimeFunction(func):
    """
    Decorator that logs the wall-clock duration of the wrapped call.

    Durations of at least one millisecond are logged at debug level.
    """
    def wrapper(*args, **kwargs):
        started = time.monotonic()
        result = func(*args, **kwargs)
        elapsed = time.monotonic() - started
        if elapsed >= 0.001:
            logger.debug("%s took %.3f sec", func.__name__, elapsed)
        return result

    wrapper.__name__ = func.__name__
    wrapper.__doc__ = func.__doc__
    wrapper.__dict__.update(func.__dict__)
    return wrapper
def format_number(number, SI=0, space=' '):
    """Turn numbers into human-readable metric-like numbers.

    ``SI`` selects 1000-based scaling instead of 1024-based; ``space``
    separates the value from the unit prefix.
    """
    symbols = ('',   # (none)
               'k',  # kilo
               'M',  # mega
               'G',  # giga
               'T',  # tera
               'P',  # peta
               'E',  # exa
               'Z',  # zetta
               'Y')  # yotta
    step = 1000.0 if SI else 1024.0
    thresh = 999
    max_depth = len(symbols) - 1

    # Scale the value down until it fits below the threshold; if we run out
    # of prefixes the number simply prints longer but still correct.
    depth = 0
    while number > thresh and depth < max_depth:
        depth += 1
        number = number / step

    if isinstance(number, int):
        # Never divided, so the value is already short enough.
        fmt = '%i%s%s'
    elif number < 9.95:
        # 9.95 (not 9.99) keeps the rounded '%.1f' output within width.
        fmt = '%.1f%s%s'
    else:
        fmt = '%.0f%s%s'

    return fmt % (float(number or 0), space, symbols[depth])
def format_size(number):
    """Turn a byte count into a right-aligned kilobyte string."""
    kilobytes = number / 1024.0
    return '%10.1f%s' % (float(kilobytes or 0), "K")
def logger_setup(file_name='dnfdragora.log',
                 logroot='dnfdragora',
                 logfmt='%(asctime)s: %(message)s',
                 loglvl=logging.INFO):
    """Setup Python logging.

    Installs a size-capped rotating file handler and configures the root
    logger to write to the same file.

    NOTE(review): ``logroot`` and ``logfmt`` are currently unused --
    basicConfig below hardcodes its own format string; confirm before
    relying on them.
    """
    maxbytes=10*1024*1024
    handler = logging.handlers.RotatingFileHandler(
        file_name, maxBytes=maxbytes, backupCount=5)
    # NOTE(review): basicConfig installs its own FileHandler for the same
    # file AND `handler` is added to this module's logger -- records from
    # `logger` presumably appear twice; verify this is intended.
    logging.basicConfig(filename=file_name, format='%(asctime)s [%(name)s]{%(filename)s:%(lineno)d}(%(levelname)s) %(message)s', level=loglvl)
    logger.addHandler(handler)
    #logger = logging.getLogger(logroot)
    #logger.setLevel(loglvl)
    #formatter = logging.Formatter(logfmt, '%H:%M:%S')
    #handler = logging.FileHandler(filename)
    #handler.setFormatter(formatter)
    #handler.propagate = False
    #logger.addHandler(handler)
4,581 | test lifecycle | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from sparseml.pytorch.sparsification import SetWeightDecayModifier
from tests.sparseml.pytorch.helpers import ConvNet, create_optim_adam, create_optim_sgd
from tests.sparseml.pytorch.sparsification.test_modifier import ScheduledModifierTest
from tests.sparseml.pytorch.helpers import ( # noqa isort:skip
test_epoch,
test_loss,
test_steps_per_epoch,
)
# Factory callables (rather than shared instances) so each parametrized test
# run receives a fresh, unmodified SetWeightDecayModifier.
OPTIMIZER_MODIFIERS = [
    lambda: SetWeightDecayModifier(
        weight_decay=0.999,
        start_epoch=2.0,
        constant_logging=False,
        param_groups=[0],
    ),
    lambda: SetWeightDecayModifier(
        weight_decay=0.75,
        start_epoch=0.0,
        constant_logging=False,
        param_groups=None,
    ),
]
@pytest.mark.skipif(
    os.getenv("NM_ML_SKIP_PYTORCH_TESTS", False),
    reason="Skipping pytorch tests",
)
@pytest.mark.parametrize("modifier_lambda", OPTIMIZER_MODIFIERS, scope="function")
@pytest.mark.parametrize("model_lambda", [ConvNet], scope="function")
@pytest.mark.parametrize(
    "optim_lambda", [create_optim_sgd, create_optim_adam], scope="function"
)
class TestSetWeightDecayModifierImpl(ScheduledModifierTest):
    """Lifecycle tests for SetWeightDecayModifier against real optimizers."""

    def test_lifecycle(
        self,
        modifier_lambda,
        model_lambda,
        optim_lambda,
        test_steps_per_epoch,  # noqa: F811
    ):
        """Weight decay is untouched before the start epoch and applied to
        the targeted param groups once the start epoch is reached."""
        modifier = modifier_lambda()
        model = model_lambda()
        optimizer = optim_lambda(model)
        self.initialize_helper(modifier, model)

        # Param groups the modifier should touch (all of them when None).
        param_group_idxs = (
            modifier.param_groups
            if modifier.param_groups
            else list(range(len(optimizer.param_groups)))
        )

        # test pre start epoch: no update should be ready yet
        for epoch in range(int(modifier.start_epoch)):
            assert not modifier.update_ready(epoch, test_steps_per_epoch)

        for idx in param_group_idxs:
            assert optimizer.param_groups[idx]["weight_decay"] != modifier.weight_decay

        # test start epoch + update: the new weight decay must be applied
        assert modifier.update_ready(modifier.start_epoch, test_steps_per_epoch)
        modifier.scheduled_update(
            model, optimizer, modifier.start_epoch, test_steps_per_epoch
        )

        for idx in param_group_idxs:
            assert optimizer.param_groups[idx]["weight_decay"] == modifier.weight_decay
@pytest.mark.skipif(
    os.getenv("NM_ML_SKIP_PYTORCH_TESTS", False),
    reason="Skipping pytorch tests",
)
def test_set_weight_decay_modifier_yaml():
    """SetWeightDecayModifier built from yaml, from its serialized form, and
    from the constructor must all agree on every configured attribute."""
    weight_decay = 0.0
    start_epoch = 2.0
    param_groups = [0]
    constant_logging = False
    yaml_str = """
    !SetWeightDecayModifier
        weight_decay: {weight_decay}
        start_epoch: {start_epoch}
        constant_logging: {constant_logging}
        param_groups: {param_groups}
    """.format(
        weight_decay=weight_decay,
        start_epoch=start_epoch,
        constant_logging=constant_logging,
        param_groups=param_groups,
    )
    # Round-trip: load from yaml, then re-load from the serialized modifier.
    yaml_modifier = SetWeightDecayModifier.load_obj(
        yaml_str
    )  # type: SetWeightDecayModifier
    serialized_modifier = SetWeightDecayModifier.load_obj(
        str(yaml_modifier)
    )  # type: SetWeightDecayModifier
    obj_modifier = SetWeightDecayModifier(
        weight_decay=weight_decay,
        start_epoch=start_epoch,
        constant_logging=constant_logging,
        param_groups=param_groups,
    )

    assert isinstance(yaml_modifier, SetWeightDecayModifier)
    assert (
        yaml_modifier.weight_decay
        == serialized_modifier.weight_decay
        == obj_modifier.weight_decay
    )
    assert (
        yaml_modifier.start_epoch
        == serialized_modifier.start_epoch
        == obj_modifier.start_epoch
    )
    assert (
        yaml_modifier.constant_logging
        == serialized_modifier.constant_logging
        == obj_modifier.constant_logging
    )
    assert (
        yaml_modifier.param_groups
        == serialized_modifier.param_groups
        == obj_modifier.param_groups
    )
4,582 | test run tests | """Unit tests for PyTest runner."""
import os
import collections
import pytest
from testplan.report import TestCaseReport
from testplan.testing import py_test as pytest_runner
from testplan import defaults
from testplan import report
from tests.unit.testplan.testing import pytest_expected_data
@pytest.fixture
def pytest_test_inst(repo_root_path, root_directory):
    """Return a PyTest test instance, with the example tests as its target."""
    # For testing purposes, we want to run the pytest example at
    # examples/PyTest/pytest_tests.py.
    example_path = os.path.join(
        repo_root_path, "examples", "PyTest", "pytest_tests.py"
    )
    # Anchor pytest's rootdir at the common prefix so collected test ids are
    # stable regardless of the invocation directory.
    rootdir = os.path.commonprefix([root_directory, os.getcwd()])
    # We need to explicitly set the stdout_style in UT, normally it is inherited
    # from the parent object but that doesn't work when testing PyTest in
    # isolation.
    return pytest_runner.PyTest(
        name="My PyTest",
        description="PyTest example test",
        target=example_path,
        stdout_style=defaults.STDOUT_STYLE,
        extra_args=["--rootdir", rootdir],
    )
def test_dry_run(pytest_test_inst):
    """
    Test the dry_run() method returns the expected report skeleton.
    """
    # A dry run produces the suite/testcase skeleton without executing tests.
    result = pytest_test_inst.dry_run()
    report = result.report
    assert report == pytest_expected_data.EXPECTED_DRY_RUN_REPORT
def test_run_tests(pytest_test_inst):
    """Test running all tests in batch mode."""
    pytest_test_inst.setup()
    pytest_test_inst.run_tests()

    # The example suite contains deliberately failing testcases, so the
    # overall report status must be FAILED.
    assert pytest_test_inst.report.status == report.Status.FAILED

    _check_attachements(
        pytest_test_inst.result.report["pytest_tests.py::TestWithAttachments"][
            "test_attachment"
        ]
    )
    _check_all_testcounts(pytest_test_inst.report.counter)
def test_run_testcases_iter_all(pytest_test_inst):
    """Test running all tests iteratively."""
    all_results = list(pytest_test_inst.run_testcases_iter())
    # First item is the runtime-status update; the rest are testcase reports.
    assert len(all_results) == 14

    report_attributes, current_uids = all_results[0]
    assert current_uids == ["My PyTest"]
    assert report_attributes["runtime_status"] == report.RuntimeStatus.RUNNING

    counter = collections.Counter()
    for testcase_report, _ in all_results[1:]:
        counter[testcase_report.status] += 1
    _check_all_testcounts(counter)

    testcase_report, _ = all_results[7]
    _check_attachements(testcase_report)
def test_run_testcases_iter_testsuite(pytest_test_inst):
    """Test running a single testsuite iteratively."""
    all_results = list(
        pytest_test_inst.run_testcases_iter(
            testsuite_pattern="pytest_tests.py::TestPytestBasics"
        )
    )
    # One runtime-status item plus the five TestPytestBasics testcases.
    assert len(all_results) == 6

    report_attributes, current_uids = all_results[0]
    assert current_uids == ["My PyTest", "pytest_tests.py::TestPytestBasics"]
    assert report_attributes["runtime_status"] == report.RuntimeStatus.RUNNING

    counter = collections.Counter()
    for testcase_report, _ in all_results[1:]:
        counter[testcase_report.status] += 1
        counter["total"] += 1

    assert counter["total"] == 5
    assert counter["passed"] == 4
    assert counter["failed"] == 1
    assert counter["skipped"] == 0
def test_run_testcases_iter_testcase(pytest_test_inst):
    """Test running a single testcase iteratively."""
    all_results = list(
        pytest_test_inst.run_testcases_iter(
            testsuite_pattern="pytest_tests.py::TestPytestBasics",
            testcase_pattern="test_success",
        )
    )
    # One runtime-status item plus the single selected testcase.
    assert len(all_results) == 2

    report_attributes, current_uids = all_results[0]
    assert current_uids == [
        "My PyTest",
        "pytest_tests.py::TestPytestBasics",
        "test_success",
    ]
    assert report_attributes["runtime_status"] == report.RuntimeStatus.RUNNING

    testcase_report, parent_uids = all_results[1]
    assert testcase_report.status == report.Status.PASSED
    assert parent_uids == ["My PyTest", "pytest_tests.py::TestPytestBasics"]
def test_run_testcases_iter_param(pytest_test_inst):
    """Test running all parametrizations of a testcase iteratively."""
    all_results = list(
        pytest_test_inst.run_testcases_iter(
            testsuite_pattern="pytest_tests.py::TestPytestBasics",
            testcase_pattern="test_parametrization",
        )
    )
    # One runtime-status item plus the three parametrizations.
    assert len(all_results) == 4

    report_attributes, current_uids = all_results[0]
    assert current_uids == [
        "My PyTest",
        "pytest_tests.py::TestPytestBasics",
        "test_parametrization",
    ]
    assert report_attributes["runtime_status"] == report.RuntimeStatus.RUNNING

    counter = collections.Counter()
    for testcase_report, parent_uids in all_results[1:]:
        # Every parametrization reports under the same parent chain.
        assert parent_uids == [
            "My PyTest",
            "pytest_tests.py::TestPytestBasics",
            "test_parametrization",
        ]
        counter[testcase_report.status] += 1
        counter["total"] += 1

    assert counter["total"] == 3
    assert counter["passed"] == 3
    assert counter["failed"] == 0
    assert counter["skipped"] == 0
def test_capture_stdout(pytest_test_inst):
    """Test that stdout printed by a testcase is captured in its report."""
    all_results = list(
        pytest_test_inst.run_testcases_iter(
            testsuite_pattern="pytest_tests.py::TestPytestBasics",
            testcase_pattern="test_failure",
        )
    )
    assert all_results[0][0]["runtime_status"] == report.RuntimeStatus.RUNNING
    # The second entry of the testcase report holds the captured stdout.
    assert all_results[1][0].entries[1]["message"] == "test output\n"
def _check_attachements(report: TestCaseReport):
    """Assert the report carries exactly the one expected example attachment."""
    # NOTE(review): "attachements" is a typo, but the name is referenced by
    # the tests above, so it is kept for compatibility.
    assert len(report.attachments) == 1
    assert report.attachments[0].description == "example attachment"
def _check_all_testcounts(counter):
    """Check the pass/fail/skip counts after running all tests."""
    # One testcase is conditionally skipped when not running on a posix OS,
    # so the expected pass/skip split depends on the current platform.
    expected_passed, expected_skipped = (8, 1) if os.name == "posix" else (7, 2)
    assert counter["passed"] == expected_passed
    assert counter["skipped"] == expected_skipped
    assert counter["failed"] == 4
4,583 | connect child emitters | """MutableMapping that emits events when altered."""
from typing import Mapping, Optional, Sequence, Type, Union
from napari.utils.events.containers._dict import _K, _T, TypedMutableMapping
from napari.utils.events.event import EmitterGroup, Event
from napari.utils.events.types import SupportsEvents
class EventedDict(TypedMutableMapping[_K, _T]):
    """Mutable dictionary that emits events when altered.

    This class is designed to behave exactly like builtin ``dict``, but
    will emit events before and after all mutations (addition, removal, and
    changing).

    Parameters
    ----------
    data : Mapping, optional
        Dictionary to initialize the class with.
    basetype : type of sequence of types, optional
        Type of the element in the dictionary.

    Events
    ------
    changed (key: K, old_value: T, value: T)
        emitted when item at ``key`` is changed from ``old_value`` to ``value``
    adding (key: K)
        emitted before an item is added to the dictionary with ``key``
    added (key: K, value: T)
        emitted after ``value`` was added to the dictionary with ``key``
    removing (key: K)
        emitted before ``key`` is removed from the dictionary
    removed (key: K, value: T)
        emitted after ``key`` was removed from the dictionary
    updated (key, K, value: T)
        emitted after ``value`` of ``key`` was changed. Only implemented by
        subclasses to give them an option to trigger some update after ``value``
        was changed and this class did not register it. This can be useful if
        the ``basetype`` is not an evented object.
    """

    # Emitter group carrying all of the per-mutation event emitters.
    events: EmitterGroup

    def __init__(
        self,
        data: Optional[Mapping[_K, _T]] = None,
        basetype: Union[Type[_T], Sequence[Type[_T]]] = (),
    ) -> None:
        _events = {
            "changing": None,
            "changed": None,
            "adding": None,
            "added": None,
            "removing": None,
            "removed": None,
            "updated": None,
        }
        # For inheritance: If the mro already provides an EmitterGroup, add...
        if hasattr(self, "events") and isinstance(self.events, EmitterGroup):
            self.events.add(**_events)
        else:
            # otherwise create a new one
            self.events = EmitterGroup(
                source=self, auto_connect=False, **_events
            )
        super().__init__(data, basetype)

    def __setitem__(self, key: _K, value: _T):
        old = self._dict.get(key)
        # Skip the mutation entirely when the value is unchanged.
        if value is old or value == old:
            return
        # NOTE(review): ``old is None`` is used to mean "key absent"; a key
        # whose stored value is literally None would emit adding/added
        # instead of changed — confirm this is intended.
        if old is None:
            self.events.adding(key=key)
            super().__setitem__(key, value)
            self.events.added(key=key, value=value)
            # Re-emit the new child's own events through this dict.
            self.METHOD_NAME(value)
        else:
            super().__setitem__(key, value)
            self.events.changed(key=key, old_value=old, value=value)

    def __delitem__(self, key: _K):
        # Emit "removing" before mutation, detach child listeners, then
        # emit "removed" with the popped value.
        self.events.removing(key=key)
        self._disconnect_child_emitters(self[key])
        item = self._dict.pop(key)
        self.events.removed(key=key, value=item)

    def _reemit_child_event(self, event: Event):
        """An item in the dict emitted an event. Re-emit with key"""
        if not hasattr(event, "key"):
            event.key = self.key(event.source)
        # re-emit with this object's EventEmitter
        self.events(event)

    def _disconnect_child_emitters(self, child: _T):
        """Disconnect all events from the child from the re-emitter."""
        if isinstance(child, SupportsEvents):
            child.events.disconnect(self._reemit_child_event)

    def METHOD_NAME(self, child: _T):
        """Connect all events from the child to be re-emitted."""
        if isinstance(child, SupportsEvents):
            # make sure the event source has been set on the child
            if child.events.source is None:
                child.events.source = child
            child.events.connect(self._reemit_child_event)

    def key(self, value: _T):
        """Return first instance of value."""
        for k, v in self._dict.items():
            if v is value or v == value:
                return k
        return None
4,584 | test model fit tensor board epoch level | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `DatasetCreator` with `Model.fit` across usages and strategies."""
import tensorflow.compat.v2 as tf
from keras import callbacks as callbacks_lib
from keras.distribute import dataset_creator_model_fit_test_base as test_base
from keras.distribute import strategy_combinations
from keras.testing_infra import test_utils
@test_utils.run_v2_only
@tf.__internal__.distribute.combinations.generate(
    tf.__internal__.test.combinations.combine(
        strategy=strategy_combinations.parameter_server_strategies_multi_worker,
        use_dataset_creator=[True, False],
        mode="eager",
    )
)
class DatasetCreatorModelFitParameterServerStrategyOnlyTest(
    test_base.DatasetCreatorModelFitTestBase
):
    """`Model.fit` behaviors specific to `ParameterServerStrategy`."""

    def testModelFitWithRunEagerly(self, strategy, use_dataset_creator):
        # run_eagerly=True must be rejected under ParameterServerStrategy.
        with self.assertRaisesRegex(
            ValueError,
            "When using `Model` with `ParameterServerStrategy`, "
            "`run_eagerly` is not supported.",
        ):
            self._model_fit(
                strategy,
                run_eagerly=True,
                use_dataset_creator=use_dataset_creator,
            )

    def testModelPredict(self, strategy, use_dataset_creator):
        # predict() takes a plain dataset, so the DatasetCreator variant
        # of this combination is redundant.
        if use_dataset_creator:
            self.skipTest("Unused option.")
        model, _ = self._model_compile(strategy)
        test_data = (
            tf.data.Dataset.from_tensor_slices(
                [[1.0], [2.0], [3.0], [1.0], [5.0], [1.0]]
            )
            .repeat()
            .batch(2)
        )
        model.predict(x=test_data, steps=3)

    def testClusterCoordinatorSingleInstance(
        self, strategy, use_dataset_creator
    ):
        # Creating a second ClusterCoordinator for the same strategy must
        # return the very same instance the strategy already holds.
        model = self._model_fit(
            strategy, use_dataset_creator=use_dataset_creator
        )
        strategy = model.distribute_strategy
        self.assertIs(
            strategy._cluster_coordinator,
            tf.distribute.experimental.coordinator.ClusterCoordinator(strategy),
        )

    def testModelFitErrorOnBatchLevelCallbacks(
        self, strategy, use_dataset_creator
    ):
        # Callbacks overriding batch-level hooks are rejected by fit().
        class BatchLevelCallback(callbacks_lib.Callback):
            def on_train_batch_end(self, batch, logs=None):
                pass

        with self.assertRaisesRegex(
            ValueError, "Batch-level `Callback`s are not supported"
        ):
            callbacks = [BatchLevelCallback()]
            self._model_fit(
                strategy,
                callbacks=callbacks,
                use_dataset_creator=use_dataset_creator,
            )

    def testModelFitCallbackSupportsTFLogs(self, strategy, use_dataset_creator):
        # A callback declaring _supports_tf_logs receives the raw
        # RemoteValue instead of materialized logs.
        class MyCallback(callbacks_lib.Callback):
            def __init__(self):
                super().__init__()
                # Fetches the RemoteValues if necessary.
                self._supports_tf_logs = True

            def on_train_batch_end(self, batch, logs=None):
                assert isinstance(
                    logs, tf.distribute.experimental.coordinator.RemoteValue
                )

        my_callback = MyCallback()
        callbacks = [my_callback]
        self._model_fit(
            strategy,
            callbacks=callbacks,
            use_dataset_creator=use_dataset_creator,
        )

    def testModelFitVerbosity(self, strategy, use_dataset_creator):
        class MyCallback(callbacks_lib.Callback):
            pass

        my_callback = MyCallback()
        callbacks = [my_callback]
        self._model_fit(
            strategy,
            callbacks=callbacks,
            use_dataset_creator=use_dataset_creator,
        )
        # PSStrategy should default to epoch-level logging.
        self.assertEqual(my_callback.params["verbose"], 2)

    def METHOD_NAME(self, strategy, use_dataset_creator):
        # TensorBoard is allowed (epoch-level) and must produce log files.
        log_dir = self.get_temp_dir()
        callbacks = [callbacks_lib.TensorBoard(log_dir)]
        self._model_fit(
            strategy,
            callbacks=callbacks,
            use_dataset_creator=use_dataset_creator,
        )
        self.assertTrue(tf.compat.v1.gfile.Exists(log_dir))
        files = tf.compat.v1.gfile.ListDirectory(log_dir)
        self.assertGreaterEqual(len(files), 1)

    def testModelFitVerbose1(self, strategy, use_dataset_creator):
        # Per-batch progress bars (verbose=1) are explicitly disallowed.
        with self.assertRaisesRegex(
            ValueError,
            "`verbose=1` is not allowed with "
            "`ParameterServerStrategy` for performance "
            "reasons. Received: verbose=1",
        ):
            self._model_fit(
                strategy, use_dataset_creator=use_dataset_creator, verbose=1
            )

    def testModelEvaluateErrorOnBatchLevelCallbacks(
        self, strategy, use_dataset_creator
    ):
        # Same batch-level-callback restriction applies to evaluate().
        class BatchLevelCallback(callbacks_lib.Callback):
            def on_train_batch_end(self, batch, logs=None):
                pass

        with self.assertRaisesRegex(
            ValueError, "Batch-level `Callback`s are not supported"
        ):
            callbacks = [BatchLevelCallback()]
            self._model_evaluate(
                strategy,
                callbacks=callbacks,
                use_dataset_creator=use_dataset_creator,
            )

    def testClusterCoordinatorSingleInstanceWithJitCompileTrue(
        self, strategy, use_dataset_creator
    ):
        # Same single-instance guarantee holds when jit_compile=True.
        model = self._model_fit(
            strategy, use_dataset_creator=use_dataset_creator, jit_compile=True
        )
        strategy = model.distribute_strategy
        self.assertIs(
            strategy._cluster_coordinator,
            tf.distribute.experimental.coordinator.ClusterCoordinator(strategy),
        )
if __name__ == "__main__":
    # Multi-process test entry point required by the PS-strategy combinations.
    tf.__internal__.distribute.multi_process_runner.test_main()
4,585 | add slave | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006-2015 (ita)
"""
Instead of compiling object files one by one, c/c++ compilers are often able to compile at once:
cc -c ../file1.c ../file2.c ../file3.c
Files are output on the directory where the compiler is called, and dependencies are more difficult
to track (do not run the command on all source files if only one file changes)
As such, we do as if the files were compiled one by one, but no command is actually run:
replace each cc/cpp Task by a TaskSlave. A new task called TaskMaster collects the
signatures from each slave and finds out the command-line to run.
Just import this module to start using it:
def build(bld):
bld.load('batched_cc')
Note that this is provided as an example, unity builds are recommended
for best performance results (fewer tasks and fewer jobs to execute).
See waflib/extras/unity.py.
"""
from waflib import Task, Utils
from waflib.TaskGen import extension, feature, after_method
from waflib.Tools import c, cxx
MAX_BATCH = 50
c_str = '${CC} ${ARCH_ST:ARCH} ${CFLAGS} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${tsk.batch_incpaths()} ${DEFINES_ST:DEFINES} -c ${SRCLST} ${CXX_TGT_F_BATCHED} ${CPPFLAGS}'
c_fun, _ = Task.compile_fun_noshell(c_str)
cxx_str = '${CXX} ${ARCH_ST:ARCH} ${CXXFLAGS} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${tsk.batch_incpaths()} ${DEFINES_ST:DEFINES} -c ${SRCLST} ${CXX_TGT_F_BATCHED} ${CPPFLAGS}'
cxx_fun, _ = Task.compile_fun_noshell(cxx_str)
count = 70000
class batch(Task.Task):
    """Master task that compiles all of its slave sources in one compiler run."""
    color = 'PINK'
    # Run after the (now no-op) per-file compile tasks, before linking.
    after = ['c', 'cxx']
    before = ['cprogram', 'cshlib', 'cstlib', 'cxxprogram', 'cxxshlib', 'cxxstlib']

    def uid(self):
        # Include the generator identity so distinct batches never collide.
        return Utils.h_list([Task.Task.uid(self), self.generator.idx, self.generator.path.abspath(), self.generator.target])

    def __str__(self):
        return 'Batch compilation for %d slaves' % len(self.slaves)

    def __init__(self, *k, **kw):
        Task.Task.__init__(self, *k, **kw)
        self.slaves = []
        self.inputs = []
        self.hasrun = 0
        # Unique per-batch index; used to name the per-batch output directory.
        global count
        count += 1
        self.idx = count

    def METHOD_NAME(self, slave):
        """Register a per-file compile task whose object this batch produces."""
        self.slaves.append(slave)
        self.set_run_after(slave)

    def runnable_status(self):
        # Wait for all predecessors; run only if at least one slave
        # actually needs recompiling (was not skipped).
        for t in self.run_after:
            if not t.hasrun:
                return Task.ASK_LATER
        for t in self.slaves:
            #if t.executed:
            if t.hasrun != Task.SKIPPED:
                return Task.RUN_ME
        return Task.SKIP_ME

    def get_cwd(self):
        # The compiler drops object files into the cwd, so run it there.
        return self.slaves[0].outputs[0].parent

    def batch_incpaths(self):
        # Include paths expanded for the command-line template above.
        st = self.env.CPPPATH_ST
        return [st % node.abspath() for node in self.generator.includes_nodes]

    def run(self):
        # Collect only the slaves that were not skipped and compile their
        # sources in a single compiler invocation.
        self.outputs = []
        srclst = []
        slaves = []
        for t in self.slaves:
            if t.hasrun != Task.SKIPPED:
                slaves.append(t)
                srclst.append(t.inputs[0].abspath())
        self.env.SRCLST = srclst
        # Pick the C or C++ command line based on the slave task class.
        if self.slaves[0].__class__.__name__ == 'c':
            ret = c_fun(self)
        else:
            ret = cxx_fun(self)
        if ret:
            return ret
        # Perform the deferred post_run (signatures etc.) for each slave.
        for t in slaves:
            t.old_post_run()
def hook(cls_type):
    """Build an extension hook creating a slave task of *cls_type* per source
    file and attaching it to a per-directory batch master (max MAX_BATCH)."""
    def n_hook(self, node):
        ext = '.obj' if self.env.CC_NAME == 'msvc' else '.o'
        name = node.name
        k = name.rfind('.')
        if k >= 0:
            basename = name[:k] + ext
        else:
            basename = name + ext
        # Each batch gets its own build subdirectory named after its index.
        outdir = node.parent.get_bld().make_node('%d' % self.idx)
        outdir.mkdir()
        out = outdir.find_or_declare(basename)
        task = self.create_task(cls_type, node, out)
        try:
            self.compiled_tasks.append(task)
        except AttributeError:
            self.compiled_tasks = [task]
        if not getattr(self, 'masters', None):
            self.masters = {}
            self.allmasters = []
        def fix_path(tsk):
            # msvc needs /Fo<dir>\ to redirect object output to the batch dir.
            if self.env.CC_NAME == 'msvc':
                tsk.env.append_unique('CXX_TGT_F_BATCHED', '/Fo%s\\' % outdir.abspath())
        # One master per source directory; start a new one past MAX_BATCH.
        if not node.parent in self.masters:
            m = self.masters[node.parent] = self.master = self.create_task('batch')
            fix_path(m)
            self.allmasters.append(m)
        else:
            m = self.masters[node.parent]
            if len(m.slaves) > MAX_BATCH:
                m = self.masters[node.parent] = self.master = self.create_task('batch')
                fix_path(m)
                self.allmasters.append(m)
        m.METHOD_NAME(task)
        return task
    return n_hook
# Register the batched hooks for all C and C++ source file extensions.
extension('.c')(hook('c'))
extension('.cpp','.cc','.cxx','.C','.c++')(hook('cxx'))
@feature('cprogram', 'cshlib', 'cstaticlib', 'cxxprogram', 'cxxshlib', 'cxxstlib')
@after_method('apply_link')
def link_after_masters(self):
    """Ensure the link task waits for every batch master to finish."""
    if getattr(self, 'allmasters', None):
        for m in self.allmasters:
            self.link_task.set_run_after(m)
# Modify the c and cxx task classes - in theory it would be best to
# create subclasses and to re-map the c/c++ extensions
for x in ('c', 'cxx'):
    t = Task.classes[x]
    def run(self):
        # Slaves do nothing themselves; the batch master runs the compiler.
        pass
    def post_run(self):
        # Deferred: the master calls old_post_run() once the batch completes.
        pass
    # Keep the originals so the batch master can invoke them later.
    setattr(t, 'oldrun', getattr(t, 'run', None))
    setattr(t, 'run', run)
    setattr(t, 'old_post_run', t.post_run)
    setattr(t, 'post_run', post_run)
|
4,586 | savetxt | from collections import defaultdict
import numpy as np
import sys
#from A, except some functions
class DistanceMatrix:
    """Pairwise distance matrix between the leaves of a phylogenetic tree.

    Wraps a square numpy array ``self.D`` and provides limb-length
    computation, additive phylogeny reconstruction and UPGMA clustering.
    """

    def __init__(self, *args):
        # Store as a numpy array so rows/columns can be sliced.
        self.D = np.array(*args)
        return

    def __str__(self):
        return str([[float(a) for a in x] for x in self.D])

    def __repr__(self):
        return type(self).__name__ + "(" + str([[float(a) for a in x] for x in self.D]) + ")"

    @staticmethod
    def loadtxt(file_name, dtype=None, comments='#', delimiter=None, converters=None, skiprows=0, usecols=None, unpack=False, ndmin=0):
        """Load a matrix from a text file (thin wrapper over numpy.loadtxt)."""
        D = np.loadtxt(file_name, dtype, comments, delimiter, converters, skiprows, usecols, unpack, ndmin)
        return DistanceMatrix(D)

    def METHOD_NAME(self, output_file, fmt='%.18e', delimiter=' ', newline='\n', header='', footer='', comments='# '):
        """Write the matrix to a text file (thin wrapper over numpy.savetxt)."""
        np.METHOD_NAME(output_file, self.D, fmt, delimiter, newline, header, footer, comments)
        return

    def nr_leaves(self):
        """Number of leaves (rows) in the matrix."""
        return len(self.D)

    @staticmethod
    def limb_length_from_matrix(D, j):
        """Length of the limb connecting leaf ``j`` to its parent in a tree
        fitting the (indexable, square) distance matrix ``D``.

        Uses the limb-length theorem:
        min over i, k != j of (D[i][j] + D[j][k] - D[i][k]) / 2.
        """
        n = len(D)
        minimum = sys.maxsize
        for i in range(n):
            if i != j:
                for k in range(n):
                    if k != j:
                        minimum = min(minimum, (D[i][j] + D[j][k] - D[i][k]) / 2)
        return minimum

    def limb_length(self, j):
        """Limb length of leaf ``j`` of this matrix."""
        assert(j < self.nr_leaves())
        return DistanceMatrix.limb_length_from_matrix(self.D, j)

    # from B
    def additive_phylogeny(self):
        """Reconstruct the unique unrooted tree fitting this additive matrix.

        BUG FIX: the original referenced the non-existent ``self.matrix``
        and relied on an undefined ``limb_length_from_matrix`` helper (now
        provided above). Works on a float copy so ``self.D`` is not mutated.
        """
        mat = np.array(self.D, dtype=float)
        self.max_node = len(mat)
        return UnrootedTree(*sorted(self.additive_phylogeny_recursive(mat, len(mat))))

    # from B
    def additive_phylogeny_recursive(self, mat, n):
        """Recursive worker for additive_phylogeny; may mutate ``mat``."""
        if n == 2:
            return [(0, 1, mat[0][1])]
        # Trim the limb of the last leaf, solve the smaller problem, then
        # re-attach the leaf at distance x along the i..k path.
        limb_size = DistanceMatrix.limb_length_from_matrix(mat[:n, :n], n - 1)
        for j in range(n - 1):
            mat[n - 1][j] = mat[n - 1][j] - limb_size
            mat[j][n - 1] = mat[n - 1][j]
        for i in range(n - 1):
            found = False
            for k in range(i, n - 1):
                if mat[i][k] == mat[i][n - 1] + mat[k][n - 1]:
                    found = True
                    break
            if found:
                break
        x = mat[i][n - 1]
        tree_list = self.additive_phylogeny_recursive(mat.copy(), n - 1)
        tree = UnrootedTree(*tree_list)
        path = tree.path(i, k)
        for j in range(1, len(path)):
            edge = (path[j - 1], path[j])
            edge_sorted = tuple(sorted(edge))
            if tree.edges[edge_sorted] > x:
                # Split this edge with a fresh internal node at distance x.
                tree_list.remove((edge_sorted[0], edge_sorted[1], tree.edges[edge_sorted]))
                tree_list.append((edge[0], self.max_node, x))
                tree_list.append((edge[1], self.max_node, tree.edges[edge_sorted] - x))
                tree_list.append((n - 1, self.max_node, limb_size))
                self.max_node += 1
                break
            elif tree.edges[edge_sorted] == x:
                # Attachment point coincides with an existing node.
                new_edge = sorted((n - 1, edge[1]))
                tree_list.append((new_edge[0], new_edge[1], limb_size))
                break
            else:
                x -= tree.edges[edge_sorted]
        return tree_list

    def UPGMA(self):
        """Build a rooted tree by average-linkage hierarchical clustering."""
        self.nr_count = self.nr_leaves()
        clusters = [{i} for i in range(self.nr_leaves())]
        trees = [Tree(i) for i in range(self.nr_leaves())]
        ages = [0 for _ in range(self.nr_leaves())]
        while len(clusters) > 1:
            # Find the closest pair of clusters under average linkage.
            min_d = sys.maxsize
            min_C1, min_C2 = None, None
            n = len(clusters)
            for i in range(n):
                for j in range(i + 1, n):
                    C1, C2 = clusters[i], clusters[j]
                    d = self.pairwise_distance(C1, C2)
                    if d < min_d:
                        min_d = d
                        min_C1, min_C2 = C1, C2
            C1_index, C2_index = clusters.index(min_C1), clusters.index(min_C2)
            age = min_d / 2
            # Merge the pair; edge weights are age differences.
            clusters[C1_index] = min_C1 | min_C2
            clusters.pop(C2_index)
            trees[C1_index] = Tree(self.nr_count, (trees[C1_index], age - ages[C1_index]), (trees[C2_index], age - ages[C2_index]))
            trees.pop(C2_index)
            ages[C1_index] = age
            ages.pop(C2_index)
            self.nr_count += 1
        return trees[0]

    def pairwise_distance(self, C1, C2):
        """Average distance between two clusters of leaf indices."""
        n, m = len(C1), len(C2)
        s = sum([self.D[i][j] for i in C1 for j in C2])
        return s / (n * m)
#from B completely
class UnrootedTree:
    """Weighted unrooted tree stored as an adjacency structure.

    Constructed from ``(node_a, node_b, weight)`` edge tuples; leaves are
    the nodes with exactly one neighbour.
    """

    def __init__(self, *args):
        self.graph = defaultdict(set)
        self.edges = defaultdict(int)
        self.nodes = set()
        self.edges_list = list()
        self.leaves = set()
        for a, b, w in args:
            self.graph[a].add(b)
            self.graph[b].add(a)
            # Weights are keyed on the sorted endpoint pair.
            self.edges[tuple(sorted((a, b)))] = w
            self.edges_list.append((a, b, float(w)))
            self.nodes.add(a)
            self.nodes.add(b)
        # A leaf is any node with a single neighbour.
        for node, neighbours in self.graph.items():
            if len(neighbours) == 1:
                self.leaves.add(node)

    def __repr__(self):
        pieces = ["UnrootedTree("]
        for edge in self.edges_list:
            pieces.append(str(edge) + ", ")
        return "".join(pieces)[:-2] + ")"

    @staticmethod
    def loadtxt(f):
        """Parse lines of the form ``a<->b:weight`` into an UnrootedTree."""
        with open(f, "r") as graph_file:
            parsed = []
            for line in graph_file:
                left, right = line.strip().split("<->")[0], line.strip().split("<->")[1]
                node_str, weight_str = right.split(":")[0], right.split(":")[1]
                parsed.append((int(left), int(node_str), float(weight_str)))
            return UnrootedTree(*parsed)

    def path(self, first_node, second_node):
        """Iterative DFS; returns the node sequence from first to second."""
        stack = [(first_node, [first_node])]
        while stack:
            vertex, walked = stack.pop()
            for neighbour in self.graph[vertex] - set(walked):
                extended = walked + [neighbour]
                if neighbour == second_node:
                    return extended
                stack.append((neighbour, extended))

    def distance_matrix(self):
        """Leaf-to-leaf path-length matrix (leaves assumed numbered 0..n-1)."""
        size = len(self.leaves)
        mat = [[0] * size for _ in range(size)]
        for n1 in self.leaves:
            for n2 in self.leaves:
                if n1 >= n2:
                    continue
                walk = self.path(n1, n2)
                total = 0
                for i in range(1, len(walk)):
                    total += self.edges[tuple(sorted((walk[i - 1], walk[i])))]
                mat[n1][n2] = total
                mat[n2][n1] = total
        return DistanceMatrix(mat)
#from A
class Tree:
    """Rooted tree: a root label plus any number of subtree arguments."""

    def __init__(self, root, *subtrees):
        self.root = root
        self.subtrees = subtrees

    def __str__(self):
        # Render as Tree(root, sub1, sub2, ...) with no trailing comma.
        pieces = [str(self.root)]
        pieces.extend(str(sub) for sub in self.subtrees)
        return type(self).__name__ + "(" + ", ".join(pieces) + ")"

    def __repr__(self):
        return self.__str__()
4,587 | list jobs | from django.contrib.auth.models import AnonymousUser
from django.db.models import Q
from django.template import Library, TemplateSyntaxError, Variable
from django.utils.translation import gettext_lazy as _
from django.db.models import Count
from tendenci.apps.jobs.models import Job
from tendenci.apps.base.template_tags import ListNode, parse_tag_kwargs
from tendenci.apps.perms.utils import get_query_filters
register = Library()
@register.inclusion_tag("jobs/options.html", takes_context=True)
def job_options(context, user, job):
    """Render the options menu for a single job."""
    context.update({
        "opt_object": job,
        "user": user
    })
    return context
@register.inclusion_tag("jobs/nav.html", takes_context=True)
def job_nav(context, user, job=None):
    """Render the jobs navigation bar, optionally scoped to one job."""
    context.update({
        'nav_object': job,
        "user": user
    })
    return context
@register.inclusion_tag("jobs/search-form.html", takes_context=True)
def job_search(context):
    """Render the jobs search form."""
    return context
@register.inclusion_tag("jobs/top_nav_items.html", takes_context=True)
def job_current_app(context, user, job=None):
    """Render the top navigation items for the jobs app."""
    context.update({
        "app_object": job,
        "user": user
    })
    return context
@register.inclusion_tag("jobs/search-form.html", takes_context=True)
def my_job_search(context):
    """Render the search form limited to the viewing user's own jobs."""
    context.update({
        'my_job': True
    })
    return context
@register.inclusion_tag("jobs/pricing-nav.html", takes_context=True)
def job_pricing_nav(context, user, job_pricing=None):
    """Render the job-pricing navigation bar."""
    context.update({
        'nav_object': job_pricing,
        "user": user
    })
    return context
@register.inclusion_tag("jobs/pricing-options.html", takes_context=True)
def job_pricing_options(context, user, job_pricing):
    """Render the options menu for a job pricing object."""
    context.update({
        "opt_object": job_pricing,
        "user": user
    })
    return context
@register.inclusion_tag("jobs/pricing-table.html", takes_context=True)
def job_pricing_table(context):
    """Render the pricing table of all active job pricings.

    Premium/member price columns are shown only when at least one active
    pricing actually defines a non-zero price for that column.
    """
    from tendenci.apps.jobs.models import JobPricing
    job_pricings = JobPricing.objects.filter(status=True).order_by('duration')
    show_premium_price = False
    show_member_price = False
    premium_jp = JobPricing.objects.filter(status=True).filter(premium_price__gt=0)
    if premium_jp:
        show_premium_price = True
    member_jp = JobPricing.objects.filter(status=True).filter(show_member_pricing=True).filter(Q(premium_price_member__gt=0) | Q(regular_price_member__gt=0))
    if member_jp:
        show_member_price = True
    context.update({
        "job_pricings": job_pricings,
        'show_premium_price': show_premium_price,
        'show_member_price': show_member_price
    })
    return context
@register.inclusion_tag("jobs/top_nav_items_pricing.html", takes_context=True)
def job_pricing_current_app(context, user, job_pricing=None):
    """Render the top navigation items for the job-pricing pages."""
    context.update({
        'app_object': job_pricing,
        "user": user
    })
    return context
class ListJobCategoriesNode(ListNode):
    """Template node that resolves job categories with per-category counts.

    Stores the result (cat name, cat id, job count) in the template context
    under the configured context variable.
    """
    model = Job
    perms = 'jobs.view_job'

    def render(self, context):
        user = AnonymousUser()
        if 'user' in self.kwargs:
            # The user may be passed as a template variable or a literal.
            try:
                user = Variable(self.kwargs['user'])
                user = user.resolve(context)
            except Exception:
                # BUG FIX: was a bare ``except:`` which also swallowed
                # KeyboardInterrupt/SystemExit; fall back to the raw value.
                user = self.kwargs['user']
        else:
            # check the context for an already existing user
            if 'user' in context:
                user = context['user']
        filters = get_query_filters(user, self.perms)
        # Count visible jobs per category, skipping uncategorized jobs.
        items = self.model.objects.filter(filters).exclude(cat=None
            ).values('cat__name', 'cat__id'
            ).annotate(total=Count('cat')).order_by('cat__name')
        context[self.context_var] = items
        return ""
@register.tag
def list_jobs_categories(parser, token):
    """
    Used to pull a list of jobs categories with the number of jobs per category.

    Usage::

        {% list_jobs_categories as [varname] [options] %}

    Options include:

        ``user``
           Specify a user to only show public items to all. **Default: Viewing user**

    Example::

        {% list_jobs_categories as jobs_cats_list user=request.user %}
        {% if jobs_cats_list %}
            <ul>
            {% for cat in jobs_cats_list %}
                <li>
                <a href="{% url 'jobs' %}?cat={{ cat.cat__id }}">{{ cat.cat__name }} ({{ cat.total }})</a>
                </li>
            {% endfor %}
            </ul>
        {% endif %}
    """
    args = []
    bits = token.split_contents()
    # BUG FIX: validate the argument count *before* indexing bits[2];
    # previously a short tag raised IndexError instead of the intended
    # TemplateSyntaxError.
    if len(bits) < 3:
        message = "'%s' tag requires at least 2 parameters" % bits[0]
        raise TemplateSyntaxError(_(message))
    if bits[1] != "as":
        message = "'%s' second argument must be 'as'" % bits[0]
        raise TemplateSyntaxError(_(message))
    context_var = bits[2]
    kwargs = parse_tag_kwargs(bits)
    return ListJobCategoriesNode(context_var, *args, **kwargs)
class ListJobNode(ListNode):
    """Template node that resolves a filtered list of Job objects."""
    model = Job
    perms = 'jobs.view_job'
@register.tag
def METHOD_NAME(parser, token):
    """
    Used to pull a list of :model:`jobs.Job` items.

    Usage::

        {% list_jobs as [varname] [options] %}

    Be sure the [varname] has a specific name like ``jobs_sidebar`` or
    ``jobs_list``. Options can be used as [option]=[value]. Wrap text values
    in quotes like ``tags="cool"``. Options include:

        ``limit``
           The number of items that are shown. **Default: 3**
        ``order``
           The order of the items. **Default: Newest Approved**
        ``user``
           Specify a user to only show public items to all. **Default: Viewing user**
        ``query``
           The text to search for items. Will not affect order.
        ``tags``
           The tags required on items to be included.
        ``random``
           Use this with a value of true to randomize the items included.

    Example::

        {% list_jobs as jobs_list limit=5 tags="cool" %}
        {% for job in jobs_list %}
            {{ job.title }}
        {% endfor %}
    """
    args = []
    bits = token.split_contents()
    # BUG FIX: validate the argument count *before* indexing bits[2];
    # previously a short tag raised IndexError instead of the intended
    # TemplateSyntaxError.
    if len(bits) < 3:
        message = "'%s' tag requires at least 2 parameters" % bits[0]
        raise TemplateSyntaxError(_(message))
    if bits[1] != "as":
        message = "'%s' second argument must be 'as'" % bits[0]
        raise TemplateSyntaxError(_(message))
    context_var = bits[2]
    kwargs = parse_tag_kwargs(bits)
    # Default ordering: newest post date first.
    if 'order' not in kwargs:
        kwargs['order'] = '-post_dt'
    return ListJobNode(context_var, *args, **kwargs)
4,588 | hook r | # SPDX-License-Identifier: MIT
from ..hv import TraceMode
from ..utils import *
# Populated at the bottom of the module with all locally-defined names.
__all__ = []
class RegCacheAlwaysCached(Reloadable):
    """Read-only facade over a RegCache that only serves cached values.

    Reads are answered from the parent cache without ever touching the
    hardware; writes are rejected outright.
    """

    def __init__(self, parent):
        # The owning RegCache this view delegates to.
        self.parent = parent

    def read(self, addr, width):
        return self.parent.read_cached(addr, width)

    def write(self, addr, data, width):
        raise Exception("Trying to write a register to the cache")
class RegCache(Reloadable):
    """MMIO register cache backed by the hypervisor's proxy.

    In a synchronous context (or before the hypervisor starts) reads go to
    hardware and refresh the cache; otherwise only cached values are served.
    """

    def __init__(self, hv):
        self.hv = hv
        self.u = hv.u
        self.cache = {}
        # Cache-only view handed out to regmap consumers.
        self.cached = RegCacheAlwaysCached(self)

    def update(self, addr, data):
        self.cache[addr] = data

    def read(self, addr, width):
        # Hardware access is only legal synchronously or pre-start.
        can_touch_hw = self.hv.ctx or not self.hv.started
        if not can_touch_hw:
            return self.read_cached(addr, width)
        data = self.u.read(addr, width)
        self.cache[addr] = data
        return data

    def read_cached(self, addr, width):
        cached_value = self.cache.get(addr, None)
        if cached_value is None:
            print(f"RegCache: no cache for {addr:#x}")
        return cached_value

    def write(self, addr, data, width):
        if not self.hv.ctx:
            raise Exception("Cannot write register in asynchronous context")
        self.u.write(addr, data, width)
        self.cache[addr] = data
class TracerState:
    """Plain attribute bag for per-tracer state; its __dict__ is persisted
    in hv.tracer_caches so state survives tracer reloads."""
    pass
class Tracer(Reloadable):
    """Base class for hypervisor MMIO tracers.

    Subclasses register trace zones (optionally with a register map) and
    receive read/write events, which are logged and dispatched to
    per-register handler methods.
    """
    DEFAULT_MODE = TraceMode.ASYNC

    def __init__(self, hv, verbose=False, ident=None):
        self.hv = hv
        self.ident = ident or type(self).__name__
        self.regmaps = {}
        self.verbose = verbose
        self.state = TracerState()
        self.init_state()
        self._cache = RegCache(hv)
        # Restore register cache and tracer state from a previous
        # incarnation (e.g. after a code reload), then re-register.
        cache = hv.tracer_caches.get(self.ident, None)
        if cache is not None:
            self._cache.cache.update(cache.get("regcache", {}))
            self.state.__dict__.update(cache.get("state", {}))
        hv.tracer_caches[self.ident] = {
            "regcache": self._cache.cache,
            "state": self.state.__dict__
        }

    def init_state(self):
        # Hook for subclasses to initialize self.state.
        pass

    def hook_w(self, addr, val, width, **kwargs):
        # Pass-through write hook used in TraceMode.HOOK.
        self.hv.u.write(addr, val, width)

    def METHOD_NAME(self, addr, width, **kwargs):
        # Pass-through read hook used in TraceMode.HOOK.
        return self.hv.u.read(addr, width)

    def evt_rw(self, evt, regmap=None, prefix=None):
        """Handle a read/write trace event: cache it, log it, and dispatch
        to a ``r_<reg>``/``w_<reg>`` handler method if one exists."""
        self._cache.update(evt.addr, evt.data)
        reg = rcls = None
        value = evt.data
        t = "w" if evt.flags.WRITE else "r"
        if regmap is not None:
            # Decode the raw value through the register map, if known.
            reg, index, rcls = regmap.lookup_addr(evt.addr)
            if rcls is not None:
                value = rcls(evt.data)
        # verbosity >= 3 logs everything; >= 1 logs unknown registers.
        if self.verbose >= 3 or (reg is None and self.verbose >= 1):
            if reg is None:
                s = f"{evt.addr:#x} = {value:#x}"
            else:
                s = f"{regmap.get_name(evt.addr)} = {value!s}"
            m = "+" if evt.flags.MULTI else " "
            self.log(f"MMIO: {t.upper()}.{1<<evt.flags.WIDTH:<2}{m} " + s)
        if reg is not None:
            # Dispatch to r_/w_ handler (optionally prefixed per range).
            if prefix is not None:
                attr = f"{t}_{prefix}_{reg}"
            else:
                attr = f"{t}_{reg}"
            handler = getattr(self, attr, None)
            if handler:
                if index is not None:
                    handler(value, index)
                else:
                    handler(value)
            elif self.verbose == 2:
                # Level 2: log only events with no dedicated handler.
                s = f"{regmap.get_name(evt.addr)} = {value!s}"
                m = "+" if evt.flags.MULTI else " "
                self.log(f"MMIO: {t.upper()}.{1<<evt.flags.WIDTH:<2}{m} " + s)

    def trace(self, start, size, mode, read=True, write=True, **kwargs):
        """Register a trace zone [start, start+size) with the hypervisor."""
        zone = irange(start, size)
        if mode == TraceMode.HOOK:
            self.hv.add_tracer(zone, self.ident, mode, self.METHOD_NAME if read else None,
                               self.hook_w if write else None, **kwargs)
        else:
            self.hv.add_tracer(zone, self.ident, mode, self.evt_rw if read else None,
                               self.evt_rw if write else None, **kwargs)

    def trace_regmap(self, start, size, cls, mode=None, name=None, prefix=None, regmap_offset=0):
        """Trace a range and decode events through a register-map class,
        exposing the map instance as an attribute named after it."""
        if mode is None:
            mode = self.DEFAULT_MODE
        if name is None:
            name = cls.__name__
        # Reuse an existing map instance for the same base address.
        regmap = self.regmaps.get(start - regmap_offset, None)
        if regmap is None:
            regmap = cls(self._cache, start - regmap_offset)
            regmap.cached = cls(self._cache.cached, start - regmap_offset)
            self.regmaps[start - regmap_offset] = regmap
        else:
            assert isinstance(regmap, cls)
        setattr(self, name, regmap)
        self.trace(start, size, mode=mode, regmap=regmap, prefix=prefix)

    def start(self):
        # Hook for subclasses to install their tracers.
        pass

    def stop(self):
        self.hv.clear_tracers(self.ident)

    def log(self, msg, show_cpu=True):
        self.hv.log(f"[{self.ident}] {msg}", show_cpu=show_cpu)
class PrintTracer(Tracer):
    """Tracer that prints every MMIO event and, when a log file is set,
    also records each access as a replayable proxy call."""

    def __init__(self, hv, device_addr_tbl):
        super().__init__(hv)
        self.device_addr_tbl = device_addr_tbl
        # Optional file object; when set, events are mirrored to it.
        self.log_file = None

    def event_mmio(self, evt, name=None, start=None):
        # Resolve the device name / base address unless provided by caller.
        dev, zone2 = self.device_addr_tbl.lookup(evt.addr)
        if name is None:
            name = dev
            start = zone2.start
        t = "W" if evt.flags.WRITE else "R"
        m = "+" if evt.flags.MULTI else " "
        logline = (f"[cpu{evt.flags.CPU}] [0x{evt.pc:016x}] MMIO: {t}.{1<<evt.flags.WIDTH:<2}{m} " +
                   f"0x{evt.addr:x} ({name}, offset {evt.addr - start:#04x}) = 0x{evt.data:x}")
        print(logline)
        # NOTE(review): indentation reconstructed — the statement-log lines
        # are assumed to be inside the ``if self.log_file:`` guard, since an
        # unguarded write would fail when log_file is None; confirm upstream.
        if self.log_file:
            self.log_file.write(f"# {logline}\n")
            width = 8 << evt.flags.WIDTH
            if evt.flags.WRITE:
                stmt = f"p.write{width}({start:#x} + {evt.addr - start:#x}, {evt.data:#x})\n"
            else:
                stmt = f"p.read{width}({start:#x} + {evt.addr - start:#x})\n"
            self.log_file.write(stmt)
class ADTDevTracer(Tracer):
    """Tracer configured from an ADT device node's register ranges.

    Subclasses populate REGMAPS/NAMES/PREFIXES (indexed by reg range) and
    start() installs one regmap trace per declared range.
    """
    # Per-reg-range register map classes; None entries skip the range.
    # An entry may be a (cls, offset) tuple to shift the regmap base.
    REGMAPS = []
    # Optional attribute names for each regmap instance.
    NAMES = []
    # Optional handler-method prefixes per range.
    PREFIXES = []

    def __init__(self, hv, devpath, verbose=False):
        # Identify the tracer by class and device path so multiple devices
        # can be traced by the same class concurrently.
        super().__init__(hv, verbose=verbose, ident=type(self).__name__ + "@" + devpath)
        self.dev = hv.adt[devpath]

    @classmethod
    def _reloadcls(cls, force=False):
        # Reload every referenced regmap class alongside the tracer class,
        # preserving (cls, offset) tuple entries and None placeholders.
        regmaps = []
        for i in cls.REGMAPS:
            if i is None:
                reloaded = None
            elif isinstance(i, tuple):
                reloaded = (i[0]._reloadcls(force), i[1])
            else:
                reloaded = i._reloadcls(force)
            regmaps.append(reloaded)
        cls.REGMAPS = regmaps
        return super()._reloadcls(force)

    def start(self):
        for i in range(len(self.dev.reg)):
            # Skip ranges with no configured regmap.
            if i >= len(self.REGMAPS) or (regmap := self.REGMAPS[i]) is None:
                continue
            if isinstance(regmap, tuple):
                regmap, regmap_offset = regmap
            else:
                regmap_offset = 0
            prefix = name = None
            if i < len(self.NAMES):
                name = self.NAMES[i]
            if i < len(self.PREFIXES):
                prefix = self.PREFIXES[i]
            start, size = self.dev.get_reg(i)
            self.trace_regmap(start, size, regmap, name=name, prefix=prefix, regmap_offset=regmap_offset)
# Export every callable/class actually defined in this module (the
# __module__ check filters out names pulled in via the star imports).
__all__.extend(k for k, v in globals().items()
               if (callable(v) or isinstance(v, type)) and v.__module__.startswith(__name__))
4,589 | frame descriptors | """
ckwg +31
Copyright 2020 by Kitware, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither name of Kitware, Inc. nor the names of any contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
==============================================================================
Tests for Python interface to vital::feature_track_set/state
"""
import unittest
import nose.tools as nt
import numpy as np
from kwiver.vital.tests.py_helpers import no_call_pure_virtual_method
from kwiver.vital.types import (
FeatureF,
Descriptor,
FeatureTrackState as ftstate,
FeatureTrackSet as ftset,
TrackSet,
Track,
TrackDescriptor,
DescriptorSet,
new_descriptor,
TrackState,
SimpleFeatureSet as sfs,
)
"""
Test Feature Track State
"""
class TestFeatureTrackState(unittest.TestCase):
    """Tests for vital.types.FeatureTrackState."""

    @classmethod
    def setUp(self):
        # NOTE(review): decorated @classmethod yet written with ``self`` and
        # named like an instance-level setUp — the fixtures end up as class
        # attributes; confirm this is intentional.
        self.f1 = FeatureF([1, 1], 1, 2, 1)
        self.desc = new_descriptor(33, 'd')
        self.ts = TrackState(0)

    def test_constructors(self):
        # Direct construction and copy construction must both succeed.
        ftstate(13, self.f1, self.desc)
        ftstate(ftstate(13, self.f1, self.desc))

    def test_members(self):
        test_ft = ftstate(13, self.f1, self.desc)
        test_ft.inlier = True
        self.assertTrue(test_ft.inlier)
        self.assertEqual(test_ft.frame_id, 13)
        nt.assert_equal(test_ft.feature, self.f1)
        nt.assert_equal(test_ft.descriptor, self.desc)

    def test_methods(self):
        test_ft = ftstate(13, self.f1, self.desc)
        # clone() and downcast() must both yield a FeatureTrackState with
        # the same frame id.
        test_ft_clone = test_ft.clone()
        nt.ok_(isinstance(test_ft_clone, ftstate))
        nt.assert_equal(test_ft_clone.frame_id, test_ft.frame_id)
        test_ft_downcast = test_ft.downcast()
        nt.ok_(isinstance(test_ft_downcast, ftstate))
        nt.assert_equal(test_ft_downcast.frame_id, test_ft.frame_id)
"""
Test Feature Track Set
"""
class TestFeatureTrackSet(unittest.TestCase):
    """Tests for the FeatureTrackSet Python bindings."""

    def setUp(self):
        # setUp must be a plain instance method: the previous @classmethod
        # decorator stored these fixtures on the class object, so all test
        # methods shared (and could mutate) the same Track objects.
        self.track_state = TrackState(15)
        self.track = Track(23)
        self.track.append(TrackState(1))
        self.track.append(self.track_state)
        self._track_arr = [Track(15), Track(1),
                           Track(150), Track(9),
                           self.track]

    def test_construct(self):
        """The set can be built empty or from a list of tracks."""
        ftset()
        ftset(self._track_arr)

    def test_methods(self):
        """Exercise the query methods of a populated feature track set."""
        test_feat_set = ftset(self._track_arr)
        self.assertEqual(test_feat_set.all_frame_ids(), {1, 15})
        nt.assert_equal(test_feat_set.get_track(1), self._track_arr[1])
        self.assertEqual(0, test_feat_set.first_frame())
        self.assertEqual(15, test_feat_set.last_frame())
        self.assertEqual(5, len(test_feat_set.tracks()))
        self.assertEqual(len(test_feat_set), 5)
        cloned_test_feat_set = test_feat_set.clone()
        self.assertIsInstance(cloned_test_feat_set, ftset)
        self.assertEqual(cloned_test_feat_set.all_frame_ids(),
                         test_feat_set.all_frame_ids())
        self.assertIsInstance(test_feat_set.last_frame_features(), sfs)
        self.assertIsInstance(test_feat_set.last_frame_descriptors(),
                              DescriptorSet)
        self.assertIsInstance(test_feat_set.frame_features(), sfs)
        self.assertIsInstance(test_feat_set.METHOD_NAME(), DescriptorSet)
        track_state_list = test_feat_set.frame_feature_track_states(15)
        self.assertListEqual(track_state_list, [])
        self.assertEqual(test_feat_set.keyframes(), set())
"""
Implement and Test Class Inheriting from Feature Track Set
"""
class SubFeatureTrackSet(ftset):
    """Python subclass of FeatureTrackSet used to exercise overriding the
    bound virtual methods from Python.

    Every override returns a fixed, empty value so tests can distinguish
    the Python implementations from the inherited C++ ones.
    """

    def __init__(self):
        ftset.__init__(self)
        # NOTE(review): this shadows the base class's tracks() accessor with
        # a plain list; the inherited-method test compares it after clone().
        self.tracks = [15, 1, 18, 9]

    def last_frame_features(self):
        # Overridden virtual: always a fresh, empty feature set.
        return sfs()

    def last_frame_descriptors(self):
        # Overridden virtual: always a fresh, empty descriptor set.
        return DescriptorSet()

    def frame_features(self, offset):
        # Overridden virtual: empty feature set regardless of frame offset.
        return sfs()

    def METHOD_NAME(self, offset):
        # Overridden virtual: empty descriptor set regardless of frame offset.
        return DescriptorSet()

    def frame_feature_track_states(self, offset):
        # Overridden virtual: no track states for any frame.
        return []

    def keyframes(self):
        # Overridden virtual: no keyframes.
        return set()

    def clone(self):
        # Overridden virtual: a brand-new instance (fresh fixed track list).
        return SubFeatureTrackSet()

    def size(self):
        # Overridden virtual: length of the Python-side track list.
        return len(self.tracks)
class TestSubFeatureTrackSet(unittest.TestCase):
    """Checks that SubFeatureTrackSet overrides and inherits correctly."""

    def test_constructors(self):
        """The subclass can be instantiated."""
        SubFeatureTrackSet()

    def test_overridden_methods(self):
        """Each overridden virtual returns the expected Python-side value."""
        subject = SubFeatureTrackSet()
        self.assertIsInstance(subject.last_frame_features(), sfs)
        self.assertIsInstance(subject.last_frame_descriptors(), DescriptorSet)
        self.assertListEqual([], subject.frame_feature_track_states(15))
        self.assertEqual(set(), subject.keyframes())
        self.assertIsInstance(subject.frame_features(0), sfs)
        self.assertIsInstance(subject.METHOD_NAME(0), DescriptorSet)

    def test_inherited_methods(self):
        """clone/len come through the binding; size uses the override."""
        subject = SubFeatureTrackSet()
        duplicate = subject.clone()
        self.assertIsInstance(duplicate, ftset)
        self.assertListEqual(duplicate.tracks, subject.tracks)
        self.assertEqual(0, len(subject))
        self.assertEqual(4, subject.size())
4,590 | rei url helper | # -*- coding: utf-8 -*-
"""
EPA REI
"""
import re
import pandas as pd
import numpy as np
from flowsa.location import US_FIPS
from flowsa.flowbyfunctions import assign_fips_location_system
from flowsa.fbs_allocation import direct_allocation_method
def METHOD_NAME(*, build_url, config, **_):
    """
    Build the list of EPA REI source URLs.

    This helper function uses the "build_url" input from flowbyactivity.py,
    which is a base url for data imports. It replaces the "__filename__"
    placeholder once per file listed in the FBA method yaml, producing one
    URL per source file. This function does not parse or download any data,
    it only constructs the urls from which data is obtained.

    :param build_url: string, base url containing a "__filename__" token
    :param config: dictionary, items in FBA method yaml; ``config['files']``
        lists the file names to substitute into the url
    :return: list, urls to call, concat, parse, format into Flow-By-Activity
        format
    """
    # One URL per REI source file named in the method yaml.
    return [build_url.replace("__filename__", f) for f in config['files']]
def rei_call(*, url, **_):
    """
    Load one REI source csv into a dataframe and tag it with its name.

    :param url: string, location of the csv to read; expected to match the
        'sourcedata/REI_<name>.csv' pattern
    :return: pandas dataframe of the original source data, with a
        'Description' column holding the <name> stem extracted from the url
        (used later by the parser to tell the source files apart)
    """
    frame = pd.read_csv(url)
    # Keep the filename stem so primary_factors_parse can branch per file.
    match = re.search('sourcedata/REI_(.*).csv', url)
    frame['Description'] = match.group(1)
    return frame
def primary_factors_parse(*, df_list, year, **_):
    """
    Combine, parse, and format the provided dataframes.

    Each dataframe in ``df_list`` carries a 'Description' column (set by
    rei_call from the source filename) identifying which REI source file it
    came from; each file gets its own reshaping branch below.

    :param df_list: list of dataframes to concat and format
    :param year: year of FBS
    :return: df, parsed and partially formatted to
        flowbyactivity specifications
    """
    rei_list = []
    for df in df_list:
        # df for primary factors
        if df['Description'][0] == 'primaryfactors':
            df.iloc[0, 1] = 'ActivityProducedBy'
            df = (df
                  .drop(df.columns[0], axis=1)
                  .rename(columns=df.iloc[0])
                  .rename(columns={'primaryfactors': 'Description'})
                  .drop(df.index[0])
                  .reset_index(drop=True)
                  )
            # use "melt" fxn to convert columns into rows
            df = df.melt(id_vars=["Description", "ActivityProducedBy"],
                         var_name="FlowName",
                         value_name="FlowAmount")
            df["Class"] = "Money"
            df.loc[df['FlowName'] == 'Employment', 'Class'] = 'Employment'
            df["FlowType"] = 'ELEMENTARY_FLOW'
            df['Unit'] = 'Thousand USD'
            df.loc[df['FlowName'] == 'Employment', 'Unit'] = 'p'
            df['FlowAmount'] = df['FlowAmount'].astype(float)
        # df for waste - sector consumed by
        elif df['Description'][0] == 'useintersection':
            df.iloc[0, 1] = 'FlowName'
            df = (df
                  .drop(df.columns[0], axis=1)
                  .rename(columns=df.iloc[0])
                  .rename(columns={'useintersection': 'Description'})
                  .drop(df.index[0])
                  .reset_index(drop=True)
                  )
            df = (df
                  .drop(columns=df.columns[[1]])
                  .drop([0, 1])  # Drop blanks
                  .melt(id_vars=['Description', 'FlowName'],
                        var_name='ActivityConsumedBy',
                        value_name='FlowAmount')
                  )
            # drop placeholder cells ("- ") before casting to float
            df = df[~df['FlowAmount'].astype(str).str.contains(
                '- ')].reset_index(drop=True)
            df['FlowAmount'] = (df['FlowAmount'].str.replace(
                ',', '').astype('float'))
            df['Unit'] = 'USD'
            # Where recyclable code >= 9 (Gleaned product), change units to MT
            df.loc[df['FlowName'].str.contains('metric tons'), 'Unit'] = 'MT'
            df['FlowName'] = df['FlowName'].apply(
                lambda x: x.split('(', 1)[0])
            df["Class"] = "Other"
            df['FlowType'] = 'WASTE_FLOW'
        # df for waste - sector produced by
        elif df['Description'][0] == 'makecol':
            df = (df
                  .drop(df.columns[0], axis=1)
                  .rename(columns={'Unnamed: 1': 'ActivityProducedBy'})
                  .drop(df.index[0])
                  .reset_index(drop=True)
                  )
            # Assign final row as Post-consumer
            df['ActivityProducedBy'].iloc[-1] = \
                'Estimate from Post-Consumer Waste'
            df = (df
                  .melt(id_vars=['Description', 'ActivityProducedBy'],
                        var_name='FlowName',
                        value_name='FlowAmount')
                  .dropna()
                  )
            df = df[~df['FlowAmount'].astype(str).str.contains(
                '-')].reset_index(drop=True)
            df['FlowAmount'] = (df['FlowAmount'].str.replace(
                ',', '').astype('float'))
            df['Unit'] = 'MT'
            # raw string so the regex engine sees an escaped dollar sign;
            # a bare '\$' in a str literal is an invalid escape sequence
            df['Unit'] = np.where(df['FlowName'].str.contains(r'\$'), 'USD',
                                  df['Unit'])
            df['FlowName'] = df['FlowName'].apply(
                lambda x: x.split('(', 1)[0])
            df["Class"] = "Other"
            df['FlowType'] = 'WASTE_FLOW'
        rei_list.append(df)

    df2 = pd.concat(rei_list, sort=False)
    # update employment to jobs
    df2['FlowName'] = np.where(df2['FlowName'] == 'Employment', 'Jobs',
                               df2['FlowName'])
    # address trailing white space
    string_cols = list(df2.select_dtypes(include=['object', 'int']).columns)
    for s in string_cols:
        df2[s] = df2[s].str.strip()
    # hardcode info
    df2['Location'] = US_FIPS
    df2 = assign_fips_location_system(df2, year)
    df2['SourceName'] = 'EPA_REI'
    df2["Year"] = year
    df2['DataReliability'] = 5
    df2['DataCollection'] = 5
    return df2
def rei_waste_flows_attribution(*, flow_subset_mapped, k, names, method, **_):
    """
    Attribute REI waste data to include SPB and SCB.
    todo: convert this function into yaml methods once we pull in Matthew's
    updates
    :param flow_subset_mapped: df, waste flows whose 'Description' column
        distinguishes 'makecol' (sector produced by) rows from
        'useintersection' (sector consumed by) rows
    :param k: passed through to direct_allocation_method
    :param names: passed through to direct_allocation_method
    :param method: passed through to direct_allocation_method
    :return: df, produced-by flows split across consuming sectors by the
        computed attribution ratios
    """
    # subset data into activityproducedby and activityconsumedby dataframes
    p = flow_subset_mapped[flow_subset_mapped['Description'] == 'makecol']
    c = flow_subset_mapped[flow_subset_mapped['Description'] ==
                           'useintersection']
    # first directly attribute/equally attribute APB to sectors and drop ACB
    # data
    p2 = direct_allocation_method(p, k, names, method)
    p2 = p2.drop(columns=['ActivityConsumedBy', 'SectorConsumedBy'])
    # then create attribution ratios to activityconsumedby based on flowable:
    # each (Flowable, Unit) group's total is the ratio denominator
    c2 = c[['Flowable', 'FlowAmount', 'Unit', 'ActivityConsumedBy',
            'SectorConsumedBy']].assign(Denominator=c.groupby(
                ['Flowable', 'Unit'])['FlowAmount'].transform('sum'))
    c2 = c2.assign(AttributionRatio=c2['FlowAmount']/c2['Denominator'])
    c2 = c2.sort_values(['Flowable', 'ActivityConsumedBy'])
    # Drop imports and exports so that the quantity is not allocated to SCB
    c2 = c2.query(
        'SectorConsumedBy not in ("F04000", "F05000")').reset_index(drop=True)
    # merge data and recalculate flow amounts
    df = p2.merge(c2[['Flowable', 'Unit', 'ActivityConsumedBy',
                      'SectorConsumedBy', 'AttributionRatio']],
                  how='left')
    df['FlowAmount'] = df['FlowAmount'] * df['AttributionRatio']
    df = df.drop(columns='AttributionRatio')
    return df
4,591 | get mock rendition | from django.db import models
from django.utils.functional import cached_property
from wagtail.images.image_operations import (
DoNothingOperation,
MinMaxOperation,
WidthHeightOperation,
)
from wagtail.images.models import (
AbstractImage,
AbstractRendition,
Filter,
Image,
)
from wagtail_placeholder_images.mixins import PlaceholderRenditionMixin
from v1.atomic_elements.atoms import IMAGE_ALT_TEXT_HELP_TEXT
class CFGOVImage(PlaceholderRenditionMixin, AbstractImage):
    """Wagtail image model with custom alt text and GIF handling."""

    # Alt text stored on the image itself (see default_alt_text below).
    alt = models.TextField(blank=True, help_text=IMAGE_ALT_TEXT_HELP_TEXT)
    file_hash = models.CharField(max_length=40, blank=True, editable=False)

    admin_form_fields = Image.admin_form_fields + ("alt",)

    def get_rendition(self, rendition_filter):
        """Always return the source image file for GIF renditions.

        CFGOVImage overrides the default Wagtail renditions behavior to
        always embed the original uploaded image file for GIFs, instead of
        generating new versions on the fly.
        """
        if self.file.name.endswith(".gif"):
            return self.METHOD_NAME(rendition_filter)
        else:
            return super().get_rendition(rendition_filter)

    def METHOD_NAME(self, rendition_filter):
        """Create a mock rendition object that wraps the original image.

        Using the template tag {% image image 'original' %} will return an
        <img> tag linking to the original file (instead of a file copy, as
        is default Wagtail behavior).

        Template tags with Wagtail size-related filters (width, height, max,
        and min), e.g. {% image image 'max-165x165' %}, will generate an
        <img> tag with appropriate size parameters, following logic from
        wagtail.images.image_operations.
        """
        if isinstance(rendition_filter, str):
            rendition_filter = Filter(spec=rendition_filter)

        width = self.width
        height = self.height

        for operation in rendition_filter.operations:
            # no-op filters leave the dimensions alone
            if isinstance(operation, DoNothingOperation):
                continue

            # only size operations are valid for GIFs because the original
            # file is served unchanged; anything else would require editing
            if not any(
                [
                    isinstance(operation, WidthHeightOperation),
                    isinstance(operation, MinMaxOperation),
                ]
            ):
                raise RuntimeError("non-size operations not supported on GIFs")

            width, height = self.apply_size_operation(operation, width, height)

        # Note: rendition is not saved; it only wraps the original file
        return CFGOVRendition(
            image=self, file=self.file, width=width, height=height
        )

    @staticmethod
    def apply_size_operation(operation, width, height):
        """Run a Wagtail size operation against a stand-in image and return
        the resulting (width, height) without touching any file data."""
        class MockResizableImage(object):
            # Minimal duck-typed stand-in implementing the interface that
            # wagtail.images.image_operations expects (get_size/resize/size).
            def __init__(self, width, height):
                self.width = width
                self.height = height

            def get_size(self):
                return self.width, self.height

            def resize(self, size):
                width, height = size
                self.width = width
                self.height = height

            @property
            def size(self):
                return self.get_size()

        mock_image = MockResizableImage(width, height)
        operation.run(mock_image, image=None)
        return mock_image.width, mock_image.height

    @property
    def default_alt_text(self):
        # Override Wagtail default of setting alt text to the image title.
        return self.alt

    # If the image is both large and its height-to-width ratio is approximately
    # 1/2 we instruct the template to render large Twitter cards
    # See https://dev.twitter.com/cards/types/summary-large-image
    @property
    def should_display_summary_large_image(self):
        image_ratio = float(self.height) / self.width
        return self.width >= 1000 and 0.4 <= image_ratio <= 0.6
class CFGOVRendition(AbstractRendition):
    """Rendition model for CFGOVImage, adding alt text and orientation
    helpers for templates."""

    image = models.ForeignKey(
        CFGOVImage, on_delete=models.CASCADE, related_name="renditions"
    )

    @property
    def alt(self):
        # Renditions expose the alt text of their source image.
        return self.image.alt

    @cached_property
    def orientation(self):
        """One of 'portrait', 'landscape', or 'square'."""
        if self.is_portrait:
            return "portrait"
        if self.is_landscape:
            return "landscape"
        return "square"

    @cached_property
    def is_square(self):
        return self.height == self.width

    @cached_property
    def is_portrait(self):
        return self.height > self.width

    @cached_property
    def is_landscape(self):
        return self.height < self.width

    class Meta:
        unique_together = (("image", "filter_spec", "focal_point_key"),)
4,592 | form type | '''
See COPYRIGHT.md for copyright information.
'''
import os
from arelle import XmlUtil
from arelle.ModelObject import ModelObject
# Default option values for a new RSS watch session. The keys must stay in
# sync with the fields DialogRssWatch presents (see note below); values of
# None/""/False mean "not configured yet".
newRssWatchOptions = {
    "feedSource": "",
    "feedSourceUri": None,
    "matchTextExpr": "",
    "formulaFileUri": "",
    "logFileUri": "",
    "emailAddress": "",
    "validateXbrlRules": False,
    "validateDisclosureSystemRules": False,
    "validateCalcLinkbase": False,
    "validateFormulaAssertions": False,
    "alertMatchedFactText": False,
    "alertAssertionUnsuccessful": False,
    "alertValiditionError": False,  # key spelling is load-bearing; do not fix
    "latestPubDate": None,
}
# Note: if adding to this list keep DialogRssWatch in sync
class ModelRssItem(ModelObject):
    """
    A single <item> entry from an SEC EDGAR RSS feed, exposing the
    edgar-namespaced filing metadata (CIK, accession number, form type,
    dates, document URLs) as properties over the underlying XML element.
    """
    def init(self, modelDocument):
        super(ModelRssItem, self).init(modelDocument)
        try:
            # Items published at or before the watch's last-seen pubDate have
            # already been processed. (_ is the gettext alias that arelle
            # installs globally at startup.)
            if (self.modelXbrl.modelManager.rssWatchOptions.latestPubDate and
                    self.pubDate <=
                    self.modelXbrl.modelManager.rssWatchOptions.latestPubDate):
                self.status = _("tested")
            else:
                self.status = _("not tested")
        except AttributeError:
            self.status = _("not tested")
        self.results = None
        self.assertions = None
        # find edgar namespace
        self.edgr = None
        for elt in self.iterdescendants("{*}xbrlFiling"):
            self.edgr = elt.qname.namespaceURI
            break
        if self.edgr:
            edgrPrefix = "{" + self.edgr + "}"
        else:
            edgrPrefix = ""
        # Pre-built Clark-notation names in the discovered edgar namespace,
        # used by the URL/document properties below.
        self.edgrDescription = edgrPrefix + "description"
        self.edgrFile = edgrPrefix + "file"
        self.edgrInlineXBRL = edgrPrefix + "inlineXBRL"
        self.edgrSequence = edgrPrefix + "sequence"
        self.edgrType = edgrPrefix + "type"
        self.edgrUrl = edgrPrefix + "url"

    @property
    def cikNumber(self):
        # Central Index Key of the filer.
        return XmlUtil.text(XmlUtil.descendant(self, self.edgr, "cikNumber"))

    @property
    def accessionNumber(self):
        return XmlUtil.text(XmlUtil.descendant(self, self.edgr, "accessionNumber"))

    @property
    def fileNumber(self):
        return XmlUtil.text(XmlUtil.descendant(self, self.edgr, "fileNumber"))

    @property
    def companyName(self):
        return XmlUtil.text(XmlUtil.descendant(self, self.edgr, "companyName"))

    @property
    def METHOD_NAME(self):
        # SEC form type, e.g. "10-K"; also used to find the primary document.
        return XmlUtil.text(XmlUtil.descendant(self, self.edgr, "formType"))

    @property
    def pubDate(self):
        # Parsed once and cached on the instance.
        try:
            return self._pubDate
        except AttributeError:
            from arelle.UrlUtil import parseRfcDatetime
            self._pubDate = parseRfcDatetime(XmlUtil.text(XmlUtil.descendant(self, None, "pubDate")))
            return self._pubDate

    @property
    def filingDate(self):
        # Parsed once and cached; source format is MM/DD/YYYY.
        try:
            return self._filingDate
        except AttributeError:
            import datetime
            self._filingDate = None
            date = XmlUtil.text(XmlUtil.descendant(self, self.edgr, "filingDate"))
            d = date.split("/")
            if d and len(d) == 3:
                self._filingDate = datetime.date(int(d[2]), int(d[0]), int(d[1]))
            return self._filingDate

    @property
    def period(self):
        # Reformat YYYYMMDD into ISO YYYY-MM-DD; None when absent/malformed.
        per = XmlUtil.text(XmlUtil.descendant(self, self.edgr, "period"))
        if per and len(per) == 8:
            return "{0}-{1}-{2}".format(per[0:4], per[4:6], per[6:8])
        return None

    @property
    def assignedSic(self):
        # Standard Industrial Classification code assigned by the SEC.
        return XmlUtil.text(XmlUtil.descendant(self, self.edgr, "assignedSic"))

    @property
    def acceptanceDatetime(self):
        # Parsed once and cached; source format is YYYYMMDDHHMMSS.
        try:
            return self._acceptanceDatetime
        except AttributeError:
            import datetime
            self._acceptanceDatetime = None
            date = XmlUtil.text(XmlUtil.descendant(self, self.edgr, "acceptanceDatetime"))
            if date and len(date) == 14:
                self._acceptanceDatetime = datetime.datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]), int(date[8:10]), int(date[10:12]), int(date[12:14]))
            return self._acceptanceDatetime

    @property
    def fiscalYearEnd(self):
        # Reformat MMDD into MM-DD; None when absent/malformed.
        yrEnd = XmlUtil.text(XmlUtil.descendant(self, self.edgr, "fiscalYearEnd"))
        if yrEnd and len(yrEnd) == 4:
            return "{0}-{1}".format(yrEnd[0:2], yrEnd[2:4])
        return None

    @property
    def htmlUrl(self):  # main filing document
        # The file with sequence "1" is the primary HTML document.
        htmlDocElt = XmlUtil.descendant(self, self.edgr, "xbrlFile", attrName=self.edgrSequence, attrValue="1")
        if htmlDocElt is not None:
            return htmlDocElt.get(self.edgrUrl)
        return None

    @property
    def url(self):
        # URL of the XBRL instance (or inline XBRL) document; cached.
        try:
            return self._url
        except AttributeError:
            self._url = None
            for instDocElt in XmlUtil.descendants(self, self.edgr, "xbrlFile"):
                if instDocElt.get(self.edgrType).endswith(".INS") or instDocElt.get(self.edgrInlineXBRL) == "true":
                    self._url = instDocElt.get(self.edgrUrl)
                    break
            return self._url

    @property
    def enclosureUrl(self):
        return XmlUtil.childAttr(self, None, "enclosure", "url")

    @property
    def zippedUrl(self):
        # NOTE(review): when an enclosure exists this concatenates the
        # enclosure url, "/", and the instance file name (the original
        # accession-zip form is kept commented out below) — confirm intent.
        enclosure = XmlUtil.childAttr(self, None, "enclosure", "url")
        if enclosure:
            # modify url to use zip file
            _path, sep, file = (self.url or "").rpartition("/")
            # return path + sep + self.accessionNumber + "-xbrl.zip" + sep + file
            return enclosure + sep + file
        else:  # no zipped enclosure, just use unzipped file
            return self.url

    @property
    def htmURLs(self):
        # (description, url) pairs for all .htm files in the filing; cached.
        try:
            return self._htmURLs
        except AttributeError:
            self._htmURLs = [
                (instDocElt.get(self.edgrDescription), instDocElt.get(self.edgrUrl))
                for instDocElt in XmlUtil.descendants(self, self.edgr, "xbrlFile")
                if instDocElt.get(self.edgrFile).endswith(".htm")]
            return self._htmURLs

    @property
    def primaryDocumentURL(self):
        # URL of the file whose edgar type matches this item's form type.
        try:
            return self._primaryDocumentURL
        except AttributeError:
            METHOD_NAME = self.METHOD_NAME
            self._primaryDocumentURL = None
            for instDocElt in XmlUtil.descendants(self, self.edgr, "xbrlFile"):
                if instDocElt.get(self.edgrType) == METHOD_NAME:
                    self._primaryDocumentURL = instDocElt.get(self.edgrUrl)
                    break
            return self._primaryDocumentURL

    def setResults(self, modelXbrl):
        """Record validation results for this item from a validated model."""
        self.results = []
        self.assertionUnsuccessful = False
        # put error codes first, sorted, then assertion result (dict's)
        self.status = "pass"
        for error in modelXbrl.errors:
            if isinstance(error, dict):  # assertion results
                self.assertions = error
                # NOTE(review): .items() pairs are unpacked as
                # (countSuccessful, countNotsuccessful); confirm the dict
                # really maps successful-count -> not-successful-count.
                for countSuccessful, countNotsuccessful in error.items():
                    if countNotsuccessful > 0:
                        self.assertionUnsuccessful = True
                        self.status = "unsuccessful"
            else:  # error code results
                self.results.append(error)
                self.status = "fail"  # error code
        self.results.sort()

    @property
    def propertyView(self):
        # (label, value) pairs for the GUI properties pane.
        return (("CIK", self.cikNumber),
                ("company", self.companyName),
                ("published", self.pubDate),
                ("form type", self.METHOD_NAME),
                ("filing date", self.filingDate),
                ("period", self.period),
                ("year end", self.fiscalYearEnd),
                ("status", self.status),
                ("instance", os.path.basename(self.url)),
                )

    def __repr__(self):
        return ("rssItem[{0}]{1})".format(self.objectId(), self.propertyView))
4,593 | batch shape | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Geometric distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
class Geometric(distribution.Distribution):
  """Geometric distribution.

  The Geometric distribution is parameterized by p, the probability of a
  positive event. It represents the probability that in k + 1 Bernoulli trials,
  the first k trials failed, before seeing a success.

  The pmf of this distribution is:

  #### Mathematical Details

  ```none
  pmf(k; p) = (1 - p)**k * p
  ```

  where:

  * `p` is the success probability, `0 < p <= 1`, and,
  * `k` is a non-negative integer.
  """

  @deprecation.deprecated(
      "2018-10-01",
      "The TensorFlow Distributions library has moved to "
      "TensorFlow Probability "
      "(https://github.com/tensorflow/probability). You "
      "should update all references to use `tfp.distributions` "
      "instead of `tf.contrib.distributions`.",
      warn_once=True)
  def __init__(self,
               logits=None,
               probs=None,
               validate_args=False,
               allow_nan_stats=True,
               name="Geometric"):
    """Construct Geometric distributions.

    Args:
      logits: Floating-point `Tensor` with shape `[B1, ..., Bb]` where `b >= 0`
        indicates the number of batch dimensions. Each entry represents logits
        for the probability of success for independent Geometric distributions
        and must be in the range `(-inf, inf]`. Only one of `logits` or `probs`
        should be specified.
      probs: Positive floating-point `Tensor` with shape `[B1, ..., Bb]`
        where `b >= 0` indicates the number of batch dimensions. Each entry
        represents the probability of success for independent Geometric
        distributions and must be in the range `(0, 1]`. Only one of `logits`
        or `probs` should be specified.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.
    """
    parameters = dict(locals())
    with ops.name_scope(name, values=[logits, probs]) as name:
      # Exactly one of logits/probs may be given; the other is derived.
      self._logits, self._probs = distribution_util.get_logits_and_probs(
          logits, probs, validate_args=validate_args, name=name)

      with ops.control_dependencies(
          [check_ops.assert_positive(self._probs)] if validate_args else []):
        self._probs = array_ops.identity(self._probs, name="probs")

    super(Geometric, self).__init__(
        dtype=self._probs.dtype,
        reparameterization_type=distribution.NOT_REPARAMETERIZED,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        parameters=parameters,
        graph_parents=[self._probs, self._logits],
        name=name)

  @property
  def logits(self):
    """Log-odds of a `1` outcome (vs `0`)."""
    return self._logits

  @property
  def probs(self):
    """Probability of a `1` outcome (vs `0`)."""
    return self._probs

  def _batch_shape_tensor(self):
    # Dynamic batch shape follows the shape of the probs parameter.
    return array_ops.shape(self._probs)

  def METHOD_NAME(self):
    # Static batch shape, known from the probs parameter at graph build time.
    return self.probs.get_shape()

  def _event_shape_tensor(self):
    # Scalar event: each draw is a single non-negative integer.
    return array_ops.constant([], dtype=dtypes.int32)

  def _event_shape(self):
    return tensor_shape.TensorShape([])

  def _sample_n(self, n, seed=None):
    # Uniform variates must be sampled from the open-interval `(0, 1)` rather
    # than `[0, 1)`. To do so, we use `np.finfo(self.dtype.as_numpy_dtype).tiny`
    # because it is the smallest, positive, "normal" number. A "normal" number
    # is such that the mantissa has an implicit leading 1. Normal, positive
    # numbers x, y have the reasonable property that, `x + y >= max(x, y)`. In
    # this case, a subnormal number (i.e., np.nextafter) can cause us to sample
    # 0.
    sampled = random_ops.random_uniform(
        array_ops.concat([[n], array_ops.shape(self._probs)], 0),
        minval=np.finfo(self.dtype.as_numpy_dtype).tiny,
        maxval=1.,
        seed=seed,
        dtype=self.dtype)

    # Inverse-CDF sampling: floor(log(U) / log(1 - p)).
    return math_ops.floor(
        math_ops.log(sampled) / math_ops.log1p(-self.probs))

  def _cdf(self, x):
    if self.validate_args:
      x = distribution_util.embed_check_nonnegative_integer_form(x)
    else:
      # Whether or not x is integer-form, the following is well-defined.
      # However, scipy takes the floor, so we do too.
      x = math_ops.floor(x)
    x *= array_ops.ones_like(self.probs)
    # CDF(x) = 1 - (1 - p)**(x + 1), computed stably via expm1/log1p.
    return array_ops.where(
        x < 0.,
        array_ops.zeros_like(x),
        -math_ops.expm1((1. + x) * math_ops.log1p(-self.probs)))

  def _log_prob(self, x):
    if self.validate_args:
      x = distribution_util.embed_check_nonnegative_integer_form(x)
    else:
      # For consistency with cdf, we take the floor.
      x = math_ops.floor(x)
    x *= array_ops.ones_like(self.probs)
    probs = self.probs * array_ops.ones_like(x)
    # Guard the x == 0 case so that log1p(-1) (= -inf) is never multiplied
    # by zero, which would produce NaN gradients.
    safe_domain = array_ops.where(
        math_ops.equal(x, 0.),
        array_ops.zeros_like(probs),
        probs)
    return x * math_ops.log1p(-safe_domain) + math_ops.log(probs)

  def _entropy(self):
    probs = self._probs
    if self.validate_args:
      probs = control_flow_ops.with_dependencies(
          [check_ops.assert_less(
              probs,
              constant_op.constant(1., probs.dtype),
              message="Entropy is undefined when logits = inf or probs = 1.")],
          probs)
    # Claim: entropy(p) = softplus(s)/p - s
    # where s=logits and p=probs.
    #
    # Proof:
    #
    # entropy(p)
    # := -[(1-p)log(1-p) + plog(p)]/p
    # = -[log(1-p) + plog(p/(1-p))]/p
    # = -[-softplus(s) + ps]/p
    # = softplus(s)/p - s
    #
    # since,
    # log[1-sigmoid(s)]
    # = log[1/(1+exp(s)]
    # = -log[1+exp(s)]
    # = -softplus(s)
    #
    # using the fact that,
    # 1-sigmoid(s) = sigmoid(-s) = 1/(1+exp(s))
    return nn.softplus(self.logits) / probs - self.logits

  def _mean(self):
    # E[X] = (1 - p) / p = exp(-logits).
    return math_ops.exp(-self.logits)

  def _variance(self):
    # Var[X] = (1 - p) / p**2 = mean / p.
    return self._mean() / self.probs

  def _mode(self):
    # The pmf (1 - p)**k * p is maximized at k = 0 for every valid p.
    return array_ops.zeros(self.batch_shape_tensor(), dtype=self.dtype)
4,594 | register | import abc
import pathlib
import contextlib
from typing import Union, Tuple, Dict
from ..util.data import merge
from ..util.entrypoint import base_entry_point
from ..configloader.configloader import BaseConfigLoader
from .base import BaseDataFlowObjectContext, BaseDataFlowObject
from .types import DataFlow
# Filetypes to ignore (don't try to load as a config)
IGNORE = ["swp"]
class MultiCommInAtomicMode(Exception):
    """
    Raised when channel registration is attempted while registration is
    locked (atomic mode).
    """


class NoConfigsForMultiComm(Exception):
    """
    Raised when no configs are found for the loaded type of multicomm.
    """


class NoDataFlows(Exception):
    """
    Raised when no dataflows are found in the dataflow directory.
    """


class NoDataFlowsForConfig(Exception):
    """
    Raised when no dataflow is found matching a channel config.
    """


class BaseCommChannelConfig:
    """
    Config structure for a communication channel. It MUST include a ``dataflow``
    parameter.
    """
class BaseMultiCommContext(BaseDataFlowObjectContext, abc.ABC):
    """
    Abstract Base Class for multicomm contexts.
    """

    def __init__(self, parent: "BaseMultiComm") -> None:
        self.parent = parent

    @abc.abstractmethod
    async def METHOD_NAME(self, config: BaseCommChannelConfig) -> None:
        """
        Register a communication channel with the multicomm context.
        """

    @abc.abstractmethod
    def register_config(self) -> BaseCommChannelConfig:
        """
        Return the config class to be instantiated by the register method.
        """

    def _iter_configs(self, directory: pathlib.Path) -> Dict:
        """
        Yield pathlib.Path objects for each relevant config file. Ignore some
        filetypes.
        """
        # NOTE(review): the return annotation says Dict, but this is a
        # generator of pathlib.Path objects; fixing it would need an
        # Iterator import at file level.
        for path in directory.rglob("*"):
            if path.suffix.replace(".", "") in IGNORE:
                continue
            yield path

    async def register_directory(
        self, directory: Union[pathlib.Path, str]
    ) -> None:
        """
        Register all configs found in a directory.

        Layout expected under ``directory``:
        ``mc/<label>/`` channel configs, ``df/`` dataflows, and optional
        ``override/`` dataflow overrides merged on top.
        """
        # Get the config class for this multicomm
        config_cls: BaseCommChannelConfig = self.register_config()
        # For entering ConfigLoader contexts
        async with contextlib.AsyncExitStack() as exit_stack:
            # Configs for this multicomm
            mc_configs: Dict[Tuple, Union[Dict, BaseCommChannelConfig]] = {}
            df_configs: Dict[Tuple, DataFlow] = {}
            # Convert to pathlib object if not already
            if not isinstance(directory, pathlib.Path):
                directory = pathlib.Path(directory)
            # Load config loaders we'll need as we see their file types
            parsers: Dict[str, BaseConfigLoader] = {}
            # Grab all files containing BaseCommChannelConfigs. Each entry is a
            # BaseCommChannelConfig. However, we don't have its dataflow
            # property. Since that is stored in a separate directory
            mc_dir = pathlib.Path(directory, "mc", self.ENTRY_POINT_LABEL)
            if not mc_dir.is_dir():
                raise NoConfigsForMultiComm(f"In {mc_dir!s}")
            for path in self._iter_configs(mc_dir):
                config_path, config = await BaseConfigLoader.load_file(
                    parsers, exit_stack, path, base_dir=mc_dir
                )
                mc_configs[config_path] = config
            # Grab all files containing DataFlows
            df_dir = pathlib.Path(directory, "df")
            if not df_dir.is_dir():
                raise NoDataFlows(f"In {df_dir!s}")
            # Load all the DataFlows
            for path in self._iter_configs(df_dir):
                config_path, config = await BaseConfigLoader.load_file(
                    parsers, exit_stack, path, base_dir=df_dir
                )
                df_configs[config_path] = config
                # Now that we have all the dataflow, add it to its respective
                # multicomm config
                # NOTE(review): this raises KeyError when a dataflow exists
                # with no matching mc config, and duplicates the assignment
                # performed again after the override pass — confirm intent.
                mc_configs[config_path]["dataflow"] = config
            # Load all overrides
            # NOTE(review): if "override" does not exist, rglob may raise —
            # confirm an existence check is not needed here.
            override_dir = pathlib.Path(directory, "override")
            for path in self._iter_configs(override_dir):
                config_path, config = await BaseConfigLoader.load_file(
                    parsers, exit_stack, path, base_dir=override_dir
                )
                if not config_path in df_configs:
                    self.logger.info(
                        "Overriding non-existent DataFlow: %s", config_path
                    )
                    df_configs[config_path] = config
                else:
                    merge(df_configs[config_path], config)
            # Instantiate all configs and register them
            for config_path in mc_configs.keys():
                # Assign dataflow to its respective channel config
                if not config_path in df_configs:
                    raise NoDataFlowsForConfig(config_path)
                mc_configs[config_path]["dataflow"] = df_configs[config_path]
                # Finally, turn the dict into an object and register it
                mc_configs[config_path] = config_cls._fromdict(
                    **mc_configs[config_path]
                )
                await self.METHOD_NAME(mc_configs[config_path])
# NOTE: the entry point name "dffml.mutlicomm" is misspelled but is a
# published identifier; changing it would break installed plugins.
@base_entry_point("dffml.mutlicomm", "mc")
class BaseMultiComm(BaseDataFlowObject):
    """
    Abstract Base Class for multicomms.
    """
4,595 | extract data | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._operations import build_list_request
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class Operations:
    """Operations group for listing available DigitalTwins REST API operations.

    .. warning::
        **DO NOT** instantiate this class directly.
        Instead, you should access the following operations through
        :class:`~azure.mgmt.digitaltwins.v2022_05_31.aio.AzureDigitalTwinsManagementClient`'s
        :attr:`operations` attribute.
    """
    # Convenience alias so callers can reach the generated model types through
    # the operations group (e.g. client.operations.models.Operation).
    models = _models
    def __init__(self, *args, **kwargs) -> None:
        """Bind client plumbing passed positionally (by the generated client) or by keyword."""
        input_args = list(args)
        # Each dependency is taken positionally if present; otherwise the
        # matching keyword argument is required (kwargs.pop raises KeyError).
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    @distributed_trace
    def list(self, **kwargs: Any) -> AsyncIterable["_models.Operation"]:
        """Lists all of the available DigitalTwins service REST API operations.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either Operation or the result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.digitaltwins.v2022_05_31.models.Operation]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        # api-version may arrive via kwargs or query params; default is pinned to this SDK version.
        api_version: Literal["2022-05-31"] = kwargs.pop("api_version", _params.pop("api-version", "2022-05-31"))
        cls: ClsType[_models.OperationListResult] = kwargs.pop("cls", None)
        # Map HTTP status codes to the azure-core exception raised by map_error.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        def prepare_request(next_link=None):
            # First page: build the canonical list request from the operation metadata.
            if not next_link:
                request = build_list_request(
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                # Re-quote the query params carried by the service-provided next link.
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        # NOTE(review): name masked in dataset extraction; in the generated SDK this
        # paging extractor is conventionally named `extract_data` — confirm upstream.
        async def METHOD_NAME(pipeline_response):
            # Deserialize one page and hand back (continuation token, items).
            deserialized = self._deserialize("OperationListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch one page; raise a typed error for any non-200 response.
            request = prepare_request(next_link)
            pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(get_next, METHOD_NAME)
    list.metadata = {"url": "/providers/Microsoft.DigitalTwins/operations"} |
4,596 | add args | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq import options
from fairseq.models import (
FairseqLanguageModel,
register_model,
register_model_architecture,
)
from fairseq.models.fconv import FConvDecoder
@register_model('fconv_lm')
class FConvLanguageModel(FairseqLanguageModel):
    """Fully convolutional language model wrapping a :class:`FConvDecoder`."""
    def __init__(self, decoder):
        super().__init__(decoder)
    # NOTE(review): name masked in dataset extraction; this is the standard
    # fairseq `add_args` static hook — confirm upstream.
    @staticmethod
    def METHOD_NAME(parser):
        """Add model-specific arguments to the parser."""
        parser.add_argument('--dropout', type=float, metavar='D',
                            help='dropout probability')
        parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
                            help='decoder embedding dimension')
        parser.add_argument('--decoder-layers', type=str, metavar='EXPR',
                            help='decoder layers [(dim, kernel_size), ...]')
        # NOTE(review): --decoder-out-embed-dim is registered here but build_model
        # below passes args.decoder_embed_dim as out_embed_dim — confirm intended.
        parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N',
                            help='decoder output embedding dimension')
        parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
                            help='comma separated list of adaptive softmax cutoff points. '
                                 'Must be used with adaptive_loss criterion')
        parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
                            help='sets adaptive softmax dropout for the tail projections')
        parser.add_argument('--decoder-attention', type=str, metavar='EXPR',
                            help='decoder attention [True, ...]')
    @classmethod
    def build_model(cls, args, task):
        """Build a new model instance."""
        # make sure all arguments are present in older models
        base_lm_architecture(args)
        if hasattr(args, 'max_target_positions') and not hasattr(args, 'tokens_per_sample'):
            args.tokens_per_sample = args.max_target_positions
        decoder = FConvDecoder(
            dictionary=task.target_dictionary,
            embed_dim=args.decoder_embed_dim,
            # eval() turns the layer-spec string into a list of tuples.
            # NOTE(review): eval on config strings is trusted-input only — never
            # feed user-controlled values through --decoder-layers/--decoder-attention.
            convolutions=eval(args.decoder_layers),
            out_embed_dim=args.decoder_embed_dim,
            attention=eval(args.decoder_attention),
            dropout=args.dropout,
            max_positions=args.tokens_per_sample,
            share_embed=False,
            positional_embeddings=False,
            adaptive_softmax_cutoff=(
                options.eval_str_list(args.adaptive_softmax_cutoff, type=int)
                if args.criterion == 'adaptive_loss' else None
            ),
            adaptive_softmax_dropout=args.adaptive_softmax_dropout,
        )
        return FConvLanguageModel(decoder)
@register_model_architecture('fconv_lm', 'fconv_lm')
def base_lm_architecture(args):
    """Fill in any missing hyperparameters with the base fconv_lm defaults."""
    defaults = (
        ('dropout', 0.1),
        ('decoder_embed_dim', 128),
        ('decoder_layers', '[(1268, 4)] * 13'),
        ('decoder_attention', 'False'),
        ('adaptive_softmax_cutoff', None),
        ('adaptive_softmax_dropout', 0),
    )
    # Keep any value already present on args; otherwise install the default.
    for attr_name, default_value in defaults:
        setattr(args, attr_name, getattr(args, attr_name, default_value))
@register_model_architecture('fconv_lm', 'fconv_lm_dauphin_wikitext103')
def fconv_lm_dauphin_wikitext103(args):
    """WikiText-103 configuration of the fconv language model (Dauphin et al.)."""
    # Layer spec is a string later eval()'d by build_model into (dim, kernel) tuples.
    layer_spec = ' + '.join((
        '[(850, 6)] * 3',
        '[(850, 1)] * 1',
        '[(850, 5)] * 4',
        '[(850, 1)] * 1',
        '[(850, 4)] * 3',
        '[(1024, 4)] * 1',
        '[(2048, 4)] * 1',
    ))
    args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 280)
    args.decoder_layers = getattr(args, 'decoder_layers', layer_spec)
    args.decoder_attention = getattr(args, 'decoder_attention', 'False')
    args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', '10000,20000,200000')
    # Fall through to the base architecture for every remaining default.
    base_lm_architecture(args)
@register_model_architecture('fconv_lm', 'fconv_lm_dauphin_gbw')
def fconv_lm_dauphin_gbw(args):
    """Google Billion Words configuration of the fconv language model."""
    # Layer spec is a string later eval()'d by build_model into layer tuples.
    layer_spec = ' + '.join((
        '[(512, 5)]',
        '[(128, 1, 0), (128, 5, 0), (512, 1, 3)] * 3',
        '[(512, 1, 0), (512, 5, 0), (1024, 1, 3)] * 3',
        '[(1024, 1, 0), (1024, 5, 0), (2048, 1, 3)] * 6',
        '[(1024, 1, 0), (1024, 5, 0), (4096, 1, 3)]',
    ))
    args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 128)
    args.decoder_layers = getattr(args, 'decoder_layers', layer_spec)
    args.decoder_attention = getattr(args, 'decoder_attention', 'False')
    args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', '10000,50000,200000')
    # Fall through to the base architecture for every remaining default.
    base_lm_architecture(args)
4,597 | test dedicated host group | from conftest import roundtrip_check
from resoto_plugin_azure.resource.base import GraphBuilder
from resoto_plugin_azure.resource.compute import *
from resotolib.baseresources import VolumeStatus, InstanceStatus
def test_availability_sets(builder: GraphBuilder) -> None:
    """AzureAvailabilitySet round-trips; the fixture data yields 4 resources."""
    collected = roundtrip_check(AzureAvailabilitySet, builder)
    assert len(collected) == 4
def test_capacity_reservation_group(builder: GraphBuilder) -> None:
    """AzureCapacityReservationGroup round-trips; fixture yields 2 resources."""
    collected = roundtrip_check(AzureCapacityReservationGroup, builder)
    assert len(collected) == 2
def test_cloud_service(builder: GraphBuilder) -> None:
    """AzureCloudService round-trips; fixture yields 1 resource."""
    collected = roundtrip_check(AzureCloudService, builder)
    assert len(collected) == 1
def test_compute_operation_value(builder: GraphBuilder) -> None:
    """AzureComputeOperationValue round-trips; fixture yields 1 resource."""
    collected = roundtrip_check(AzureComputeOperationValue, builder)
    assert len(collected) == 1
# NOTE(review): name masked in dataset extraction; original is likely
# test_dedicated_host_group — confirm upstream.
def METHOD_NAME(builder: GraphBuilder) -> None:
    """AzureDedicatedHostGroup round-trips; fixture yields 1 resource."""
    collected = roundtrip_check(AzureDedicatedHostGroup, builder)
    assert len(collected) == 1
def test_disks(builder: GraphBuilder) -> None:
    """AzureDisk round-trips with every property checked; fixture yields 3 disks."""
    collected = roundtrip_check(AzureDisk, builder, all_props=True)
    assert len(collected) == 3
def test_disks_resource(builder: GraphBuilder) -> None:
    """Spot-check the mapped volume fields of the first collected AzureDisk."""
    collected = roundtrip_check(AzureDisk, builder, all_props=True)[0]
    assert collected.volume_size == 200
    assert collected.volume_type == "Premium_LRS"
    assert collected.volume_status == VolumeStatus.UNKNOWN
    assert collected.volume_iops == 120
    assert collected.volume_throughput == 25
    assert collected.volume_encrypted is True
def test_disk_access(builder: GraphBuilder) -> None:
    """AzureDiskAccess round-trips; fixture yields 2 resources."""
    collected = roundtrip_check(AzureDiskAccess, builder)
    assert len(collected) == 2
def test_disk_encryption_set(builder: GraphBuilder) -> None:
    """AzureDiskEncryptionSet round-trips; fixture yields 2 resources."""
    collected = roundtrip_check(AzureDiskEncryptionSet, builder)
    assert len(collected) == 2
def test_gallery(builder: GraphBuilder) -> None:
    """AzureGallery round-trips; fixture yields 1 resource."""
    collected = roundtrip_check(AzureGallery, builder)
    assert len(collected) == 1
def test_image(builder: GraphBuilder) -> None:
    """AzureImage round-trips; fixture yields 1 resource."""
    collected = roundtrip_check(AzureImage, builder)
    assert len(collected) == 1
def test_placement_group(builder: GraphBuilder) -> None:
    """AzureProximityPlacementGroup round-trips; fixture yields 1 resource."""
    collected = roundtrip_check(AzureProximityPlacementGroup, builder)
    assert len(collected) == 1
def test_sku(builder: GraphBuilder) -> None:
    """AzureResourceSku round-trips; fixture yields 3 resources."""
    collected = roundtrip_check(AzureResourceSku, builder)
    assert len(collected) == 3
def test_restore_point_collection(builder: GraphBuilder) -> None:
    """AzureRestorePointCollection round-trips; fixture yields 2 resources."""
    collected = roundtrip_check(AzureRestorePointCollection, builder)
    assert len(collected) == 2
def test_ssh_key(builder: GraphBuilder) -> None:
    """AzureSshPublicKeyResource round-trips; fixture yields 1 resource."""
    collected = roundtrip_check(AzureSshPublicKeyResource, builder)
    assert len(collected) == 1
def test_virtual_machine(builder: GraphBuilder) -> None:
    """AzureVirtualMachine round-trips; fixture yields 2 resources."""
    collected = roundtrip_check(AzureVirtualMachine, builder)
    assert len(collected) == 2
def test_virtual_machine_resources(builder: GraphBuilder) -> None:
    """Spot-check instance type/status mapping of the first collected VM."""
    collected = roundtrip_check(AzureVirtualMachine, builder)[0]
    assert collected.instance_type == "Standard_A0"
    assert collected.instance_status == InstanceStatus.RUNNING
def test_scale_set(builder: GraphBuilder) -> None:
    """AzureVirtualMachineScaleSet round-trips; fixture yields 1 resource."""
    collected = roundtrip_check(AzureVirtualMachineScaleSet, builder)
    assert len(collected) == 1
def test_virtual_machine_size(builder: GraphBuilder) -> None:
    """AzureVirtualMachineSize round-trips; fixture yields 2 resources."""
    collected = roundtrip_check(AzureVirtualMachineSize, builder)
    assert len(collected) == 2
def test_virtual_machine_size_resources(builder: GraphBuilder) -> None:
    """Spot-check cores/memory mapping of the first collected VM size."""
    collected = roundtrip_check(AzureVirtualMachineSize, builder)[0]
    assert collected.instance_type == "Standard_A1_V2"
    assert collected.instance_cores == 1.0
    assert collected.instance_memory == 2.0
def test_snapshot(builder: GraphBuilder) -> None:
    """AzureSnapshot round-trips; fixture yields 2 resources."""
    collected = roundtrip_check(AzureSnapshot, builder)
    assert len(collected) == 2
def test_snapshot_resources(builder: GraphBuilder) -> None:
    """Spot-check field mapping of the second collected AzureSnapshot."""
    collected = roundtrip_check(AzureSnapshot, builder)[1]
    assert collected.snapshot_status == "None"
    assert (
        collected.volume_id
        == "/subscriptions/{subscriptionId}/resourceGroups/myResourceGroup/providers/Microsoft.Compute/snapshots/mySnapshot2"
    )
    assert collected.volume_size == 200
    assert collected.encrypted is True
    assert (
        collected.owner_id
        == "subscriptions/{subscriptionId}/resourceGroups/myResourceGroup/providers/Microsoft.Storage/storageAccounts/myStorageAccount"
    )
4,598 | get primary sp | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
import time
from typing import Any, Dict, Optional
from requests import Request, RequestException, Response, Session, codes
from requests.adapters import HTTPAdapter
from nvflare.apis.overseer_spec import SP, OverseerAgent
from nvflare.security.logging import secure_format_exception
class HttpOverseerAgent(OverseerAgent):
    """Overseer agent that talks to the overseer service over HTTP(S).

    A background worker thread posts periodic heartbeats to the overseer and
    caches the primary service provider (SP) advertised in each reply.
    ``pause``/``resume`` gate the worker with an event; ``end`` stops it.
    """

    def __init__(
        self,
        role,
        overseer_end_point,
        project,
        name: str,
        fl_port: str = "",
        admin_port: str = "",
        heartbeat_interval=5,
    ):
        """Create the agent.

        Args:
            role: One of "server", "client" or "admin".
            overseer_end_point: Base URL of the overseer REST API.
            project: Project name sent with every request.
            name: Participant name; for servers it is embedded in the SP end point.
            fl_port: FL port (used only when role == "server").
            admin_port: Admin port (used only when role == "server").
            heartbeat_interval: Seconds to sleep between heartbeats.

        Raises:
            ValueError: If ``role`` is not one of the accepted values.
        """
        if role not in ["server", "client", "admin"]:
            raise ValueError(f'Expect role in ["server", "client", "admin"] but got {role}')
        super().__init__()
        self._role = role
        self._overseer_end_point = overseer_end_point
        self._project = project
        self._session = None
        self._status_lock = threading.Lock()
        # Worker thread is created here but only started in start().
        self._report_and_query = threading.Thread(target=self._rnq_worker, args=())
        self._psp = SP()
        # Event used to pause/resume the worker loop.
        self._flag = threading.Event()
        self._ca_path = None
        self._cert_path = None
        self._prv_key_path = None
        self._last_service_session_id = ""
        self._asked_to_exit = False
        self._logger = logging.getLogger(self.__class__.__name__)
        self._retry_delay = 4
        self._asked_to_stop_retrying = False
        self._update_callback = None
        self._conditional_cb = False
        if self._role == "server":
            self._sp_end_point = ":".join([name, fl_port, admin_port])
        self._heartbeat_interval = heartbeat_interval

    def _send(
        self, api_point, headers: Optional[Dict[str, Any]] = None, payload: Optional[Dict[str, Any]] = None
    ) -> Optional[Response]:
        """POST ``payload`` to ``api_point``, retrying on connection errors.

        Retries every ``self._retry_delay`` seconds until the request succeeds
        or retrying is cancelled by pause()/end(); returns None in that case.
        """
        while not self._asked_to_stop_retrying:
            try:
                req = Request("POST", api_point, json=payload, headers=headers)
                prepared = self._session.prepare_request(req)
                resp = self._session.send(prepared)
                return resp
            except RequestException as e:
                self._logger.debug(f"Overseer error: {secure_format_exception(e)}")
                time.sleep(self._retry_delay)
        # Retrying was cancelled before a request succeeded.
        return None

    def set_secure_context(self, ca_path: str, cert_path: str = "", prv_key_path: str = ""):
        """Configure TLS: CA bundle for verification plus optional client cert/key."""
        self._ca_path = ca_path
        self._cert_path = cert_path
        self._prv_key_path = prv_key_path

    def start(self, update_callback=None, conditional_cb=False):
        """Open the HTTP session and start the heartbeat worker thread.

        Args:
            update_callback: Called with this agent whenever SP info is refreshed.
            conditional_cb: If True, only invoke the callback when the service
                session id actually changes.
        """
        self._session = Session()
        adapter = HTTPAdapter(max_retries=1)
        self._session.mount("http://", adapter)
        self._session.mount("https://", adapter)
        if self._ca_path:
            self._session.verify = self._ca_path
            self._session.cert = (self._cert_path, self._prv_key_path)
        self._conditional_cb = conditional_cb
        if update_callback:
            self._update_callback = update_callback
        self._report_and_query.start()
        self._flag.set()

    def pause(self):
        """Suspend heartbeats and cancel any in-progress retry loop."""
        self._asked_to_stop_retrying = True
        self._flag.clear()

    def resume(self):
        """Resume heartbeats after pause()."""
        self._asked_to_stop_retrying = False
        self._flag.set()

    def end(self):
        """Stop the worker thread and wait for it to finish."""
        self._asked_to_stop_retrying = True
        self._flag.set()
        self._asked_to_exit = True
        self._report_and_query.join()

    def is_shutdown(self) -> bool:
        """Return whether the agent receives a shutdown request."""
        return self.overseer_info.get("system") == "shutdown"

    def METHOD_NAME(self) -> SP:
        """Return the current primary service provider.

        If the primary SP is not available (not yet reported, or no overseer
        connection established yet), the returned SP has empty name/ports.
        """
        return self._psp

    def promote_sp(self, sp_end_point, headers=None) -> Response:
        """Ask the overseer to promote ``sp_end_point`` to primary SP."""
        api_point = self._overseer_end_point + "/promote"
        # Fix: forward the caller-supplied headers (was hard-coded to None,
        # silently discarding the `headers` argument).
        return self._send(api_point, headers=headers, payload={"sp_end_point": sp_end_point, "project": self._project})

    def set_state(self, state) -> Response:
        """Report a state change to the overseer."""
        api_point = self._overseer_end_point + "/state"
        return self._send(api_point, payload={"state": state})

    def _do_callback(self):
        """Invoke the registered update callback, if any."""
        if self._update_callback:
            self._update_callback(self)

    def _handle_ssid(self, ssid):
        """Trigger the update callback, honoring the conditional_cb setting."""
        if not self._conditional_cb or self._last_service_session_id != ssid:
            self._last_service_session_id = ssid
            self._do_callback()

    def _prepare_data(self):
        """Build the payload fields common to every heartbeat."""
        data = dict(role=self._role, project=self._project)
        return data

    def _rnq_worker(self):
        """Worker loop: report-and-query the overseer every heartbeat interval."""
        data = self._prepare_data()
        if self._role == "server":
            data["sp_end_point"] = self._sp_end_point
        api_point = self._overseer_end_point + "/heartbeat"
        while not self._asked_to_exit:
            # Blocks while paused; set again by resume()/end().
            self._flag.wait()
            self._rnq(api_point, headers=None, data=data)
            time.sleep(self._heartbeat_interval)

    def _rnq(self, api_point, headers, data):
        """Send one heartbeat and refresh the cached primary SP from the reply."""
        resp = self._send(api_point, headers=headers, payload=data)
        if resp is None:
            # Retrying was cancelled (pause()/end()); nothing to process.
            return
        if resp.status_code != codes.ok:
            return
        self.overseer_info = resp.json()
        psp = self.overseer_info.get("primary_sp")
        if psp:
            name, fl_port, admin_port = psp.get("sp_end_point").split(":")
            service_session_id = psp.get("service_session_id", "")
            self._psp = SP(name, fl_port, admin_port, service_session_id, True)
            # last_heartbeat = psp.get("last_heartbeat", "")
            self._handle_ssid(service_session_id)
        else:
            self._psp = SP()
            service_session_id = ""
            self._handle_ssid(service_session_id)
4,599 | on output click | # Generated file. To retain edits, remove this comment.
from dataclasses import dataclass
from typing import Any, Dict
import numpy as np
import js
from pyodide.ffi.wrappers import add_event_listener
import pyodide
from itkwasm_compress_stringify import compress_stringify_async
from compress_stringify_load_sample_inputs import load_sample_inputs
@dataclass
class CompressStringifyModel:
    """Pipeline state for the compress_stringify UI."""
    inputs: Dict[str, Any]   # values read from the input widgets
    options: Dict[str, Any]  # values read from the option widgets
    outputs: Dict[str, Any]  # results produced by the last pipeline run
class CompressStringifyController:
    """Wires the compress_stringify web form to the pipeline.

    Reads inputs/options from the DOM, runs ``compress_stringify_async`` on
    demand, and renders a preview / enables download of the output.
    """

    def __init__(self, load_sample_inputs):
        """Attach DOM event listeners; ``load_sample_inputs`` may be None."""
        self.model = CompressStringifyModel({}, {}, {})
        self.load_sample_inputs = load_sample_inputs
        if load_sample_inputs is not None:
            load_sample_inputs_button = js.document.querySelector("#compress_stringify-inputs [name=load-sample-inputs]")
            load_sample_inputs_button.setAttribute('style', 'display: block-inline;')
            add_event_listener(load_sample_inputs_button, 'click', self.on_load_sample_inputs_click)
        # Inputs
        input_element = js.document.querySelector('#compress_stringify-inputs input[name=input-file]')
        add_event_listener(input_element, 'change', self.on_input_change)
        # Options
        stringify_element = js.document.querySelector('#compress_stringify-inputs sl-checkbox[name=stringify]')
        self.stringify_element = stringify_element
        add_event_listener(stringify_element, 'sl-change', self.on_stringify_change)
        compression_level_element = js.document.querySelector('#compress_stringify-inputs sl-input[name=compression-level]')
        self.compression_level_element = compression_level_element
        add_event_listener(compression_level_element, 'sl-change', self.on_compression_level_change)
        data_url_prefix_element = js.document.querySelector('#compress_stringify-inputs sl-input[name=data-url-prefix]')
        self.data_url_prefix_element = data_url_prefix_element
        add_event_listener(data_url_prefix_element, 'sl-change', self.on_data_url_prefix_change)
        # Outputs
        output_download_element = js.document.querySelector('#compress_stringify-outputs sl-button[name=output-download]')
        self.output_download_element = output_download_element
        add_event_listener(output_download_element, 'click', self.METHOD_NAME)
        # Run
        run_button = js.document.querySelector('#compress_stringify-inputs sl-button[name="run"]')
        self.run_button = run_button
        add_event_listener(run_button, 'click', self.on_run)

    async def on_load_sample_inputs_click(self, event):
        """Load demo inputs into the model, showing a spinner while loading."""
        load_sample_inputs_button = js.document.querySelector("#compress_stringify-inputs [name=load-sample-inputs]")
        load_sample_inputs_button.loading = True
        self.model = await self.load_sample_inputs(self.model)
        load_sample_inputs_button.loading = False

    async def on_input_change(self, event):
        """Read the selected file into bytes and preview its first bytes."""
        files = event.target.files
        array_buffer = await files.item(0).arrayBuffer()
        input_bytes = array_buffer.to_bytes()
        self.model.inputs['input'] = input_bytes
        # Fix: getElementById takes a bare element id; the previous "#..."-prefixed
        # selector always returned None, so setting innerHTML raised AttributeError.
        input_element = js.document.getElementById("compress_stringify-input-details")
        input_element.innerHTML = f"<pre>{str(np.frombuffer(input_bytes[:50], dtype=np.uint8)) + ' ...'}</pre>"

    def on_stringify_change(self, event):
        """Mirror the stringify checkbox into the model options."""
        self.model.options['stringify'] = self.stringify_element.checked

    def on_compression_level_change(self, event):
        """Mirror the compression-level input (as int) into the model options."""
        self.model.options['compression_level'] = int(self.compression_level_element.value)

    def on_data_url_prefix_change(self, event):
        """Mirror the data-url-prefix input into the model options."""
        self.model.options['data_url_prefix'] = self.data_url_prefix_element.value

    def METHOD_NAME(self, event):
        """Download the last output as a binary file, if one exists."""
        if 'output' not in self.model.outputs:
            return
        output = pyodide.ffi.to_js(self.model.outputs['output'])
        js.globalThis.downloadFile(output, 'output.bin')

    async def on_run(self, event):
        """Run the pipeline on the current input/options and render the result."""
        event.preventDefault()
        event.stopPropagation()
        if 'input' not in self.model.inputs:
            js.globalThis.notify("Error while running pipeline", "Missing input 'input'", "danger", "exclamation-octagon")
            return
        self.run_button.loading = True
        try:
            t0 = js.performance.now()
            output = await compress_stringify_async(self.model.inputs['input'], **self.model.options)
            t1 = js.performance.now()
            js.globalThis.notify("compress_stringify successfully completed", f"in {t1 - t0} milliseconds.", "success", "rocket-fill")
            self.model.outputs["output"] = output
            self.output_download_element.variant = "success"
            self.output_download_element.disabled = False
            output_element = js.document.getElementById('compress_stringify-output-details')
            output_element.innerHTML = f"<pre>{str(np.frombuffer(output[:200], dtype=np.uint8)) + ' ...'}</pre>"
            output_element.disabled = False
        except Exception as error:
            js.globalThis.notify("Error while running pipeline", str(error), "danger", "exclamation-octagon")
            raise error
        finally:
            self.run_button.loading = False
compress_stringify_controller = CompressStringifyController(load_sample_inputs) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.