id int64 0 300k | label stringlengths 1 74 ⌀ | text stringlengths 4k 8k |
|---|---|---|
5,700 | parse args | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import mmengine
from mmengine.config import Config, DictAction
from mmengine.hooks import Hook
from mmengine.runner import Runner
def METHOD_NAME():
    """Parse command line arguments for model testing/evaluation.

    Returns:
        argparse.Namespace: Parsed arguments. As a side effect, ensures the
        ``LOCAL_RANK`` environment variable is set (needed by distributed
        launchers).
    """
    parser = argparse.ArgumentParser(
        description='MMPose test (and eval) model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument(
        '--work-dir', help='the directory to save evaluation results')
    parser.add_argument('--out', help='the file to save metric results.')
    parser.add_argument(
        '--dump',
        type=str,
        help='dump predictions to a pickle file for offline evaluation')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        default={},
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. For example, '
        "'--cfg-options model.backbone.depth=18 model.backbone.with_cp=True'")
    parser.add_argument(
        '--show-dir',
        help='directory where the visualization images will be saved.')
    parser.add_argument(
        '--show',
        action='store_true',
        help='whether to display the prediction results in a window.')
    parser.add_argument(
        '--interval',
        type=int,
        default=1,
        help='visualize per interval samples.')
    parser.add_argument(
        '--wait-time',
        type=float,
        default=1,
        help='display time of every window. (second)')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    # BUG FIX: 'parser.METHOD_NAME()' is not a method of ArgumentParser;
    # the intended call is parse_args().
    args = parser.parse_args()
    # Distributed launchers normally export LOCAL_RANK; fall back to the flag.
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args
def merge_args(cfg, args):
    """Merge CLI arguments into the loaded config.

    Args:
        cfg (Config): Config object loaded from ``args.config``.
        args (argparse.Namespace): Parsed command line arguments.

    Returns:
        Config: The same config object, updated in place.
    """
    cfg.launcher = args.launcher
    cfg.load_from = args.checkpoint
    # -------------------- work directory --------------------
    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])
    # -------------------- visualization --------------------
    if args.show or (args.show_dir is not None):
        assert 'visualization' in cfg.default_hooks, \
            'PoseVisualizationHook is not set in the ' \
            '`default_hooks` field of config. Please set ' \
            '`visualization=dict(type="PoseVisualizationHook")`'
        cfg.default_hooks.visualization.enable = True
        cfg.default_hooks.visualization.show = args.show
        if args.show:
            # wait_time only matters when a window is actually displayed
            cfg.default_hooks.visualization.wait_time = args.wait_time
        cfg.default_hooks.visualization.out_dir = args.show_dir
        cfg.default_hooks.visualization.interval = args.interval
    # -------------------- Dump predictions --------------------
    if args.dump is not None:
        assert args.dump.endswith(('.pkl', '.pickle')), \
            'The dump file must be a pkl file.'
        # Append a DumpResults metric so predictions are saved during testing.
        dump_metric = dict(type='DumpResults', out_file_path=args.dump)
        if isinstance(cfg.test_evaluator, (list, tuple)):
            cfg.test_evaluator = [*cfg.test_evaluator, dump_metric]
        else:
            cfg.test_evaluator = [cfg.test_evaluator, dump_metric]
    # -------------------- Other arguments --------------------
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    return cfg
def main():
    """Entry point: load the config, build a Runner and run testing."""
    args = METHOD_NAME()
    # load config
    cfg = Config.fromfile(args.config)
    cfg = merge_args(cfg, args)
    # build the runner from config
    runner = Runner.from_cfg(cfg)
    if args.out:
        # Ad-hoc hook that dumps the final test metrics to `args.out`.
        class SaveMetricHook(Hook):
            def after_test_epoch(self, _, metrics=None):
                # `metrics` may be None if the test epoch produced no results.
                if metrics is not None:
                    mmengine.dump(metrics, args.out)
        # 'LOWEST' priority: run after every other hook has finished.
        runner.register_hook(SaveMetricHook(), 'LOWEST')
    # start testing
    runner.test()
if __name__ == '__main__':
main() |
5,701 | test00 | # -*- coding: utf-8 -*-
# ***************************************************************************
# * Copyright (c) 2016 sliptonic <shopinthewoods@gmail.com> *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
import FreeCAD
import Path
from PathTests.PathTestUtils import PathTestBase
class TestPathCore(PathTestBase):
    """Exercise the core Path.Command / Path.Path API."""

    def METHOD_NAME(self):
        """Test Path command core functionality"""
        # create empty command
        c = Path.Command()
        self.assertIsInstance(c, Path.Command)
        # change name
        c.Name = "G1"
        self.assertEqual(c.Name, "G1")
        # Assign Parameters
        c.Parameters = {"X": 1, "Y": 0}
        self.assertEqual(c.Parameters, {"Y": 0.0, "X": 1.0})
        # change parameters
        c.Parameters = {"X": 1, "Y": 0.5}
        self.assertEqual(c.Parameters, {"Y": 0.5, "X": 1})
        # output gcode
        self.assertEqual(c.toGCode(), "G1 X1.000000 Y0.500000")
        # create and assign name in one
        c2 = Path.Command("G2")
        self.assertEqual(c2.Name, "G2")
        # Create Path and parameters in one
        c3 = Path.Command("G1", {"X": 34, "Y": 1.2})
        self.assertEqual(str(c3), "Command G1 [ X:34 Y:1.2 ]")
        c4 = Path.Command("G1X4Y5")
        self.assertEqual(str(c4), "Command G1 [ X:4 Y:5 ]")
        # use placement
        self.assertEqual(
            str(c3.Placement), "Placement [Pos=(34,1.2,0), Yaw-Pitch-Roll=(0,0,0)]"
        )
        self.assertEqual(c3.toGCode(), "G1 X34.000000 Y1.200000")
        p1 = FreeCAD.Placement()
        p1.Base = FreeCAD.Vector(3, 2, 1)
        self.assertEqual(str(p1), "Placement [Pos=(3,2,1), Yaw-Pitch-Roll=(0,0,0)]")
        c5 = Path.Command("g1", p1)
        self.assertEqual(str(c5), "Command G1 [ X:3 Y:2 Z:1 ]")
        p2 = FreeCAD.Placement()
        p2.Base = FreeCAD.Vector(5, 0, 0)
        # overwrite placement
        c5.Placement = p2
        self.assertEqual(str(c5), "Command G1 [ X:5 ]")
        self.assertEqual(c5.x, 5.0)
        # overwrite individual parameters
        c5.x = 10
        self.assertEqual(c5.x, 10.0)
        c5.y = 2
        self.assertEqual(str(c5), "Command G1 [ X:10 Y:2 ]")
        # set from gcode
        c3.setFromGCode("G1X1Y0")
        self.assertEqual(str(c3), "Command G1 [ X:1 Y:0 ]")

    def test10(self):
        """Test Path Object core functionality"""
        c1 = Path.Command("g1", {"x": 1, "y": 0})
        c2 = Path.Command("g1", {"x": 0, "y": 2})
        p = Path.Path([c1, c2])
        # BUG FIX: assertAlmostEqual was used here on two *strings*; it only
        # passed via its `first == second` fast path and would raise a
        # TypeError (str - str) instead of a clean assertion failure on any
        # mismatch. Use assertEqual for string comparison.
        self.assertEqual(str(p), "Path [ size:2 length:3.2361 ]")
        self.assertEqual(
            str(p.Commands), "[Command G1 [ X:1 Y:0 ], Command G1 [ X:0 Y:2 ]]"
        )
        self.assertAlmostEqual(p.Length, 3.2361, places=4)
        p.addCommands(c1)
        self.assertEqual(
            p.toGCode(),
            "G1 X1.000000 Y0.000000\nG1 X0.000000 Y2.000000\nG1 X1.000000 Y0.000000\n",
        )
        lines = """
G0X-0.5905Y-0.3937S3000M03
G0Z0.125
G1Z-0.004F3
G1X0.9842Y-0.3937F14.17
G1X0.9842Y0.433
G1X-0.5905Y0.433
G1X-0.5905Y-0.3937
G0Z0.5
"""
        output = """G0 S3000.000000 X-0.590500 Y-0.393700
M03
G0 Z0.125000
G1 F3.000000 Z-0.004000
G1 F14.170000 X0.984200 Y-0.393700
G1 X0.984200 Y0.433000
G1 X-0.590500 Y0.433000
G1 X-0.590500 Y-0.393700
G0 Z0.500000
"""
        # create a path directly from a piece of gcode.
        p = Path.Path()
        p.setFromGCode(lines)
        self.assertEqual(p.toGCode(), output)

    def test50(self):
        """Test Path.Length calculation"""
        commands = []
        commands.append(Path.Command("G1", {"X": 1}))
        commands.append(Path.Command("G1", {"Y": 1}))
        path = Path.Path(commands)
        self.assertEqual(path.Length, 2)
5,702 | assert matches glob | # Owner(s): ["oncall: package/deploy"]
from typing import Iterable
from torch.package import GlobGroup
from torch.testing._internal.common_utils import run_tests
try:
from .common import PackageTestCase
except ImportError:
# Support the case where we run this file directly.
from common import PackageTestCase
class TestGlobGroup(PackageTestCase):
    """Tests for torch.package.GlobGroup matching semantics.

    `*` matches within a single separator-delimited component; `**` matches
    across components (including zero components).
    """

    def METHOD_NAME(self, glob: GlobGroup, candidates: Iterable[str]):
        # Assert that every candidate matches the glob group.
        for candidate in candidates:
            self.assertTrue(glob.matches(candidate))

    def assertNotMatchesGlob(self, glob: GlobGroup, candidates: Iterable[str]):
        # Assert that no candidate matches the glob group.
        for candidate in candidates:
            self.assertFalse(glob.matches(candidate))

    def test_one_star(self):
        glob_group = GlobGroup("torch.*")
        self.METHOD_NAME(glob_group, ["torch.foo", "torch.bar"])
        self.assertNotMatchesGlob(glob_group, ["tor.foo", "torch.foo.bar", "torch"])

    def test_one_star_middle(self):
        glob_group = GlobGroup("foo.*.bar")
        self.METHOD_NAME(glob_group, ["foo.q.bar", "foo.foo.bar"])
        self.assertNotMatchesGlob(
            glob_group,
            [
                "foo.bar",
                "foo.foo",
                "outer.foo.inner.bar",
                "foo.q.bar.more",
                "foo.one.two.bar",
            ],
        )

    def test_one_star_partial(self):
        # `*` may also complete a partial component name.
        glob_group = GlobGroup("fo*.bar")
        self.METHOD_NAME(glob_group, ["fo.bar", "foo.bar", "foobar.bar"])
        self.assertNotMatchesGlob(glob_group, ["oij.bar", "f.bar", "foo"])

    def test_one_star_multiple_in_component(self):
        glob_group = GlobGroup("foo/a*.htm*", separator="/")
        self.METHOD_NAME(glob_group, ["foo/a.html", "foo/a.htm", "foo/abc.html"])

    def test_one_star_partial_extension(self):
        glob_group = GlobGroup("foo/*.txt", separator="/")
        self.METHOD_NAME(
            glob_group, ["foo/hello.txt", "foo/goodbye.txt", "foo/.txt"]
        )
        self.assertNotMatchesGlob(
            glob_group, ["foo/bar/hello.txt", "bar/foo/hello.txt"]
        )

    def test_two_star(self):
        # `**` spans any number of components, including zero.
        glob_group = GlobGroup("torch.**")
        self.METHOD_NAME(
            glob_group, ["torch.foo", "torch.bar", "torch.foo.bar", "torch"]
        )
        self.assertNotMatchesGlob(glob_group, ["what.torch", "torchvision"])

    def test_two_star_end(self):
        glob_group = GlobGroup("**.torch")
        self.METHOD_NAME(glob_group, ["torch", "bar.torch"])
        self.assertNotMatchesGlob(glob_group, ["visiontorch"])

    def test_two_star_middle(self):
        glob_group = GlobGroup("foo.**.baz")
        self.METHOD_NAME(
            glob_group, ["foo.baz", "foo.bar.baz", "foo.bar1.bar2.baz"]
        )
        self.assertNotMatchesGlob(glob_group, ["foobaz", "foo.bar.baz.z"])

    def test_two_star_multiple(self):
        glob_group = GlobGroup("**/bar/**/*.txt", separator="/")
        self.METHOD_NAME(
            glob_group, ["bar/baz.txt", "a/bar/b.txt", "bar/foo/c.txt"]
        )
        self.assertNotMatchesGlob(glob_group, ["baz.txt", "a/b.txt"])

    def test_raw_two_star(self):
        glob_group = GlobGroup("**")
        self.METHOD_NAME(glob_group, ["bar", "foo.bar", "ab.c.d.e"])
        self.assertNotMatchesGlob(glob_group, [""])

    def test_invalid_raw(self):
        # `**` must occupy a whole component; "a.**b" is rejected.
        with self.assertRaises(ValueError):
            GlobGroup("a.**b")

    def test_exclude(self):
        glob_group = GlobGroup("torch.**", exclude=["torch.**.foo"])
        self.METHOD_NAME(
            glob_group,
            ["torch", "torch.bar", "torch.barfoo"],
        )
        self.assertNotMatchesGlob(
            glob_group,
            ["torch.foo", "torch.some.foo"],
        )

    def test_exclude_from_all(self):
        glob_group = GlobGroup("**", exclude=["foo.**", "bar.**"])
        self.METHOD_NAME(glob_group, ["a", "hello", "anything.really"])
        self.assertNotMatchesGlob(glob_group, ["foo.bar", "foo.bar.baz"])

    def test_list_include_exclude(self):
        glob_group = GlobGroup(["foo", "bar.**"], exclude=["bar.baz", "bar.qux"])
        self.METHOD_NAME(glob_group, ["foo", "bar.other", "bar.bazother"])
        self.assertNotMatchesGlob(glob_group, ["bar.baz", "bar.qux"])
if __name__ == "__main__":
run_tests() |
5,703 | compute objectives | #!/usr/bin/env/python3
"""This minimal example trains an HMM-based aligner with the forward algorithm.
The encoder is based on a combination of convolutional, recurrent, and
feed-forward networks (CRDNN) that predict phoneme states.
Given the tiny dataset, the expected behavior is to overfit the training data
(with a validation performance that stays high).
"""
import pathlib
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
class AlignBrain(sb.Brain):
    """Brain subclass that trains an HMM aligner with the forward algorithm."""

    def compute_forward(self, batch, stage):
        "Given an input batch it computes the output probabilities."
        batch = batch.to(self.device)
        wavs, lens = batch.sig
        feats = self.hparams.compute_features(wavs)
        feats = self.modules.mean_var_norm(feats, lens)
        x = self.modules.model(feats)
        x = self.modules.lin(x)
        outputs = self.hparams.softmax(x)
        return outputs, lens

    def METHOD_NAME(self, predictions, batch, stage):
        "Given the network predictions and targets computes the forward loss."
        predictions, lens = predictions
        phns, phn_lens = batch.phn_encoded
        # Forward-algorithm score: sum of terminal alpha probabilities.
        sum_alpha_T = self.hparams.aligner(
            predictions, lens, phns, phn_lens, "forward"
        )
        # Maximize alignment likelihood by minimizing its negative.
        loss = -sum_alpha_T.sum()
        if stage != sb.Stage.TRAIN:
            # Viterbi alignments are computed at valid/test time; the
            # returned values are not used further here.
            viterbi_scores, alignments = self.hparams.aligner(
                predictions, lens, phns, phn_lens, "viterbi"
            )
        return loss

    def on_stage_end(self, stage, stage_loss, epoch=None):
        "Gets called when a stage (either training, validation, test) ends."
        if stage == sb.Stage.TRAIN:
            self.train_loss = stage_loss
        if stage == sb.Stage.VALID:
            print("Epoch %d complete" % epoch)
            print("Train loss: %.2f" % self.train_loss)
        if stage != sb.Stage.TRAIN:
            print(stage, "loss: %.2f" % stage_loss)
def data_prep(data_folder, hparams):
    """Creates the datasets and their data processing pipelines.

    Args:
        data_folder: pathlib.Path to the sample ASR data root.
        hparams: loaded hyperparameter dict (uses "num_labels").

    Returns:
        (train_data, valid_data) DynamicItemDataset pair.
    """
    # 1. Declarations:
    train_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=data_folder / "../annotation/ASR_train.json",
        replacements={"data_root": data_folder},
    )
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=data_folder / "../annotation/ASR_dev.json",
        replacements={"data_root": data_folder},
    )
    datasets = [train_data, valid_data]
    label_encoder = sb.dataio.encoder.CTCTextEncoder()
    label_encoder.expect_len(hparams["num_labels"])
    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        sig = sb.dataio.dataio.read_audio(wav)
        return sig
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
    # 3. Define text pipeline:
    @sb.utils.data_pipeline.takes("phn")
    @sb.utils.data_pipeline.provides("phn_list", "phn_encoded")
    def text_pipeline(phn):
        # Generator pipeline: each yield provides the matching output key.
        phn_list = phn.strip().split()
        yield phn_list
        phn_encoded = label_encoder.encode_sequence_torch(phn_list)
        yield phn_encoded
    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
    # 3. Fit encoder:
    # NOTE: In this minimal example, also update from valid data
    label_encoder.update_from_didataset(train_data, output_key="phn_list")
    label_encoder.update_from_didataset(valid_data, output_key="phn_list")
    # 4. Set output:
    sb.dataio.dataset.set_output_keys(datasets, ["id", "sig", "phn_encoded"])
    return train_data, valid_data
def main(device="cpu"):
    """Run the minimal alignment experiment end to end on *device*."""
    experiment_dir = pathlib.Path(__file__).resolve().parent
    hparams_file = experiment_dir / "hyperparams.yaml"
    # Sample data lives alongside the experiment directory.
    data_folder = "../../samples/ASR/"
    data_folder = (experiment_dir / data_folder).resolve()
    # Load model hyper parameters:
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin)
    # Dataset creation
    train_data, valid_data = data_prep(data_folder, hparams)
    # Trainer initialization
    ali_brain = AlignBrain(
        hparams["modules"],
        hparams["opt_class"],
        hparams,
        run_opts={"device": device},
    )
    # Training/validation loop
    ali_brain.fit(
        range(hparams["N_epochs"]),
        train_data,
        valid_data,
        train_loader_kwargs=hparams["dataloader_options"],
        valid_loader_kwargs=hparams["dataloader_options"],
    )
    # Evaluation is run separately (now just evaluating on valid data)
    ali_brain.evaluate(valid_data)
    # Check if model overfits for integration test
    assert ali_brain.train_loss < 350
if __name__ == "__main__":
main()
def test_error(device):
main(device) |
5,704 | connect with context | from typing import TypeVar, Callable, overload
from functools import wraps
from AnyQt.QtCore import Qt, QObject, Signal, BoundSignal
from orangecanvas.utils.qobjref import qobjref_weak
class _InvokeEmitter(QObject):
sig = Signal(object, object)
class _InvokeCaller(QObject):
sig = Signal(object, object)
A = TypeVar("A")
T1 = TypeVar("T1")
T2 = TypeVar("T2")
T3 = TypeVar("T3")
T4 = TypeVar("T4")
T5 = TypeVar("T5")
T6 = TypeVar("T6")
@overload
def qinvoke(
func: Callable[[], A], context: QObject,
type: Qt.ConnectionType = Qt.QueuedConnection
) -> Callable[[], None]: ...
@overload
def qinvoke(
func: Callable[[T1], A], context: QObject,
type: Qt.ConnectionType = Qt.QueuedConnection
) -> Callable[[T1], None]: ...
@overload
def qinvoke(
func: Callable[[T1, T2], A], context: QObject,
type: Qt.ConnectionType = Qt.QueuedConnection
) -> Callable[[T1, T2], None]: ...
@overload
def qinvoke(
func: Callable[[T1, T2, T3], A], context: QObject,
type: Qt.ConnectionType = Qt.QueuedConnection
) -> Callable[[T1, T2, T3], None]: ...
@overload
def qinvoke(
func: Callable[[T1, T2, T3, T4], A], context: QObject,
type: Qt.ConnectionType = Qt.QueuedConnection
) -> Callable[[T1, T2, T3, T4], None]: ...
@overload
def qinvoke(
func: Callable[[T1, T2, T3, T4, T5], A], context: QObject,
type: Qt.ConnectionType = Qt.QueuedConnection
) -> Callable[[T1, T2, T3, T4, T5], None]: ...
@overload
def qinvoke(
func: Callable[[T1, T2, T3, T4, T5, T6], A], context: QObject,
type: Qt.ConnectionType = Qt.QueuedConnection
) -> Callable[[T1, T2, T3, T4, T5, T6], None]: ...
@overload
def qinvoke(
*, context: QObject, type: Qt.ConnectionType = Qt.QueuedConnection
) -> Callable[[Callable[..., A]], Callable[..., None]]: ...
def qinvoke(func: Callable = None, context: QObject = None, type=Qt.QueuedConnection):
    """
    Wrap and return a callable, such that it will be executed in the
    `context`'s thread/event loop.

    Parameters
    ----------
    func: Callable[..., Any]
        The function to be executed.
    context: QObject
        The invoking context. The `func` will be called in the specific event
        loop of `context`. If `context` is deleted then the call will be a
        noop.
    type: Qt.ConnectionType
        The connection type.

    Returns
    -------
    wrapped: Callable[..., None]
        A wrapped function taking the same arguments as `func`, but returning
        no value. Calling this function will schedule `func` to be called from
        `context`'s event loop.
    """
    def decorator(func: Callable[..., A]) -> Callable[..., None]:
        emitter = _InvokeEmitter()
        # caller 'lives' in context's thread. If context is deleted so is the
        # caller (child objects are deleted before parents). This is used to
        # achieve (or fake) connection auto-disconnect.
        caller = _InvokeCaller(context)
        caller_ref = qobjref_weak(caller)
        context_ref = qobjref_weak(context)

        def call_in_context(args, kwargs):
            context = context_ref()
            if context is not None:
                # BUG FIX: keyword arguments must be unpacked with '**';
                # 'func(*args, *kwargs)' would pass only the dict's *keys*
                # as extra positional arguments and drop the values.
                func(*args, **kwargs)

        # connection from emitter -(queued)-> caller -(direct)-> func
        emitter.sig.connect(caller.sig, type)
        caller.sig.connect(call_in_context, Qt.DirectConnection)

        def disconnect():
            caller = caller_ref()
            if caller is not None:
                caller.sig.disconnect(call_in_context)
                caller.setParent(None)  # this should delete the caller

        @wraps(func)
        def wrapped(*args, **kwargs):
            # emitter is captured in this closure. This should be the only
            # reference to it. It should be deleted along with `wrapped`.
            emitter.sig.emit(args, kwargs)

        wrapped.disconnect = disconnect  # type: ignore
        return wrapped

    if func is not None:
        if context is not None:
            return decorator(func)
        else:
            raise TypeError("'context' is required when 'func' is given")
    elif context is None:
        raise TypeError("'context' is required")
    return decorator
def METHOD_NAME(
    signal: BoundSignal,
    context: QObject,
    functor: Callable,
    type=Qt.AutoConnection
):
    """
    Connect a signal to a callable functor to be invoked in the specific event
    loop of `context`.

    The connection will automatically disconnect if the sender or the
    context is destroyed. However, you should take care that any objects
    used within the functor are still alive when the signal is emitted.

    Note
    ----
    Like the QObject.connect overload that takes an explicit context QObject,
    which is not exposed by PyQt
    """
    # Wrap the functor so invocation is marshalled into context's event loop.
    f = qinvoke(functor, context=context, type=type)
    return signal.connect(f)
5,705 | test show backends | """
:codeauthor: Jayesh Kariya <jayeshk@saltstack.com>
Test cases for salt.modules.haproxyconn
"""
import pytest
import salt.modules.haproxyconn as haproxyconn
class Mockcmds:
    """Stand-in for haproxy's ``cmds`` interface used by these tests.

    Records the most recent backend/server/weight arguments on the instance
    and returns canned textual responses.
    """

    def __init__(self):
        # Last-seen call arguments, inspected by the tests.
        self.backend = None
        self.server = None
        self.weight = None

    def listServers(self, backend):
        """Remember *backend* and return two canned server status lines."""
        self.backend = backend
        status_lines = (
            "Name: server01 Status: UP Weight: 1 bIn: 22 bOut: 12",
            "Name: server02 Status: MAINT Weight: 2 bIn: 0 bOut: 0",
        )
        return "\n".join(status_lines)

    def enableServer(self, server, backend):
        """Record the call and acknowledge enabling *server*."""
        self.server, self.backend = server, backend
        return "server enabled"

    def disableServer(self, server, backend):
        """Record the call and acknowledge disabling *server*."""
        self.server, self.backend = server, backend
        return "server disabled"

    def getWeight(self, server, backend, weight=0):
        """Record the call and return a canned weight response."""
        self.server, self.backend, self.weight = server, backend, weight
        return "server weight"

    @staticmethod
    def showFrontends():
        """Return the canned frontend listing."""
        return "\n".join(["frontend-alpha", "frontend-beta", "frontend-gamma"])

    @staticmethod
    def showBackends():
        """Return the canned backend listing."""
        return "\n".join(["backend-alpha", "backend-beta", "backend-gamma"])
class Mockhaproxy:
    """
    Mock of haproxy
    """
    def __init__(self):
        # Mirror the real module's `cmds` attribute with the mock commands.
        self.cmds = Mockcmds()
class MockHaConn:
    """Stand-in for a haproxy socket connection."""

    def __init__(self, socket=None):
        # Most recent command object passed to sendCmd().
        self.ha_cmd = None

    def sendCmd(self, ha_cmd, objectify=False):
        """Record *ha_cmd* (and the objectify flag) and echo it back."""
        self.ha_cmd, self.objectify = ha_cmd, objectify
        return ha_cmd
@pytest.fixture
def configure_loader_modules():
    # Inject the mock haproxy module and connection factory into
    # salt.modules.haproxyconn's globals for every test in this module.
    return {haproxyconn: {"haproxy": Mockhaproxy(), "_get_conn": MockHaConn}}
# 'list_servers' function tests: 1
def test_list_servers():
"""
Test list_servers
"""
assert haproxyconn.list_servers("mysql")
# 'enable_server' function tests: 1
def test_enable_server():
"""
Test enable_server
"""
assert haproxyconn.enable_server("web1.salt.com", "www")
# 'disable_server' function tests: 1
def test_disable_server():
"""
Test disable_server
"""
assert haproxyconn.disable_server("db1.salt.com", "mysql")
# 'get_weight' function tests: 1
def test_get_weight():
"""
Test get the weight of a server
"""
assert haproxyconn.get_weight("db1.salt.com", "mysql")
# 'set_weight' function tests: 1
def test_set_weight():
"""
Test setting the weight of a given server
"""
assert haproxyconn.set_weight("db1.salt.com", "mysql", weight=11)
# 'show_frontends' function tests: 1
def test_show_frontends():
"""
Test print all frontends received from the HAProxy socket
"""
assert haproxyconn.show_frontends()
def test_list_frontends():
"""
Test listing all frontends
"""
assert sorted(haproxyconn.list_frontends()) == sorted(
["frontend-alpha", "frontend-beta", "frontend-gamma"]
)
# 'show_backends' function tests: 1
def METHOD_NAME():
"""
Test print all backends received from the HAProxy socket
"""
assert haproxyconn.show_backends()
def test_list_backends():
"""
Test listing of all backends
"""
assert sorted(haproxyconn.list_backends()) == sorted(
["backend-alpha", "backend-beta", "backend-gamma"]
)
def test_get_backend():
"""
Test get_backend and compare returned value
"""
expected_data = {
"server01": {"status": "UP", "weight": 1, "bin": 22, "bout": 12},
"server02": {"status": "MAINT", "weight": 2, "bin": 0, "bout": 0},
}
assert haproxyconn.get_backend("test") == expected_data
def test_wait_state_true():
"""
Test a successful wait for state
"""
assert haproxyconn.wait_state("test", "server01")
def test_wait_state_false():
"""
Test a failed wait for state, with a timeout of 0
"""
assert not haproxyconn.wait_state("test", "server02", "up", 0) |
5,706 | test actionname | import datetime
from unittest import mock
from testifycompat import assert_equal
from testifycompat import assert_raises
from testifycompat import run
from testifycompat import setup
from testifycompat import TestCase
from tron import command_context
from tron import node
from tron import scheduler
from tron.core import actionrun
from tron.core import job
from tron.core import jobrun
from tron.core.jobrun import JobRunCollection
class TestEmptyContext(TestCase):
@setup
def build_context(self):
self.context = command_context.CommandContext(None)
def test__getitem__(self):
assert_raises(KeyError, self.context.__getitem__, "foo")
def test_get(self):
assert not self.context.get("foo")
class TestBuildFilledContext(TestCase):
def test_build_filled_context_no_objects(self):
output = command_context.build_filled_context()
assert not output.base
assert not output.next
def test_build_filled_context_single(self):
output = command_context.build_filled_context(command_context.JobContext,)
assert isinstance(output.base, command_context.JobContext)
assert not output.next
def test_build_filled_context_chain(self):
objs = [command_context.JobContext, command_context.JobRunContext]
output = command_context.build_filled_context(*objs)
assert isinstance(output.base, objs[1])
assert isinstance(output.next.base, objs[0])
assert not output.next.next
class SimpleContextTestCaseBase(TestCase):
__test__ = False
def test_hit(self):
assert_equal(self.context["foo"], "bar")
def test_miss(self):
assert_raises(KeyError, self.context.__getitem__, "your_mom")
def test_get_hit(self):
assert_equal(self.context.get("foo"), "bar")
def test_get_miss(self):
assert not self.context.get("unknown")
class SimpleDictContextTestCase(SimpleContextTestCaseBase):
@setup
def build_context(self):
self.context = command_context.CommandContext(dict(foo="bar"))
class SimpleObjectContextTestCase(SimpleContextTestCaseBase):
@setup
def build_context(self):
class Obj:
foo = "bar"
self.context = command_context.CommandContext(Obj)
class ChainedDictContextTestCase(SimpleContextTestCaseBase):
@setup
def build_context(self):
self.next_context = command_context.CommandContext(dict(foo="bar", next_foo="next_bar"),)
self.context = command_context.CommandContext(dict(), self.next_context,)
def test_chain_get(self):
assert_equal(self.context["next_foo"], "next_bar")
class ChainedDictOverrideContextTestCase(SimpleContextTestCaseBase):
@setup
def build_context(self):
self.next_context = command_context.CommandContext(dict(foo="your mom", next_foo="next_bar"),)
self.context = command_context.CommandContext(dict(foo="bar"), self.next_context,)
def test_chain_get(self):
assert_equal(self.context["next_foo"], "next_bar")
class ChainedObjectOverrideContextTestCase(SimpleContextTestCaseBase):
@setup
def build_context(self):
class MyObject(TestCase):
pass
obj = MyObject()
obj.foo = "bar"
self.next_context = command_context.CommandContext(dict(foo="your mom", next_foo="next_bar"),)
self.context = command_context.CommandContext(obj, self.next_context)
def test_chain_get(self):
assert_equal(self.context["next_foo"], "next_bar")
class TestJobContext(TestCase):
    """Tests for command_context.JobContext date-spec key lookups."""

    @setup
    def setup_job(self):
        # Fixed run_time makes the date-arithmetic assertions deterministic.
        self.last_success = mock.Mock(run_time=datetime.datetime(2012, 3, 14))
        mock_scheduler = mock.create_autospec(scheduler.GeneralScheduler)
        run_collection = mock.create_autospec(JobRunCollection, last_success=self.last_success,)
        self.job = job.Job("MASTER.jobname", mock_scheduler, run_collection=run_collection,)
        self.context = command_context.JobContext(self.job)

    def test_name(self):
        assert_equal(self.context.name, self.job.name)

    def test__getitem__last_success(self):
        # "last_success#day-1": day component of (last success - 1 day).
        item = self.context["last_success#day-1"]
        expected_date = self.last_success.run_time - datetime.timedelta(days=1)
        assert_equal(item, str(expected_date.day))
        item = self.context["last_success#shortdate"]
        assert_equal(item, "2012-03-14")

    def test__getitem__last_success_bad_date_spec(self):
        # Unknown date-arithmetic unit ("beers") must raise KeyError.
        name = "last_success#beers-3"
        assert_raises(KeyError, lambda: self.context[name])

    def test__getitem__last_success_bad_date_name(self):
        # Unknown run name ("first_success") must raise KeyError.
        name = "first_success#shortdate-1"
        assert_raises(KeyError, lambda: self.context[name])

    def test__getitem__last_success_no_date_spec(self):
        # A bare run name without "#spec" must raise KeyError.
        name = "last_success"
        assert_raises(KeyError, lambda: self.context[name])

    def test__getitem__missing(self):
        assert_raises(KeyError, lambda: self.context["bogus"])

    def test_namespace(self):
        # Namespace is the prefix of the dotted job name.
        assert self.context.namespace == "MASTER"
class TestJobRunContext(TestCase):
@setup
def setup_context(self):
self.jobrun = mock.create_autospec(jobrun.JobRun, run_time="sometime", manual=True)
self.context = command_context.JobRunContext(self.jobrun)
def test_cleanup_job_status(self):
self.jobrun.action_runs.is_failed = False
self.jobrun.action_runs.is_complete_without_cleanup = True
assert_equal(self.context.cleanup_job_status, "SUCCESS")
def test_cleanup_job_status_failure(self):
self.jobrun.action_runs.is_failed = True
assert_equal(self.context.cleanup_job_status, "FAILURE")
def test_runid(self):
assert_equal(self.context.runid, self.jobrun.id)
def test_manual_run(self):
assert self.context.manual == "true"
@mock.patch("tron.command_context.timeutils.DateArithmetic", autospec=True)
def test__getitem__(self, mock_date_math):
name = "date_name"
time_value = self.context[name]
mock_date_math.parse.assert_called_with(name, self.jobrun.run_time)
assert_equal(time_value, mock_date_math.parse.return_value)
class TestActionRunContext(TestCase):
@setup
def build_context(self):
mock_node = mock.create_autospec(node.Node, hostname="something")
self.action_run = mock.create_autospec(actionrun.ActionRun, action_name="something", node=mock_node,)
self.context = command_context.ActionRunContext(self.action_run)
def METHOD_NAME(self):
assert_equal(self.context.actionname, self.action_run.action_name)
def test_node_hostname(self):
assert_equal(self.context.node, self.action_run.node.hostname)
class TestFiller(TestCase):
@setup
def setup_filler(self):
self.filler = command_context.Filler()
def test_filler_with_job__getitem__(self):
context = command_context.JobContext(self.filler)
todays_date = datetime.date.today().strftime("%Y-%m-%d")
assert_equal(context["last_success#shortdate"], todays_date)
def test_filler_with_job_run__getitem__(self):
context = command_context.JobRunContext(self.filler)
todays_date = datetime.date.today().strftime("%Y-%m-%d")
assert_equal(context["shortdate"], todays_date)
if __name__ == "__main__":
run() |
5,707 | pprint | """A collection of modules for iterating through different kinds of
tree, generating tokens identical to those produced by the tokenizer
module.
To create a tree walker for a new type of tree, you need to
implement a tree walker object (called TreeWalker by convention) that
implements a 'serialize' method which takes a tree as sole argument and
returns an iterator which generates tokens.
"""
from __future__ import absolute_import, division, unicode_literals
from .. import constants
from .._utils import default_etree
__all__ = ["getTreeWalker", "pprint"]
treeWalkerCache = {}
def getTreeWalker(treeType, implementation=None, **kwargs):
    """Get a TreeWalker class for various types of tree with built-in support
    :arg str treeType: the name of the tree type required (case-insensitive).
    Supported values are:
    * "dom": The xml.dom.minidom DOM implementation
    * "etree": A generic walker for tree implementations exposing an
    elementtree-like interface (known to work with ElementTree,
    cElementTree and lxml.etree).
    * "lxml": Optimized walker for lxml.etree
    * "genshi": a Genshi stream
    :arg implementation: A module implementing the tree type e.g.
    xml.etree.ElementTree or cElementTree (Currently applies to the "etree"
    tree type only).
    :arg kwargs: keyword arguments passed to the etree walker--for other
    walkers, this has no effect
    :returns: a TreeWalker class
    """
    treeType = treeType.lower()
    if treeType not in treeWalkerCache:
        # Backends are imported lazily so unused ones (and their optional
        # third-party dependencies) are never loaded.
        if treeType == "dom":
            from . import dom
            treeWalkerCache[treeType] = dom.TreeWalker
        elif treeType == "genshi":
            from . import genshi
            treeWalkerCache[treeType] = genshi.TreeWalker
        elif treeType == "lxml":
            from . import etree_lxml
            treeWalkerCache[treeType] = etree_lxml.TreeWalker
        elif treeType == "etree":
            from . import etree
            if implementation is None:
                implementation = default_etree
            # XXX: NEVER cache here, caching is done in the etree submodule
            return etree.getETreeModule(implementation, **kwargs).TreeWalker
    # Unknown tree types fall through and return None rather than raising.
    return treeWalkerCache.get(treeType)
def concatenateCharacterTokens(tokens):
    """Merge runs of character tokens into single "Characters" tokens.

    Consecutive "Characters"/"SpaceCharacters" tokens are buffered and
    emitted as one combined "Characters" token; every other token is
    passed through unchanged.
    """
    buffered = []

    def flush():
        # Emit (and clear) any buffered text as one Characters token.
        if buffered:
            merged = {"type": "Characters", "data": "".join(buffered)}
            del buffered[:]
            return [merged]
        return []

    for token in tokens:
        if token["type"] in ("Characters", "SpaceCharacters"):
            buffered.append(token["data"])
        else:
            for pending in flush():
                yield pending
            yield token
    for pending in flush():
        yield pending
def METHOD_NAME(walker):
    """Pretty printer for tree walkers

    Takes a TreeWalker instance and pretty prints the output of walking the tree.

    :arg walker: a TreeWalker instance

    :returns: a string with one indented line per tag/attribute/text token
    """
    output = []
    indent = 0
    # Merge adjacent character tokens first so text prints as one line.
    for token in concatenateCharacterTokens(walker):
        type = token["type"]
        if type in ("StartTag", "EmptyTag"):
            # tag name
            if token["namespace"] and token["namespace"] != constants.namespaces["html"]:
                # Non-HTML namespaces are shown as a prefix before the tag name.
                if token["namespace"] in constants.prefixes:
                    ns = constants.prefixes[token["namespace"]]
                else:
                    ns = token["namespace"]
                name = "%s %s" % (ns, token["name"])
            else:
                name = token["name"]
            output.append("%s<%s>" % (" " * indent, name))
            indent += 2
            # attributes (sorted for consistent ordering)
            attrs = token["data"]
            for (namespace, localname), value in sorted(attrs.items()):
                if namespace:
                    if namespace in constants.prefixes:
                        ns = constants.prefixes[namespace]
                    else:
                        ns = namespace
                    name = "%s %s" % (ns, localname)
                else:
                    name = localname
                output.append("%s%s=\"%s\"" % (" " * indent, name, value))
            # self-closing: an EmptyTag opens and closes at once, so undo
            # the indent immediately.
            if type == "EmptyTag":
                indent -= 2
        elif type == "EndTag":
            indent -= 2
        elif type == "Comment":
            output.append("%s<!-- %s -->" % (" " * indent, token["data"]))
        elif type == "Doctype":
            if token["name"]:
                if token["publicId"]:
                    # publicId present: always show both id slots.
                    output.append("""%s<!DOCTYPE %s "%s" "%s">""" %
                                  (" " * indent,
                                   token["name"],
                                   token["publicId"],
                                   token["systemId"] if token["systemId"] else ""))
                elif token["systemId"]:
                    # systemId only: publicId slot rendered empty.
                    output.append("""%s<!DOCTYPE %s "" "%s">""" %
                                  (" " * indent,
                                   token["name"],
                                   token["systemId"]))
                else:
                    output.append("%s<!DOCTYPE %s>" % (" " * indent,
                                                       token["name"]))
            else:
                output.append("%s<!DOCTYPE >" % (" " * indent,))
        elif type == "Characters":
            output.append("%s\"%s\"" % (" " * indent, token["data"]))
        elif type == "SpaceCharacters":
            # concatenateCharacterTokens folds these into Characters tokens.
            assert False, "concatenateCharacterTokens should have got rid of all Space tokens"
        else:
            raise ValueError("Unknown token type, %s" % type)
    return "\n".join(output)
5,708 | available data cols | import pandas as pd
import traitlets as tl
from podpac.core.utils import common_doc, cached_property
from podpac.core.coordinates import Coordinates, StackedCoordinates
from podpac.core.data.datasource import COMMON_DATA_DOC, DATA_DOC
from podpac.core.data.file_source import BaseFileSource, FileKeysMixin, LoadFileMixin
@common_doc(COMMON_DATA_DOC)
class CSV(FileKeysMixin, LoadFileMixin, BaseFileSource):
    """Create a DataSource from a .csv file.

    This class assumes that the data has a storage format such as:

    header 1, header 2, header 3, ...
    row1_data1, row1_data2, row1_data3, ...
    row2_data1, row2_data2, row2_data3, ...

    Attributes
    ----------
    source : str
        Path to the csv file
    header : int, None
        Row number containing the column names, default 0. Use None for no header.
    dataset : pd.DataFrame
        Raw Pandas DataFrame used to read the data
    coordinates : :class:`podpac.Coordinates`
        {coordinates}
    data_key : str, int
        data column number or column title, default 'data'
    lat_key : str, int
        latitude column number or column title, default 'lat'
    lon_key : str, int
        longitude column number or column title, default 'lon'
    time_key : str, int
        time column number or column title, default 'time'
    alt_key : str, int
        altitude column number or column title, default 'alt'
    crs : str
        Coordinate reference system of the coordinates

    See Also
    --------
    CSV : Interpolated CSV file datasource for general use.
    """

    # Each *_key trait accepts either a column title (str) or a 0-based
    # column number (int).
    header = tl.Any(default_value=0).tag(attr=True)
    lat_key = tl.Union([tl.Unicode(), tl.Int()], default_value="lat").tag(attr=True)
    lon_key = tl.Union([tl.Unicode(), tl.Int()], default_value="lon").tag(attr=True)
    time_key = tl.Union([tl.Unicode(), tl.Int()], default_value="time").tag(attr=True)
    alt_key = tl.Union([tl.Unicode(), tl.Int()], default_value="alt").tag(attr=True)
    # data_key may also be a list of titles/numbers to select multiple outputs.
    data_key = tl.Union([tl.Unicode(), tl.Int(), tl.List(trait=tl.Unicode()), tl.List(trait=tl.Int())]).tag(attr=True)

    @tl.default("data_key")
    def _default_data_key(self):
        # Defer to the FileKeysMixin default.
        return super(CSV, self)._default_data_key()

    @tl.validate("data_key")
    def _validate_data_key(self, d):
        """Reject data keys that are not present in the file."""
        keys = d["value"]
        if not isinstance(keys, list):
            keys = [d["value"]]
        if isinstance(keys[0], int):
            # int keys are validated against available column numbers
            for col in keys:
                if col not in self.METHOD_NAME:
                    raise ValueError("Invalid data_key %d, available columns are %s" % (col, self.METHOD_NAME))
        else:
            # str keys are validated against available column titles
            for key in keys:
                if key not in self.available_data_keys:
                    raise ValueError("Invalid data_key '%s', available keys are %s" % (key, self.available_data_keys))
        return d["value"]

    @tl.default("outputs")
    def _default_outputs(self):
        # A list of data keys produces named outputs; a single key does not.
        if not isinstance(self.data_key, list):
            return None
        else:
            return [self._get_key(elem) for elem in self.data_key]

    # -------------------------------------------------------------------------
    # public api methods
    # -------------------------------------------------------------------------

    def open_dataset(self, f):
        """Read the csv file into the raw pandas DataFrame."""
        # NOTE(review): infer_datetime_format is deprecated in pandas>=2.0 --
        # confirm the supported pandas version before upgrading.
        return pd.read_csv(f, parse_dates=True, infer_datetime_format=True, header=self.header)

    @cached_property
    def dims(self):
        """list of dataset coordinate dimensions, in file column order"""
        lookup = {
            self._get_key(self.lat_key): "lat",
            self._get_key(self.lon_key): "lon",
            self._get_key(self.alt_key): "alt",
            self._get_key(self.time_key): "time",
        }
        return [lookup[key] for key in self.dataset.columns if key in lookup]

    @cached_property
    def keys(self):
        """all column titles in the file"""
        return self.dataset.columns.tolist()

    @cached_property
    def available_data_keys(self):
        """available data keys (column titles that are not coordinate columns)"""
        dim_keys = [self._get_key(key) for key in [self.lat_key, self.lon_key, self.alt_key, self.time_key]]
        keys = [key for key in self.keys if key not in dim_keys]
        if len(keys) == 0:
            raise ValueError("No data keys found in '%s'" % self.source)
        return keys

    @cached_property
    def METHOD_NAME(self):
        """available data column numbers (positions of available_data_keys)"""
        return [self._get_col(key) for key in self.available_data_keys]

    @common_doc(COMMON_DATA_DOC)
    def get_coordinates(self):
        """{get_coordinates}

        Note: CSV files have StackedCoordinates.
        """
        coords = super(CSV, self).get_coordinates()
        if len(coords) == 1:
            return coords
        stacked = StackedCoordinates(list(coords.values()))
        return Coordinates([stacked], validate_crs=False, **coords.properties)

    @common_doc(COMMON_DATA_DOC)
    def get_data(self, coordinates, coordinates_index):
        """{get_data}"""
        if not isinstance(self.data_key, list):
            I = self._get_col(self.data_key)
        else:
            I = [self._get_col(key) for key in self.data_key]
        # NOTE(review): rows are selected with coordinates_index[0], which
        # assumes a single (stacked) index dimension -- confirm.
        data = self.dataset.iloc[coordinates_index[0], I]
        return self.create_output_array(coordinates, data=data)

    # -------------------------------------------------------------------------
    # helper methods
    # -------------------------------------------------------------------------

    def _lookup_key(self, dim):
        """Return the column title configured for coordinate dimension *dim*."""
        lookup = {"lat": self.lat_key, "lon": self.lon_key, "alt": self.alt_key, "time": self.time_key}
        return self._get_key(lookup[dim])

    def _get_key(self, key):
        """Normalize a column reference (title or number) to a column title."""
        return self.dataset.columns[key] if isinstance(key, int) else key

    def _get_col(self, key):
        """Normalize a column reference (title or number) to a column number."""
        return key if isinstance(key, int) else self.dataset.columns.get_loc(key)
5,709 | get property | import base64,logging,subprocess
from flask import request,abort,jsonify,make_response
# Property names that may be read/written through the generic property API.
valid_properties = ["ssid", "brand", "client-ssid", "client-wifipassword", "client-wificountry", "channel", "hostname", "staticsite", "password",
                    "system", "ui-config", "wpa-passphrase", "openwell-download", "course-download", "is-moodle", "wifi-info", "wifi-restart"]
# Brand-level property names exposed under /admin/api/brand/<prop>.
valid_brand_properties = ["g_device", "server_url", "server_authorization", "server_sitename",
                          "server_siteadmin_name", "server_siteadmin_email", "server_siteadmin_phone", "enable_mass_storage",
                          "usb0nomount", "enhanced", "lcd_pages_main", "lcd_pages_info", "lcd_pages_battery", "lcd_pages_multi_bat",
                          "lcd_pages_memory", "lcd_pages_stats", "lcd_pages_admin", "otg_enable"]

# Release string reported in the X-Connectbox-Version response header.
# Falls back to 'dev' when the release file is absent (best effort).
connectbox_version = 'dev'
try:
    with open('/etc/connectbox-release', 'r') as version_file:
        connectbox_version = version_file.read().replace('\n', '')
except Exception as err:
    # logging.warn is deprecated; use warning() with lazy %-style args.
    logging.warning('Error reading release: %s', err)
def _abort_bad_request():
    """Abort the current request with a 400 BAD REQUEST response."""
    response = make_response("BAD REQUEST", 400)
    abort(response)
def _abort_unauthorized():
    """Abort the current request with a 401 Unauthorized response."""
    response = make_response("Unauthorized", 401)
    abort(response)
def _call_command(extra_args):
    """Run ConnectBoxManage.sh with *extra_args*; wrap its output as JSON.

    The JSON body carries the exit code and the command output split into
    lines; stderr is used when the command fails.
    """
    base_cmd = ["sudo", "/usr/local/connectbox/bin/ConnectBoxManage.sh"]
    logging.debug("_call_command" + " ".join(base_cmd))
    completed = subprocess.run(
        base_cmd + extra_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output = completed.stdout if completed.returncode == 0 else completed.stderr
    response = jsonify(
        code=completed.returncode,
        result=output.decode("utf-8").rstrip().split("\n"))
    response.headers['X-Connectbox-Version'] = connectbox_version
    return response
def _authenticate(req):
    """Validate HTTP Basic credentials on *req*, aborting with 401 on failure.

    Only the password half is checked, by delegating to
    ``ConnectBoxManage.sh check password``; the username is ignored.
    Returns normally on success; otherwise never returns (aborts).
    """
    logging.debug("_authenticate")
    try:
        auth_header = req.headers.get('Authorization')
        if auth_header:
            if auth_header.startswith("Basic "):
                decoded = base64.b64decode(auth_header.split()[1]).decode('utf-8')
                # Split on the FIRST colon only: RFC 7617 allows colons in
                # the password (the old split(":") rejected such passwords).
                credentials = decoded.split(":", 1)
                if len(credentials) == 2:
                    cmd_args = [
                        "sudo",
                        "/usr/local/connectbox/bin/ConnectBoxManage.sh",
                        "check", "password", credentials[1]]
                    called_cmd = subprocess.run(
                        cmd_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                    if called_cmd.returncode == 0:
                        return
    except Exception as err:
        # Malformed base64 / non-UTF-8 headers land here; treat them as
        # authentication failures. logging.warn is deprecated.
        logging.warning('Error authenticating request: %s', err)
    _abort_unauthorized()
def METHOD_NAME(prop):
    """GET handler: return the value of a whitelisted (non-secret) property."""
    logging.debug("get_property")
    _authenticate(request)
    # The password is write-only and must never be read back.
    if prop not in valid_properties or prop == "password":
        _abort_bad_request()
    return _call_command(["get", prop])
def get_brand_property(prop):
    """GET handler: return a brand property (the auth secret excluded)."""
    logging.debug("get brand property")
    _authenticate(request)
    # server_authorization is write-only and must never be read back.
    if prop not in valid_brand_properties or prop == "server_authorization":
        _abort_bad_request()
    return _call_command(["get", "brand", prop])
def set_property_value_wrapped(prop):
    """PUT handler: set *prop* from a JSON body of the form {"value": ...}."""
    logging.debug("set_property_value_wrapped")
    _authenticate(request)
    prop_string = prop
    if prop_string not in valid_properties:
        _abort_bad_request()  # bad request
    # we don't offer channel setting but the UI still exposes it. Stub it out
    # until the UI is updated
    if prop_string == "channel":
        res = jsonify(code=0, result="Setting channel no longer supported")
        res.headers['X-Connectbox-Version'] = connectbox_version
        return res
    possible_json = request.get_json(force=True, silent=True)
    if (not possible_json) or ("value" not in possible_json):
        _abort_bad_request()  # bad request
    # NOTE(review): the value is passed as bytes among str args (works on
    # POSIX) and .encode raises if "value" is not a string -- confirm the
    # UI always sends a string.
    return _call_command(["set", prop_string, possible_json["value"].encode("utf-8")])
def set_property(prop):
    """PUT handler: set a brand property from the raw request body."""
    logging.debug("set_property")
    _authenticate(request)
    if prop not in valid_brand_properties:
        # Unknown brand property.
        _abort_bad_request()
    body = request.get_data(as_text=True)
    if not body:
        # An empty body is a bad request.
        _abort_bad_request()
    return _call_command(["set", prop, body.encode("utf-8")])
def do_system_property(prop):
    """GET handler: trigger a whitelisted system action (shutdown, reboot, ...)."""
    logging.debug("do_system_property")
    _authenticate(request)
    allowed_actions = ("shutdown", "reboot", "unmountusb", "reset", "openwellusb", "courseusb")
    if prop not in allowed_actions:
        _abort_bad_request()
    return _call_command([prop])
def not_authorized():
    """Catch-all handler for the bare /admin/api roots: always respond 401."""
    _abort_unauthorized()
def register(app):
    """Attach the admin API routes to the given Flask *app*.

    The bare /admin/api roots always 401; property GET/PUT routes delegate
    to the handlers defined above.
    """
    app.add_url_rule(
        rule='/admin/api',
        endpoint='not_authorized',
        view_func=not_authorized)
    app.add_url_rule(
        rule='/admin/api/',
        endpoint='not_authorized',
        view_func=not_authorized)
    app.add_url_rule(
        rule='/admin/api/brand/<prop>',
        endpoint='get_brand_property',
        methods=['GET'],
        view_func=get_brand_property)
    app.add_url_rule(
        rule='/admin/api/<prop>',
        endpoint='get_property',
        methods=['GET'],
        view_func=METHOD_NAME)
    # NOTE(review): endpoint 'do_system_function' does not match the view
    # function name do_system_property -- confirm nothing builds URLs from
    # the endpoint name before renaming.
    app.add_url_rule(
        rule='/admin/api/do/<prop>',
        endpoint='do_system_function',
        methods=['GET'],
        view_func=do_system_property)
    # ui-config PUTs take the raw body (set_property); every other property
    # PUT expects the {"value": ...} JSON wrapper handled below.
    app.add_url_rule(
        rule='/admin/api/ui-config',
        defaults={'prop': 'ui-config'},
        endpoint='set_property',
        methods=['PUT'],
        view_func=set_property)
    app.add_url_rule(
        rule='/admin/api/<prop>',
        endpoint='set_property_value_wrapped',
        methods=['PUT'],
        view_func=set_property_value_wrapped)
5,710 | list tables | # -*- coding: utf-8 -*-
# flake8: noqa: E501
"""
Tasks for generating a data catalog from BigQuery.
"""
from google.cloud import bigquery
import gspread
import pandas as pd
from prefect import task
from pipelines.rj_escritorio.data_catalog.utils import (
get_bigquery_client,
write_data_to_gsheets,
)
from pipelines.utils.utils import get_credentials_from_env, log
@task
def METHOD_NAME(  # pylint: disable=too-many-arguments
    project_id: str,
    client: bigquery.Client = None,
    mode: str = "prod",
    exclude_staging: bool = True,
    exclude_test: bool = True,
    exclude_logs: bool = True,
):
    """
    List every dataset/table pair in *project_id*, honoring the exclusion flags.

    Args:
        project_id: Project ID.
        client: BigQuery client (created on demand when None).
        mode: BigQuery client mode.
        exclude_staging: Skip datasets whose id ends with "_staging".
        exclude_test: Skip datasets/tables containing the word "test".
        exclude_logs: Skip datasets named like logs.

    Returns:
        List of dictionaries in the format:
        {
            "project_id": "project_id",
            "dataset_id": "dataset_id",
            "table_id": "table_id",
            "url": "https://console.cloud.google.com/bigquery?p={project_id}&d={dataset_id}&t={table_id}&page=table",
            "private": True/False,
        }
    """
    if client is None:
        log(f"Creating BigQuery client in mode {mode}.")
        client = get_bigquery_client(mode=mode)
    log(f"Listing tables in project {project_id}.")

    def _skip_dataset(name: str) -> bool:
        # Dataset-level filtering; mirrors the exclude_* flags.
        if exclude_staging and name.endswith("_staging"):
            log(f"Excluding staging dataset {name}.")
            return True
        if exclude_test and "test" in name:
            log(f"Excluding test dataset {name}.")
            return True
        if exclude_logs and (name.startswith("logs_") or name.endswith("_logs")):
            log(f"Excluding logs dataset {name}.")
            return True
        return False

    tables = []
    for dataset in client.list_datasets(project=project_id):
        dataset_id: str = dataset.dataset_id
        if _skip_dataset(dataset_id):
            continue
        for table in client.METHOD_NAME(dataset):
            table_id = table.table_id
            if exclude_test and "test" in table_id:
                log(f"Excluding test table {table_id}.")
                continue
            tables.append(
                {
                    "project_id": project_id,
                    "dataset_id": dataset_id,
                    "table_id": table_id,
                    "url": f"https://console.cloud.google.com/bigquery?p={project_id}&d={dataset_id}&t={table_id}&page=table",
                    "private": project_id != "datario",
                }
            )
    log(f"Found {len(tables)} tables in project {project_id}.")
    return tables
@task
def merge_list_of_list_of_tables(list_of_list_of_tables: list) -> list:
    """
    Flatten a list of lists of tables into a single flat list of tables.

    Args:
        list_of_list_of_tables: List of list of tables.

    Returns:
        List of tables.
    """
    flattened: list = []
    for sublist in list_of_list_of_tables:
        flattened.extend(sublist)
    log(f"Merged {len(flattened)} tables.")
    return flattened
@task
def generate_dataframe_from_list_of_tables(list_of_tables: list) -> pd.DataFrame:
    """
    Build a pandas DataFrame where each table dict becomes one row.

    Args:
        list_of_tables: List of tables.

    Returns:
        Pandas DataFrame.
    """
    frame = pd.DataFrame(list_of_tables)
    log(f"Generated DataFrame with shape {frame.shape}.")
    return frame
@task
def update_gsheets_data_catalog(
    dataframe: pd.DataFrame, spreadsheet_url: str, sheet_name: str
) -> None:
    """
    Update a Google Sheets spreadsheet with a DataFrame.

    The target worksheet is cleared and fully rewritten: headers on row 1,
    data from row 2 down, a basic filter over the used columns, and
    auto-resized columns.

    Args:
        dataframe: Pandas DataFrame.
        spreadsheet_url: Google Sheets spreadsheet URL.
        sheet_name: Google Sheets sheet name.
    """

    def _a1_column(index: int) -> str:
        # 1-based column index -> A1 letters (1 -> "A", 26 -> "Z", 27 -> "AA").
        letters = ""
        while index > 0:
            index, remainder = divmod(index - 1, 26)
            letters = chr(ord("A") + remainder) + letters
        return letters

    # Get gspread client
    credentials = get_credentials_from_env(
        scopes=[
            "https://www.googleapis.com/auth/spreadsheets",
            "https://www.googleapis.com/auth/drive",
        ]
    )
    gspread_client = gspread.authorize(credentials)
    # Open spreadsheet
    log(f"Opening Google Sheets spreadsheet {spreadsheet_url} with sheet {sheet_name}.")
    sheet = gspread_client.open_by_url(spreadsheet_url)
    worksheet = sheet.worksheet(sheet_name)
    # Update spreadsheet
    log("Deleting old data.")
    worksheet.clear()
    log("Rewriting headers.")
    write_data_to_gsheets(
        worksheet=worksheet,
        data=[dataframe.columns.tolist()],
    )
    log("Updating new data.")
    write_data_to_gsheets(
        worksheet=worksheet,
        data=dataframe.values.tolist(),
        start_cell="A2",
    )
    # Add filters
    log("Adding filters.")
    # BUGFIX: the previous chr(ord('A') + n - 1) arithmetic produced invalid
    # labels past column 26; use a proper A1 column-letter conversion.
    first_col = "A"
    last_col = _a1_column(len(dataframe.columns))
    worksheet.set_basic_filter(f"{first_col}:{last_col}")
    # Resize columns
    log("Resizing columns.")
    worksheet.columns_auto_resize(0, len(dataframe.columns) - 1)
    log("Done.")
5,711 | convert variables to arrays | # Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility methods for working with TensorFlow Federated Model objects."""
from collections.abc import Callable
from typing import Any, NamedTuple, Union
import numpy as np
import tensorflow as tf
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.common_libs import structure
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.impl.types import type_conversions
from tensorflow_federated.python.learning.models import variable
class ModelWeights(NamedTuple):
  """A container for the trainable and non-trainable variables of a `Model`.

  Note this does not include the model's local variables.

  It may also be used to hold other values that are parallel to these variables,
  e.g., tensors corresponding to variable values, or updates to model variables.
  """

  # Trainable variables (or parallel values such as tensors or updates).
  trainable: Any
  # Non-trainable variables (or parallel values).
  non_trainable: Any

  @classmethod
  def from_model(cls, model):
    """Builds a `ModelWeights` from a TFF `VariableModel` or a Keras model."""
    py_typecheck.check_type(model, (variable.VariableModel, tf.keras.Model))
    return cls(model.trainable_variables, model.non_trainable_variables)

  @classmethod
  def from_tff_result(cls, struct):
    """Builds a `ModelWeights` from a `structure.Struct` TFF result."""
    py_typecheck.check_type(struct, structure.Struct)
    return cls(
        [value for _, value in structure.iter_elements(struct.trainable)],
        [value for _, value in structure.iter_elements(struct.non_trainable)],
    )

  def assign_weights_to(self, model):
    """Assign these TFF model weights to the weights of a model.

    Args:
      model: A `tf.keras.Model` or `tff.learning.models.VariableModel` instance
        to assign the weights to.
    """
    py_typecheck.check_type(model, (variable.VariableModel, tf.keras.Model))
    if isinstance(model, tf.keras.Model):
      # Keras models expose weights via trainable_weights/non_trainable_weights.
      tf.nest.map_structure(
          lambda var, t: var.assign(t), model.trainable_weights, self.trainable
      )
      tf.nest.map_structure(
          lambda var, t: var.assign(t),
          model.non_trainable_weights,
          self.non_trainable,
      )
    else:
      # TFF VariableModels expose trainable_variables/non_trainable_variables.
      tf.nest.map_structure(
          lambda var, t: var.assign(t),
          model.trainable_variables,
          self.trainable,
      )
      tf.nest.map_structure(
          lambda var, t: var.assign(t),
          model.non_trainable_variables,
          self.non_trainable,
      )

  def METHOD_NAME(self) -> 'ModelWeights':
    """Converts any internal `tf.Variable`s to numpy arrays."""
    if not tf.compat.v1.executing_eagerly():
      raise ValueError(
          'Can only convert to numpy array in eager mode outside '
          'a @tf.function.'
      )
    # `structure.Struct` is not a tf.nest structure, so pick the matching
    # traversal helper for each field.
    if isinstance(self.trainable, structure.Struct):
      new_trainable = structure.map_structure(np.array, self.trainable)
    else:
      new_trainable = tf.nest.map_structure(np.array, self.trainable)
    if isinstance(self.non_trainable, structure.Struct):
      new_non_trainable = structure.map_structure(np.array, self.non_trainable)
    else:
      new_non_trainable = tf.nest.map_structure(np.array, self.non_trainable)
    return ModelWeights(new_trainable, new_non_trainable)
def weights_type_from_model(
    model: Union[variable.VariableModel, Callable[[], variable.VariableModel]]
) -> computation_types.StructType:
  """Creates a `tff.Type` from a `tff.learning.models.VariableModel` or callable that constructs a model.

  Args:
    model: A `tff.learning.models.VariableModel` instance, or a no-arg callable
      that returns a model.

  Returns:
    A `tff.StructType` representing the TFF type of the `ModelWeights`
    structure for `model`.
  """
  if callable(model):
    # Wrap model construction in a graph to avoid polluting the global context
    # with variables created for this model.
    with tf.Graph().as_default():
      model = model()
  py_typecheck.check_type(model, variable.VariableModel)
  return type_conversions.type_from_tensors(ModelWeights.from_model(model))  # pytype: disable=bad-return-type
5,712 | insert names table | """
Module that provides the names used when processing netcdf files
Each type of Environment object has a set of names that it uses
to determine what the contents of a netcdf file are.
**cf_names:**
These are "standard names", as defined by the CF metadata standard:
https://cfconventions.org/standard-names.html
These are the best options to use, as the are standardized. When loading a netcdf file, the Environment
objects will first look for variable with cf_names, to identify their meaning. If no
variables exist with the standard names, then common variable names will be used.
**nc_names:**
These are common variable names used for the variables PyGNOME uses. If you set the variable names in
a netcdf files to one these names, PYGNOME should be able to load the file.
**Name Mapping:**
**grid_temperature**
Default Names: water_t, temp
CF Standard Names: sea_water_temperature, sea_surface_temperature
**grid_salinity**
Default Names: salt
CF Standard Names: sea_water_salinity, sea_surface_salinity
**grid_sediment**
Default Names: sand_06
CF Standard Names:
**ice_concentration**
Default Names: ice_fraction, aice
CF Standard Names: sea_ice_area_fraction
**bathymetry**
Default Names: h
CF Standard Names: depth
**grid_current**
Default Names for u: u, U, water_u, curr_ucmp, u_surface, u_sur
Default Names for v: v, V, water_v, curr_vcmp, v_surface, v_sur
Default Names for w: w, W
CF Standard Names for u: eastward_sea_water_velocity, surface_eastward_sea_water_velocity
CF Standard Names for v: northward_sea_water_velocity, surface_northward_sea_water_velocity
CF Standard Names for w: upward_sea_water_velocity
**grid_wind**
Default Names for u: air_u, Air_U, air_ucmp, wind_u, u-component_of_wind_height_above_ground, UWind
Default Names for v: air_v, Air_V, air_vcmp, wind_v, v-component_of_wind_height_above_ground, VWind
CF Standard Names for u: eastward_wind, eastward wind
CF Standard Names for v: northward_wind, northward wind
**ice_velocity**
Default Names for u: ice_u, uice
Default Names for v: ice_v, vice
CF Standard Names for u: eastward_sea_ice_velocity
CF Standard Names for v: northward_sea_ice_velocity
"""
nc_names = {
'grid_temperature': {
'default_names': ['water_t', 'temp'],
'cf_names': ['sea_water_temperature', 'sea_surface_temperature']
},
'grid_salinity': {
'default_names': ['salt'],
'cf_names': ['sea_water_salinity', 'sea_surface_salinity']
},
'grid_sediment': {
'default_names': ['sand_06'],
'cf_names': []
},
'ice_concentration': {
'default_names': ['ice_fraction', 'aice'],
'cf_names': ['sea_ice_area_fraction']
},
'bathymetry': {
'default_names': ['h'],
'cf_names': ['depth']
},
'grid_current': {
'default_names': {
'u': ['u', 'U', 'water_u', 'curr_ucmp', 'u_surface', 'u_sur'],
'v': ['v', 'V', 'water_v', 'curr_vcmp', 'v_surface', 'v_sur'],
'w': ['w', 'W']
},
'cf_names': {
'u': [
'eastward_sea_water_velocity',
'surface_eastward_sea_water_velocity'
],
'v': [
'northward_sea_water_velocity',
'surface_northward_sea_water_velocity'
],
'w': ['upward_sea_water_velocity']
}
},
'grid_wind': {
'default_names': {
'u': ['air_u', 'Air_U', 'air_ucmp', 'wind_u','u-component_of_wind_height_above_ground','Uwind','u10'],
'v': ['air_v', 'Air_V', 'air_vcmp', 'wind_v','v-component_of_wind_height_above_ground','Vwind','v10']
},
'cf_names': {
'u': ['eastward_wind', 'eastward wind'],
'v': ['northward_wind', 'northward wind']
}
},
'ice_velocity': {
'default_names': {
'u': ['ice_u', 'uice'],
'v': ['ice_v', 'vice']
},
'cf_names': {
'u': ['eastward_sea_ice_velocity'],
'v': ['northward_sea_ice_velocity']
}
},
}
def METHOD_NAME(table_text):
    """
    Insert *table_text* into this module's docstring, rewriting the file
    in place.

    Everything between the "**Name Mapping:**" marker and the closing
    docstring quotes is replaced with *table_text*. Run whenever the
    name mapping is updated.

    Args:
        table_text: the rendered names table (see build_names_table()).
    """
    # Imported locally so this function also works when the module is
    # imported as a library (previously os/shutil were only imported under
    # the __main__ guard, causing a NameError here).
    import os
    import shutil

    source_path = __file__
    temp_path = "names.temp.py"
    with open(source_path) as infile:
        contents = iter(infile.readlines())
    with open(temp_path, 'w') as outfile:
        # Copy everything up to and including the marker line.
        for line in contents:
            outfile.write(line)
            if "**Name Mapping:**" in line:
                break
        outfile.write("\n")
        outfile.write(table_text)
        outfile.write("\n")
        # Skip the old table: drop lines until the closing docstring quotes.
        for line in contents:
            if line.strip() == '"""':
                outfile.write(line)
                break
        # Copy the remainder of the module verbatim.
        for line in contents:
            outfile.write(line)
    shutil.copy(temp_path, source_path)
    os.remove(temp_path)
def build_names_table():
    """
    This builds the table of names for the docstring (and the docs),
    rendered from the module-level nc_names mapping.

    NOTE: it could use some fancier rst formatting ...
    """
    table = []
    for env_obj, names in nc_names.items():
        table.append(f"\n\n**{env_obj:}**\n")
        try:  # some are dicts, some are lists ...
            for var, var_names in names['default_names'].items():
                table.append(f"\n Default Names for {var}: {', '.join(var_names)}\n")
        except AttributeError:
            table.append(f"\n Default Names: {', '.join(names['default_names'])}\n")
        table.append('\n')
        try:  # some are dicts, some are lists ...
            for var, var_names in names['cf_names'].items():
                table.append(f"\n CF Standard Names for {var}: {', '.join(var_names)}\n")
        except AttributeError:
            table.append(f"\n CF Standard Names: {', '.join(names['cf_names'])}\n")
    return ''.join(table)
if __name__ == "__main__":
    """
    when run as a script, rebuilds the table of names in the docstring of
    this module -- this should be run any time the names dict changes

    To run:

    set the working dir to this directory

        python names.py rebuild

    The docstring for this module should be changed in place
    """
    import os
    import sys
    import shutil

    # The docstring above previously said "python names.py build", but the
    # code requires "rebuild" -- the usage text is now consistent.
    if "rebuild" in sys.argv:
        print("rebuilding docstring")
        METHOD_NAME(build_names_table())
    else:
        print("Doing Nothing")
        print('To rebuild the docstring, pass "rebuild" in on the command line:')
        print(' python names.py rebuild')
5,713 | execute operations | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "elastic-san list-sku",
    is_preview=True,
)
class ListSku(AAZCommand):
    """Get a list of Elastic SAN skus.

    :example: Get a list of Elastic SAN skus.
        az elastic-san list-sku
    """

    # Generated metadata: binds this command to its ARM resource/API version.
    _aaz_info = {
        "version": "2022-12-01-preview",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/providers/microsoft.elasticsan/skus", "2022-12-01-preview"],
        ]
    }

    def _handler(self, command_args):
        """Entry point: run the operation pipeline and return its output."""
        super()._handler(command_args)
        self.METHOD_NAME()
        return self._output()

    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        """Build (once) and cache the CLI argument schema for this command."""
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""

        _args_schema = cls._args_schema
        _args_schema.filter = AAZStrArg(
            options=["--filter"],
            help="Specify $filter='location eq <location>' to filter on location.",
        )
        return cls._args_schema

    def METHOD_NAME(self):
        """Run pre hooks, the HTTP operation, then post hooks."""
        self.pre_operations()
        self.SkusList(ctx=self.ctx)()
        self.post_operations()

    @register_callback
    def pre_operations(self):
        # Extension point for overrides; intentionally empty.
        pass

    @register_callback
    def post_operations(self):
        # Extension point for overrides; intentionally empty.
        pass

    def _output(self, *args, **kwargs):
        """Deserialize the stored response's `value` list for display."""
        result = self.deserialize_output(self.ctx.vars.instance.value, client_flatten=True)
        return result

    class SkusList(AAZHttpOperation):
        """GET .../providers/Microsoft.ElasticSan/skus"""

        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            if session.http_response.status_code in [200]:
                return self.on_200(session)
            # Any non-200 status surfaces as a management-plane error.
            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/providers/Microsoft.ElasticSan/skus",
                **self.url_parameters
            )

        @property
        def method(self):
            return "GET"

        @property
        def error_format(self):
            return "MgmtErrorFormat"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "$filter", self.ctx.args.filter,
                ),
                **self.serialize_query_param(
                    "api-version", "2022-12-01-preview",
                    required=True,
                ),
            }
            return parameters

        @property
        def header_parameters(self):
            parameters = {
                **self.serialize_header_param(
                    "Accept", "application/json",
                ),
            }
            return parameters

        def on_200(self, session):
            """Store the deserialized 200 response body on the context."""
            data = self.deserialize_http_content(session)
            self.ctx.set_var(
                "instance",
                data,
                schema_builder=self._build_schema_on_200
            )

        _schema_on_200 = None

        @classmethod
        def _build_schema_on_200(cls):
            """Build (once) and cache the response schema for a 200 reply."""
            if cls._schema_on_200 is not None:
                return cls._schema_on_200

            cls._schema_on_200 = AAZObjectType()

            # Paged list response: `value` holds the SKUs, `nextLink` the
            # continuation URL.
            _schema_on_200 = cls._schema_on_200
            _schema_on_200.next_link = AAZStrType(
                serialized_name="nextLink",
                flags={"read_only": True},
            )
            _schema_on_200.value = AAZListType(
                flags={"read_only": True},
            )

            value = cls._schema_on_200.value
            value.Element = AAZObjectType()

            _element = cls._schema_on_200.value.Element
            _element.capabilities = AAZListType(
                flags={"read_only": True},
            )
            _element.location_info = AAZListType(
                serialized_name="locationInfo",
                flags={"read_only": True},
            )
            _element.locations = AAZListType(
                flags={"read_only": True},
            )
            _element.name = AAZStrType(
                flags={"required": True},
            )
            _element.resource_type = AAZStrType(
                serialized_name="resourceType",
                flags={"read_only": True},
            )
            _element.tier = AAZStrType()

            capabilities = cls._schema_on_200.value.Element.capabilities
            capabilities.Element = AAZObjectType()

            _element = cls._schema_on_200.value.Element.capabilities.Element
            _element.name = AAZStrType(
                flags={"read_only": True},
            )
            _element.value = AAZStrType(
                flags={"read_only": True},
            )

            location_info = cls._schema_on_200.value.Element.location_info
            location_info.Element = AAZObjectType()

            _element = cls._schema_on_200.value.Element.location_info.Element
            _element.location = AAZStrType(
                flags={"read_only": True},
            )
            _element.zones = AAZListType(
                flags={"read_only": True},
            )

            zones = cls._schema_on_200.value.Element.location_info.Element.zones
            zones.Element = AAZStrType()

            locations = cls._schema_on_200.value.Element.locations
            locations.Element = AAZStrType()

            return cls._schema_on_200
class _ListSkuHelper:
    """Helper class for ListSku (generated placeholder for shared serializers)."""
__all__ = ["ListSku"] |
5,714 | query parameters | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"devcenter admin check-name-availability execute",
)
class Execute(AAZCommand):
    """Check the availability of name for resource.

    :example: Check name availability
        az devcenter admin check-name-availability execute --name "name1" --type "Microsoft.DevCenter/devcenters"
    """

    # Metadata binding this command to its ARM resource path and API version.
    _aaz_info = {
        "version": "2023-04-01",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/providers/microsoft.devcenter/checknameavailability", "2023-04-01"],
        ]
    }

    def _handler(self, command_args):
        # Entry point called by the CLI framework: run the HTTP operation
        # and return the deserialized response.
        super()._handler(command_args)
        self._execute_operations()
        return self._output()

    # Cached argument schema; built lazily by _build_arguments_schema().
    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        # Build the argument schema once and reuse it on subsequent calls.
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""

        # define Arg Group "NameAvailabilityRequest"

        _args_schema = cls._args_schema
        _args_schema.name = AAZStrArg(
            options=["--name"],
            arg_group="NameAvailabilityRequest",
            help="The name of the resource for which availability needs to be checked.",
        )
        _args_schema.type = AAZStrArg(
            options=["--type"],
            arg_group="NameAvailabilityRequest",
            help="The resource type.",
        )
        return cls._args_schema

    def _execute_operations(self):
        # pre/post hooks are user-overridable callbacks around the request.
        self.pre_operations()
        self.CheckNameAvailabilityExecute(ctx=self.ctx)()
        self.post_operations()

    @register_callback
    def pre_operations(self):
        pass

    @register_callback
    def post_operations(self):
        pass

    def _output(self, *args, **kwargs):
        # client_flatten=True flattens wrapper objects in the serialized result.
        result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
        return result

    class CheckNameAvailabilityExecute(AAZHttpOperation):
        # HTTP operation: POST .../checkNameAvailability with a name/type body.
        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            if session.http_response.status_code in [200]:
                return self.on_200(session)
            # Any non-200 status is surfaced through the management error format.
            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/providers/Microsoft.DevCenter/checkNameAvailability",
                **self.url_parameters
            )

        @property
        def method(self):
            return "POST"

        @property
        def error_format(self):
            return "MgmtErrorFormat"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
            }
            return parameters

        @property
        def METHOD_NAME(self):
            # Query parameters: only the API version is sent.
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2023-04-01",
                    required=True,
                ),
            }
            return parameters

        @property
        def header_parameters(self):
            parameters = {
                **self.serialize_header_param(
                    "Content-Type", "application/json",
                ),
                **self.serialize_header_param(
                    "Accept", "application/json",
                ),
            }
            return parameters

        @property
        def content(self):
            # Request body {"name": ..., "type": ...} built from the CLI args.
            _content_value, _builder = self.new_content_builder(
                self.ctx.args,
                typ=AAZObjectType,
                typ_kwargs={"flags": {"required": True, "client_flatten": True}}
            )
            _builder.set_prop("name", AAZStrType, ".name")
            _builder.set_prop("type", AAZStrType, ".type")
            return self.serialize_content(_content_value)

        def on_200(self, session):
            # Store the deserialized 200 response for _output().
            data = self.deserialize_http_content(session)
            self.ctx.set_var(
                "instance",
                data,
                schema_builder=self._build_schema_on_200
            )

        # Cached response schema; built lazily by _build_schema_on_200().
        _schema_on_200 = None

        @classmethod
        def _build_schema_on_200(cls):
            # Lazily build (and cache) the 200-response schema.
            if cls._schema_on_200 is not None:
                return cls._schema_on_200
            cls._schema_on_200 = AAZObjectType()
            _schema_on_200 = cls._schema_on_200
            _schema_on_200.message = AAZStrType()
            _schema_on_200.name_available = AAZBoolType(
                serialized_name="nameAvailable",
            )
            _schema_on_200.reason = AAZStrType()
            return cls._schema_on_200
class _ExecuteHelper:
    """Helper class for Execute."""
    # Currently empty: a placeholder for shared (de)serialization helpers.
# Public API of this module. (A stray trailing "|" extraction artifact was
# removed: it made this line a syntax error.)
__all__ = ["Execute"]
5,715 | test it creates reminder when sms was | from typing import Any
from unittest.mock import patch
from contextlib import contextmanager
import pytest
from users.models import JustfixUser
from users.tests.factories import UserFactory
from texting.models import (
PhoneNumberLookup,
Reminder,
REMINDERS,
get_lookup_description_for_phone_number,
exclude_users_with_invalid_phone_numbers,
)
from texting.twilio import SendSmsResult, TWILIO_BLOCKED_NUMBER_ERR, TWILIO_OTHER_ERR
@pytest.mark.parametrize(
    "obj,expected",
    [
        (PhoneNumberLookup(), "unknown"),
        (PhoneNumberLookup(is_valid=True), "valid"),
        (PhoneNumberLookup(is_valid=False), "invalid"),
    ],
)
def test_pnl_validity_str(obj, expected):
    """validity_str renders the tri-state is_valid flag as a word."""
    actual = obj.validity_str
    assert actual == expected
@pytest.mark.parametrize(
    "obj,expected",
    [
        (PhoneNumberLookup(), ""),
        (PhoneNumberLookup(carrier={"type": "mobile"}), "mobile"),
    ],
)
def test_pnl_carrier_type(obj, expected):
    """carrier_type is empty when no carrier info was recorded."""
    actual = obj.carrier_type
    assert actual == expected
@pytest.mark.parametrize(
    "obj,expected",
    [
        (PhoneNumberLookup(), "unknown"),
        (PhoneNumberLookup(is_valid=False), "invalid"),
        (PhoneNumberLookup(is_valid=True, carrier={"type": "mobile"}), "valid mobile"),
    ],
)
def test_pnl_adjectives(obj, expected):
    """adjectives combines validity and carrier type into one phrase."""
    actual = obj.adjectives
    assert actual == expected
@pytest.mark.parametrize(
    "obj,expected",
    [
        (PhoneNumberLookup(), "unknown phone number"),
        (
            PhoneNumberLookup(is_valid=False, phone_number="5551234567"),
            "invalid phone number 5551234567",
        ),
    ],
)
def test_pnl_str(obj, expected):
    """str() describes the lookup, including the number when one is set."""
    actual = str(obj)
    assert actual == expected
class MockTwilioDbTest:
    """Base for DB tests that stub out the Twilio lookup helpers."""

    @pytest.fixture(autouse=True)
    def setup_fixture(self, db):
        # Only here to pull in the "db" fixture for every test.
        pass

    @contextmanager
    def mock_twilio(self, is_valid=None, carrier=None):
        """Patch both Twilio helpers and expose the mocks as attributes."""
        with patch(
            "texting.twilio.is_phone_number_valid", return_value=is_valid
        ) as valid_mock, patch(
            "texting.twilio.get_carrier_info", return_value=carrier
        ) as carrier_mock:
            # Tests assert on these mocks' call records.
            self.is_phone_number_valid = valid_mock
            self.get_carrier_info = carrier_mock
            yield
class TestInvalidate:
    """Tests for PhoneNumberLookup.objects.invalidate()."""

    def test_it_works_when_no_record_existed(self, db):
        result = PhoneNumberLookup.objects.invalidate("5551234567")
        assert result.pk
        assert result.is_valid is False
        assert result.carrier is None

    def test_it_modifies_existing_records(self, db):
        existing = PhoneNumberLookup(
            phone_number="5551234567", is_valid=True, carrier={"hi": 1}
        )
        existing.save()
        result = PhoneNumberLookup.objects.invalidate("5551234567")
        # Same row is updated in place, not replaced.
        assert result.pk == existing.pk
        assert result.is_valid is False
        assert result.carrier is None
class TestGetOrLookup(MockTwilioDbTest):
    """Tests for PhoneNumberLookup.objects.get_or_lookup()."""

    NUMBER = "5551234567"

    def test_it_returns_new_saved_lookup_with_carrier_info_for_valid_numbers(self):
        with self.mock_twilio(is_valid=True, carrier={"type": "mobile"}):
            result = PhoneNumberLookup.objects.get_or_lookup(self.NUMBER)
            assert result is not None
            assert result.pk is not None
            assert result.is_valid is True
            assert result.carrier_type == "mobile"
            self.is_phone_number_valid.assert_called_once_with(self.NUMBER)
            self.get_carrier_info.assert_called_once_with(self.NUMBER)

    def test_it_returns_new_saved_lookup_without_carrier_info_for_invalid_numbers(self):
        with self.mock_twilio(is_valid=False):
            result = PhoneNumberLookup.objects.get_or_lookup(self.NUMBER)
            assert result is not None
            assert result.pk is not None
            assert result.is_valid is False
            assert result.carrier_type == ""
            self.is_phone_number_valid.assert_called_once_with(self.NUMBER)
            # Carrier info is only fetched for valid numbers.
            self.get_carrier_info.assert_not_called()

    def test_it_returns_existing_lookup(self):
        existing = PhoneNumberLookup(phone_number=self.NUMBER, is_valid=True)
        existing.save()
        with self.mock_twilio():
            # A cached row short-circuits both Twilio calls.
            assert PhoneNumberLookup.objects.get_or_lookup(self.NUMBER) == existing
            self.is_phone_number_valid.assert_not_called()
            self.get_carrier_info.assert_not_called()

    def test_it_returns_none_on_lookup_error(self):
        with self.mock_twilio(is_valid=None):
            assert PhoneNumberLookup.objects.get_or_lookup(self.NUMBER) is None
            self.is_phone_number_valid.assert_called_once_with(self.NUMBER)
            self.get_carrier_info.assert_not_called()
class TestGetLookupDescriptionForPhoneNumber(MockTwilioDbTest):
    """Tests for get_lookup_description_for_phone_number()."""

    NO_INFO = "No lookup details are available."

    def test_it_returns_no_info_on_empty_numbers(self):
        result = get_lookup_description_for_phone_number("")
        assert result == self.NO_INFO

    def test_it_returns_no_info_when_lookup_fails(self):
        with self.mock_twilio():
            result = get_lookup_description_for_phone_number("5551234567")
            assert result == self.NO_INFO

    def test_it_returns_info_when_lookup_succeeds(self):
        with self.mock_twilio(is_valid=False):
            result = get_lookup_description_for_phone_number("5551234567")
            assert result == "This appears to be an invalid phone number."
class TestExcludeUsersWithInvalidPhoneNumbers:
    """Tests for exclude_users_with_invalid_phone_numbers()."""

    @pytest.fixture(autouse=True)
    def setup_fixture(self, db):
        # One user with a known phone number exists for every test.
        self.phone_number = "5551234567"
        self.user = UserFactory(phone_number=self.phone_number)

    def valid_users_queryset(self):
        """Return the full user queryset with invalid-number users excluded."""
        return exclude_users_with_invalid_phone_numbers(JustfixUser.objects.all())

    def test_users_are_not_excluded_when_no_lookup_exists(self):
        assert self.valid_users_queryset().count() == 1

    def test_users_are_not_excluded_when_lookup_indicates_valid_phone_number(self):
        PhoneNumberLookup(phone_number=self.phone_number, is_valid=True).save()
        assert self.valid_users_queryset().count() == 1

    def test_users_are_excluded_when_lookup_indicates_invalid_phone_number(self):
        PhoneNumberLookup(phone_number=self.phone_number, is_valid=False).save()
        assert self.valid_users_queryset().count() == 0
class TestTryToCreateFromSendSmsResult:
    """Tests for Reminder.objects.try_to_create_from_send_sms_result()."""

    def test_it_creates_reminder_when_sms_was_sent(self, db):
        # Renamed to start with "test_" so pytest actually collects and runs it.
        user = UserFactory()
        result = Reminder.objects.try_to_create_from_send_sms_result(
            SendSmsResult("boop"), kind=REMINDERS.LOC, user=user
        )
        assert result and result.pk
        assert result.sid != ""
        assert result.err_code is None
        assert result.kind == REMINDERS.LOC
        assert result.user == user

    def test_it_creates_reminder_when_sms_failed_and_should_not_be_retried(self, db):
        user = UserFactory()
        result = Reminder.objects.try_to_create_from_send_sms_result(
            SendSmsResult(err_code=TWILIO_BLOCKED_NUMBER_ERR), kind=REMINDERS.LOC, user=user
        )
        assert result and result.pk
        # A permanently-failed send is recorded with no SID and the error code.
        assert result.sid == ""
        assert result.err_code == TWILIO_BLOCKED_NUMBER_ERR
        assert result.kind == REMINDERS.LOC
        assert result.user == user

    def test_it_does_nothing_when_sms_failed_and_should_be_retried(self):
        # No "db" fixture on purpose: a retryable failure must not touch the DB.
        user: Any = "fake user that will not be used"
        result = Reminder.objects.try_to_create_from_send_sms_result(
            SendSmsResult(err_code=TWILIO_OTHER_ERR), kind=REMINDERS.LOC, user=user
        )
        assert result is None
5,716 | test functools wraps | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import unittest
from typing import Dict, List, Set
from unittest.mock import patch
import numpy as np
from parameterized import parameterized
from streamlit import util
class UtilTest(unittest.TestCase):
    """Test Streamlit utility functions."""

    def test_memoization(self):
        """Test that util.memoize works."""
        non_memoized_func = lambda: random.randint(0, 1000000)
        yes_memoized_func = util.memoize(non_memoized_func)
        self.assertNotEqual(non_memoized_func(), non_memoized_func())
        self.assertEqual(yes_memoized_func(), yes_memoized_func())

    @parameterized.expand(
        [("Linux", False, True), ("Windows", True, False), ("Darwin", False, True)]
    )
    def test_open_browser(self, os_type, webbrowser_expect, popen_expect):
        """Test web browser opening scenarios."""
        from streamlit import env_util

        env_util.IS_WINDOWS = os_type == "Windows"
        env_util.IS_DARWIN = os_type == "Darwin"
        env_util.IS_LINUX_OR_BSD = os_type == "Linux"

        with patch("streamlit.env_util.is_executable_in_path", return_value=True):
            with patch("webbrowser.open") as webbrowser_open:
                with patch("subprocess.Popen") as subprocess_popen:
                    util.open_browser("http://some-url")
                    self.assertEqual(webbrowser_expect, webbrowser_open.called)
                    self.assertEqual(popen_expect, subprocess_popen.called)

    def test_open_browser_linux_no_xdg(self):
        """Test opening the browser on Linux with no xdg installed"""
        from streamlit import env_util

        env_util.IS_LINUX_OR_BSD = True

        with patch("streamlit.env_util.is_executable_in_path", return_value=False):
            with patch("webbrowser.open") as webbrowser_open:
                with patch("subprocess.Popen") as subprocess_popen:
                    util.open_browser("http://some-url")
                    self.assertEqual(True, webbrowser_open.called)
                    self.assertEqual(False, subprocess_popen.called)

    def test_functools_wraps(self):
        """Test wrap for functools.wraps"""
        # Renamed to start with "test" so unittest discovers and runs it.
        import streamlit as st

        @st.cache
        def f():
            return True

        self.assertEqual(True, hasattr(f, "__wrapped__"))

    @parameterized.expand(
        [
            ({}, {}),
            (
                {
                    "HELLO": 4,
                    "Hello": "world",
                    "hElLo": 5.5,
                    "": "",
                },
                # Lower-casing collapses the three "hello" variants into one
                # key; the last value wins. (The previous literal spelled this
                # as a dict with duplicate keys, which evaluated to the same
                # value but read as four entries.)
                {"hello": 5.5, "": ""},
            ),
        ]
    )
    def test_lower_clean_dict_keys(self, input_dict, answer_dict):
        return_dict = util.lower_clean_dict_keys(input_dict)
        self.assertEqual(return_dict, answer_dict)

    @parameterized.expand(
        [
            (np.array([1, 2, 3, 4, 5]), 5, 4),
            # This one will have 0.15000000000000002 because of floating point precision
            (np.arange(0.0, 0.25, 0.05), 0.15, 3),
            ([0, 1, 2, 3], 3, 3),
            ([0.1, 0.2, 0.3], 0.2, 1),
            ([0.1, 0.2, None], None, 2),
            ([0.1, 0.2, float("inf")], float("inf"), 2),
            (["He", "ello w", "orld"], "He", 0),
            (list(np.arange(0.0, 0.25, 0.05)), 0.15, 3),
        ]
    )
    def test_successful_index_(self, input, find_value, expected_index):
        """util.index_ returns the position of the first matching element."""
        actual_index = util.index_(input, find_value)
        self.assertEqual(actual_index, expected_index)

    @parameterized.expand(
        [
            (np.array([1, 2, 3, 4, 5]), 6),
            (np.arange(0.0, 0.25, 0.05), 0.1500002),
            ([0, 1, 2, 3], 3.00001),
            ([0.1, 0.2, 0.3], 0.3000004),
            ([0.1, 0.2, 0.3], None),
            (["He", "ello w", "orld"], "world"),
            (list(np.arange(0.0, 0.25, 0.05)), 0.150002),
        ]
    )
    def test_unsuccessful_index_(self, input, find_value):
        """util.index_ raises ValueError when no element matches."""
        with self.assertRaises(ValueError):
            util.index_(input, find_value)

    @parameterized.expand(
        [
            ({"x": ["a"]}, ["x"], {}),
            ({"a": ["a1", "a2"], "b": ["b1", "b2"]}, ["a"], {"b": ["b1", "b2"]}),
            ({"c": ["c1", "c2"]}, "no_existing_key", {"c": ["c1", "c2"]}),
            (
                {
                    "embed": ["true"],
                    "embed_options": ["show_padding", "show_colored_line"],
                },
                ["embed", "embed_options"],
                {},
            ),
            (
                {"EMBED": ["TRUE"], "EMBED_OPTIONS": ["DISABLE_SCROLLING"]},
                ["embed", "embed_options"],
                {},
            ),
        ]
    )
    def test_drop_key_query_params(
        self,
        query_params: Dict[str, List[str]],
        keys_to_drop: List[str],
        result: Dict[str, List[str]],
    ):
        """exclude_key_query_params drops keys case-insensitively."""
        self.assertDictEqual(
            util.exclude_key_query_params(query_params, keys_to_drop), result
        )

    @parameterized.expand(
        [
            ({"x": ["a"]}, "x", {"a"}),
            ({"a": ["a1"], "b": ["b1", "b2"]}, "a", {"a1"}),
            ({"c": ["c1", "c2"]}, "no_existing_key", set()),
            (
                {
                    "embed": ["true"],
                    "embed_options": ["show_padding", "show_colored_line"],
                },
                "embed",
                {"true"},
            ),
            (
                {"EMBED": ["TRUE"], "EMBED_OPTIONS": ["DISABLE_SCROLLING"]},
                "embed_options",
                {"disable_scrolling"},
            ),
        ]
    )
    def test_extract_key_query_params(
        self, query_params: Dict[str, List[str]], param_key: str, result: Set[str]
    ):
        """extract_key_query_params matches keys and lower-cases the values."""
        self.assertSetEqual(
            util.extract_key_query_params(query_params, param_key), result
        )

    def test_calc_md5_can_handle_bytes_and_strings(self):
        self.assertEqual(
            util.calc_md5("eventually bytes"),
            util.calc_md5("eventually bytes".encode("utf-8")),
        )
5,717 | tear down | # The MIT License (MIT)
# Copyright (c) 2023 by the xcube team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import os
import unittest
from typing import Optional, Mapping, Any
import pytest
from xcube.core.new import new_cube
from xcube.server.api import ApiError
from xcube.webapi.datasets.context import DatasetsContext
from xcube.webapi.viewer import Viewer
# Minimal server configuration used by tests that need a named style:
# it defines one style "SST" mapping the "analysed_sst" variable to the
# "inferno" color bar over the value range 270-290.
STYLES_CONFIG = {
    "Styles": [
        {
            "Identifier": "SST",
            "ColorMappings": {
                "analysed_sst": {
                    "ValueRange": [270, 290],
                    "ColorBar": "inferno"
                }
            }
        }
    ]
}
class ViewerTest(unittest.TestCase):
    """Tests for the Viewer convenience API."""

    def setUp(self) -> None:
        self.viewer: Optional[Viewer] = None

    def tearDown(self) -> None:
        # This hook must be named exactly "tearDown" for unittest to invoke
        # it after each test; otherwise servers started via get_viewer()
        # would never be stopped and would leak across tests.
        if self.viewer is not None:
            self.viewer.stop_server()

    def get_viewer(self, server_config: Optional[Mapping[str, Any]] = None) \
            -> Viewer:
        """Create a Viewer (remembered for teardown) with optional config."""
        self.viewer = Viewer(server_config=server_config)
        return self.viewer

    def test_start_and_stop_server(self):
        viewer = self.get_viewer()
        self.assertTrue(viewer.is_server_running)
        self.assertIsInstance(viewer.datasets_ctx, DatasetsContext)
        viewer.stop_server()
        self.assertFalse(viewer.is_server_running)

    def test_info(self):
        viewer = self.get_viewer()
        # Just a smoke test:
        viewer.info()  # will print something

    def test_show(self):
        viewer = self.get_viewer()
        # Just a smoke test:
        result = viewer.show()  # will show viewer
        if result is not None:
            from IPython.core.display import HTML
            self.assertIsInstance(result, HTML)

    def test_no_config(self):
        viewer = self.get_viewer()
        self.assertIsInstance(viewer.server_config, dict)
        self.assertIn("port", viewer.server_config)
        self.assertIn("address", viewer.server_config)
        self.assertIn("reverse_url_prefix", viewer.server_config)

    def test_with_config(self):
        viewer = self.get_viewer(STYLES_CONFIG)
        self.assertIsInstance(viewer.server_config, dict)
        self.assertIn("port", viewer.server_config)
        self.assertIn("address", viewer.server_config)
        self.assertIn("reverse_url_prefix", viewer.server_config)
        self.assertIn("Styles", viewer.server_config)
        self.assertEqual(STYLES_CONFIG["Styles"],
                         viewer.server_config["Styles"])

    def test_urls(self):
        viewer = self.get_viewer()
        self.assertIn("port", viewer.server_config)
        port = viewer.server_config["port"]
        reverse_url_prefix = viewer.server_config.get("reverse_url_prefix")
        if not reverse_url_prefix:
            expected_server_url = f"http://localhost:{port}"
            self.assertEqual(expected_server_url,
                             viewer.server_url)
            expected_viewer_url = f"{expected_server_url}/viewer/" \
                                  f"?serverUrl={expected_server_url}"
            self.assertEqual(expected_viewer_url,
                             viewer.viewer_url)
        else:
            # With a reverse proxy prefix, only check structural properties.
            self.assertIsInstance(viewer.server_url, str)
            self.assertIsInstance(viewer.viewer_url, str)
            self.assertIn(reverse_url_prefix, viewer.server_url)
            self.assertIn(viewer.server_url, viewer.viewer_url)

    def test_urls_with_jl_env_var(self):
        env_var_key = "XCUBE_JUPYTER_LAB_URL"
        env_var_value = os.environ.get(env_var_key)
        os.environ[env_var_key] = "http://xcube-test-lab/"
        try:
            viewer = self.get_viewer()
            self.assertTrue(viewer.server_url.startswith(
                "http://xcube-test-lab/proxy/"
            ))
            self.assertTrue(viewer.viewer_url.startswith(
                "http://xcube-test-lab/proxy/"
            ))
            self.assertTrue("/viewer/" in viewer.viewer_url)
        finally:
            # Restore the environment exactly as it was.
            if env_var_value is not None:
                os.environ[env_var_key] = env_var_value
            else:
                del os.environ[env_var_key]

    def test_add_and_remove_dataset(self):
        viewer = self.get_viewer()
        # Generate identifier and get title from dataset
        ds_id_1 = viewer.add_dataset(
            new_cube(variables={"analysed_sst": 280.},
                     title="My SST 1"),
        )
        self.assertIsInstance(ds_id_1, str)
        # Provide identifier and title
        ds_id_2 = viewer.add_dataset(
            new_cube(variables={"analysed_sst": 282.}),
            ds_id="my_sst_2",
            title="My SST 2"
        )
        self.assertEqual("my_sst_2", ds_id_2)
        ds_config_1 = self.viewer.datasets_ctx.get_dataset_config(ds_id_1)
        self.assertEqual({"Identifier": ds_id_1,
                          "Title": "My SST 1"},
                         ds_config_1)
        ds_config_2 = self.viewer.datasets_ctx.get_dataset_config(ds_id_2)
        self.assertEqual({"Identifier": ds_id_2,
                          "Title": "My SST 2"},
                         ds_config_2)
        # After removal, looking up the dataset config must fail.
        self.viewer.remove_dataset(ds_id_1)
        with pytest.raises(ApiError.NotFound):
            self.viewer.datasets_ctx.get_dataset_config(ds_id_1)
        self.viewer.remove_dataset(ds_id_2)
        with pytest.raises(ApiError.NotFound):
            self.viewer.datasets_ctx.get_dataset_config(ds_id_2)

    def test_add_dataset_with_style(self):
        viewer = self.get_viewer(STYLES_CONFIG)
        ds_id = viewer.add_dataset(
            new_cube(variables={"analysed_sst": 280.}),
            title="My SST",
            style="SST"
        )
        ds_config = self.viewer.datasets_ctx.get_dataset_config(ds_id)
        self.assertEqual({"Identifier": ds_id,
                          "Title": "My SST",
                          "Style": "SST"},
                         ds_config)

    def test_add_dataset_with_color_mapping(self):
        viewer = self.get_viewer()
        ds_id = viewer.add_dataset(
            new_cube(variables={"analysed_sst": 280.}),
            title="My SST",
            color_mappings={
                "analysed_sst": {
                    "ValueRange": [280., 290.],
                    "ColorBar": "plasma"
                }
            },
        )
        # An ad-hoc color mapping becomes a style named after the dataset id.
        ds_config = self.viewer.datasets_ctx.get_dataset_config(ds_id)
        self.assertEqual({"Identifier": ds_id,
                          "Title": "My SST",
                          "Style": ds_id},
                         ds_config)
|
5,718 | test semaphore pull request | from unittest.mock import patch
import orjson
from zerver.lib.test_classes import WebhookTestCase
class SemaphoreHookTests(WebhookTestCase):
    """Webhook tests for both Semaphore Classic and Semaphore 2.0 payloads."""

    STREAM_NAME = "semaphore"
    URL_TEMPLATE = "/api/v1/external/semaphore?stream={stream}&api_key={api_key}"
    WEBHOOK_DIR_NAME = "semaphore"

    # Messages are generated by Semaphore on git push. The subject lines below
    # contain information on the repo and branch, and the message has links and
    # details about the build, deploy, server, author, and commit

    # Tests for Semaphore Classic
    def test_semaphore_build(self) -> None:
        expected_topic = "knighthood/master"  # repo/branch
        expected_message = """
[Build 314](https://semaphoreci.com/donquixote/knighthood/branches/master/builds/314) passed:
* **Commit**: [a490b8d508e: Create user account for Rocinante](https://github.com/donquixote/knighthood/commit/a490b8d508ebbdab1d77a5c2aefa35ceb2d62daf)
* **Author**: don@lamancha.com
""".strip()
        self.check_webhook(
            "build",
            expected_topic,
            expected_message,
            content_type="application/x-www-form-urlencoded",
        )

    def test_semaphore_deploy(self) -> None:
        expected_topic = "knighthood/master"
        expected_message = """
[Deploy 17](https://semaphoreci.com/donquixote/knighthood/servers/lamancha-271/deploys/17) of [build 314](https://semaphoreci.com/donquixote/knighthood/branches/master/builds/314) passed:
* **Commit**: [a490b8d508e: Create user account for Rocinante](https://github.com/donquixote/knighthood/commit/a490b8d508ebbdab1d77a5c2aefa35ceb2d62daf)
* **Author**: don@lamancha.com
* **Server**: lamancha-271
""".strip()
        self.check_webhook(
            "deploy",
            expected_topic,
            expected_message,
            content_type="application/x-www-form-urlencoded",
        )

    # Tests for Semaphore 2.0
    def test_semaphore2_push(self) -> None:
        expected_topic = "notifications/rw/webhook_impl"  # repo/branch
        expected_message = """
[Notifications](https://semaphore.semaphoreci.com/workflows/acabe58e-4bcc-4d39-be06-e98d71917703) pipeline **stopped**:
* **Commit**: [(2d9f5fcec1c)](https://github.com/renderedtext/notifications/commit/2d9f5fcec1ca7c68fa7bd44dd58ec4ff65814563) Implement webhooks for SemaphoreCI
* **Branch**: rw/webhook_impl
* **Author**: [radwo](https://github.com/radwo)
""".strip()
        self.check_webhook(
            "push", expected_topic, expected_message, content_type="application/json"
        )

    def test_semaphore2_push_non_gh_repo(self) -> None:
        expected_topic = "notifications/rw/webhook_impl"  # repo/branch
        expected_message = """
[Notifications](https://semaphore.semaphoreci.com/workflows/acabe58e-4bcc-4d39-be06-e98d71917703) pipeline **stopped**:
* **Commit**: (2d9f5fcec1c) Implement webhooks for SemaphoreCI
* **Branch**: rw/webhook_impl
* **Author**: radwo
""".strip()
        with patch("zerver.webhooks.semaphore.view.is_github_repo", return_value=False):
            self.check_webhook(
                "push", expected_topic, expected_message, content_type="application/json"
            )

    def test_semaphore_pull_request(self) -> None:
        # Renamed to start with "test_" so pytest actually collects and runs it.
        expected_topic = "notifications/test-notifications"
        expected_message = """
[Notifications](https://semaphore.semaphoreci.com/workflows/84383f37-d025-4811-b719-61c6acc92a1e) pipeline **failed**:
* **Pull request**: [Testing PR notifications](https://github.com/renderedtext/notifications/pull/3)
* **Branch**: test-notifications
* **Author**: [radwo](https://github.com/radwo)
""".strip()
        self.check_webhook(
            "pull_request", expected_topic, expected_message, content_type="application/json"
        )

    def test_semaphore_pull_request_non_gh_repo(self) -> None:
        expected_topic = "notifications/test-notifications"
        expected_message = """
[Notifications](https://semaphore.semaphoreci.com/workflows/84383f37-d025-4811-b719-61c6acc92a1e) pipeline **failed**:
* **Pull request**: Testing PR notifications (#3)
* **Branch**: test-notifications
* **Author**: radwo
""".strip()
        with patch("zerver.webhooks.semaphore.view.is_github_repo", return_value=False):
            self.check_webhook(
                "pull_request", expected_topic, expected_message, content_type="application/json"
            )

    def test_semaphore_tag(self) -> None:
        expected_topic = "notifications"
        expected_message = """
[Notifications](https://semaphore.semaphoreci.com/workflows/a8704319-2422-4828-9b11-6b2afa3554e6) pipeline **stopped**:
* **Tag**: [v1.0.1](https://github.com/renderedtext/notifications/tree/v1.0.1)
* **Author**: [radwo](https://github.com/radwo)
""".strip()
        self.check_webhook("tag", expected_topic, expected_message, content_type="application/json")

    def test_semaphore_tag_non_gh_repo(self) -> None:
        expected_topic = "notifications"
        expected_message = """
[Notifications](https://semaphore.semaphoreci.com/workflows/a8704319-2422-4828-9b11-6b2afa3554e6) pipeline **stopped**:
* **Tag**: v1.0.1
* **Author**: radwo
""".strip()
        with patch("zerver.webhooks.semaphore.view.is_github_repo", return_value=False):
            self.check_webhook(
                "tag", expected_topic, expected_message, content_type="application/json"
            )

    def test_semaphore_unknown(self) -> None:
        expected_topic = "knighthood/master"
        expected_message = "unknown: passed"
        self.check_webhook(
            "unknown",
            expected_topic,
            expected_message,
            content_type="application/x-www-form-urlencoded",
        )

    def test_semaphore_unknown_event(self) -> None:
        expected_topic = "notifications"
        expected_message = """
[Notifications](https://semaphore.semaphoreci.com/workflows/a8704319-2422-4828-9b11-6b2afa3554e6) pipeline **stopped** for unknown event
""".strip()
        with patch(
            "zerver.webhooks.semaphore.tests.SemaphoreHookTests.get_body", self.get_unknown_event
        ):
            self.check_webhook(
                "tag", expected_topic, expected_message, content_type="application/json"
            )

    def get_unknown_event(self, fixture_name: str) -> str:
        """Return modified payload with revision.reference_type changed"""
        fixture_data = orjson.loads(
            self.webhook_fixture_data("semaphore", fixture_name, file_type="json")
        )
        fixture_data["revision"]["reference_type"] = "unknown"
        # Re-serialize so the patched get_body() returns a string request body,
        # as the "-> str" annotation promises (previously a dict was returned).
        return orjson.dumps(fixture_data).decode()
5,719 | parse cap |
from up2date_client import config
from up2date_client import up2dateErrors
try: # python2
import UserDict
except ImportError: # python3
import collections as UserDict
import gettext
t = gettext.translation('rhn-client-tools', fallback=True)
# Python 3 translations don't have a ugettext method
if not hasattr(t, 'ugettext'):
t.ugettext = t.gettext
_ = t.ugettext
# a dict with "capability name" as the key, and the version
# as the value.
neededCaps = {"caneatCheese": {'version':"21"},
"supportsAutoUp2dateOption": {'version': "1"},
"registration.finish_message": {'version': "1"},
"xmlrpc.packages.extended_profile": {'version':"1"},
"registration.delta_packages": {'version':"1"},
"registration.update_contact_info": {'version': "1"},
"registration.extended_update_support": {"version" : "1"},
"registration.smbios": {"version" : "1"}}
def METHOD_NAME(capstring):
    """Parse an X-RHN-Server-Capability header value.

    The header is a comma-separated list of entries of the form
    ``name(version)=value``.  Returns a list of ``(name, {'version': ...,
    'value': ...})`` tuples.  Malformed entries (no ``=``, no
    ``(version)`` suffix, or an unterminated version) are skipped instead
    of raising, since one bad directive should not abort header parsing.
    """
    capslist = []
    for cap in capstring.split(','):
        try:
            (key_version, value) = [i.strip() for i in cap.split("=", 1)]
        except ValueError:
            # Bad directive: not in 'a = b' format
            continue
        # Parse out the version from 'name(version)'.
        try:
            (key, version) = key_version.split("(", 1)
        except ValueError:
            # Bad directive: no '(version)' suffix; previously this raised
            # an unhandled ValueError and aborted parsing.
            continue
        if not version.endswith(")"):
            # Unterminated version spec; skip it rather than appending a
            # mangled entry as the old code did.
            print("something broke in parsing the capability headers")
            continue
        # Trim off the trailing paren.
        version = version[:-1]
        capslist.append((key, {'version': version, 'value': value}))
    return capslist
class Capabilities(UserDict.UserDict):
    # Dict-like registry of server capabilities, keyed by capability name.
    # Each value is a dict with 'version' and 'value' entries as produced by
    # METHOD_NAME() from the X-RHN-Server-Capability response headers.
    def __init__(self):
        UserDict.UserDict.__init__(self)
        # Capabilities we need but the server lacks entirely (value None)
        # or advertises at the wrong version; filled in by validateCap().
        self.missingCaps = {}
        #self.populate()
        # self.validate()
        self.neededCaps = neededCaps
        self.cfg = config.initUp2dateConfig()

    def populate(self, headers):
        # Scan HTTP response headers for capability announcements and record
        # each advertised capability in self.data.
        for key, val in headers.items():
            if key.lower() == "x-rhn-server-capability":
                capslist = METHOD_NAME(val)
                for (cap,data) in capslist:
                    self.data[cap] = data

    def parseCapVersion(self, versionString):
        # Supported version syntaxes:
        #   "1-3" -> the inclusive range 1..3
        #   "1:3" -> the explicit list [1, 3]
        #   "2"   -> the single version [2]
        index = versionString.find('-')
        # version of "-" is bogus, ditto for "1-"
        if index > 0:
            rng = versionString.split("-")
            start = rng[0]
            end = rng[1]
            versions = range(int(start), int(end)+1)
            return versions
        vers = versionString.split(':')
        if len(vers) > 1:
            versions = [int(a) for a in vers]
            return versions
        return [int(versionString)]

    def validateCap(self, cap, capvalue):
        # Record the capability in missingCaps when the server either does
        # not advertise it at all (stored as None) or has the wrong version.
        if not cap in self.data:
            # NOTE(review): errstr is built but never used here; the
            # user-visible error is raised later in workaroundMissingCaps().
            errstr = _("This client requires the server to support %s, which the current " \
                       "server does not support") % cap
            self.missingCaps[cap] = None
        else:
            data = self.data[cap]
            # DOES the server have the version we need
            if int(capvalue['version']) not in self.parseCapVersion(data['version']):
                self.missingCaps[cap] = self.neededCaps[cap]

    def validate(self):
        # Check every needed capability, then apply workarounds or raise.
        for key in self.neededCaps.keys():
            self.validateCap(key, self.neededCaps[key])
        self.workaroundMissingCaps()

    def setConfig(self, key, configItem):
        # If the capability is missing, disable the matching config flag and
        # drop it from the work list; otherwise enable the flag.
        if key in self.tmpCaps:
            self.cfg[configItem] = 0
            del self.tmpCaps[key]
        else:
            self.cfg[configItem] = 1

    def workaroundMissingCaps(self):
        # if we have caps that we know we want, but we
        # can work around, setup config variables here so
        # that we know to do just that
        self.tmpCaps = self.missingCaps
        # this is an example of how to work around it
        key = 'caneatCheese'
        if key in self.tmpCaps:
            # do whatevers needed to workaround
            del self.tmpCaps[key]
        else:
            # we support this, set a config option to
            # indicate that possibly
            pass
        # dict of capability name to the config item that
        # corresponds with it
        capsConfigMap = {'supportsAutoUp2dateOption': 'supportsAutoUp2dateOption',
                         'registration.finish_message': 'supportsFinishMessage',
                         "registration.update_contact_info" : 'supportsUpdateContactInfo',
                         "registration.delta_packages" : 'supportsDeltaPackages',
                         "xmlrpc.packages.extended_profile" : 'supportsExtendedPackageProfile',
                         "registration.extended_update_support" : "supportsEUS",
                         "registration.smbios" : "supportsSMBIOS"}
        for key in capsConfigMap.keys():
            self.setConfig(key, capsConfigMap[key])
        # Anything still left in tmpCaps cannot be worked around: split into
        # completely-missing vs wrong-version and raise a combined error.
        missingCaps = []
        wrongVersionCaps = []
        if len(self.tmpCaps):
            for cap in self.tmpCaps:
                capInfo = self.tmpCaps[cap]
                if capInfo == None:
                    # it's completely missing
                    missingCaps.append((cap, capInfo))
                else:
                    wrongVersionCaps.append((cap, capInfo))
        errString = ""
        errorList = []
        if len(wrongVersionCaps):
            for (cap, capInfo) in wrongVersionCaps:
                errString = errString + "Needs %s of version: %s but server has version: %s\n" % (cap,
                                                                                                  capInfo['version'],
                                                                                                  self.data[cap]['version'])
                errorList.append({"capName":cap, "capInfo":capInfo, "serverVersion":self.data[cap]})
        if len(missingCaps):
            for (cap, capInfo) in missingCaps:
                errString = errString + "Needs %s but server does not support that capability\n" % (cap)
                errorList.append({"capName":cap, "capInfo":capInfo, "serverVersion":""})
        if len(errString):
            raise up2dateErrors.ServerCapabilityError(errString, errorList)

    def hasCapability(self, capability, version=None):
        """Checks if the server supports a capability and optionally a version.
        Returns True or False.
        This complements the neededCaps mechanism provided by this module.
        Using hasCapability makes it easier to do something only if the server
        supports it or to put workaround code in the user of this class. The
        neededCaps mechanism makes it easier to put workaround code in this
        module, which makes sense if it is to be shared.
        'capability' should be a string such as 'registration.foobar'. It can
        be a capability in 'neededCaps' above or one that isn't there. 'version'
        can be a string (where isdigit() is True) or an int.
        """
        assert version is None or str(version).isdigit()
        if not capability in self.data:
            return False
        if version:
            data = self.data[capability]
            if int(version) not in self.parseCapVersion(data['version']):
                return False
        return True
5,720 | properties | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetDraResult',
'AwaitableGetDraResult',
'get_dra',
'get_dra_output',
]
@pulumi.output_type
class GetDraResult:
    """
    Dra model.
    """
    # NOTE: generated Pulumi output type for an Azure "Dra" (fabric agent).
    # Regenerate from the provider schema rather than hand-editing.
    # METHOD_NAME corresponds to the resource's "properties" field.
    def __init__(__self__, id=None, name=None, METHOD_NAME=None, system_data=None, type=None):
        # Validate each argument's wire type, then store it via pulumi.set so
        # the @pulumi.output_type machinery can resolve it through pulumi.get.
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if METHOD_NAME and not isinstance(METHOD_NAME, dict):
            raise TypeError("Expected argument 'properties' to be a dict")
        pulumi.set(__self__, "properties", METHOD_NAME)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Gets or sets the Id of the resource.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Gets or sets the name of the resource.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def METHOD_NAME(self) -> 'outputs.DraModelPropertiesResponse':
        """
        Dra model properties.
        """
        return pulumi.get(self, "properties")

    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> 'outputs.DraModelResponseSystemData':
        # ARM system metadata for the resource (typed in the generated outputs).
        return pulumi.get(self, "system_data")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Gets or sets the type of the resource.
        """
        return pulumi.get(self, "type")
class AwaitableGetDraResult(GetDraResult):
    # Awaitable wrapper: the statically-false ``yield`` makes __await__ a
    # generator function that yields nothing and immediately returns a plain
    # GetDraResult copy, so the value works both with and without ``await``.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetDraResult(
            id=self.id,
            name=self.name,
            METHOD_NAME=self.METHOD_NAME,
            system_data=self.system_data,
            type=self.type)
def get_dra(fabric_agent_name: Optional[str] = None,
            fabric_name: Optional[str] = None,
            resource_group_name: Optional[str] = None,
            opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDraResult:
    """
    Gets the details of the fabric agent.
    Azure REST API version: 2021-02-16-preview.

    :param str fabric_agent_name: The fabric agent (Dra) name.
    :param str fabric_name: The fabric name.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    # Marshal arguments into the camelCase payload expected by the provider.
    __args__ = dict()
    __args__['fabricAgentName'] = fabric_agent_name
    __args__['fabricName'] = fabric_name
    __args__['resourceGroupName'] = resource_group_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # Synchronous provider invoke; typ= tells the runtime how to deserialize.
    __ret__ = pulumi.runtime.invoke('azure-native:datareplication:getDra', __args__, opts=opts, typ=GetDraResult).value

    return AwaitableGetDraResult(
        id=pulumi.get(__ret__, 'id'),
        name=pulumi.get(__ret__, 'name'),
        METHOD_NAME=pulumi.get(__ret__, 'properties'),
        system_data=pulumi.get(__ret__, 'system_data'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_dra)
def get_dra_output(fabric_agent_name: Optional[pulumi.Input[str]] = None,
                   fabric_name: Optional[pulumi.Input[str]] = None,
                   resource_group_name: Optional[pulumi.Input[str]] = None,
                   opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetDraResult]:
    """
    Gets the details of the fabric agent.
    Azure REST API version: 2021-02-16-preview.

    :param str fabric_agent_name: The fabric agent (Dra) name.
    :param str fabric_name: The fabric name.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    # Body intentionally empty: lift_output_func wraps get_dra so it accepts
    # pulumi Inputs and returns an Output of the result.
    ...
5,721 | synchronizer | """opentrons.hardware_control.scripts.repl - cli for hc api
Running this script will create and spin up a hardware controller
and expose it to a python commandline.
"""
import os
from functools import wraps
import asyncio
import logging
from logging.config import dictConfig
from opentrons.hardware_control.api import API
from opentrons.hardware_control.ot3api import OT3API
# Behaviour toggles resolved from the environment before the opentrons
# imports below (the noqa: E402 imports later in the file depend on the
# feature-flag env var being set first).
update_firmware = True
has_robot_server = True
if os.environ.get("OPENTRONS_SIMULATION"):
    # Simulation mode: no real robot server is running, so don't stop one.
    print("Running with simulators")
    has_robot_server = False
if os.environ.get("OT2", None):
    print(
        '"OT2" env var detected, running with OT2 HC. '
        "If you dont want this, remove the OT2 env var"
    )
    os.environ["OT_API_FF_enableOT3HardwareController"] = "false"
else:
    print("Running with OT3 HC. If you dont want this, set an env var named 'OT2'")
    os.environ["OT_API_FF_enableOT3HardwareController"] = "true"
if os.environ.get("OT3_DISABLE_FW_UPDATES"):
    update_firmware = False
    print("OT3 firmware updates are disabled")
from code import interact # noqa: E402
from subprocess import run # noqa: E402
from typing import Union, Type, Any # noqa: E402
from opentrons.types import Mount, Point # noqa: E402
from opentrons.config import feature_flags as ff # noqa: E402
from opentrons.hardware_control.modules.types import ModuleType # noqa: E402
from opentrons.hardware_control.types import ( # noqa: E402
Axis,
OT3Mount,
SubSystem,
GripperProbe,
CriticalPoint,
)
from opentrons.hardware_control.ot3_calibration import ( # noqa: E402
calibrate_pipette,
calibrate_belts,
delete_belt_calibration_data,
calibrate_gripper_jaw,
calibrate_module,
find_calibration_structure_height,
find_edge_binary,
CalibrationMethod,
find_axis_center,
gripper_pin_offsets_mean,
)
from opentrons.hardware_control.thread_manager import ThreadManager # noqa: E402
log = logging.getLogger(__name__)

# dictConfig-style logging setup (applied via dictConfig in the __main__
# block below): everything at INFO+ goes to a rotating log file.
LOG_CONFIG = {
    "version": 1,
    "disable_existing_loggers": False,
    "formatters": {
        "basic": {"format": "%(asctime)s %(name)s %(levelname)s %(message)s"}
    },
    "handlers": {
        "file_handler": {
            "class": "logging.handlers.RotatingFileHandler",
            "formatter": "basic",
            "filename": "/var/log/repl.log",
            "maxBytes": 5000000,  # rotate at ~5 MB, keeping 3 backups
            "level": logging.INFO,
            "backupCount": 3,
        },
    },
    "loggers": {
        "": {
            "handlers": ["file_handler"],
            "level": logging.INFO,
        },
    },
}
# Pick the hardware-controller flavour based on the feature flag that the
# env-var logic above just set.
if ff.enable_ot3_hardware_controller():
    HCApi: Union[Type[OT3API], Type[API]] = OT3API

    def build_thread_manager() -> ThreadManager[Union[API, OT3API]]:
        # Build the OT-3 hardware controller on its own managed thread.
        return ThreadManager(
            OT3API.build_hardware_controller,
            use_usb_bus=ff.rear_panel_integration(),
            update_firmware=update_firmware,
        )

    def wrap_async_util_fn(fn: Any, *bind_args: Any, **bind_kwargs: Any) -> Any:
        # Expose an async utility as a plain callable for the synchronous
        # REPL: pre-binds arguments and drives the coroutine to completion on
        # a fresh event loop.
        @wraps(fn)
        def METHOD_NAME(*args: Any, **kwargs: Any) -> Any:
            return asyncio.new_event_loop().run_until_complete(
                fn(*bind_args, *args, **bind_kwargs, **kwargs)
            )

        return METHOD_NAME
else:
    HCApi = API

    # NOTE(review): wrap_async_util_fn is only defined on the OT-3 branch,
    # but do_interact below references it unconditionally -- the OT-2 path
    # would raise NameError there. Confirm whether that is intended.
    def build_thread_manager() -> ThreadManager[Union[API, OT3API]]:
        return ThreadManager(
            API.build_hardware_controller,
            use_usb_bus=ff.rear_panel_integration(),
            update_firmware=update_firmware,
        )
logging.basicConfig(level=logging.INFO)


def stop_server() -> None:
    """Stop the opentrons-robot-server systemd unit so the REPL can own the hardware."""
    command = ["systemctl", "stop", "opentrons-robot-server"]
    run(command)
def build_api() -> ThreadManager[Union[API, OT3API]]:
    """Build the hardware controller, optionally running firmware updates.

    Returns the ThreadManager once its managed thread is ready.
    """
    # NOTE: We are using StreamHandler so when the hw controller is
    # being built we can log firmware update progress to stdout.
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.INFO)
    logging.getLogger().addHandler(stream_handler)
    tm = build_thread_manager()
    # Stop mirroring log output to stdout once the controller is built.
    logging.getLogger().removeHandler(stream_handler)
    tm.managed_thread_ready_blocking()

    if update_firmware:

        async def _do_update() -> None:
            # Stream per-subsystem progress while the updates run.
            async for update in tm.update_firmware():
                print(f"Update: {update.subsystem.name}: {update.progress}%")

        asyncio.run(_do_update())
    return tm
def do_interact(api: ThreadManager[Union[API, OT3API]]) -> None:
    """Start the interactive REPL with the hardware API and helpers in scope.

    The ``local`` mapping seeds the REPL namespace with the synchronous API
    adapter, commonly needed types, and calibration utilities pre-bound to
    this API instance.
    """
    interact(
        banner=(
            "Hardware Control API REPL\nCall methods on api like "
            "api.move_to(Mount.RIGHT, Point(400, 400, 500))"
        ),
        local={
            "api": api.sync,
            "Mount": Mount,
            "Point": Point,
            "Axis": Axis,
            "OT3Mount": OT3Mount,
            "SubSystem": SubSystem,
            "GripperProbe": GripperProbe,
            "ModuleType": ModuleType,
            "find_edge": wrap_async_util_fn(find_edge_binary, api),
            "find_calibration_structure_height": wrap_async_util_fn(
                find_calibration_structure_height, api
            ),
            "calibrate_pipette": wrap_async_util_fn(calibrate_pipette, api),
            "calibrate_belts": wrap_async_util_fn(calibrate_belts, api),
            "delete_belt_calibration_data": delete_belt_calibration_data,
            "calibrate_gripper": wrap_async_util_fn(calibrate_gripper_jaw, api),
            "calibrate_module": wrap_async_util_fn(calibrate_module, api),
            "gripper_pin_offsets_mean": gripper_pin_offsets_mean,
            "CalibrationMethod": CalibrationMethod,
            "find_axis_center": wrap_async_util_fn(find_axis_center, api),
            "CriticalPoint": CriticalPoint,
        },
    )
if __name__ == "__main__":
dictConfig(LOG_CONFIG)
if has_robot_server:
stop_server()
api_tm = build_api()
do_interact(api_tm)
api_tm.clean_up() |
5,722 | test indirect inputs | #
# Copyright 2017-2023 - Swiss Data Science Center (SDSC)
# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and
# Eidgenössische Technische Hochschule Zürich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Run API."""
from pathlib import Path
import yaml
from renku.core.workflow.plan_factory import (
get_indirect_inputs_path,
get_indirect_outputs_path,
read_indirect_parameters,
)
from renku.ui.api import Input, Output, Parameter, Project
def METHOD_NAME(project):
    """Test defining indirect inputs.

    Inputs declared both outside and inside a Project context must all be
    recorded in the indirect-inputs YAML file, keyed by name.
    """
    path_1 = "/some/absolute/path"
    path_2 = "relative/path"
    path_3 = "a/path with white-spaces"

    input_1 = Input("input-1", path_1)

    with Project() as project:
        input_2 = Input("input-2", path_2)
        input_3 = Input("input-3", path_3)

        assert Path(path_1) == input_1.path
        assert Path(path_2) == input_2.path
        assert Path(path_3) == input_3.path

        content = get_indirect_inputs_path(project.path).read_text()
        # Parse the YAML mapping once instead of once per assertion.
        name_to_path = yaml.safe_load(content)
        assert {path_1, path_2, path_3} == set(name_to_path.values())
        assert {input_1.name, input_2.name, input_3.name} == set(name_to_path.keys())
def test_indirect_outputs(project):
    """Test defining indirect outputs.

    Outputs declared both outside and inside a Project context must all be
    recorded in the indirect-outputs YAML file, keyed by name.
    """
    path_1 = "/some/absolute/path"
    path_2 = "relative/path"
    path_3 = "a/path with white-spaces"

    output_1 = Output("output-1", path_1)

    with Project() as project:
        output_2 = Output("output-2", path_2)
        output_3 = Output("output-3", path_3)

        assert Path(path_1) == output_1.path
        assert Path(path_2) == output_2.path
        assert Path(path_3) == output_3.path

        content = get_indirect_outputs_path(project.path).read_text()
        # Parse the YAML mapping once instead of once per assertion.
        name_to_path = yaml.safe_load(content)
        assert {path_1, path_2, path_3} == set(name_to_path.values())
        assert {output_1.name, output_2.name, output_3.name} == set(name_to_path.keys())
def test_indirect_inputs_outputs(project):
    """Test defining indirect inputs and outputs together."""
    path_1 = "/some/absolute/path"
    path_2 = "relative/path"

    input_1 = Input("input-1", path_1)
    output_2 = Output("output-1", path_2)

    assert Path(path_1) == input_1.path
    assert Path(path_2) == output_2.path

    input_content = get_indirect_inputs_path(project.path).read_text()
    output_content = get_indirect_outputs_path(project.path).read_text()
    # Parse each YAML document once and reuse the mapping.
    inputs = yaml.safe_load(input_content)
    outputs = yaml.safe_load(output_content)

    assert path_1 == list(inputs.values())[0]
    assert input_1.name == list(inputs.keys())[0]
    assert path_2 == list(outputs.values())[0]
    assert output_2.name == list(outputs.keys())[0]
def test_open_inputs(project):
    """Test inputs can be passed to open function."""
    # Input objects support the os.PathLike protocol, so open() accepts them.
    with open(Input("input-1", "input.txt"), "w") as f:
        f.write("some data")

    assert "some data" == (project.path / "input.txt").read_text()
def test_open_outputs(project):
    """Test outputs can be passed to open function."""
    # Output objects support the os.PathLike protocol, so open() accepts them.
    with open(Output("output-1", "output.txt"), "w") as f:
        f.write("some data")

    assert "some data" == (project.path / "output.txt").read_text()
def test_parameters(project):
    """Test defining parameters."""
    p1 = Parameter("parameter-1", 42)

    with Project():
        p2 = Parameter("param-2", "42")
        p3 = Parameter("parameter_3", 42.42)

        assert (42, "42", 42.42) == (p1.value, p2.value, p3.value)

    # Re-declaring an already-registered parameter; the assertions below show
    # it must not produce a duplicate entry.
    _ = Parameter("parameter_3", 42.42)

    data = read_indirect_parameters(project.path)

    assert {"parameter-1", "param-2", "parameter_3"} == set(data.keys())
    assert {42, "42", 42.42} == set(data.values())
5,723 | get read group | # ===========================================================================
#
# PUBLIC DOMAIN NOTICE
# National Center for Biotechnology Information
#
# This software/database is a "United States Government Work" under the
# terms of the United States Copyright Act. It was written as part of
# the author's official duties as a United States Government employee and
# thus cannot be copyrighted. This software/database is freely available
# to the public for use. The National Library of Medicine and the U.S.
# Government have not placed any restriction on its use or reproduction.
#
# Although all reasonable efforts have been taken to ensure the accuracy
# and reliability of the software and data, the NLM and the U.S.
# Government do not and cannot warrant the performance or results that
# may be obtained by using this software or data. The NLM and the U.S.
# Government disclaim all warranties, express or implied, including
# warranties of performance, merchantability or fitness for any particular
# purpose.
#
# Please cite the author in any work or product based on this material.
#
# ===========================================================================
#
#
from ctypes import byref, c_uint32, c_int32
from . import NGS
from .String import NGS_String, NGS_RawString, getNGSString, getNGSValue
from .FragmentIterator import FragmentIterator
# Read
# represents an NGS machine read
# having some number of biological Fragments
class Read(FragmentIterator):
    # Read-category bit flags; ``aligned`` and ``all`` are composite masks.
    fullyAligned = 1
    partiallyAligned = 2
    aligned = fullyAligned | partiallyAligned
    unaligned = 4
    all = aligned | unaligned

    def getReadId(self):
        # Identifier of this read within the run.
        return getNGSString(self, NGS.lib_manager.PY_NGS_ReadGetReadId)

    # ----------------------------------------------------------------------
    # Fragment

    def getNumFragments(self):
        # Number of biological fragments contained in this read.
        return getNGSValue(self, NGS.lib_manager.PY_NGS_ReadGetNumFragments, c_uint32)

    def fragmentIsAligned(self, fragIdx):
        """
        :param: fragIdx is zero-based and non-negative fragment index
        :returns: true if a fragment is aligned
        """
        ret = c_int32()
        ngs_str_err = NGS_RawString()
        try:
            # NOTE(review): the C call's return code (res) is not inspected;
            # errors would only be visible through ngs_str_err -- confirm
            # whether failures should raise here.
            res = NGS.lib_manager.PY_NGS_ReadFragmentIsAligned(self.ref, fragIdx, byref(ret), byref(ngs_str_err.ref))
        finally:
            ngs_str_err.close()
        return bool(ret.value)

    # ----------------------------------------------------------------------
    # read details

    # ReadCategory
    def getReadCategory(self):
        # One of the bit-flag constants declared on this class.
        return getNGSValue(self, NGS.lib_manager.PY_NGS_ReadGetReadCategory, c_uint32)

    def METHOD_NAME(self):
        return getNGSString(self, NGS.lib_manager.PY_NGS_ReadGetReadGroup)

    def getReadName(self):
        return getNGSString(self, NGS.lib_manager.PY_NGS_ReadGetReadName)

    def getReadBases(self, offset=0, length=-1):
        """
        :param: offset is zero-based and non-negative
        :param: length must be >= 0 (the default -1 presumably means
            "to the end of the read" -- TODO confirm against the C API)
        :returns: sequence bases
        """
        ngs_str_err = NGS_RawString()
        try:
            ngs_str_seq = NGS_String()
            try:
                res = NGS.lib_manager.PY_NGS_ReadGetReadBases(self.ref, offset, length, byref(ngs_str_seq.ref), byref(ngs_str_err.ref))
                return ngs_str_seq.getPyString()
            finally:
                # Inner/outer finally blocks release both native strings even
                # if getPyString raises.
                ngs_str_seq.close()
        finally:
            ngs_str_err.close()

    def getReadQualities(self, offset=0, length=-1):
        """
        :param: offset is zero-based and non-negative
        :param: length must be >= 0 (the default -1 presumably means
            "to the end of the read" -- TODO confirm against the C API)
        :returns: phred quality values using ASCII offset of 33
        """
        ngs_str_err = NGS_RawString()
        try:
            ngs_str_seq = NGS_String()
            try:
                res = NGS.lib_manager.PY_NGS_ReadGetReadQualities(self.ref, offset, length, byref(ngs_str_seq.ref), byref(ngs_str_err.ref))
                return ngs_str_seq.getPyString()
            finally:
                ngs_str_seq.close()
        finally:
            ngs_str_err.close()
5,724 | test need foreign | from leapp.models import MultipathConfFacts8to9, MultipathConfig8to9
from leapp.reporting import Report
def _assert_foreign_report(report):
assert report['title'] == \
'device-mapper-multipath now defaults to ignoring foreign devices'
assert report['severity'] == 'info'
def _assert_allow_usb_report(report):
assert report['title'] == \
'device-mapper-multipath now defaults to ignoring USB devices'
assert report['severity'] == 'info'
def _assert_invalid_regexes_report(report, paths_str):
assert report['title'] == \
'device-mapper-multipath no longer accepts "*" as a valid regular expression'
assert report['severity'] == 'info'
assert paths_str in report['summary']
def _build_config(pathname, config_dir, enable_foreign_exists, invalid_regexes_exist, allow_usb_exists):
    """Assemble a MultipathConfig8to9 model from the given file facts."""
    fields = {
        'pathname': pathname,
        'config_dir': config_dir,
        'enable_foreign_exists': enable_foreign_exists,
        'invalid_regexes_exist': invalid_regexes_exist,
        'allow_usb_exists': allow_usb_exists,
    }
    return MultipathConfig8to9(**fields)
def _build_facts(confs):
    # Wrap the per-file config models in the facts message consumed by the actor.
    return MultipathConfFacts8to9(configs=confs)
def test_need_everything(current_actor_context):
    # Config lacking enable_foreign and allow_usb, and containing "*" regexes:
    # all three reports must be produced, in a fixed order.
    config = _build_config('need_everything.conf', None, False, True, False)
    facts = _build_facts([config])
    current_actor_context.feed(facts)
    current_actor_context.run()
    reports = list(current_actor_context.consume(Report))
    assert reports and len(reports) == 3
    _assert_foreign_report(reports[0].report)
    _assert_allow_usb_report(reports[1].report)
    _assert_invalid_regexes_report(reports[2].report, 'need_everything.conf')
def test_need_nothing(current_actor_context):
    # Fully compliant config (both options present, no bad regexes): no reports.
    config = _build_config('need_nothing.conf', '/etc/multipath/conf.d', True, False, True)
    facts = _build_facts([config])
    current_actor_context.feed(facts)
    current_actor_context.run()
    reports = current_actor_context.consume(Report)
    assert not reports
def METHOD_NAME(current_actor_context):
    # Only enable_foreign is missing: exactly the foreign-devices report.
    config = _build_config('need_foreign.conf', None, False, False, True)
    facts = _build_facts([config])
    current_actor_context.feed(facts)
    current_actor_context.run()
    reports = list(current_actor_context.consume(Report))
    assert reports and len(reports) == 1
    _assert_foreign_report(reports[0].report)
# NOTE(review): "allos" in the test name is likely a typo for "allow".
def test_need_allos_usb(current_actor_context):
    # Only allow_usb is missing: exactly the USB-devices report.
    config = _build_config('need_allow_usb.conf', None, True, False, False)
    facts = _build_facts([config])
    current_actor_context.feed(facts)
    current_actor_context.run()
    reports = list(current_actor_context.consume(Report))
    assert reports and len(reports) == 1
    _assert_allow_usb_report(reports[0].report)
def test_invalid_regexes(current_actor_context):
    # Two of three config files contain "*" regexes: a single combined report
    # must name both offending files (and not the clean one).
    config1 = _build_config('invalid_regexes1.conf', None, True, True, True)
    config2 = _build_config('no_invalid_regexes.conf', None, True, False, True)
    config3 = _build_config('invalid_regexes2.conf', None, True, True, True)
    facts = _build_facts([config1, config2, config3])
    current_actor_context.feed(facts)
    current_actor_context.run()
    reports = list(current_actor_context.consume(Report))
    assert reports and len(reports) == 1
    _assert_invalid_regexes_report(reports[0].report, 'invalid_regexes1.conf and invalid_regexes2.conf')
def test_not_in_main_conf(current_actor_context):
    # enable_foreign/allow_usb set in the secondary file suffice; only the
    # main config's bad regexes are reported.
    main_conf = _build_config('main.conf', '/etc/multipath/conf.d', False, True, False)
    other_conf = _build_config('other.conf', None, True, False, True)
    facts = _build_facts([main_conf, other_conf])
    current_actor_context.feed(facts)
    current_actor_context.run()
    reports = list(current_actor_context.consume(Report))
    assert reports and len(reports) == 1
    _assert_invalid_regexes_report(reports[0].report, 'main.conf')
def test_in_main_conf(current_actor_context):
    # Options set in the main config cover all files; every file containing a
    # bad regex (main, next, last) must be listed in the single regex report.
    main_conf = _build_config('main.conf', '/etc/multipath/conf.d', True, True, True)
    other_conf = _build_config('other.conf', None, False, False, False)
    next_conf = _build_config('next.conf', None, False, True, False)
    last_conf = _build_config('last.conf', None, False, True, False)
    facts = _build_facts([main_conf, other_conf, next_conf, last_conf])
    current_actor_context.feed(facts)
    current_actor_context.run()
    reports = list(current_actor_context.consume(Report))
    assert reports and len(reports) == 1
    _assert_invalid_regexes_report(reports[0].report, 'main.conf, next.conf and last.conf')
def test_in_none_conf(current_actor_context):
    # Neither file sets enable_foreign or allow_usb: both option reports fire.
    main_conf = _build_config('main.conf', '/etc/multipath/conf.d', False, False, False)
    other_conf = _build_config('other.conf', None, False, False, False)
    facts = _build_facts([main_conf, other_conf])
    current_actor_context.feed(facts)
    current_actor_context.run()
    reports = list(current_actor_context.consume(Report))
    assert reports and len(reports) == 2
    _assert_foreign_report(reports[0].report)
    _assert_allow_usb_report(reports[1].report)
def test_mixed_conf(current_actor_context):
    # enable_foreign is satisfied (main/last), allow_usb is nowhere, and only
    # next.conf has bad regexes: expect the USB report plus a regex report
    # naming next.conf only.
    main_conf = _build_config('main.conf', None, True, False, False)
    next_conf = _build_config('next.conf', None, False, True, False)
    last_conf = _build_config('last.conf', None, True, False, False)
    facts = _build_facts([main_conf, next_conf, last_conf])
    current_actor_context.feed(facts)
    current_actor_context.run()
    reports = list(current_actor_context.consume(Report))
    assert reports and len(reports) == 2
    _assert_allow_usb_report(reports[0].report)
    _assert_invalid_regexes_report(reports[1].report, 'next.conf')
5,725 | test refresh token | """ This is a test of the AuthDB. Requires authlib
It supposes that the DB is present and installed in DIRAC
"""
import time
import DIRAC
DIRAC.initialize() # Initialize configuration
from authlib.jose import JsonWebKey, JsonWebSignature, jwt, RSAKey
from authlib.common.encoding import json_b64encode, urlsafe_b64decode, json_loads
from DIRAC.FrameworkSystem.DB.AuthDB import AuthDB
# Module-level fixtures: a live AuthDB connection (requires the DB to be
# installed in DIRAC) and a JWT payload template shared by the tests below.
db = AuthDB()
payload = {
    "sub": "user",
    "iss": "issuer",
    "iat": int(time.time()),
    "exp": int(time.time()) + (12 * 3600),  # valid for 12 hours
    "scope": "scope",
    "setup": "setup",
    "group": "my_group",
}
def METHOD_NAME():
    """Try to revoke/save/get refresh tokens"""
    # Two sample credentials; note one carries ``expires_at`` and the other
    # ``expires_in`` -- storeRefreshToken must accept both forms.
    DToken = dict(
        access_token=jwt.encode({"alg": "HS256"}, payload, "secret").decode("utf-8"),
        refresh_token=jwt.encode({"alg": "HS256"}, payload, "secret").decode("utf-8"),
        expires_at=int(time.time()) + 3600,
    )
    New_DToken = dict(
        access_token=jwt.encode({"alg": "HS256"}, payload, "secret").decode("utf-8"),
        refresh_token=jwt.encode({"alg": "HS256"}, payload, "secret").decode("utf-8"),
        expires_in=int(time.time()) + 3600,
    )
    preset_jti = "123"
    # Remove refresh token (clean slate in case a previous run left one behind)
    result = db.revokeRefreshToken(preset_jti)
    assert result["OK"], result["Message"]
    # Store tokens: once under a caller-chosen jti, once with a generated jti
    result = db.storeRefreshToken(DToken.copy(), preset_jti)
    assert result["OK"], result["Message"]
    assert result["Value"]["jti"] == preset_jti
    assert result["Value"]["iat"] <= int(time.time())
    result = db.storeRefreshToken(New_DToken.copy())
    assert result["OK"], result["Message"]
    assert result["Value"]["jti"]
    assert result["Value"]["iat"] <= int(time.time())
    token_id = result["Value"]["jti"]
    issued_at = result["Value"]["iat"]
    # Check token
    result = db.getCredentialByRefreshToken(preset_jti)
    assert result["OK"], result["Message"]
    assert result["Value"]["jti"] == preset_jti
    assert result["Value"]["access_token"] == DToken["access_token"]
    assert result["Value"]["refresh_token"] == DToken["refresh_token"]
    result = db.getCredentialByRefreshToken(token_id)
    assert result["OK"], result["Message"]
    assert result["Value"]["jti"] == token_id
    assert int(result["Value"]["issued_at"]) == issued_at
    assert result["Value"]["access_token"] == New_DToken["access_token"]
    assert result["Value"]["refresh_token"] == New_DToken["refresh_token"]
    # Check token after request: a second lookup comes back empty, i.e.
    # fetching a refresh token apparently consumes it.
    for jti in [token_id, preset_jti]:
        result = db.getCredentialByRefreshToken(jti)
        assert result["OK"], result["Message"]
        assert not result["Value"]
    # Renew tokens
    result = db.storeRefreshToken(New_DToken.copy(), token_id)
    assert result["OK"], result["Message"]
    # Revoke token
    result = db.revokeRefreshToken(token_id)
    assert result["OK"], result["Message"]
    # Check token
    result = db.getCredentialByRefreshToken(token_id)
    assert result["OK"], result["Message"]
    assert not result["Value"]
def test_keys():
    """Try to store/get/remove keys"""
    # JWS
    jws = JsonWebSignature(algorithms=["RS256"])
    code_payload = {
        "user_id": "user",
        "scope": "scope",
        "client_id": "client",
        "redirect_uri": "redirect_uri",
        "code_challenge": "code_challenge",
    }
    # Token metadata (the local ``payload`` deliberately shadows the
    # module-level one: this variant has no iat/exp claims)
    header = {"alg": "RS256"}
    payload = {"sub": "user", "iss": "issuer", "scope": "scope", "setup": "setup", "group": "my_group"}
    # Remove all keys
    result = db.removeKeys()
    assert result["OK"], result["Message"]
    # Check active keys
    result = db.getActiveKeys()
    assert result["OK"], result["Message"]
    assert result["Value"] == []
    # Create new one (getPrivateKey with no kid generates a fresh RSA key)
    result = db.getPrivateKey()
    assert result["OK"], result["Message"]
    private_key = result["Value"]
    assert isinstance(private_key, RSAKey)
    # Sign token
    header["kid"] = private_key.thumbprint()
    # Find key by KID
    result = db.getPrivateKey(header["kid"])
    assert result["OK"], result["Message"]
    # as_dict has no arguments for authlib < 1.0.0
    # for authlib >= 1.0.0:
    assert result["Value"].as_dict(True) == private_key.as_dict(True)
    # Sign token
    token = jwt.encode(header, payload, private_key)
    # Sign auth code
    code = jws.serialize_compact(header, json_b64encode(code_payload), private_key)
    # Get public key set
    result = db.getKeySet()
    keyset = result["Value"]
    assert result["OK"], result["Message"]
    # as_dict has no arguments for authlib < 1.0.0
    # for authlib >= 1.0.0:
    assert bool([key for key in keyset.as_dict(True)["keys"] if key["kid"] == header["kid"]])
    # Read token: decoding with the public key set verifies the signature
    _payload = jwt.decode(token, JsonWebKey.import_key_set(keyset.as_dict()))
    assert _payload == payload
    # Read auth code
    data = jws.deserialize_compact(code, keyset.keys[0])
    _code_payload = json_loads(urlsafe_b64decode(data["payload"]))
    assert _code_payload == code_payload
def test_Sessions():
    """Try to store/get/remove Sessions"""
    # Example of the new session metadata
    sData1 = {
        "client_id": "DIRAC_CLI",
        "device_code": "SsoGTDglu6LThpx0CigM9i9J72B5atZ24ULr6R1awm",
        "expires_in": 1800,
        "id": "SsoGTDglu6LThpx0CigM9i9J72B5atZ24ULr6R1awm",
        "interval": 5,
        "scope": "g:my_group",
        "uri": "https://domain.com/auth/device?&response_type=device&client_id=DIRAC_CLI&scope=g:my_group",
        "user_code": "MDKP-MXMF",
        "verification_uri": "https://domain.com/auth/device",
        "verification_uri_complete": "https://domain.com/auth/device?user_code=MDKP-MXMF",
    }
    # Example of the updated session (same session after the user authorized
    # the device flow -- it gains a user_id)
    sData2 = {
        "client_id": "DIRAC_CLI",
        "device_code": "SsoGTDglu6LThpx0CigM9i9J72B5atZ24ULr6R1awm",
        "expires_in": 1800,
        "id": "SsoGTDglu6LThpx0CigM9i9J72B5atZ24ULr6R1awm",
        "interval": 5,
        "scope": "g:my_group",
        "uri": "https://domain.com/auth/device?&response_type=device&client_id=DIRAC_CLI&scope=g:my_group",
        "user_code": "MDKP-MXMF",
        "verification_uri": "https://domain.com/auth/device",
        "verification_uri_complete": "https://domain.com/auth/device?user_code=MDKP-MXMF",
        "user_id": "username",
    }
    # Remove old session (ignore result: it may not exist)
    db.removeSession(sData1["id"])
    # Add session
    result = db.addSession(sData1)
    assert result["OK"], result["Message"]
    # Get session by the human-facing user code
    result = db.getSessionByUserCode(sData1["user_code"])
    assert result["OK"], result["Message"]
    assert result["Value"]["device_code"] == sData1["device_code"]
    assert result["Value"].get("user_id") != sData2["user_id"]
    # Update session
    result = db.updateSession(sData2, sData1["id"])
    assert result["OK"], result["Message"]
    # Get session
    result = db.getSession(sData2["id"])
    assert result["OK"], result["Message"]
    assert result["Value"]["device_code"] == sData2["device_code"]
    assert result["Value"]["user_id"] == sData2["user_id"]
    # Remove session
    result = db.removeSession(sData2["id"])
    assert result["OK"], result["Message"]
    # Make sure that the session is absent
    result = db.getSession(sData2["id"])
    assert result["OK"], result["Message"]
    assert not result["Value"]
5,726 | test data frame listoffset listoffset double | # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
import numpy as np # noqa: F401
import pytest
import awkward as ak
# Skip the whole module when PyROOT is not installed.
ROOT = pytest.importorskip("ROOT")

# Single implicit-MT thread keeps RDataFrame results deterministic.
ROOT.ROOT.EnableImplicitMT(1)
# Shortcut for JIT-compiling C++ snippets through cling.
compiler = ROOT.gInterpreter.Declare
def test_data_frame_integers():
    """Round-trip a flat integer array through RDataFrame."""
    original = ak.Array([1, 2, 3, 4, 5])

    frame = ak.to_rdataframe({"x": original})
    assert frame.GetColumnType("x") == "int64_t"

    restored = ak.from_rdataframe(frame, columns=("x",))
    assert original.to_list() == restored["x"].to_list()
def test_data_frame_double():
    """Round-trip a flat float array through RDataFrame."""
    original = ak.Array([1.1, 2.2, 3.3, 4.4, 5.5])

    frame = ak.to_rdataframe({"x": original})
    assert frame.GetColumnType("x") == "double"

    restored = ak.from_rdataframe(frame, columns=("x",))
    assert original.to_list() == restored["x"].to_list()
def test_data_frame_char():
    """Round-trip an array of strings through RDataFrame."""
    original = ak.Array(["a", "b", "c", "d", "e"])

    frame = ak.to_rdataframe({"x": original})
    assert frame.GetColumnType("x") == "std::string"

    restored = ak.from_rdataframe(frame, columns=("x",))
    assert original.to_list() == restored["x"].to_list()
def test_data_frame_complex():
    """Round-trip a flat complex array through RDataFrame."""
    original = ak.Array([1.1 + 0.1j, 2.2 + 0.2j, 3.3 + 0.3j, 4.4 + 0.4j, 5.5 + 0.5j])

    frame = ak.to_rdataframe({"x": original})
    assert frame.GetColumnType("x") == "std::complex<double>"

    restored = ak.from_rdataframe(frame, columns=("x",))
    assert original.to_list() == restored["x"].to_list()
def test_data_frame_listoffset_integers():
    """Round-trip a jagged integer array (ListOffsetArray) through RDataFrame."""
    original = ak.Array([[1], [2, 3, 4], [5]])

    frame = ak.to_rdataframe({"x": original})
    assert frame.GetColumnType("x") == "ROOT::VecOps::RVec<int64_t>"

    restored = ak.from_rdataframe(frame, columns=("x",))
    assert original.to_list() == restored["x"].to_list()
def METHOD_NAME():
    # Round-trip a doubly nested (list-of-list) float array.
    ak_array_in = ak.Array(
        [
            [[1.1, 2.2, 3.3]],
            [[4.4, 5.5]],
            [[6.6], [7.7, 8.8, 9.9]],
        ]
    )
    data_frame = ak.to_rdataframe({"x": ak_array_in})
    # awkward::ListArray_ type
    # assert data_frame.GetColumnType("x") == "ROOT::VecOps::RVec<double>"
    ak_array_out = ak.from_rdataframe(
        data_frame,
        columns=("x",),
    )
    assert ak_array_in.to_list() == ak_array_out["x"].to_list()
def test_data_frame_vec_of_vec():
    # Records with an optional float "x" and a jagged integer list "y";
    # verifies that C++ Define() expressions over the awkward view round-trip
    # back into awkward arrays as nested std::vector columns.
    array = ak.Array(
        [
            [
                {"x": 1.1, "y": [1]},
                {"x": None, "y": [1, 2]},
                {"x": 3.3, "y": [1, 2, 3]},
            ],
            [],
            [{"x": None, "y": [1, 2, 3, 4]}, {"x": 5.5, "y": [1, 2, 3, 4, 5]}],
        ]
    )
    # ] * 10000)
    rdf2 = ak.to_rdataframe({"array": array})

    # We create a matrix RxC here
    # Note when dimensions R and C are large, the following code suffers
    # from potential performance penalties caused by frequent reallocation
    # of memory by the push_back() function. This should be used only when
    # vector dimensions are not known in advance.
    rdf3 = rdf2.Define(
        "output",
        """
        std::vector<std::vector<double>> tmp1;
        for (auto record : array) {
            std::vector<double> tmp2;
            for (auto number : record.y()) {
                tmp2.push_back(number * number);
            }
            tmp1.push_back(tmp2);
        }
        return tmp1;
        """,
    )
    assert rdf3.GetColumnType("output") == "vector<vector<double> >"

    out = ak.from_rdataframe(
        rdf3,
        columns=("output",),
    )
    # Squaring y element-wise in C++ must match the awkward-side product.
    assert out["output"].to_list() == (array["y"] * array["y"] * 1.0).to_list()

    rdf3 = rdf2.Define(
        "output2",
        """
        std::vector<std::vector<std::vector<double>>> tmp1;
        for (auto record : array) {
            std::vector<std::vector<double>> tmp2;
            // we can check if it's None:
            // if (record.x().has_value())
            // or set it to 1 so that we do not scale:
            double x_number = record.x().value_or(1);
            for (auto number : record.y()) {
                std::vector<double> tmp3;
                for (int64_t i = 0; i < std::rint(x_number); i++) {
                    double value = x_number * number;
                    tmp3.push_back(value);
                }
                tmp2.push_back(tmp3);
            }
            tmp1.push_back(tmp2);
        }
        return tmp1;
        """,
    )
    assert rdf3.GetColumnType("output2") == "vector<vector<vector<double> > >"

    out = ak.from_rdataframe(
        rdf3,
        columns=("output2",),
    )
    # Expected triple-nested result: each y value scaled by x and repeated
    # round(x) times; None x behaves like 1 (no scaling, single repeat).
    result = ak.Array(
        [
            [
                [[1.1]],  # "x" is 1 - "y" values are unchanged, and each is nesed
                [
                    [1.0],
                    [2.0],
                ],  # "x" is None - "y" values are unchanged, and each is nesed
                [
                    [3.3, 3.3, 3.3],
                    [6.6, 6.6, 6.6],
                    [9.899999999999999, 9.899999999999999, 9.899999999999999],
                ],  # "x" is 3.3 - "y" values are scaled by 3.3 and each is nesed 3 times
            ],
            [],
            [
                [
                    [1.0],
                    [2.0],
                    [3.0],
                    [4.0],
                ],  # "x" is None - "y" values are unchanged, and each is nesed
                [
                    [5.5, 5.5, 5.5, 5.5, 5.5, 5.5],
                    [11.0, 11.0, 11.0, 11.0, 11.0, 11.0],
                    [16.5, 16.5, 16.5, 16.5, 16.5, 16.5],
                    [22.0, 22.0, 22.0, 22.0, 22.0, 22.0],
                    [27.5, 27.5, 27.5, 27.5, 27.5, 27.5],
                ],  # "x" is 5.5 - "y" values are scaled by 5.5 and each is nesed 5 times
            ],
        ]
    )
    assert out["output2"].to_list() == result.to_list()
5,727 | test transform chain | # -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
import numpy as np
from numpy.testing import assert_allclose
import pytest
import vispy.visuals.transforms as tr
from vispy.geometry import Rect
from vispy.testing import run_tests_if_main
# Short aliases for the transform classes exercised below.
NT = tr.NullTransform
ST = tr.STTransform
AT = tr.MatrixTransform
RT = tr.MatrixTransform  # NOTE(review): same class as AT — presumably a rotation-flavored alias; confirm
PT = tr.PolarTransform
LT = tr.LogTransform
CT = tr.ChainTransform
def assert_chain_types(chain, types):
    """Assert that the transforms in *chain* have exactly the given types, in order."""
    actual_types = [type(t) for t in chain.transforms]
    assert actual_types == types
def assert_chain_objects(chain1, chain2):
    """Assert that the two chains hold equal transform lists, in the same order."""
    lhs = chain1.transforms
    rhs = chain2.transforms
    assert lhs == rhs
def test_multiplication():
    """Check the type-simplification algebra of transform composition (``*``).

    Composing two "simple" transforms should collapse to the most general of
    the two; composing with a non-linear transform (polar/log) must produce a
    ChainTransform that preserves order.
    """
    n = NT()
    s = ST()
    a = AT()
    p = PT()
    log_trans = LT()
    c1 = CT([s, a, p])
    assert c1
    c2 = CT([s, a, s])
    # Linear x linear collapses to a single transform of the wider type.
    assert isinstance(n * n, NT)
    assert isinstance(n * s, ST)
    assert isinstance(s * s, ST)
    assert isinstance(a * s, AT)
    assert isinstance(a * a, AT)
    assert isinstance(s * a, AT)
    # Null is absorbed; anything involving polar stays (or becomes) a chain.
    assert isinstance(n * p, PT)
    assert isinstance(s * p, CT)
    assert isinstance(a * p, CT)
    assert isinstance(p * a, CT)
    assert isinstance(p * s, CT)
    # Chains must keep operand order and merge adjacent linear transforms.
    assert_chain_types(p * a, [PT, AT])
    assert_chain_types(p * s, [PT, ST])
    assert_chain_types(s * p, [ST, PT])
    assert_chain_types(s * p * a, [ST, PT, AT])
    assert_chain_types(s * a * p, [AT, PT])
    assert_chain_types(p * s * a, [PT, ST, AT])
    assert_chain_types(s * p * s, [ST, PT, ST])
    assert_chain_types(s * a * p * s * a, [AT, PT, ST, AT])
    assert_chain_types(c2 * a, [ST, AT, ST, AT])
    assert_chain_types(p * log_trans * s, [PT, LT, ST])
def METHOD_NAME():
    """Exercise ChainTransform construction, append/prepend, simplification,
    mapping equivalence with multiplied transforms, and shader (i)map deps."""
    # Make dummy classes for easier distinguishing the transforms
    class DummyTrans(tr.BaseTransform):
        glsl_map = "vec4 trans(vec4 pos) {return pos;}"
        glsl_imap = "vec4 trans(vec4 pos) {return pos;}"
    class TransA(DummyTrans):
        pass
    class TransB(DummyTrans):
        pass
    class TransC(DummyTrans):
        pass
    # Create test transforms
    a, b, c = TransA(), TransB(), TransC()
    # Test Chain creation
    assert tr.ChainTransform().transforms == []
    assert tr.ChainTransform(a).transforms == [a]
    assert tr.ChainTransform(a, b).transforms == [a, b]
    assert tr.ChainTransform(a, b, c, a).transforms == [a, b, c, a]
    # Test composition by multiplication
    assert_chain_objects(a * b, tr.ChainTransform(a, b))
    assert_chain_objects(a * b * c, tr.ChainTransform(a, b, c))
    assert_chain_objects(a * b * c * a, tr.ChainTransform(a, b, c, a))
    # Test adding/prepending to transform
    chain = tr.ChainTransform()
    chain.append(a)
    assert chain.transforms == [a]
    chain.append(b)
    assert chain.transforms == [a, b]
    chain.append(c)
    assert chain.transforms == [a, b, c]
    chain.prepend(b)
    assert chain.transforms == [b, a, b, c]
    chain.prepend(c)
    assert chain.transforms == [c, b, a, b, c]
    # Test simplifying
    t1 = tr.STTransform(scale=(2, 3))
    t2 = tr.STTransform(translate=(3, 4))
    t3 = tr.STTransform(translate=(3, 4))
    # Create multiplied versions
    t123 = t1*t2*t3
    t321 = t3*t2*t1
    c123 = tr.ChainTransform(t1, t2, t3)
    c321 = tr.ChainTransform(t3, t2, t1)
    c123s = c123.simplified
    c321s = c321.simplified
    #
    assert isinstance(t123, tr.STTransform)  # or the test is useless
    assert isinstance(t321, tr.STTransform)  # or the test is useless
    assert isinstance(c123s, tr.ChainTransform)  # or the test is useless
    assert isinstance(c321s, tr.ChainTransform)  # or the test is useless
    # Test Mapping
    t1 = tr.STTransform(scale=(2, 3))
    t2 = tr.STTransform(translate=(3, 4))
    chain1 = tr.ChainTransform(t1, t2)
    chain2 = tr.ChainTransform(t2, t1)
    #
    assert chain1.transforms == [t1, t2]  # or the test is useless
    assert chain2.transforms == [t2, t1]  # or the test is useless
    # A chain must map exactly like the product of its members, and order matters.
    m12 = (t1*t2).map((1, 1)).tolist()
    m21 = (t2*t1).map((1, 1)).tolist()
    m12_ = chain1.map((1, 1)).tolist()
    m21_ = chain2.map((1, 1)).tolist()
    #
    # print(m12, m21, m12_, m21_)
    assert m12 != m21
    assert m12 == m12_
    assert m21 == m21_
    # Test shader map
    t1 = tr.STTransform(scale=(2, 3))
    t2 = tr.STTransform(translate=(3, 4))
    chain = tr.ChainTransform(t1, t2)
    # The chain's shader (i)map must depend on each member's shader (i)map.
    funcs = chain.shader_map().dependencies()
    funcsi = chain.shader_imap().dependencies()
    #
    assert t1.shader_map() in funcs
    assert t2.shader_map() in funcs
    assert t1.shader_imap() in funcsi
    assert t2.shader_imap() in funcsi
def test_map_rect():
    """STTransform should map a Rect by scaling then translating its pos/size."""
    r = Rect((2, 7), (13, 19))
    r1 = ST(scale=(2, 2), translate=(-10, 10)).map(r)
    # (2,7)*2 + (-10,10) = (-6,24); size (13,19)*2 = (26,38)
    assert r1 == Rect((-6, 24), (26, 38))
def test_st_transform():
    """STTransform must agree with an equivalent MatrixTransform, forward and inverse."""
    # Check that STTransform maps exactly like MatrixTransform
    pts = np.random.normal(size=(10, 4))
    # Deliberately extreme scale/translate values to stress numeric behavior.
    scale = (1, 7.5, -4e-8)
    translate = (1e6, 0.2, 0)
    st = tr.STTransform(scale=scale, translate=translate)
    at = tr.MatrixTransform()
    at.scale(scale)
    at.translate(translate)
    assert np.allclose(st.map(pts), at.map(pts))
    assert np.allclose(st.inverse.map(pts), at.inverse.map(pts))
def test_st_mapping():
    """set_mapping should derive the scale/translate taking points p1 onto p2."""
    p1 = [[5., 7.], [23., 8.]]
    p2 = [[-1.3, -1.4], [1.1, 1.2]]
    t = tr.STTransform()
    t.set_mapping(p1, p2)
    # NOTE(review): len(p2) is the *point count* (2), which only coincidentally
    # equals the coordinate dimension here — confirm the intended slice width.
    assert np.allclose(t.map(p1)[:, :len(p2)], p2)
def test_affine_mapping():
    """set_mapping on MatrixTransform should recover translation, scaling,
    scale+translate, and a general affine map from 4 point correspondences."""
    t = tr.MatrixTransform()
    p1 = np.array([[0, 0, 0],
                   [1, 0, 0],
                   [0, 1, 0],
                   [0, 0, 1]])
    # test pure translation
    p2 = p1 + 5.5
    t.set_mapping(p1, p2)
    assert np.allclose(t.map(p1)[:, :p2.shape[1]], p2)
    # test pure scaling
    p2 = p1 * 5.5
    t.set_mapping(p1, p2)
    assert np.allclose(t.map(p1)[:, :p2.shape[1]], p2)
    # test scale + translate
    p2 = (p1 * 5.5) + 3.5
    t.set_mapping(p1, p2)
    assert np.allclose(t.map(p1)[:, :p2.shape[1]], p2)
    # test SRT
    p2 = np.array([[10, 5, 3],
                   [10, 15, 3],
                   [30, 5, 3],
                   [10, 5, 3.5]])
    t.set_mapping(p1, p2)
    assert np.allclose(t.map(p1)[:, :p2.shape[1]], p2)
# Fixed-seed random 4x4 matrix and a set of representative transforms used to
# parametrize test_inverse below.
m = np.random.RandomState(0).normal(size=(4, 4))
transforms = [
    NT(),
    ST(scale=(1e-4, 2e5), translate=(10, -6e9)),
    AT(m),
    RT(m),  # NOTE(review): RT is the same class as AT, so this duplicates AT(m) — confirm intent
]
@pytest.mark.parametrize('trn', transforms)
def test_inverse(trn):
    """Round-tripping points through trn then trn.inverse must be the identity,
    over positions spanning many orders of magnitude."""
    rng = np.random.RandomState(0)
    N = 20
    x = rng.normal(size=(N, 3))
    pw = rng.normal(size=(N, 3), scale=3)
    pos = x * 10 ** pw
    assert_allclose(pos, trn.inverse.map(trn.map(pos))[:, :3], atol=1e-7)
    # log transform only works on positive values
    # abs_pos = np.abs(pos)
    # tr = LT(base=(2, 4.5, 0))
    # assert np.allclose(abs_pos, tr.inverse.map(tr.map(abs_pos))[:,:3])
run_tests_if_main() |
5,728 | get layout gnames | from ..faces import RectFace, TextFace
from collections import OrderedDict
__all__ = [ "get_layout_sciname", "get_layout_gnames", "get_layout_ogs_egg5",
"get_layout_evoltype", "get_layout_lca_rects" ]
def get_level(node, level=0):
    """Return the depth of *node* (root is 0), offset by the starting *level*."""
    depth = level
    current = node
    while not current.is_root:
        current = current.up
        depth += 1
    return depth
def get_layout_sciname():
    """Return a layout function showing each leaf's scientific name and sequence
    name, and a short name summary on collapsed internal nodes."""
    def summary(nodes):
        "Return a list of names summarizing the given list of nodes"
        return list(OrderedDict((first_name(node), None) for node in nodes).keys())

    def first_name(tree):
        "Return the sci_name of the first leaf found in a preorder traversal"
        # Fix: stop at the first leaf instead of collecting every leaf name
        # of the subtree only to return the first one.  An empty generator
        # still raises StopIteration, matching the previous next(iter(...)).
        return next(node.props.get('sci_name')
                    for node in tree.traverse('preorder') if node.is_leaf)

    def layout_fn(node):
        if node.is_leaf:
            sci_name = node.props.get('sci_name')
            # Leaf names look like "<prefix>.<sequence id>"; show only the tail.
            name_seq = node.name.split('.', 1)[1]
            node.add_face(TextFace(sci_name, color='black', padding_x=2),
                          column=0, position="branch_right")
            node.add_face(TextFace(name_seq, color='grey', padding_x=2),
                          column=1, position="branch_right")
        else:
            # Collapsed face: show at most six entries, eliding the middle.
            names = summary(node.children)
            texts = names if len(names) < 6 else (names[:3] + ['...'] + names[-2:])
            for text in texts:  # (previous enumerate index was unused)
                node.add_face(TextFace(text, padding_x=2),
                              position="branch_right", column=1, collapsed_only=True)
    layout_fn.__name__ = 'Scientific name'
    layout_fn.contains_aligned_face = True
    return layout_fn
def METHOD_NAME():
    """Return a layout function showing a node's gene name as an aligned text face."""
    def layout_fn(node):
        gene_name = node.props.get('gname')
        if not gene_name:
            return
        face = TextFace(gene_name, color=node.props.get('gname_color'))
        if node.is_leaf:
            node.add_face(face, column=1, position="aligned")
        else:
            # Internal nodes only show the face when rendered collapsed.
            node.add_face(face, column=1, position="aligned", collapsed_only=True)
    layout_fn.__name__ = 'Gene names'
    layout_fn.contains_aligned_face = True
    return layout_fn
def get_layout_ogs_egg5():
    """Return a layout drawing a colored square for nodes flagged as eggNOG v5 OGs."""
    def layout_fn(node):
        if not node.props.get('og_egg5'):
            return
        square = RectFace(10, 10, color=node.props.get('og_egg5_color'))
        if node.is_leaf:
            node.add_face(square, column=2, position="aligned")
        else:
            # Internal nodes only show the face when rendered collapsed.
            node.add_face(square, column=2, position="aligned", collapsed_only=True)
    layout_fn.__name__ = 'OGs egg5'
    layout_fn.contains_aligned_face = True
    return layout_fn
def get_layout_evoltype():
    """Return a layout coloring internal nodes by evolutionary event type.

    Dots: blue = speciation (S), red = duplication (D), coral = FD.  Nodes
    marked as orthologous groups get a larger dot in the LCA color, preferring
    the parent's color when the parent defines an LCA.
    """
    event_colors = {'S': 'blue', 'D': 'red', 'FD': 'Coral'}
    def layout_fn(node):
        if not node.is_leaf:
            event = node.props.get('evoltype_2')
            if event in event_colors:
                node.sm_style["fgcolor"] = event_colors[event]
                node.sm_style["size"] = 2
            if node.props.get('is_og'):
                node.sm_style['size'] = 5
                # Fix: the root has no parent — guard against node.up being
                # None before dereferencing it (previously an AttributeError).
                if node.up is not None and node.up.props.get('lca'):
                    color = node.up.props.get('Lca_color')
                else:
                    color = node.props.get('Lca_color')
                node.sm_style["fgcolor"] = color
    layout_fn.__name__ = 'Evolution events'
    return layout_fn
def get_layout_lca_rects():
    """Return a layout drawing a full-height labeled rectangle for LCA annotations."""
    def layout_fn(node):
        lca = node.props.get('lca_node_name')
        if not lca:
            return
        color = node.props.get('Lca_color')
        column = get_level(node)
        face = RectFace(15, float('inf'),
                        color=color,
                        text=lca,
                        fgcolor="white",
                        padding_x=1, padding_y=1)
        face.rotate_text = True
        # Attach the face for both the expanded and the collapsed rendering.
        node.add_face(face, position='aligned', column=column)
        node.add_face(face, position='aligned', column=column,
                      collapsed_only=True)
    layout_fn.__name__ = 'Last common ancestor'
    layout_fn.contains_aligned_face = True
    return layout_fn
5,729 | test weird floats | from enum import Enum, IntEnum
from math import isnan
from test.test_json import PyTest, CTest
# Integer fixtures spanning 32/64/96-bit boundaries to exercise big-int dumping.
SMALL = 1
BIG = 1<<32
HUGE = 1<<64
REALLY_HUGE = 1<<96
class BigNum(IntEnum):
    # IntEnum members should serialize to JSON exactly like their int values.
    small = SMALL
    big = BIG
    huge = HUGE
    really_huge = REALLY_HUGE
# Truncated float constants used as FloatNum member values.
E = 2.718281
PI = 3.141593
TAU = 2 * PI
class FloatNum(float, Enum):
    # float-mixin Enum: members compare and serialize like plain floats.
    e = E
    pi = PI
    tau = TAU
# Non-finite float fixtures; json emits these as Infinity/-Infinity/NaN.
INF = float('inf')
NEG_INF = float('-inf')
NAN = float('nan')
class WierdNum(float, Enum):
    # NOTE(review): "Wierd" spelling is pre-existing; renaming would only churn references.
    inf = INF
    neg_inf = NEG_INF
    nan = NAN
class TestEnum:
    """Cases checking that enum members round-trip through json exactly like
    their underlying int/float values.

    ``self.dumps``/``self.loads`` are supplied by the PyTest/CTest mixins, so
    the same cases run against both json implementations.
    """
    def test_floats(self):
        # A float enum must dump as repr() of its value and round-trip equal.
        for enum in FloatNum:
            self.assertEqual(self.dumps(enum), repr(enum.value))
            self.assertEqual(float(self.dumps(enum)), enum)
            self.assertEqual(self.loads(self.dumps(enum)), enum)
    def METHOD_NAME(self):
        # Non-finite members dump as the JSON extensions Infinity/-Infinity/NaN.
        for enum, expected in zip(WierdNum, ('Infinity', '-Infinity', 'NaN')):
            self.assertEqual(self.dumps(enum), expected)
            if not isnan(enum):
                self.assertEqual(float(self.dumps(enum)), enum)
                self.assertEqual(self.loads(self.dumps(enum)), enum)
            else:
                # NaN never compares equal, so check via isnan instead.
                self.assertTrue(isnan(float(self.dumps(enum))))
                self.assertTrue(isnan(self.loads(self.dumps(enum))))
    def test_ints(self):
        # IntEnum members must dump as the decimal string of their value.
        for enum in BigNum:
            self.assertEqual(self.dumps(enum), str(enum.value))
            self.assertEqual(int(self.dumps(enum)), enum)
            self.assertEqual(self.loads(self.dumps(enum)), enum)
    def test_list(self):
        # Lists of enums must serialize like lists of the underlying values.
        self.assertEqual(self.dumps(list(BigNum)),
                         str([SMALL, BIG, HUGE, REALLY_HUGE]))
        self.assertEqual(self.loads(self.dumps(list(BigNum))),
                         list(BigNum))
        self.assertEqual(self.dumps(list(FloatNum)),
                         str([E, PI, TAU]))
        self.assertEqual(self.loads(self.dumps(list(FloatNum))),
                         list(FloatNum))
        self.assertEqual(self.dumps(list(WierdNum)),
                         '[Infinity, -Infinity, NaN]')
        # NaN handled separately below because it is not equal to itself.
        self.assertEqual(self.loads(self.dumps(list(WierdNum)))[:2],
                         list(WierdNum)[:2])
        self.assertTrue(isnan(self.loads(self.dumps(list(WierdNum)))[2]))
    def test_dict_keys(self):
        # Enum dict keys are stringified like their plain-value counterparts.
        s, b, h, r = BigNum
        e, p, t = FloatNum
        i, j, n = WierdNum
        d = {
            s:'tiny', b:'large', h:'larger', r:'largest',
            e:"Euler's number", p:'pi', t:'tau',
            i:'Infinity', j:'-Infinity', n:'NaN',
        }
        nd = self.loads(self.dumps(d))
        self.assertEqual(nd[str(SMALL)], 'tiny')
        self.assertEqual(nd[str(BIG)], 'large')
        self.assertEqual(nd[str(HUGE)], 'larger')
        self.assertEqual(nd[str(REALLY_HUGE)], 'largest')
        self.assertEqual(nd[repr(E)], "Euler's number")
        self.assertEqual(nd[repr(PI)], 'pi')
        self.assertEqual(nd[repr(TAU)], 'tau')
        self.assertEqual(nd['Infinity'], 'Infinity')
        self.assertEqual(nd['-Infinity'], '-Infinity')
        self.assertEqual(nd['NaN'], 'NaN')
    def test_dict_values(self):
        # Enum dict values deserialize back to the underlying numbers.
        d = dict(
            tiny=BigNum.small,
            large=BigNum.big,
            larger=BigNum.huge,
            largest=BigNum.really_huge,
            e=FloatNum.e,
            pi=FloatNum.pi,
            tau=FloatNum.tau,
            i=WierdNum.inf,
            j=WierdNum.neg_inf,
            n=WierdNum.nan,
        )
        nd = self.loads(self.dumps(d))
        self.assertEqual(nd['tiny'], SMALL)
        self.assertEqual(nd['large'], BIG)
        self.assertEqual(nd['larger'], HUGE)
        self.assertEqual(nd['largest'], REALLY_HUGE)
        self.assertEqual(nd['e'], E)
        self.assertEqual(nd['pi'], PI)
        self.assertEqual(nd['tau'], TAU)
        self.assertEqual(nd['i'], INF)
        self.assertEqual(nd['j'], NEG_INF)
        self.assertTrue(isnan(nd['n']))
# Bind the shared cases to the pure-Python json implementation.
class TestPyEnum(TestEnum, PyTest): pass
class TestCEnum(TestEnum, CTest): pass |
5,730 | test firewall rules not equal | # Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from ..azure_common import BaseTest, arm_template, cassette_name
class EventHubTest(BaseTest):
    """Tests for the ``azure.eventhub`` Cloud Custodian resource.

    The firewall tests replay recorded API responses from the shared
    'firewall' cassette; the recorded namespace allows 11.0.0.0/24 and
    10.1.1.1/32, which the include/equal variants assert against.
    """
    def test_event_hub_schema_validate(self):
        # Policy schema for the resource must validate without hitting Azure.
        with self.sign_out_patch():
            p = self.load_policy({
                'name': 'test-event-hub-compliance',
                'resource': 'azure.eventhub'
            }, validate=True)
            self.assertTrue(p)
    @arm_template('eventhub.json')
    def test_find_by_name(self):
        # The ARM-deployed namespace should be discoverable by name suffix.
        p = self.load_policy({
            'name': 'test-azure-eventhub',
            'resource': 'azure.eventhub',
            'filters': [
                {'type': 'value',
                 'key': 'name',
                 'op': 'contains',
                 'value_type': 'normalize',
                 'value': '-cctesteventhubns'}],
        })
        resources = p.run()
        self.assertEqual(len(resources), 1)
    @cassette_name('firewall')
    def test_firewall_rules_include_cidr(self):
        # 'include': resource matches when its rules cover the given CIDR.
        p = self.load_policy({
            'name': 'test-azure-eventhub',
            'resource': 'azure.eventhub',
            'filters': [
                {'type': 'value',
                 'key': 'name',
                 'op': 'contains',
                 'value_type': 'normalize',
                 'value': '-cctesteventhubns'},
                {'type': 'firewall-rules',
                 'include': ['11.0.0.0/24']}],
        })
        resources = p.run()
        self.assertEqual(len(resources), 1)
    @cassette_name('firewall')
    def test_firewall_rules_not_include_cidr(self):
        # A CIDR outside the recorded rules must not match.
        p = self.load_policy({
            'name': 'test-azure-eventhub',
            'resource': 'azure.eventhub',
            'filters': [
                {'type': 'value',
                 'key': 'name',
                 'op': 'contains',
                 'value_type': 'normalize',
                 'value': '-cctesteventhubns'},
                {'type': 'firewall-rules',
                 'include': ['11.0.1.0/24']}],
        })
        resources = p.run()
        self.assertEqual(len(resources), 0)
    @cassette_name('firewall')
    def test_firewall_rules_ranges(self):
        # 'include' also accepts dashed IP ranges equivalent to the CIDR above.
        p = self.load_policy({
            'name': 'test-azure-eventhub',
            'resource': 'azure.eventhub',
            'filters': [
                {'type': 'value',
                 'key': 'name',
                 'op': 'contains',
                 'value_type': 'normalize',
                 'value': '-cctesteventhubns'},
                {'type': 'firewall-rules',
                 'include': ['11.0.0.0-11.0.0.255']}],
        }, validate=True)
        resources = p.run()
        self.assertEqual(1, len(resources))
    @cassette_name('firewall')
    def test_firewall_rules_not_ranges(self):
        # A range outside the recorded rules must not match.
        p = self.load_policy({
            'name': 'test-azure-eventhub',
            'resource': 'azure.eventhub',
            'filters': [
                {'type': 'value',
                 'key': 'name',
                 'op': 'contains',
                 'value_type': 'normalize',
                 'value': '-cctesteventhubns'},
                {'type': 'firewall-rules',
                 'include': ['11.0.1.0-11.0.1.255']}],
        }, validate=True)
        resources = p.run()
        self.assertEqual(0, len(resources))
    @cassette_name('firewall')
    def test_firewall_rules_equal(self):
        # 'equal': the rule set must match the given list exactly.
        p = self.load_policy({
            'name': 'test-azure-eventhub',
            'resource': 'azure.eventhub',
            'filters': [
                {'type': 'value',
                 'key': 'name',
                 'op': 'contains',
                 'value_type': 'normalize',
                 'value': '-cctesteventhubns'},
                {'type': 'firewall-rules',
                 'equal': ['11.0.0.0/24', '10.1.1.1/32']}],
        }, validate=True)
        resources = p.run()
        self.assertEqual(1, len(resources))
    @cassette_name('firewall')
    def METHOD_NAME(self):
        # 'equal' with a non-matching rule set must yield no resources.
        p = self.load_policy({
            'name': 'test-azure-eventhub',
            'resource': 'azure.eventhub',
            'filters': [
                {'type': 'value',
                 'key': 'name',
                 'op': 'contains',
                 'value_type': 'normalize',
                 'value': '-cctesteventhubns'},
                {'type': 'firewall-rules',
                 'equal': ['11.0.1.0/24', '10.1.1.1/32']}],
        }, validate=True)
        resources = p.run()
        self.assertEqual(0, len(resources)) |
5,731 | get offset pairs | from __future__ import annotations
import logging
from typing import NamedTuple, TYPE_CHECKING
from analysis.YaraPluginBase import YaraBasePlugin
from helperFunctions.tag import TagColor
from ..internal.key_parser import read_asn1_key, read_pkcs_cert, read_ssl_cert
if TYPE_CHECKING:
from collections.abc import Callable
class Match(NamedTuple):
    # One yara string match: byte offset into the binary, the rule-string
    # label (e.g. '$start_string'), and the matched text itself.
    offset: int
    label: str
    matched_string: str
class AnalysisPlugin(YaraBasePlugin):
    """
    Searches for known Crypto material (e.g., public and private keys)
    """

    NAME = 'crypto_material'
    DESCRIPTION = 'detects crypto material like SSH keys and SSL certificates'
    VERSION = '0.5.2'
    MIME_BLACKLIST = ['filesystem']  # noqa: RUF012
    FILE = __file__
    # Rules whose matches come as explicit start/end marker pairs.
    STARTEND = [  # noqa: RUF012
        'PgpPublicKeyBlock',
        'PgpPrivateKeyBlock',
        'PgpPublicKeyBlock_GnuPG',
        'genericPublicKey',
        'SshRsaPrivateKeyBlock',
        'SshEncryptedRsaPrivateKeyBlock',
        'SSLPrivateKey',
    ]
    # Rules that only match a start marker (single-line key material).
    STARTONLY = ['SshRsaPublicKeyBlock']  # noqa: RUF012
    PKCS8 = 'Pkcs8PrivateKey'
    PKCS12 = 'Pkcs12Certificate'
    SSLCERT = 'SSLCertificate'

    def process_object(self, file_object):
        """Run the yara scan (base class), convert raw matches into extracted
        crypto material, and tag files that contain private keys."""
        file_object = super().process_object(file_object)
        yara_results = file_object.processed_analysis[self.NAME]
        analysis_result = self.convert_yara_result(yara_results, file_object.binary)
        # The summary is simply the list of matched rule names.
        analysis_result['summary'] = list(analysis_result)
        file_object.processed_analysis[self.NAME] = analysis_result
        self._add_private_key_tag(file_object, analysis_result)
        return file_object

    def convert_yara_result(self, yara_results, binary):
        """Map each matched rule to the crypto material extracted from *binary*."""
        analysis_result = {}
        for matching_rule in yara_results.get('summary', []):
            matches = [Match(*t) for t in yara_results[matching_rule]['strings']]
            # Pairing logic below relies on matches being in offset order.
            matches.sort(key=lambda m: m.offset)
            parsing_function = self._get_parsing_function(matching_rule)
            if not parsing_function:
                continue
            crypto_items = parsing_function(matches=matches, binary=binary)
            if crypto_items:
                analysis_result[matching_rule] = {'material': crypto_items, 'count': len(crypto_items)}
        return analysis_result

    def _get_parsing_function(self, match: str) -> Callable | None:
        """Select the extraction routine for a rule name; None if unknown."""
        if match in self.STARTEND:
            return self.extract_labeled_keys
        if match in self.STARTONLY:
            return self.extract_start_only_key
        if match == self.PKCS8:
            return self.get_pkcs8_key
        if match == self.PKCS12:
            return self.get_pkcs12_cert
        if match == self.SSLCERT:
            return self.get_ssl_cert
        logging.warning(f'Unknown crypto rule match: {match}')
        return None

    def extract_labeled_keys(self, matches: list[Match], binary, min_key_len=128) -> list[str]:
        """Cut start/end-delimited key blocks out of *binary*, skipping spans
        shorter than *min_key_len* bytes (too short to be real key material)."""
        return [
            binary[start:end].decode(encoding='utf_8', errors='replace')
            for start, end in self.METHOD_NAME(matches)
            if end - start > min_key_len
        ]

    @staticmethod
    def extract_start_only_key(matches: list[Match], **_) -> list[str]:
        """Return the matched text of every '$start_string' match as-is."""
        return [match.matched_string for match in matches if match.label == '$start_string']

    @staticmethod
    def get_pkcs8_key(matches: list[Match], binary=None) -> list[str]:
        """Parse a PKCS#8 ASN.1 key at each match offset; skip unparseable ones."""
        keys = []
        for match in matches:
            key = read_asn1_key(binary=binary, offset=match.offset)
            if key is not None:
                keys.append(key)
        return keys

    @staticmethod
    def get_pkcs12_cert(matches: list[Match], binary=None) -> list[str]:
        """Parse a PKCS#12 certificate at each match offset; skip unparseable ones."""
        keys = []
        for match in matches:
            text_cert = read_pkcs_cert(binary=binary, offset=match.offset)
            if text_cert is not None:
                keys.append(text_cert)
        return keys

    def get_ssl_cert(self, matches: list[Match], binary=None) -> list[str]:
        """Parse the SSL certificates delimited by paired start/end matches."""
        contents = []
        for pair in self.METHOD_NAME(matches):
            start_index, end_index = pair
            text_cert = read_ssl_cert(binary=binary, start=start_index, end=end_index)
            if text_cert is not None:
                contents.append(text_cert)
        return contents

    @staticmethod
    def METHOD_NAME(matches: list[Match]):
        """Pair up start/end offsets from the (offset-sorted) match list.

        Recognizes three shapes: plain start+end, the 3-match GnuPG variant,
        and the 4-match encrypted-PEM variant (Proc-Type/DEK-Info headers).
        """
        pairs = []
        for index in range(len(matches) - 1):
            if _is_consecutive_key_block(matches, index):
                pairs.append((matches[index].offset, _calculate_end_index(matches[index + 1])))
            elif _is_consecutive_pgp_block(matches, index):
                pairs.append((matches[index].offset, _calculate_end_index(matches[index + 2])))
            elif _is_consecutive_encrypted_key(matches, index):
                pairs.append((matches[index].offset, _calculate_end_index(matches[index + 3])))
        return pairs

    def _add_private_key_tag(self, file_object, result):
        """Propagate an orange 'private key' tag if any matched rule mentions one."""
        if any('private' in key.lower() for key in result):
            self.add_analysis_tag(
                file_object=file_object,
                tag_name='private_key_inside',
                value='Private Key Found',
                color=TagColor.ORANGE,
                propagate=True,
            )
def _is_consecutive_key_block(matches: list[Match], index: int) -> bool:
return matches[index].label == '$start_string' and matches[index + 1].label == '$end_string'
def _is_consecutive_pgp_block(matches: list[Match], index: int) -> bool:
return (
matches[index].label == '$start_string'
and matches[index + 1].label == '$gnupg_version_string'
and len(matches) > index + 2
and matches[index + 2].label == '$end_string'
)
def _is_consecutive_encrypted_key(matches: list[Match], index: int) -> bool:
return (
len(matches) > index + 3
and matches[index].label == '$start_string'
and matches[index + 1].label == '$proc_type'
and matches[index + 2].label == '$dek_info'
and matches[index + 3].label == '$end_string'
)
def _calculate_end_index(match: Match) -> int:
return match.offset + len(match.matched_string) |
5,732 | get run command | #!/usr/bin/env python3
import os
import logging
import subprocess
import sys
from github import Github
from env_helper import TEMP_PATH, REPO_COPY, REPORTS_PATH
from s3_helper import S3Helper
from get_robot_token import get_best_robot_token
from pr_info import PRInfo
from build_download_helper import download_shared_build
from upload_result_helper import upload_results
from docker_pull_helper import get_image_with_version
from commit_status_helper import post_commit_status
from clickhouse_helper import ClickHouseHelper, prepare_tests_results_for_clickhouse
from stopwatch import Stopwatch
from rerun_helper import RerunHelper
# Docker image running the smoke test, retry budget for artifact downloads,
# the log file the container writes, and the CI check name reported to GitHub.
DOCKER_IMAGE = "clickhouse/split-build-smoke-test"
DOWNLOAD_RETRIES_COUNT = 5
RESULT_LOG_NAME = "run.log"
CHECK_NAME = "Split build smoke test"
def process_result(result_folder, server_log_folder):
    """Derive (status, description, summary, log_paths) from the smoke-test output.

    The run counts as a success only when the first line of run.log is "OK";
    only log files that actually exist are returned for upload.
    """
    status = "success"
    description = "Server started and responded"
    summary = [("Smoke test", "OK")]
    with open(os.path.join(result_folder, RESULT_LOG_NAME), "r") as run_log:
        lines = run_log.read().split("\n")
    if not lines or lines[0].strip() != "OK":
        status = "failure"
        logging.info("Lines is not ok: %s", str("\n".join(lines)))
        summary = [("Smoke test", "FAIL")]
        description = "Server failed to respond, see result in logs"
    candidates = (
        os.path.join(server_log_folder, "clickhouse-server.log"),
        os.path.join(result_folder, "stderr.log"),
        os.path.join(result_folder, "clientstderr.log"),
        os.path.join(result_folder, RESULT_LOG_NAME),
    )
    result_logs = [path for path in candidates if os.path.exists(path)]
    return status, description, summary, result_logs
def METHOD_NAME(build_path, result_folder, server_log_folder, docker_image):
    """Build the docker command line that runs the smoke-test image, mounting
    the package, server-log and output folders and redirecting stdout to run.log."""
    pieces = [
        f"docker run --network=host --volume={build_path}:/package_folder",
        f" --volume={server_log_folder}:/var/log/clickhouse-server",
        f" --volume={result_folder}:/test_output",
        f" {docker_image} >{result_folder}/{RESULT_LOG_NAME}",
    ]
    return "".join(pieces)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
stopwatch = Stopwatch()
temp_path = TEMP_PATH
repo_path = REPO_COPY
reports_path = REPORTS_PATH
pr_info = PRInfo()
gh = Github(get_best_robot_token())
rerun_helper = RerunHelper(gh, pr_info, CHECK_NAME)
if rerun_helper.is_already_finished_by_status():
logging.info("Check is already finished according to github status, exiting")
sys.exit(0)
for root, _, files in os.walk(reports_path):
for f in files:
if f == "changed_images.json":
images_path = os.path.join(root, "changed_images.json")
break
docker_image = get_image_with_version(reports_path, DOCKER_IMAGE)
packages_path = os.path.join(temp_path, "packages")
if not os.path.exists(packages_path):
os.makedirs(packages_path)
download_shared_build(CHECK_NAME, reports_path, packages_path)
server_log_path = os.path.join(temp_path, "server_log")
if not os.path.exists(server_log_path):
os.makedirs(server_log_path)
result_path = os.path.join(temp_path, "result_path")
if not os.path.exists(result_path):
os.makedirs(result_path)
run_command = METHOD_NAME(
packages_path, result_path, server_log_path, docker_image
)
logging.info("Going to run command %s", run_command)
with subprocess.Popen(run_command, shell=True) as process:
retcode = process.wait()
if retcode == 0:
logging.info("Run successfully")
else:
logging.info("Run failed")
subprocess.check_call(f"sudo chown -R ubuntu:ubuntu {temp_path}", shell=True)
print("Result path", os.listdir(result_path))
print("Server log path", os.listdir(server_log_path))
state, description, test_results, additional_logs = process_result(
result_path, server_log_path
)
ch_helper = ClickHouseHelper()
s3_helper = S3Helper("https://s3.amazonaws.com")
report_url = upload_results(
s3_helper,
pr_info.number,
pr_info.sha,
test_results,
additional_logs,
CHECK_NAME,
)
print(f"::notice ::Report url: {report_url}")
post_commit_status(gh, pr_info.sha, CHECK_NAME, description, state, report_url)
prepared_events = prepare_tests_results_for_clickhouse(
pr_info,
test_results,
state,
stopwatch.duration_seconds,
stopwatch.start_time_str,
report_url,
CHECK_NAME,
)
ch_helper.insert_events_into(db="default", table="checks", events=prepared_events)
if state == "error":
sys.exit(1) |
5,733 | create fom | #!/usr/bin/env python
# This file is part of the pyMOR project (https://www.pymor.org).
# Copyright pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (https://opensource.org/licenses/BSD-2-Clause)
import numpy as np
from typer import Argument, run
from pymor.basic import *
def main(
    grid_intervals: int = Argument(..., help='Grid interval count.'),
    training_samples: int = Argument(..., help='Number of samples used for training the reduced basis.')
):
    """Example script for solving linear PDE-constrained parameter optimization problems."""
    fom, mu_bar = METHOD_NAME(grid_intervals)
    # Optimize over diffusion parameters in [0, pi]^2.
    parameter_space = fom.parameters.space(0, np.pi)
    ranges = parameter_space.ranges['diffusion']
    initial_guess = fom.parameters.parse([0.25, 0.5])
    def fom_objective_functional(mu):
        return fom.output(mu)[0, 0]
    def fom_gradient_of_functional(mu):
        # Adjoint approach: one extra solve instead of one per parameter.
        return fom.output_d_mu(fom.parameters.parse(mu), return_array=True, use_adjoint=True)
    from functools import partial
    from time import perf_counter
    from scipy.optimize import minimize
    # --- Reference optimization with the full-order model (FOM). ---
    opt_fom_minimization_data = {'num_evals': 0,
                                 'evaluations': [],
                                 'evaluation_points': [],
                                 'time': np.inf}
    tic = perf_counter()
    opt_fom_result = minimize(partial(record_results, fom_objective_functional,
                                      fom.parameters.parse, opt_fom_minimization_data),
                              initial_guess.to_numpy(),
                              method='L-BFGS-B',
                              jac=fom_gradient_of_functional,
                              bounds=(ranges, ranges),
                              options={'ftol': 1e-15})
    opt_fom_minimization_data['time'] = perf_counter()-tic
    reference_mu = opt_fom_result.x
    # --- Build a certified reduced-order model (ROM) via weak greedy. ---
    from pymor.algorithms.greedy import rb_greedy
    from pymor.parameters.functionals import MinThetaParameterFunctional
    from pymor.reductors.coercive import CoerciveRBReductor
    coercivity_estimator = MinThetaParameterFunctional(fom.operator.coefficients, mu_bar)
    training_set = parameter_space.sample_uniformly(training_samples)
    RB_reductor = CoerciveRBReductor(fom, product=fom.energy_product, coercivity_estimator=coercivity_estimator)
    RB_greedy_data = rb_greedy(fom, RB_reductor, training_set, atol=1e-2)
    rom = RB_greedy_data['rom']
    def rom_objective_functional(mu):
        return rom.output(mu)[0, 0]
    def rom_gradient_of_functional(mu):
        return rom.output_d_mu(fom.parameters.parse(mu), return_array=True, use_adjoint=True)
    # --- Repeat the optimization with the cheap ROM and compare. ---
    opt_rom_minimization_data = {'num_evals': 0,
                                 'evaluations': [],
                                 'evaluation_points': [],
                                 'time': np.inf,
                                 'offline_time': RB_greedy_data['time']}
    tic = perf_counter()
    opt_rom_result = minimize(partial(record_results, rom_objective_functional, fom.parameters.parse,
                                      opt_rom_minimization_data),
                              initial_guess.to_numpy(),
                              method='L-BFGS-B',
                              jac=rom_gradient_of_functional,
                              bounds=(ranges, ranges),
                              options={'ftol': 1e-15})
    opt_rom_minimization_data['time'] = perf_counter()-tic
    print('\nResult of optimization with FOM model and adjoint gradient')
    report(opt_fom_result, fom.parameters.parse, opt_fom_minimization_data, reference_mu)
    print('Result of optimization with ROM model and adjoint gradient')
    report(opt_rom_result, fom.parameters.parse, opt_rom_minimization_data, reference_mu)
def METHOD_NAME(grid_intervals, output_type='l2', vector_valued_output=False):
    """Discretize the parametric diffusion problem on [-1,1]^2 and return
    (fom, mu_bar), where mu_bar is the parameter used for the energy product."""
    domain = RectDomain(([-1, -1], [1, 1]))
    # Two small square sub-domains carry the second diffusion coefficient.
    indicator_domain = ExpressionFunction(
        '(-2/3. <= x[0] <= -1/3.) * (-2/3. <= x[1] <= -1/3.) * 1. \
        + (-2/3. <= x[0] <= -1/3.) * (1/3. <= x[1] <= 2/3.) * 1.',
        dim_domain=2)
    rest_of_domain = ConstantFunction(1, 2) - indicator_domain
    f = ExpressionFunction('0.5*pi*pi*cos(0.5*pi*x[0])*cos(0.5*pi*x[1])', dim_domain=2)
    parameters = {'diffusion': 2}
    # Parameter functionals with explicit derivatives (needed for output_d_mu).
    thetas = [
        ExpressionParameterFunctional(
            '1.1 + sin(diffusion[0])*diffusion[1]',
            parameters,
            derivative_expressions={'diffusion': ['cos(diffusion[0])*diffusion[1]',
                                                  'sin(diffusion[0])']},
        ),
        ExpressionParameterFunctional(
            '1.1 + sin(diffusion[1])',
            parameters,
            derivative_expressions={'diffusion': ['0', 'cos(diffusion[1])']},
        ),
    ]
    diffusion = LincombFunction([rest_of_domain, indicator_domain], thetas)
    theta_J = ExpressionParameterFunctional('1 + 1/5 * diffusion[0] + 1/5 * diffusion[1]', parameters,
                                            derivative_expressions={'diffusion': ['1/5', '1/5']})
    if vector_valued_output:
        problem = StationaryProblem(
            domain, f, diffusion, outputs=[(output_type, f * theta_J), (output_type, f * 0.5 * theta_J)]
        )
    else:
        problem = StationaryProblem(domain, f, diffusion, outputs=[(output_type, f * theta_J)])
    print('Discretize ...')
    # Energy product assembled at mu_bar; also used by the coercivity estimator.
    mu_bar = problem.parameters.parse([np.pi/2, np.pi/2])
    fom, _ = discretize_stationary_cg(problem, diameter=1. / grid_intervals, mu_energy_product=mu_bar)
    return fom, mu_bar
def record_results(function, parse, data, mu):
    """Evaluate *function* at *mu*, recording the call in *data*; return the value.

    Used as an objective wrapper so scipy.optimize.minimize runs are traceable.
    """
    value = function(mu)
    data['num_evals'] = data['num_evals'] + 1
    data['evaluation_points'].append(parse(mu).to_numpy())
    data['evaluations'].append(value)
    print('.', end='')  # cheap progress indicator
    return value
def report(result, parse, data, reference_mu):
    """Print a short optimization summary: optimum, error vs. reference, counts, timings.

    Note: the detail lines are printed even when the optimizer reports failure;
    only the success/failure headline differs.
    """
    succeeded = result.status == 0
    print('\n succeeded!' if succeeded else '\n failed!')
    print(' mu_min: {}'.format(parse(result.x)))
    print(' J(mu_min): {}'.format(result.fun))
    print(' absolute error w.r.t. reference solution: {:.2e}'.format(np.linalg.norm(result.x - reference_mu)))
    print(' num iterations: {}'.format(result.nit))
    print(' num function calls: {}'.format(data['num_evals']))
    print(' time: {:.5f} seconds'.format(data['time']))
    if 'offline_time' in data:
        print(' offline time: {:.5f} seconds'.format(data['offline_time']))
    print('')
if __name__ == '__main__':
run(main) |
5,734 | test key and id | import logging
import unittest
from io import StringIO
from typing import Optional
from jsonasobj2 import as_json
from linkml.utils.schemaloader import SchemaLoader
from tests.test_utils.environment import env
from tests.utils.filters import json_metadata_filter
from tests.utils.test_environment import TestEnvironmentTestCase
class SchemaLoaderTestCase(TestEnvironmentTestCase):
env = env
    def eval_loader(
        self,
        base_name: str,
        logger: Optional[logging.Logger] = None,
        source: Optional[str] = None,
    ) -> None:
        """Load <base_name>.yaml (or *source*) and compare/generate the expected
        resolved-schema JSON and the loader's error synopsis against fixtures."""
        loader = SchemaLoader(source or self.env.input_path(base_name + ".yaml"), logger=logger)
        # Resolved schema as JSON, filtered to drop volatile metadata.
        self.env.generate_single_file(
            base_name + ".json",
            lambda: as_json(loader.resolve()),
            filtr=json_metadata_filter,
            value_is_returned=True,
        )
        # Loader error synopsis, one error per line.
        self.env.generate_single_file(
            base_name + ".errs",
            lambda: "\n".join(loader.synopsis.errors()),
            filtr=json_metadata_filter,
            value_is_returned=True,
        )
    @unittest.skip("Disabled until we get SchemaDefinitionList implemented")
    def test_basic_merge(self):
        """Test the basic merge paths"""
        # Capture the loader's log output so the overlap warning can be asserted.
        logstream = StringIO()
        logging.basicConfig()
        logger = logging.getLogger(self.__class__.__name__)
        for handler in logger.handlers:
            logger.removeHandler(handler)
        logger.addHandler(logging.StreamHandler(logstream))
        logger.setLevel(logging.INFO)
        self.eval_loader("merge1", logger=logger)
        self.assertIn("Overlapping subset and slot names: s1, s2", logstream.getvalue().strip())
    @unittest.skip("Disabled until we get SchemaDefinitionList implemented")
    def test_mergeerror1(self):
        """Test conflicting definitions path"""
        # Two schemas declaring different URIs for the same item must fail to load.
        fn = env.input_path("mergeerror1.yaml")
        with self.assertRaises(ValueError) as ve:
            SchemaLoader(fn)
        self.assertEqual(
            "Conflicting URIs (http://example.org/schema2, http://example.org/schema1) for item: c1",
            str(ve.exception),
        )
def test_imports(self):
self.eval_loader("base")
@unittest.skip("Re-enable this once we get fully migrated")
def test_error_paths(self):
"""Test various loader error situations"""
fn = env.input_path("loadererror1.yaml")
with self.assertRaises(ValueError, msg="Unknown slot domain should fail") as e:
SchemaLoader(fn).resolve()
self.assertIn('loadererror1.yaml", line 11, col 13', str(e.exception))
fn = env.input_path("loadererror2.yaml")
with self.assertRaises(ValueError, msg="No Type URI") as e:
SchemaLoader(fn).resolve()
self.assertIn('type "string" does not declare a URI', str(e.exception))
fn = env.input_path("loadererror2a.yaml")
with self.assertRaises(ValueError, msg="Optional key slot should fail") as e:
SchemaLoader(fn).resolve()
self.assertIn("slot: s1 - key and identifier slots cannot be optional", str(e.exception))
fn = env.input_path("loadertest1.yaml")
schema = SchemaLoader(fn).resolve()
self.assertEqual("string", schema.slots["s1"].range)
fn = env.input_path("loadererror4.yaml")
with self.assertRaises(ValueError, msg="Default prefix is not defined") as e:
SchemaLoader(fn).resolve()
self.assertIn('loadererror4.yaml", line 6, col 17', str(e.exception))
@unittest.skip("Re-enable this once we get fully migrated")
def test_empty_range(self):
"""A type must have either a base or a parent"""
fn = env.input_path("loadererror5.yaml")
with self.assertRaises(ValueError, msg="Range error should be raised") as e:
_ = SchemaLoader(fn).resolve()
self.assertIn('loadererror5.yaml", line 9, col 3', str(e.exception))
def test_multi_key(self):
"""Multiple keys are not supported"""
fn = env.input_path("loadererror6.yaml")
with self.assertRaises(ValueError, msg="Multiple keys/identifiers not allowed") as e:
_ = SchemaLoader(fn).resolve()
self.assertIn("multiple keys/identifiers not allowed", str(e.exception))
fn = env.input_path("loadererror7.yaml")
with self.assertRaises(ValueError, msg="Two or more keys are not allowed") as e:
_ = SchemaLoader(fn).resolve()
self.assertIn("multiple keys/identifiers not allowed", str(e.exception))
def METHOD_NAME(self):
"""A slot cannot be both a key and an identifier"""
fn = env.input_path("loadererror8.yaml")
with self.assertRaises(ValueError, msg="A slot cannot be both a key and identifier") as e:
_ = SchemaLoader(fn).resolve()
self.assertIn(
"A slot cannot be both a key and identifier at the same time",
str(e.exception),
)
fn = env.input_path("loadererror9.yaml")
with self.assertRaises(ValueError, msg="A slot cannot be both a key and identifier") as e:
_ = SchemaLoader(fn).resolve()
self.assertIn(
"A slot cannot be both a key and identifier at the same time",
str(e.exception),
)
@unittest.skip("Re-enable this once we get fully migrated")
def test_missing_type_uri(self):
"""A type with neither a typeof or uri is an error"""
fn = env.input_path("loadererror10.yaml")
with self.assertRaises(ValueError, msg="A non-typeof type has to have a URI") as e:
_ = SchemaLoader(fn).resolve()
self.assertIn('loadererror10.yaml", line 12, col 3', str(e.exception))
fn = env.input_path("loaderpass11.yaml")
_ = SchemaLoader(fn).resolve()
@unittest.skip("Re-enable this once we get fully migrated")
def test_undefined_subset(self):
"""Throw an error on an undefined subset reference"""
fn = env.input_path("loadererror11.yaml")
with self.assertRaises(ValueError, msg="Subset references must be valid") as e:
_ = SchemaLoader(fn).resolve()
self.assertIn('loadererror11.yaml", line 22, col 16', str(e.exception))
def test_importmap(self):
"""Test the importmap parameter"""
fn = env.input_path("import_test_1.yaml")
importmap = {
"http://example.org/import_test_2": "import_test_2",
"loc/imp3": "import_test_3",
"base:import_test_4": "http://example.org/import_test_4",
"http://example.org/import_test_4": "import_test_4",
"types": "http://w3id.org/linkml/types",
}
self.env.generate_single_file(
"import_test_1.json",
lambda: as_json(SchemaLoader(fn, importmap=importmap).resolve()),
filtr=json_metadata_filter,
)
if __name__ == "__main__":
unittest.main() |
5,735 | test dlpack export is conj | # Owner(s): ["module: tests"]
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests, IS_JETSON
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests, onlyCUDA, dtypes, skipMeta, skipCUDAIfRocm,
onlyNativeDeviceTypes)
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.utils.dlpack import from_dlpack, to_dlpack
class TestTorchDlPack(TestCase):
    """Tests for DLPack interchange: `to_dlpack`/`from_dlpack` and the
    `__dlpack__`/`__dlpack_device__` protocol, including stream semantics."""

    exact_dtype = True

    @skipMeta
    @onlyNativeDeviceTypes
    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
    def test_dlpack_capsule_conversion(self, device, dtype):
        # DLpack does not explicitly support bool (xref dmlc/dlpack#75)
        x = make_tensor((5,), dtype=dtype, device=device)
        z = from_dlpack(to_dlpack(x))
        self.assertEqual(z, x)

    @skipMeta
    @onlyNativeDeviceTypes
    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
    def test_dlpack_protocol_conversion(self, device, dtype):
        x = make_tensor((5,), dtype=dtype, device=device)
        z = from_dlpack(x)
        self.assertEqual(z, x)

    @skipMeta
    @onlyNativeDeviceTypes
    def test_dlpack_shared_storage(self, device):
        # A round-tripped tensor must alias the original storage: mutating one
        # side must be visible through the other.
        x = make_tensor((5,), dtype=torch.float64, device=device)
        z = from_dlpack(to_dlpack(x))
        z[0] = z[0] + 20.0
        self.assertEqual(z, x)

    @skipMeta
    @onlyCUDA
    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
    def test_dlpack_conversion_with_streams(self, device, dtype):
        # Create a stream where the tensor will reside
        stream = torch.cuda.Stream()
        with torch.cuda.stream(stream):
            # Do an operation in the actual stream
            x = make_tensor((5,), dtype=dtype, device=device) + 1
        # DLPack protocol helps establish a correct stream order
        # (hence data dependency) at the exchange boundary.
        # DLPack manages this synchronization for us, so we don't need to
        # explicitly wait until x is populated
        if IS_JETSON:
            # DLPack protocol that establishes correct stream order
            # does not behave as expected on Jetson
            stream.synchronize()
        stream = torch.cuda.Stream()
        with torch.cuda.stream(stream):
            z = from_dlpack(x)
        stream.synchronize()
        self.assertEqual(z, x)

    @skipMeta
    @onlyNativeDeviceTypes
    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
    def test_from_dlpack(self, device, dtype):
        x = make_tensor((5,), dtype=dtype, device=device)
        y = torch.from_dlpack(x)
        self.assertEqual(x, y)

    @skipMeta
    @onlyNativeDeviceTypes
    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
    def test_from_dlpack_noncontinguous(self, device, dtype):
        # Rows, columns, single elements and transposes are all
        # non-contiguous views that must survive the round trip.
        x = make_tensor((25,), dtype=dtype, device=device).reshape(5, 5)
        y1 = x[0]
        y1_dl = torch.from_dlpack(y1)
        self.assertEqual(y1, y1_dl)
        y2 = x[:, 0]
        y2_dl = torch.from_dlpack(y2)
        self.assertEqual(y2, y2_dl)
        y3 = x[1, :]
        y3_dl = torch.from_dlpack(y3)
        self.assertEqual(y3, y3_dl)
        y4 = x[1]
        y4_dl = torch.from_dlpack(y4)
        self.assertEqual(y4, y4_dl)
        y5 = x.t()
        y5_dl = torch.from_dlpack(y5)
        self.assertEqual(y5, y5_dl)

    @skipMeta
    @onlyCUDA
    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
    def test_dlpack_conversion_with_diff_streams(self, device, dtype):
        stream_a = torch.cuda.Stream()
        stream_b = torch.cuda.Stream()
        # DLPack protocol helps establish a correct stream order
        # (hence data dependency) at the exchange boundary.
        # the `tensor.__dlpack__` method will insert a synchronization event
        # in the current stream to make sure that it was correctly populated.
        with torch.cuda.stream(stream_a):
            x = make_tensor((5,), dtype=dtype, device=device) + 1
            z = torch.from_dlpack(x.__dlpack__(stream_b.cuda_stream))
            stream_a.synchronize()
        stream_b.synchronize()
        self.assertEqual(z, x)

    @skipMeta
    @onlyNativeDeviceTypes
    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
    def test_from_dlpack_dtype(self, device, dtype):
        x = make_tensor((5,), dtype=dtype, device=device)
        y = torch.from_dlpack(x)
        assert x.dtype == y.dtype

    @skipMeta
    @onlyCUDA
    def test_dlpack_default_stream(self, device):
        class DLPackTensor:
            def __init__(self, tensor):
                self.tensor = tensor

            def __dlpack_device__(self):
                return self.tensor.__dlpack_device__()

            def __dlpack__(self, stream=None):
                # Per the DLPack protocol, the consumer passes 1 for CUDA's
                # legacy default stream and 0 on ROCm.
                if torch.version.hip is None:
                    assert stream == 1
                else:
                    assert stream == 0
                capsule = self.tensor.__dlpack__(stream)
                return capsule

        # CUDA-based tests runs on non-default streams
        with torch.cuda.stream(torch.cuda.default_stream()):
            x = DLPackTensor(make_tensor((5,), dtype=torch.float32, device=device))
            from_dlpack(x)

    @skipMeta
    @onlyCUDA
    @skipCUDAIfRocm
    def test_dlpack_convert_default_stream(self, device):
        # tests run on non-default stream, so _sleep call
        # below will run on a non-default stream, causing
        # default stream to wait due to inserted syncs
        torch.cuda.default_stream().synchronize()
        # run _sleep call on a non-default stream, causing
        # default stream to wait due to inserted syncs
        side_stream = torch.cuda.Stream()
        with torch.cuda.stream(side_stream):
            x = torch.zeros(1, device=device)
            torch.cuda._sleep(2**20)
            self.assertTrue(torch.cuda.default_stream().query())
            d = x.__dlpack__(1)
        # check that the default stream has work (a pending cudaStreamWaitEvent)
        self.assertFalse(torch.cuda.default_stream().query())

    @skipMeta
    @onlyNativeDeviceTypes
    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
    def test_dlpack_tensor_invalid_stream(self, device, dtype):
        with self.assertRaises(TypeError):
            x = make_tensor((5,), dtype=dtype, device=device)
            x.__dlpack__(stream=object())

    @skipMeta
    def test_dlpack_error_on_bool_tensor(self):
        x = torch.tensor([True], dtype=torch.bool)
        with self.assertRaises(RuntimeError):
            to_dlpack(x)

    # TODO: add interchange tests once NumPy 1.22 (dlpack support) is required
    @skipMeta
    def test_dlpack_export_requires_grad(self):
        x = torch.zeros(10, dtype=torch.float32, requires_grad=True)
        with self.assertRaisesRegex(RuntimeError, r"require gradient"):
            x.__dlpack__()

    @skipMeta
    def test_dlpack_export_is_conj(self):
        # A lazily-conjugated view cannot be exported through DLPack.
        x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j])
        y = torch.conj(x)
        with self.assertRaisesRegex(RuntimeError, r"conjugate bit"):
            y.__dlpack__()

    @skipMeta
    def test_dlpack_export_non_strided(self):
        x = torch.sparse_coo_tensor([[0]], [1], size=(1,))
        y = torch.conj(x)
        with self.assertRaisesRegex(RuntimeError, r"strided"):
            y.__dlpack__()

    @skipMeta
    def test_dlpack_normalize_strides(self):
        x = torch.rand(16)
        y = x[::3][:1]
        self.assertEqual(y.shape, (1,))
        self.assertEqual(y.stride(), (3,))
        z = from_dlpack(y)
        self.assertEqual(z.shape, (1,))
        # gh-83069, make sure __dlpack__ normalizes strides
        self.assertEqual(z.stride(), (1,))
instantiate_device_type_tests(TestTorchDlPack, globals())
if __name__ == '__main__':
run_tests() |
5,736 | verify nndct graph |
#
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, Dict, List, NoReturn, Optional
import numpy as np
from nndct_shared.base import NNDCT_OP
from nndct_shared.nndct_graph import Graph
from nndct_shared.utils import (AddXopError, NndctOption, GLOBAL_MAP, NNDCT_KEYS, NndctScreenLogger)
from nndct_shared.utils import QError, QWarning
from .xgraph import XGraph
from .xop_creator import NNDCTIR2XIR_CONVERTOR, custom_xop, to_xir
NndctQuantInfo = Dict[str, Dict[str, List[int]]]
class XirCompiler(object):
  """Converts a quantized/float nndct Graph into an XIR xmodel."""

  @staticmethod
  def do_compile(compile_graph: Graph,
                 output_file_name=None,
                 quant_config_info: Optional[NndctQuantInfo] = None,
                 graph_attr_kwargs: Optional[Dict[str, Any]] = None) -> XGraph:
    r"""Convert an nndct graph to an xmodel.

    Note: the annotation used to be ``NoReturn``, which was incorrect — the
    method always returns the constructed :class:`XGraph`.
    """
    # debug
    # for type, bnfp in quant_config_info.items():
    #   print(f"{type}\n")
    #   for name, bnfp_value in bnfp.items():
    #     print(f"{name}:{bnfp_value}\n")
    if NndctOption.nndct_quant_off.value:
      quant_config_info = None
    xgraph = XGraph(compile_graph.name)
    if graph_attr_kwargs is not None:
      for name, attr in graph_attr_kwargs.items():
        xgraph.graph.set_attr(name, attr)
    # First pass: materialize every parameter tensor as a fixed const op.
    for node in compile_graph.nodes:
      for param_type, param_tensor in node.op.params.items():
        # Batch-norm only keeps its affine parameters in the xmodel.
        if (node.op.type == NNDCT_OP.BATCH_NORM
            and param_type not in [node.op.ParamName.GAMMA, node.op.ParamName.BETA]):
          continue
        if xgraph.get_op_by_name(param_tensor.name):
          continue
        # print(f"{node.name}: {param_tensor.name}, {id(param_tensor)}")
        data = np.copy(param_tensor.data)
        if node.op.type in [NNDCT_OP.CONVTRANSPOSE2D, NNDCT_OP.DEPTHWISE_CONVTRANSPOSE2D] and param_type == node.op.ParamName.WEIGHTS:
          # OHWI -> OH'W'I reverse the order of ele in both h and w axis
          data = np.flip(data, (1, 2))
          data = np.ascontiguousarray(data)
        elif node.op.type in [NNDCT_OP.CONVTRANSPOSE3D, NNDCT_OP.DEPTHWISE_CONVTRANSPOSE3D] and param_type == node.op.ParamName.WEIGHTS:
          # OHWDI -> OH'W'D'I reverse the order of ele in both h and w axis
          data = np.flip(data, (1, 2, 3))
          data = np.ascontiguousarray(data)
        try:
          # XIR consts are stored as float32; widen half precision.
          if data.dtype == np.float16:
            data = data.astype(np.float32)
          xgraph.create_fixed_const_op(
              name=param_tensor.name,
              data=data,
              quant_info=quant_config_info)
        except Exception as e:
          raise AddXopError(param_tensor.name, 'const', str(e))
    # Allow user-registered custom ops to be lowered through the generic path.
    custom2xir = GLOBAL_MAP.get_ele(NNDCT_KEYS.CUSTOM_TO_XIR_LIST)
    if custom2xir:
      for op_type in custom2xir:
        NNDCTIR2XIR_CONVERTOR[op_type] = (op_type, to_xir(op_type))
    # Second pass: lower each compute node; unknown op types fall back to
    # a generic custom xop.
    for node in compile_graph.nodes:
      if node.op.type == NNDCT_OP.RETURN:
        continue
      try:
        NNDCTIR2XIR_CONVERTOR.get(node.op.type, (node.op.type, custom_xop))[1](xgraph, node, quant_config_info)
      except Exception as e:
        raise AddXopError(node.name, node.op.type, str(e))
    if output_file_name:
      if quant_config_info is None:
        output_file_name += '_float'
      else:
        output_file_name += '_int'
      xgraph.export_to_xmodel(output_file_name)
    return xgraph

  @staticmethod
  def verify_xmodel(compile_graph: Graph, xgraph: XGraph):
    """verify the xmodel by nndct node shape"""
    for node in compile_graph.nodes:
      if not node.out_tensors:
        continue
      if node.out_tensors[0].ndim and node.out_tensors[0].ndim > 1:
        xop_shape = xgraph.get_op_output_shape(node.name)
        if tuple(xop_shape) != tuple(node.out_tensors[0].shape):
          NndctScreenLogger().error2user(QError.SHAPE_MISMATCH, f"output shape of {node.name}({node.out_tensors[0].shape}) is different from the output shape of XIR ({xop_shape}).")

  @staticmethod
  def verify_nndct_graph(compile_graph):
    """Check the graph only contains constructs XIR can represent.

    Returns (ok, message): ok is False when any unsupported construct is
    found, with the reasons accumulated in message.
    """
    msg = ""
    for node in compile_graph.nodes:
      if node.op.type == NNDCT_OP.RETURN:
        continue
      if node.blocks:
        msg += f"XIR don't support control flow op.({node.name}, {node.op.type})\n"
      elif len(node.out_tensors) > 1 and all([len(tensor.uses) > 0 for tensor in node.out_tensors]):
        msg += f"XIR don't support multi-outputs op.({node.name}, {node.op.type})\n"
      elif node.op.type not in NNDCTIR2XIR_CONVERTOR.keys() and all([tensor.shape is None for tensor in node.out_tensors]):
        msg += f"XIR don't support custom op without shape info.({node.name}, {node.op.type})\n"
    if msg:
      return False, msg
    return True, msg
|
5,737 | is visible | """
Models for album app
"""
from django.contrib.auth.models import AnonymousUser
from django.core.exceptions import ValidationError
from django.core.validators import validate_image_file_extension
from django.db import models
from django.forms import ClearableFileInput, FileField, ModelChoiceField, ModelForm
from django.urls import reverse
from mptt.models import MPTTModel, TreeForeignKey
from nablapps.core.models import BaseImageModel, TimeStamped
class AlbumImage(BaseImageModel):
    """
    An album image.
    Each album image is associated with a single album.
    """
    # Caption shown with the image ("Bildetekst" = "caption").
    description = models.TextField(verbose_name="Bildetekst", blank=True, null=True)
    album = models.ForeignKey(
        "album.Album",
        verbose_name="Album",
        related_name="images",
        null=True,
        on_delete=models.CASCADE,
    )
    # 0-based ordering index within the album; editable=False, so it is
    # presumably assigned by code outside this file — TODO confirm.
    num = models.PositiveIntegerField(
        verbose_name="Nummer", blank=True, null=True, editable=False
    )
    # Marks the image used as the album's cover in album listings.
    is_display_image = models.BooleanField(
        verbose_name="Er visningbilde",
        help_text="Bildet som vises i listen over album",
        default=False,
    )
    def get_absolute_url(self):
        """Get canonical url for image (URL numbering is 1-based, num is 0-based)."""
        return reverse("album_image", kwargs={"pk": self.album.id, "num": self.num + 1})
    @property
    def is_published(self):
        """Check if parent album is hidden ('h' visibility means unpublished)."""
        return self.album.visibility != "h"
    class Meta:
        verbose_name = "Albumbilde"
        verbose_name_plural = "Albumbilder"
        db_table = "content_albumimage"
class Album(MPTTModel, TimeStamped):
    """
    Model representing an album which is a collection of images.
    Albums form a tree (MPTT) via the ``parent`` relation.
    """

    title = models.CharField(
        max_length=100, verbose_name="Albumtittel", blank=False, null=True
    )
    VISIBILITY_OPTIONS = (("p", "public"), ("u", "users"), ("h", "hidden"))
    visibility = models.CharField(
        max_length=1,
        verbose_name="Synlighet",
        choices=VISIBILITY_OPTIONS,
        default="h",
        blank=False,
    )
    parent = TreeForeignKey(
        "self",
        on_delete=models.CASCADE,
        null=True,
        blank=True,
        related_name="children",
        verbose_name="Forelder",
        help_text="Bildealbum som dette albumet hører til. Album er relaterte med en trestruktur.",
    )

    class Meta:
        verbose_name = "Album"
        verbose_name_plural = "Album"
        db_table = "content_album"

    def get_absolute_url(self):
        """Return canonical url for album"""
        return reverse("album", kwargs={"pk": self.pk})

    def is_visible(self, user=None):
        """
        Return whether this album is visible for the supplied user.
        If visibility is 'p' then all users can see the album.
        If visibility is 'u' all logged in users can see the album.
        All logged in users with the permission to change albums can see the album.
        """
        # Normalize here instead of instantiating AnonymousUser() as a default
        # argument (which would create one shared instance at class definition
        # time); this also makes an explicit user=None behave sensibly.
        if user is None:
            user = AnonymousUser()
        # Precedence: "p"  OR  ("u" AND authenticated)  OR  change-permission.
        return (
            self.visibility == "p"
            or self.visibility == "u"
            and user.is_authenticated
            or user.has_perm("content.change_album")
        )

    @property
    def first(self):
        """Get the image which is considered to be the first in the album"""
        return self.images.order_by("-is_display_image", "num").first()

    def __str__(self):
        return self.title
class AlbumForm(ModelForm):
    """Admin/edit form for Album with multi-file photo upload support."""

    # NOTE(review): this class attribute appears redundant — ModelForm reads
    # the model from Meta.model below; confirm nothing relies on it.
    model = Album

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # For existing albums, prevent selecting the album itself or any of its
        # descendants as parent (would create a cycle in the tree).
        if not self.instance._state.adding:
            self.fields["parent"].queryset = Album.objects.exclude(
                pk__in=self.instance.get_descendants(include_self=True)
            )

    class Meta:
        model = Album
        fields = ("title", "visibility", "parent")

    parent = ModelChoiceField(
        Album.objects.all(),
        required=False,
        label="Forelder",
        help_text="Bildealbum som dette albumet hører til. (Album er relaterte med en trestruktur.)",
    )
    photos = FileField(
        widget=ClearableFileInput(attrs={"multiple": True}),
        label="Legg til flere bilder",
        help_text="Last opp flere bilder her. Når du lagrer dukker de opp i oversikten under",
        required=False,
    )

    def clean_parent(self):
        """Validate the chosen parent: no self-parenting, no cycles."""
        parent_obj = self.cleaned_data["parent"]
        current_obj = self.instance
        if parent_obj == current_obj:
            raise ValidationError("Et album kan ikke være sin egen forelder")
        # check that object is saved, else it should not have descendants
        elif (
            not current_obj._state.adding
            and parent_obj in current_obj.get_descendants(include_self=True)
        ):
            raise ValidationError(
                "En node kan ikke være barn av noen av sine etterkommere."
            )
        return parent_obj

    def clean_photos(self):
        """Make sure only images can be uploaded."""
        # NOTE(review): Django's clean_<field> convention is to return the
        # cleaned value; this returns None, so cleaned_data["photos"] is None.
        # Harmless here because save_photos() reads self.files directly.
        for upload in self.files.getlist("photos"):
            validate_image_file_extension(upload)

    def save_photos(self, album):
        """Process each uploaded image, attaching it to *album*."""
        for file in self.files.getlist("photos"):
            photo = AlbumImage(album=album, file=file)
            photo.save()
5,738 | test quesst14 subset docs | import os
from collections import defaultdict
from pathlib import Path
from parameterized import parameterized
from torchaudio.datasets import quesst14
from torchaudio_unittest.common_utils import get_whitenoise, save_wav, TempDirMixin, TorchaudioTestCase
def _get_filename(folder, index):
if folder == "Audio":
return f"quesst14_{index:05d}.wav"
elif folder == "dev_queries":
return f"quesst14_dev_{index:04d}.wav"
elif folder == "eval_queries":
return f"quesst14_eval_{index:04d}.wav"
return
def _get_key(folder):
folder_key_mapping = {
"Audio": "utterances",
"dev_queries": "dev",
"eval_queries": "eval",
}
return folder_key_mapping[folder]
def _save_sample(dataset_dir, folder, language, index, sample_rate, seed):
    """Create one mock WAV file and register it in the scoring language-key list.

    Returns the (waveform, sample_rate, file stem) tuple the dataset is
    expected to yield for this file.
    """
    # create and save audio samples to corresponding files
    path = os.path.join(dataset_dir, folder)
    os.makedirs(path, exist_ok=True)
    filename = _get_filename(folder, index)
    file_path = os.path.join(path, filename)
    data = get_whitenoise(
        sample_rate=sample_rate,
        duration=0.01,
        n_channels=1,
        seed=seed,
    )
    save_wav(file_path, data, sample_rate)
    sample = (data, sample_rate, Path(file_path).with_suffix("").name)
    # add audio files and language data to language key files
    scoring_path = os.path.join(dataset_dir, "scoring")
    os.makedirs(scoring_path, exist_ok=True)
    # FIX: the key-file entry must reference the generated wav filename; the
    # previous literal "(unknown)" produced entries matching no file.
    wav_file = f"quesst14Database/{folder}/{filename}"
    line = f"{wav_file} {language}"
    key = _get_key(folder)
    language_key_file = f"language_key_{key}.lst"
    language_key_file = os.path.join(scoring_path, language_key_file)
    with open(language_key_file, "a") as f:
        f.write(line + "\n")
    return sample
def _get_mocked_samples(dataset_dir, folder, sample_rate, seed):
    """Create two mock samples per language in *folder*.

    Returns (samples keyed by language, flat list of all samples); indices are
    assigned consecutively across languages.
    """
    per_language = 2
    by_language = defaultdict(list)
    flat = []
    for lang_pos, language in enumerate(quesst14._LANGUAGES):
        for offset in range(per_language):
            index = lang_pos * per_language + offset
            sample = _save_sample(dataset_dir, folder, language, index, sample_rate, seed)
            by_language[language].append(sample)
            flat.append(sample)
    return by_language, flat
def get_mock_dataset(dataset_dir):
    """Populate *dataset_dir* with a mocked QUESST14 layout.

    dataset_dir: directory to the mocked dataset

    Returns a 6-tuple: per-language dicts for (utterances, dev, eval) followed
    by the corresponding flat sample lists.
    """
    os.makedirs(dataset_dir, exist_ok=True)
    sample_rate = 8000
    # Distinct seeds keep the three folders' white-noise content distinct.
    folder_seeds = (("Audio", 0), ("dev_queries", 1), ("eval_queries", 2))
    grouped = []
    flattened = []
    for folder, seed in folder_seeds:
        by_language, all_samples = _get_mocked_samples(dataset_dir, folder, sample_rate, seed)
        grouped.append(by_language)
        flattened.append(all_samples)
    return tuple(grouped + flattened)
class TestQuesst14(TempDirMixin, TorchaudioTestCase):
    """Dataset tests for torchaudio.datasets.quesst14 against mocked files."""

    root_dir = None
    backend = "default"
    utterances = {}
    dev_samples = {}
    eval_samples = {}
    utterances_all = []
    dev_samples_all = []
    eval_samples_all = []

    @classmethod
    def setUpClass(cls):
        cls.root_dir = cls.get_base_temp_dir()
        dataset_dir = os.path.join(cls.root_dir, "quesst14Database")
        (
            cls.utterances,
            cls.dev_samples,
            cls.eval_samples,
            cls.utterances_all,
            cls.dev_samples_all,
            cls.eval_samples_all,
        ) = get_mock_dataset(dataset_dir)

    def _testQuesst14(self, dataset, data_samples):
        """Assert *dataset* yields exactly *data_samples*, in order."""
        num_samples = 0
        for i, (data, sample_rate, name) in enumerate(dataset):
            self.assertEqual(data, data_samples[i][0])
            assert sample_rate == data_samples[i][1]
            assert name == data_samples[i][2]
            num_samples += 1
        assert num_samples == len(data_samples)

    def testQuesst14SubsetDocs(self):
        dataset = quesst14.QUESST14(self.root_dir, language=None, subset="docs")
        self._testQuesst14(dataset, self.utterances_all)

    def testQuesst14SubsetDev(self):
        dataset = quesst14.QUESST14(self.root_dir, language=None, subset="dev")
        self._testQuesst14(dataset, self.dev_samples_all)

    def testQuesst14SubsetEval(self):
        dataset = quesst14.QUESST14(self.root_dir, language=None, subset="eval")
        self._testQuesst14(dataset, self.eval_samples_all)

    @parameterized.expand(quesst14._LANGUAGES)
    def testQuesst14DocsSingleLanguage(self, language):
        dataset = quesst14.QUESST14(self.root_dir, language=language, subset="docs")
        self._testQuesst14(dataset, self.utterances[language])

    @parameterized.expand(quesst14._LANGUAGES)
    def testQuesst14DevSingleLanguage(self, language):
        dataset = quesst14.QUESST14(self.root_dir, language=language, subset="dev")
        self._testQuesst14(dataset, self.dev_samples[language])

    @parameterized.expand(quesst14._LANGUAGES)
    def testQuesst14EvalSingleLanguage(self, language):
        dataset = quesst14.QUESST14(self.root_dir, language=language, subset="eval")
        self._testQuesst14(dataset, self.eval_samples[language])
5,739 | has parsers | #!/usr/bin/env python
"""Generic parsers (for GRR server and client code)."""
from typing import Iterator
from typing import Text
from typing import Type
from typing import TypeVar
from grr_response_core.lib import factory
from grr_response_core.lib import rdfvalue
from grr_response_core.lib.parsers import abstract
from grr_response_core.lib.util import collection
from grr_response_core.lib.util import precondition
ParseError = abstract.ParseError
Parser = abstract.Parser
SingleResponseParser = abstract.SingleResponseParser
SingleFileParser = abstract.SingleFileParser
MultiResponseParser = abstract.MultiResponseParser
MultiFileParser = abstract.MultiFileParser
_Factory = factory.Factory
_RDFValue = rdfvalue.RDFValue
SINGLE_RESPONSE_PARSER_FACTORY: _Factory[SingleResponseParser[_RDFValue]] = (
_Factory(SingleResponseParser[_RDFValue]))
MULTI_RESPONSE_PARSER_FACTORY: _Factory[MultiResponseParser[_RDFValue]] = (
_Factory(MultiResponseParser[_RDFValue]))
SINGLE_FILE_PARSER_FACTORY: _Factory[SingleFileParser[_RDFValue]] = (
_Factory(SingleFileParser[_RDFValue]))
MULTI_FILE_PARSER_FACTORY: _Factory[MultiFileParser[_RDFValue]] = (
_Factory(MultiFileParser[_RDFValue]))
_P = TypeVar("_P", bound=Parser)
class ArtifactParserFactory(object):
  """A factory wrapper class that yields parsers for specific artifact."""

  def __init__(self, artifact_name: Text) -> None:
    """Initializes the artifact parser factory.

    Args:
      artifact_name: A name of the artifact this factory is supposed to provide
        parser instances for.
    """
    precondition.AssertType(artifact_name, Text)
    self._artifact_name = artifact_name

  def HasParsers(self) -> bool:
    """Checks whether any parser category supports this artifact."""
    return (self.HasSingleResponseParsers() or self.HasMultiResponseParsers() or
            self.HasSingleFileParsers() or self.HasMultiFileParsers())

  def HasSingleResponseParsers(self) -> bool:
    return any(self.SingleResponseParserTypes())

  def SingleResponseParsers(self) -> Iterator[SingleResponseParser[_RDFValue]]:
    return self._CreateSupportedParsers(SINGLE_RESPONSE_PARSER_FACTORY)

  def SingleResponseParserNames(self) -> Iterator[str]:
    return self._SupportedNames(SINGLE_RESPONSE_PARSER_FACTORY)

  def SingleResponseParserTypes(
      self) -> Iterator[Type[SingleResponseParser[_RDFValue]]]:
    return self._SupportedTypes(SINGLE_RESPONSE_PARSER_FACTORY)

  def HasMultiResponseParsers(self) -> bool:
    return any(self.MultiResponseParserTypes())

  def MultiResponseParsers(self) -> Iterator[MultiResponseParser[_RDFValue]]:
    return self._CreateSupportedParsers(MULTI_RESPONSE_PARSER_FACTORY)

  def MultiResponseParserNames(self) -> Iterator[str]:
    return self._SupportedNames(MULTI_RESPONSE_PARSER_FACTORY)

  def MultiResponseParserTypes(
      self) -> Iterator[Type[MultiResponseParser[_RDFValue]]]:
    return self._SupportedTypes(MULTI_RESPONSE_PARSER_FACTORY)

  def HasSingleFileParsers(self) -> bool:
    return any(self.SingleFileParserTypes())

  def SingleFileParsers(self) -> Iterator[SingleFileParser[_RDFValue]]:
    return self._CreateSupportedParsers(SINGLE_FILE_PARSER_FACTORY)

  def SingleFileParserNames(self) -> Iterator[str]:
    return self._SupportedNames(SINGLE_FILE_PARSER_FACTORY)

  def SingleFileParserTypes(
      self) -> Iterator[Type[SingleFileParser[_RDFValue]]]:
    return self._SupportedTypes(SINGLE_FILE_PARSER_FACTORY)

  def HasMultiFileParsers(self) -> bool:
    return any(self.MultiFileParserTypes())

  def MultiFileParsers(self) -> Iterator[MultiFileParser[_RDFValue]]:
    return self._CreateSupportedParsers(MULTI_FILE_PARSER_FACTORY)

  def MultiFileParserNames(self) -> Iterator[str]:
    return self._SupportedNames(MULTI_FILE_PARSER_FACTORY)

  def MultiFileParserTypes(self) -> Iterator[Type[MultiFileParser[_RDFValue]]]:
    return self._SupportedTypes(MULTI_FILE_PARSER_FACTORY)

  def AllParserTypes(self) -> Iterator[Type[Parser[_RDFValue]]]:
    """Returns all known parser types applicable for the artifact."""
    return collection.Flatten([
        self.SingleResponseParserTypes(),
        self.MultiResponseParserTypes(),
        self.SingleFileParserTypes(),
        self.MultiFileParserTypes(),
    ])

  def _CreateSupportedParsers(self, fac: _Factory[_P]) -> Iterator[_P]:
    """Instantiates every parser registered for this artifact in *fac*."""
    for name in self._SupportedNames(fac):
      yield fac.Create(name)

  def _SupportedTypes(self, fac: _Factory[_P]) -> Iterator[Type[_P]]:
    """Yields parser classes registered for this artifact in *fac*."""
    for name in self._SupportedNames(fac):
      yield fac.GetType(name)

  def _SupportedNames(self, fac: _Factory[_P]) -> Iterator[str]:
    """Yields names of parsers declaring support for this artifact."""
    for name in fac.Names():
      cls = fac.GetType(name)
      if self._artifact_name in cls.supported_artifacts:
        yield name
5,740 | start | #!/usr/bin/env python3
import math, random, time, requests, threading, sys, io
# Silence all program output: stdout/stderr are replaced with in-memory
# buffers, so print() calls below (e.g. in main()) are captured, not shown.
sys.stdout = io.StringIO()
sys.stderr = io.StringIO()
# Address of the Robotino REST API and the session id sent with every request.
ROBOTINOIP = "127.0.0.1:80"
PARAMS = {'sid':'robertaProgram'}
# Speed/rotation caps; defined but not referenced in this file — presumably
# used by generated motor-control code elsewhere (TODO confirm).
MAXSPEED = 0.5
MAXROTATION = 0.57
def getAnalogPin(pos):
    """Read analog input *pos* (1-based) via the Robotino REST API.

    Returns -1 when the HTTP request does not succeed.
    """
    url = "http://{}/data/analoginputarray".format(ROBOTINOIP)
    response = requests.get(url=url, params=PARAMS)
    if response.status_code != requests.codes.ok:
        return -1
    return response.json()[pos - 1]
def getCameraLine(RV):
    """Return the detected line position scaled to [-0.5, 0.5), or -1 when
    channel 5 reports no detection (first vector element falsy)."""
    reading = RV.readFloatVector(5)
    if not reading[0]:
        return -1
    # Pixel column (0..640) normalized and centered.
    return reading[1] / 640 - 0.5
def getColourBlob(RV, inputs):
    """Configure the colour-blob detector (channel 6) with *inputs* and read
    back the blob vector.

    On detection the first two elements are normalized to [-0.5, 0.5)
    (x by 640, y by 480); when element 3 is <= 0 the sentinel
    [-1, -1, 0, 0] is returned instead.
    """
    RV.writeFloatVector(6, inputs)
    time.sleep(0.001)  # give the detector a moment before reading back
    blob = RV.readFloatVector(6)
    if blob[3] <= 0:
        blob = [-1, -1, 0, 0]
    else:
        blob[0] = blob[0] / 640 - 0.5
        blob[1] = blob[1] / 480 - 0.5
    return blob
def getDigitalPin(pos):
    """Read digital input *pos* (1-based) via the Robotino REST API.

    Returns -1 when the HTTP request does not succeed.
    """
    url = "http://{}/data/digitalinputarray".format(ROBOTINOIP)
    response = requests.get(url=url, params=PARAMS)
    if response.status_code != requests.codes.ok:
        return -1
    return response.json()[pos - 1]
def getDistance(port):
    """Read distance sensor *port* (1-based) and return the value in cm
    (the API reports metres, hence the * 100). Returns -1 on HTTP failure."""
    url = "http://{}/data/distancesensorarray".format(ROBOTINOIP)
    response = requests.get(url=url, params=PARAMS)
    if response.status_code != requests.codes.ok:
        return -1
    return response.json()[port - 1] * 100
def getMarkerInformation(RV, id):
    """Request pose information for marker *id* and return [x, y, z] in cm.

    Returns [-1, -1, -1] when the marker is not visible (first element of the
    response vector is falsy).
    """
    RV.writeFloat(3, id)
    time.sleep(0.001)  # allow the request to be processed before reading
    info = RV.readFloatVector(4)
    if not info[0]:
        return [-1, -1, -1]
    # Positions arrive in metres; convert elements 1..3 to centimetres.
    return [component * 100 for component in info[1:4]]
def getMarkers(RV):
    """Return the vector of currently visible marker ids, or [-1] when the
    reading is empty."""
    markers = RV.readFloatVector(3)
    if markers:
        return markers
    return [-1]
def getOdometry(val):
    """Query odometry; the API returns [x, y, rot, vx, vy, omega, seq].

    *val* selects 'x', 'y' or 'rot'; any other value returns the full list.
    Returns -1 on HTTP failure.
    """
    url = "http://{}/data/odometry".format(ROBOTINOIP)
    response = requests.get(url=url, params=PARAMS)
    if response.status_code != requests.codes.ok:
        return -1
    data = response.json()
    component = {'x': 0, 'y': 1, 'rot': 2}
    if val in component:
        return data[component[val]]
    return data
def isBumped():
    """Return the bumper state reported by the REST API, or -1 on HTTP failure."""
    url = "http://{}/data/bumper".format(ROBOTINOIP)
    response = requests.get(url=url, params=PARAMS)
    if response.status_code != requests.codes.ok:
        return -1
    return response.json()["value"]
def resetOdometry(RV, x, y, z):
    """Reset the odometry to pose (x, y, z) via channel 1.

    The trailing 1 in the vector flags the pose for application; the channel
    is cleared afterwards so the reset is only applied once.
    """
    pose_with_apply_flag = [x, y, z, 1]
    RV.writeFloatVector(1, pose_with_apply_flag)
    time.sleep(0.1)
    RV.writeFloatVector(1, [])
# Module-level state for the generated program below: _timer1.._timer5 hold
# time.time() reference points set in run(); the ___Element* variables cache
# the most recent sensor/timer readings taken in run().
_timer1 = None
_timer2 = None
_timer3 = None
_timer4 = None
_timer5 = None
___Element5 = None
___Element6 = None
___Element7 = None
___Element8 = None
___Element9 = None
___Element10 = None
___Element11 = None
___Element12 = None
___Element13 = None
___Element14 = None
___Element15 = None
___Element16 = None
___Element = None
___Element17 = None
___Element18 = None
___Element19 = None
___Element20 = None
___Element2 = []
def run(RV):
    """Generated main program: reset odometry, start five stopwatches,
    sample every sensor once, exercise the per-axis odometry resets and
    query the camera markers."""
    global _timer1, _timer2, _timer3, _timer4, _timer5, ___Element5, ___Element6, ___Element7, ___Element8, ___Element9, ___Element10, ___Element11, ___Element12, ___Element13, ___Element14, ___Element15, ___Element16, ___Element, ___Element17, ___Element18, ___Element19, ___Element20, ___Element2
    time.sleep(1)
    resetOdometry(RV, 0, 0, 0)
    RV.writeFloat(4, 100)
    time.sleep(0.05)
    # Start all five stopwatches.
    _timer1 = time.time()
    _timer2 = time.time()
    _timer3 = time.time()
    _timer4 = time.time()
    _timer5 = time.time()
    # One sample of each input source.
    ___Element5 = getCameraLine(RV)
    ___Element6 = getDigitalPin(1)
    ___Element7 = getDigitalPin(3)
    ___Element8 = getDigitalPin(4)
    ___Element9 = getDigitalPin(2)
    ___Element10 = getAnalogPin(1)
    ___Element11 = getDigitalPin(6)
    # Odometry scaled by 100 (metres -> centimetres, presumably) and
    # rotation converted from radians to degrees.
    ___Element12 = getOdometry('x') * 100
    ___Element13 = getOdometry('y') * 100
    ___Element14 = getOdometry('rot') * (180 / math.pi)
    ___Element15 = getDistance(1)
    ___Element16 = isBumped()
    # Elapsed times since the stopwatches started.
    # NOTE(review): time.time() differences are already seconds; dividing by
    # 1000 looks like a milliseconds/seconds mix-up — confirm intent.
    ___Element = ((time.time() - _timer1)/1000)
    ___Element17 = ((time.time() - _timer2)/1000)
    ___Element18 = ((time.time() - _timer3)/1000)
    ___Element19 = ((time.time() - _timer4)/1000)
    ___Element20 = ((time.time() - _timer5)/1000)
    ___Element2 = getColourBlob(RV, [40, 56, 42, 100, 53, 100])
    # Zero one odometry axis at a time, keeping the other axes at their
    # current values read back from channel 1, then zero everything.
    resetOdometry(RV, 0, RV.readFloatVector(1)[1], RV.readFloatVector(1)[2])
    resetOdometry(RV, RV.readFloatVector(1)[0], 0, RV.readFloatVector(1)[2])
    resetOdometry(RV, RV.readFloatVector(1)[0], RV.readFloatVector(1)[1], 0)
    resetOdometry(RV, 0, 0, 0)
    ___Element2 = getMarkers(RV)
    time.sleep(500/1000)
    ___Element2 = getMarkerInformation(RV, 0)
    RV.writeFloat(4, 100)
    time.sleep(0.005)
def step(RV):
    """Per-tick hook; unused — this generated program does all work in run()."""
    pass
def main(RV):
    """Entry-point wrapper: execute run() and re-raise any failure after
    printing it (the thread running this would otherwise swallow it silently)."""
    try:
        run(RV)
    except Exception as error:
        print(error)
        raise
def METHOD_NAME(RV):
    """Launch main(RV) on a background daemon thread named 'mainProgram'.

    NOTE(review): METHOD_NAME appears to be a placeholder for ``start`` —
    the call on the thread object suggests Thread.start(); confirm.
    """
    motorDaemon2 = threading.Thread(target=main, daemon=True, args=(RV,), name='mainProgram')
    motorDaemon2.METHOD_NAME()
def stop(RV):
    """Stop hook; nothing to do for this generated program."""
    pass
def cleanup(RV):
    """Cleanup hook; nothing to do for this generated program."""
    pass
|
5,741 | text path | from typing import Any, Optional
class Context:
    """Typing stub: all method bodies are intentionally elided (``...``).

    The method set mirrors a 2-D drawing context API (source/state handling,
    path construction, paint/fill/stroke/clip, font and text operations) —
    it appears to be a stub for pycairo's ``cairo.Context``; verify against
    the accompanying package.
    """
    def __init__(self, target: Any) -> None: ...
    def get_target(self): ...
    # --- graphics-state save/restore; usable as a context manager ---
    def save(self) -> None: ...
    def restore(self) -> None: ...
    def __enter__(self): ...
    def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: ...
    def push_group(self) -> None: ...
    def push_group_with_content(self, content: Any) -> None: ...
    def pop_group(self): ...
    def pop_group_to_source(self) -> None: ...
    def get_group_target(self): ...
    # --- source and drawing parameters ---
    def set_source_rgba(
        self, red: float, green: float, blue: float, alpha: float = ...
    ) -> None: ...
    def set_source_rgb(self, red: float, green: float, blue: float) -> None: ...
    def set_source_surface(self, surface: Any, x: int = ..., y: int = ...) -> None: ...
    def set_source(self, source: Any) -> None: ...
    def get_source(self): ...
    def set_antialias(self, antialias: Any) -> None: ...
    def get_antialias(self): ...
    def set_dash(self, dashes: Any, offset: int = ...) -> None: ...
    def get_dash(self): ...
    def get_dash_count(self): ...
    def set_fill_rule(self, fill_rule: Any) -> None: ...
    def get_fill_rule(self): ...
    def set_line_cap(self, line_cap: Any) -> None: ...
    def get_line_cap(self): ...
    def set_line_join(self, line_join: Any) -> None: ...
    def get_line_join(self): ...
    def set_line_width(self, width: Any) -> None: ...
    def get_line_width(self): ...
    def set_miter_limit(self, limit: Any) -> None: ...
    def get_miter_limit(self): ...
    def set_operator(self, operator: Any) -> None: ...
    def get_operator(self): ...
    def set_tolerance(self, tolerance: Any) -> None: ...
    def get_tolerance(self): ...
    # --- coordinate transforms ---
    def translate(self, tx: Any, ty: Any) -> None: ...
    def scale(self, sx: Any, sy: Optional[Any] = ...) -> None: ...
    def rotate(self, radians: Any) -> None: ...
    def transform(self, matrix: Any) -> None: ...
    def set_matrix(self, matrix: Any) -> None: ...
    def get_matrix(self): ...
    def identity_matrix(self) -> None: ...
    def user_to_device(self, x: Any, y: Any): ...
    def user_to_device_distance(self, dx: Any, dy: Any): ...
    def device_to_user(self, x: Any, y: Any): ...
    def device_to_user_distance(self, dx: Any, dy: Any): ...
    # --- path construction ---
    def has_current_point(self): ...
    def get_current_point(self): ...
    def new_path(self) -> None: ...
    def new_sub_path(self) -> None: ...
    def move_to(self, x: Any, y: Any) -> None: ...
    def rel_move_to(self, dx: Any, dy: Any) -> None: ...
    def line_to(self, x: Any, y: Any) -> None: ...
    def rel_line_to(self, dx: Any, dy: Any) -> None: ...
    def rectangle(self, x: Any, y: Any, width: Any, height: Any) -> None: ...
    def arc(self, xc: Any, yc: Any, radius: Any, angle1: Any, angle2: Any) -> None: ...
    def arc_negative(self, xc: Any, yc: Any, radius: Any, angle1: Any, angle2: Any) -> None: ...
    def curve_to(self, x1: Any, y1: Any, x2: Any, y2: Any, x3: Any, y3: Any) -> None: ...
    def rel_curve_to(
        self, dx1: Any, dy1: Any, dx2: Any, dy2: Any, dx3: Any, dy3: Any
    ) -> None: ...
    def METHOD_NAME(self, text: Any) -> None: ...
    def glyph_path(self, glyphs: Any) -> None: ...
    def close_path(self) -> None: ...
    def copy_path(self): ...
    def copy_path_flat(self): ...
    def append_path(self, path: Any) -> None: ...
    def path_extents(self): ...
    # --- painting, filling, stroking, clipping ---
    def paint(self) -> None: ...
    def paint_with_alpha(self, alpha: Any) -> None: ...
    def mask(self, pattern: Any) -> None: ...
    def mask_surface(self, surface: Any, surface_x: int = ..., surface_y: int = ...) -> None: ...
    def fill(self) -> None: ...
    def fill_preserve(self) -> None: ...
    def fill_extents(self): ...
    def in_fill(self, x: Any, y: Any): ...
    def stroke(self) -> None: ...
    def stroke_preserve(self) -> None: ...
    def stroke_extents(self): ...
    def in_stroke(self, x: Any, y: Any): ...
    def clip(self) -> None: ...
    def clip_preserve(self) -> None: ...
    def clip_extents(self): ...
    def copy_clip_rectangle_list(self): ...
    def in_clip(self, x: Any, y: Any): ...
    def reset_clip(self) -> None: ...
    # --- fonts and text ---
    def select_font_face(
        self, family: str = ..., slant: Any = ..., weight: Any = ...
    ) -> None: ...
    def set_font_face(self, font_face: Any) -> None: ...
    def get_font_face(self): ...
    def set_font_size(self, size: Any) -> None: ...
    def set_font_matrix(self, matrix: Any) -> None: ...
    def get_font_matrix(self): ...
    def set_font_options(self, font_options: Any) -> None: ...
    def get_font_options(self): ...
    def set_scaled_font(self, scaled_font: Any) -> None: ...
    def get_scaled_font(self): ...
    def font_extents(self): ...
    def text_extents(self, text: Any): ...
    def glyph_extents(self, glyphs: Any): ...
    def show_text(self, text: Any) -> None: ...
    def show_glyphs(self, glyphs: Any) -> None: ...
    def show_text_glyphs(
        self, text: Any, glyphs: Any, clusters: Any, cluster_flags: int = ...
    ) -> None: ...
    # --- page and tagged-structure control ---
    def show_page(self) -> None: ...
    def copy_page(self) -> None: ...
    def tag_begin(self, tag_name: Any, attributes: Optional[Any] = ...) -> None: ...
    def tag_end(self, tag_name: Any) -> None: ...
5,742 | put | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for filesystemio."""
# pytype: skip-file
import io
import logging
import multiprocessing
import os
import threading
import unittest
from apache_beam.io import filesystemio
_LOGGER = logging.getLogger(__name__)
class FakeDownloader(filesystemio.Downloader):
    """In-memory Downloader double that records the size of the last read."""

    def __init__(self, data):
        self._data = data
        self.last_read_size = -1

    @property
    def size(self):
        """Total number of bytes available for download."""
        return len(self._data)

    def get_range(self, start, end):
        """Return bytes [start, end) and remember how many were requested."""
        self.last_read_size = end - start
        return self._data[start:end]
class FakeUploader(filesystemio.Uploader):
    """In-memory Uploader double accumulating everything written to it."""

    def __init__(self):
        self.data = b''
        self.last_write_size = -1
        self.finished = False

    def last_error(self):
        """No failures are ever simulated by this double."""
        return None

    def METHOD_NAME(self, data):
        """Append *data* (a memoryview); not allowed after finish()."""
        assert not self.finished
        chunk = data.tobytes()
        self.data += chunk
        self.last_write_size = len(data)

    def finish(self):
        """Mark the upload complete; further writes are rejected."""
        self.finished = True
class TestDownloaderStream(unittest.TestCase):
    """Tests for DownloaderStream wrapping a (fake) Downloader."""

    def test_file_attributes(self):
        """The stream advertises read-only, seekable binary mode."""
        downloader = FakeDownloader(data=None)
        stream = filesystemio.DownloaderStream(downloader)
        self.assertEqual(stream.mode, 'rb')
        self.assertTrue(stream.readable())
        self.assertFalse(stream.writable())
        self.assertTrue(stream.seekable())

    def test_read_empty(self):
        """Reading an empty download yields b''."""
        downloader = FakeDownloader(data=b'')
        stream = filesystemio.DownloaderStream(downloader)
        self.assertEqual(stream.read(), b'')

    def test_read(self):
        data = b'abcde'
        downloader = FakeDownloader(data)
        stream = filesystemio.DownloaderStream(downloader)
        # Read size is exactly what was passed to read() (unbuffered).
        self.assertEqual(stream.read(1), data[0:1])
        self.assertEqual(downloader.last_read_size, 1)
        self.assertEqual(stream.read(), data[1:])
        self.assertEqual(downloader.last_read_size, len(data) - 1)

    def test_read_buffered(self):
        data = b'abcde'
        downloader = FakeDownloader(data)
        buffer_size = 2
        stream = io.BufferedReader(
            filesystemio.DownloaderStream(downloader), buffer_size)
        # Verify that buffering works and is reading ahead.
        self.assertEqual(stream.read(1), data[0:1])
        self.assertEqual(downloader.last_read_size, buffer_size)
        self.assertEqual(stream.read(), data[1:])
class TestUploaderStream(unittest.TestCase):
    """Tests for UploaderStream wrapping a (fake) Uploader."""

    def test_file_attributes(self):
        """The stream advertises write-only, non-seekable binary mode."""
        uploader = FakeUploader()
        stream = filesystemio.UploaderStream(uploader)
        self.assertEqual(stream.mode, 'wb')
        self.assertFalse(stream.readable())
        self.assertTrue(stream.writable())
        self.assertFalse(stream.seekable())

    def test_write_empty(self):
        """Writing an empty buffer leaves the uploader empty."""
        uploader = FakeUploader()
        stream = filesystemio.UploaderStream(uploader)
        data = b''
        stream.write(memoryview(data))
        self.assertEqual(uploader.data, data)

    def test_write(self):
        data = b'abcde'
        uploader = FakeUploader()
        stream = filesystemio.UploaderStream(uploader)
        # Unbuffered writes.
        stream.write(memoryview(data[0:1]))
        self.assertEqual(uploader.data[0], data[0])
        self.assertEqual(uploader.last_write_size, 1)
        stream.write(memoryview(data[1:]))
        self.assertEqual(uploader.data, data)
        self.assertEqual(uploader.last_write_size, len(data) - 1)

    def test_write_buffered(self):
        data = b'abcde'
        uploader = FakeUploader()
        buffer_size = 2
        stream = io.BufferedWriter(
            filesystemio.UploaderStream(uploader), buffer_size)
        # Verify that buffering works: doesn't write to uploader until buffer is
        # filled.
        stream.write(data[0:1])
        self.assertEqual(-1, uploader.last_write_size)
        stream.write(data[1:])
        stream.close()
        self.assertEqual(data, uploader.data)
class TestPipeStream(unittest.TestCase):
    """Tests for PipeStream over a multiprocessing pipe."""

    def _read_and_verify(self, stream, expected, buffer_size, success):
        """Worker: read the whole stream and compare against *expected*.

        success[0] is set True only when every assertion passed (assertions
        raised in this worker thread would otherwise be invisible).
        """
        data_list = []
        bytes_read = 0
        seen_last_block = False
        while True:
            data = stream.read(buffer_size)
            self.assertLessEqual(len(data), buffer_size)
            if len(data) < buffer_size:
                # Test the constraint that the pipe stream returns less than the buffer
                # size only when at the end of the stream.
                if data:
                    self.assertFalse(seen_last_block)
                seen_last_block = True
            if not data:
                break
            data_list.append(data)
            bytes_read += len(data)
            self.assertEqual(stream.tell(), bytes_read)
        self.assertEqual(b''.join(data_list), expected)
        success[0] = True

    def _read_and_seek(self, stream, expected, buffer_size, success):
        """Worker: like _read_and_verify, but also rewinds after each read
        and checks that out-of-window seeks raise NotImplementedError."""
        data_list = []
        bytes_read = 0
        while True:
            data = stream.read(buffer_size)
            # Test bad seek positions.
            with self.assertRaises(NotImplementedError):
                stream.seek(bytes_read + 1)
            with self.assertRaises(NotImplementedError):
                stream.seek(bytes_read - 1)
            # Rewind stream and test that it reads back the same data again.
            stream.seek(bytes_read)
            data2 = stream.read(buffer_size)
            self.assertEqual(data, data2)
            if not data:
                break
            data_list.append(data)
            bytes_read += len(data)
            self.assertEqual(stream.tell(), bytes_read)
        self.assertEqual(len(b''.join(data_list)), len(expected))
        self.assertEqual(b''.join(data_list), expected)
        success[0] = True

    def test_pipe_stream(self):
        """Feed random blocks of growing size through the pipe and verify
        both reader strategies for several buffer sizes."""
        block_sizes = list(4**i for i in range(0, 12))
        data_blocks = list(os.urandom(size) for size in block_sizes)
        expected = b''.join(data_blocks)
        buffer_sizes = [100001, 512 * 1024, 1024 * 1024]
        for buffer_size in buffer_sizes:
            for target in [self._read_and_verify, self._read_and_seek]:
                _LOGGER.info('buffer_size=%s, target=%s' % (buffer_size, target))
                parent_conn, child_conn = multiprocessing.Pipe()
                stream = filesystemio.PipeStream(child_conn)
                success = [False]
                child_thread = threading.Thread(
                    target=target, args=(stream, expected, buffer_size, success))
                child_thread.start()
                for data in data_blocks:
                    parent_conn.send_bytes(data)
                parent_conn.close()
                child_thread.join()
                self.assertTrue(success[0], 'error in test thread')

    def test_pipe_stream_rewind_buffer(self):
        """Seek(0) must replay the already-consumed rewind buffer."""
        buffer_size = 512
        data = os.urandom(buffer_size)
        parent_conn, child_conn = multiprocessing.Pipe()
        parent_conn.send_bytes(data)
        parent_conn.close()
        stream = filesystemio.PipeStream(child_conn)
        # Regular read.
        read_data = stream.read(buffer_size)
        self.assertEqual(data, read_data)
        # Rewind buffer_size bytes.
        stream.seek(0)
        read_data = stream.read(buffer_size)
        self.assertEqual(data, read_data)
        # Read 0 bytes. Rewind buffer still points to offset 0.
        read_data = stream.read(buffer_size)
        self.assertFalse(read_data)
        stream.seek(0)
        read_data = stream.read(buffer_size)
        self.assertEqual(data, read_data)
if __name__ == '__main__':
    # Run the suite with INFO logging when executed directly.
    logging.getLogger().setLevel(logging.INFO)
    unittest.main()
5,743 | handler | #
# SPDX-FileCopyrightText:
# Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""This is a helper module for distributed training.
The code uses an official implementation of
distributed data parallel launcher as just a reference.
https://github.com/pytorch/pytorch/blob/v1.8.2/torch/distributed/launch.py
One main difference is this code focuses on
launching simple function with given arguments.
"""
import multiprocessing
import os
import signal
import socket
import time
if hasattr(signal, "valid_signals"):
_signalno_name_map = {
s.value: s.name for s in signal.valid_signals() if isinstance(s, signal.Signals)
}
else:
# TODO(lazykyama): It should be deprecated
# once Python 3.7 is removed from supported platform.
_signalno_name_map = dict(
[
(1, "SIGHUP"),
(2, "SIGINT"),
(3, "SIGQUIT"),
(4, "SIGILL"),
(5, "SIGTRAP"),
(6, "SIGABRT"),
(7, "SIGBUS"),
(8, "SIGFPE"),
(9, "SIGKILL"),
(10, "SIGUSR1"),
(11, "SIGSEGV"),
(12, "SIGUSR2"),
(13, "SIGPIPE"),
(14, "SIGALRM"),
(15, "SIGTERM"),
(17, "SIGCHLD"),
(18, "SIGCONT"),
(19, "SIGSTOP"),
(20, "SIGTSTP"),
(21, "SIGTTIN"),
(22, "SIGTTOU"),
(23, "SIGURG"),
(24, "SIGXCPU"),
(25, "SIGXFSZ"),
(26, "SIGVTALRM"),
(27, "SIGPROF"),
(28, "SIGWINCH"),
(29, "SIGIO"),
(30, "SIGPWR"),
(31, "SIGSYS"),
(34, "SIGRTMIN"),
(64, "SIGRTMAX"),
]
)
class WorkerError(multiprocessing.ProcessError):
    """Raised when a launched worker process exits with a non-zero code."""

    def __init__(self, *, msg, exitcode, worker_id):
        """Store the failing worker's exit code and id alongside *msg*."""
        super().__init__(msg)
        self._exitcode = exitcode
        self._worker_id = worker_id

    def __str__(self):
        """Summarize which worker failed and with which exit code."""
        return f"worker[{self._worker_id}] failed with exitcode={self._exitcode}"

    @property
    def exitcode(self):
        """Exit code reported by the failed worker process."""
        return self._exitcode

    @property
    def worker_id(self):
        """Index of the worker whose process caused this error."""
        return self._worker_id
class MainProcessError(multiprocessing.ProcessError):
    """Raised when the main (launcher) process is stopped by a signal."""

    def __init__(self, *, signal_no):
        """Build a message from the signal's name and its description."""
        name = _signalno_name_map[signal_no]
        msg = f"{name} received, exiting due to {signal.strsignal(signal_no)}."
        super().__init__(msg)
        self._signal_no = signal_no
        self._msg = msg

    def __str__(self):
        """Return the precomputed error message."""
        return self._msg

    @property
    def signal_no(self):
        """Number of the signal that stopped the main process."""
        return self._signal_no
def set_start_method(method):
    """Set the multiprocessing start method.

    Args:
        method: One of "fork", "spawn" or "forkserver".

    Raises:
        ValueError: If *method* is not a supported start method.  (This
            was previously an ``assert``, which silently disappears under
            ``python -O``; a real exception validates unconditionally.)
    """
    if method not in ("fork", "spawn", "forkserver"):
        raise ValueError(f"unsupported start method: {method!r}")
    return multiprocessing.set_start_method(method)
def free_port():
    """Return a TCP port that was free at the moment of the call.

    The port is discovered by binding an ephemeral socket, so another
    process may claim it between this call and its actual use; emptiness
    is therefore not guaranteed.
    """
    # Adapted from ESPnet v2's utility:
    # https://github.com/espnet/espnet/blob/43ce0c69fb32961235534b348700dc6c74ad5792/espnet2/train/distributed_utils.py#L187-L198
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    with probe:
        probe.bind(("", 0))
        _, port = probe.getsockname()
    return port
def _kill_processes(processes):
# TODO(lazykyama): This implementation can't stop all processes
# which have grandchildren processes launched
# within each child process directly forked from this script.
# Need improvement for more safe termination.
for p in processes:
try:
# NOTE: multiprocessing.Process.kill() was introduced in 3.7.
# https://docs.python.org/3.7/library/multiprocessing.html#multiprocessing.Process.kill
if not hasattr(p, "kill"):
p.terminate()
else:
p.kill()
except Exception: # noqa: E722
# NOTE: Ignore any exception happens during killing a process
# because this intends to send kill signal to *all* processes.
pass
def launch(func, args, nprocs, master_addr="localhost", master_port=None):
    """Launch *nprocs* processes running ``func(args)`` and monitor them.

    Sets the PyTorch-distributed environment variables (WORLD_SIZE,
    MASTER_ADDR/PORT, RANK, LOCAL_RANK) before each fork, restores the
    original environment afterwards, and polls workers once a second.

    Raises:
        WorkerError: When any worker exits non-zero (all others are killed).
        MainProcessError: From the signal handler on SIGINT/SIGTERM.

    .. note:: Current implementaiton supports only single node case.
    """
    if master_port is None:
        master_port = free_port()
    # Set PyTorch distributed related environmental variables
    # NOTE: in contrast to subprocess.Popen,
    # explicit environment variables can not be specified.
    # It's necessary to add additional variables to
    # current environment variable list.
    original_env = os.environ.copy()
    # TODO(lazykyama): multi-node support
    os.environ["WORLD_SIZE"] = str(nprocs)
    os.environ["MASTER_ADDR"] = master_addr
    os.environ["MASTER_PORT"] = str(master_port)
    processes = []
    for local_rank in range(nprocs):
        # Each process's rank is injected via the environment and inherited
        # by the child at start().
        # TODO(lazykyama): multi-node support
        os.environ["RANK"] = str(local_rank)
        os.environ["LOCAL_RANK"] = str(local_rank)
        process = multiprocessing.Process(target=func, args=(args,))
        process.start()
        processes.append(process)
    # Set signal handler to capture signals sent to main process,
    # and ensure that all children processes will be terminated.
    def METHOD_NAME(signal_no, _):
        _kill_processes(processes)
        raise MainProcessError(signal_no=signal_no)
    signal.signal(signal.SIGINT, METHOD_NAME)
    signal.signal(signal.SIGTERM, METHOD_NAME)
    # Recovery environment variables.
    os.environ.clear()
    os.environ.update(original_env)
    # Monitor all workers, polling once a second until all have exited.
    worker_error = None
    finished_process_ids = set()
    while len(processes) > len(finished_process_ids):
        for localrank, p in enumerate(processes):
            if p.pid in finished_process_ids:
                # Skip rest of checks because
                # this process has been already finished.
                continue
            if p.is_alive():
                # This process is still running.
                continue
            elif p.exitcode == 0:
                # This process properly finished.
                finished_process_ids.add(p.pid)
            else:
                # An error happens in one process.
                # Will try to terminate all other processes.
                worker_error = WorkerError(
                    msg=(f"{func.__name__} failed with error code: {p.exitcode}"),
                    exitcode=p.exitcode,
                    worker_id=localrank,
                )
                break
        if worker_error is not None:
            # Go out of this while loop to terminate all processes.
            break
        time.sleep(1.0)
    if worker_error is not None:
        # Trying to stop all workers.
        _kill_processes(processes)
        raise worker_error
5,744 | fixture strict redis | # pylint: disable=redefined-outer-name
from unittest.mock import patch, Mock
import pytest
from redis import Redis
from gsy_framework.redis_channels import AggregatorChannels
from gsy_e.gsy_e_core.redis_connections.aggregator import AggregatorHandler
from gsy_e.gsy_e_core.redis_connections.area_market import ExternalConnectionCommunicator
@pytest.fixture(scope="function", autouse=True)
def METHOD_NAME():
    """Auto-used fixture: replace the Redis client with a spec'd mock
    for the duration of each test."""
    with patch("gsy_e.gsy_e_core.redis_connections.area_market.Redis",
               spec=Redis):
        yield
@pytest.fixture(scope="function", autouse=True)
def fixture_aggregator_handler():
    """Auto-used fixture: replace AggregatorHandler with a spec'd mock
    for the duration of each test."""
    with patch(
            "gsy_e.gsy_e_core.redis_connections.area_market.AggregatorHandler",
            spec=AggregatorHandler):
        yield
@pytest.fixture(scope="function", name="enabled_communicator")
def fixture_enabled_communicator():
    """Communicator with the external connection enabled (Redis mocked)."""
    return ExternalConnectionCommunicator(is_enabled=True)
@pytest.fixture(scope="function", name="disabled_communicator")
def fixture_disabled_communicator():
    """Communicator with the external connection disabled."""
    return ExternalConnectionCommunicator(is_enabled=False)
class TestExternalConnectionCommunicator:
    """Behavioral tests: every public method must be a no-op when the
    communicator is disabled and delegate properly when enabled."""

    @staticmethod
    def test_init(enabled_communicator, disabled_communicator):
        """Disabled: no aggregator/redis attributes; enabled: all present."""
        assert not disabled_communicator.is_enabled
        assert disabled_communicator.aggregator is None
        assert not hasattr(disabled_communicator, "redis_db")
        assert not hasattr(disabled_communicator, "channel_callback_dict")
        assert enabled_communicator.is_enabled
        assert enabled_communicator.aggregator is not None
        assert hasattr(enabled_communicator, "redis_db")
        assert hasattr(enabled_communicator, "channel_callback_dict")

    @staticmethod
    def test_sub_to_channel(enabled_communicator, disabled_communicator):
        """Only the enabled communicator subscribes the callback."""
        callback = Mock()
        disabled_communicator.sub_to_channel(channel="channel", callback=callback)
        assert not hasattr(disabled_communicator, "pubsub")
        enabled_communicator.sub_to_channel(channel="channel", callback=callback)
        enabled_communicator.pubsub.subscribe.assert_called_once_with(**{"channel": callback})

    @staticmethod
    def test_sub_to_multiple_channels(enabled_communicator, disabled_communicator):
        """Only the enabled communicator subscribes the channel mapping."""
        callback = Mock()
        disabled_communicator.sub_to_multiple_channels({"channel": callback})
        assert not hasattr(disabled_communicator, "pubsub")
        enabled_communicator.sub_to_multiple_channels({"channel": callback})
        enabled_communicator.pubsub.subscribe.assert_called_once_with(**{"channel": callback})

    @staticmethod
    def test_start_communication(enabled_communicator, disabled_communicator):
        """The pubsub thread only starts when a subscription exists."""
        disabled_communicator.start_communication()
        assert not hasattr(disabled_communicator, "pubsub")
        enabled_communicator.pubsub.subscribed = False
        enabled_communicator.start_communication()
        enabled_communicator.pubsub.run_in_thread.assert_not_called()
        enabled_communicator.pubsub.subscribed = True
        enabled_communicator.start_communication()
        enabled_communicator.pubsub.run_in_thread.assert_called_once()

    @staticmethod
    def test_sub_to_aggregator(enabled_communicator, disabled_communicator):
        """Enabled communicator pattern-subscribes both aggregator channels."""
        disabled_communicator.sub_to_aggregator()
        assert not hasattr(disabled_communicator, "pubsub")
        channel_names = AggregatorChannels()
        channel_callback_dict = {
            channel_names.batch_commands:
                enabled_communicator.aggregator.receive_batch_commands_callback,
            channel_names.commands: enabled_communicator.aggregator.aggregator_callback
        }
        enabled_communicator.sub_to_aggregator()
        enabled_communicator.pubsub.psubscribe.assert_called_once_with(
            **channel_callback_dict
        )

    @staticmethod
    def test_approve_aggregator_commands(enabled_communicator, disabled_communicator):
        """Approval is delegated to the aggregator only when enabled."""
        disabled_communicator.approve_aggregator_commands()
        assert disabled_communicator.aggregator is None
        enabled_communicator.approve_aggregator_commands()
        enabled_communicator.aggregator.approve_batch_commands.assert_called_once()

    @staticmethod
    def test_publish_aggregator_commands_responses_events(
            enabled_communicator, disabled_communicator):
        """Publishing responses/events is delegated only when enabled."""
        disabled_communicator.publish_aggregator_commands_responses_events()
        assert disabled_communicator.aggregator is None
        enabled_communicator.publish_aggregator_commands_responses_events()
        enabled_communicator.aggregator.publish_all_commands_responses.assert_called_once()
        enabled_communicator.aggregator.publish_all_events.assert_called_once()
5,745 | test feed updated | from datetime import datetime, timedelta
from unittest import mock
import pytest
from h.feeds import atom
def test_feed_id():
    """The feed id should be the Atom URL the feed was built for."""
    feed = atom.feed_from_annotations([], "atom_url", mock.Mock())
    assert feed["id"] == "atom_url"
def test_feed_title():
    """The feed title should be the title keyword argument."""
    feed = atom.feed_from_annotations([], mock.Mock(), mock.Mock(), title="foo")
    assert feed["title"] == "foo"
def test_feed_subtitle():
    """The feed subtitle should be the subtitle keyword argument."""
    feed = atom.feed_from_annotations([], mock.Mock(), mock.Mock(), subtitle="bar")
    assert feed["subtitle"] == "bar"
@mock.patch("h.feeds.atom._feed_entry_from_annotation")
def test_feed_contains_entries(_feed_entry_from_annotation, factories):
    """The feed should contain an entry for each annotation."""
    annotations = [
        factories.Annotation(),
        factories.Annotation(),
        factories.Annotation(),
    ]
    annotations_url_function = mock.Mock()
    annotations_api_url_function = mock.Mock()
    entries = [
        "feed entry for annotation 1",
        "feed entry for annotation 2",
        "feed entry for annotation 3",
    ]

    # Side effect pops one canned entry per call, so entry order in the
    # feed mirrors annotation order.
    def pop(*args, **kwargs):  # pylint:disable=unused-argument
        return entries.pop(0)

    _feed_entry_from_annotation.side_effect = pop
    feed = atom.feed_from_annotations(
        annotations, annotations_url_function, annotations_api_url_function
    )
    assert feed["entries"] == [
        "feed entry for annotation 1",
        "feed entry for annotation 2",
        "feed entry for annotation 3",
    ]
def test_atom_url_link():
    """The feed should contain a link to its Atom URL."""
    feed = atom.feed_from_annotations([], "atom_url", mock.Mock())
    assert feed["links"][0] == {
        "rel": "self",
        "type": "application/atom+xml",
        "href": "atom_url",
    }
def test_html_url_link():
    """The feed should contain a link to its corresponding HTML page."""
    feed = atom.feed_from_annotations([], mock.Mock(), mock.Mock(), html_url="html_url")
    assert feed["links"][1] == {
        "rel": "alternate",
        "type": "text/html",
        "href": "html_url",
    }
@mock.patch("h.feeds.util")
def test_entry_id(util, factories):
    """The ids of feed entries should come from tag_uri_for_annotation()."""
    annotation = factories.Annotation()
    feed = atom.feed_from_annotations(
        [annotation], "atom_url", lambda _: "annotation url"
    )
    util.tag_uri_for_annotation.assert_called_once()
    assert feed["entries"][0]["id"] == util.tag_uri_for_annotation.return_value
@pytest.mark.parametrize(
    "userid,name",
    (
        ("acct:username@hypothes.is", "username"),
        # A malformed userid is passed through unchanged.
        ("malformed", "malformed"),
    ),
)
def test_entry_author(factories, userid, name):
    """The authors of entries should come from the annotation usernames."""
    annotation = factories.Annotation(userid=userid)
    feed = atom.feed_from_annotations(
        [annotation], "atom_url", lambda _: "annotation url"
    )
    assert feed["entries"][0]["author"]["name"] == name
def test_entry_title(factories):
    """The titles of feed entries should come from annotation.title."""
    # Patch the presenter's title property so we can assert it was consulted.
    with mock.patch(
        "h.feeds.atom.presenters.AnnotationHTMLPresenter.title",
        new_callable=mock.PropertyMock,
    ) as mock_title:
        annotation = factories.Annotation()
        feed = atom.feed_from_annotations(
            [annotation], "atom_url", lambda _: "annotation url"
        )
        mock_title.assert_called_once_with()
        assert feed["entries"][0]["title"] == mock_title.return_value
def test_entry_dates(factories):
    """Entry published/updated come from the annotation's timestamps,
    serialized through the (stubbed) utc_iso8601 helper."""
    annotation = factories.Annotation(
        created=datetime.utcnow(), updated=datetime.utcnow() + timedelta(hours=1)
    )
    feed = atom.feed_from_annotations(
        [annotation], "atom_url", lambda annotation: "annotation url"
    )
    assert feed["entries"][0]["published"] == f"utc_iso8601_return:{annotation.created}"
    assert feed["entries"][0]["updated"] == f"utc_iso8601_return:{annotation.updated}"
def test_entry_content(factories):
    """The contents of entries come from annotation.description."""
    with mock.patch(
        "h.feeds.atom.presenters.AnnotationHTMLPresenter.description",
        new_callable=mock.PropertyMock,
    ) as mock_description:
        annotation = factories.Annotation()
        feed = atom.feed_from_annotations(
            [annotation], "atom_url", lambda annotation: "annotation url"
        )
        mock_description.assert_called_once_with()
        assert feed["entries"][0]["content"] == mock_description.return_value
@mock.patch("h.feeds.util")
def test_annotation_url_links(_, factories):
    """Entries should contain links to the HTML pages for the annotations."""
    annotation = factories.Annotation()
    annotation_url = mock.Mock()
    feed = atom.feed_from_annotations([annotation], "atom_url", annotation_url)
    annotation_url.assert_called_once_with(annotation)
    assert feed["entries"][0]["links"][0] == {
        "rel": "alternate",
        "type": "text/html",
        "href": annotation_url.return_value,
    }
@mock.patch("h.feeds.util")
def test_annotation_api_url_links(_, factories):
    """Entries should contain links to the JSON pages for the annotations."""
    annotation = factories.Annotation()
    annotation_api_url = mock.Mock()
    feed = atom.feed_from_annotations(
        [annotation], "atom_url", mock.Mock(), annotation_api_url=annotation_api_url
    )
    annotation_api_url.assert_called_once_with(annotation)
    assert feed["entries"][0]["links"][1] == {
        "rel": "alternate",
        "type": "application/json",
        "href": annotation_api_url.return_value,
    }
def METHOD_NAME(factories):
    """The feed-level 'updated' field tracks the newest annotation."""
    annotations = factories.Annotation.build_batch(3)
    annotations[0].updated = datetime.utcnow()
    feed = atom.feed_from_annotations(
        annotations, "atom_url", lambda annotation: "annotation url"
    )
    assert feed["updated"] == f"utc_iso8601_return:{annotations[0].updated}"
@pytest.fixture(autouse=True)
def utc_iso8601(patch):
    """Auto-used fixture stubbing utc_iso8601 with a recognizable,
    deterministic string so tests can assert on serialized dates."""
    utc_iso8601 = patch("h.feeds.atom.utc_iso8601")
    utc_iso8601.side_effect = lambda date: f"utc_iso8601_return:{date}"
    return utc_iso8601
5,746 | add request | import logging
import requests
import os
import json
import re
from flask_login import current_user
from .app import use_predefined_user, app, use_tests_folder, verify_ssl
class AttrDict(dict):
    """Dictionary whose items are also reachable as attributes.

    Sharing ``__dict__`` with the mapping itself makes ``d.key`` and
    ``d["key"]`` equivalent for both reads and writes.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.__dict__ = self
def parseDict(data, socketio=None, httpManager=None):
    """Register the HTTP endpoints described by *data* on an HTTPManager.

    Args:
        data: Mapping of base URL -> list of request-spec tuples, each tuple
            holding positional arguments for HTTPRequest.METHOD_NAME.
        socketio: Socket.IO server used to build a new manager when
            *httpManager* is not supplied.
        httpManager: Existing manager to extend; created when None.

    Returns:
        The HTTPManager containing one HTTPRequest service per URL.

    Raises:
        ValueError: If both *socketio* and *httpManager* are None.
    """
    # Explicit check; the old `(socketio or httpManager) is None` relied on
    # short-circuit evaluation and obscured the "both missing" intent.
    if socketio is None and httpManager is None:
        raise ValueError("socketio and httpManager are none.")
    httpManager = httpManager or HTTPManager(socketio=socketio)
    for url, requestSpecs in data.items():
        http = HTTPRequest(url)
        for spec in requestSpecs:
            http.METHOD_NAME(*spec)
        httpManager.addService(http)
    return httpManager
class HTTPRequest:
    """Collection of named REST requests against one base service URL."""

    def __init__(self, url):
        # Base URL this service represents; injected into every payload.
        self.url = url
        # Maps event key -> request configuration dict (see METHOD_NAME).
        self.requestList = {}

    def METHOD_NAME(self, key, url, method="get", beforeFunction=None, afterFunction=None, clear=False):
        """This method adds a request to sciebo RDS.
        Args:
            key ([type]): [description]
            url ([type]): [description]
            method (str, optional): [description]. Defaults to "get".
            beforeFunction ([type], optional): [description]. Defaults to None.
            afterFunction ([type], optional): Function gets response. But not if the name starts with "refresh". Defaults to None.
            clear (bool, optional): True, if the functions should get the response object itself, instead of already json unserialized object. Defaults to False.
        """
        self.requestList[key] = {
            "url": url,
            "method": method,
            "before": beforeFunction,
            "after": afterFunction,
            "giveResponseObject": clear
        }

    def makeRequest(self, key, data=None):
        """Execute the request registered under *key* with payload *data*.

        Returns the response text (possibly transformed by the configured
        after-function), or None for HTTP status >= 300.
        """
        if data is None:
            data = {}
        if isinstance(data, str):
            data = json.loads(data)
        reqConf = self.requestList[key]
        if reqConf["before"] is not None:
            # NOTE(review): a failing before-hook is silently ignored and the
            # original payload is kept — confirm this is the intended
            # best-effort behavior (the bare except also hides real bugs).
            try:
                data = reqConf["before"](data)
            except:
                pass
        if use_predefined_user:
            data["userId"] = os.getenv("DEV_FLASK_USERID")
        else:
            data["userId"] = current_user.userId
        data["url"] = self.url
        app.logger.debug(
            "key: {}, data: {}, req: {}".format(key, data, reqConf))
        # When every payload entry is consumed by a {placeholder} in the URL
        # template, no JSON body is sent.
        sendEmptyData = False
        group = re.findall(r"{\w*}", reqConf["url"])
        app.logger.debug("url: {}, found groups: {}, len groups: {}, len data: {}, equal: {}".format(
            reqConf["url"], group, len(group), len(
                data), len(group) == len(data)
        ))
        if len(group) == len(data):
            sendEmptyData = True
        url = reqConf["url"].format(**data)
        app.logger.debug(f"empty data: {sendEmptyData}")
        parameters = {
            "verify": verify_ssl
        }
        if not sendEmptyData:
            parameters["json"] = data
        app.logger.debug("request url: {}".format(url))
        if use_tests_folder:
            # Test mode: serve a canned JSON dump instead of a live call.
            req = AttrDict({
                "text": open("dumps/{}.json".format(url.split("{}/".format(os.getenv("RDS_INSTALLATION_DOMAIN")))[-1])).read(),
                "status_code": 200,
            })
        else:
            # Dispatch requests.get/post/... dynamically by method name.
            req = getattr(requests, reqConf["method"])(
                url, **parameters
            )
        response = req.text
        app.logger.debug(
            "status_code: {}, content: {}".format(req.status_code, response))
        if req.status_code >= 300:
            return None
        if reqConf["after"] is not None:
            if reqConf["after"].__name__.startswith("refresh"):
                # "refresh*" hooks are fired without the response payload.
                try:
                    reqConf["after"]()
                except:
                    pass
            else:
                # Hand the hook the parsed JSON (or the raw response object
                # when giveResponseObject is set) and re-serialize its result.
                # NOTE(review): failures here silently fall back to the raw
                # response text — confirm that is intended.
                try:
                    data = json.loads(
                        response) if not reqConf["giveResponseObject"] else req
                    response = json.dumps(
                        reqConf["after"](data))
                except:
                    pass
        return response
class HTTPManager:
    """Registry that wires HTTPRequest services onto socketio events and
    dispatches ad-hoc requests to the first service that can serve them."""

    def __init__(self, socketio):
        self.services = []
        self.socketio = socketio

    def addService(self, service: HTTPRequest):
        """Register *service* and bind each of its request keys as a
        socketio event handler.

        Raises ValueError when *service* is not an HTTPRequest.
        """
        if not isinstance(service, HTTPRequest):
            raise ValueError

        def make_handler(event_key):
            # Bind event_key through a closure parameter so each handler
            # keeps its own key (avoids the late-binding loop-variable trap).
            def handler(*handler_args):
                try:
                    return service.makeRequest(event_key, *handler_args)
                except Exception as e:
                    app.logger.error(
                        "make request error: {}".format(e), exc_info=True)
            return handler

        self.services.append(service)
        for event_key in service.requestList.keys():
            self.socketio.on_event(event_key, make_handler(event_key))

    def makeRequest(self, *args, **kwargs):
        """Return the result of the first registered service whose
        makeRequest succeeds; raise ValueError if none does."""
        for candidate in self.services:
            try:
                return candidate.makeRequest(*args, **kwargs)
            except Exception as exc:
                app.logger.error(exc, exc_info=True)
        raise ValueError("no service implements the given url.")
5,747 | get file list | ## @file
# section base class
#
# Copyright (c) 2007-2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
from CommonDataClass.FdfClass import SectionClassObject
from .GenFdsGlobalVariable import GenFdsGlobalVariable
import Common.LongFilePathOs as os, glob
from Common import EdkLogger
from Common.BuildToolError import *
from Common.DataType import *
## section base class
#
#
class Section (SectionClassObject):
    """Base class for FDF section objects used by GenFds."""

    # FDF section keyword -> EFI section type name passed to GenSec.
    SectionType = {
        'RAW'      : 'EFI_SECTION_RAW',
        'FREEFORM' : 'EFI_SECTION_FREEFORM_SUBTYPE_GUID',
        BINARY_FILE_TYPE_PE32 : 'EFI_SECTION_PE32',
        BINARY_FILE_TYPE_PIC  : 'EFI_SECTION_PIC',
        BINARY_FILE_TYPE_TE   : 'EFI_SECTION_TE',
        'FV_IMAGE' : 'EFI_SECTION_FIRMWARE_VOLUME_IMAGE',
        'COMPAT16' : 'EFI_SECTION_COMPATIBILITY16',
        BINARY_FILE_TYPE_DXE_DEPEX : 'EFI_SECTION_DXE_DEPEX',
        BINARY_FILE_TYPE_PEI_DEPEX : 'EFI_SECTION_PEI_DEPEX',
        'GUIDED'   : 'EFI_SECTION_GUID_DEFINED',
        'COMPRESS' : 'EFI_SECTION_COMPRESSION',
        BINARY_FILE_TYPE_UI : 'EFI_SECTION_USER_INTERFACE',
        BINARY_FILE_TYPE_SMM_DEPEX : 'EFI_SECTION_SMM_DEPEX'
    }

    # Binary file type -> output file extension.
    BinFileType = {
        BINARY_FILE_TYPE_GUID : '.guid',
        'ACPI' : '.acpi',
        'ASL'  : '.asl' ,
        BINARY_FILE_TYPE_UEFI_APP : '.app',
        BINARY_FILE_TYPE_LIB : '.lib',
        BINARY_FILE_TYPE_PE32 : '.pe32',
        BINARY_FILE_TYPE_PIC : '.pic',
        BINARY_FILE_TYPE_PEI_DEPEX : '.depex',
        'SEC_PEI_DEPEX' : '.depex',
        BINARY_FILE_TYPE_TE : '.te',
        BINARY_FILE_TYPE_UNI_VER : '.ver',
        BINARY_FILE_TYPE_VER : '.ver',
        BINARY_FILE_TYPE_UNI_UI : '.ui',
        BINARY_FILE_TYPE_UI : '.ui',
        BINARY_FILE_TYPE_BIN : '.bin',
        'RAW' : '.raw',
        'COMPAT16' : '.comp16',
        BINARY_FILE_TYPE_FV : '.fv'
    }

    # File types that are already leaf sections (emitted as '.sec' files).
    SectFileType = {
        'SEC_GUID' : '.sec' ,
        'SEC_PE32' : '.sec' ,
        'SEC_PIC' : '.sec',
        'SEC_TE' : '.sec',
        'SEC_VER' : '.sec',
        'SEC_UI' : '.sec',
        'SEC_COMPAT16' : '.sec',
        'SEC_BIN' : '.sec'
    }

    # GUIDed-section tool GUID -> tool name.
    ToolGuid = {
        '0xa31280ad-0x481e-0x41b6-0x95e8-0x127f-0x4c984779' : 'TianoCompress',
        '0xee4e5898-0x3914-0x4259-0x9d6e-0xdc7b-0xd79403cf' : 'LzmaCompress'
    }

    ## The constructor
    #
    #   @param  self        The object pointer
    #
    def __init__(self):
        SectionClassObject.__init__(self)

    ## GenSection() method
    #
    #   virtual function — concrete section classes override this.
    #   NOTE(review): the original header documented parameters ModuleName and
    #   KeyStringList; the actual parameter names are GuidName and keyStringList.
    #
    #   @param  self        The object pointer
    #   @param  OutputPath  Where to place output file
    #   @param  GuidName    Which module/GUID this section belongs to
    #   @param  SecNum      Index of section
    #   @param  keyStringList  Filter for inputs of section generation
    #   @param  FfsInf      FfsInfStatement object that contains this section data
    #   @param  Dict        dictionary contains macro and its value
    #
    def GenSection(self, OutputPath, GuidName, SecNum, keyStringList, FfsInf = None, Dict = None):
        pass

    ## GetFileList() method
    #
    #   Collect the input files for one section from an INF's binary list.
    #
    #   @param  FfsInf          FfsInfStatement object that contains file list
    #   @param  FileType        File type to get
    #   @param  FileExtension   File extension to get
    #   @param  Dict            dictionary contains macro and its value
    #   @retval tuple           (File list, IsSect boolean)
    #
    def METHOD_NAME(FfsInf, FileType, FileExtension, Dict = None, IsMakefile=False, SectionType=None):
        IsSect = FileType in Section.SectFileType
        # Pick the output suffix: explicit extension wins, then the type maps.
        # NOTE(review): for SEC_* types this looks up Section.SectionType, whose
        # keys do not include SEC_* names, so Suffix becomes None — confirm
        # whether SectFileType was intended here.
        if FileExtension is not None:
            Suffix = FileExtension
        elif IsSect :
            Suffix = Section.SectionType.get(FileType)
        else:
            Suffix = Section.BinFileType.get(FileType)
        if FfsInf is None:
            EdkLogger.error("GenFds", GENFDS_ERROR, 'Inf File does not exist!')
        FileList = []
        if FileType is not None:
            for File in FfsInf.BinFileList:
                # File must match the current arch (or be arch-common) ...
                if File.Arch == TAB_ARCH_COMMON or FfsInf.CurrentArch == File.Arch:
                    # ... and the requested type, with two special cases:
                    # PI >= 1.10 lets SMM_DEPEX satisfy a DXE depex request
                    # (NOTE(review): the literal 'DXE_DPEX' looks like a typo of
                    # 'DXE_DEPEX' — confirm against callers), and a PE32 binary
                    # may satisfy a TE request.
                    if File.Type == FileType or (int(FfsInf.PiSpecVersion, 16) >= 0x0001000A \
                                                 and FileType == 'DXE_DPEX' and File.Type == BINARY_FILE_TYPE_SMM_DEPEX) \
                                             or (FileType == BINARY_FILE_TYPE_TE and File.Type == BINARY_FILE_TYPE_PE32):
                        # Build target must also be in scope (TAB_STAR = any).
                        if TAB_STAR in FfsInf.TargetOverrideList or File.Target == TAB_STAR or File.Target in FfsInf.TargetOverrideList or FfsInf.TargetOverrideList == []:
                            FileList.append(FfsInf.PatchEfiFile(File.Path, File.Type))
                        else:
                            GenFdsGlobalVariable.InfLogger ("\nBuild Target \'%s\' of File %s is not in the Scope of %s specified by INF %s in FDF" %(File.Target, File.File, FfsInf.TargetOverrideList, FfsInf.InfFileName))
                    else:
                        GenFdsGlobalVariable.VerboseLogger ("\nFile Type \'%s\' of File %s in %s is not same with file type \'%s\' from Rule in FDF" %(File.Type, File.File, FfsInf.InfFileName, FileType))
                else:
                    GenFdsGlobalVariable.InfLogger ("\nCurrent ARCH \'%s\' of File %s is not in the Support Arch Scope of %s specified by INF %s in FDF" %(FfsInf.CurrentArch, File.File, File.Arch, FfsInf.InfFileName))
        elif FileType is None and SectionType == BINARY_FILE_TYPE_RAW:
            # RAW sections with no file type: match purely on extension.
            for File in FfsInf.BinFileList:
                if File.Ext == Suffix:
                    FileList.append(File.Path)
        # Fall back to the module's build outputs when no binaries matched.
        if (not IsMakefile and Suffix is not None and os.path.exists(FfsInf.EfiOutputPath)) or (IsMakefile and Suffix is not None):
            if not FileList:
                SuffixMap = FfsInf.GetFinalTargetSuffixMap()
                if Suffix in SuffixMap:
                    FileList.extend(SuffixMap[Suffix])
        # Process the file lists in alphabetical order for a same section type
        if len (FileList) > 1:
            FileList.sort()
        return FileList, IsSect
    METHOD_NAME = staticmethod(METHOD_NAME)
5,748 | get rest api url | # -*- coding: utf-8 -*-
# FLEDGE_BEGIN
# See: http://fledge-iot.readthedocs.io/
# FLEDGE_END
"""Command line JSON parsing utility Class"""
import sys
import json
"""
module name: json_parse.py
This module reads JSON data from STDIN an parse it with argv[1] method
using optional match argv[2]
It prints the requested JSON value.
In case of errors it prints the exception and returns 1 to the caller
Current implemented methods:
- get_rest_api_url() return the REST API url of Fledge
- get_category_item_default() returns the default value of a Fledge category name
- get_category_item_value() returns the value of a Fledge category name
- get_category_key() returns the match for a given category name
- get_config_item_value() returns the configuration item value of a Fledge category name
- get_schedule_id() returns the scheduled_id of a given schedule name
- get_current_schedule_id() returns the scheduled_id of new created schedule name
Usage:
$ echo $JSON_DATA | python3 -m json_parse $method_name $name
"""
__author__ = "Massimiliano Pinto, Ashish Jabble"
__copyright__ = "Copyright (c) 2018 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
# ExtractJson Class
class ExtractJson(object):
    """Extract values from a Fledge JSON document read from STDIN.

    Each public method implements one of the extraction commands documented
    in the module docstring; all of them wrap failures in a uniform error
    message built by set_error_message().
    """

    def __init__(self, json_input, method):
        # json_input: already-deserialized JSON document.
        # method: name of the extraction method, used only for error messages.
        self._json = json_input
        self._method = method

    def set_error_message(self, name, err_exc):
        """Build a uniform error message for exceptions raised by extractors."""
        return "Error parsing JSON in method: {} for '{}' with exception {}:{}".format(
            self._method, name, err_exc.__class__.__name__, str(err_exc))

    def get_rest_api_url_from_pid(self, unused=None):
        """Return the REST API URL from a 'Fledge' PID-file JSON document."""
        try:
            json_data = self._json['adminAPI']
            scheme = json_data['protocol'].lower()
            port = str(json_data['port'])
            # 0.0.0.0 is a bind-all address, not routable; use loopback.
            address = "127.0.0.1" if json_data['addresses'][0] == "0.0.0.0" else json_data['addresses'][0]
            return "{}://{}:{}".format(scheme, address, port)
        except Exception as ex:
            raise Exception(self.set_error_message("Fledge PID", ex))

    def METHOD_NAME(self, unused=None):
        """Return the REST API URL from the 'Fledge Core' services JSON."""
        try:
            service = self._json['services'][0]
            scheme = service['protocol']
            port = str(service['service_port'])
            # Same loopback substitution as above.
            address = "127.0.0.1" if service['address'] == "0.0.0.0" else service['address']
            return "{}://{}:{}".format(scheme, address, port)
        except Exception as ex:
            raise Exception(self.set_error_message("Fledge Core", ex))

    def get_category_item_default(self, item):
        """Return the default value of a Fledge category item."""
        try:
            cat_json = self._json
            return str(cat_json['value'][item]['default']).replace('"', '')
        except Exception as ex:
            raise Exception(self.set_error_message(item, ex))

    def get_category_item_value(self, item):
        """Return the current value of a Fledge category item."""
        # (Fixed copy-pasted comment: this returns 'value', not 'default'.)
        try:
            cat_json = self._json
            return str(cat_json['value'][item]['value']).replace('"', '')
        except Exception as ex:
            raise Exception(self.set_error_message(item, ex))

    def get_category_key(self, key):
        """Return *key* if it matches the document's category key, else ''."""
        try:
            cat_json = self._json
            # If no match return empty string.
            if cat_json['key'] == key:
                return str(cat_json['key']).replace('"', '')
            return str("")
        except Exception as ex:
            # KeyError was previously handled by an identical separate clause;
            # a single Exception handler covers it with the same behavior.
            raise Exception(self.set_error_message(key, ex))

    def get_config_item_value(self, item):
        """Return the value of a configuration item of a category."""
        try:
            cat_json = self._json
            return str(cat_json[item]['value']).replace('"', '')
        except Exception as ex:
            raise Exception(self.set_error_message(item, ex))

    def get_current_schedule_id(self, name):
        """Return the id of a just-created schedule called *name*, or ''."""
        try:
            schedule_json = self._json['schedule']
            if schedule_json['name'] == name:
                return str(schedule_json['id'].replace('"', ''))
            # Name not found: return empty string.
            return str("")
        except Exception as ex:
            raise Exception(self.set_error_message(name, ex))

    def get_schedule_id(self, name):
        """Return the id of schedule *name* from an all-schedules document, or ''."""
        try:
            schedules_json = self._json
            for schedule in schedules_json['schedules']:
                if schedule['name'] == name:
                    # First match wins.  (Removed dead `found` flag that was
                    # assigned but never read.)
                    return str(schedule['id'].replace('"', ''))
            return str("")
        except Exception as ex:
            raise Exception(self.set_error_message(name, ex))
# Main body
# Main body
if __name__ == '__main__':
    try:
        # Read one line of JSON from STDIN; the method name comes from argv[1].
        read_data = sys.stdin.readline()
        method_name = str(sys.argv[1])
        # Instantiate the class with a JSON object from input data
        json_parse = ExtractJson(json.loads(read_data), method_name)
        # Resolve the method by name instead of eval()-ing a constructed
        # string: this removes the arbitrary-code-execution risk of eval on
        # argv, and raises AttributeError for unknown methods so the
        # dedicated handler below actually reports "not implemented yet".
        method = getattr(json_parse, method_name)
        try:
            # Optional single argument comes from argv[2].
            if len(sys.argv) > 2:
                output = method(str(sys.argv[2]))
            else:
                output = method()
            print(output)
        except Exception as err:
            print("ERROR: " + str(err))
            exit(1)
        # Return success
        exit(0)
    except AttributeError:
        print("ERROR: method '" + method_name + "' not implemented yet")
        # Return failure
        exit(1)
    except Exception as exc:
        if len(sys.argv) == 1:
            print("ERROR: " + str(exc))
        else:
            print("ERROR: '" + str(sys.argv[1]) + "', " + str(exc))
        # Return failure
        exit(1)
5,749 | parties | import sys
from _typeshed import ProfileFunction, TraceFunction
from collections.abc import Callable, Iterable, Mapping
from types import TracebackType
from typing import Any, TypeVar
from typing_extensions import final
# Generic type variable used by Condition.wait_for.
_T = TypeVar("_T")

# Public API of the `threading` module (type stub).
__all__ = [
    "get_ident",
    "active_count",
    "Condition",
    "current_thread",
    "enumerate",
    "main_thread",
    "TIMEOUT_MAX",
    "Event",
    "Lock",
    "RLock",
    "Semaphore",
    "BoundedSemaphore",
    "Thread",
    "Barrier",
    "BrokenBarrierError",
    "Timer",
    "ThreadError",
    "setprofile",
    "settrace",
    "local",
    "stack_size",
]

# Names that only exist on newer Python versions.
if sys.version_info >= (3, 8):
    __all__ += ["ExceptHookArgs", "excepthook", "get_native_id"]
if sys.version_info >= (3, 10):
    __all__ += ["getprofile", "gettrace"]
if sys.version_info >= (3, 12):
    __all__ += ["setprofile_all_threads", "settrace_all_threads"]

# Currently installed profile hook, if any (set via setprofile()).
_profile_hook: ProfileFunction | None

def active_count() -> int: ...
def activeCount() -> int: ...  # deprecated alias for active_count()
def current_thread() -> Thread: ...
def currentThread() -> Thread: ...  # deprecated alias for current_thread()
def get_ident() -> int: ...
def enumerate() -> list[Thread]: ...
def main_thread() -> Thread: ...

if sys.version_info >= (3, 8):
    from _thread import get_native_id as get_native_id

def settrace(func: TraceFunction) -> None: ...
def setprofile(func: ProfileFunction | None) -> None: ...

if sys.version_info >= (3, 12):
    def setprofile_all_threads(func: ProfileFunction | None) -> None: ...
    def settrace_all_threads(func: TraceFunction) -> None: ...

if sys.version_info >= (3, 10):
    def gettrace() -> TraceFunction | None: ...
    def getprofile() -> ProfileFunction | None: ...

def stack_size(size: int = ...) -> int: ...

# Largest value accepted by the various timeout parameters.
TIMEOUT_MAX: float

class ThreadError(Exception): ...

class local:
    # Thread-local storage: every attribute resolves per-thread.
    def __getattribute__(self, __name: str) -> Any: ...
    def __setattr__(self, __name: str, __value: Any) -> None: ...
    def __delattr__(self, __name: str) -> None: ...
class Thread:
    # A thread of control; see the `threading` documentation for semantics.
    name: str
    @property
    def ident(self) -> int | None: ...  # None until the thread is started
    daemon: bool
    def __init__(
        self,
        group: None = None,
        target: Callable[..., object] | None = None,
        name: str | None = None,
        args: Iterable[Any] = (),
        kwargs: Mapping[str, Any] | None = None,
        *,
        daemon: bool | None = None,
    ) -> None: ...
    def start(self) -> None: ...
    def run(self) -> None: ...
    def join(self, timeout: float | None = None) -> None: ...
    if sys.version_info >= (3, 8):
        @property
        def native_id(self) -> int | None: ...  # only available on some platforms
    def is_alive(self) -> bool: ...
    if sys.version_info < (3, 9):
        def isAlive(self) -> bool: ...
    # the following methods are all deprecated
    def getName(self) -> str: ...
    def setName(self, name: str) -> None: ...
    def isDaemon(self) -> bool: ...
    def setDaemon(self, daemonic: bool) -> None: ...
class _DummyThread(Thread):
    # Placeholder Thread for threads not created via this module.
    def __init__(self) -> None: ...
@final
class Lock:
    # Primitive (non-reentrant) lock; usable as a context manager.
    def __enter__(self) -> bool: ...
    def __exit__(
        self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None
    ) -> None: ...
    def acquire(self, blocking: bool = ..., timeout: float = ...) -> bool: ...
    def release(self) -> None: ...
    def locked(self) -> bool: ...
@final
class _RLock:
    # Reentrant lock: may be acquired repeatedly by the owning thread.
    def acquire(self, blocking: bool = True, timeout: float = -1) -> bool: ...
    def release(self) -> None: ...
    __enter__ = acquire
    def __exit__(self, t: type[BaseException] | None, v: BaseException | None, tb: TracebackType | None) -> None: ...

# Public factory/alias for the reentrant lock type.
RLock = _RLock
class Condition:
    # Condition variable bound to an (optionally supplied) lock.
    def __init__(self, lock: Lock | _RLock | None = None) -> None: ...
    def __enter__(self) -> bool: ...
    def __exit__(
        self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None
    ) -> None: ...
    def acquire(self, blocking: bool = ..., timeout: float = ...) -> bool: ...
    def release(self) -> None: ...
    def wait(self, timeout: float | None = None) -> bool: ...
    def wait_for(self, predicate: Callable[[], _T], timeout: float | None = None) -> _T: ...
    def notify(self, n: int = 1) -> None: ...
    def notify_all(self) -> None: ...
    def notifyAll(self) -> None: ...  # deprecated alias for notify_all()
class Semaphore:
    # Counting semaphore; release() gained an `n` argument in 3.9.
    _value: int
    def __init__(self, value: int = 1) -> None: ...
    def __exit__(self, t: type[BaseException] | None, v: BaseException | None, tb: TracebackType | None) -> None: ...
    def acquire(self, blocking: bool = True, timeout: float | None = None) -> bool: ...
    def __enter__(self, blocking: bool = True, timeout: float | None = None) -> bool: ...
    if sys.version_info >= (3, 9):
        def release(self, n: int = 1) -> None: ...
    else:
        def release(self) -> None: ...

# Semaphore that raises if released more often than acquired.
class BoundedSemaphore(Semaphore): ...
class Event:
    # One-shot boolean flag that threads can wait on.
    def is_set(self) -> bool: ...
    def isSet(self) -> bool: ...  # deprecated alias for is_set()
    def set(self) -> None: ...
    def clear(self) -> None: ...
    def wait(self, timeout: float | None = None) -> bool: ...
# threading.excepthook and its argument type were added in 3.8; they are
# re-exported here from the _thread implementation module.
if sys.version_info >= (3, 8):
    from _thread import _excepthook, _ExceptHookArgs
    excepthook = _excepthook
    ExceptHookArgs = _ExceptHookArgs
class Timer(Thread):
    # Thread that runs `function` once after `interval` seconds unless cancelled.
    args: Iterable[Any]  # undocumented
    finished: Event  # undocumented
    function: Callable[..., Any]  # undocumented
    interval: float  # undocumented
    kwargs: Mapping[str, Any]  # undocumented
    def __init__(
        self,
        interval: float,
        function: Callable[..., object],
        args: Iterable[Any] | None = None,
        kwargs: Mapping[str, Any] | None = None,
    ) -> None: ...
    def cancel(self) -> None: ...
class Barrier:
    # Synchronization point for a fixed number of threads.
    @property
    def METHOD_NAME(self) -> int: ...  # number of threads required to trip the barrier
    @property
    def n_waiting(self) -> int: ...  # threads currently blocked in wait()
    @property
    def broken(self) -> bool: ...
    def __init__(self, METHOD_NAME: int, action: Callable[[], None] | None = None, timeout: float | None = None) -> None: ...
    def wait(self, timeout: float | None = None) -> int: ...
    def reset(self) -> None: ...
    def abort(self) -> None: ...

# Raised by wait() when the barrier is broken or reset.
class BrokenBarrierError(RuntimeError): ...
5,750 | add mtz column |
from __future__ import absolute_import, division, print_function
import wxtbx.app
from libtbx.utils import Sorry
import wx
import sys
# Column headers and matching pixel widths for the MTZ column list control.
columns = [ "Order", "Label", "#valid", "%valid", "min", "max", "type", ]
column_widths = [ 60, 100, 80, 80, 100, 100, 100 ]
# Common sizer flags: pad all sides and centre items vertically.
aln_flags = wx.ALL|wx.ALIGN_CENTER_VERTICAL
class MtzInspectionFrame(wx.Frame):
    """Top-level window that displays the contents of a single MTZ file."""

    def __init__(self, *args, **kwds):
        wx.Frame.__init__(self, *args, **kwds)
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.SetSizer(self.sizer)
        self.panel = MtzContentsPanel(self)
        self.sizer.Add(self.panel, 1, wx.ALL|wx.EXPAND, 0)
        self.sizer.Fit(self.panel)
        self.Fit()

    def SetMtzFile(self, *args, **kwds):
        # Delegate to the contents panel, then re-fit the frame to it.
        self.panel.SetMtzFile(*args, **kwds)
        self.sizer.Fit(self.panel)
        self.Fit()

    def OnOpen(self, event):
        # Prompt the user to pick an MTZ file and load it on confirmation.
        from wxtbx import path_dialogs
        file_name = path_dialogs.manager().select_file(
            parent=self,
            message="Choose an MTZ file to view",
            wildcard="MTZ files (*.mtz)|*.mtz")
        if (file_name is not None):
            self.SetMtzFile(file_name)
class MtzContentsPanel(wx.Panel):
    """Panel showing file-level MTZ metadata plus a dataset chooser."""

    def __init__(self, *args, **kwds):
        wx.Panel.__init__(self, *args, **kwds)
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.SetSizer(self.sizer)
        # Two-column grid of static labels: title, space group, resolution,
        # and the dataset selector.
        grid1 = wx.FlexGridSizer(cols=2)
        lbl1 = wx.StaticText(self, label="Title:")
        self.title_txt = wx.StaticText(self, label="", size=(300,-1))
        lbl2 = wx.StaticText(self, label="Space group:")
        self.sg_txt = wx.StaticText(self, label="", size=(300,-1))
        lbl3 = wx.StaticText(self, label="Resolution:")
        self.d_max_min_txt = wx.StaticText(self, label="", size=(300,-1))
        lbl4 = wx.StaticText(self, label="Select dataset:")
        self.dataset_chooser = wx.Choice(self, size=(400,-1))
        self.Bind(wx.EVT_CHOICE, self.OnChooseDataset, self.dataset_chooser)
        grid1.Add(lbl1, 0, aln_flags, 5)
        grid1.Add(self.title_txt, 0, aln_flags, 5)
        grid1.Add(lbl2, 0, aln_flags, 5)
        grid1.Add(self.sg_txt, 0, aln_flags, 5)
        grid1.Add(lbl3, 0, aln_flags, 5)
        grid1.Add(self.d_max_min_txt, 0, aln_flags, 5)
        grid1.Add(lbl4, 0, aln_flags, 5)
        grid1.Add(self.dataset_chooser, 0, aln_flags, 5)
        self.sizer.Add(grid1, 0, wx.ALL|wx.EXPAND)
        self._dataset_panels = []
        self._crystals_and_datasets = []
        self._dataset_labels = []
        self._mtz_obj = None

    def SetMtzFile(self, file_name):
        """Load *file_name* as an MTZ object and populate the widgets.

        Raises Sorry with a readable message when the file cannot be parsed.
        """
        from iotbx import mtz
        try :
            self._mtz_obj = mtz.object(file_name=file_name)
        except RuntimeError as e :
            raise Sorry(("The file '%s' could not be read as an MTZ file "+
                "(original error: %s)") % (file_name, str(e)))
        self.title_txt.SetLabel(self._mtz_obj.title())
        self.sg_txt.SetLabel(str(self._mtz_obj.space_group_name()))
        self.d_max_min_txt.SetLabel("%g - %g Angstrom" %
            self._mtz_obj.max_min_resolution())
        self._dataset_labels = []
        self._crystals_and_datasets = []
        # Collect every (crystal, dataset) pair, skipping the HKL_base
        # bookkeeping crystal, and label each as /crystal/dataset.
        for i_crystal, crystal in enumerate(self._mtz_obj.crystals()):
            if (crystal.name() == "HKL_base"):
                continue
            for i_dataset, dataset in enumerate(crystal.datasets()):
                label = "/%s/%s" % (crystal.name(), dataset.name())
                self._crystals_and_datasets.append((crystal, dataset))
                self._dataset_labels.append(label)
        self.dataset_chooser.SetItems(self._dataset_labels)
        p = MtzDatasetPanel(self, style=wx.RAISED_BORDER)
        self._dataset_panels.append(p)
        self.sizer.Add(p, 1, wx.ALL|wx.EXPAND, 0)
        # Auto-select the first dataset if any were found.
        if (len(self._dataset_labels) > 0):
            self.dataset_chooser.SetSelection(0)
            self.OnChooseDataset(None)

    def OnChooseDataset(self, event):
        # Show the chosen dataset in the (single, reused) dataset panel.
        if (len(self._dataset_panels) == 0) : return
        sel = self.dataset_chooser.GetSelection()
        crystal, dataset = self._crystals_and_datasets[sel]
        p = self._dataset_panels[0]
        p.SetMtzDataset(
            crystal=crystal,
            dataset=dataset,
            n_refl=self._mtz_obj.n_reflections())
        self.Layout()
        self.sizer.Fit(self)
        self.Fit()
class MtzDatasetPanel(wx.Panel):
    """Panel showing one dataset: unit cell, wavelength, and a column list."""

    def __init__(self, *args, **kwds):
        wx.Panel.__init__(self, *args, **kwds)
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.SetSizer(self.sizer)
        grid = wx.FlexGridSizer(cols=2)
        lbl1 = wx.StaticText(self, label="Unit cell:")
        self.uc_txt = wx.StaticText(self, label="", size=(300,-1))
        lbl2 = wx.StaticText(self, label="Wavelength:")
        self.wl_txt = wx.StaticText(self, label="", size=(300,-1))
        grid.Add(lbl1, 0, aln_flags, 5)
        grid.Add(self.uc_txt, 0, aln_flags, 5)
        grid.Add(lbl2, 0, aln_flags, 5)
        grid.Add(self.wl_txt, 0, aln_flags, 5)
        self.sizer.Add(grid, 0, wx.ALL|wx.EXPAND)
        self.lc = MtzColumnList(self, -1, size=(720,240), style=wx.LC_REPORT)
        self.sizer.Add(self.lc, 1, wx.ALL|wx.EXPAND, 5)
        #self.sizer.Fit(self)
        self.Fit()

    def SetMtzDataset(self, crystal, dataset, n_refl):
        """Display *dataset* of *crystal*; n_refl is the total reflection count."""
        self.lc.DeleteAllItems()
        self.lc.SetNReflections(n_refl)
        self.uc_txt.SetLabel("%g %g %g %g %g %g" % crystal.unit_cell().parameters())
        self.wl_txt.SetLabel("%g" % dataset.wavelength())
        self.lc.AddMtzDataset(dataset)
        self.Layout()
        self.Refresh()
class MtzColumnList(wx.ListCtrl):
    """Report-mode list control displaying one row per MTZ data column."""

    def __init__(self, *args, **kwds):
        style = kwds.get('style', 0)
        if (not style & wx.LC_REPORT):
            # Bug fix: the original used `style &= wx.LC_REPORT`, which clears
            # every existing flag when LC_REPORT is absent.  OR the flag in
            # instead, preserving whatever style the caller passed.
            style |= wx.LC_REPORT
        kwds['style'] = style
        wx.ListCtrl.__init__(self, *args, **kwds)
        self.n_refl = None  # total reflection count; set via SetNReflections()
        for i, label in enumerate(columns):
            self.InsertColumn(i, label)
            self.SetColumnWidth(i, column_widths[i])

    def SetNReflections(self, n_refl):
        """Record the total number of reflections (used to format columns)."""
        self.n_refl = n_refl

    def METHOD_NAME(self, fields):
        """Append one row describing a single MTZ column."""
        assert (len(fields) == len(columns))
        n = self.GetItemCount() + 1
        # A very large index argument appends the item at the end.
        item = self.InsertStringItem(sys.maxunicode, str(n))
        for i, field in enumerate(fields[:-2]):
            self.SetStringItem(item, i+1, field)
        # The last two fields share the final "type" cell.
        self.SetStringItem(item, len(fields)-1, "%s %s" % (fields[-2], fields[-1]))

    def AddMtzDataset(self, dataset):
        """Add one row per column of *dataset*; SetNReflections() must be called first."""
        assert (self.n_refl is not None)
        for i_col, column in enumerate(dataset.columns()):
            fields = column.format_fields_for_mtz_dump(self.n_refl)
            self.METHOD_NAME(fields)
if (__name__ == "__main__"):
    app = wxtbx.app.CCTBXApp(0)
    frame = MtzInspectionFrame(None, title="Inspect MTZ file contents")
    # Bug fix: sys.argv[0] is always the script path, so the original test
    # `len(sys.argv) == 0` could never be true and `sys.argv[1]` raised
    # IndexError whenever no file argument was given.  Open the file dialog
    # when no argument is supplied, otherwise load the named file.
    if (len(sys.argv) < 2):
        frame.OnOpen(None)
    else :
        frame.SetMtzFile(sys.argv[1])
    frame.Show()
    app.MainLoop()
5,751 | get k8s resource status | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import base64
import functools
import json
import time
from django.conf import settings
from django.utils import timezone
from rest_framework.response import Response
from backend.components import paas_cc
from backend.templatesets.legacy_apps.configuration.constants import K8sResourceName
from backend.templatesets.legacy_apps.instance import constants as instance_constants
from backend.templatesets.legacy_apps.instance.models import InstanceConfig
from backend.utils.basic import getitems
from backend.utils.errcodes import ErrorCode
from backend.utils.error_codes import error_codes
from . import constants
STAG_ENV = 2  # numeric code for the staging cluster environment
PROD_ENV = 1  # numeric code for the production cluster environment


class APIResponse(Response):
    """DRF Response that guarantees `code` and `message` keys in the payload."""

    def __init__(self, data, *args, **kwargs):
        data.setdefault('code', 0)
        data.setdefault('message', '')
        # `__init__` must not return a value; just delegate to the parent
        # (the original `return super().__init__(...)` returned None anyway,
        # but the form is misleading).
        super(APIResponse, self).__init__(data, *args, **kwargs)
def image_handler(image):
    """Trim an image path so only the user-entered tail is displayed.

    The path is cut at the first marker from constants.SPLIT_IMAGE found
    among its "/"-separated segments; unmatched images are returned as-is.
    """
    segments = image.split("/")
    for marker in constants.SPLIT_IMAGE:
        if marker in segments:
            return "/" + "/".join(segments[segments.index(marker):])
    return image
def get_k8s_desired_ready_instance_count(info, resource_name):
    """Return (desired, ready) replica counts for a workload resource.

    The key paths used to read the counts are looked up per resource type
    in constants.RESOURCE_REPLICAS_KEYS; missing values default to 0.
    """
    key_conf = constants.RESOURCE_REPLICAS_KEYS[resource_name]
    desired = getitems(info, key_conf['desired_replicas_keys'], default=0)
    ready = getitems(info, key_conf['ready_replicas_keys'], default=0)
    return desired, ready
def cluster_env(env, ret_num_flag=True):
    """Map a raw cluster environment name to its front-end representation.

    Returns the numeric code (STAG_ENV/PROD_ENV) by default; with
    ret_num_flag=False the mapped string from settings is returned instead.
    """
    front_env = settings.CLUSTER_ENV_FOR_FRONT.get(env)
    if not ret_num_flag:
        return front_env
    # Anything that is not the staging environment is treated as production.
    return STAG_ENV if front_env == "stag" else PROD_ENV
def get_project_namespaces(access_token, project_id):
    """Fetch all namespaces of a project from PaaS-CC (pagination disabled).

    Raises APIError when the upstream response carries a non-zero code.
    Returns the `results` list, or [] when the payload is empty.
    """
    ns_resp = paas_cc.get_namespace_list(access_token, project_id, desire_all_data=True)
    if ns_resp.get('code') != ErrorCode.NoError:
        raise error_codes.APIError(ns_resp.get('message'))
    data = ns_resp.get('data') or {}
    return data.get('results') or []
def get_namespace_name_map(access_token, project_id):
    """Return a mapping of namespace name -> namespace record for a project."""
    namespaces = get_project_namespaces(access_token, project_id)
    return {item['name']: item for item in namespaces}
def base64_encode_params(info):
    """JSON-serialize *info* and return the Base64-encoded bytes."""
    serialized = json.dumps(info).encode('utf-8')
    return base64.b64encode(serialized)
def METHOD_NAME(resource_kind, resource, replicas, available):
    """Return the running status of a workload (deployment/sts/job/ds)."""
    status = constants.ResourceStatus.Unready.value
    # desired == ready counts as Running; the 0/0 case is also treated as ok.
    if (available == replicas and available > 0) or (available == replicas == 0):
        status = constants.ResourceStatus.Running.value
    # Jobs can additionally report Completed.
    if resource_kind == constants.REVERSE_CATEGORY_MAP[K8sResourceName.K8sJob.value]:
        # Number of completed replicas.  NOTE(review): this reads
        # spec.completions (the desired completion count) and compares it to
        # `replicas` — confirm this matches the intended "complete" semantics.
        completed_replicas = getitems(resource, ['data', 'spec', 'completions'], default=0)
        if completed_replicas == replicas and available > 0:
            status = constants.ResourceStatus.Completed.value
    return status
def delete_instance_records(online_instances, local_instances):
    """Soft-delete local InstanceConfig rows that no longer exist online.

    Records being rebuilt (oper_type == REBUILD_INSTANCE) are left untouched.
    """
    # Keys present locally but missing from the online snapshot.
    diff_insts = set(local_instances) - set(online_instances.keys())
    instance_id_list = [local_instances[key].get('id') for key in diff_insts]
    InstanceConfig.objects.filter(id__in=instance_id_list).exclude(oper_type=constants.REBUILD_INSTANCE).update(
        is_deleted=True, deleted_time=timezone.now()
    )
def get_instance_version_name(annotations, labels):
    """Read the instance version name, preferring annotations over labels."""
    key = instance_constants.ANNOTATIONS_VERSION
    return annotations.get(key) or labels.get(key)
def get_instance_version_id(annotations, labels):
    """Read the instance version id, preferring annotations over labels."""
    key = instance_constants.ANNOTATIONS_VERSION_ID
    return annotations.get(key) or labels.get(key)
def get_instance_version(annotations, labels):
    """Assemble the version info dict from annotations/labels."""
    return {
        'version': get_instance_version_name(annotations, labels),
        'version_id': get_instance_version_id(annotations, labels),
    }
def retry_requests(func, params=None, data=None, max_retries=2):
    """Query application info with retries.

    The storage API is the data source here, so calls are retried to guard
    against transient failures and empty responses.  *func* is invoked with
    *params* positionally when given, otherwise with **data.
    """
    for i in range(1, max_retries + 1):
        try:
            resp = func(params) if params else func(**data)
            # Last attempt: return whatever we got, even an empty payload.
            if i == max_retries:
                return resp
            # An empty "data" field implies a non-zero code; back off and retry.
            if not resp.get("data"):
                time.sleep(0.5)
                continue
            return resp
        except Exception:
            # Brief back-off before the next attempt.
            time.sleep(0.5)
    raise error_codes.APIError("query storage api error")
def exclude_records(
    cluster_id_from_params: str,
    cluster_id_from_instance: str,
    cluster_type_from_params: str,
    cluster_type_from_instance: str,
) -> bool:
    """Decide whether an instance record should be excluded from results.

    :param cluster_id_from_params: cluster ID from the request, used to filter resources
    :param cluster_id_from_instance: cluster ID carried by the instance
    :param cluster_type_from_params: cluster environment from the request (prod/stag)
    :param cluster_type_from_instance: cluster environment type of the instance
    :returns: True when the record must be excluded
    """
    # Records without a cluster ID are always excluded.
    if not cluster_id_from_instance:
        return True
    if cluster_id_from_params:
        # Filtering by an explicit cluster: exclude on ID mismatch.
        return cluster_id_from_instance != cluster_id_from_params
    # Otherwise filter by environment type, compared as strings.
    return str(cluster_type_from_params) != str(cluster_type_from_instance)
5,752 | job template | # Python
import pytest
from unittest import mock
import json
# AWX
from awx.api.serializers import JobSerializer, JobOptionsSerializer
from awx.main.models import (
Label,
Job,
JobEvent,
ProjectUpdateEvent,
)
def mock_JT_resource_data():
    """Stand-in for JobTemplate.validation_errors: report no errors."""
    return dict()
@pytest.fixture
def METHOD_NAME(mocker):
    """Mocked JobTemplate (pk=5) whose validation_errors reports no errors."""
    mock_jt = mocker.MagicMock(pk=5)
    mock_jt.validation_errors = mock_JT_resource_data
    return mock_jt
@pytest.fixture
def project_update(mocker):
    """Mocked ProjectUpdate with pk=1."""
    mock_pu = mocker.MagicMock(pk=1)
    return mock_pu
@pytest.fixture
def job(mocker, METHOD_NAME, project_update):
    """Mocked Job (pk=5) wired to the mocked job template and project update."""
    return mocker.MagicMock(pk=5, METHOD_NAME=METHOD_NAME, project_update=project_update, workflow_job_id=None, execution_environment_id=None)
@pytest.fixture
def labels(mocker):
    """25 unsaved Label instances named label-0 .. label-24."""
    return [Label(id=x, name='label-%d' % x) for x in range(0, 25)]
@pytest.fixture
def jobs(mocker):
    """25 unsaved Job instances named job-0 .. job-24."""
    return [Job(id=x, name='job-%d' % x) for x in range(0, 25)]
@mock.patch('awx.api.serializers.UnifiedJobTemplateSerializer.get_related', lambda x, y: {})
@mock.patch('awx.api.serializers.JobOptionsSerializer.get_related', lambda x, y: {})
class TestJobSerializerGetRelated:
    """Related-link behaviour of JobSerializer.

    The parent serializers' get_related are patched out so only the
    Job-specific links are exercised here.
    """

    @pytest.mark.parametrize(
        "related_resource_name",
        [
            'job_events',
            'relaunch',
            'labels',
        ],
    )
    def test_get_related(self, test_get_related, job, related_resource_name):
        # `test_get_related` is a shared helper fixture (defined in conftest).
        test_get_related(JobSerializer, job, 'jobs', related_resource_name)

    def test_job_template_absent(self, job):
        # Without a job template the serializer must omit the link entirely.
        job.METHOD_NAME = None
        serializer = JobSerializer()
        related = serializer.get_related(job)
        assert 'job_template' not in related

    def test_job_template_present(self, get_related_mock_and_run, job):
        # With a template, the link points at the template's detail URL.
        related = get_related_mock_and_run(JobSerializer, job)
        assert 'job_template' in related
        assert related['job_template'] == '/api/v2/%s/%d/' % ('job_templates', job.METHOD_NAME.pk)
@mock.patch('awx.api.serializers.BaseSerializer.to_representation', lambda self, obj: {'extra_vars': obj.extra_vars})
class TestJobSerializerSubstitution:
    """Survey passwords must be masked in serialized extra_vars."""

    def test_survey_password_hide(self, mocker):
        job = mocker.MagicMock(
            **{'display_extra_vars.return_value': '{\"secret_key\": \"$encrypted$\"}', 'extra_vars.return_value': '{\"secret_key\": \"my_password\"}'}
        )
        serializer = JobSerializer(job)
        rep = serializer.to_representation(job)
        extra_vars = json.loads(rep['extra_vars'])
        assert extra_vars['secret_key'] == '$encrypted$'
        # The masking path (display_extra_vars) must be used, not the raw vars.
        job.display_extra_vars.assert_called_once_with()
        assert 'my_password' not in extra_vars
@mock.patch('awx.api.serializers.BaseSerializer.get_summary_fields', lambda x, y: {})
class TestJobOptionsSerializerGetSummaryFields:
    """Summary-field behaviour of JobOptionsSerializer (label truncation)."""

    def test__summary_field_labels_10_max(self, mocker, METHOD_NAME, labels):
        # Only the first 10 of the 25 labels may appear in the summary.
        METHOD_NAME.labels.all = mocker.MagicMock(**{'return_value': labels})
        serializer = JobOptionsSerializer()
        summary_labels = serializer._summary_field_labels(METHOD_NAME)
        assert len(summary_labels['results']) == 10
        assert summary_labels['results'] == [{'id': x.id, 'name': x.name} for x in labels[:10]]

    def test_labels_exists(self, test_get_summary_fields, METHOD_NAME):
        # `test_get_summary_fields` is a shared helper fixture (conftest).
        test_get_summary_fields(JobOptionsSerializer, METHOD_NAME, 'labels')
class TestJobDetailSerializerGetHostStatusCountFields(object):
    """Host status counts derived from a job's playbook_on_stats event."""

    def test_hosts_are_counted_once(self):
        # `localhost` appears in several stat buckets; the expected result
        # shows each host contributing to exactly one final status.
        mock_event = JobEvent(
            **{
                'event': 'playbook_on_stats',
                'event_data': {
                    'skipped': {
                        'localhost': 2,
                        'fiz': 1,
                    },
                    'ok': {
                        'localhost': 1,
                        'foo': 2,
                    },
                    'changed': {
                        'localhost': 1,
                        'bar': 3,
                    },
                    'dark': {
                        'localhost': 2,
                        'fiz': 2,
                    },
                },
            }
        )
        assert mock_event.get_host_status_counts() == {'ok': 1, 'changed': 1, 'dark': 2}
class TestProjectUpdateDetailSerializerGetHostStatusCountFields(object):
    """Same host-status counting contract, for ProjectUpdateEvent."""

    def test_hosts_are_counted_once(self):
        # Mirrors the JobEvent test: each host counts toward one final status.
        mock_event = ProjectUpdateEvent(
            **{
                'event': 'playbook_on_stats',
                'event_data': {
                    'skipped': {
                        'localhost': 2,
                        'fiz': 1,
                    },
                    'ok': {
                        'localhost': 1,
                        'foo': 2,
                    },
                    'changed': {
                        'localhost': 1,
                        'bar': 3,
                    },
                    'dark': {
                        'localhost': 2,
                        'fiz': 2,
                    },
                },
            }
        )
        assert mock_event.get_host_status_counts() == {'ok': 1, 'changed': 1, 'dark': 2}
5,753 | test extract pipelineparam with types | # Copyright 2018 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from kfp.deprecated.dsl import PipelineParam
from kfp.deprecated.dsl._pipeline_param import _extract_pipelineparams
from kfp.deprecated.dsl._pipeline_param import extract_pipelineparams_from_any
from kubernetes.client.models import V1ConfigMap
from kubernetes.client.models import V1Container
from kubernetes.client.models import V1EnvVar
class TestPipelineParam(unittest.TestCase):
    """Tests for PipelineParam rendering and the param-extraction helpers."""

    def test_invalid(self):
        """Invalid pipeline param name and op_name."""
        with self.assertRaises(ValueError):
            p = PipelineParam(name='123_abc')

    def test_str_repr(self):
        """Test string representation."""
        p = PipelineParam(name='param1', op_name='op1')
        self.assertEqual('{{pipelineparam:op=op1;name=param1}}', str(p))
        # Without an op_name, the op field renders empty.
        p = PipelineParam(name='param2')
        self.assertEqual('{{pipelineparam:op=;name=param2}}', str(p))
        # A constant value does not appear in the placeholder text.
        p = PipelineParam(name='param3', value='value3')
        self.assertEqual('{{pipelineparam:op=;name=param3}}', str(p))

    def test_extract_pipelineparams(self):
        """Test _extract_pipeleineparams."""
        p1 = PipelineParam(name='param1', op_name='op1')
        p2 = PipelineParam(name='param2')
        p3 = PipelineParam(name='param3', value='value3')
        stuff_chars = ' between '
        payload = str(p1) + stuff_chars + str(p2) + stuff_chars + str(p3)
        params = _extract_pipelineparams(payload)
        self.assertListEqual([p1, p2, p3], params)
        # A list of payloads is also accepted; duplicated params are deduped.
        payload = [
            str(p1) + stuff_chars + str(p2),
            str(p2) + stuff_chars + str(p3)
        ]
        params = _extract_pipelineparams(payload)
        self.assertListEqual([p1, p2, p3], params)

    def test_extract_pipelineparams_from_any(self):
        """Test extract_pipeleineparams."""
        p1 = PipelineParam(name='param1', op_name='op1')
        p2 = PipelineParam(name='param2')
        p3 = PipelineParam(name='param3', value='value3')
        stuff_chars = ' between '
        payload = str(p1) + stuff_chars + str(p2) + stuff_chars + str(p3)
        # Params are recovered even when nested inside a k8s API object.
        container = V1Container(
            name=p1, image=p2, env=[V1EnvVar(name="foo", value=payload)])
        params = extract_pipelineparams_from_any(container)
        self.assertListEqual(sorted([p1, p2, p3]), sorted(params))

    def test_extract_pipelineparams_from_dict(self):
        """Test extract_pipeleineparams."""
        p1 = PipelineParam(name='param1', op_name='op1')
        p2 = PipelineParam(name='param2')
        # Both keys and values of mappings are scanned.
        configmap = V1ConfigMap(data={str(p1): str(p2)})
        params = extract_pipelineparams_from_any(configmap)
        self.assertListEqual(sorted([p1, p2]), sorted(params))

    def METHOD_NAME(self):
        """Test _extract_pipelineparams."""
        # Params carrying structured (dict) and plain (str) type metadata.
        p1 = PipelineParam(
            name='param1',
            op_name='op1',
            param_type={'customized_type_a': {
                'property_a': 'value_a'
            }})
        p2 = PipelineParam(name='param2', param_type='customized_type_b')
        p3 = PipelineParam(
            name='param3',
            value='value3',
            param_type={'customized_type_c': {
                'property_c': 'value_c'
            }})
        stuff_chars = ' between '
        payload = str(p1) + stuff_chars + str(p2) + stuff_chars + str(p3)
        params = _extract_pipelineparams(payload)
        self.assertListEqual([p1, p2, p3], params)
        # Expecting the _extract_pipelineparam to dedup the pipelineparams among all the payloads.
        payload = [
            str(p1) + stuff_chars + str(p2),
            str(p2) + stuff_chars + str(p3)
        ]
        params = _extract_pipelineparams(payload)
        self.assertListEqual([p1, p2, p3], params)
5,754 | organization analytics by voter doc template values | # apis_v1/documentation_source/organization_analytics_by_voter_doc.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
def METHOD_NAME(url_root):
    """
    Show documentation about organizationAnalyticsByVoter
    """
    # Parameters every request must include.
    required_query_parameter_list = [
        {
            'name': 'organization_we_vote_id',
            'value': 'string',  # boolean, integer, long, string
            'description': 'An organization\'s unique We Vote id.',
        },
        {
            'name': 'organization_api_pass_code',
            'value': 'string',  # boolean, integer, long, string
            'description': 'An organization\'s unique pass code for retrieving this data. '
                           'Not needed if organization is signed in.',
        },
        {
            'name': 'voter_device_id',
            'value': 'string',  # boolean, integer, long, string
            'description': 'Not needed if organization_api_pass_code is used.',
        },
        {
            'name': 'api_key',
            'value': 'string (from post, cookie, or get (in that order))',  # boolean, integer, long, string
            'description': 'The unique key provided to any organization using the WeVoteServer APIs',
        },
    ]
    # Parameters that narrow the result set.
    optional_query_parameter_list = [
        {
            'name': 'election_id',
            'value': 'integer',  # boolean, integer, long, string
            'description': 'Limit the results to just this election',
        },
        {
            'name': 'external_voter_id',
            'value': 'string',  # boolean, integer, long, string
            'description': 'Limit the results to just this voter',
        },
        {
            'name': 'voter_we_vote_id',
            'value': 'string',  # boolean, integer, long, string
            'description': 'Limit the results to just this voter',
        },
    ]
    potential_status_codes_list = [
        # {
        # 'code': 'VALID_VOTER_DEVICE_ID_MISSING',
        # 'description': 'Cannot proceed. A valid voter_device_id parameter was not included.',
        # },
        # {
        # 'code': 'VALID_VOTER_ID_MISSING',
        # 'description': 'Cannot proceed. A valid voter_id was not found.',
        # },
    ]
    try_now_link_variables_dict = {}
    # Example JSON response rendered on the documentation page.
    api_response = (
        '{\n'
        ' "status": string,\n'
        ' "success": boolean,\n'
        ' "organization_we_vote_id": string,\n'
        ' "election_list": list\n'
        ' [\n'
        ' "election_id": string,\n'
        ' "election_name": string,\n'
        ' "election_date": string,\n'
        ' "election_state": string,\n'
        ' ],\n'
        ' "voter_list": list\n'
        ' [\n'
        ' "external_voter_id": string (Unique ID from organization),\n'
        ' "voter_we_vote_id": string (the voter\'s we vote id),\n'
        ' "elections_visited: list,\n'
        ' [\n'
        ' "election_id": string (the election if within we vote),\n'
        ' "support_count": integer (COMING SOON),\n'
        ' "oppose_count: integer (COMING SOON),\n'
        ' "friends_only_support_count": integer (COMING SOON),\n'
        ' "friends_only_oppose_count: integer (COMING SOON),\n'
        ' "friends_only_comments_count": integer (COMING SOON),\n'
        ' "public_support_count": integer (COMING SOON),\n'
        ' "public_oppose_count: integer (COMING SOON),\n'
        ' "public_comments_count": integer (COMING SOON),\n'
        ' ],\n'
        ' ],\n'
        '}'
    )
    template_values = {
        'api_name': 'organizationAnalyticsByVoter',
        'api_slug': 'organizationAnalyticsByVoter',
        'api_introduction':
            "A list of voter-specific analytics about either a) one of your member's, or b) all of your members "
            "based on the variables you send with the request. These analytics come from visits to organization's "
            "custom URL, and not the main WeVote.US site.",
        'try_now_link': 'apis_v1:organizationAnalyticsByVoterView',
        'try_now_link_variables_dict': try_now_link_variables_dict,
        'url_root': url_root,
        'get_or_post': 'GET',
        'required_query_parameter_list': required_query_parameter_list,
        'optional_query_parameter_list': optional_query_parameter_list,
        'api_response': api_response,
        'api_response_notes': "",
        'potential_status_codes_list': potential_status_codes_list,
    }
    return template_values
5,755 | state | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetScheduleResult',
'AwaitableGetScheduleResult',
'get_schedule',
'get_schedule_output',
]
@pulumi.output_type
class GetScheduleResult:
    """
    Represents a Schedule to execute a task.
    """
    # Auto-generated by Pulumi codegen; only comments are added here.
    def __init__(__self__, frequency=None, id=None, name=None, provisioning_state=None, METHOD_NAME=None, system_data=None, time=None, time_zone=None, type=None):
        # Validate each engine-supplied value and register it via pulumi.set
        # so the @pulumi.output_type property getters below can retrieve it.
        if frequency and not isinstance(frequency, str):
            raise TypeError("Expected argument 'frequency' to be a str")
        pulumi.set(__self__, "frequency", frequency)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'state' to be a str")
        pulumi.set(__self__, "state", METHOD_NAME)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if time and not isinstance(time, str):
            raise TypeError("Expected argument 'time' to be a str")
        pulumi.set(__self__, "time", time)
        if time_zone and not isinstance(time_zone, str):
            raise TypeError("Expected argument 'time_zone' to be a str")
        pulumi.set(__self__, "time_zone", time_zone)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def frequency(self) -> str:
        """
        The frequency of this scheduled task.
        """
        return pulumi.get(self, "frequency")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the resource.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def METHOD_NAME(self) -> Optional[str]:
        """
        Indicates whether or not this scheduled task is enabled.
        """
        return pulumi.get(self, "state")

    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> 'outputs.SystemDataResponse':
        """
        Azure Resource Manager metadata containing createdBy and modifiedBy information.
        """
        return pulumi.get(self, "system_data")

    @property
    @pulumi.getter
    def time(self) -> str:
        """
        The target time to trigger the action. The format is HH:MM.
        """
        return pulumi.get(self, "time")

    @property
    @pulumi.getter(name="timeZone")
    def time_zone(self) -> str:
        """
        The IANA timezone id at which the schedule should execute.
        """
        return pulumi.get(self, "time_zone")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
class AwaitableGetScheduleResult(GetScheduleResult):
    # Awaitable wrapper so the result can be used with `await`; the generator
    # yields nothing and immediately returns a plain GetScheduleResult copy.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetScheduleResult(
            frequency=self.frequency,
            id=self.id,
            name=self.name,
            provisioning_state=self.provisioning_state,
            METHOD_NAME=self.METHOD_NAME,
            system_data=self.system_data,
            time=self.time,
            time_zone=self.time_zone,
            type=self.type)
def get_schedule(pool_name: Optional[str] = None,
                 project_name: Optional[str] = None,
                 resource_group_name: Optional[str] = None,
                 schedule_name: Optional[str] = None,
                 top: Optional[int] = None,
                 opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetScheduleResult:
    """
    Gets a schedule resource.


    :param str pool_name: Name of the pool.
    :param str project_name: The name of the project.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str schedule_name: The name of the schedule that uniquely identifies it.
    :param int top: The maximum number of resources to return from the operation. Example: '$top=10'.
    """
    # Marshal the arguments into provider-facing (camelCase) keys and invoke
    # the Azure Native provider function synchronously.
    __args__ = dict()
    __args__['poolName'] = pool_name
    __args__['projectName'] = project_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['scheduleName'] = schedule_name
    __args__['top'] = top
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:devcenter/v20230401:getSchedule', __args__, opts=opts, typ=GetScheduleResult).value

    # Unpack the raw invoke result into the typed, awaitable wrapper.
    return AwaitableGetScheduleResult(
        frequency=pulumi.get(__ret__, 'frequency'),
        id=pulumi.get(__ret__, 'id'),
        name=pulumi.get(__ret__, 'name'),
        provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
        METHOD_NAME=pulumi.get(__ret__, 'state'),
        system_data=pulumi.get(__ret__, 'system_data'),
        time=pulumi.get(__ret__, 'time'),
        time_zone=pulumi.get(__ret__, 'time_zone'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_schedule)
def get_schedule_output(pool_name: Optional[pulumi.Input[str]] = None,
                        project_name: Optional[pulumi.Input[str]] = None,
                        resource_group_name: Optional[pulumi.Input[str]] = None,
                        schedule_name: Optional[pulumi.Input[str]] = None,
                        top: Optional[pulumi.Input[Optional[int]]] = None,
                        opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetScheduleResult]:
    """
    Gets a schedule resource.


    :param str pool_name: Name of the pool.
    :param str project_name: The name of the project.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str schedule_name: The name of the schedule that uniquely identifies it.
    :param int top: The maximum number of resources to return from the operation. Example: '$top=10'.
    """
    # Output-accepting variant of get_schedule; the lift_output_func decorator
    # supplies the implementation, so the body is intentionally empty.
    ...
5,756 | azimuthal average | import numpy as np
import copy
import lenstronomy.Util.mask_util as mask_util
from lenstronomy.Util import param_util
from lenstronomy.Util.package_util import exporter
export, __all__ = exporter()
@export
def half_light_radius(lens_light, x_grid, y_grid, center_x=0, center_y=0):
    """Compute the half-light radius of a surface-brightness map.

    A circular aperture around (center_x, center_y) is grown in 1000 steps up
    to roughly twice the maximum grid radius; the first radius whose enclosed
    flux exceeds half of the total (negative pixels clipped to zero) is
    returned.

    :param lens_light: array of surface brightness (not modified)
    :param x_grid: x-axis coordinates
    :param y_grid: y-axis coordinates
    :param center_x: center of light
    :param center_y: center of light
    :return: half-light radius, or -1 if half the flux is never enclosed
    """
    # Clip negatives on a copy -- the original implementation mutated the
    # caller's array in place, which is an unintended side effect.
    flux_map = np.maximum(np.asarray(lens_light), 0)
    total_flux_2 = np.sum(flux_map) / 2.0
    r_max = np.max(np.sqrt((x_grid - center_x) ** 2 + (y_grid - center_y) ** 2))
    for i in range(1000):
        r = i / 500.0 * r_max
        mask = mask_util.mask_azimuthal(x_grid, y_grid, center_x, center_y, r)
        flux_enclosed = np.sum(flux_map * mask)
        if flux_enclosed > total_flux_2:
            return r
    return -1
@export
def radial_profile(light_grid, x_grid, y_grid, center_x=0, center_y=0, n=None):
    """Computes radial profile.

    :param light_grid: array of surface brightness
    :param x_grid: x-axis coordinates
    :param y_grid: y-axis coordinates
    :param center_x: center of light
    :param center_y: center of light
    :param n: number of discrete steps
    :return: I(r), r with r in units of the coordinate grid
    """
    r_max = np.max(np.sqrt((x_grid - center_x) ** 2 + (y_grid - center_y) ** 2))
    if n is None:
        n = int(np.sqrt(len(x_grid)))
    radii = np.linspace(1.0 / n * r_max, r_max, n)
    profile = np.zeros(n)
    flux_previous = 0
    for idx, radius in enumerate(radii):
        aperture = mask_util.mask_azimuthal(x_grid, y_grid, center_x, center_y, radius)
        flux_total = np.sum(np.array(light_grid) * aperture)
        # Differential flux within the annulus between consecutive radii.
        profile[idx] = flux_total - flux_previous
        flux_previous = flux_total
    return profile, radii
@export
def METHOD_NAME(image, center=None):
    """Calculate the azimuthally averaged radial profile.

    :param image: the 2D image array
    :param center: the [x, y] pixel coordinates used as the center; ``None``
        (default) uses the geometric center of the image (including
        fractional pixels)
    :return: I(r) averaged over 1-pixel-wide radial bins, and the bin radii
        in units of pixels
    """
    # Pixel index grids for the image.
    y, x = np.indices(image.shape)
    if center is None:
        # Geometric center. Bug fix: the second coordinate previously reused
        # the x-extent, which is wrong for non-square images; also testing
        # `not center` raised on ndarray centers -- use an explicit None check.
        center = np.array([(x.max() - x.min()) / 2.0, (y.max() - y.min()) / 2.0])
    r = np.hypot(x - center[0], y - center[1])

    # Sort all pixels by radius.
    ind = np.argsort(r.flat)
    r_sorted = r.flat[ind]
    i_sorted = image.flat[ind]

    # Integer part of the radii defines 1-pixel-wide bins.
    r_int = r_sorted.astype(int)
    deltar = r_int[1:] - r_int[:-1]  # assumes all radii represented
    rind = np.where(deltar)[0]  # indices where the bin changes
    nr = rind[1:] - rind[:-1]  # number of pixels per bin

    # Cumulative sum gives the total flux per bin in O(n).
    csim = np.cumsum(i_sorted, dtype=float)
    tbin = csim[rind[1:]] - csim[rind[:-1]]
    r_bin = np.linspace(start=1, stop=len(tbin) + 1 - 0.5, num=len(tbin))
    radial_prof = tbin / nr
    return radial_prof, r_bin
@export
def moments(I_xy_input, x, y):
    """Compute quadrupole moments from a light distribution.

    The input is copied, shifted so its minimum is non-negative, and the
    moments are measured within a circular mask of one third of the x-extent.

    :param I_xy_input: light distribution (not modified; a deep copy is taken)
    :param x: x-coordinates of I_xy
    :param y: y-coordinates of I_xy
    :return: Q_xx, Q_xy, Q_yy, and the subtracted background level divided by
        the mean surface brightness
    """
    I_xy = copy.deepcopy(I_xy_input)
    # Lift the map so all values are >= 0; remember the subtracted offset.
    background = np.minimum(0, np.min(I_xy))
    I_xy -= background
    # NOTE(review): x_ and y_ are flux-weighted sums, not normalized centroids
    # (there is no division by the total flux) -- verify against callers
    # whether this is intended before using them as absolute positions.
    x_ = np.sum(I_xy * x)
    y_ = np.sum(I_xy * y)
    r = (np.max(x) - np.min(x)) / 3.0
    mask = mask_util.mask_azimuthal(x, y, center_x=x_, center_y=y_, r=r)
    Q_xx = np.sum(I_xy * mask * (x - x_) ** 2)
    Q_xy = np.sum(I_xy * mask * (x - x_) * (y - y_))
    Q_yy = np.sum(I_xy * mask * (y - y_) ** 2)
    return Q_xx, Q_xy, Q_yy, background / np.mean(I_xy)
@export
def _ellipticities(I_xy, x, y):
    """Compute ellipticities of a light distribution.

    :param I_xy: surface brightness I(x, y) as array
    :param x: x-coordinates in same shape as I_xy
    :param y: y-coordinates in same shape as I_xy
    :return: reduced shear moments g1, g2
    """
    Q_xx, Q_xy, Q_yy, bkg = moments(I_xy, x, y)
    # Standard moment-based ellipticity definition, corrected for the
    # background offset estimated by moments().
    denom = Q_xx + Q_yy + 2 * np.sqrt(Q_xx * Q_yy - Q_xy**2)
    correction = 1 + bkg
    g1 = (Q_xx - Q_yy) / denom / correction
    g2 = 2 * Q_xy / denom / correction
    return g1, g2
@export
def ellipticities(
    I_xy, x_grid, y_grid, num_iterative=30, iterative=False, center_x=0, center_y=0
):
    """Measure moment-based ellipticities of a surface-brightness map.

    :param I_xy: surface brightness I(x, y) as array
    :param x_grid: x-coordinates in same shape as I_xy
    :param y_grid: y-coordinates in same shape as I_xy
    :param iterative: if True iteratively adopts an eccentric mask to overcome edge effects
    :type iterative: boolean
    :param num_iterative: number of iterative changes in ellipticity
    :type num_iterative: int
    :param center_x: x-center of the measurement aperture
    :param center_y: y-center of the measurement aperture
    :return: e1, e2 eccentricities
    """
    # Initial estimate: circular aperture spanning half the x-extent.
    radius = (np.max(x_grid) - np.min(x_grid)) / 2.0
    mask = mask_util.mask_azimuthal(
        x_grid, y_grid, center_x=center_x, center_y=center_y, r=radius
    )
    e1, e2 = _ellipticities(I_xy * mask, x_grid - center_x, y_grid - center_y)
    phi, q = param_util.ellipticity2phi_q(e1, e2)
    if iterative:
        # Re-measure with an eccentric mask matched to the current (e1, e2)
        # estimate; repeating this mitigates circular-aperture edge effects.
        for i in range(num_iterative):
            mask = mask_util.mask_eccentric(
                x_grid, y_grid, center_x, center_y, e1, e2, r=radius * q / np.sqrt(2)
            )
            e1, e2 = _ellipticities(I_xy * mask, x_grid - center_x, y_grid - center_y)
            phi, q = param_util.ellipticity2phi_q(e1, e2)
    return e1, e2
@export
def bic_model(logL, num_data, num_param):
    """Bayesian information criteria.

    BIC = k * ln(N) - 2 * ln(L), where k is the number of model parameters,
    N the number of data points, and ln(L) the log likelihood.

    :param logL: log likelihood value
    :param num_data: numbers of data
    :param num_param: numbers of model parameters
    :return: BIC value
    """
    penalty = np.log(num_data) * num_param
    return penalty - 2 * logL
@export
def profile_center(kwargs_list, center_x=None, center_y=None):
    """Utility routine that results in the centroid estimate for the profile estimates.

    :param kwargs_list: light parameter keyword argument list (can be light or mass)
    :param center_x: None or center
    :param center_y: None or center
    :return: center_x, center_y
    :raises ValueError: if no explicit center is given and the first profile
        does not carry one
    """
    # Explicitly supplied centers win; otherwise fall back to the first profile.
    if center_x is not None and center_y is not None:
        return center_x, center_y
    first_profile = kwargs_list[0]
    if "center_x" not in first_profile:
        raise ValueError(
            "The center has to be provided as a function argument or the first profile in the list"
            " must come with a center."
        )
    return first_profile["center_x"], first_profile["center_y"]
5,757 | setup | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute.
# -- Project information -----------------------------------------------------
import os
import re
import sys
import sphinx_gallery.gen_rst
# Path setup for building from source tree
sys.path.insert(0, os.path.abspath(".")) # For building from root
sys.path.insert(0, os.path.abspath("..")) # For building from docs dir
import gymnasium # noqa: E402
project = "Gymnasium"
copyright = "2023 Farama Foundation"
author = "Farama Foundation"

# The full version, including alpha/beta/rc tags
release = gymnasium.__version__

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.napoleon",
    "sphinx.ext.doctest",
    "sphinx.ext.autodoc",
    "sphinx.ext.githubpages",
    "sphinx.ext.viewcode",
    "myst_parser",
    "furo.gen_tutorials",
    "sphinx_gallery.gen_gallery",
    "sphinx_github_changelog",
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["tutorials/README.rst"]

# Napoleon settings
napoleon_use_ivar = True
napoleon_use_admonition_for_references = True
# See https://github.com/sphinx-doc/sphinx/issues/9119
napoleon_custom_sections = [("Returns", "params_style")]

# Autodoc
autoclass_content = "both"  # merge class and __init__ docstrings
autodoc_preserve_defaults = True  # render default values verbatim, unevaluated
def remove_lines_before_parameters(app, what, name, obj, options, lines):
    """Trim class docstrings down to their ``:param`` section.

    The free text before the parameters in a class docstring often just
    replicates the class description shown elsewhere on the page, so
    everything before the first ``:param`` line is dropped.  Docstrings
    without any ``:param`` line, and non-class objects, are left untouched.
    """
    if what != "class":
        return
    for idx, line in enumerate(lines):
        if line.startswith(":param"):
            # Mutate in place: Sphinx reads the same list object back.
            del lines[:idx]
            break
def METHOD_NAME(app):
    """Sphinx extension entry point: register the docstring-trimming hook."""
    app.connect("autodoc-process-docstring", remove_lines_before_parameters)
# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "furo"
html_title = "Gymnasium Documentation"
html_baseurl = "https://gymnasium.farama.org"
html_copy_source = False
html_favicon = "_static/img/favicon.png"
html_theme_options = {
    "light_logo": "img/gymnasium_black.svg",
    "dark_logo": "img/gymnasium_white.svg",
    # Google Analytics measurement id.
    "gtag": "G-6H9C8TWXZ8",
    "description": "A standard API for reinforcement learning and a diverse set of reference environments (formerly Gym)",
    "image": "img/gymnasium-github.png",
    "versioning": True,
    "source_repository": "https://github.com/Farama-Foundation/Gymnasium/",
    "source_branch": "main",
    "source_directory": "docs/",
}
html_static_path = ["_static"]
html_css_files = []

# -- Generate Tutorials -------------------------------------------------

# Monkeypatch sphinx-gallery's generated-example header template.
sphinx_gallery.gen_rst.EXAMPLE_HEADER = """
.. DO NOT EDIT.
.. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY.
.. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE:
.. "{0}"
.. LINE NUMBERS ARE GIVEN BELOW.
.. rst-class:: sphx-glr-example-title
.. _sphx_glr_{1}:
"""

sphinx_gallery_conf = {
    "ignore_pattern": r"__init__\.py",
    "examples_dirs": "./tutorials",
    "gallery_dirs": "./tutorials",
    "show_signature": False,
    "show_memory": False,
    # Effectively disables per-example run-time reporting.
    "min_reported_time": float("inf"),
    "filename_pattern": f"{re.escape(os.sep)}run_",
    "default_thumb_file": os.path.join(
        os.path.dirname(__file__), "_static/img/gymnasium-github.png"
    ),
}

# -- Generate Changelog -------------------------------------------------

sphinx_github_changelog_token = os.environ.get("SPHINX_GITHUB_CHANGELOG_TOKEN")
5,758 | test create and get replication task | from botocore.exceptions import ClientError
import boto3
import pytest
import json
from moto import mock_dms
@mock_dms
def METHOD_NAME():
    """A created task is discoverable and round-trips every input field."""
    client = boto3.client("dms", region_name="us-east-1")
    task_kwargs = dict(
        ReplicationTaskIdentifier="test",
        SourceEndpointArn="source-endpoint-arn",
        TargetEndpointArn="target-endpoint-arn",
        ReplicationInstanceArn="replication-instance-arn",
        MigrationType="full-load",
        TableMappings='{"rules":[]}',
        ReplicationTaskSettings='{"Logging":{} }',
    )
    client.create_replication_task(**task_kwargs)

    tasks = client.describe_replication_tasks(
        Filters=[{"Name": "replication-task-id", "Values": ["test"]}]
    )
    assert len(tasks["ReplicationTasks"]) == 1
    task = tasks["ReplicationTasks"][0]
    # Every input field should be echoed back verbatim.
    for key in (
        "ReplicationTaskIdentifier",
        "SourceEndpointArn",
        "TargetEndpointArn",
        "ReplicationInstanceArn",
        "MigrationType",
        "TableMappings",
        "ReplicationTaskSettings",
    ):
        assert task[key] == task_kwargs[key]
    assert task["Status"] == "creating"
    # Both JSON blobs must parse to dicts.
    assert isinstance(json.loads(task["TableMappings"]), dict)
    assert isinstance(json.loads(task["ReplicationTaskSettings"]), dict)
@mock_dms
def test_create_existing_replication_task_throws_error():
    """A second create with the same identifier raises ResourceAlreadyExistsFault."""
    client = boto3.client("dms", region_name="us-east-1")
    task_kwargs = dict(
        ReplicationTaskIdentifier="test",
        SourceEndpointArn="source-endpoint-arn",
        TargetEndpointArn="target-endpoint-arn",
        ReplicationInstanceArn="replication-instance-arn",
        MigrationType="full-load",
        TableMappings='{"rules":[]}',
    )
    client.create_replication_task(**task_kwargs)

    with pytest.raises(ClientError) as ex:
        client.create_replication_task(**task_kwargs)

    assert ex.value.operation_name == "CreateReplicationTask"
    error = ex.value.response["Error"]
    assert error["Code"] == "ResourceAlreadyExistsFault"
    assert error["Message"] == "The resource you are attempting to create already exists."
@mock_dms
def test_start_replication_task():
    """Starting a freshly created task moves it to the 'running' status."""
    client = boto3.client("dms", region_name="us-east-1")
    created = client.create_replication_task(
        ReplicationTaskIdentifier="test",
        SourceEndpointArn="source-endpoint-arn",
        TargetEndpointArn="target-endpoint-arn",
        ReplicationInstanceArn="replication-instance-arn",
        MigrationType="full-load",
        TableMappings='{"rules":[]}',
    )
    task_arn = created["ReplicationTask"]["ReplicationTaskArn"]

    client.start_replication_task(
        ReplicationTaskArn=task_arn, StartReplicationTaskType="start-replication"
    )

    described = client.describe_replication_tasks(
        Filters=[{"Name": "replication-task-arn", "Values": [task_arn]}]
    )
    assert described["ReplicationTasks"][0]["Status"] == "running"
@mock_dms
def test_start_replication_task_throws_resource_not_found_error():
    """Starting an unknown task ARN raises ResourceNotFoundFault."""
    client = boto3.client("dms", region_name="us-east-1")
    with pytest.raises(ClientError) as ex:
        client.start_replication_task(
            ReplicationTaskArn="does-not-exist",
            StartReplicationTaskType="start-replication",
        )
    assert ex.value.operation_name == "StartReplicationTask"
    error = ex.value.response["Error"]
    assert error["Code"] == "ResourceNotFoundFault"
    assert error["Message"] == "Replication task could not be found."
@mock_dms
def test_stop_replication_task_throws_invalid_state_error():
    """Stopping a task that was never started raises InvalidResourceStateFault."""
    client = boto3.client("dms", region_name="us-east-1")
    created = client.create_replication_task(
        ReplicationTaskIdentifier="test",
        SourceEndpointArn="source-endpoint-arn",
        TargetEndpointArn="target-endpoint-arn",
        ReplicationInstanceArn="replication-instance-arn",
        MigrationType="full-load",
        TableMappings='{"rules":[]}',
    )
    task_arn = created["ReplicationTask"]["ReplicationTaskArn"]

    with pytest.raises(ClientError) as ex:
        client.stop_replication_task(ReplicationTaskArn=task_arn)

    assert ex.value.operation_name == "StopReplicationTask"
    error = ex.value.response["Error"]
    assert error["Code"] == "InvalidResourceStateFault"
    assert error["Message"] == "Replication task is not running"
@mock_dms
def test_stop_replication_task_throws_resource_not_found_error():
    """Stopping an unknown task ARN raises ResourceNotFoundFault."""
    client = boto3.client("dms", region_name="us-east-1")
    with pytest.raises(ClientError) as ex:
        client.stop_replication_task(ReplicationTaskArn="does-not-exist")
    assert ex.value.operation_name == "StopReplicationTask"
    error = ex.value.response["Error"]
    assert error["Code"] == "ResourceNotFoundFault"
    assert error["Message"] == "Replication task could not be found."
@mock_dms
def test_stop_replication_task():
    """A running task transitions to the 'stopped' status when stopped."""
    client = boto3.client("dms", region_name="us-east-1")
    created = client.create_replication_task(
        ReplicationTaskIdentifier="test",
        SourceEndpointArn="source-endpoint-arn",
        TargetEndpointArn="target-endpoint-arn",
        ReplicationInstanceArn="replication-instance-arn",
        MigrationType="full-load",
        TableMappings='{"rules":[]}',
    )
    task_arn = created["ReplicationTask"]["ReplicationTaskArn"]
    client.start_replication_task(
        ReplicationTaskArn=task_arn, StartReplicationTaskType="start-replication"
    )

    client.stop_replication_task(ReplicationTaskArn=task_arn)

    described = client.describe_replication_tasks(
        Filters=[{"Name": "replication-task-arn", "Values": [task_arn]}]
    )
    assert described["ReplicationTasks"][0]["Status"] == "stopped"
@mock_dms
def test_delete_replication_task():
    """A deleted task no longer shows up in describe results."""
    client = boto3.client("dms", region_name="us-east-1")
    created = client.create_replication_task(
        ReplicationTaskIdentifier="test",
        SourceEndpointArn="source-endpoint-arn",
        TargetEndpointArn="target-endpoint-arn",
        ReplicationInstanceArn="replication-instance-arn",
        MigrationType="full-load",
        TableMappings='{"rules":[]}',
    )
    task_arn = created["ReplicationTask"]["ReplicationTaskArn"]

    client.delete_replication_task(ReplicationTaskArn=task_arn)

    described = client.describe_replication_tasks(
        Filters=[{"Name": "replication-task-arn", "Values": [task_arn]}]
    )
    assert len(described["ReplicationTasks"]) == 0
@mock_dms
def test_delete_replication_task_throws_resource_not_found_error():
    """Deleting an unknown task ARN raises ResourceNotFoundFault."""
    client = boto3.client("dms", region_name="us-east-1")
    with pytest.raises(ClientError) as ex:
        client.delete_replication_task(ReplicationTaskArn="does-not-exist")
    assert ex.value.operation_name == "DeleteReplicationTask"
    error = ex.value.response["Error"]
    assert error["Code"] == "ResourceNotFoundFault"
    assert error["Message"] == "Replication task could not be found."
5,759 | input load | # See LICENSE for licensing information.
#
# Copyright (c) 2016-2023 Regents of the University of California, Santa Cruz
# All rights reserved.
#
from openram.sram_factory import factory
from openram import OPTS
from .bitcell_base_array import bitcell_base_array
class dummy_array(bitcell_base_array):
"""
Generate a dummy row/column for the replica array.
"""
    def __init__(self, rows, cols, column_offset=0, mirror=0, location="", name=""):
        """Build a rows x cols array of dummy bitcells.

        :param mirror: mirroring mode forwarded to place_array
            (presumably selects the cell flip pattern -- confirm in place_array)
        :param location: accepted but unused here; kept for caller compatibility
        """
        super().__init__(rows=rows, cols=cols, column_offset=column_offset, name=name)
        self.mirror = mirror
        self.create_netlist()
        # Layout generation is skipped entirely in netlist-only runs.
        if not OPTS.netlist_only:
            self.create_layout()
    def create_netlist(self):
        """ Create and connect the netlist """
        # This will create a default set of bitline/wordline names
        self.create_all_bitline_names()
        self.create_all_wordline_names()
        # Names exist before modules, pins, and instances are created,
        # since the later steps reference them.
        self.add_modules()
        self.add_pins()
        self.create_instances()
    def create_layout(self):
        """Place the cells, add layout pins, route supplies, and verify."""
        self.place_array("dummy_r{0}_c{1}", self.mirror)
        self.add_layout_pins()
        self.route_supplies()
        self.add_boundary()
        # Run design-rule and layout-vs-schematic checks on the finished array.
        self.DRC_LVS()
def add_modules(self):
""" Add the modules used in this design """
self.dummy_cell = factory.create(module_type=OPTS.dummy_bitcell)
self.cell = factory.create(module_type=OPTS.bitcell)
def create_instances(self):
""" Create the module instances used in this design """
self.cell_inst = {}
for col in range(self.column_size):
for row in range(self.row_size):
name = "bit_r{0}_c{1}".format(row, col)
self.cell_inst[row, col]=self.add_inst(name=name,
mod=self.dummy_cell)
self.connect_inst(self.get_bitcell_pins(row, col))
def add_pins(self):
# bitline pins are not added because they are floating
for bl_name in self.get_bitline_names():
self.add_pin(bl_name, "INOUT")
# bitline pins are not added because they are floating
for wl_name in self.get_wordline_names():
self.add_pin(wl_name, "INPUT")
self.add_pin("vdd", "POWER")
self.add_pin("gnd", "GROUND")
def add_layout_pins(self):
""" Add the layout pins """
# Add the bitline metal, but not as pins since they are going to just be floating
# For some reason, LVS has an issue if we don't add this metal
bitline_names = self.cell.get_all_bitline_names()
for col in range(self.column_size):
for port in self.all_ports:
bl_pin = self.cell_inst[0, col].get_pin(bitline_names[2 * port])
self.add_layout_pin(text="bl_{0}_{1}".format(port, col),
layer=bl_pin.layer,
offset=bl_pin.ll().scale(1, 0),
width=bl_pin.width(),
height=self.height)
br_pin = self.cell_inst[0, col].get_pin(bitline_names[2 * port + 1])
self.add_layout_pin(text="br_{0}_{1}".format(port, col),
layer=br_pin.layer,
offset=br_pin.ll().scale(1, 0),
width=br_pin.width(),
height=self.height)
wl_names = self.cell.get_all_wl_names()
for row in range(self.row_size):
for port in self.all_ports:
wl_pins = self.cell_inst[row, 0].get_pins(wl_names[port])
for wl_pin in wl_pins:
self.add_layout_pin(text="wl_{0}_{1}".format(port, row),
layer=wl_pin.layer,
offset=wl_pin.ll().scale(0, 1),
width=self.width,
height=wl_pin.height())
def route_supplies(self):
# Copy a vdd/gnd layout pin from every cell
for row in range(self.row_size):
for col in range(self.column_size):
inst = self.cell_inst[row, col]
for pin_name in ["vdd", "gnd"]:
self.copy_layout_pin(inst, pin_name)
def METHOD_NAME(self):
# FIXME: This appears to be old code from previous characterization. Needs to be updated.
wl_wire = self.gen_wl_wire()
return wl_wire.return_input_cap() |
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._operations import build_list_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class Operations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.authorization.v2021_03_01_preview.aio.AuthorizationManagementClient`'s
        :attr:`operations` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs) -> None:
        # The generated client passes (client, config, serializer,
        # deserializer, api_version) positionally; the keyword fallback keeps
        # direct (discouraged) construction working.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
        self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version")

    @distributed_trace
    def list(self, **kwargs: Any) -> AsyncIterable["_models.Operation"]:
        """Lists the operations available from this provider.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either Operation or the result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.authorization.v2021_03_01_preview.models.Operation]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        # Explicit api_version kwarg/param wins over the client default.
        api_version: str = kwargs.pop(
            "api_version", _params.pop("api-version", self._api_version or "2021-03-01-preview")
        )
        cls: ClsType[_models.OperationListResult] = kwargs.pop("cls", None)

        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def METHOD_NAME(next_link=None):
            # Build the initial request, or a continuation request from the
            # service-provided next_link (re-applying the api-version).
            if not next_link:
                request = build_list_request(
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and return (continuation token, items).
            deserialized = self._deserialize("OperationListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = METHOD_NAME(next_link)

            _stream = False
            pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorDefinition, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)

    list.metadata = {"url": "/providers/Microsoft.Authorization/operations"}
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import triton
import triton.language as tl
'''
layer-normalization
modified the triton kernel in
https://github.com/openai/triton/blob/34817ecc954a6f4ca7b4dfb352fdde1f8bd49ca5/python/tutorials/05-layer-norm.py
'''
@triton.jit
def layer_norm_kernel(
    Out,
    A,
    Weight,
    Bias,
    stride,
    N,
    eps,
    BLOCK_SIZE: tl.constexpr,
):
    """Layer-normalize one row of A per program instance, writing to Out.

    stride is the row stride of A/Out, N the row length, eps the variance
    epsilon; statistics are accumulated in float32.
    """
    # position of elements processed by this program
    row = tl.program_id(0)
    Out += row * stride
    A += row * stride
    # compute mean: accumulate BLOCK_SIZE-wide partial sums, then reduce
    mean = 0
    _mean = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
    for off in range(0, N, BLOCK_SIZE):
        cols = off + tl.arange(0, BLOCK_SIZE)
        a = tl.load(A + cols, mask=cols < N, other=0.0).to(tl.float32)
        _mean += a
    mean = tl.sum(_mean, axis=0) / N
    # compute variance (masked lanes contribute 0 to the sum of squares)
    _var = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
    for off in range(0, N, BLOCK_SIZE):
        cols = off + tl.arange(0, BLOCK_SIZE)
        a = tl.load(A + cols, mask=cols < N, other=0.0).to(tl.float32)
        a = tl.where(cols < N, a - mean, 0.0)
        _var += a * a
    var = tl.sum(_var, axis=0) / N
    rstd = 1 / tl.sqrt(var + eps)
    # normalize, then apply the affine transform (weight, bias)
    for off in range(0, N, BLOCK_SIZE):
        cols = off + tl.arange(0, BLOCK_SIZE)
        mask = cols < N
        weight = tl.load(Weight + cols, mask=mask)
        bias = tl.load(Bias + cols, mask=mask)
        a = tl.load(A + cols, mask=mask, other=0.0).to(tl.float32)
        a_hat = (a - mean) * rstd
        out = a_hat * weight + bias
        # write-back
        tl.store(Out + cols, out, mask=mask)
@triton.jit
def METHOD_NAME(
    Out,
    A,
    Residual,
    ln_input,
    Weight,
    Bias,
    stride,
    N,
    eps,
    BLOCK_SIZE: tl.constexpr,
):
    """Fused (A + Residual) followed by layer norm, one row per program.

    The pre-norm sum is also stored to ln_input so later passes (and the
    backward path) can reread it.
    """
    # position of elements processed by this program
    row = tl.program_id(0)
    Out += row * stride
    A += row * stride
    Residual += row * stride
    ln_input += row * stride
    # compute mean of (A + Residual), persisting the sum into ln_input
    mean = 0
    _mean = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
    for off in range(0, N, BLOCK_SIZE):
        cols = off + tl.arange(0, BLOCK_SIZE)
        a = tl.load(A + cols, mask=cols < N, other=0.0).to(tl.float32)
        res = tl.load(Residual + cols, mask=cols < N, other=0.0).to(tl.float32)
        a = a + res
        tl.store(ln_input + cols, a, mask=cols < N)
        _mean += a
    mean = tl.sum(_mean, axis=0) / N
    # compute variance from the stored pre-norm sum
    _var = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
    for off in range(0, N, BLOCK_SIZE):
        cols = off + tl.arange(0, BLOCK_SIZE)
        a = tl.load(ln_input + cols, mask=cols < N, other=0.0).to(tl.float32)
        a = tl.where(cols < N, a - mean, 0.0)
        _var += a * a
    var = tl.sum(_var, axis=0) / N
    rstd = 1 / tl.sqrt(var + eps)
    # normalize, then apply the affine transform (weight, bias)
    for off in range(0, N, BLOCK_SIZE):
        cols = off + tl.arange(0, BLOCK_SIZE)
        mask = cols < N
        weight = tl.load(Weight + cols, mask=mask)
        bias = tl.load(Bias + cols, mask=mask)
        a = tl.load(ln_input + cols, mask=mask, other=0.0).to(tl.float32)
        a_hat = (a - mean) * rstd
        out = a_hat * weight + bias
        # write-back
        tl.store(Out + cols, out, mask=mask)
@triton.jit
def layer_norm_residual_bias_kernel(
    Out,
    A,
    Residual,
    InputBias,
    ln_input,
    Weight,
    Bias,
    stride,
    N,
    eps,
    BLOCK_SIZE: tl.constexpr,
):
    """Fused (A + InputBias + Residual) followed by layer norm, per row.

    InputBias is a per-column (length-N) bias added before normalization;
    the pre-norm sum is stored to ln_input for reuse.
    """
    # position of elements processed by this program
    row = tl.program_id(0)
    Out += row * stride
    A += row * stride
    Residual += row * stride
    ln_input += row * stride
    # compute mean of (A + InputBias + Residual), persisting into ln_input
    mean = 0
    _mean = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
    for off in range(0, N, BLOCK_SIZE):
        cols = off + tl.arange(0, BLOCK_SIZE)
        a = tl.load(A + cols, mask=cols < N, other=0.0).to(tl.float32)
        res = tl.load(Residual + cols, mask=cols < N, other=0.0).to(tl.float32)
        b = tl.load(InputBias + cols, mask=cols < N, other=0.0).to(tl.float32)
        a = a + b + res
        tl.store(ln_input + cols, a, mask=cols < N)
        _mean += a
    mean = tl.sum(_mean, axis=0) / N
    # compute variance from the stored pre-norm sum
    _var = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
    for off in range(0, N, BLOCK_SIZE):
        cols = off + tl.arange(0, BLOCK_SIZE)
        a = tl.load(ln_input + cols, mask=cols < N, other=0.0).to(tl.float32)
        a = tl.where(cols < N, a - mean, 0.0)
        _var += a * a
    var = tl.sum(_var, axis=0) / N
    rstd = 1 / tl.sqrt(var + eps)
    # normalize, then apply the affine transform (weight, bias)
    for off in range(0, N, BLOCK_SIZE):
        cols = off + tl.arange(0, BLOCK_SIZE)
        mask = cols < N
        weight = tl.load(Weight + cols, mask=mask)
        bias = tl.load(Bias + cols, mask=mask)
        a = tl.load(ln_input + cols, mask=mask, other=0.0).to(tl.float32)
        a_hat = (a - mean) * rstd
        out = a_hat * weight + bias
        # write-back
        tl.store(Out + cols, out, mask=mask)
def layer_norm(a, weight, bias, eps):
    """Apply fused layer normalization over the last dimension of ``a``."""
    assert a.is_contiguous()
    assert weight.is_contiguous()
    assert bias.is_contiguous()

    out = torch.empty_like(a)
    # Flatten all leading dims; each row is normalized independently.
    a_2d = a.view(-1, a.shape[-1])
    n_rows, n_cols = a_2d.shape

    # Keep one block under 64KB per feature row when possible.
    max_fused = 65536 // a.element_size()
    block = min(max_fused, triton.next_power_of_2(n_cols))
    block = min(max(block, 128), 4096)
    if n_cols > 4096:
        block = 8192
    # heuristics for number of warps
    warps = min(max(block // 256, 1), 8)

    layer_norm_kernel[(n_rows, )](
        out,
        a_2d,
        weight,
        bias,
        a_2d.stride(0),
        n_cols,
        eps,
        BLOCK_SIZE=block,
        num_warps=warps,
    )
    return out
def layer_norm_residual(a, input_bias, residual, weight, bias, eps):
    """Fused residual-add (plus optional input bias) followed by layer norm."""
    assert a.is_contiguous()
    assert weight.is_contiguous()
    assert bias.is_contiguous()
    assert residual.is_contiguous()

    # Output plus a scratch buffer holding the pre-norm sum.
    out = torch.empty_like(a)
    ln_input = torch.empty_like(a)

    # Flatten all leading dims; each row is normalized independently.
    a_2d = a.view(-1, a.shape[-1])
    residual = residual.view(-1, residual.shape[-1])
    n_rows, n_cols = a_2d.shape

    # Keep one block under 64KB per feature row when possible.
    max_fused = 65536 // a.element_size()
    block = min(max_fused, triton.next_power_of_2(n_cols))
    block = min(max(block, 128), 4096)
    if n_cols > 4096:
        block = 8192
    # heuristics for number of warps
    warps = min(max(block // 256, 1), 8)

    if input_bias is None:
        METHOD_NAME[(n_rows, )](
            out,
            a_2d,
            residual,
            ln_input,
            weight,
            bias,
            a_2d.stride(0),
            n_cols,
            eps,
            BLOCK_SIZE=block,
            num_warps=warps,
        )
    else:
        layer_norm_residual_bias_kernel[(n_rows, )](
            out,
            a_2d,
            residual,
            input_bias,
            ln_input,
            weight,
            bias,
            a_2d.stride(0),
            n_cols,
            eps,
            BLOCK_SIZE=block,
            num_warps=warps,
        )
    return out
import contextlib
import logging
import os
import socket
from typing import Any, Callable, Dict, Optional, Tuple, Type, TYPE_CHECKING, Union
import torch
import torch.distributed as dist
from ray.train.backend import BackendConfig
from ray.train.data_parallel_trainer import DataParallelTrainer
from ray.train.torch import TorchTrainer
from torch import nn
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.optim import Optimizer
from torchmetrics.utilities.distributed import gather_all_tensors
from ludwig.distributed.base import DistributedStrategy
from ludwig.modules.optimization_modules import create_optimizer
from ludwig.utils.torch_utils import get_torch_device
if TYPE_CHECKING:
from ludwig.models.base import BaseModel
from ludwig.modules.lr_scheduler import LRScheduler
from ludwig.schema.trainer import ECDTrainerConfig
from ludwig.utils.checkpoint_utils import Checkpoint
class DDPStrategy(DistributedStrategy):
    """Distributed strategy backed by torch DistributedDataParallel.

    Thin wrappers over ``torch.distributed`` collectives; assumes the
    process group has already been initialized by the launcher.
    """
    def __init__(self):
        self._local_rank, self._local_size = local_rank_and_size()
        self._log_on_init()

    def _log_on_init(self):
        logging.info("Using DDP strategy")

    def prepare(
        self,
        model: nn.Module,
        trainer_config: "ECDTrainerConfig",
        base_learning_rate: float,
    ) -> Tuple[nn.Module, Optimizer]:
        """Wrap the model in DDP and build its optimizer."""
        return DDP(model), create_optimizer(model, trainer_config.optimizer, base_learning_rate)

    def METHOD_NAME(self) -> int:
        # Total number of workers in the process group.
        return dist.get_world_size()

    def rank(self) -> int:
        # Global rank of this worker.
        return dist.get_rank()

    def local_size(self) -> int:
        # Number of workers on this host (computed once at init).
        return self._local_size

    def local_rank(self) -> int:
        # Rank of this worker among workers on the same host.
        return self._local_rank

    def barrier(self):
        return dist.barrier()

    def allreduce(self, t: torch.Tensor) -> torch.Tensor:
        # In-place sum-reduce across all workers; returns the same tensor.
        dist.all_reduce(t)
        return t

    def broadcast(self, t: torch.Tensor) -> torch.Tensor:
        dist.broadcast(t)
        return t

    def sync_model(self, model: nn.Module):
        # TODO(travis): open question if this is needed to ensure all workers using same weights
        pass

    def sync_optimizer(self, optimizer: Optimizer):
        # TODO(travis): open question if this is needed to ensure all workers using same optimizer state
        pass

    def broadcast_object(self, v: Any, name: Optional[str] = None) -> Any:
        # 'name' is unused; kept for interface compatibility.
        output = [v]
        dist.broadcast_object_list(output)
        return output[0]

    def wait_optimizer_synced(self, optimizer: Optimizer):
        pass

    @contextlib.contextmanager
    def prepare_model_update(self, model: nn.Module, should_step: bool):
        if should_step:
            yield
        else:
            # Prevents DDP from syncing gradients during accumulation step
            with model.no_sync():
                yield

    @contextlib.contextmanager
    def prepare_optimizer_update(self, optimizer: Optimizer):
        # No-op for DDP; exists so strategies share a uniform interface.
        yield

    @classmethod
    def is_available(cls) -> bool:
        return dist.is_available() and dist.is_initialized()

    @classmethod
    def gather_all_tensors_fn(cls) -> Optional[Callable]:
        return gather_all_tensors

    @classmethod
    def get_ray_trainer_backend(cls, **kwargs) -> Optional[Any]:
        from ray.train.torch import TorchConfig

        return TorchConfig()

    @classmethod
    def get_trainer_cls(cls, backend_config: BackendConfig) -> Tuple[Type[DataParallelTrainer], Dict[str, Any]]:
        return TorchTrainer, dict(torch_config=backend_config)

    def shutdown(self):
        # TODO(travis): currently Ray handles this for us, but is subject to hangs if one of the workers raises an
        #  exception and the other makes a collective op. We should figure out a way to make this safe to call
        #  multiple times. It looks like there is a fix we can make use of when we upgrade to Ray 2.1:
        #  https://discuss.ray.io/t/torchtrainer-hangs-when-only-1-worker-raises-error/7447/11
        # dist.destroy_process_group()
        pass

    def create_checkpoint_handle(
        self,
        dist_model: nn.Module,
        model: nn.Module,
        optimizer: Optional[Optimizer] = None,
        scheduler: Optional["LRScheduler"] = None,
    ) -> "Checkpoint":
        """Return a checkpoint object suitable for multi-node training."""
        from ludwig.utils.checkpoint_utils import MultiNodeCheckpoint

        return MultiNodeCheckpoint(self, model, optimizer, scheduler)

    def to_device(self, model: Union["BaseModel", DDP], device: Optional[torch.device] = None) -> nn.Module:
        try:
            return model.to_device(device if device is not None else get_torch_device())
        except AttributeError:
            # Model is already wrapped in DistributedDataParallel, so it has already been moved to device
            return model
def local_rank_and_size() -> Tuple[int, int]:
    """Return (local_rank, local_size) for this worker within its host."""
    # DeepSpeed CLI and other tools may set these environment variables for us.
    env_rank = os.environ.get("LOCAL_RANK")
    env_size = os.environ.get("LOCAL_SIZE")
    if env_rank is not None and env_size is not None:
        return int(env_rank), int(env_size)

    # Otherwise gather (rank, hostname) from every worker and derive the local
    # group by counting workers that share this host.
    rank = dist.get_rank()
    host = socket.gethostname()
    gathered = [None] * dist.get_world_size()
    dist.all_gather_object(gathered, (rank, host))

    # local_size: workers on this host; local_rank: how many of them have a
    # lower world rank than ours.
    local_rank = 0
    local_size = 0
    for other_rank, other_host in gathered:
        if other_host != host:
            continue
        local_size += 1
        if other_rank < rank:
            local_rank += 1
    return local_rank, local_size
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
"""assign/unassign to ToDo"""
import json
import frappe
import frappe.share
import frappe.utils
from frappe import _
from frappe.desk.doctype.notification_log.notification_log import (
enqueue_create_notification,
get_title,
get_title_html,
)
from frappe.desk.form.document_follow import follow_document
class DuplicateToDoError(frappe.ValidationError):
    """Raised when assigning a document to a user who already has an open ToDo for it."""
    pass
def get(args=None):
    """Return up to five open ToDo assignments for the given document."""
    if not args:
        args = frappe.local.form_dict

    filters = {
        "reference_type": args.get("doctype"),
        "reference_name": args.get("name"),
        "status": ("not in", ("Cancelled", "Closed")),
    }
    return frappe.get_all(
        "ToDo",
        fields=["allocated_to as owner", "name"],
        filters=filters,
        limit=5,
    )
@frappe.whitelist()
def add(args=None):
    """add in someone's to do list

    args = {
        "assign_to": [],
        "doctype": ,
        "name": ,
        "description": ,
        "assignment_rule":
    }

    Skips users who already have an open ToDo for the document, shares the
    document with users lacking read permission (unless sharing is disabled,
    in which case it throws), and notifies each new assignee.
    """
    if not args:
        args = frappe.local.form_dict

    from frappe.utils import nowdate

    users_with_duplicate_todo = []
    shared_with_users = []

    for assign_to in frappe.parse_json(args.get("assign_to")):
        filters = {
            "reference_type": args["doctype"],
            "reference_name": args["name"],
            "status": "Open",
            "allocated_to": assign_to,
        }

        if frappe.get_all("ToDo", filters=filters):
            # Already has an open ToDo for this document; do not duplicate.
            users_with_duplicate_todo.append(assign_to)
        else:
            if not args.get("description"):
                args["description"] = _("Assignment for {0} {1}").format(args["doctype"], args["name"])

            d = frappe.get_doc(
                {
                    "doctype": "ToDo",
                    "allocated_to": assign_to,
                    "reference_type": args["doctype"],
                    "reference_name": args["name"],
                    "description": args.get("description"),
                    "priority": args.get("priority", "Medium"),
                    "status": "Open",
                    "date": args.get("date", nowdate()),
                    "assigned_by": args.get("assigned_by", frappe.session.user),
                    "assignment_rule": args.get("assignment_rule"),
                }
            ).insert(ignore_permissions=True)

            # set assigned_to if field exists
            if frappe.get_meta(args["doctype"]).get_field("assigned_to"):
                frappe.db.set_value(args["doctype"], args["name"], "assigned_to", assign_to)

            doc = frappe.get_doc(args["doctype"], args["name"])

            # if assignee does not have permissions, share or inform
            if not frappe.has_permission(doc=doc, user=assign_to):
                if frappe.get_system_settings("disable_document_sharing"):
                    msg = _("User {0} is not permitted to access this document.").format(frappe.bold(assign_to))
                    msg += "<br>" + _(
                        "As document sharing is disabled, please give them the required permissions before assigning."
                    )
                    frappe.throw(msg, title=_("Missing Permission"))
                else:
                    frappe.share.add(doc.doctype, doc.name, assign_to)
                    shared_with_users.append(assign_to)

            # make this document followed by assigned user
            if frappe.get_cached_value("User", assign_to, "follow_assigned_documents"):
                follow_document(args["doctype"], args["name"], assign_to)

            # notify
            notify_assignment(
                d.assigned_by,
                d.allocated_to,
                d.reference_type,
                d.reference_name,
                action="ASSIGN",
                description=args.get("description"),
            )

    # BUGFIX: 'alert=True' was previously passed to str.format() -- where
    # unused keyword arguments are silently ignored -- instead of msgprint(),
    # so these messages never rendered as alerts.
    if shared_with_users:
        user_list = format_message_for_assign_to(shared_with_users)
        frappe.msgprint(
            _("Shared with the following Users with Read access:{0}").format(user_list), alert=True
        )

    if users_with_duplicate_todo:
        user_list = format_message_for_assign_to(users_with_duplicate_todo)
        frappe.msgprint(
            _("Already in the following Users ToDo list:{0}").format(user_list), alert=True
        )

    return get(args)
@frappe.whitelist()
def add_multiple(args=None):
    """Assign several documents at once; args['name'] is a JSON list of docnames."""
    if not args:
        args = frappe.local.form_dict

    for docname in json.loads(args["name"]):
        args.update({"name": docname})
        add(args)
def close_all_assignments(doctype, name):
    """Close every non-cancelled assignment on the document; False if none exist."""
    assignments = frappe.get_all(
        "ToDo",
        fields=["allocated_to", "name"],
        filters=dict(reference_type=doctype, reference_name=name, status=("!=", "Cancelled")),
    )
    if not assignments:
        return False

    for assignment in assignments:
        METHOD_NAME(
            doctype,
            name,
            todo=assignment.name,
            assign_to=assignment.allocated_to,
            status="Closed",
        )
    return True
@frappe.whitelist()
def remove(doctype, name, assign_to):
    """Cancel the given user's assignment on the document."""
    return METHOD_NAME(doctype, name, "", assign_to, status="Cancelled")
@frappe.whitelist()
def close(doctype: str, name: str, assign_to: str):
    """Mark the current user's own assignment on the document as Closed."""
    if assign_to != frappe.session.user:
        # Only the assignee themselves may complete their to-do.
        frappe.throw(_("Only the assignee can complete this to-do."))

    return METHOD_NAME(doctype, name, "", assign_to, status="Closed")
def METHOD_NAME(doctype, name, todo=None, assign_to=None, status="Cancelled"):
    """remove from todo

    Set the matching ToDo's status (and notify), then clear the document's
    'assigned_to' field where applicable. Returns the remaining assignments.
    """
    try:
        if not todo:
            # Look up the user's ToDo that is not already in the target status.
            todo = frappe.db.get_value(
                "ToDo",
                {
                    "reference_type": doctype,
                    "reference_name": name,
                    "allocated_to": assign_to,
                    "status": ("!=", status),
                },
            )
        if todo:
            todo = frappe.get_doc("ToDo", todo)
            todo.status = status
            todo.save(ignore_permissions=True)

            notify_assignment(todo.assigned_by, todo.allocated_to, todo.reference_type, todo.reference_name)
    except frappe.DoesNotExistError:
        # The ToDo vanished concurrently; treat as already removed.
        pass

    # clear assigned_to if field exists
    if frappe.get_meta(doctype).get_field("assigned_to") and status in ("Cancelled", "Closed"):
        frappe.db.set_value(doctype, name, "assigned_to", None)

    return get({"doctype": doctype, "name": name})
def clear(doctype, name):
    """Cancel every assignment on the document; return False if none existed."""
    assignments = frappe.get_all(
        "ToDo",
        fields=["allocated_to", "name"],
        filters=dict(reference_type=doctype, reference_name=name),
    )
    if not assignments:
        return False

    for assignment in assignments:
        METHOD_NAME(
            doctype,
            name,
            todo=assignment.name,
            assign_to=assignment.allocated_to,
            status="Cancelled",
        )
    return True
def notify_assignment(
    assigned_by, allocated_to, doc_type, doc_name, action="CLOSE", description=None
):
    """
    Notify assignee that there is a change in assignment

    action is "ASSIGN" for new assignments; anything else is treated as a
    removal notification. Self-assignments and disabled users are skipped.
    """
    if not (assigned_by and allocated_to and doc_type and doc_name):
        return

    # return if self assigned or user disabled
    if assigned_by == allocated_to or not frappe.db.get_value("User", allocated_to, "enabled"):
        return

    # Search for email address in description -- i.e. assignee
    user_name = frappe.get_cached_value("User", frappe.session.user, "full_name")
    title = get_title(doc_type, doc_name)
    description_html = f"<div>{description}</div>" if description else None

    if action == "CLOSE":
        subject = _("Your assignment on {0} {1} has been removed by {2}").format(
            frappe.bold(_(doc_type)), get_title_html(title), frappe.bold(user_name)
        )
    else:
        user_name = frappe.bold(user_name)
        document_type = frappe.bold(_(doc_type))
        title = get_title_html(title)
        subject = _("{0} assigned a new task {1} {2} to you").format(user_name, document_type, title)

    notification_doc = {
        "type": "Assignment",
        "document_type": doc_type,
        "subject": subject,
        "document_name": doc_name,
        "from_user": frappe.session.user,
        "email_content": description_html,
    }

    enqueue_create_notification(allocated_to, notification_doc)
def format_message_for_assign_to(users):
    """Render a user list as an HTML fragment: a blank line, then one user per line."""
    separator = "<br>"
    return separator * 2 + separator.join(users)
"""Tests of fiona.crs."""
import pytest
from .conftest import requires_gdal33
from fiona import crs
from fiona.env import Env
from fiona.errors import CRSError, FionaDeprecationWarning
def test_proj_keys():
    """The bundled PROJ parameter list is complete and contains known keys."""
    assert len(crs.all_proj_keys) == 87
    for key in ("init", "proj", "no_mayo"):
        assert key in crs.all_proj_keys
def test_to_string():
    """Bogus items in a CRS mapping are ignored when stringifying."""
    mapping = {
        "proj": "longlat",
        "ellps": "WGS84",
        "datum": "WGS84",
        "no_defs": True,
        "foo": True,
        "axis": False,
        "belgium": [1, 2],
    }
    assert crs.CRS.from_user_input(mapping).to_string() == "EPSG:4326"
def test_to_string_utm():
    """A UTM mapping with bogus extras stringifies to its EPSG code."""
    mapping = {
        "proj": "utm",
        "ellps": "WGS84",
        "zone": 13,
        "no_defs": True,
        "foo": True,
        "axis": False,
        "belgium": [1, 2],
    }
    assert crs.CRS.from_user_input(mapping).to_string() == "EPSG:32613"
def test_to_string_epsg():
    """An init-style EPSG mapping stringifies to the EPSG authority code."""
    mapping = {"init": "epsg:4326", "no_defs": True}
    result = crs.CRS.from_user_input(mapping).to_string()
    assert result == "EPSG:4326"
def test_from_epsg():
    """CRS.from_epsg populates the legacy 'init' item."""
    result = crs.CRS.from_epsg(4326)
    assert result["init"] == "epsg:4326"
def test_from_epsg_neg():
    """Negative EPSG codes are rejected with CRSError."""
    with pytest.raises(CRSError):
        crs.CRS.from_epsg(-1)
@pytest.mark.parametrize("invalid_input", [
    "a random string that is invalid",
    ("a", "tuple"),
    "-48567=409 =2095"
])
def test_invalid_crs(invalid_input):
    """Unparseable CRS inputs raise CRSError."""
    with pytest.raises(CRSError):
        crs.CRS.from_user_input(invalid_input)
def test_custom_crs():
    """Any object exposing a to_wkt() method is accepted as CRS input."""
    class CustomCRS:
        def to_wkt(self):
            return (
                'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",'
                '6378137,298.257223563,AUTHORITY["EPSG","7030"]],'
                'AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,'
                'AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,'
                'AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]'
            )

    assert crs.CRS.from_user_input(CustomCRS()).to_wkt().startswith('GEOGCS["WGS 84"')
def test_crs__version():
    """to_wkt(version=...) selects the requested WKT dialect (WKT2_2018)."""
    target_crs = (
        'PROJCS["IaRCS_04_Sioux_City-Iowa_Falls_NAD_1983_2011_LCC_US_Feet",'
        'GEOGCS["GCS_NAD_1983_2011",DATUM["D_NAD_1983_2011",'
        'SPHEROID["GRS_1980",6378137.0,298.257222101]],'
        'PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]],'
        'PROJECTION["Lambert_Conformal_Conic"],'
        'PARAMETER["False_Easting",14500000.0],'
        'PARAMETER["False_Northing",8600000.0],'
        'PARAMETER["Central_Meridian",-94.83333333333333],'
        'PARAMETER["Standard_Parallel_1",42.53333333333333],'
        'PARAMETER["Standard_Parallel_2",42.53333333333333],'
        'PARAMETER["Scale_Factor",1.000045],'
        'PARAMETER["Latitude_Of_Origin",42.53333333333333],'
        'UNIT["Foot_US",0.3048006096012192]]'
    )
    assert (
        crs.CRS.from_user_input(target_crs)
        .to_wkt(version="WKT2_2018")
        .startswith(
            'PROJCRS["IaRCS_04_Sioux_City-Iowa_Falls_NAD_1983_2011_LCC_US_Feet"'
        )
    )
@requires_gdal33
def test_crs__esri_only_wkt():
    """ESRI-flavored WKT is parsed; output dialect varies by GDAL version.

    https://github.com/Toblerity/Fiona/issues/977
    """
    target_crs = (
        'PROJCS["IaRCS_04_Sioux_City-Iowa_Falls_NAD_1983_2011_LCC_US_Feet",'
        'GEOGCS["GCS_NAD_1983_2011",DATUM["D_NAD_1983_2011",'
        'SPHEROID["GRS_1980",6378137.0,298.257222101]],'
        'PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]],'
        'PROJECTION["Lambert_Conformal_Conic"],'
        'PARAMETER["False_Easting",14500000.0],'
        'PARAMETER["False_Northing",8600000.0],'
        'PARAMETER["Central_Meridian",-94.83333333333333],'
        'PARAMETER["Standard_Parallel_1",42.53333333333333],'
        'PARAMETER["Standard_Parallel_2",42.53333333333333],'
        'PARAMETER["Scale_Factor",1.000045],'
        'PARAMETER["Latitude_Of_Origin",42.53333333333333],'
        'UNIT["Foot_US",0.3048006096012192]]'
    )
    assert (
        crs.CRS.from_user_input(target_crs)
        .to_wkt()
        .startswith(
            (
                'PROJCS["IaRCS_04_Sioux_City-Iowa_Falls_NAD_1983_2011_LCC_US_Feet"',
                'PROJCRS["IaRCS_04_Sioux_City-Iowa_Falls_NAD_1983_2011_LCC_US_Feet"',  # GDAL 3.3+
            )
        )
    )
def test_to_wkt__env_version():
    """The OSR_WKT_FORMAT environment option selects the WKT output dialect."""
    with Env(OSR_WKT_FORMAT="WKT2_2018"):
        wkt = crs.CRS.from_string("EPSG:4326").to_wkt()
    assert wkt.startswith('GEOGCRS["WGS 84",')
def METHOD_NAME():
    """An unknown WKT version string passed to to_wkt raises CRSError."""
    with pytest.raises(CRSError):
        crs.CRS.from_string("EPSG:4326").to_wkt(version="invalid")
@pytest.mark.parametrize(
    "func, arg",
    [
        (crs.from_epsg, 4326),
        (crs.from_string, "EPSG:4326"),
        (crs.to_string, "EPSG:4326"),
    ],
)
def test_from_func_deprecations(func, arg):
    """Legacy module-level helpers emit FionaDeprecationWarning."""
    with pytest.warns(FionaDeprecationWarning):
        _ = func(arg)
"""
Functions to either scale single-cell data or normalize such that the row-wise sums are identical.
"""
from typing import Dict, Iterable, Optional, Union
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
import numpy as np
import scipy
from anndata import AnnData
from sklearn.utils import sparsefuncs
from ..logging import logger_manager as lm
def _normalize_data(X, counts, after=None, copy=False, rows=True, round=False):
"""Row-wise or column-wise normalization of sparse data array.
Args:
X: Sparse data array to modify.
counts: Array of shape [1, n], where n is the number of buckets or number of genes, containing the total
counts in each cell or for each gene, respectively.
after: Target sum total counts for each gene or each cell. Defaults to `None`, in which case each observation
(cell) will have a total count equal to the median of total counts for observations (cells) before
normalization.
copy: Whether to operate on a copy of X.
rows: Whether to perform normalization over rows (normalize each cell to have the same total count number) or
over columns (normalize each gene to have the same total count number).
round: Whether to round to three decimal places to more exactly match the desired number of total counts.
"""
X = X.copy() if copy else X
if issubclass(X.dtype.type, (int, np.integer)):
X = X.astype(np.float32)
counts_greater_than_zero = counts[counts > 0]
after = np.median(counts_greater_than_zero, axis=0) if after is None else after
counts += counts == 0
counts = counts / after
if scipy.sparse.issparse(X):
sparsefuncs.inplace_row_scale(X, 1 / counts)
elif isinstance(counts, np.ndarray):
if rows:
np.divide(X, counts[:, None], out=X)
else:
np.divide(X, counts[None, :], out=X)
else:
if rows:
X = np.divide(X, counts[:, None])
else:
X = np.divide(X, counts[None, :])
if round:
X = np.around(X, decimals=3)
return X
# Normalization wrapper:
def METHOD_NAME(
    adata: AnnData,
    target_sum: Optional[float] = 1e4,
    exclude_highly_expressed: bool = False,
    max_fraction: float = 0.05,
    key_added: Optional[str] = None,
    layer: Optional[str] = None,
    inplace: bool = True,
    copy: bool = False,
) -> Union[AnnData, Dict[str, np.ndarray]]:
    """\
    Normalize counts per cell.
    Normalize each cell by total counts over all genes, so that every cell has the same total count after normalization.
    If `exclude_highly_expressed=True`, very highly expressed genes are excluded from the computation of the
    normalization factor (size factor) for each cell. This is meaningful as these can strongly influence the resulting
    normalized values for all other genes.
    Args:
        adata: The annotated data matrix of shape `n_obs` × `n_vars`. Rows correspond to cells and columns to genes.
        target_sum: Desired sum of counts for each cell post-normalization. If `None`, after normalization,
            each observation (cell) will have a total count equal to the median of total counts for observations (
            cells) before normalization.
        exclude_highly_expressed: Exclude (very) highly expressed genes for the computation of the normalization factor
            for each cell. A gene is considered highly expressed if it has more than `max_fraction` of the total counts
            in at least one cell.
        max_fraction: If `exclude_highly_expressed=True`, this is the cutoff threshold for excluding genes.
        key_added: Name of the field in `adata.obs` where the normalization factor is stored.
        layer: Layer to normalize instead of `X`. If `None`, `X` is normalized.
        inplace: Whether to update `adata` or return dictionary with normalized copies of `adata.X` and `adata.layers`.
        copy: Whether to modify copied input object. Not compatible with inplace=False.
    Returns:
        Returns dictionary with normalized copies of `adata.X` and `adata.layers` or updates `adata` with normalized
        version of the original `adata.X` and `adata.layers`, depending on `inplace`.
    """
    logger = lm.get_main_logger()
    # NOTE(review): logger.error() only logs -- the invalid argument
    # combinations below do NOT abort execution; confirm whether raising an
    # exception is the intended behavior.
    if copy:
        if not inplace:
            logger.error("`copy=True` cannot be used with `inplace=False`.")
        adata = adata.copy()
    if max_fraction < 0 or max_fraction > 1:
        logger.error("Choose max_fraction between 0 and 1.")
    if adata.is_view:
        # Views cannot be written to in place; realize the data first.
        logger.warning("Received a view of an AnnData object; making a copy.")
        adata._init_as_actual(adata.copy())
    # Select the matrix to normalize: a named layer or the main .X.
    if layer is not None:
        X = adata.layers[layer]
    else:
        X = adata.X
    gene_subset = None
    msg = "Normalizing counts per cell..."
    if exclude_highly_expressed:
        counts_per_cell = X.sum(1)  # original counts per cell
        counts_per_cell = np.ravel(counts_per_cell)
        # A gene is "highly expressed" if it exceeds max_fraction of the total
        # counts in at least one cell; gene_subset marks the genes to KEEP.
        gene_subset = (X > counts_per_cell[:, None] * max_fraction).sum(0)
        gene_subset = np.ravel(gene_subset) == 0
        msg += (
            " The following highly-expressed genes are not considered during "
            f"normalization factor computation:\n{adata.var_names[~gene_subset].tolist()}"
        )
        # Recompute the size factors over the retained genes only.
        counts_per_cell = X[:, gene_subset].sum(1)
    else:
        counts_per_cell = X.sum(1)
    logger.info(msg)
    counts_per_cell = np.ravel(counts_per_cell)
    cell_subset = counts_per_cell > 0
    if not np.all(cell_subset):
        logger.warning("Some cells have zero counts")
    if inplace:
        if key_added is not None:
            # Store the pre-normalization totals as the size-factor column.
            adata.obs[key_added] = counts_per_cell
        X = _normalize_data(X, counts_per_cell, target_sum)
        if layer is not None:
            adata.layers[layer] = X
        else:
            adata.X = X
    else:
        # Leave adata untouched; hand back normalized copies instead.
        dat = dict(
            X=_normalize_data(X, counts_per_cell, target_sum, copy=True),
            norm_factor=counts_per_cell,
        )
    if key_added is not None:
        logger.debug(f"and added {key_added!r}, counts per cell before normalization (adata.obs)")
    if copy:
        return adata
    elif not inplace:
        return dat
#!/usr/bin/env python3
# coding: utf-8 -*-
#
# Author: zaraki673 & pipiche38
#
"""
Module: low level commands manuf. specific ZiGate
"""
from Modules.sendZigateCommand import send_zigatecmd_raw
def zigate_set_mode(self, mode):
    """Switch the ZiGate frame-delivery mode.

    Mode values (cf. https://github.com/fairecasoimeme/ZiGate/pull/307):
    0x00 normal operation, 0x01 raw mode, 0x02 hybrid mode (all inbound
    messages also delivered via 0x8002 in addition to the decoded ones).
    """
    if self.zigbee_communication == "zigpy":
        self.log.logging("zigateCommand", "Debug", "zigate_set_mode %s not implemennted in zigpy" % mode)
        return
    self.log.logging("zigateCommand", "Debug", "zigate_set_mode %s" % mode)
    # (raw, hybrid) flag pair per mode; unknown modes leave the flags as-is.
    flag_map = {
        0x00: (False, False),
        0x01: (True, False),
        0x02: (False, True),
    }
    if mode in flag_map:
        raw_flag, hybrid_flag = flag_map[mode]
        self.pluginconf.pluginConf["ControllerInRawMode"] = raw_flag
        self.pluginconf.pluginConf["ControllerInHybridMode"] = hybrid_flag
    return send_zigatecmd_raw(self, "0002", "%02x" % mode)
def zigate_set_loglevel(self, loglevel):
    # Forward the firmware log-level request (native ZiGate only).
    # NOTE(review): on the native path nothing is sent after the debug log --
    # the send_zigatecmd_raw() call appears to be missing; confirm intent.
    if self.zigbee_communication == "zigpy":
        self.log.logging( "zigateCommand", "Debug","zigate_set_loglevel %s not implemennted in zigpy" %loglevel)
        return
    self.log.logging( "zigateCommand", "Debug","zigate_set_loglevel %s" %loglevel)
def zigate_firmware_default_response(self, enable="00"):
    """Enable/disable firmware Default Responses (native command 0x0003)."""
    via_zigpy = self.zigbee_communication == "zigpy"
    if via_zigpy:
        self.log.logging("zigateCommand", "Debug", "zigate_firmware_default_response %s not implemennted in zigpy" % enable)
        return
    self.log.logging("zigateCommand", "Debug", "zigate_firmware_default_response %s" % enable)
    return send_zigatecmd_raw(self, "0003", enable)
def zigate_get_nwk_state(self):
    """Query the coordinator's current network status."""
    self.log.logging("zigateCommand", "Debug", "zigate_get_nwk_state")
    if self.zigbee_communication != "zigpy":
        # Native ZiGate: command 0x0009, empty payload.
        return send_zigatecmd_raw(self, "0009", "")
    # Should be done during zigpy layer startup()
    return self.ControllerLink.sendData("REQ-NWK-STATUS", None)
def zigate_get_firmware_version(self):
    """Request the firmware version (native command 0x0010; no-op on zigpy)."""
    self.log.logging("zigateCommand", "Debug", "zigate_get_firmware_version")
    if self.zigbee_communication != "zigpy":
        return send_zigatecmd_raw(self, "0010", "")
    # zigpy retrieves the version during its own startup(); nothing to do.
    return None
def zigate_soft_reset(self):
    """Ask the coordinator to perform a software reset."""
    self.log.logging("zigateCommand", "Debug", "zigate_soft_reset")
    if self.zigbee_communication != "zigpy":
        # Native ZiGate: command 0x0011, empty payload.
        return send_zigatecmd_raw(self, "0011", "")
    return self.ControllerLink.sendData("SOFT-RESET", None)
def METHOD_NAME(self):
    """Erase the coordinator's persistent data store (PDM / EEPROM)."""
    self.log.logging("zigateCommand", "Debug", "zigate_erase_eeprom")
    if self.zigbee_communication != "zigpy":
        # Native ZiGate: command 0x0012, empty payload.
        return send_zigatecmd_raw(self, "0012", "")
    return self.ControllerLink.sendData("ERASE-PDM", None)
def zigate_get_list_active_devices(self):
    """Ask the firmware for its list of active devices (native command 0x0015)."""
    self.log.logging("zigateCommand", "Debug", "zigate_get_list_active_devices")
    if self.zigbee_communication != "zigpy":
        return send_zigatecmd_raw(self, "0015", "")
    # Not available through the zigpy backend.
    return None
def zigate_set_time(self, timeUTC):
    """Set the coordinator clock; *timeUTC* is a hex-encoded string."""
    self.log.logging("zigateCommand", "Debug", "zigate_set_time %s" % timeUTC)
    if self.zigbee_communication != "zigpy":
        return send_zigatecmd_raw(self, "0016", timeUTC)
    # zigpy takes the time as an integer parameter.
    return self.ControllerLink.sendData("SET-TIME", {"Param1": int(timeUTC, 16)})
def zigate_get_time(self):
    """Read back the coordinator clock."""
    self.log.logging("zigateCommand", "Debug", "zigate_get_time")
    if self.zigbee_communication != "zigpy":
        # Native ZiGate: command 0x0017, empty payload.
        return send_zigatecmd_raw(self, "0017", "")
    return self.ControllerLink.sendData("GET-TIME", None)
def zigate_blueled(self, OnOff):
    """Toggle the ZiGate blue LED; *OnOff* is a hex string ("00"/"01")."""
    self.log.logging("zigateCommand", "Debug", "zigate_blueled %s" % OnOff)
    if self.zigbee_communication != "zigpy":
        return send_zigatecmd_raw(self, "0018", OnOff)
    return self.ControllerLink.sendData("SET-LED", {"Param1": int(OnOff, 16)})
def zigate_set_certificate(self, certification_code):
    """Select the radio certification profile ('02' means FCC, otherwise CE)."""
    self.log.logging("zigateCommand", "Debug", "zigate_set_certificate %s" % certification_code)
    if self.zigbee_communication != "zigpy":
        return send_zigatecmd_raw(self, "0019", certification_code)
    # zigpy expects the symbolic profile name rather than the raw code.
    value = 'FCC' if certification_code == '02' else 'CE'
    self.log.logging("zigateCommand", "Debug", "zigate_set_certificate value: %s" % value)
    return self.ControllerLink.sendData("SET-CERTIFICATION", {"Param1": value})
def zigate_set_extended_PanID(self, extPanID):
    """Set the 64-bit extended PAN id; *extPanID* is a hex string."""
    self.log.logging("zigateCommand", "Debug", "zigate_set_extended_PanID %s" % extPanID)
    if self.zigbee_communication != "zigpy":
        return send_zigatecmd_raw(self, "0020", extPanID)
    return self.ControllerLink.sendData("SET-EXTPANID", {"Param1": int(extPanID, 16)})
def zigate_set_channel(self, mask):
    """Set the allowed radio channel mask; *mask* is a hex string."""
    self.log.logging("zigateCommand", "Debug", "zigate_set_channel %s" % mask)
    if self.zigbee_communication != "zigpy":
        return send_zigatecmd_raw(self, "0021", mask)
    return self.ControllerLink.sendData("SET-CHANNEL", {"Param1": int(mask, 16)})
def zigate_start_nwk(self):
    """Start the Zigbee network (native command 0x0024; zigpy does this itself)."""
    self.log.logging("zigateCommand", "Debug", "zigate_start_nwk")
    if self.zigbee_communication != "zigpy":
        return send_zigatecmd_raw(self, "0024", "")
    return None
def zigate_remove_device(self, target_short_addr, extended_addr):
    """Remove a device from the network.

    Native payload is the target short address followed by the IEEE address;
    the zigpy backend keys the removal on the IEEE (extended) address only.
    """
    self.log.logging("zigateCommand", "Debug", "zigate_remove_device %s %s" % (target_short_addr, extended_addr))
    if self.zigbee_communication != "zigpy":
        return send_zigatecmd_raw(self, "0026", target_short_addr + extended_addr)
    return self.ControllerLink.sendData("REMOVE-DEVICE", {"Param1": int(extended_addr, 16)})
def zigate_set_tx_power(self, value):
    # Set radio transmit power (native command 0x0806).
    # NOTE(review): unlike every other command in this module there is no
    # zigpy guard here, so a raw ZiGate frame is sent even when
    # zigbee_communication == "zigpy" -- confirm whether that is intended.
    self.log.logging( "zigateCommand", "Debug","zigate_set_tx_power %s" %value)
    return send_zigatecmd_raw(self, "0806", value)
def zigate_get_tx_power(self):
    # Read back the radio transmit power (native command 0x0807).
    self.log.logging( "zigateCommand", "Debug","zigate_get_tx_power")
    if self.zigbee_communication == "zigpy":
        # NOTE(review): this sends "GET-EXTPANID" although the function is
        # about TX power -- looks like a copy/paste slip; verify the expected
        # ControllerLink message name before relying on the zigpy path.
        return self.ControllerLink.sendData( "GET-EXTPANID", None)
    return send_zigatecmd_raw(self, "0807", "")
#!/usr/bin/env python
# -*- coding: utf-8 -*-
## Copyright (C) 2021 University of Oxford
##
## This file is part of Cockpit.
##
## Cockpit is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## Cockpit is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Cockpit. If not, see <http://www.gnu.org/licenses/>.
## Copyright 2013, The Regents of University of California
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions
## are met:
##
## 1. Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
##
## 2. Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
##
## 3. Neither the name of the copyright holder nor the names of its
## contributors may be used to endorse or promote products derived
## from this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
## FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
## COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
## INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
## BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
## LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
## CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
## LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
## ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
## POSSIBILITY OF SUCH DAMAGE.
class Device:
    """Base class for Cockpit devices.
    This serves as the base class for any Device subclass. Devices
    are as close as Cockpit gets to speaking directly to hardware.
    Device implementation is largely left up to the client; this class
    simply provides a framework of stub functions that must be
    implemented.
    Args:
        name: name of the device. In the depot configuration file this
            is the name of the section where the device is declared.
        config: map of the device configuration to their values as
            strings. This is the key/values read from the device
            section on the depot configuration file.
    """
    # Config keys whose string values are converted by the given callable.
    _config_types = {
        'port': int,
    }

    # Define __lt__ to make handlers sortable (case-insensitive, by name).
    def __lt__(self, other):
        return self.name.lower() < other.name.lower()

    def __init__(self, name='', config=None):
        # A mutable default argument ({}) is shared between every call and
        # was mutated below; use the None sentinel so each device without an
        # explicit config gets its own fresh dict.
        self.name = name
        self.config = {} if config is None else config
        # Convert config strings to types specified on device class.
        for key, converter in self._config_types.items():
            if key in self.config:
                self.config[key] = converter(self.config[key])
        # Promote common connection settings to attributes when present.
        ip = self.config.get('ipaddress', False)
        if ip:
            self.ipAddress = ip
        port = self.config.get('port', False)
        if port:
            self.port = port
        uri = self.config.get('uri', False)
        if uri:
            self.uri = uri

    ## Perform any necessary initialization (e.g. connecting to hardware).
    def initialize(self):
        pass

    ## Generate a list of DeviceHandlers representing the various capabilities
    # we are responsible for. Each DeviceHandler represents an abstract bit
    # of hardware -- for example, a generic camera, or a stage mover along
    # a single axis, or a light source. Take a look at the
    # "handlers/deviceHandler.py" file for more information.
    def getHandlers(self):
        return []

    ## Construct any special UI the Device needs. Most Devices will not need
    # to do anything here, but if you have settings that the user needs to be
    # able to manipulate and that the normal UI will not handle, then this
    # is where you create your specific UI.
    # \return a WX Sizer or Panel that will be inserted into the main controls
    # window, or None if nothing is to be inserted.
    def makeUI(self, parent):
        return None

    ## Subscribe to any events we care about.
    def performSubscriptions(self):
        pass

    ## Publish any needed information. This is called after all UI widgets
    # have been generated, so they are able to respond to these publications.
    def METHOD_NAME(self):
        pass

    ## Do any final actions needed, now that all of the devices are set up
    # and all initial publications and subscriptions have been made.
    def finalizeInitialization(self):
        pass

    def onExit(self) -> None:
        pass
from __future__ import annotations
import jsondiff
import numpy as np
import pytest
from pytest_regressions.data_regression import DataRegressionFixture
import gdsfactory as gf
from gdsfactory.cell import cell
from gdsfactory.component import Component
from gdsfactory.difftest import difftest
def test_append() -> None:
    """Append paths."""
    # Chain arcs, straights and an Euler bend, then regression-check the
    # accumulated path length against a known-good value.
    P = gf.Path()
    P.append(gf.path.arc(radius=10, angle=90))  # Circular arc
    P.append(gf.path.straight(length=10))  # Straight section
    P.append(
        gf.path.euler(radius=3, angle=-90, p=1)
    )  # Euler bend (aka "racetrack" curve)
    P.append(gf.path.straight(length=40))
    P.append(gf.path.arc(radius=8, angle=-45))
    P.append(gf.path.straight(length=10))
    P.append(gf.path.arc(radius=8, angle=45))
    P.append(gf.path.straight(length=10))
    # 107.697 is the expected total length for the chain built above.
    assert np.round(P.length(), 3) == 107.697, P.length()
def looploop(num_pts=1000):
    """Simple limacon looping curve."""
    # Polar curve r(t) = 20 + 25*sin(t), sampled over half a revolution and
    # converted to Cartesian (x, y) sample points.
    angles = np.linspace(-np.pi, 0, num_pts)
    radii = 20 + 25 * np.sin(angles)
    return np.column_stack((radii * np.cos(angles), radii * np.sin(angles)))
@cell
def double_loop() -> Component:
    """Limacon loop path extruded with a multi-layer cross-section."""
    # Create the path points
    P = gf.Path()
    P.append(gf.path.arc(radius=10, angle=90))
    P.append(gf.path.straight())
    P.append(gf.path.arc(radius=5, angle=-90))
    P.append(looploop(num_pts=1000))
    P.rotate(-45)
    # Create the crosssection
    s1 = gf.Section(width=0.5, offset=2, layer=(0, 0))
    s2 = gf.Section(width=0.5, offset=4, layer=(1, 0))
    s3 = gf.Section(width=1, offset=0, layer=(3, 0))
    X = gf.CrossSection(
        width=1.5,
        offset=0,
        layer=(2, 0),
        port_names=["in", "out"],
        sections=[s1, s2, s3],
    )
    # simplify=0.3 decimates polygon points to keep the generated GDS small.
    return gf.path.extrude(P, X, simplify=0.3)
@cell
def transition() -> Component:
    """Straight -> sine-width transition -> straight across two cross-sections."""
    c = gf.Component()
    # First cross-section: 1.2 core with an etch and a secondary wg layer.
    s1 = gf.Section(width=2.2, offset=0, layer=(3, 0), name="etch")
    s2 = gf.Section(width=1.1, offset=3, layer=(1, 0), name="wg2")
    X1 = gf.CrossSection(
        width=1.2,
        offset=0,
        layer=(2, 0),
        name="wg",
        port_names=("in1", "out1"),
        sections=[s1, s2],
    )
    # Create the second CrossSection that we want to transition to
    s1 = gf.Section(width=3.5, offset=0, layer=(3, 0), name="etch")
    s2 = gf.Section(width=3, offset=5, layer=(1, 0), name="wg2")
    X2 = gf.CrossSection(
        width=1,
        offset=0,
        layer=(2, 0),
        name="wg",
        port_names=("in1", "out1"),
        sections=[s1, s2],
    )
    # Sinusoidal width interpolation between the two cross-sections.
    Xtrans = gf.path.transition(cross_section1=X1, cross_section2=X2, width_type="sine")
    # Xtrans = gf.cross_section.strip(port_names=('in1', 'out1'))
    P1 = gf.path.straight(length=5)
    P2 = gf.path.straight(length=5)
    wg1 = gf.path.extrude(P1, X1)
    wg2 = gf.path.extrude(P2, X2)
    # Extrude the transition along a 45-degree Euler bend.
    P4 = gf.path.euler(radius=25, angle=45, p=0.5, use_eff=False)
    wg_trans = gf.path.extrude(P4, Xtrans)
    # Chain: wg1 -> transition -> wg2, connected port-to-port.
    wg1_ref = c << wg1
    wgt_ref = c << wg_trans
    wgt_ref.connect("in1", wg1_ref.ports["out1"])
    wg2_ref = c << wg2
    wg2_ref.connect("in1", wgt_ref.ports["out1"])
    return c
# Registry of parametrized component builders exercised by the regression
# tests below; pytest creates one `component` fixture instance per entry.
component_factory = dict(
    transition=transition,
)
component_names = component_factory.keys()
@pytest.fixture(params=component_names, scope="function")
def component(request) -> Component:
    # Build a fresh component per test so state cannot leak between tests.
    return component_factory[request.param]()
def METHOD_NAME(component: Component) -> None:
    """Avoid regressions in GDS geometry shapes and layers."""
    # difftest compares the generated layout against a stored reference GDS.
    difftest(component)
def test_settings(component: Component, data_regression: DataRegressionFixture) -> None:
    """Avoid regressions when exporting settings."""
    # data_regression compares against a stored YAML snapshot of the dict.
    data_regression.check(component.to_dict())
def test_layers1() -> None:
    """Extruded straight keeps its layer and exact (un-snapped) length."""
    P = gf.path.straight(length=10.001)
    X = gf.CrossSection(width=0.5, offset=0, layer=(3, 0), port_names=("in", "out"))
    c = gf.path.extrude(P, X, simplify=5e-3)
    assert c.ports["in"].layer == (3, 0)
    assert c.ports["out"].center[0] == 10.001, c.ports["out"].center[0]
def test_layers2() -> None:
    """With snap_to_grid the output port lands on the snapped coordinate."""
    P = gf.path.straight(length=10.001)
    X = gf.cross_section.strip(snap_to_grid=5e-3)
    c = gf.path.extrude(P, X, simplify=5e-3)
    assert c.ports["o1"].layer == (1, 0)
    # 10.001 snaps to 10.0 on the 5 nm grid.
    assert c.ports["o2"].center[0] == 10.0, c.ports["o2"].center[0]
def test_copy() -> None:
    """CrossSection.copy() round-trips with no model differences."""
    x1 = gf.CrossSection(width=0.5, offset=0, layer=(3, 0), port_names=("in", "out"))
    x2 = x1.copy()
    d = jsondiff.diff(x1.model_dump(), x2.model_dump())
    assert len(d) == 0, d
def test_path_add() -> None:
    """Paths compose with + and += and preserve start/end angles."""
    p1 = gf.path.straight(length=5)
    p2 = gf.path.straight(length=5)
    p3 = p1 + p2
    assert p3.length() == 10
    # In-place addition behaves like +.
    p2 += p1
    assert p2.length() == 10
    p1 = gf.path.straight(length=5)
    p2 = gf.path.euler(radius=5, angle=45, p=0.5, use_eff=False)
    # Euler bend followed by a straight: starts at 0 deg, ends at 45 deg.
    p = p2 + p1
    assert p.start_angle == 0
    assert p.end_angle == 45
if __name__ == "__main__":
    # Ad-hoc manual check: build a small composite path and display it.
    # test_append()
    # c = transition()
    p1 = gf.path.straight(length=5)
    p2 = gf.path.euler(radius=5, angle=45, p=0.5, use_eff=False)
    p = p2 + p1
    # assert p.start_angle == 45
    # assert p.end_angle == 0
    c = p.extrude(cross_section="strip")
    c.show(show_ports=False)
import pytz
from celery import shared_task
from discord_webhook import DiscordEmbed, DiscordWebhook
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from judge.jinja2.gravatar import gravatar
from judge.models import BlogPost, Comment, Contest, Problem, Tag, TagProblem, Ticket, TicketMessage
__all__ = ('on_new_ticket', 'on_new_comment', 'on_new_problem', 'on_new_tag_problem', 'on_new_tag', 'on_new_contest',
'on_new_blogpost', 'on_new_ticket_message')
def get_webhook_url(event_name):
    """Return the Discord webhook URL configured for *event_name*.

    Falls back to the 'default' entry, then to None, when the event has no
    dedicated webhook in settings.DISCORD_WEBHOOK.
    """
    hooks = settings.DISCORD_WEBHOOK
    return hooks.get(event_name, hooks.get('default', None))
def send_webhook(webhook, title, description, author, color='03b2f8'):
    """Post an embed with *title*/*description* to the given webhook URL.

    *author* is an optional site profile whose username and avatar decorate
    the embed; pass None for system-generated notifications.
    """
    # Use a distinct local name: the original rebound the `webhook` URL
    # parameter to the sender object, shadowing it.
    sender = DiscordWebhook(url=webhook)
    embed = DiscordEmbed(
        title=title,
        description=description,
        color=color,
    )
    if author is not None:
        embed.set_author(
            name=author.user.username,
            url=settings.SITE_FULL_URL + '/user/' + author.user.username,
            icon_url=gravatar(author),
        )
    sender.add_embed(embed)
    sender.execute()
@shared_task
def METHOD_NAME(ticket_id, content_type_id, object_id, message):
    """Announce a newly-opened ticket on the configured Discord webhook."""
    webhook = get_webhook_url('on_new_ticket')
    # Both the webhook and the public site URL are required to build links.
    if webhook is None or settings.SITE_FULL_URL is None:
        return
    ticket = Ticket.objects.get(pk=ticket_id)
    # Resolve the generic FK to the object the ticket was filed against.
    obj = ContentType.objects.get_for_id(content_type_id).get_object_for_this_type(
        pk=object_id,
    )
    url = obj.get_absolute_url()
    # for internal links, we add the site url
    if url[0] == '/':
        url = settings.SITE_FULL_URL + url
    ticket_url = settings.SITE_FULL_URL + '/ticket/' + str(ticket_id)
    title = f'Title: [{ticket.title}]({ticket_url})'
    message = f'Message: {message}'
    # Truncate the body to keep the Discord embed short.
    send_webhook(webhook, f'New ticket on {url}', title + '\n' + message[:100], ticket.user)
@shared_task
def on_new_ticket_message(message_id, ticket_id, message):
    """Announce a new reply on an existing ticket."""
    webhook = get_webhook_url('on_new_ticket_message')
    if webhook is None or settings.SITE_FULL_URL is None:
        return
    ticket_message = TicketMessage.objects.get(pk=message_id)
    ticket_url = settings.SITE_FULL_URL + '/ticket/' + str(ticket_id)
    message = f'Message: {message}'
    # Truncate the body to keep the Discord embed short.
    send_webhook(webhook, f'New ticket reply on {ticket_url}', message[:100], ticket_message.user)
@shared_task
def on_new_comment(comment_id):
    """Announce a newly-posted comment."""
    webhook = get_webhook_url('on_new_comment')
    if webhook is None or settings.SITE_FULL_URL is None:
        return
    comment = Comment.objects.get(pk=comment_id)
    url = settings.SITE_FULL_URL + comment.get_absolute_url()
    send_webhook(webhook, f'New comment {url}', comment.body[:200], comment.author)
@shared_task
def on_new_problem(problem_code, is_suggested=False):
    """Announce a new problem; suggested problems go to a separate channel."""
    event_name = 'on_new_suggested_problem' if is_suggested else 'on_new_problem'
    webhook = get_webhook_url(event_name)
    if webhook is None or settings.SITE_FULL_URL is None:
        return
    problem = Problem.objects.get(code=problem_code)
    # Prefer the suggester as displayed author, else the first listed author.
    author = problem.suggester or problem.authors.first()
    url = settings.SITE_FULL_URL + problem.get_absolute_url()
    title = f'New {"suggested" if is_suggested else "organization"} problem {url}'
    description = [
        ('Title', problem.name),
        ('Statement', problem.description[:100] + '...\n'),
        ('Time limit', problem.time_limit),
        ('Memory limit (MB)', problem.memory_limit / 1024),
        ('Points', problem.points),
    ]
    if problem.is_organization_private:
        # Link every owning organization in the embed.
        orgs_link = [
            f'[{org.name}]({settings.SITE_FULL_URL + org.get_absolute_url()})'
            for org in problem.organizations.all()
        ]
        description.append(('Organizations', ' '.join(orgs_link)))
    description = '\n'.join(f'{opt}: {val}' for opt, val in description)
    send_webhook(webhook, title, description, author)
@shared_task
def on_new_tag_problem(problem_code):
    """Announce a newly-created tag problem."""
    webhook = get_webhook_url('on_new_tag_problem')
    if webhook is None or settings.SITE_FULL_URL is None:
        return
    problem = TagProblem.objects.get(code=problem_code)
    url = settings.SITE_FULL_URL + problem.get_absolute_url()
    description = f'Title: {problem.name}\n'
    description += f'Judge: {problem.judge}'
    # No author profile for system-generated tag-problem notifications.
    send_webhook(webhook, f'New tag problem {url}', description, None)
@shared_task
def on_new_tag(problem_code, tag_list):
    """Announce newly-added tags for a tag problem.

    Args:
        problem_code: code of the TagProblem that was tagged.
        tag_list: iterable of tag codes that were just attached.
    """
    webhook = get_webhook_url('on_new_tag')
    if webhook is None or settings.SITE_FULL_URL is None:
        return
    problem = TagProblem.objects.get(code=problem_code)
    # Resolve tag codes to display names (comprehension replaces the old
    # manual append loop).
    tags = [Tag.objects.get(code=tag).name for tag in tag_list]
    url = settings.SITE_FULL_URL + problem.get_absolute_url()
    description = f'Title: {problem.name}\n'
    description += f'New tag: {", ".join(tags)}'
    # No author profile for system-generated tag notifications.
    send_webhook(webhook, f'New tag added for problem {url}', description, None)
@shared_task
def on_new_contest(contest_key):
    """Announce a newly-created contest."""
    webhook = get_webhook_url('on_new_contest')
    if webhook is None or settings.SITE_FULL_URL is None:
        return
    contest = Contest.objects.get(key=contest_key)
    author = contest.authors.first()
    url = settings.SITE_FULL_URL + contest.get_absolute_url()
    title = f'New contest {url}'
    # Display times in the site's default timezone, not UTC.
    tz = pytz.timezone(settings.DEFAULT_USER_TIME_ZONE)
    description = [
        ('Title', contest.name),
        ('Statement', contest.description[:100] + '...\n'),
        ('Start time', contest.start_time.astimezone(tz).strftime('%Y-%m-%d %H:%M')),
        ('End time', contest.end_time.astimezone(tz).strftime('%Y-%m-%d %H:%M')),
        ('Duration', contest.end_time - contest.start_time),
    ]
    if contest.is_organization_private:
        # Link every owning organization in the embed.
        orgs_link = [
            f'[{org.name}]({settings.SITE_FULL_URL + org.get_absolute_url()})'
            for org in contest.organizations.all()
        ]
        description.append(('Organizations', ' '.join(orgs_link)))
    description = '\n'.join(f'{opt}: {val}' for opt, val in description)
    send_webhook(webhook, title, description, author)
@shared_task
def on_new_blogpost(blog_id):
    """Announce a newly-published blog post."""
    webhook = get_webhook_url('on_new_blogpost')
    if webhook is None or settings.SITE_FULL_URL is None:
        return
    blog = BlogPost.objects.get(pk=blog_id)
    url = settings.SITE_FULL_URL + blog.get_absolute_url()
    description = f'Title: {blog.title}\n'
    description += f'Description: {blog.content[:200]}'
    send_webhook(webhook, f'New blog post {url}', description, blog.authors.first())
""" This is a test of the chain
SiteStatus -> ResourceStatusClient -> ResourceStatusDB
It supposes that the DB is present, and that the service is running
"""
# pylint: disable=wrong-import-position, missing-docstring
from datetime import datetime
import pytest
import DIRAC
DIRAC.initialize() # Initialize configuration
from DIRAC.ResourceStatusSystem.Client.SiteStatus import SiteStatus
from DIRAC.ResourceStatusSystem.Client.ResourceStatusClient import ResourceStatusClient
Datetime = datetime.now()
testSite = "test1234.test.test"
@pytest.fixture(name="stClient")
def fixtureSiteStatus():
    # Fresh SiteStatus client per test, forced to use RSS as the status source.
    siteStatus = SiteStatus()
    siteStatus.rssFlag = True
    yield siteStatus
def METHOD_NAME(stClient):
    """End-to-end check of SiteStatus around a single Active test site."""
    # make sure that the test sites are not presented in the db
    rsClient = ResourceStatusClient()
    rsClient.deleteStatusElement("Site", "Status", testSite)
    rsClient.deleteStatusElement("Site", "Status", "testActive1.test.test")
    rsClient.deleteStatusElement("Site", "Status", "testActive.test.test")
    rsClient.deleteStatusElement("Site", "Status", "testBanned.test.test")
    # add test site
    result = rsClient.insertStatusElement(
        "Site",
        "Status",
        testSite,
        "all",
        "Active",
        "Site",
        "Synchronized",
        Datetime,
        Datetime,
        "tokenOwner",
        Datetime,
    )
    assert result["OK"] is True, result["Message"]
    # The client caches statuses; refresh so the new site is visible.
    stClient.rssCache.refreshCache()
    # TEST getSites
    # ...............................................................................
    result = stClient.getSites()
    assert result["OK"] is True, result["Message"]
    assert testSite in result["Value"]
    # TEST getSiteStatuses
    # ...............................................................................
    result = stClient.getSiteStatuses([testSite])
    assert result["OK"] is True, result["Message"]
    assert result["Value"][testSite] == "Active"
    # TEST getUsableSites
    # ...............................................................................
    result = stClient.getUsableSites([testSite])
    assert result["OK"] is True, result["Message"]
    assert testSite in result["Value"]
    # finally delete the test site
    result = rsClient.deleteStatusElement("Site", "Status", testSite)
    assert result["OK"] is True, result["Message"]
def test_addAndRemove_complicatedTest(stClient):
    """SiteStatus filtering across Active/Banned sites plus a status update.

    NOTE(review): unlike the simple test above, this one does not delete the
    three test sites at the end -- confirm whether cleanup is intentional
    (the simple test's initial deletes currently do it on the next run).
    """
    rsClient = ResourceStatusClient()
    # Insert two Active sites and one Banned site.
    result = rsClient.insertStatusElement(
        "Site",
        "Status",
        "testActive.test.test",
        "all",
        "Active",
        "Site",
        "Synchronized",
        Datetime,
        Datetime,
        "tokenOwner",
        Datetime,
    )
    assert result["OK"] is True, result["Message"]
    result = rsClient.insertStatusElement(
        "Site",
        "Status",
        "testActive1.test.test",
        "all",
        "Active",
        "Site",
        "Synchronized",
        Datetime,
        Datetime,
        "tokenOwner",
        Datetime,
    )
    assert result["OK"] is True, result["Message"]
    result = rsClient.insertStatusElement(
        "Site",
        "Status",
        "testBanned.test.test",
        "all",
        "Banned",
        "Site",
        "Synchronized",
        Datetime,
        Datetime,
        "tokenOwner",
        Datetime,
    )
    assert result["OK"] is True, result["Message"]
    stClient.rssCache.refreshCache()
    # TEST getSites: the default view excludes Banned sites.
    # ...............................................................................
    result = stClient.getSites()
    assert result["OK"] is True, result["Message"]
    assert "testActive1.test.test" in result["Value"]
    assert "testActive.test.test" in result["Value"]
    assert "testBanned.test.test" not in result["Value"]
    # TEST getSites: the "All" view includes Banned sites.
    # ...............................................................................
    result = stClient.getSites("All")
    assert result["OK"] is True, result["Message"]
    assert "testActive1.test.test" in result["Value"]
    assert "testActive.test.test" in result["Value"]
    assert "testBanned.test.test" in result["Value"]
    # TEST getUsableSites
    # ...............................................................................
    result = stClient.getUsableSites()
    assert result["OK"] is True, result["Message"]
    assert "testActive1.test.test" in result["Value"]
    assert "testActive.test.test" in result["Value"]
    # setting a status
    result = stClient.setSiteStatus("testBanned.test.test", "Probing")
    assert result["OK"] is True, result["Message"]
    stClient.rssCache.refreshCache()
    # Only the site just moved to Probing should be returned.
    result = stClient.getSites("Probing")
    assert result["OK"] is True, result["Message"]
    assert "testBanned.test.test" in result["Value"]
    assert "testActive.test.test" not in result["Value"]
#!/usr/bin/env python
# Build script for Sphinx documentation
import os
import shlex
import shutil
import subprocess
import sys
from collections import OrderedDict
# You can set these variables from the command line.
SPHINXOPTS = os.getenv('SPHINXOPTS', '')
SPHINXBUILD = os.getenv('SPHINXBUILD', 'sphinx-build')
PAPER = os.getenv('PAPER', None)
BUILDDIR = os.getenv('BUILDDIR', '_build')
TARGETS = OrderedDict()
def target(function):
    """Decorator: register *function* in TARGETS, keyed by its __name__."""
    TARGETS[function.__name__] = function
    return function
# User-friendly check for sphinx-build
def check_sphinx_build():
    """Verify that the sphinx-build executable is usable; exit(1) otherwise."""
    try:
        # subprocess.DEVNULL avoids manually managing an os.devnull handle.
        if subprocess.call([SPHINXBUILD, '--version'],
                           stdout=subprocess.DEVNULL,
                           stderr=subprocess.DEVNULL) == 0:
            return
    except OSError:
        # Covers FileNotFoundError plus other spawn failures (e.g. not
        # executable); all fall through to the friendly message below.
        pass
    print("The '{0}' command was not found. Make sure you have Sphinx "
          "installed, then set the SPHINXBUILD environment variable to point "
          "to the full path of the '{0}' executable. Alternatively you can "
          "add the directory with the executable to your PATH. If you don't "
          "have Sphinx installed, grab it from http://sphinx-doc.org/)"
          .format(SPHINXBUILD))
    sys.exit(1)
@target
def all():
    """the default target"""
    # Build HTML first, then the rinohtype PDF; returns the PDF build result.
    # (Shadows the builtin all(); the name must match the CLI target name.)
    html()
    return rinoh()
@target
def clean():
    """remove the build directory"""
    # ignore_errors: succeed even if BUILDDIR does not exist yet.
    shutil.rmtree(BUILDDIR, ignore_errors=True)
def build(builder, success_msg=None, extra_opts=None, outdir=None,
          doctrees=True):
    """Run sphinx-build with the given builder and report the outcome.

    Args:
        builder: name of the Sphinx builder (html, latex, ...).
        success_msg: message printed on success; '{}' placeholders are
            filled with the output directory. Now genuinely optional
            (the old None default crashed on string concatenation).
        extra_opts: extra command-line options passed to sphinx-build.
        outdir: output subdirectory under BUILDDIR; defaults to *builder*.
        doctrees: share the doctree cache across builders when True.
    Exits the process with status 1 when sphinx-build fails.
    """
    builddir = os.path.join(BUILDDIR, outdir or builder)
    command = [SPHINXBUILD, '-b', builder]
    if doctrees:
        command.extend(['-d', os.path.join(BUILDDIR, 'doctrees')])
    if extra_opts:
        command.extend(extra_opts)
    command.extend(shlex.split(SPHINXOPTS))
    command.extend(['.', builddir])
    print(' '.join(command))
    if subprocess.call(command) != 0:
        print('Error running {}. Aborting.'.format(SPHINXBUILD))
        sys.exit(1)
    # Tolerate a missing success message instead of raising TypeError.
    print('Build finished. ' + (success_msg.format(builddir) if success_msg else ''))
@target
def html():
    """make standalone HTML files"""
    # Output lands in BUILDDIR/html; '{}' in the message becomes that path.
    return build('html', 'The HTML pages are in {}.')
@target
def dirhtml():
    """make HTML files named index.html in directories"""
    # One directory per page, enabling extension-less URLs.
    return build('dirhtml', 'The HTML pages are in {}')
@target
def singlehtml():
    """make a single large HTML file"""
    # All pages merged into one document.
    return build('singlehtml', 'The HTML page is in {}.')
@target
def pickle():
    """make pickle files"""
    # Shadows the stdlib pickle module here; the name must match the CLI
    # target name, so it cannot be renamed.
    return build('pickle', 'Now you can process the pickle files.')
@target
def json():
    """make JSON files"""
    # Shadows the stdlib json module here; name must match the CLI target.
    return build('json', 'Now you can process the JSON files.')
@target
def htmlhelp():
    """make HTML files and a HTML help project"""
    build('htmlhelp', 'Now you can run HTML Help Workshop with the .hhp '
          'project file in {}.')
    print('Running HTML Help Workshop...')
    builddir = os.path.join(BUILDDIR, 'htmlhelp')
    rc = subprocess.call(['hhc', os.path.join(builddir, 'rinohtype.hhp')])
    # NOTE(review): hhc is checked against 1, not 0 -- HTML Help Workshop is
    # known to exit 1 on success, but confirm this inverted-looking test.
    if rc != 1:
        print('Error running HTML Help Workshop. Aborting.')
        sys.exit(1)
    print('HTML Help Workshop finished; the CHM file is in {}.'
          .format(builddir))
@target
def qthelp():
    """make HTML files and a qthelp project"""
    # The success message doubles as usage instructions for the generated
    # .qhcp project ({0} is filled with the build directory).
    return build('qthelp', 'Now you can run "qcollectiongenerator" with the '
                 '.qhcp project file in {0}, like this: \n'
                 '# qcollectiongenerator {0}/RinohType.qhcp\n'
                 'To view the help file:\n'
                 '# assistant -collectionFile {0}/RinohType.qhc')
@target
def devhelp():
"""make HTML files and a Devhelp project"""
return build('devhelp', 'To view the help file:\n'
'# mkdir -p $HOME/.local/share/devhelp/RinohType\n'
'# ln -s {} $HOME/.local/share/devhelp/RinohType\n'
'# devhelp')
@target
def epub():
"""make an epub"""
return build('epub', 'The epub file is in {}.')
@target
def rinoh():
"""make a PDF using rinohtype"""
return build('rinoh', 'The PDF file is in {}.')
@target
def latex():
    """make LaTeX files, you can set PAPER=a4 or PAPER=letter"""
    # Forward the PAPER selection to Sphinx as a config override (-D).
    extra_opts = ['-D', 'latex_paper_size={}'.format(PAPER)] if PAPER else None
    return build('latex', 'The LaTeX files are in {}.\n'
                 "Run 'make' in that directory to run these through "
                 "(pdf)latex (use the 'latexpdf' target to do that "
                 "automatically).", extra_opts)
@target
def latexpdf():
    """make LaTeX files and run them through pdflatex"""
    # Generate the LaTeX sources first; latex() exits the process on failure
    # and returns None, so the original's unused 'rc = latex()' was dropped.
    latex()
    print('Running LaTeX files through pdflatex...')
    builddir = os.path.join(BUILDDIR, 'latex')
    subprocess.call(['make', '-C', builddir, 'all-pdf'])
    print('pdflatex finished; the PDF files are in {}.'.format(builddir))
@target
def latexpdfja():
    """make LaTeX files and run them through platex/dvipdfmx"""
    latex()
    print('Running LaTeX files through platex and dvipdfmx...')
    builddir = os.path.join(BUILDDIR, 'latex')
    subprocess.call(['make', '-C', builddir, 'all-pdf-ja'])
    # NOTE(review): message says 'pdflatex' although this target runs
    # platex/dvipdfmx — kept verbatim to avoid changing program output.
    print('pdflatex finished; the PDF files are in {}.'.format(builddir))
@target
def METHOD_NAME():
    """make text files"""
    # NOTE(review): name mangled in this dump; the builder suggests the
    # original target was called 'text' — confirm against upstream.
    return build('text', 'The text files are in {}.')
@target
def man():
    """make manual pages"""
    return build('man', 'The manual pages are in {}.')
@target
def texinfo():
    """make Texinfo files"""
    return build('texinfo', 'The Texinfo files are in {}.\n'
                 "Run 'make' in that directory to run these "
                 "through makeinfo (use the 'info' target to do "
                 "that automatically).")
@target
def info():
    """make Texinfo files and run them through makeinfo"""
    # Generate the Texinfo sources first; texinfo() exits the process on
    # failure and returns None, so the unused 'rc = texinfo()' was dropped.
    texinfo()
    print('Running Texinfo files through makeinfo...')
    builddir = os.path.join(BUILDDIR, 'texinfo')
    subprocess.call(['make', '-C', builddir, 'info'])
    print('makeinfo finished; the Info files are in {}.'.format(builddir))
@target
def gettext():
    """make PO message catalogs"""
    # Catalogs go to BUILDDIR/locale; no doctree cache is shared here.
    return build('gettext', 'The message catalogs are in {}.', outdir='locale',
                 doctrees=False)
# Thin wrappers over build(); docstrings are the user-facing help() text.
@target
def changes():
    """make an overview of all changed/added/deprecated items"""
    return build('changes', 'The overview file is in {}.')
@target
def xml():
    """make Docutils-native XML files"""
    return build('xml', 'The XML files are in {}.')
@target
def pseudoxml():
    """make pseudoxml-XML files for display purposes"""
    return build('pseudoxml', 'The pseudo-XML files are in {}.')
@target
def linkcheck():
    """check all external links for integrity"""
    return build('linkcheck', 'Look for any errors in the above output or in '
                 '{}/output.txt.')
@target
def doctest():
    """run all doctests embedded in the documentation (if enabled)"""
    return build('doctest', 'Look at the results in {}/output.txt.')
@target
def help():
    """List all targets"""
    # Each registered target's docstring is its one-line description.
    print("Please use '{} <target>' where <target> is one of"
          .format(sys.argv[0]))
    width = max(map(len, TARGETS))
    for tgt_name, tgt in TARGETS.items():
        print(' {name:{width}} {descr}'.format(name=tgt_name, width=width,
                                               descr=tgt.__doc__))
if __name__ == '__main__':
    check_sphinx_build()
    # Run from the documentation directory so '.' is the Sphinx source dir.
    docdir = os.path.dirname(__file__)
    if docdir:
        os.chdir(docdir)
    # With no CLI arguments, fall back to the 'all' target.
    args = sys.argv[1:] or ['all']
    for arg in args:
        TARGETS[arg]()
import random
import re
import xml.etree.ElementTree as ET
from collections import defaultdict
from itertools import accumulate
from pathlib import Path
import torch
from torch.utils.data.dataset import Dataset
from torchaudio.sox_effects import apply_effects_file, apply_effects_tensor
class QUESST14Trainset(Dataset):
    """QUESST 2014 training dataset.

    Yields (query_segment, audio_segments, label) triples built from
    positive pairs (query matched an audio per the ground-truth RTTM)
    and negative pairs (query paired with a random non-matching audio).
    """
    def __init__(self, split, **kwargs):
        """Index query/audio pairs for *split*.

        Requires kwargs["quesst2014_root"] pointing at the dataset root;
        reads the scoring ground-truth and language-key lists from disk.
        """
        dataset_root = Path(kwargs["quesst2014_root"])
        scoring_root = dataset_root / "scoring"
        split_root = scoring_root / f"groundtruth_quesst14_{split}"
        # parse infos
        query2positives = METHOD_NAME(split_root / f"quesst14_{split}.rttm")
        audio_names = parse_lst(scoring_root / "language_key_utterances.lst")
        query_names = parse_lst(scoring_root / f"language_key_{split}.lst")
        print(f"[QUESST2014] # of audios: {len(audio_names)}")
        print(f"[QUESST2014] # of queries: {len(query_names)}")
        # find complement set
        # Negatives for a query = all known audios minus its positives.
        audio_set = set(audio_names)
        query2negatives = {
            query_name: list(
                audio_set
                - set(
                    query2positives[query_name] if query_name in query2positives else []
                )
            )
            for query_name in query_names
        }
        # form positive & negative pairs
        # Positives are intersected with audio_set to drop RTTM entries
        # whose audio is not in the language-key list.
        positive_pairs = [
            (query_name, audio_name)
            for query_name in query_names
            for audio_name in set(query2positives[query_name]) & audio_set
        ]
        # Each negative "pair" keeps the full candidate list; the concrete
        # audio is sampled lazily in __getitem__.
        negative_pairs = [
            (query_name, list(negative_audio_set))
            for query_name, negative_audio_set in query2negatives.items()
        ]
        print(f"[QUESST2014] # of positive pairs: {len(positive_pairs)}")
        print(f"[QUESST2014] # of negative pairs: {len(negative_pairs)}")
        self.audio_root = dataset_root / "Audio"
        self.query_root = dataset_root / f"{split}_queries"
        # Maximum segment duration in seconds (crop/unfold window).
        self.max_dur = 3.0
        self.positive_pairs = positive_pairs
        self.negative_pairs = negative_pairs
    def __len__(self):
        # Positives first, then negatives (see the idx split below).
        return len(self.positive_pairs) + len(self.negative_pairs)
    def __getitem__(self, idx):
        """Load one pair; label is +1 for positive, -1 for negative."""
        if idx < len(self.positive_pairs): # positive pair
            query_name, audio_name = self.positive_pairs[idx]
        else: # negative pair
            query_name, audio_names = self.negative_pairs[
                idx - len(self.positive_pairs)
            ]
            # Sample a fresh negative audio every epoch.
            audio_name = random.sample(audio_names, 1)[0]
        query_path = (self.query_root / query_name).with_suffix(".wav")
        audio_path = (self.audio_root / audio_name).with_suffix(".wav")
        query_tensor = path2tensor(query_path)
        audio_tensor = path2tensor(audio_path)
        query_segment = crop_segment(query_tensor, self.max_dur)
        audio_segments = unfold_segments(audio_tensor, self.max_dur)
        label = torch.LongTensor([1 if idx < len(self.positive_pairs) else -1])
        return query_segment, audio_segments, label
    def collate_fn(self, samples):
        """Collate a mini-batch of data."""
        # Flatten the per-sample audio segment lists; prefix_sums records
        # the segment-count boundaries so they can be regrouped downstream.
        query_segments, segments_list, labels = zip(*samples)
        flattened = [segment for segments in segments_list for segment in segments]
        lengths = [len(segments) for segments in segments_list]
        prefix_sums = list(accumulate(lengths, initial=0))
        return list(query_segments) + flattened, (prefix_sums, labels)
    @property
    def sample_weights(self):
        """Sample weights to balance positive and negative data."""
        n_pos = len(self.positive_pairs)
        n_neg = len(self.negative_pairs)
        return [1 / n_pos] * n_pos + [1 / n_neg] * n_neg
def METHOD_NAME(rttm_path):
    """Parse audio and query pairs from *.rttm."""
    # Relevant lines look like: "LEXEME quesst14_12345 ... quesst14_dev_123 ..."
    # group(1) is the audio file id, group(2) the query id.
    lexeme_re = re.compile(
        r"LEXEME\s+(quesst14_[0-9]+).*?(quesst14_(dev|eval)_[0-9]+)"
    )
    query2audios = defaultdict(list)
    with open(rttm_path) as rttm_file:
        for line in rttm_file:
            hit = lexeme_re.match(line)
            if hit is not None:
                query2audios[hit.group(2)].append(hit.group(1))
    return query2audios
def parse_lst(lst_path):
    """Extract audio names of nnenglish."""
    # Each line is "<audio path> <language>"; keep only nnenglish entries
    # and strip both the directory and the file extension.
    names = []
    with open(lst_path) as lst_file:
        for raw_line in lst_file:
            audio_path, language = tuple(raw_line.strip().split())
            if language == "nnenglish":
                names.append(Path(audio_path).with_suffix("").name)
    return names
def path2tensor(filepath):
    """Load *filepath* as a mono, 16 kHz, peak-normalized 1-D waveform tensor."""
    sox_effects = [
        ["channels", "1"],
        ["rate", "16000"],
        ["norm"],
    ]
    waveform, _ = apply_effects_file(str(filepath), sox_effects)
    # apply_effects_file returns (channels, samples); drop the channel dim.
    return waveform.squeeze(0)
def crop_segment(tensor, tgt_dur, sample_rate=16000):
    """Randomly crop a tgt_dur-second window out of *tensor* via sox effects.

    The waveform is first padded with tgt_dur seconds of silence on both
    sides, then trimmed to tgt_dur seconds starting at a random offset.
    """
    src_dur = len(tensor) / sample_rate
    # NOTE(review): if src_dur < tgt_dur this draws from a reversed range
    # (random.uniform accepts that), giving a negative shift so the crop
    # slides into the leading padding — presumably intended; confirm.
    random_shift = random.uniform(0, src_dur - tgt_dur)
    audio_tensor, _ = apply_effects_tensor(
        tensor.unsqueeze(0),
        sample_rate,
        [
            # pad tgt_dur seconds of silence before and after
            ["pad", f"{tgt_dur}", f"{tgt_dur}"],
            [
                "trim",
                f"{tgt_dur + random_shift}",
                f"{tgt_dur}",
            ],
        ],
    )
    return audio_tensor.squeeze(0)
def unfold_segments(tensor, tgt_dur, sample_rate=16000):
    """Split *tensor* into fixed-length overlapping segments (hop = 1/4 window)."""
    seg_len = int(tgt_dur * sample_rate)
    hop_len = seg_len // 4
    src_len = len(tensor)
    # Pad up to one full segment, or to the next hop multiple beyond src_len.
    if src_len <= seg_len:
        padded_len = seg_len
    else:
        padded_len = (src_len // hop_len + 1) * hop_len
    total_pad = padded_len - src_len
    # Distribute the zero-padding randomly between the front and the tail.
    front = random.randint(0, total_pad)
    padded = torch.cat(
        [torch.zeros(front), tensor, torch.zeros(total_pad - front)]
    )
    return padded.unfold(0, seg_len, hop_len).unbind(0)
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetSkusNestedResourceTypeSecondResult',
'AwaitableGetSkusNestedResourceTypeSecondResult',
'get_skus_nested_resource_type_second',
'get_skus_nested_resource_type_second_output',
]
# NOTE(review): Pulumi-generated code — do not hand-edit beyond comments.
# 'METHOD_NAME' appears to be a dump-mangled rename of the 'type' field
# (see the "Expected argument 'type'" message below) — confirm upstream.
@pulumi.output_type
class GetSkusNestedResourceTypeSecondResult:
    def __init__(__self__, id=None, name=None, properties=None, system_data=None, METHOD_NAME=None):
        # Validate only truthy arguments; pulumi.set stores them on the
        # output-type wrapper rather than as plain attributes.
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if properties and not isinstance(properties, dict):
            raise TypeError("Expected argument 'properties' to be a dict")
        pulumi.set(__self__, "properties", properties)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", METHOD_NAME)
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def properties(self) -> 'outputs.SkuResourceResponseProperties':
        return pulumi.get(self, "properties")
    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> 'outputs.SystemDataResponse':
        """
        Metadata pertaining to creation and last modification of the resource.
        """
        return pulumi.get(self, "system_data")
    @property
    @pulumi.getter
    def METHOD_NAME(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
class AwaitableGetSkusNestedResourceTypeSecondResult(GetSkusNestedResourceTypeSecondResult):
    # Generated awaitable shim: __await__ yields nothing and immediately
    # returns a copy of the result, so 'await' works on the sync value.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetSkusNestedResourceTypeSecondResult(
            id=self.id,
            name=self.name,
            properties=self.properties,
            system_data=self.system_data,
            METHOD_NAME=self.METHOD_NAME)
def get_skus_nested_resource_type_second(nested_resource_type_first: Optional[str] = None,
                                         nested_resource_type_second: Optional[str] = None,
                                         provider_namespace: Optional[str] = None,
                                         resource_type: Optional[str] = None,
                                         sku: Optional[str] = None,
                                         opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSkusNestedResourceTypeSecondResult:
    """
    Gets the sku details for the given resource type and sku name.
    Azure REST API version: 2021-09-01-preview.
    :param str nested_resource_type_first: The first child resource type.
    :param str nested_resource_type_second: The second child resource type.
    :param str provider_namespace: The name of the resource provider hosted within ProviderHub.
    :param str resource_type: The resource type.
    :param str sku: The SKU.
    """
    # Build the invoke payload with the provider's camelCase argument names.
    __args__ = dict()
    __args__['nestedResourceTypeFirst'] = nested_resource_type_first
    __args__['nestedResourceTypeSecond'] = nested_resource_type_second
    __args__['providerNamespace'] = provider_namespace
    __args__['resourceType'] = resource_type
    __args__['sku'] = sku
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # Synchronous invoke; 'typ' tells Pulumi how to deserialize the result.
    __ret__ = pulumi.runtime.invoke('azure-native:providerhub:getSkusNestedResourceTypeSecond', __args__, opts=opts, typ=GetSkusNestedResourceTypeSecondResult).value
    return AwaitableGetSkusNestedResourceTypeSecondResult(
        id=pulumi.get(__ret__, 'id'),
        name=pulumi.get(__ret__, 'name'),
        properties=pulumi.get(__ret__, 'properties'),
        system_data=pulumi.get(__ret__, 'system_data'),
        METHOD_NAME=pulumi.get(__ret__, 'type'))
# Output-typed variant: lift_output_func wraps the plain invoke above so it
# accepts pulumi.Input values and returns a pulumi.Output. The body is
# intentionally empty ('...'); the decorator supplies the implementation.
# Fix: removed trailing extraction junk (' |') that broke the final line.
@_utilities.lift_output_func(get_skus_nested_resource_type_second)
def get_skus_nested_resource_type_second_output(nested_resource_type_first: Optional[pulumi.Input[str]] = None,
                                                nested_resource_type_second: Optional[pulumi.Input[str]] = None,
                                                provider_namespace: Optional[pulumi.Input[str]] = None,
                                                resource_type: Optional[pulumi.Input[str]] = None,
                                                sku: Optional[pulumi.Input[str]] = None,
                                                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSkusNestedResourceTypeSecondResult]:
    """
    Gets the sku details for the given resource type and sku name.
    Azure REST API version: 2021-09-01-preview.
    :param str nested_resource_type_first: The first child resource type.
    :param str nested_resource_type_second: The second child resource type.
    :param str provider_namespace: The name of the resource provider hosted within ProviderHub.
    :param str resource_type: The resource type.
    :param str sku: The SKU.
    """
    ...
#
# Copyright (c) 2022 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import queue
from typing import Tuple
import numpy as np
import threading
import time
import multiprocessing
from logger import get_logger
from inference_executor import InferenceExecutor
from inference_pipeline_handler import InferencePipelineHandler
class InferenceManager:
    """Coordinates a pool of InferenceExecutor processes behind two queues.

    Frames enter via schedule_inference(), results leave via pull_result();
    a pipeline thread shuttles work between the shared queues and the
    per-executor multiprocessing queues, and a monitoring thread propagates
    abort signals raised by any executor.
    """
    def __init__(self, ovms_url, model_name, model_version, num_inference_executors, binary_input,
                 buffer_size):
        # exit_event stops the worker threads; abort_event signals a fatal
        # executor failure to the owner of this manager.
        self.exit_event = threading.Event()
        self.abort_event = threading.Event()
        self.logger = get_logger(__name__)
        # Version 0 means "latest" for logging purposes.
        model_version_str = "latest" if model_version == 0 else model_version
        self.logger.info(f"OVMS Endpoint spec - ovms_url: {ovms_url}; model_name: {model_name}; model_version: {model_version_str}")
        ovms_info = {
            "ovms_url": ovms_url,
            "model_name": model_name,
            "model_version": model_version
        }
        self.logger.info(f"Input buffer capacity set to: {buffer_size} frames")
        # Thread-side buffers between the caller and the pipeline thread.
        self.inputs_queue = queue.Queue(maxsize=buffer_size)
        self.results_queue = queue.Queue(maxsize=buffer_size)
        if binary_input:
            self.logger.info("Using binary input switched on")
        self.logger.info(f"Number of Inference Executors: {num_inference_executors}")
        # Each executor is a separate process with its own mp queues.
        self.inference_executors = [InferenceExecutor(i, ovms_info, binary_input,
                                                      input_queue=multiprocessing.Queue(buffer_size),
                                                      result_queue=multiprocessing.Queue(buffer_size))
                                    for i in range(num_inference_executors)]
    def METHOD_NAME(self) -> None:
        # NOTE(review): name mangled in this dump; reads as 'initialize'.
        self.logger.info("Initializing Inference Manager...")
        self.logger.info("Starting Inference Executors...")
        for inference_executor in self.inference_executors:
            inference_executor.start()
        self.logger.info("Starting inference pipeline thread")
        self.inference_pipeline_thread = threading.Thread(target=self._inference_pipeline_thread)
        self.inference_pipeline_thread.start()
        self.logger.info("Starting inference executors monitoring thread")
        self.inference_executors_monitoring_thread = threading.Thread(target=self._inference_executors_monitoring_thread)
        self.inference_executors_monitoring_thread.start()
        self.logger.info("Inference Manager initialized successfully")
    def shutdown(self) -> None:
        """Stop worker threads first, then shut down executor processes."""
        self.logger.info("Shutting down Inference Manager...")
        self.logger.info("Exiting Inference Manager thread...")
        self.exit_event.set()
        self.inference_pipeline_thread.join()
        self.logger.info("Inference pipeline thread exited successfully")
        self.inference_executors_monitoring_thread.join()
        self.logger.info("Inference executors monitoring thread exited successfully")
        self.logger.info("Shutting down inference executors...")
        for inference_executor in self.inference_executors:
            inference_executor.shutdown()
            inference_executor.join()
            self.logger.info(f"Inference-Executor-{inference_executor.id} shut down successfully")
        self.logger.info("Inference Executors shut down successfully")
        self.logger.info("Inference Manager shut down successfully")
    def schedule_inference(self, frame) -> bool:
        # Non blocking inference scheduling method. Returns True on success.
        # Returns False if buffer is full and new data cannot be scheduled for inference at that moment.
        try:
            self.inputs_queue.put_nowait(frame)
            return True
        except queue.Full:
            return False
    def pull_result(self) -> Tuple[bool, Tuple[np.ndarray, np.ndarray]]:
        # Non blocking results pull method. Returns tuple (status, (frame, result))
        # status == True informs that pull was successful
        # status == False informs that there are no results to be pulled
        # (frame, result) tuple is the actual element pulled from the results queue
        # For status == False, it's set to (None, None) as it's N/A
        try:
            return True, self.results_queue.get_nowait()
        except queue.Empty:
            return False, (None, None)
    def _inference_pipeline_thread(self):
        """Round-robin frames across executors and drain their results."""
        num_inference_executors = len(self.inference_executors)
        # In the first iteration only fill executors inputs
        i = 0
        while i < num_inference_executors:
            try:
                # 1s timeout so the exit flag is checked while idle.
                input = self.inputs_queue.get(timeout=1)
                self.inference_executors[i].input_queue.put(input)
            except queue.Empty:
                if self.exit_event.is_set():
                    return
                continue
            i += 1
        inference_pipeline_handler = InferencePipelineHandler(self.inputs_queue, self.results_queue)
        initial_pipeline_step = InferencePipelineHandler.PipelineStep.PULL_RESULT
        execution_result = InferencePipelineHandler.ExecutionResult(initial_pipeline_step, None)
        i = 0
        while not self.exit_event.is_set():
            inference_executor = self.inference_executors[i]
            previous_execution_result = execution_result
            execution_result = inference_pipeline_handler.run_inference_pipeline(inference_executor, previous_execution_result)
            # Advance to the next executor only when the handler finished a
            # full pull/push cycle for the current one.
            if execution_result.pipeline_step == InferencePipelineHandler.PipelineStep.FINISHED:
                i = (i + 1) % num_inference_executors
                execution_result.pipeline_step = initial_pipeline_step
def _inference_executors_monitoring_thread(self):
while not self.exit_event.is_set():
time.sleep(1)
if not self.abort_event.is_set():
for inference_executor in self.inference_executors:
if inference_executor.abort_event.is_set():
self.logger.info(f"Received abort signal from Inference-Executor-{inference_executor.id}. Notifying Stream Analyzer...")
self.abort_event.set() |
import logging
from _typeshed import Incomplete
from typing import Any
from .s3.connection import S3Connection
Version: Any
UserAgent: Any
config: Any
BUCKET_NAME_RE: Any
TOO_LONG_DNS_NAME_COMP: Any
GENERATION_RE: Any
VERSION_RE: Any
ENDPOINTS_PATH: Any
# Type-stub declarations (.pyi style): bodies are intentionally '...'.
def init_logging(): ...
class NullHandler(logging.Handler):
    # Swallows all records; used as boto's default no-op log handler.
    def emit(self, record): ...
log: Any
perflog: Any
def set_file_logger(name, filepath, level: Any = 20, format_string: Incomplete | None = None): ...
def set_stream_logger(name, level: Any = 10, format_string: Incomplete | None = None): ...
# connect_* factories: each returns a service-specific connection object.
def connect_sqs(aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs): ...
# NOTE(review): name mangled in this dump; the S3Connection return type
# indicates this is 'connect_s3'.
def METHOD_NAME(aws_access_key_id: str | None = None, aws_secret_access_key: str | None = None, **kwargs) -> S3Connection: ...
def connect_gs(gs_access_key_id: Incomplete | None = None, gs_secret_access_key: Incomplete | None = None, **kwargs): ...
def connect_ec2(aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs): ...
def connect_elb(aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs): ...
def connect_autoscale(aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs): ...
def connect_cloudwatch(
    aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs
): ...
def connect_sdb(aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs): ...
def connect_fps(aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs): ...
def connect_mturk(aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs): ...
def connect_cloudfront(
    aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs
): ...
def connect_vpc(aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs): ...
def connect_rds(aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs): ...
def connect_rds2(aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs): ...
def connect_emr(aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs): ...
def connect_sns(aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs): ...
def connect_iam(aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs): ...
def connect_route53(aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs): ...
def connect_cloudformation(
    aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs
): ...
# Eucalyptus / Walrus: EC2/S3-compatible private-cloud endpoints, hence the
# extra host/port/path parameters and is_secure=False defaults.
def connect_euca(
    host: Incomplete | None = None,
    aws_access_key_id: Incomplete | None = None,
    aws_secret_access_key: Incomplete | None = None,
    port: int = 8773,
    path: str = "/services/Eucalyptus",
    is_secure: bool = False,
    **kwargs,
): ...
def connect_glacier(aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs): ...
def connect_ec2_endpoint(
    url, aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs
): ...
def connect_walrus(
    host: Incomplete | None = None,
    aws_access_key_id: Incomplete | None = None,
    aws_secret_access_key: Incomplete | None = None,
    port: int = 8773,
    path: str = "/services/Walrus",
    is_secure: bool = False,
    **kwargs,
): ...
def connect_ses(aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs): ...
def connect_sts(aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs): ...
def connect_ia(
    ia_access_key_id: Incomplete | None = None, ia_secret_access_key: Incomplete | None = None, is_secure: bool = False, **kwargs
): ...
def connect_dynamodb(aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs): ...
def connect_swf(aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs): ...
def connect_cloudsearch(
    aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs
): ...
def connect_cloudsearch2(
    aws_access_key_id: Incomplete | None = None,
    aws_secret_access_key: Incomplete | None = None,
    sign_request: bool = False,
    **kwargs,
): ...
def connect_cloudsearchdomain(
    aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs
): ...
def connect_beanstalk(aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs): ...
def connect_elastictranscoder(
    aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs
): ...
def connect_opsworks(aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs): ...
def connect_redshift(aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs): ...
def connect_support(aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs): ...
def connect_cloudtrail(
    aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs
): ...
def connect_directconnect(
    aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs
): ...
def connect_kinesis(aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs): ...
def connect_logs(aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs): ...
def connect_route53domains(
    aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs
): ...
def connect_cognito_identity(
    aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs
): ...
def connect_cognito_sync(
    aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs
): ...
def connect_kms(aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs): ...
def connect_awslambda(aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs): ...
def connect_codedeploy(
    aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs
): ...
def connect_configservice(
    aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs
): ...
def connect_cloudhsm(aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs): ...
def connect_ec2containerservice(
    aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs
): ...
def connect_machinelearning(
    aws_access_key_id: Incomplete | None = None, aws_secret_access_key: Incomplete | None = None, **kwargs
): ...
# URI helpers for boto's generic storage abstraction (file/s3/gs schemes).
def storage_uri(
    uri_str,
    default_scheme: str = "file",
    debug: int = 0,
    validate: bool = True,
    bucket_storage_uri_class: Any = ...,
    suppress_consec_slashes: bool = True,
    is_latest: bool = False,
): ...
def storage_uri_for_key(key): ...
# Explicitly mark this package as incomplete.
def __getattr__(name: str) -> Incomplete: ...
import unittest
import multiprocessing
import sys
import time
import os.path
import broker
def data_path(file):
base = os.path.realpath(__file__)
return os.path.join(os.path.join(os.path.dirname(base), "certs"), file)
# NOTE: comments only (no method docstrings) — unittest prints method
# docstrings at verbosity 3, so adding them would change runner output.
class TestSSL(unittest.TestCase):
    # Round-trip a ping/pong over topic "/test" to prove the peering works.
    def check_ping(self, ep1, s1, ep2, s2):
        ep2.publish("/test", ["ping"])
        (t, d) = s1.get()
        self.assertEqual(t, "/test")
        self.assertEqual(d[0], "ping")
        ep1.publish(t, ["pong"])
        (t, d) = s2.get()
        self.assertEqual(t, "/test")
        self.assertEqual(d[0], "pong")
    # Both endpoints present CA-signed certs -> peering must succeed.
    def test_ssl_auth_success_ca(self):
        cfg = broker.Configuration(broker.BrokerOptions())
        cfg.openssl_certificate = data_path("cert.1.pem")
        cfg.openssl_key = data_path("key.1.pem")
        cfg.openssl_cafile = data_path("ca.pem")
        with broker.Endpoint(cfg) as ep1, \
             broker.Endpoint(cfg) as ep2, \
             ep1.make_subscriber("/test") as s1, \
             ep2.make_subscriber("/test") as s2:
            port = ep1.listen("127.0.0.1", 0)
            r = ep2.peer("127.0.0.1", port, 0)
            self.assertEqual(r, True)
            self.check_ping(ep1, s1, ep2, s2)
    # Same as above but with a passphrase-encrypted private key.
    def test_ssl_auth_success_ca_pw(self):
        cfg = broker.Configuration(broker.BrokerOptions())
        cfg.openssl_certificate = data_path("cert.1.pem")
        cfg.openssl_key = data_path("key.1.enc.pem")
        cfg.openssl_cafile = data_path("ca.pem")
        cfg.openssl_passphrase = "12345"
        with broker.Endpoint(cfg) as ep1, \
             broker.Endpoint(cfg) as ep2, \
             ep1.make_subscriber("/test") as s1, \
             ep2.make_subscriber("/test") as s2:
            port = ep1.listen("127.0.0.1", 0)
            r = ep2.peer("127.0.0.1", port, 0)
            self.assertEqual(r, True)
            self.check_ping(ep1, s1, ep2, s2)
    # Self-signed cert used as its own CA on both sides -> success.
    def test_ssl_auth_success_self_signed(self):
        cfg = broker.Configuration(broker.BrokerOptions())
        cfg.openssl_certificate = data_path("cert.self-signed.pem")
        cfg.openssl_key = data_path("key.self-signed.pem")
        cfg.openssl_cafile = data_path("cert.self-signed.pem")
        with broker.Endpoint(cfg) as ep1, \
             broker.Endpoint(cfg) as ep2, \
             ep1.make_subscriber("/test") as s1, \
             ep2.make_subscriber("/test") as s2:
            port = ep1.listen("127.0.0.1", 0)
            r = ep2.peer("127.0.0.1", port, 0)
            self.assertEqual(r, True)
            self.check_ping(ep1, s1, ep2, s2)
    # NOTE(review): name mangled in this dump; reads as
    # 'test_ssl_auth_failure_self_signed'. Mismatched trust roots
    # (CA-signed vs self-signed) must fail in both peering directions.
    def METHOD_NAME(self):
        cfg1 = broker.Configuration(broker.BrokerOptions())
        cfg1.openssl_certificate = data_path("cert.1.pem")
        cfg1.openssl_key = data_path("key.1.pem")
        cfg1.openssl_cafile = data_path("ca.pem")
        cfg2 = broker.Configuration(broker.BrokerOptions())
        cfg2.openssl_certificate = data_path("cert.self-signed.pem")
        cfg2.openssl_key = data_path("key.self-signed.pem")
        cfg2.openssl_cafile = data_path("cert.self-signed.pem")
        with broker.Endpoint(cfg1) as ep1, \
             broker.Endpoint(cfg2) as ep2:
            port = ep1.listen("127.0.0.1", 0)
            r = ep2.peer("127.0.0.1", port, 0)
            self.assertEqual(r, False)
        with broker.Endpoint(cfg2) as ep1, \
             broker.Endpoint(cfg1) as ep2:
            port = ep1.listen("127.0.0.1", 0)
            r = ep2.peer("127.0.0.1", port, 0)
            self.assertEqual(r, False)
    # One authenticated endpoint, one with default (no-auth) config -> fail.
    def test_ssl_auth_failure_no_auth(self):
        cfg1 = broker.Configuration(broker.BrokerOptions())
        cfg1.openssl_certificate = data_path("cert.1.pem")
        cfg1.openssl_key = data_path("key.1.pem")
        cfg1.openssl_cafile = data_path("ca.pem")
        cfg2 = broker.Configuration(broker.BrokerOptions())
        with broker.Endpoint(cfg1) as ep1, \
             broker.Endpoint(cfg2) as ep2:
            port = ep1.listen("127.0.0.1", 0)
            r = ep2.peer("127.0.0.1", port, 0)
            self.assertEqual(r, False)
        with broker.Endpoint(cfg2) as ep1, \
             broker.Endpoint(cfg1) as ep2:
            port = ep1.listen("127.0.0.1", 0)
            r = ep2.peer("127.0.0.1", port, 0)
            self.assertEqual(r, False)
    # NOTE(review): identical setup to test_ssl_auth_failure_no_auth —
    # cfg2 never actually disables SSL here; looks like copy-paste that
    # was meant to configure a plain (non-SSL) endpoint. Confirm intent.
    def test_ssl_auth_failure_no_ssl(self):
        cfg1 = broker.Configuration(broker.BrokerOptions())
        cfg1.openssl_certificate = data_path("cert.1.pem")
        cfg1.openssl_key = data_path("key.1.pem")
        cfg1.openssl_cafile = data_path("ca.pem")
        cfg2 = broker.Configuration(broker.BrokerOptions())
        with broker.Endpoint(cfg1) as ep1, \
             broker.Endpoint(cfg2) as ep2:
            port = ep1.listen("127.0.0.1", 0)
            r = ep2.peer("127.0.0.1", port, 0)
            self.assertEqual(r, False)
        with broker.Endpoint(cfg2) as ep1, \
             broker.Endpoint(cfg1) as ep2:
            port = ep1.listen("127.0.0.1", 0)
            r = ep2.peer("127.0.0.1", port, 0)
            self.assertEqual(r, False)
    # Disabled (XXX prefix): wrong passphrase raises inside CAF where this
    # test cannot catch it — kept for documentation of the known gap.
    def XXXtest_ssl_auth_failure_ca_pw(self):
        cfg = broker.Configuration(broker.BrokerOptions())
        cfg.openssl_certificate = data_path("cert.1.pem")
        cfg.openssl_key = data_path("key.1.enc.pem")
        cfg.openssl_cafile = data_path("ca.pem")
        cfg.openssl_passphrase = "WRONG PASSWORD"
        with broker.Endpoint(cfg) as ep1, \
             broker.Endpoint(cfg) as ep2:
            port = ep1.listen("127.0.0.1", 0)
            # TODO: This correctly generates an exception in CAF, for which I
            # don't know where to catch it.
            r = ep2.peer("127.0.0.1", port, 0)
            self.assertEqual(r, False)
if __name__ == '__main__':
    unittest.main(verbosity=3)
"""
Submodule for accessing the rock forming mineral database.
Notes
-----
Accessing and modifying the database across multiple with multiple threads/processes
*could* result in database corruption (e.g. through repeated truncation etc).
"""
import functools
from pathlib import Path
import pandas as pd
import periodictable as pt
from tinydb import Query, TinyDB
from ..util.database import _list_tindyb_unique_values
from ..util.log import Handle
from ..util.meta import pyrolite_datafolder
from .transform import formula_to_elemental, merge_formulae
logger = Handle(__name__)
__dbpath__ = pyrolite_datafolder(subfolder="mineral") / "mindb.json"
@functools.lru_cache(maxsize=None)  # cache outputs for speed
def list_groups():
    """
    List the mineral groups present in the mineral database.

    Returns
    -------
    :class:`list`
        Unique values of the ``group`` field across all database records.
    """
    return _list_tindyb_unique_values("group", dbpath=__dbpath__)
@functools.lru_cache(maxsize=None)  # cache outputs for speed
def list_minerals():
    """
    List the minerals present in the mineral database.

    Returns
    -------
    :class:`list`
        Unique values of the ``name`` field across all database records.
    """
    return _list_tindyb_unique_values("name", dbpath=__dbpath__)
@functools.lru_cache(maxsize=None)  # cache outputs for speed
def list_formulae():
    """
    List the mineral formulae present in the mineral database.

    Returns
    -------
    :class:`list`
        Unique values of the ``formula`` field across all database records.
    """
    return _list_tindyb_unique_values("formula", dbpath=__dbpath__)
def get_mineral(name="", dbpath=None):
    """
    Get a specific mineral from the database.

    Parameters
    ----------
    name : :class:`str`
        Name of the desired mineral.
    dbpath : :class:`pathlib.Path` | :class:`str`
        Optional overriding of the default database path.

    Returns
    -------
    :class:`pandas.Series`
        Database record for the mineral.

    Raises
    ------
    :class:`AssertionError`
        If ``name`` is not present in the database.
    """
    if dbpath is None:
        dbpath = __dbpath__

    # list_minerals() is lru-cached, so this membership test is cheap;
    # attach a message so a failure identifies the offending name.
    assert name in list_minerals(), \
        "Mineral '{}' not found in the mineral database.".format(name)

    # Read-only access avoids accidental truncation of the shared JSON store.
    with TinyDB(str(dbpath), access_mode="r") as db:
        out = db.get(Query().name == name)

    return pd.Series(out)
def parse_composition(composition, drop_zeros=True):
    """
    Parse a composition reference to provide an ionic elemental version in the form of a
    :class:`~pandas.Series`. Currently accepts :class:`pandas.Series`,
    :class:`periodictable.formulas.Formula`
    and structures which will directly convert to :class:`pandas.Series`
    (list of tuples, dict).

    Parameters
    ----------
    composition : :class:`str` | :class:`periodictable.formulas.Formula` | :class:`pandas.Series`
        Name of a mineral, a formula or composition as a series
    drop_zeros : :class:`bool`
        Whether to drop compositional zeros.

    Returns
    -------
    mineral : :class:`pandas.Series`
        Composition formatted as a series; ``None`` if parsing failed.
    """
    mineral = None
    if composition is not None:
        if isinstance(composition, pd.Series):
            # convert to molecular oxides, then to formula, then to wt% elemental
            components = [pt.formula(c) for c in composition.index]
            values = composition.values
            formula = merge_formulae(
                [v / c.mass * c for v, c in zip(values, components)]
            )
            mineral = pd.Series(formula_to_elemental(formula))
        elif isinstance(composition, pt.formulas.Formula):
            mineral = pd.Series(formula_to_elemental(composition))
        elif isinstance(composition, str):
            if composition in list_minerals():
                # known mineral name: fetch its stored composition
                mineral = get_mineral(composition)
            else:
                try:  # formulae
                    form = pt.formula(composition)
                    mineral = pd.Series(formula_to_elemental(form))
                # could also check for formulae in the database, using f.atoms
                except Exception:
                    # was a bare `except:`; narrowed so KeyboardInterrupt /
                    # SystemExit are no longer swallowed
                    pass
        else:
            # list of tuples / dict etc: route through the Series branch
            mineral = parse_composition(pd.Series(composition))

    if drop_zeros and mineral is not None:
        mineral = mineral[mineral != 0]
    return mineral
def METHOD_NAME(group=""):
    """
    Extract a mineral group from the database.

    Parameters
    ----------
    group : :class:`str`
        Group to extract from the mineral database.

    Returns
    -------
    :class:`pandas.DataFrame`
        Dataframe of group members and compositions.
    """
    assert group in list_groups()
    # read-only access to the shared TinyDB JSON store
    with TinyDB(str(__dbpath__), access_mode="r") as db:
        grp = db.search(Query().group == group)
    df = pd.DataFrame(grp)
    # split metadata columns from chemical (element) columns
    meta, chem = (
        ["name", "formula"],
        [i for i in df.columns if i not in ["name", "formula", "group"]],
    )
    df = df.reindex(columns=meta + chem)
    df.loc[:, chem] = df.loc[:, chem].apply(pd.to_numeric)
    df = df.loc[:, (df != 0).any(axis=0)]  # remove zero-only columns
    return df
def update_database(path=None, **kwargs):
    """
    Update the mineral composition database.

    Parameters
    ----------
    path : :class:`str` | :class:`pathlib.Path`
        The desired filepath for the JSON database.

    Notes
    -----
    This will take the 'mins.csv' file from the mineral pyrolite data folder
    and construct a document-based JSON database.
    """
    mindf = pd.read_csv(pyrolite_datafolder(subfolder="mineral") / "mins.csv")
    # add one column per element occurring in any of the formulae
    mindf = mindf.reindex(
        columns=mindf.columns.tolist()
        + [str(a) for a in pt.formula(" ".join(list(mindf.formula.values))).atoms]
    )
    for ix in mindf.index:  # add elemental compositions
        el = parse_composition(pt.formula(mindf.loc[ix, "formula"]))
        mindf.loc[ix, el.index] = el
    mindf = mindf.fillna(0.0)

    if path is None:
        path = __dbpath__
    path = Path(path).with_suffix(".json")

    # name group formula composition
    # needs write access
    with TinyDB(str(path)) as db:
        # drop all existing records before re-inserting the fresh set
        db.truncate()
        for k, v in mindf.T.to_dict().items():
            db.insert(v)
from unittest.mock import patch, MagicMock
import pytest
from ..models import JustfixUser, create_random_phone_number
from .factories import UserFactory
from onboarding.tests.factories import OnboardingInfoFactory
from texting import twilio
def test_create_random_phone_number_works():
    """Generated numbers use the reserved 555 prefix and are 10 digits long."""
    pn = create_random_phone_number()
    assert pn.startswith("555")
    assert len(pn) == 10
@pytest.mark.django_db
class TestGenerateRandomUsername:
    """Tests for ``JustfixUser.objects.generate_random_username()``."""

    def generate(self, prefix="", **kwargs):
        # Helper: create a user whose username comes from the generator.
        user = JustfixUser.objects.create_user(
            username=JustfixUser.objects.generate_random_username(prefix=prefix), **kwargs
        )
        return user

    def test_it_applies_a_prefix_if_provided(self):
        with patch("users.models.get_random_string", side_effect=["boop"]):
            assert self.generate(prefix="bleh_").username == "bleh_boop"

    def test_it_retries_until_a_unique_one_is_found(self):
        # First user takes "boop"; for the second user the generator must
        # skip the duplicate "boop" and settle on "blap".
        with patch("users.models.get_random_string", side_effect=["boop", "boop", "blap"]):
            user = self.generate(phone_number="1234567890")
            assert user.username == "boop"
            user2 = self.generate(phone_number="1234567891")
            assert user2.username == "blap"
def test_formatted_phone_number_works():
    """Phone numbers are pretty-printed only when they have exactly 10 digits."""
    assert JustfixUser().formatted_phone_number() == ""

    user = JustfixUser(phone_number="5551234567")
    assert user.formatted_phone_number() == "(555) 123-4567"

    # numbers of any other length come back unformatted
    user = JustfixUser(phone_number="999999999999999999")
    assert user.formatted_phone_number() == "999999999999999999"
@pytest.mark.django_db
def test_admin_url_works():
    """``admin_url`` points at the Django admin change page for the user."""
    user = UserFactory()
    assert user.admin_url == f"https://example.com/admin/users/justfixuser/{user.pk}/change/"
def test_str_works_when_username_is_available():
    """``str()`` of a user is their username when one is set."""
    user = JustfixUser(username="boop")
    assert str(user) == "boop"
def METHOD_NAME():
    """``str()`` falls back to a placeholder when the user has no username."""
    user = JustfixUser()
    assert str(user) == "<unnamed user>"
def test_full_legal_name_only_renders_if_both_first_and_last_are_present():
    """``full_legal_name`` is empty unless both first and last names are set."""
    user = JustfixUser(first_name="Bobby", last_name="Denver")
    assert user.full_legal_name == "Bobby Denver"

    assert JustfixUser(first_name="Bobby").full_legal_name == ""
    assert JustfixUser(last_name="Denver").full_legal_name == ""
def test_full_preferred_name_uses_correct_first_name():
    """``full_preferred_name`` uses ``preferred_first_name`` when present."""
    user = JustfixUser(first_name="Bobby", last_name="Denver", preferred_first_name="Martha")
    assert user.full_legal_name == "Bobby Denver"
    assert user.full_preferred_name == "Martha Denver"

    # falls back to the legal first name when no preferred name is set
    assert JustfixUser(first_name="Bobby", last_name="Denver").full_preferred_name == "Bobby Denver"
@pytest.mark.parametrize(
    "user_kwargs, expected",
    [
        ({"first_name": "Roberta"}, "Roberta"),
        (
            {"first_name": "Roberta", "preferred_first_name": "Bobbie"},
            "Bobbie",
        ),
    ],
)
def test_best_first_name(user_kwargs, expected):
    """``best_first_name`` prefers the preferred first name when present."""
    assert JustfixUser(**user_kwargs).best_first_name == expected
def test_send_sms_does_nothing_if_user_has_no_onboarding_info(smsoutbox):
    """Without onboarding info, every SMS path reports opted-out and sends nothing."""
    user = JustfixUser(phone_number="5551234500")
    assert user.send_sms("hello there") == twilio.SendSmsResult(
        err_code=twilio.TWILIO_USER_OPTED_OUT_ERR
    )
    # async variants should be no-ops as well
    user.send_sms_async("hello there")
    user.chain_sms_async(["hello there"])
    assert len(smsoutbox) == 0
@pytest.mark.django_db
def test_send_sms_does_nothing_if_user_does_not_allow_it(smsoutbox):
    """``can_we_sms=False`` blocks all three SMS delivery paths."""
    user = OnboardingInfoFactory(can_we_sms=False).user
    assert user.send_sms("hello there") == twilio.SendSmsResult(
        err_code=twilio.TWILIO_USER_OPTED_OUT_ERR
    )
    # async variants should be no-ops as well
    user.send_sms_async("hello there")
    user.chain_sms_async(["hello there"])
    assert len(smsoutbox) == 0
@pytest.mark.django_db
def test_send_sms_works_if_user_allows_it(smsoutbox):
    """``can_we_sms=True`` lets all three send paths deliver the message."""

    def assert_sms_was_sent():
        # exactly one message, to the right number, then reset the outbox
        assert len(smsoutbox) == 1
        assert smsoutbox[0].to == "+15551234500"
        assert smsoutbox[0].body == "hello there"
        smsoutbox[:] = []

    user = OnboardingInfoFactory(can_we_sms=True, user__phone_number="5551234500").user

    assert user.send_sms("hello there")
    assert_sms_was_sent()

    user.send_sms_async("hello there")
    assert_sms_was_sent()

    user.chain_sms_async(["hello there"])
    assert_sms_was_sent()
class TestTriggerFollowupCampaign:
    """``trigger_followup_campaign_async()`` must respect SMS consent.

    (Also removed stray dataset-delimiter junk fused onto the final line.)
    """

    @pytest.fixture(autouse=True)
    def setup_fixture(self, monkeypatch):
        from rapidpro import followup_campaigns

        # Replace the real async trigger with a mock we can assert on.
        self.trigger = MagicMock()
        monkeypatch.setattr(followup_campaigns, "trigger_followup_campaign_async", self.trigger)

    def test_it_does_nothing_if_user_prohibits_sms(self, db):
        OnboardingInfoFactory(can_we_sms=False).user.trigger_followup_campaign_async("LOC")
        self.trigger.assert_not_called()

    def test_it_triggers_followup_campaign_if_user_allows_sms(self, db):
        OnboardingInfoFactory(can_we_sms=True).user.trigger_followup_campaign_async("LOC")
        self.trigger.assert_called_once_with("Bip Jones", "5551234567", "LOC", locale="en")
"""Implementation of the JSON adaptation objects

This module exists to avoid a circular import problem: psycopg2.extras depends
on psycopg2.extension, so I can't create the default JSON typecasters in
extensions importing register_json from extras.
"""
# psycopg/_json.py - Implementation of the JSON adaptation objects
#
# Copyright (C) 2012-2019 Daniele Varrazzo <daniele.varrazzo@gmail.com>
# Copyright (C) 2020-2021 The Psycopg Team
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import json
from psycopg2._psycopg import ISQLQuote, QuotedString
from psycopg2._psycopg import new_type, new_array_type, register_type
# oids from PostgreSQL 9.2
JSON_OID = 114
JSONARRAY_OID = 199
# oids from PostgreSQL 9.4
JSONB_OID = 3802
JSONBARRAY_OID = 3807
class Json:
    """
    An `~psycopg2.extensions.ISQLQuote` wrapper to adapt a Python object to
    :sql:`json` data type.

    `!Json` can be used to wrap any object supported by the provided *dumps*
    function. If none is provided, the standard :py:func:`json.dumps()` is
    used.
    """

    def __init__(self, adapted, dumps=None):
        self.adapted = adapted
        self._conn = None  # set via METHOD_NAME() during adaptation
        self._dumps = dumps or json.dumps

    def __conform__(self, proto):
        # Adaptation-protocol entry point: expose ourselves as an ISQLQuote.
        if proto is ISQLQuote:
            return self

    def dumps(self, obj):
        """Serialize *obj* in JSON format.

        The default is to call `!json.dumps()` or the *dumps* function
        provided in the constructor. You can override this method to create a
        customized JSON wrapper.
        """
        return self._dumps(obj)

    def METHOD_NAME(self, conn):
        # Remember the connection so getquoted() can quote with its settings.
        self._conn = conn

    def getquoted(self):
        """Return the quoted SQL (bytes) representation of the wrapped object."""
        s = self.dumps(self.adapted)
        qs = QuotedString(s)
        if self._conn is not None:
            # propagate the connection for correct escaping/encoding
            qs.METHOD_NAME(self._conn)
        return qs.getquoted()

    def __str__(self):
        # getquoted is binary
        return self.getquoted().decode('ascii', 'replace')
def register_json(conn_or_curs=None, globally=False, loads=None,
                  oid=None, array_oid=None, name='json'):
    """Create and register typecasters converting :sql:`json` type to Python objects.

    :param conn_or_curs: a connection or cursor used to find the :sql:`json`
        and :sql:`json[]` oids; the typecasters are registered in a scope
        limited to this object, unless *globally* is set to `!True`. It can be
        `!None` if the oids are provided
    :param globally: if `!False` register the typecasters only on
        *conn_or_curs*, otherwise register them globally
    :param loads: the function used to parse the data into a Python object. If
        `!None` use `!json.loads()`, where `!json` is the module chosen
        according to the Python version (see above)
    :param oid: the OID of the :sql:`json` type if known; If not, it will be
        queried on *conn_or_curs*
    :param array_oid: the OID of the :sql:`json[]` array type if known;
        if not, it will be queried on *conn_or_curs*
    :param name: the name of the data type to look for in *conn_or_curs*

    The connection or cursor passed to the function will be used to query the
    database and look for the OID of the :sql:`json` type (or an alternative
    type if *name* if provided). No query is performed if *oid* and *array_oid*
    are provided. Raise `~psycopg2.ProgrammingError` if the type is not found.
    """
    if oid is None:
        oid, array_oid = _get_json_oids(conn_or_curs, name)

    JSON, JSONARRAY = _create_json_typecasters(
        oid, array_oid, loads=loads, name=name.upper())

    # Scope the registration to conn_or_curs unless a global registration was
    # requested (replaces the old `not x and y or z` idiom with an explicit
    # conditional expression; None means "register globally" for register_type).
    scope = None if globally else conn_or_curs
    register_type(JSON, scope)

    if JSONARRAY is not None:
        register_type(JSONARRAY, scope)

    return JSON, JSONARRAY
def register_default_json(conn_or_curs=None, globally=False, loads=None):
    """
    Create and register :sql:`json` typecasters for PostgreSQL 9.2 and following.

    Since PostgreSQL 9.2 :sql:`json` is a builtin type, hence its oid is known
    and fixed. This function allows specifying a customized *loads* function
    for the default :sql:`json` type without querying the database.
    All the parameters have the same meaning of `register_json()`.
    """
    # builtin oids: no database round-trip required
    return register_json(conn_or_curs=conn_or_curs, globally=globally,
                         loads=loads, oid=JSON_OID, array_oid=JSONARRAY_OID)
def register_default_jsonb(conn_or_curs=None, globally=False, loads=None):
    """
    Create and register :sql:`jsonb` typecasters for PostgreSQL 9.4 and following.

    As in `register_default_json()`, the function allows to register a
    customized *loads* function for the :sql:`jsonb` type at its known oid for
    PostgreSQL 9.4 and following versions. All the parameters have the same
    meaning of `register_json()`.
    """
    # builtin oids: no database round-trip required
    return register_json(conn_or_curs=conn_or_curs, globally=globally,
                         loads=loads, oid=JSONB_OID, array_oid=JSONBARRAY_OID, name='jsonb')
def _create_json_typecasters(oid, array_oid, loads=None, name='JSON'):
    """Create typecasters for json data type.

    Returns a ``(JSON, JSONARRAY)`` pair; ``JSONARRAY`` is ``None`` when no
    *array_oid* is given.
    """
    if loads is None:
        loads = json.loads

    def typecast_json(s, cur):
        # SQL NULL stays None
        if s is None:
            return None
        return loads(s)

    JSON = new_type((oid, ), name, typecast_json)
    if array_oid is not None:
        JSONARRAY = new_array_type((array_oid, ), f"{name}ARRAY", JSON)
    else:
        JSONARRAY = None

    return JSON, JSONARRAY
def _get_json_oids(conn_or_curs, name='json'):
    """Query the database for the oids of *name* and its array companion type.

    Returns a ``(oid, array_oid)`` row; raises `!conn.ProgrammingError` when
    the type is not found.
    """
    # lazy imports (this module must not import extras/extensions at load time)
    from psycopg2.extensions import STATUS_IN_TRANSACTION
    from psycopg2.extras import _solve_conn_curs

    conn, curs = _solve_conn_curs(conn_or_curs)

    # Store the transaction status of the connection to revert it after use
    conn_status = conn.status

    # column typarray not available before PG 8.3
    typarray = "typarray" if conn.info.server_version >= 80300 else "NULL"

    # get the oid for the requested json type (and its array type if available)
    curs.execute(
        "SELECT t.oid, %s FROM pg_type t WHERE t.typname = %%s;"
        % typarray, (name,))
    r = curs.fetchone()

    # revert the status of the connection as before the command
    if conn_status != STATUS_IN_TRANSACTION and not conn.autocommit:
        conn.rollback()

    if not r:
        raise conn.ProgrammingError(f"{name} data type not found")

    return r
import numpy as np
import pandas as pd
import pytest
import statsmodels.api as sm
from plotnine import (
aes,
coord_trans,
geom_point,
geom_smooth,
ggplot,
stat_smooth,
)
from plotnine.exceptions import PlotnineWarning
# Deterministic noise so baseline comparison images are reproducible.
random_state = np.random.RandomState(1234567890)
n = 100

# linear relationship
x = np.linspace(0, 1, n)
y = 4 * x + 5
y_noisy = y + 0.1 * random_state.randn(n)
linear_data = pd.DataFrame({"x": x, "y": y, "y_noisy": y_noisy})

# non-linear relationship (sine wave)
x = np.linspace(-2 * np.pi, 2 * np.pi, n)
y = np.sin(x)
y_noisy = y + 0.1 * random_state.randn(n)
non_linear_data = pd.DataFrame({"x": x, "y": y, "y_noisy": y_noisy})

# discrete_x
discrete_data_x = pd.DataFrame(
    {"x": range(10), "y": [1, 2, 3, 4, 4, 5, 6, 7, 8, 9]}
)

# continuous_x
continuous_data_x = pd.DataFrame(
    {"x": np.arange(1, 21) + 0.2, "y": range(1, 21)}
)

# linear relationship, values greater than zero (for log-log coord tests)
n = 10
x = np.arange(1, 1 + n)
y = x + 11
y_noisy = y + random_state.rand(n)
linear_data_gtz = pd.DataFrame({"x": x, "y": y, "y_noisy": y_noisy})
def test_linear_smooth():
    """lm fit over the noisy linear data matches the 'linear_smooth' baseline."""
    p = (
        ggplot(linear_data, aes("x"))
        + geom_point(aes(y="y_noisy"))
        + geom_smooth(aes(y="y_noisy"), method="lm", span=0.3, color="blue")
    )

    assert p == "linear_smooth"
def METHOD_NAME():
    """Same lm fit, but ``se=False`` suppresses the confidence band."""
    p = (
        ggplot(linear_data, aes("x"))
        + geom_point(aes(y="y_noisy"))
        + geom_smooth(
            aes(y="y_noisy"), method="lm", span=0.3, color="blue", se=False
        )
    )

    assert p == "linear_smooth_no_ci"
def test_non_linear_smooth():
    """loess fit over the sine-wave data matches the 'non_linear_smooth' baseline.

    Fix: this test previously plotted ``linear_data`` while the module-level
    ``non_linear_data`` frame was defined but never used.
    NOTE(review): confirm the 'non_linear_smooth' baseline image was generated
    from the sine-wave dataset.
    """
    p = (
        ggplot(non_linear_data, aes("x"))
        + geom_point(aes(y="y_noisy"))
        + geom_smooth(aes(y="y_noisy"), method="loess", span=0.3, color="blue")
    )

    assert p == "non_linear_smooth"
def test_non_linear_smooth_no_ci():
    """Same loess fit on the sine-wave data, with the confidence band disabled.

    Fix: previously plotted ``linear_data`` instead of ``non_linear_data``.
    NOTE(review): confirm the baseline image matches the sine-wave dataset.
    """
    p = (
        ggplot(non_linear_data, aes("x"))
        + geom_point(aes(y="y_noisy"))
        + geom_smooth(
            aes(y="y_noisy"), method="loess", span=0.3, color="blue", se=False
        )
    )

    assert p == "non_linear_smooth_no_ci"
def test_discrete_x():
    """Smoothing over integer-valued x matches the 'discrete_x' baseline."""
    p = (
        ggplot(discrete_data_x, aes("x", "y"))
        + geom_point()
        + geom_smooth(color="blue")
    )

    assert p == "discrete_x"
def test_discrete_x_fullrange():
    """``fullrange=True`` extends the fit across the whole x scale."""
    p = (
        ggplot(discrete_data_x, aes("x", "y"))
        + geom_point()
        + geom_smooth(color="blue", fullrange=True)
    )

    assert p == "discrete_x_fullrange"
def test_continuous_x():
    """loess fit over a trimmed subset of the data (no range extension)."""
    n = len(continuous_data_x)
    p = (
        ggplot(continuous_data_x, aes("x", "y"))
        + geom_point()
        + geom_smooth(
            # smooth only the interior points, leaving the edges uncovered
            continuous_data_x[3 : n - 3],
            method="loess",
            color="blue",
            fullrange=False,
        )
    )

    assert p == "continuous_x"
def test_continuous_x_fullrange():
    """loess fit over a trimmed subset, extrapolated across the full range."""
    n = len(continuous_data_x)
    p = (
        ggplot(continuous_data_x, aes("x", "y"))
        + geom_point()
        + geom_smooth(
            continuous_data_x[3 : n - 3],
            method="loess",
            color="blue",
            fullrange=True,
            # 'direct' surface is required for loess extrapolation
            method_args={"surface": "direct"},
        )
    )

    assert p == "continuous_x_fullrange"
def test_coord_trans_se_false():
    """lm fit without a confidence band under log-log coordinates."""
    # scatter plot with LM fit using log-log coordinates
    p = (
        ggplot(linear_data_gtz, aes(x="x", y="y_noisy"))
        + geom_point()
        + coord_trans(x="log10", y="log10")
        + geom_smooth(method="lm", se=False)
    )

    assert p == "coord_trans_se_false"
class TestOther:
    """Smoke tests: each remaining smoothing method draws without errors."""

    # shared base plot of noisy linear data
    p = ggplot(linear_data, aes("x")) + geom_point(aes(y="y_noisy"))

    def test_wls(self):
        p = self.p + geom_smooth(aes(y="y_noisy"), method="wls")
        p.draw_test()

    def test_rlm(self):
        p = self.p + geom_smooth(aes(y="y_noisy"), method="rlm")
        # plotnine emits a warning for this method
        with pytest.warns(PlotnineWarning):
            p.draw_test()

    def test_glm(self):
        p = self.p + geom_smooth(aes(y="y_noisy"), method="glm")
        p.draw_test()

    def test_gls(self):
        p = self.p + geom_smooth(aes(y="y_noisy"), method="gls")
        p.draw_test()

    def test_lowess(self):
        p = self.p + geom_smooth(aes(y="y_noisy"), method="lowess")
        # plotnine emits a warning for this method
        with pytest.warns(PlotnineWarning):
            p.draw_test()

    def test_mavg(self):
        p = self.p + geom_smooth(
            aes(y="y_noisy"), method="mavg", method_args={"window": 10}
        )
        p.draw_test()

    def test_gpr(self):
        # skip silently when scikit-learn is unavailable
        try:
            from sklearn import gaussian_process  # noqa: F401
        except ImportError:
            return
        p = self.p + geom_smooth(aes(y="y_noisy"), method="gpr")
        with pytest.warns(UserWarning):
            p.draw_test()
def test_sorts_by_x():
    """Unsorted x input with stat='identity' matches the 'sorts_by_x' baseline."""
    data = pd.DataFrame({"x": [5, 0, 1, 2, 3, 4], "y": range(6)})
    p = ggplot(data, aes("x", "y")) + geom_smooth(stat="identity")
    assert p == "sorts_by_x"
def test_legend_fill_ratio():
    """Legend rendering with a boolean color mapping matches the baseline."""
    p = (
        ggplot(linear_data, aes("x", color="x<0.5"))
        + geom_point(aes(y="y_noisy"))
        + geom_smooth(aes(y="y_noisy"), method="lm", size=0.5, span=0.3)
    )

    assert p == "legend_fill_ratio"
def test_init_and_fit_kwargs():
    """``method_args`` entries reach both model ``__init__`` and ``fit``."""
    data = pd.DataFrame(
        {
            "x": np.arange(11),
            "y": [0, 0, 0, 0.05, 0.25, 0.5, 0.75, 0.95, 1, 1, 1],
        }
    )
    p = (
        ggplot(data, aes("x", "y"))
        + geom_point()
        + geom_smooth(
            method="glm",
            method_args={
                "family": sm.families.Binomial(),  # init parameter
                "method": "minimize",  # fit parameter
            },
            se=False,
        )
    )

    assert p == "init_and_fit_kwargs"
# Noisy sine data used by the formula-based smoothing tests below.
n = 100
random_state = np.random.RandomState(123)
mu = 0
sigma = 0.065
noise = random_state.randn(n) * sigma + mu
x = np.linspace(-2 * np.pi, 2 * np.pi, n)
data = pd.DataFrame(
    {
        "x": x,
        "y": np.sin(x) + noise,
    }
)
class TestFormula:
    """stat_smooth with an explicit ``formula`` for each regression method.

    (Also removed stray dataset-delimiter junk fused onto the final line.)
    """

    # shared base plot of the noisy sine data
    p = ggplot(data, aes("x", "y")) + geom_point()

    def test_lm(self):
        p = self.p + stat_smooth(
            method="lm", formula="y ~ np.sin(x)", fill="red", se=True
        )
        assert p == "lm_formula"

    def test_lm_weights(self):
        p = (
            self.p
            + aes(weight="x.abs()")
            + stat_smooth(
                method="lm", formula="y ~ np.sin(x)", fill="red", se=True
            )
        )
        assert p == "lm_formula_weights"

    def test_glm(self):
        p = self.p + stat_smooth(
            method="glm", formula="y ~ np.sin(x)", fill="red", se=True
        )
        assert p == "glm_formula"

    def test_rlm(self):
        p = self.p + stat_smooth(
            method="rlm", formula="y ~ np.sin(x)", fill="red", se=False
        )
        assert p == "rlm_formula"

    def test_gls(self):
        p = self.p + stat_smooth(
            method="gls", formula="y ~ np.sin(x)", fill="red", se=True
        )
        assert p == "gls_formula"
# =================================================================
#
# Authors: Just van den Broecke <justb4@gmail.com>
# Tom Kralidis <tomkralidis@gmail.com>
#
# Copyright (c) 2019 Just van den Broecke
# Copyright (c) 2022 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
# Needs to be run like: python3 -m pytest
import logging
import pytest
from pygeoapi.provider.base import ProviderItemNotFoundError
from pygeoapi.provider.ogr import OGRProvider
LOGGER = logging.getLogger(__name__)
# Testing with SQLite files with identical features
# (all 2481 addresses in Otterlo Netherlands).
@pytest.fixture()
def METHOD_NAME():
    """OGR provider config for the EPSG:4326 SQLite dutch-addresses dataset."""
    return {
        'name': 'OGR',
        'type': 'feature',
        'data': {
            'source_type': 'SQLite',
            'source':
                './tests/data/dutch_addresses_4326.sqlite',
            # 'source_srs': 'EPSG:4326',
            # 'target_srs': 'EPSG:4326',
            'source_capabilities': {
                'paging': True
            },
        },
        'id_field': 'id',
        'layer': 'ogrgeojson'
    }
def test_get_fields_4326(METHOD_NAME):
    """Testing field types"""
    p = OGRProvider(METHOD_NAME)
    results = p.get_fields()
    # both address fields are reported as strings by the source layer
    assert results['straatnaam']['type'] == 'string'
    assert results['huisnummer']['type'] == 'string'
def test_get_4326(METHOD_NAME):
    """Testing query for a specific object"""
    p = OGRProvider(METHOD_NAME)
    # known feature id from the fixture dataset
    result = p.get('inspireadressen.1747652')
    assert result['id'] == 'inspireadressen.1747652'
    assert 'Mosselsepad' in result['properties']['straatnaam']
def test_get_not_existing_feature_raise_exception(
    METHOD_NAME
):
    """Testing query for a not existing object"""
    p = OGRProvider(METHOD_NAME)
    # negative ids never occur in the dataset
    with pytest.raises(ProviderItemNotFoundError):
        p.get(-1)
def test_query_hits_4326(METHOD_NAME):
    """Testing query on entire collection for hits"""
    p = OGRProvider(METHOD_NAME)
    feature_collection = p.query(resulttype='hits')
    assert feature_collection.get('type') == 'FeatureCollection'
    # 'hits' returns no features, only the matched count
    features = feature_collection.get('features')
    assert len(features) == 0
    hits = feature_collection.get('numberMatched')
    assert hits is not None
    assert hits == 2481  # total addresses in the test dataset
def test_query_bbox_hits_4326(METHOD_NAME):
    """Testing query for a valid JSON object with geometry"""
    p = OGRProvider(METHOD_NAME)
    # feature_collection = p.query(
    #     bbox=[120000, 480000, 124000, 487000], resulttype='hits')
    feature_collection = p.query(
        bbox=[5.763409, 52.060197, 5.769256, 52.061976], resulttype='hits')
    assert feature_collection.get('type') == 'FeatureCollection'
    # 'hits' returns no features, only the matched count
    features = feature_collection.get('features')
    assert len(features) == 0
    hits = feature_collection.get('numberMatched')
    assert hits is not None
    assert hits == 1  # exactly one address inside this small bbox
def test_query_bbox_4326(METHOD_NAME):
    """Testing query for a valid JSON object with geometry"""
    p = OGRProvider(METHOD_NAME)
    # feature_collection = p.query(
    #     bbox=[180800, 452500, 181200, 452700], resulttype='results')
    feature_collection = p.query(
        bbox=(5.763409, 52.060197, 5.769256, 52.061976), resulttype='results')
    assert feature_collection.get('type') == 'FeatureCollection'
    features = feature_collection.get('features')
    assert len(features) == 1
    # 'results' responses carry features but no numberMatched
    hits = feature_collection.get('numberMatched')
    assert hits is None
    feature = features[0]
    properties = feature.get('properties')
    assert properties is not None
    geometry = feature.get('geometry')
    assert geometry is not None
    assert properties['straatnaam'] == 'Planken Wambuisweg'
def test_query_with_limit_4326(METHOD_NAME):
    """Testing query for a valid JSON object with geometry"""
    p = OGRProvider(METHOD_NAME)
    feature_collection = p.query(limit=5, resulttype='results')
    assert feature_collection.get('type') == 'FeatureCollection'
    # limit caps the number of returned features
    features = feature_collection.get('features')
    assert len(features) == 5
    hits = feature_collection.get('numberMatched')
    assert hits is None
    feature = features[0]
    properties = feature.get('properties')
    assert properties is not None
    geometry = feature.get('geometry')
    assert geometry is not None
def test_query_with_offset_4326(METHOD_NAME):
    """Testing query for a valid JSON object with geometry"""
    p = OGRProvider(METHOD_NAME)
    feature_collection = p.query(offset=20, limit=5, resulttype='results')
    assert feature_collection.get('type') == 'FeatureCollection'
    features = feature_collection.get('features')
    assert len(features) == 5
    hits = feature_collection.get('numberMatched')
    assert hits is None
    # first feature after skipping the initial 20 records
    feature = features[0]
    properties = feature.get('properties')
    assert properties is not None
    assert feature['id'] == 'inspireadressen.1744969'
    assert 'Egypte' in properties['straatnaam']
    geometry = feature.get('geometry')
    assert geometry is not None
def test_query_bbox_with_offset_4326(METHOD_NAME):
    """Testing query for a valid JSON object with geometry"""
    p = OGRProvider(METHOD_NAME)
    # combine spatial filtering with paging (skip the first match)
    feature_collection = p.query(
        offset=1, limit=50,
        bbox=(5.742, 52.053, 5.773, 52.098),
        resulttype='results')
    assert feature_collection.get('type') == 'FeatureCollection'
    features = feature_collection.get('features')
    assert len(features) == 3
    hits = feature_collection.get('numberMatched')
    assert hits is None
    feature = features[0]
    properties = feature.get('properties')
    assert properties is not None
    geometry = feature.get('geometry')
    assert geometry is not None
    assert properties['straatnaam'] == 'Egypte'
    assert properties['huisnummer'] == '4'
def test_query_with_property_filtering(METHOD_NAME):
    """Testing query with property filtering

    (Removed stray dataset-delimiter junk fused onto the final line.)
    """
    p = OGRProvider(METHOD_NAME)

    feature_collection = p.query(
        properties=[
            ('straatnaam', 'Arnhemseweg')
        ]
    )

    assert feature_collection.get('type') == 'FeatureCollection'

    features = feature_collection.get('features')
    assert len(features) > 1

    # every returned feature must satisfy the property filter
    for feature in features:
        assert 'properties' in feature
        assert 'straatnaam' in feature['properties']
        assert feature['properties']['straatnaam'] == 'Arnhemseweg'
# Copyright (c) 2012 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from topologies.BaseTopology import BaseTopology
class Cluster(BaseTopology):
    """A cluster is a group of nodes which are all one hop from eachother
    Clusters can also contain other clusters
    When creating this kind of topology, return a single cluster (usually
    the root cluster) from create_system in configs/ruby/<protocol>.py

    (Removed stray dataset-delimiter junk fused onto the final line.)
    """

    # Class-level counters: ids are unique across *all* Cluster instances.
    _num_int_links = 0
    _num_ext_links = 0
    _num_routers = 0

    # Below methods for auto counting
    @classmethod
    def METHOD_NAME(cls):
        """Return the next unique internal-link id."""
        cls._num_int_links += 1
        return cls._num_int_links - 1

    @classmethod
    def num_ext_links(cls):
        """Return the next unique external-link id."""
        cls._num_ext_links += 1
        return cls._num_ext_links - 1

    @classmethod
    def num_routers(cls):
        """Return the next unique router id."""
        cls._num_routers += 1
        return cls._num_routers - 1

    def __init__(self, intBW=0, extBW=0, intLatency=0, extLatency=0):
        """internalBandwidth is bandwidth of all links within the cluster
        externalBandwidth is bandwidth from this cluster to any cluster
        connecting to it.
        internal/externalLatency are similar
        **** When creating a cluster with sub-clusters, the sub-cluster
        external bandwidth overrides the internal bandwidth of the
        super cluster
        """
        self.nodes = []
        self.router = None  # created in makeTopology
        self.intBW = intBW
        self.extBW = extBW
        self.intLatency = intLatency
        self.extLatency = extLatency

    def add(self, node):
        """Add a controller or sub-cluster to this cluster."""
        self.nodes.append(node)

    def makeTopology(self, options, network, IntLink, ExtLink, Router):
        """Recursively make all of the links and routers"""

        # make a router to connect all of the nodes
        self.router = Router(router_id=self.num_routers())
        network.routers.append(self.router)

        for node in self.nodes:
            # NOTE(review): exact type checks (not isinstance) are preserved
            # from the original; Cluster subclasses would be treated as plain
            # controllers here — confirm that is intended.
            if type(node) == Cluster:
                node.makeTopology(options, network, IntLink, ExtLink, Router)

                # connect this cluster to the router with a link in each
                # direction
                link_out = IntLink(
                    link_id=self.METHOD_NAME(),
                    src_node=self.router,
                    dst_node=node.router,
                )
                link_in = IntLink(
                    link_id=self.METHOD_NAME(),
                    src_node=node.router,
                    dst_node=self.router,
                )

                if node.extBW:
                    link_out.bandwidth_factor = node.extBW
                    link_in.bandwidth_factor = node.extBW
                # if there is an internal b/w for this node
                # and no ext b/w to override
                elif self.intBW:
                    link_out.bandwidth_factor = self.intBW
                    link_in.bandwidth_factor = self.intBW

                if node.extLatency:
                    link_out.latency = node.extLatency
                    link_in.latency = node.extLatency
                elif self.intLatency:
                    link_out.latency = self.intLatency
                    link_in.latency = self.intLatency

                network.int_links.append(link_out)
                network.int_links.append(link_in)
            else:
                # node is just a controller,
                # connect it to the router via a ext_link
                link = ExtLink(
                    link_id=self.num_ext_links(),
                    ext_node=node,
                    int_node=self.router,
                )

                if self.intBW:
                    link.bandwidth_factor = self.intBW
                if self.intLatency:
                    link.latency = self.intLatency

                network.ext_links.append(link)

    def __len__(self):
        # Count controllers (recursively through sub-clusters), not the
        # sub-cluster objects themselves.
        return len([i for i in self.nodes if type(i) != Cluster]) + sum(
            [len(i) for i in self.nodes if type(i) == Cluster]
        )
5,783 | add | import queue
import threading
import time
from concurrent.futures import Executor
from typing import Any, Callable, List, Mapping, Optional, Tuple, Union
class ScheduledTask:
    """
    Internal representation of a task (a callable) and its scheduling parameters.
    """

    def __init__(
        self,
        task: Callable,
        period: Optional[float] = None,
        fixed_rate: bool = True,
        start: Optional[float] = None,
        on_error: Optional[Callable[[Exception], None]] = None,
        args: Optional[Union[tuple, list]] = None,
        kwargs: Optional[Mapping[str, Any]] = None,
    ) -> None:
        """
        :param task: the callable to execute
        :param period: re-execution period in seconds; if None the task runs once
        :param fixed_rate: if True the deadline advances by exactly ``period``
            each run, regardless of how long the task took; otherwise the next
            deadline is measured from the end of the previous run
        :param start: earliest time (epoch seconds) at which the task may first run
        :param on_error: callback invoked with the exception if ``task`` raises
        :param args: positional arguments passed to ``task``
        :param kwargs: keyword arguments passed to ``task``
        """
        super().__init__()
        self.task = task
        self.fixed_rate = fixed_rate
        self.period = period
        self.start = start
        self.on_error = on_error
        self.args = args or tuple()
        self.kwargs = kwargs or dict()
        # Next execution time (epoch seconds); set by the scheduler.
        self.deadline: Optional[float] = None
        # Last exception raised by run(), if any.
        self.error: Optional[Exception] = None
        self._cancelled = False

    @property
    def is_periodic(self) -> bool:
        return self.period is not None

    @property
    def is_cancelled(self) -> bool:
        return self._cancelled

    def set_next_deadline(self):
        """
        Internal method to update the next deadline of this task based on the period and the current time.

        :raises ValueError: if the deadline was never initialized
        """
        # Explicit None check: a deadline of exactly 0.0 is a valid (if
        # unlikely) time and must not be mistaken for "uninitialized".
        if self.deadline is None:
            raise ValueError("Deadline was not initialized")

        if self.fixed_rate:
            self.deadline = self.deadline + self.period
        else:
            self.deadline = time.time() + self.period

    def cancel(self):
        self._cancelled = True

    def run(self):
        """
        Executes the task function. If the function raises an Exception, it is
        recorded in ``self.error`` and ``on_error`` is called (if set).
        """
        try:
            self.task(*self.args, **self.kwargs)
        except Exception as e:
            self.error = e  # keep the last failure available for inspection
            if self.on_error:
                self.on_error(e)
class Scheduler:
    """
    An event-loop based task scheduler that can manage multiple scheduled tasks with different periods,
    can be parallelized with an executor.
    """

    # Sentinel queue entry: its pseudo-deadline of -1 sorts before any real
    # task, so close() wakes the run loop immediately.
    POISON = (-1, "__POISON__")

    def __init__(self, executor: Optional[Executor] = None) -> None:
        """
        Creates a new Scheduler. If an executor is passed, then that executor will be used to run the scheduled tasks
        asynchronously, otherwise they will be executed synchronously inside the event loop. Running tasks
        asynchronously in an executor means that they will be effectively executed at a fixed rate (scheduling with
        ``fixed_rate = False``, will have no effect).

        :param executor: an optional executor that tasks will be submitted to.
        """
        super().__init__()
        self.executor = executor
        # Tasks ordered by (deadline, task).  NOTE(review): two tasks with
        # identical deadlines make PriorityQueue compare the ScheduledTask
        # objects, which define no ordering -- TODO confirm this cannot occur.
        self._queue = queue.PriorityQueue()
        # Interrupts the run loop's timed wait when a new task arrives.
        self._condition = threading.Condition()

    def schedule(
        self,
        func: Callable,
        period: Optional[float] = None,
        fixed_rate: bool = True,
        start: Optional[float] = None,
        on_error: Callable[[Exception], None] = None,
        args: Optional[Union[Tuple, List[Any]]] = None,
        kwargs: Optional[Mapping[str, Any]] = None,
    ) -> ScheduledTask:
        """
        Schedules a given task (function call).

        :param func: the task to schedule
        :param period: the period in which to run the task (in seconds). if not set, task will run once
        :param fixed_rate: whether the to run at a fixed rate (neglecting execution duration of the task)
        :param start: start time
        :param on_error: error callback
        :param args: additional positional arguments to pass to the function
        :param kwargs: additional keyword arguments to pass to the function
        :return: a ScheduledTask instance
        """
        st = ScheduledTask(
            func,
            period=period,
            fixed_rate=fixed_rate,
            start=start,
            on_error=on_error,
            args=args,
            kwargs=kwargs,
        )
        self.schedule_task(st)
        return st

    def schedule_task(self, task: ScheduledTask) -> None:
        """
        Schedules the given task and sets the deadline of the task to either ``task.start`` or the current time.

        :param task: the task to schedule
        """
        # A start time in the past (or unset) collapses to "now".
        task.deadline = max(task.start or 0, time.time())
        self.METHOD_NAME(task)

    def METHOD_NAME(self, task: ScheduledTask) -> None:
        """
        Schedules the given task. Requires that the task has a deadline set. It's better to use ``schedule_task``.

        :param task: the task to schedule.
        """
        if task.deadline is None:
            raise ValueError
        # Re-scheduling a task revives it if it was previously cancelled.
        task._cancelled = False
        with self._condition:
            self._queue.put((task.deadline, task))
            # Wake the run loop so it re-evaluates the earliest deadline.
            self._condition.notify()

    def close(self) -> None:
        """
        Terminates the run loop.
        """
        with self._condition:
            self._queue.put(self.POISON)
            self._condition.notify()

    def run(self):
        """
        The scheduler's event loop.  Blocks the calling thread until
        close() enqueues the POISON sentinel.
        """
        # Bind hot-loop attributes to locals once.
        q = self._queue
        cond = self._condition
        executor = self.executor
        poison = self.POISON
        task: ScheduledTask
        while True:
            deadline, task = q.get()
            if (deadline, task) == poison:
                break
            if task.is_cancelled:
                continue
            # wait until the task should be executed
            wait = max(0, deadline - time.time())
            if wait > 0:
                with cond:
                    # wait() returns True when notified before the timeout.
                    interrupted = cond.wait(timeout=wait)
                    if interrupted:
                        # something with a potentially earlier deadline has arrived while waiting, so we re-queue and
                        # continue. this could be optimized by checking the deadline of the added element(s) first,
                        # but that would be fairly involved. the assumption is that `schedule` is not invoked frequently
                        q.put((task.deadline, task))
                        continue
            # run or submit the task
            if not task.is_cancelled:
                if executor:
                    executor.submit(task.run)
                else:
                    task.run()
            # Periodic tasks are re-queued with their next deadline.
            if task.is_periodic:
                try:
                    task.set_next_deadline()
                except ValueError:
                    # task deadline couldn't be set because it was cancelled
                    continue
                q.put((task.deadline, task))
5,784 | mock value original type | # Copyright (C) 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import itertools
from collections import namedtuple
from typing import (Any, Dict, Iterable, Optional)
from google.protobuf import descriptor_pb2
from gapic.schema import metadata
from gapic.schema import wrappers
# Injected dummy test types
@dataclasses.dataclass(frozen=True)
class DummyMethod:
    # Stand-in for a wrappers.Method in template tests; every attribute
    # defaults to a falsy placeholder so individual tests set only the
    # attributes they actually inspect.
    name: bool = False
    input: bool = False
    output: bool = False
    lro: bool = False
    void: bool = False
    paged_result_field: bool = False
    client_streaming: bool = False
    server_streaming: bool = False
    flattened_fields: Dict[str, Any] = dataclasses.field(default_factory=dict)
    client_output: bool = False
    client_output_async: bool = False
# Lightweight namedtuple stand-ins for wrapper types.  Setting
# __new__.__defaults__ to all-False makes every field optional, so tests
# construct these with only the fields under test.
DummyIdent = namedtuple("DummyIdent", ["name", "sphinx"])
DummyIdent.__new__.__defaults__ = (False,) * len(DummyIdent._fields)
DummyMessageTypePB = namedtuple("DummyMessageTypePB", ["name"])
# DummyMessageBase = namedtuple(
# "DummyMessage", ["fields", "type", "options", "ident",])
# DummyMessageBase.__new__.__defaults__ = (False,) * len(DummyMessageBase._fields)
# Base tuple for DummyField below (the class adds a mock-value property).
DummyFieldBase = namedtuple("DummyField",
                            ["message",
                             "enum",
                             "name",
                             "repeated",
                             "required",
                             "resource_reference",
                             "oneof",
                             "field_pb",
                             "meta",
                             "is_primitive",
                             "ident",
                             "type"])
DummyFieldBase.__new__.__defaults__ = (False,) * len(DummyFieldBase._fields)
class DummyField(DummyFieldBase):
    # Adds the mock-value accessor the real wrappers.Field exposes.
    @property
    def METHOD_NAME(self):
        # Fixed sentinel value; tests only check that it is propagated.
        return "mock_value"
class DummyMessage:
    """Lightweight stand-in for wrappers.MessageType used in template tests."""

    def __init__(self, *, fields=None, type="", options=False, ident=False, resource_path=False, meta=None):
        # Bug fix: 'fields' previously defaulted to a shared mutable {},
        # leaking field registrations between instances; create a fresh
        # dict per instance instead.
        self.fields = {} if fields is None else fields
        self.type = type
        self.options = options
        self.ident = ident
        self.resource_path = resource_path
        self.meta = meta or metadata.Metadata()

    def get_field(self, field_name: str):
        """Return the field registered under *field_name* (raises KeyError if absent)."""
        return self.fields[field_name]

    def oneof_fields(self):
        """Map each oneof group name to one of its member fields."""
        return dict((field.oneof, field) for field in self.fields.values() if field.oneof)

    @property
    def required_fields(self):
        """All fields flagged as required."""
        return [field for field in self.fields.values() if field.required]

    @property
    def resource_path_args(self):
        # Extract the {placeholder} segments of the resource path pattern.
        return wrappers.MessageType.PATH_ARG_RE.findall(self.resource_path or '')
# Service / API-schema / naming stand-ins, again with all-optional fields so
# tests populate only the attributes they exercise.
DummyService = namedtuple("DummyService", [
    "name", "methods", "client_name", "async_client_name", "resource_messages_dict"])
DummyService.__new__.__defaults__ = (False,) * len(DummyService._fields)
DummyApiSchema = namedtuple("DummyApiSchema",
                            ["services", "naming", "messages"])
DummyApiSchema.__new__.__defaults__ = (False,) * len(DummyApiSchema._fields)
DummyNaming = namedtuple(
    "DummyNaming", ["warehouse_package_name", "name", "version", "versioned_module_name", "module_namespace", "proto_package"])
DummyNaming.__new__.__defaults__ = (False,) * len(DummyNaming._fields)
def message_factory(exp: str,
                    repeated_iter=itertools.repeat(False),
                    enum: Optional[wrappers.EnumType] = None,
                    ) -> DummyMessage:
    """Build a linear chain of DummyMessages from a dotted expression.

    Mimics the structure of MessageType in the wrappers module: a
    MessageType maps field names to Fields, and a Field may reference a
    nested MessageType.  ``exp`` is a dotted attribute expression such as
    "mollusc.cephalopod.coleoid"; if ``enum`` is given it replaces the
    final element of the chain.  Returns the root message.
    """
    names = exp.split(".")
    chain = [DummyMessage(fields={}, type=name.upper() + "_TYPE")
             for name in names]
    if enum:
        chain[-1] = enum
    # Link each element to its parent via a DummyField named after it.
    for idx, repeated in zip(range(1, len(chain)), repeated_iter):
        child = chain[idx]
        if isinstance(child, DummyMessage):
            link = DummyField(message=child, repeated=repeated)
        else:
            link = DummyField(enum=child)
        chain[idx - 1].fields[names[idx]] = link
    return chain[0]
def enum_factory(name: str, variants: Iterable[str]) -> wrappers.EnumType:
    """Build a wrappers.EnumType named *name* whose values are *variants*.

    Variant numbers are assigned by enumeration order starting at 0.
    """
    enum_pb = descriptor_pb2.EnumDescriptorProto(
        name=name,
        value=tuple(
            descriptor_pb2.EnumValueDescriptorProto(name=v, number=i)
            for i, v in enumerate(variants)
        )
    )
    # Wrap both the enum and each of its values in the schema wrappers.
    enum = wrappers.EnumType(
        enum_pb=enum_pb,
        values=[wrappers.EnumValueType(enum_value_pb=v) for v in enum_pb.value]
    )
    return enum
5,785 | set params | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""sklearn cross-support (deprecated)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import six
from tensorflow.python.util.compat import collections_abc
def _pprint(d):
  """Render mapping *d* as 'k1=v1, k2=v2, ...' in iteration order."""
  return ', '.join('{}={}'.format(key, value) for key, value in d.items())
class _BaseEstimator(object):
  """This is a cross-import when sklearn is not available.

  Adopted from sklearn.BaseEstimator implementation.
  https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py
  """

  def get_params(self, deep=True):
    """Get parameters for this estimator.

    Args:
      deep: boolean, optional
        If `True`, will return the parameters for this estimator and
        contained subobjects that are estimators.

    Returns:
      params : mapping of string to any
      Parameter names mapped to their values.
    """
    out = {}
    # Parameters are the non-private instance attributes.
    param_names = [name for name in self.__dict__ if not name.startswith('_')]
    for key in param_names:
      value = getattr(self, key, None)
      # Callables (e.g. bound methods stored as attributes) are not params.
      if isinstance(value, collections_abc.Callable):
        continue
      # XXX: should we rather test if instance of estimator?
      if deep and hasattr(value, 'get_params'):
        deep_items = value.get_params().items()
        # Nested parameters use the '<name>__<param>' convention.
        out.update((key + '__' + k, val) for k, val in deep_items)
      out[key] = value
    return out

  def METHOD_NAME(self, **params):
    """Set the parameters of this estimator.

    The method works on simple estimators as well as on nested objects
    (such as pipelines). The former have parameters of the form
    ``<component>__<parameter>`` so that it's possible to update each
    component of a nested object.

    Args:
      **params: Parameters.

    Returns:
      self

    Raises:
      ValueError: If params contain invalid names.
    """
    if not params:
      # Simple optimisation to gain speed (inspect is slow)
      return self
    valid_params = self.get_params(deep=True)
    for key, value in six.iteritems(params):
      split = key.split('__', 1)
      if len(split) > 1:
        # nested objects case
        name, sub_name = split
        if name not in valid_params:
          raise ValueError('Invalid parameter %s for estimator %s. '
                           'Check the list of available parameters '
                           'with `estimator.get_params().keys()`.' %
                           (name, self))
        # Delegate the remainder of the key to the nested estimator.
        sub_object = valid_params[name]
        sub_object.METHOD_NAME(**{sub_name: value})
      else:
        # simple objects case
        if key not in valid_params:
          raise ValueError('Invalid parameter %s for estimator %s. '
                           'Check the list of available parameters '
                           'with `estimator.get_params().keys()`.' %
                           (key, self.__class__.__name__))
        setattr(self, key, value)
    return self

  def __repr__(self):
    # e.g. "MyEstimator(alpha=0.5, beta=1)"
    class_name = self.__class__.__name__
    return '%s(%s)' % (class_name,
                       _pprint(self.get_params(deep=False)),)
# pylint: disable=old-style-class
class _ClassifierMixin():
  """Mixin class for all classifiers."""
  # Marker mixin only; mirrors sklearn.base.ClassifierMixin's role.
  pass
class _RegressorMixin():
  """Mixin class for all regression estimators."""
  # Marker mixin only; mirrors sklearn.base.RegressorMixin's role.
  pass
class _TransformerMixin():
  """Mixin class for all transformer estimators."""
  # Marker mixin only; mirrors sklearn.base.TransformerMixin's role.
class NotFittedError(ValueError, AttributeError):
  """Exception class to raise if estimator is used before fitting.

  USE OF THIS EXCEPTION IS DEPRECATED.

  This class inherits from both ValueError and AttributeError to help with
  exception handling and backward compatibility.

  Examples:
  >>> from sklearn.svm import LinearSVC
  >>> from sklearn.exceptions import NotFittedError
  >>> try:
  ...     LinearSVC().predict([[1, 2], [2, 3], [3, 4]])
  ... except NotFittedError as e:
  ...     print(repr(e))
  ...     # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
  NotFittedError('This LinearSVC instance is not fitted yet',)

  Copied from
  https://github.com/scikit-learn/scikit-learn/master/sklearn/exceptions.py
  """
  # Body intentionally empty: the docstring is the class body.
# pylint: enable=old-style-class
def _accuracy_score(y_true, y_pred):
  """Fraction of predictions that exactly match the true labels."""
  return np.average(y_true == y_pred)
def _mean_squared_error(y_true, y_pred):
  """Mean squared error, squeezing away extra singleton dimensions first."""
  if y_true.ndim > 1:
    y_true = np.squeeze(y_true)
  if y_pred.ndim > 1:
    y_pred = np.squeeze(y_pred)
  return np.average((y_true - y_pred) ** 2)
def _train_test_split(*args, **options):
  # pylint: disable=missing-docstring
  """Naive reimplementation of sklearn's train_test_split.

  Splits each array in *args* into a (train, test) pair along axis 0
  using one shared random permutation.  Supported options: test_size,
  train_size (fractions; default train_size 0.75) and random_state.
  """
  test_size = options.pop('test_size', None)
  train_size = options.pop('train_size', None)
  random_state = options.pop('random_state', None)
  if test_size is None and train_size is None:
    train_size = 0.75
  elif train_size is None:
    train_size = 1 - test_size
  # Convert the fraction to an absolute row count of the first array.
  train_size = int(train_size * args[0].shape[0])
  # NOTE: seeds the global numpy RNG (seed(None) re-seeds from entropy).
  np.random.seed(random_state)
  indices = np.random.permutation(args[0].shape[0])
  train_idx, test_idx = indices[:train_size], indices[train_size:]
  result = []
  for x in args:
    result += [x.take(train_idx, axis=0), x.take(test_idx, axis=0)]
  return tuple(result)
# If "TENSORFLOW_SKLEARN" flag is defined then try to import from sklearn.
TRY_IMPORT_SKLEARN = os.environ.get('TENSORFLOW_SKLEARN', False)
if TRY_IMPORT_SKLEARN:
  # pylint: disable=g-import-not-at-top,g-multiple-import,unused-import
  from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin, TransformerMixin
  from sklearn.metrics import accuracy_score, log_loss, mean_squared_error
  from sklearn.model_selection import train_test_split
  # NotFittedError moved between sklearn modules across versions; try the
  # modern location first and fall back to the legacy one.
  try:
    from sklearn.exceptions import NotFittedError
  except ImportError:
    try:
      from sklearn.utils.validation import NotFittedError
    except ImportError:
      pass
else:
  # Naive implementations of sklearn classes and functions.
  BaseEstimator = _BaseEstimator
  ClassifierMixin = _ClassifierMixin
  RegressorMixin = _RegressorMixin
  TransformerMixin = _TransformerMixin
  accuracy_score = _accuracy_score
  # log_loss has no naive fallback implementation here.
  log_loss = None
  mean_squared_error = _mean_squared_error
  train_test_split = _train_test_split
5,786 | get name | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from twisted.internet import defer
from buildbot import util
from buildbot.util import lineboundaries
class FakeLogFile:
    """In-memory fake of a build log file.

    Accumulates header/stdout/stderr text, delivers complete lines to
    subscribers through a SubscriptionPoint, and reproduces the
    finish()/waitUntilFinished() lifecycle of the real log file.
    """

    def __init__(self, name):
        self.name = name
        self.header = ''
        self.stdout = ''
        self.stderr = ''
        # stream id ('h', 'o' or 'e') -> LineBoundaryFinder buffering any
        # trailing partial line for that stream
        self.lbfs = {}
        self.finished = False
        self._finish_waiters = []
        self._had_errors = False
        self.subPoint = util.subscription.SubscriptionPoint(f"{repr(name)} log")

    def METHOD_NAME(self):
        """Return the log's name."""
        return self.name

    def subscribe(self, callback):
        """Register *callback* to receive (stream, lines) deliveries."""
        return self.subPoint.subscribe(callback)

    def _getLbf(self, stream):
        # Lazily create one line-boundary finder per stream.
        try:
            return self.lbfs[stream]
        except KeyError:
            lbf = self.lbfs[stream] = lineboundaries.LineBoundaryFinder()
            return lbf

    def _on_whole_lines(self, stream, lines):
        # Deliver complete lines; adding to a finished log is a caller bug.
        self.subPoint.deliver(stream, lines)
        assert not self.finished

    def _split_lines(self, stream, text):
        # Buffer *text* until complete lines are available, then deliver.
        lbf = self._getLbf(stream)
        lines = lbf.append(text)
        if lines is None:
            return
        self._on_whole_lines(stream, lines)

    def _add_text(self, attr, stream, text, whole_lines):
        """Shared implementation of the six public add* methods.

        Decodes bytes as UTF-8, appends to the accumulating attribute
        *attr*, then routes the text either directly to subscribers
        (``whole_lines=True``) or through line splitting.
        """
        if not isinstance(text, str):
            text = text.decode('utf-8')
        setattr(self, attr, getattr(self, attr) + text)
        if whole_lines:
            self._on_whole_lines(stream, text)
        else:
            self._split_lines(stream, text)
        return defer.succeed(None)

    def addHeader(self, text):
        return self._add_text('header', 'h', text, whole_lines=False)

    def addStdout(self, text):
        return self._add_text('stdout', 'o', text, whole_lines=False)

    def addStderr(self, text):
        return self._add_text('stderr', 'e', text, whole_lines=False)

    def add_header_lines(self, text):
        return self._add_text('header', 'h', text, whole_lines=True)

    def add_stdout_lines(self, text):
        return self._add_text('stdout', 'o', text, whole_lines=True)

    def add_stderr_lines(self, text):
        return self._add_text('stderr', 'e', text, whole_lines=True)

    def isFinished(self):
        """Whether finish() has run."""
        return self.finished

    def waitUntilFinished(self):
        """Return a Deferred that fires once the log is finished."""
        d = defer.Deferred()
        if self.finished:
            # Bug fix: Deferred instances have no succeed() method (that is
            # the module-level defer.succeed); fire the deferred directly.
            d.callback(None)
        else:
            self._finish_waiters.append(d)
        return d

    def flushFakeLogfile(self):
        """Flush any buffered partial lines out to subscribers."""
        for stream, lbf in self.lbfs.items():
            lines = lbf.flush()
            if lines is not None:
                self.subPoint.deliver(stream, lines)

    def had_errors(self):
        """Whether any subscriber raised during delivery (computed in finish())."""
        return self._had_errors

    @defer.inlineCallbacks
    def finish(self):
        """Flush buffers, mark the log finished and notify all waiters."""
        assert not self.finished
        self.flushFakeLogfile()
        self.finished = True
        # notify subscribers *after* finishing the log
        self.subPoint.deliver(None, None)
        yield self.subPoint.waitForDeliveriesToFinish()
        self._had_errors = len(self.subPoint.pop_exceptions()) > 0
        # notify those waiting for finish
        for d in self._finish_waiters:
            d.callback(None)

    def fakeData(self, header='', stdout='', stderr=''):
        """Append raw text to the buffers without notifying subscribers."""
        if header:
            self.header += header
        if stdout:
            self.stdout += stdout
        if stderr:
            self.stderr += stderr
5,787 | test writeome baddim | import pytest
import zarr
import numpy as np
from brainlit.utils.write import zarr_to_omezarr, czi_to_zarr, write_trace_layer
import os
import shutil
import zipfile
from pathlib import Path
from cloudvolume import CloudVolume
@pytest.fixture(scope="session")
def init_4dczi(tmp_path_factory):
    """Return (path to the bundled 4D test .czi, fresh temp output dir)."""
    data_dir = tmp_path_factory.mktemp("data")
    czi_path = Path(__file__).parents[0] / "data" / "mosaic_test.czi"
    return czi_path, data_dir
@pytest.fixture(scope="session")
def init_3dzarr(tmp_path_factory):
    """Create a 64^3 uint16 zarr of zeros; return (zarr_path, data_dir)."""
    data_dir = tmp_path_factory.mktemp("data")
    zarr_path = data_dir / "fg.zarr"
    z = zarr.open(
        zarr_path, mode="w", shape=(64, 64, 64), dtype="uint16", chunks=(32, 32, 32)
    )
    z[:, :, :] = np.zeros((64, 64, 64))
    return zarr_path, data_dir
@pytest.fixture(scope="session")
def init_4dzarr(tmp_path_factory):
    """Create a 1x64^3 uint16 zarr of zeros (4D); return (zarr_path, data_dir)."""
    data_dir = tmp_path_factory.mktemp("data")
    zarr_path = data_dir / "fg.zarr"
    z = zarr.open(
        zarr_path,
        mode="w",
        shape=(1, 64, 64, 64),
        dtype="uint16",
        chunks=(1, 32, 32, 32),
    )
    z[:, :, :, :] = np.zeros((1, 64, 64, 64))
    return zarr_path, data_dir
@pytest.fixture(scope="function")
def init_omezarr(init_3dzarr):
    """Convert the 3D zarr fixture to ome-zarr (once); return (data_dir, res)."""
    res = [1, 1, 2] # in nm
    zarr_path, data_dir = init_3dzarr
    out_path = data_dir / "fg_ome.zarr"
    # Conversion refuses to overwrite, so only convert on first use.
    if not os.path.exists(out_path):
        zarr_to_omezarr(zarr_path=zarr_path, out_path=out_path, res=res)
    else:
        print("Relying on existing fg_ome zarr file")
    return data_dir, res
##############
### inputs ###
##############
def METHOD_NAME(init_3dzarr, init_4dzarr):
    """zarr_to_omezarr must reject 4D inputs and refuse to overwrite output."""
    # error for 4d zarrs
    zarr_path, data_dir = init_4dzarr
    out_path = data_dir / "fg_ome.zarr"
    with pytest.raises(ValueError, match=r"Conversion only supported for 3D arrays"):
        zarr_to_omezarr(zarr_path=zarr_path, out_path=out_path, res=[1, 1, 1])
    # error if ome already exists
    zarr_path, data_dir = init_3dzarr
    out_path = data_dir / "fg_ome.zarr"
    zarr_to_omezarr(zarr_path=zarr_path, out_path=out_path, res=[1, 1, 1])
    with pytest.raises(
        ValueError,
        match=f"{out_path} already exists, please delete the existing file or change the name of the ome-zarr to be created.",
    ):
        zarr_to_omezarr(zarr_path=zarr_path, out_path=out_path, res=[1, 1, 1])
    # Clean up so other tests can create the ome-zarr themselves.
    shutil.rmtree(out_path)
def test_writezarr_badpar(init_4dczi):
    """czi_to_zarr must reject a non-integer ``parallel`` argument."""
    czi_path, data_dir = init_4dczi
    with pytest.raises(ValueError, match="parallel must be positive integer, not 1"):
        czi_to_zarr(
            czi_path=czi_path, out_dir=str(data_dir), fg_channel=0, parallel="1"
        )
##################
### validation ###
##################
def test_writezarr(init_4dczi):
    """Serial conversion of the test czi yields one zarr with known contents."""
    czi_path, data_dir = init_4dczi
    zarr_paths = czi_to_zarr(
        czi_path=czi_path, out_dir=str(data_dir), fg_channel=0, parallel=1
    )
    assert len(zarr_paths) == 1
    z = zarr.open(zarr_paths[0])
    # Shape and a spot-checked voxel value of the bundled mosaic_test.czi.
    assert z.shape == (1, 624, 1756)
    assert z[0, 10, 10] == 411
def test_writezarr_parallel(init_4dczi):
    """Parallel conversion must produce the same output as the serial path."""
    czi_path, data_dir = init_4dczi
    zarr_paths = czi_to_zarr(
        czi_path=czi_path, out_dir=str(data_dir), fg_channel=0, parallel=2
    )
    assert len(zarr_paths) == 1
    z = zarr.open(zarr_paths[0])
    # Same expectations as test_writezarr.
    assert z.shape == (1, 624, 1756)
    assert z[0, 10, 10] == 411
def test_writeome(init_3dzarr):
    """Check ome-zarr metadata: micrometer units and xy-downsampled scales."""
    res = [1, 1, 2] # in nm
    # Maps an axis name to its index in the ``res`` list.
    dimension_map = {"x": 0, "y": 1, "z": 2}
    zarr_path, data_dir = init_3dzarr
    out_path = data_dir / "fg_ome.zarr"
    assert not os.path.exists(out_path)
    zarr_to_omezarr(zarr_path=zarr_path, out_path=out_path, res=res)
    assert os.path.exists(out_path)
    # check units are micrometers
    ome_zarr = zarr.open(out_path)
    metadata = ome_zarr.attrs["multiscales"][0]
    dimension_names = []
    for dimension in metadata["axes"]:
        assert dimension["unit"] == "micrometer"
        assert dimension["type"] == "space"
        dimension_names.append(dimension["name"])
    # check resolutions are multiples of 2 scaled in xy
    for resolution in metadata["datasets"]:
        lvl = int(resolution["path"])
        # Level ``lvl`` halves x/y resolution per level; z is unscaled.
        true_res = np.multiply(res, [2**lvl, 2**lvl, 1]) / 1000 # in microns
        true_res = [
            true_res[dimension_map[dimension_name]]
            for dimension_name in dimension_names
        ]
        np.testing.assert_almost_equal(
            true_res, resolution["coordinateTransformations"][0]["scale"], decimal=3
        )
def test_write_trace_layer(init_omezarr):
    """A trace layer written next to the ome-zarr advertises skeletons."""
    data_dir, res = init_omezarr
    write_trace_layer(parent_dir=data_dir, res=res)
    vol_path = "precomputed://file://" + str(data_dir / "traces")
    vol = CloudVolume(vol_path)
    assert vol.info["skeletons"] == "skeletons"
5,788 | test validate filepath executable | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=redefined-outer-name
"""Tests for the :class:`aiida.orm.nodes.data.code.installed.InstalledCode` class."""
import pathlib
import pytest
from aiida.common.exceptions import ModificationNotAllowed, ValidationError
from aiida.orm import Computer
from aiida.orm.nodes.data.code.installed import InstalledCode
def test_constructor_raises(aiida_localhost):
    """Test the constructor when it is supposed to raise."""
    # Both keyword arguments are required.
    with pytest.raises(TypeError, match=r'missing .* required positional arguments'):
        InstalledCode() # pylint: disable=no-value-for-parameter
    # ``filepath_executable`` must be a plain ``str``, not a ``pathlib.Path``.
    with pytest.raises(TypeError, match=r'Got object of type .*'):
        InstalledCode(computer=aiida_localhost, filepath_executable=pathlib.Path('/usr/bin/bash'))
    # ``computer`` must be a ``Computer`` instance, not its label.
    with pytest.raises(TypeError, match=r'Got object of type .*'):
        InstalledCode(computer='computer', filepath_executable='/usr/bin/bash')
def test_constructor(aiida_localhost):
    """Test the constructor."""
    filepath_executable = '/usr/bin/bash'
    code = InstalledCode(computer=aiida_localhost, filepath_executable=filepath_executable)
    assert code.computer.pk == aiida_localhost.pk
    # The path string is normalised to a ``pathlib.PurePath``.
    assert code.filepath_executable == pathlib.PurePath(filepath_executable)
def test_validate(aiida_localhost):
    """Test the validator is called before storing."""
    filepath_executable = '/usr/bin/bash'
    code = InstalledCode(computer=aiida_localhost, filepath_executable=filepath_executable)
    code.computer = aiida_localhost
    # Unset the attribute directly to simulate a missing executable path.
    code.base.attributes.set(code._KEY_ATTRIBUTE_FILEPATH_EXECUTABLE, None) # pylint: disable=protected-access
    with pytest.raises(ValidationError, match='The `filepath_executable` is not set.'):
        code.store()
    # Restoring the path makes the node storable again.
    code.filepath_executable = filepath_executable
    code.store()
    assert code.is_stored
def test_can_run_on_computer(aiida_localhost):
    """Test the :meth:`aiida.orm.nodes.data.code.installed.InstalledCode.can_run_on_computer` method."""
    code = InstalledCode(computer=aiida_localhost, filepath_executable='/usr/bin/bash')
    # A fresh, unrelated computer should not be able to run the code.
    computer = Computer()
    assert code.can_run_on_computer(aiida_localhost)
    assert not code.can_run_on_computer(computer)
def test_filepath_executable(aiida_localhost):
    """Test the :meth:`aiida.orm.nodes.data.code.installed.InstalledCode.filepath_executable` property."""
    filepath_executable = '/usr/bin/bash'
    code = InstalledCode(computer=aiida_localhost, filepath_executable=filepath_executable)
    assert code.filepath_executable == pathlib.PurePath(filepath_executable)
    # Relative path
    filepath_executable = 'bash'
    code = InstalledCode(computer=aiida_localhost, filepath_executable=filepath_executable)
    assert code.filepath_executable == pathlib.PurePath(filepath_executable)
    # Change through the property
    filepath_executable = '/usr/bin/cat'
    code.filepath_executable = filepath_executable
    assert code.filepath_executable == pathlib.PurePath(filepath_executable)
    # The setter only accepts ``str``, not ``pathlib.Path``.
    with pytest.raises(TypeError, match=r'Got object of type .*'):
        code.filepath_executable = pathlib.Path(filepath_executable)
    # Once stored, the node is immutable.
    code.store()
    with pytest.raises(ModificationNotAllowed):
        code.filepath_executable = filepath_executable
@pytest.fixture
def computer(request, aiida_computer_local, aiida_computer_ssh):
    """Return a computer configured for ``core.local`` and ``core.ssh`` transport."""
    # ``request.param`` selects the transport.  Computers are created
    # unconfigured so tests can first exercise the connection-error path.
    if request.param == 'core.local':
        return aiida_computer_local(configure=False)
    if request.param == 'core.ssh':
        return aiida_computer_ssh(configure=False)
    raise ValueError(f'unsupported request parameter: {request.param}')
@pytest.mark.parametrize('computer', ('core.local', 'core.ssh'), indirect=True)
def METHOD_NAME(ssh_key, computer):
    """Test the :meth:`aiida.orm.nodes.data.code.installed.InstalledCode.validate_filepath_executable` method."""
    filepath_executable = '/usr/bin/not-existing'
    code = InstalledCode(computer=computer, filepath_executable=filepath_executable)
    # The fixture's computer is unconfigured, so validation fails on connection.
    with pytest.raises(ValidationError, match=r'Could not connect to the configured computer.*'):
        code.validate_filepath_executable()
    if computer.transport_type == 'core.ssh':
        computer.configure(key_filename=str(ssh_key), key_policy='AutoAddPolicy')
    else:
        computer.configure()
    # Once connected, a non-existing absolute path is rejected ...
    with pytest.raises(ValidationError, match=r'The provided remote absolute path .* does not exist on the computer\.'):
        code.validate_filepath_executable()
    # ... and an existing one validates cleanly.
    code.filepath_executable = '/usr/bin/bash'
    code.validate_filepath_executable()
def test_full_label(aiida_localhost):
    """Test the :meth:`aiida.orm.nodes.data.code.installed.InstalledCode.full_label` property."""
    label = 'some-label'
    code = InstalledCode(label=label, computer=aiida_localhost, filepath_executable='/usr/bin/bash')
    # Full label is '<label>@<computer label>'.
    assert code.full_label == f'{label}@{aiida_localhost.label}'
def test_get_execname(aiida_localhost):
    """Test the deprecated :meth:`aiida.orm.nodes.data.code.installed.InstalledCode.get_execname` method."""
    code = InstalledCode(label='some-label', computer=aiida_localhost, filepath_executable='/usr/bin/bash')
    # Deprecated accessor returns the executable path as a plain string.
    assert code.get_execname() == '/usr/bin/bash'
5,789 | get config iam policy output | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetConfigIamPolicyResult',
'AwaitableGetConfigIamPolicyResult',
'get_config_iam_policy',
'get_config_iam_policy_output',
]
@pulumi.output_type
class GetConfigIamPolicyResult:
    """
    A collection of values returned by getConfigIamPolicy.
    """
    # NOTE: tfgen-generated output type; the constructor validates and stores
    # each invoke-result field via pulumi.set.
    def __init__(__self__, config=None, etag=None, id=None, policy_data=None, project=None):
        if config and not isinstance(config, str):
            raise TypeError("Expected argument 'config' to be a str")
        pulumi.set(__self__, "config", config)
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if policy_data and not isinstance(policy_data, str):
            raise TypeError("Expected argument 'policy_data' to be a str")
        pulumi.set(__self__, "policy_data", policy_data)
        if project and not isinstance(project, str):
            raise TypeError("Expected argument 'project' to be a str")
        pulumi.set(__self__, "project", project)
    @property
    @pulumi.getter
    def config(self) -> str:
        return pulumi.get(self, "config")
    @property
    @pulumi.getter
    def etag(self) -> str:
        """
        (Computed) The etag of the IAM policy.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="policyData")
    def policy_data(self) -> str:
        """
        (Required only by `runtimeconfig.ConfigIamPolicy`) The policy data generated by
        a `organizations_get_iam_policy` data source.
        """
        return pulumi.get(self, "policy_data")
    @property
    @pulumi.getter
    def project(self) -> str:
        return pulumi.get(self, "project")
class AwaitableGetConfigIamPolicyResult(GetConfigIamPolicyResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable ``yield`` makes this a generator function, so the
        # result is awaitable yet resolves immediately with a plain copy.
        if False:
            yield self
        return GetConfigIamPolicyResult(
            config=self.config,
            etag=self.etag,
            id=self.id,
            policy_data=self.policy_data,
            project=self.project)
def get_config_iam_policy(config: Optional[str] = None,
                          project: Optional[str] = None,
                          opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetConfigIamPolicyResult:
    """
    Use this data source to access information about an existing resource.

    :param str config: Used to find the parent resource to bind the IAM policy to
    :param str project: The ID of the project in which the resource belongs.
           If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used.
    """
    # (tfgen-generated) Invoke the provider function and unwrap each result field.
    __args__ = dict()
    __args__['config'] = config
    __args__['project'] = project
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('gcp:runtimeconfig/getConfigIamPolicy:getConfigIamPolicy', __args__, opts=opts, typ=GetConfigIamPolicyResult).value
    return AwaitableGetConfigIamPolicyResult(
        config=pulumi.get(__ret__, 'config'),
        etag=pulumi.get(__ret__, 'etag'),
        id=pulumi.get(__ret__, 'id'),
        policy_data=pulumi.get(__ret__, 'policy_data'),
        project=pulumi.get(__ret__, 'project'))
@_utilities.lift_output_func(get_config_iam_policy)
def METHOD_NAME(config: Optional[pulumi.Input[str]] = None,
                project: Optional[pulumi.Input[Optional[str]]] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetConfigIamPolicyResult]:
    """
    Use this data source to access information about an existing resource.

    :param str config: Used to find the parent resource to bind the IAM policy to
    :param str project: The ID of the project in which the resource belongs.
           If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used.
    """
    # Body intentionally empty: `lift_output_func` forwards the call to
    # get_config_iam_policy and lifts the result into a pulumi Output.
    ...
import json
from flexget.api.app import base_message
from flexget.api.core.tasks import ObjectsContainer as OC
class TestExecuteAPI:
    """Integration tests for the task execution API (`/tasks/execute/`).

    Each test POSTs an execution payload, validates the response schema,
    then pulls the queued task from the manager and runs it synchronously
    to inspect which entries were produced/accepted.
    """

    @staticmethod
    def get_task_queue(manager):
        """Used to execute task queue"""
        # The preceding API call should have queued exactly one task.
        assert len(manager.task_queue) == 1
        task = manager.task_queue.run_queue.get(timeout=0.5)
        assert task
        return task

    # Base config: 'accept_me' matches the accept regexp, 'reject_me' the
    # reject regexp.
    config = """
        tasks:
          test_task:
            mock:
              - title: accept_me
              - title: reject_me
            regexp:
              accept:
                - accept
              reject:
                - reject
    """

    def test_execute(self, api_client, manager, schema_match):
        """A minimal payload runs the configured task; one entry is accepted."""
        # Minimal payload
        payload = {'tasks': ['test_task']}
        rsp = api_client.json_post('/tasks/execute/', data=json.dumps(payload))
        assert rsp.status_code == 200
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(OC.task_execution_results_schema, data)
        assert not errors
        task = self.get_task_queue(manager)
        task.execute()
        assert len(task.accepted) == 1

    def test_inject_plain(self, api_client, manager, schema_match):
        """Injected entries replace the mock input; no accept flag -> none accepted."""
        entry = {'title': "injected", 'url': 'http://test.com'}
        payload = {"inject": [entry], 'tasks': ['test_task']}
        rsp = api_client.json_post('/tasks/execute/', data=json.dumps(payload))
        assert rsp.status_code == 200
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(OC.task_execution_results_schema, data)
        assert not errors
        task = self.get_task_queue(manager)
        task.execute()
        assert len(task.all_entries) == 1
        assert len(task.accepted) == 0

    def METHOD_NAME(self, api_client, manager, schema_match):
        """An injected entry flagged `accept: True` is accepted outright."""
        entry = {
            'title': "injected",
            'url': 'http://test.com',
            'accept': True,
            'tasks': ['test_task'],
        }
        payload = {"inject": [entry], 'tasks': ['test_task']}
        rsp = api_client.json_post('/tasks/execute/', data=json.dumps(payload))
        assert rsp.status_code == 200
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(OC.task_execution_results_schema, data)
        assert not errors
        task = self.get_task_queue(manager)
        task.execute()
        assert len(task.all_entries) == 1
        assert len(task.accepted) == 1

    def test_inject_force(self, api_client, manager, schema_match):
        """`force: True` lets a previously-seen entry be accepted again."""
        entry = {'title': "accept", 'url': 'http://test.com'}
        payload = {"inject": [entry], 'tasks': ['test_task']}
        rsp = api_client.json_post('/tasks/execute/', data=json.dumps(payload))
        assert rsp.status_code == 200
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(OC.task_execution_results_schema, data)
        assert not errors
        task = self.get_task_queue(manager)
        task.execute()
        assert len(task.all_entries) == 1
        assert len(task.accepted) == 1
        rsp = api_client.json_post('/tasks/execute/', data=json.dumps(payload))
        assert rsp.status_code == 200
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(OC.task_execution_results_schema, data)
        assert not errors
        task = self.get_task_queue(manager)
        task.execute()
        assert len(task.all_entries) == 1
        # Rejected due to Seen
        assert len(task.accepted) == 0
        # Forcing the entry not to be disabled
        # (mutating `entry` also mutates `payload`, which holds a reference to it)
        entry['force'] = True
        rsp = api_client.json_post('/tasks/execute/', data=json.dumps(payload))
        assert rsp.status_code == 200
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(OC.task_execution_results_schema, data)
        assert not errors
        task = self.get_task_queue(manager)
        task.execute()
        assert len(task.all_entries) == 1
        assert len(task.accepted) == 1

    def test_inject_with_fields(self, api_client, manager, schema_match):
        """Extra `fields` on an injected entry end up on the task entry."""
        fields = {'imdb_id': "tt1234567", 'tmdb_id': "1234567"}
        entry = {'title': "injected", 'url': 'http://test.com', 'fields': fields, 'accept': True}
        payload = {"inject": [entry], 'tasks': ['test_task']}
        rsp = api_client.json_post('/tasks/execute/', data=json.dumps(payload))
        assert rsp.status_code == 200
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(OC.task_execution_results_schema, data)
        assert not errors
        task = self.get_task_queue(manager)
        task.execute()
        assert len(task.all_entries) == 1
        assert len(task.accepted) == 1
        entry = task.find_entry(title='injected')
        assert entry['imdb_id'] == "tt1234567"
        assert entry['tmdb_id'] == "1234567"

    def test_multiple_entries(self, api_client, manager, schema_match):
        """Several entries can be injected in a single request."""
        entry1 = {'title': "entry1", 'url': 'http://test.com', 'accept': True}
        entry2 = {'title': "entry2", 'url': 'http://test.com', 'accept': True}
        payload = {"inject": [entry1, entry2], 'tasks': ['test_task']}
        rsp = api_client.json_post('/tasks/execute/', data=json.dumps(payload))
        assert rsp.status_code == 200
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(OC.task_execution_results_schema, data)
        assert not errors
        task = self.get_task_queue(manager)
        task.execute()
        assert len(task.all_entries) == 2
        assert len(task.accepted) == 2

    def test_2nd_endpoint(self, api_client, manager, schema_match):
        """The `/inject/` endpoint behaves like `/tasks/execute/` for injection."""
        entry = {'title': "injected", 'url': 'http://test.com', 'accept': True}
        payload = {"inject": [entry], 'tasks': ['test_task']}
        rsp = api_client.json_post('/inject/', data=json.dumps(payload))
        assert rsp.status_code == 200
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(OC.task_execution_results_schema, data)
        assert not errors
        task = self.get_task_queue(manager)
        task.execute()
        assert len(task.all_entries) == 1
        assert len(task.accepted) == 1
class TestExecuteMultipleTasks:
    """Tests for executing several tasks in one API request."""

    config = """
        tasks:
          test_task1:
            mock:
              - title: accept_me1
            accept_all: yes
          test_task2:
            mock:
              - title: accept_me2
            accept_all: yes
    """

    def test_execute_multiple_tasks(self, api_client, manager, schema_match):
        """Empty payload -> 422, unknown task -> 404, two valid tasks -> 200."""
        rsp = api_client.json_post('/tasks/execute/', data=json.dumps({}))
        assert rsp.status_code == 422
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(base_message, data)
        assert not errors
        payload = {'tasks': ['non_existing_test_task']}
        rsp = api_client.json_post('/tasks/execute/', data=json.dumps(payload))
        assert rsp.status_code == 404
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(base_message, data)
        assert not errors
        payload = {'tasks': ['test_task1', 'test_task2']}
        rsp = api_client.json_post('/tasks/execute/', data=json.dumps(payload))
        assert rsp.status_code == 200
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(OC.task_execution_results_schema, data)
        assert not errors
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
from TestHarnessTestCase import TestHarnessTestCase
class TestHarnessTester(TestHarnessTestCase):
    """Tests for the TestHarness `--ignore` option.

    All regex patterns are raw strings so sequences such as ``\.`` and
    ``\[`` reach ``re`` verbatim. The original used plain strings, which
    emits DeprecationWarning for invalid escape sequences (a SyntaxWarning
    as of Python 3.12).
    """

    def testIgnoreSkip(self):
        """
        Test that `--ignore skip` runs tests normally skipped
        """
        # Run a skipped test
        output = self.runTests('-i', 'ignore_skipped', '--ignore', 'skip')
        self.assertRegex(output.decode('utf-8'), r'test_harness\.ignore_skipped.*?OK')

    def testIgnoreHeavy(self):
        """
        Test that `--ignore heavy` runs tests normally skipped if heavy
        """
        # Run a skipped heavy test
        output = self.runTests('-i', 'ignore_heavy', '--ignore', 'heavy')
        self.assertRegex(output.decode('utf-8'), r'test_harness\.ignore_heavy.*?OK')

    def testIgnoreCompiler(self):
        """
        Test that `--ignore compiler` runs tests normally skipped if compiler
        is not available
        """
        # Run a skipped compiler test
        output = self.runTests('-i', 'ignore_compiler', '--ignore', 'compiler')
        self.assertRegex(output.decode('utf-8'), r'test_harness\.ignore_compiler.*?OK')

    def testIgnorePlatform(self):
        """
        Test that `--ignore platform` runs tests normally skipped if platform
        is not available
        """
        # Run a skipped platform test
        output = self.runTests('-i', 'ignore_platform', '--ignore', 'platform')
        self.assertRegex(output.decode('utf-8'), r'test_harness\.ignore_platform.*?OK')

    def METHOD_NAME(self):
        """
        Tests that `--ignore prereq` runs tests normally skipped if prereqs
        are not satisfied
        """
        # Run a skipped prereq test
        output = self.runTests('--no-color', '-i', 'ignore_prereq', '--ignore', 'prereq')
        decoded = output.decode('utf-8')
        self.assertRegex(decoded, r'test_harness\.always_skipped.*? \[ALWAYS SKIPPED\] SKIP')
        self.assertRegex(decoded, r'test_harness\.ignore_skipped_dependency.*?OK')
        # Check that a dependency test runs when its prereq test is skipped
        output = self.runTests('--no-color', '-i', 'ignore_prereq', '--ignore', 'skip')
        decoded = output.decode('utf-8')
        self.assertRegex(decoded, r'test_harness\.always_skipped.*?OK')
        self.assertRegex(decoded, r'test_harness\.ignore_skipped_dependency.*?OK')

    def testIgnoreMultiple(self):
        """
        Test that `--ignore [list]` runs tests with multiple caveats
        preventing the test from running
        """
        # Run a multiple caveat skipped test by manually supplying each caveat
        output = self.runTests('-i', 'ignore_multiple', '--ignore', 'skip heavy compiler platform')
        self.assertRegex(output.decode('utf-8'), r'test_harness\.ignore_multiple.*?OK')

    def testIgnoreAll(self):
        """
        Test that the blanket option `--ignore` will run anything that would
        normally be skipped
        """
        # Run a multiple caveat skipped test using built in default 'all'
        output = self.runTests('-i', 'ignore_multiple', '--ignore')
        self.assertRegex(output.decode('utf-8'), r'test_harness\.ignore_multiple.*?OK')

    def testIgnoreMissingOne(self):
        """
        Test that `--ignore [list]` (but missing one) will still have that
        test skipped (platform not ignored)
        """
        # Skip a multiple caveat test by not supplying enough caveats to ignore
        output = self.runTests('--no-color', '-i', 'ignore_multiple', '--ignore', 'skip heavy compiler')
        self.assertRegex(output.decode('utf-8'), r'test_harness\.ignore_multiple.*? \[PLATFORM!=NON_EXISTENT\] SKIP')

    def testIgnoreMultiplePreReq(self):
        """
        Test that `--ignore [assorted]` on a multi-required caveat test
        operates the way it should
        """
        # Run a multiple caveat prereq test using built in default 'all'
        output = self.runTests('-i', 'ignore_multiple_prereq', '--ignore')
        decoded = output.decode('utf-8')
        self.assertRegex(decoded, r'test_harness\.always_skipped.*?OK')
        self.assertRegex(decoded, r'test_harness\.ignore_multi_prereq_dependency.*?OK')
        # Run a multiple caveat prereq test by manually supplying each caveat
        output = self.runTests('-i', 'ignore_multiple_prereq', '--ignore', 'prereq skip heavy compiler platform')
        decoded = output.decode('utf-8')
        self.assertRegex(decoded, r'test_harness\.always_skipped.*?OK')
        self.assertRegex(decoded, r'test_harness\.ignore_multi_prereq_dependency.*?OK')
        # Skip a multiple caveat prereq test by not supplying enough caveats to ignore
        output = self.runTests('--no-color', '-i', 'ignore_multiple_prereq', '--ignore', 'prereq skip heavy compiler')
        decoded = output.decode('utf-8')
        self.assertRegex(decoded, r'test_harness\.always_skipped.*?OK')
        self.assertRegex(decoded, r'test_harness\.ignore_multi_prereq_dependency.*? \[PLATFORM!=NON_EXISTENT\] SKIP')
        # Check that a multiple caveat dependency test runs when its prereq test is skipped
        # This test may seem redundant, but `prereq` is handled differently than the other caveats
        output = self.runTests('--no-color', '-i', 'ignore_multiple_prereq', '--ignore', 'prereq heavy compiler platform')
        decoded = output.decode('utf-8')
        self.assertRegex(decoded, r'test_harness\.always_skipped.*? \[ALWAYS SKIPPED\] SKIP')
        self.assertRegex(decoded, r'test_harness\.ignore_multi_prereq_dependency.*?OK')
        # Check that by supplying a very specific set of ignored parameters, we
        # can properly trigger a skipped dependency scenario
        output = self.runTests('--no-color', '-i', 'ignore_multiple_prereq', '--ignore', 'heavy compiler platform')
        decoded = output.decode('utf-8')
        self.assertRegex(decoded, r'test_harness\.always_skipped.*? \[ALWAYS SKIPPED\] SKIP')
        self.assertRegex(decoded, r'test_harness\.ignore_multi_prereq_dependency.*? \[SKIPPED DEPENDENCY\] SKIP')
from __future__ import print_function
import numpy as np
import os
import kaldiio
from multiprocessing import Pool
import argparse
from tqdm import tqdm
import math
from funasr.utils.types import str2triple_str
import logging
from typing import List, Union, Tuple, Sequence
from funasr.bin.sv_inference import inference_modelscope
import soundfile
import torch
class MultiProcessRunner:
    """Chunk a task list and fan it out over a multiprocessing pool.

    Subclasses implement `prepare` (build the task list and shared params)
    and `post` (consume the per-chunk results); the per-chunk work is the
    callable passed to the constructor.
    """

    def __init__(self, fn):
        # `fn` must be a module-level callable so multiprocessing can pickle it.
        self.process = fn

    def run(self):
        """Parse common CLI options, chunk the tasks, run the pool, post-process."""
        parser = argparse.ArgumentParser("")
        # Task-independent options
        parser.add_argument("--njobs", type=int, default=16)
        parser.add_argument("--debug", action="store_true", default=False)
        parser.add_argument("--no_pbar", action="store_true", default=False)
        parser.add_argument("--verbose", action="store_true", default=False)
        parser.add_argument("--log_level", type=str, default="INFO")
        parser.add_argument("--sr", type=int, default=16000)
        task_list, shared_param, args = self.prepare(parser)
        # Ceil-divide so every task lands in some chunk (last chunk may be short).
        chunk_size = int(math.ceil(float(len(task_list)) / args.njobs))
        if args.verbose:
            print("Split {} tasks into {} sub-tasks with chunk_size {}".format(len(task_list), args.njobs, chunk_size))
        subtask_list = [(i, task_list[i * chunk_size: (i + 1) * chunk_size], shared_param, args)
                        for i in range(args.njobs)]
        result_list = self.pool_run(subtask_list, args)
        self.post(result_list, args)

    def prepare(self, parser: argparse.ArgumentParser):
        """Add task-specific options and return (task_list, shared_param, args)."""
        raise NotImplementedError("Please implement the prepare function.")

    def post(self, results_list: list, args: argparse.Namespace):
        """Consume the list of per-chunk results once the pool has finished."""
        raise NotImplementedError("Please implement the post function.")

    def pool_run(self, tasks: list, args: argparse.Namespace):
        """Run `self.process` over the chunks (serially on chunk 0 when --debug)."""
        results = []
        if args.debug:
            # Debug mode: run only the first chunk in-process for easy tracing.
            one_result = self.process(tasks[0])
            results.append(one_result)
        else:
            pool = Pool(args.njobs)
            for one_result in tqdm(pool.imap(self.process, tasks), total=len(tasks), ascii=True, disable=args.no_pbar):
                results.append(one_result)
            pool.close()
        return results
class MyRunner(MultiProcessRunner):
    """Runner that scores speaker-verification pairs listed in scp files."""

    def prepare(self, parser: argparse.ArgumentParser):
        """Parse task-specific CLI options and build the key/task lists.

        Returns (task_list, [speech_dict, ref_speech_dict], args).
        """
        parser.add_argument(
            "--gpu_inference",
            # NOTE(review): argparse `type=bool` treats any non-empty string
            # (including "False") as True — confirm callers only pass it as a
            # bare flag or fix with a str2bool helper.
            type=bool,
            default=False
        )
        parser.add_argument(
            "--data_path_and_name_and_type",
            type=str2triple_str,
            required=True,
            action="append"
        )
        parser.add_argument(
            "--gpu_devices",
            type=lambda devices: devices.split(","),
            default=None,
        )
        args = parser.parse_args()
        logging.basicConfig(
            level=args.log_level,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        # Fall back to CPU when GPU inference was requested without devices.
        if args.gpu_inference and (args.gpu_devices is None or len(args.gpu_devices) == 0):
            logging.warning("gpu_inference is set to True, but gpu_devices is not given, use CPU instead.")
            args.gpu_inference = False
        if args.gpu_inference:
            # One batch of workers per GPU device.
            args.njobs = args.njobs * len(args.gpu_devices)
        speech_dict = {}
        ref_speech_dict = {}
        for _path, _name, _type in args.data_path_and_name_and_type:
            if _name == "speech":
                speech_dict = self.METHOD_NAME(_path)
            elif _name == "ref_speech":
                ref_speech_dict = self.METHOD_NAME(_path)
        task_list, args.njobs = self.get_key_list(args.data_path_and_name_and_type, args.njobs)
        return task_list, [speech_dict, ref_speech_dict], args

    def METHOD_NAME(self, file_path):
        """Read a 'key path' scp file into a {key: path} dict."""
        results = {}
        # `with` closes the handle deterministically (the original leaked it).
        with open(file_path, "r") as fin:
            for line in fin:
                key, path = line.strip().split(" ", 1)
                results[key] = path
        return results

    def get_key_list(
        self,
        data_path_and_name_and_type: Sequence[Tuple[str, str, str]],
        njobs: int
    ):
        """Return (keys of the first data file, njobs clamped to the key count)."""
        first_data = data_path_and_name_and_type[0]
        with open(first_data[0], "r") as fin:
            content = fin.readlines()
        line_number = len(content)
        clamped_njobs = min(njobs, line_number)
        # Only warn when njobs was actually reduced (the original logged
        # unconditionally, which was misleading).
        if clamped_njobs < njobs:
            logging.warning("njobs is reduced to {}, since only {} lines exist in {}".format(
                clamped_njobs, line_number, first_data[0],
            ))
        key_list = [line.strip().split(" ", 1)[0] for line in content]
        return key_list, clamped_njobs

    def post(self, results_list: list, args: argparse.Namespace):
        """Log every (key, score) pair produced by the workers."""
        for results in results_list:
            for key, value in results:
                logging.info("{} {}".format(key, value))
def process(task_args):
    """Worker entry point: score each key's (speech, ref_speech) pair.

    `task_args` is (task_id, key_list, [speech_dict, ref_speech_dict], args)
    as built by MultiProcessRunner.run; returns {key: score}.
    """
    task_id, key_list, [speech_dict, ref_speech_dict], args = task_args
    if args.gpu_inference:
        # Round-robin the worker chunks over the available GPUs.
        device = args.gpu_devices[task_id % len(args.gpu_devices)]
        # BUG FIX: the original called "cuda:".format(device) — a format string
        # with no placeholder — so every worker got the literal "cuda:".
        torch.cuda.set_device("cuda:{}".format(device))
    inference_func = inference_modelscope(
        output_dir=None,
        batch_size=1,
        dtype="float32",
        ngpu=1 if args.gpu_inference else 0,
        seed=0,
        num_workers=0,
        log_level=logging.INFO,
        key_file=None,
        sv_train_config="sv.yaml",
        sv_model_file="sv.pb",
        model_tag=None,
        allow_variable_data_keys=True,
        streaming=False,
        embedding_node="resnet1_dense",
        sv_threshold=0.9465,
    )
    results = {}
    for key in key_list:
        # soundfile.read returns (samples, sample_rate); only samples are needed.
        speech = soundfile.read(speech_dict[key])[0]
        ref_speech = soundfile.read(ref_speech_dict[key])[0]
        ret = inference_func(None, (speech, ref_speech))
        results[key] = ret["value"]
    return results
if __name__ == '__main__':
    # Entry point: fan the scp keys out over the process pool.
    my_runner = MyRunner(process)
    my_runner.run()
# -*- coding: utf-8 -*-
"""
/***************************************************************************
QFieldCloudDialog
A QGIS plugin
Sync your projects to QField
-------------------
begin : 2020-08-01
git sha : $Format:%H$
copyright : (C) 2020 by OPENGIS.ch
email : info@opengis.ch
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import os
from typing import Callable
from qgis.PyQt.QtCore import Qt
from qgis.PyQt.QtGui import QPixmap
from qgis.PyQt.QtWidgets import (
QApplication,
QDialog,
QDialogButtonBox,
QMainWindow,
QWidget,
)
from qgis.PyQt.uic import loadUiType
from qfieldsync.core import Preferences
from qfieldsync.core.cloud_api import CloudNetworkAccessManager
CloudLoginDialogUi, _ = loadUiType(
os.path.join(os.path.dirname(__file__), "../ui/cloud_login_dialog.ui")
)
class CloudLoginDialog(QDialog, CloudLoginDialogUi):
    """Sign-in dialog for QFieldCloud.

    A single live dialog is kept in the class attribute `instance` so repeated
    calls to `show_auth_dialog` reuse the already-open dialog.
    """

    # Singleton: the currently-open dialog, or None.
    instance = None

    @staticmethod
    def show_auth_dialog(
        network_manager: CloudNetworkAccessManager,
        accepted_cb: Callable = None,
        rejected_cb: Callable = None,
        parent: QWidget = None,
    ):
        """Show (or re-show) the singleton login dialog and start authentication.

        Optional callbacks are connected to the dialog's accepted/rejected
        signals. Returns the dialog instance.
        """
        if CloudLoginDialog.instance:
            CloudLoginDialog.instance.show()
            return CloudLoginDialog.instance
        CloudLoginDialog.instance = CloudLoginDialog(network_manager, parent)
        CloudLoginDialog.instance.authenticate()
        if accepted_cb:
            CloudLoginDialog.instance.accepted.connect(accepted_cb)
        if rejected_cb:
            CloudLoginDialog.instance.rejected.connect(rejected_cb)

        def METHOD_NAME(result):
            # Drop the singleton when the dialog closes so the next call
            # creates a fresh dialog.
            CloudLoginDialog.instance = None

        CloudLoginDialog.instance.finished.connect(METHOD_NAME)
        return CloudLoginDialog.instance

    def __init__(
        self, network_manager: CloudNetworkAccessManager, parent: QWidget = None
    ) -> None:
        """Constructor."""
        super(CloudLoginDialog, self).__init__(parent=parent)
        self.setupUi(self)
        self.preferences = Preferences()
        self.network_manager = network_manager
        self.buttonBox.button(QDialogButtonBox.Ok).setText(self.tr("Sign In"))
        self.buttonBox.button(QDialogButtonBox.Ok).clicked.connect(
            self.on_login_button_clicked
        )
        self.buttonBox.button(QDialogButtonBox.Cancel).clicked.connect(
            self.on_cancel_button_clicked
        )
        # The server URL chooser is hidden by default; revealed by
        # double-clicking the logo (see toggle_server_url_visibility).
        self.serverUrlLabel.setVisible(False)
        self.serverUrlCmb.setVisible(False)
        for server_url in self.network_manager.server_urls():
            self.serverUrlCmb.addItem(server_url)
        # Prefill the form from the stored auth configuration.
        cfg = self.network_manager.auth()
        remember_me = self.preferences.value("qfieldCloudRememberMe")
        self.serverUrlCmb.setCurrentText(cfg.uri() or self.network_manager.url)
        self.usernameLineEdit.setText(cfg.config("username"))
        self.passwordLineEdit.setText(cfg.config("password"))
        self.rememberMeCheckBox.setChecked(remember_me)
        self.network_manager.login_finished.connect(self.on_login_finished)
        self.qfieldCloudIcon.setAlignment(Qt.AlignHCenter)
        self.qfieldCloudIcon.setPixmap(
            QPixmap(
                os.path.join(
                    os.path.dirname(__file__), "../resources/qfieldcloud_logo.png"
                )
            )
        )
        self.qfieldCloudIcon.setMinimumSize(175, 180)
        # Hidden affordance: double-clicking the logo toggles the URL chooser.
        self.qfieldCloudIcon.mouseDoubleClickEvent = (
            lambda event: self.toggle_server_url_visibility()
        )
        self.rejected.connect(self.on_rejected)
        self.hide()

    def on_rejected(self) -> None:
        """Restore the cursor and re-enable the parent when the user cancels."""
        QApplication.restoreOverrideCursor()
        if self.parent():
            self.parent().setEnabled(True)
        self.setEnabled(True)

    def toggle_server_url_visibility(self) -> None:
        """Show/hide the server URL label and combo box."""
        self.serverUrlLabel.setVisible(not self.serverUrlLabel.isVisible())
        self.serverUrlCmb.setVisible(not self.serverUrlCmb.isVisible())

    def authenticate(self) -> None:
        """Start authentication: reuse a stored token if present, else show the form."""
        self.usernameLineEdit.setEnabled(True)
        self.passwordLineEdit.setEnabled(True)
        self.rememberMeCheckBox.setEnabled(True)
        self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(True)
        if self.parent() and not isinstance(self.parent(), QMainWindow):
            self.parent().setEnabled(False)
            self.setEnabled(True)
        cfg = self.network_manager.auth()
        if cfg.config("token"):
            # A token exists: lock the form while we re-validate credentials.
            self.usernameLineEdit.setEnabled(False)
            self.passwordLineEdit.setEnabled(False)
            self.rememberMeCheckBox.setEnabled(False)
            self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False)
            self.network_manager.set_url(cfg.uri())
            self.network_manager.set_auth(self.network_manager.url, token="")
            # don't trust the password, just login once again
            self.network_manager.login(cfg.config("username"), cfg.config("password"))
        if not cfg.config("token") or not self.parent():
            self.show()

    def on_login_button_clicked(self) -> None:
        """Submit the entered credentials and remember the user's preference."""
        QApplication.setOverrideCursor(Qt.WaitCursor)
        self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False)
        self.rememberMeCheckBox.setEnabled(False)
        server_url = self.serverUrlCmb.currentText()
        username = self.usernameLineEdit.text()
        password = self.passwordLineEdit.text()
        remember_me = self.rememberMeCheckBox.isChecked()
        self.network_manager.set_auth(server_url, username=username, password=password)
        self.network_manager.set_url(server_url)
        self.network_manager.login(username, password)
        self.preferences.set_value("qfieldCloudRememberMe", remember_me)

    def on_login_finished(self) -> None:
        """Handle the network manager's login result: show errors or accept."""
        QApplication.restoreOverrideCursor()
        if self.parent():
            self.parent().setEnabled(True)
            self.setEnabled(True)
        if not self.network_manager.has_token():
            # Login failed: surface the error and re-enable the form for retry.
            self.loginFeedbackLabel.setText(self.network_manager.get_last_login_error())
            self.loginFeedbackLabel.setVisible(True)
            self.usernameLineEdit.setEnabled(True)
            self.passwordLineEdit.setEnabled(True)
            self.rememberMeCheckBox.setEnabled(True)
            self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(True)
            return
        self.usernameLineEdit.setEnabled(False)
        self.passwordLineEdit.setEnabled(False)
        self.rememberMeCheckBox.setEnabled(False)
        self.done(QDialog.Accepted)

    def on_cancel_button_clicked(self):
        """Close the dialog via the rejected path."""
        self.reject()
import pytest
from labelbox.exceptions import InconsistentOntologyException
from labelbox import Tool, Classification, Option, OntologyBuilder
# Reference ontology fixture: one tool per tool type, nested classifications
# on the bbox tool (radio -> option -> text), and one global radio question.
_SAMPLE_ONTOLOGY = {
    "tools": [
        {"schemaNodeId": None, "featureSchemaId": None, "required": False,
         "name": "poly", "color": "#FF0000", "tool": "polygon",
         "classifications": []},
        {"schemaNodeId": None, "featureSchemaId": None, "required": False,
         "name": "segment", "color": "#FF0000", "tool": "superpixel",
         "classifications": []},
        {"schemaNodeId": None, "featureSchemaId": None, "required": False,
         "name": "bbox", "color": "#FF0000", "tool": "rectangle",
         "classifications": [
             {"schemaNodeId": None, "featureSchemaId": None, "required": True,
              "instructions": "nested classification",
              "name": "nested classification", "type": "radio",
              "options": [
                  {"schemaNodeId": None, "featureSchemaId": None,
                   "label": "first", "value": "first",
                   "options": [
                       {"schemaNodeId": None, "featureSchemaId": None,
                        "required": False,
                        "instructions": "nested nested text",
                        "name": "nested nested text", "type": "text",
                        "options": []},
                   ]},
                  {"schemaNodeId": None, "featureSchemaId": None,
                   "label": "second", "value": "second", "options": []},
              ]},
             {"schemaNodeId": None, "featureSchemaId": None, "required": True,
              "instructions": "nested text", "name": "nested text",
              "type": "text", "options": []},
         ]},
        {"schemaNodeId": None, "featureSchemaId": None, "required": False,
         "name": "dot", "color": "#FF0000", "tool": "point",
         "classifications": []},
        {"schemaNodeId": None, "featureSchemaId": None, "required": False,
         "name": "polyline", "color": "#FF0000", "tool": "line",
         "classifications": []},
        {"schemaNodeId": None, "featureSchemaId": None, "required": False,
         "name": "ner", "color": "#FF0000", "tool": "named-entity",
         "classifications": []},
    ],
    "classifications": [
        {"schemaNodeId": None, "featureSchemaId": None, "required": True,
         "instructions": "This is a question.", "name": "This is a question.",
         "type": "radio", "scope": "global",
         "options": [
             {"schemaNodeId": None, "featureSchemaId": None, "label": "yes",
              "value": "definitely yes", "options": []},
             {"schemaNodeId": None, "featureSchemaId": None, "label": "no",
              "value": "definitely not", "options": []},
         ]},
    ],
}
@pytest.mark.parametrize("tool_type", list(Tool.Type))
def test_create_tool(tool_type) -> None:
    """Every Tool.Type value can be used to construct a Tool."""
    created = Tool(tool=tool_type, name="tool")
    assert created.tool == tool_type
@pytest.mark.parametrize("class_type", list(Classification.Type))
def test_create_classification(class_type) -> None:
    """Every Classification.Type value can be used to construct a Classification."""
    created = Classification(class_type=class_type, name="classification")
    assert created.class_type == class_type
@pytest.mark.parametrize("value, expected_value, typing",
                         [(3, 3, int), ("string", "string", str)])
def METHOD_NAME(value, expected_value, typing) -> None:
    """An Option built from a value alone mirrors that value into its label."""
    option = Option(value=value)
    assert option.value == expected_value
    assert option.label == option.value
@pytest.mark.parametrize("value, label, expected_value, typing",
                         [(3, 2, 3, int),
                          ("string", "another string", "string", str)])
def test_create_option_with_value_and_label(value, label, expected_value,
                                            typing) -> None:
    """An explicit label stays distinct from the option's value."""
    option = Option(value=value, label=label)
    assert option.value == expected_value
    assert option.label != option.value
    assert isinstance(option.value, typing)
def test_create_empty_ontology() -> None:
    """A fresh OntologyBuilder starts with no tools and no classifications."""
    builder = OntologyBuilder()
    assert builder.tools == []
    assert builder.classifications == []
def test_add_ontology_tool() -> None:
    """Tools accumulate on the builder; a duplicate tool name is rejected."""
    builder = OntologyBuilder()
    builder.add_tool(Tool(tool=Tool.Type.BBOX, name="bounding box"))
    builder.add_tool(Tool(tool=Tool.Type.SEGMENTATION, name="segmentation"))
    assert len(builder.tools) == 2
    for added in builder.tools:
        assert type(added) == Tool
    with pytest.raises(InconsistentOntologyException) as exc:
        builder.add_tool(Tool(tool=Tool.Type.BBOX, name="bounding box"))
    assert "Duplicate tool name" in str(exc.value)
def test_add_ontology_classification() -> None:
    """Classifications accumulate on the builder; duplicate names are rejected."""
    builder = OntologyBuilder()
    builder.add_classification(
        Classification(class_type=Classification.Type.TEXT, name="text"))
    builder.add_classification(
        Classification(class_type=Classification.Type.CHECKLIST,
                       name="checklist"))
    assert len(builder.classifications) == 2
    for added in builder.classifications:
        assert type(added) == Classification
    with pytest.raises(InconsistentOntologyException) as exc:
        builder.add_classification(
            Classification(class_type=Classification.Type.TEXT, name="text"))
    assert "Duplicate classification name" in str(exc.value)
def test_tool_add_classification() -> None:
    """A nested classification attaches to a tool exactly once."""
    tool = Tool(tool=Tool.Type.SEGMENTATION, name="segmentation")
    nested = Classification(class_type=Classification.Type.TEXT, name="text")
    tool.add_classification(nested)
    assert tool.classifications == [nested]
    with pytest.raises(Exception) as exc:
        tool.add_classification(nested)
    assert "Duplicate nested classification" in str(exc)
def test_classification_add_option() -> None:
    """An option attaches to a radio classification exactly once."""
    radio = Classification(class_type=Classification.Type.RADIO, name="radio")
    choice = Option(value="option")
    radio.add_option(choice)
    assert radio.options == [choice]
    with pytest.raises(InconsistentOntologyException) as exc:
        radio.add_option(Option(value="option"))
    assert "Duplicate option" in str(exc.value)
def test_option_add_option() -> None:
    """A classification nests under an option exactly once."""
    choice = Option(value="option")
    nested = Classification(class_type=Classification.Type.TEXT, name="text")
    choice.add_option(nested)
    assert choice.options == [nested]
    with pytest.raises(InconsistentOntologyException) as exc:
        choice.add_option(nested)
    assert "Duplicate nested classification" in str(exc.value)
def test_ontology_asdict() -> None:
    """Round-trip: from_dict followed by asdict reproduces the input dict."""
    rebuilt = OntologyBuilder.from_dict(_SAMPLE_ONTOLOGY)
    assert rebuilt.asdict() == _SAMPLE_ONTOLOGY
def test_classification_using_instructions_instead_of_name_shows_warning():
    """Passing only `instructions` (deprecated name alias) must emit a warning."""
    with pytest.warns(Warning):
        Classification(class_type=Classification.Type.TEXT, instructions="text")
def test_classification_without_name_raises_error():
    """A Classification with neither name nor instructions is invalid."""
    with pytest.raises(ValueError):
        Classification(class_type=Classification.Type.TEXT)
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import json
import logging
from typing import List, Optional
from django.utils.translation import ugettext_lazy as _
from backend.components import cluster_manager as cm
from backend.components import paas_cc
from backend.container_service.clusters.base.models import CtxCluster
from backend.container_service.clusters.constants import ClusterType
from backend.resources.namespace import Namespace
from backend.resources.namespace.constants import PROJ_CODE_ANNO_KEY
from backend.resources.node.client import Node
from backend.utils.basic import getitems
from backend.utils.cache import region
from backend.utils.decorators import parse_response_data
from backend.utils.errcodes import ErrorCode
from backend.utils.error_codes import error_codes
logger = logging.getLogger(__name__)
def get_clusters(access_token, project_id):
    """Return every cluster recorded for the project in PaaS-CC.

    Raises APIError when PaaS-CC responds with a non-zero code.
    """
    resp = paas_cc.get_all_clusters(access_token, project_id, desire_all_data=True)
    if resp.get('code') != ErrorCode.NoError:
        raise error_codes.APIError(f"get clusters error, {resp.get('message')}")
    return resp.get("data", {}).get("results") or []
def get_cluster_versions(access_token, kind="", ver_id="", env=""):
    """Return [{version_id, version_name}, ...] for the available cluster versions.

    Raises APIError when PaaS-CC responds with a non-zero code.
    """
    resp = paas_cc.get_cluster_versions(access_token, kind=kind, ver_id=ver_id, env=env)
    if resp.get('code') != ErrorCode.NoError:
        raise error_codes.APIError(f"get cluster version, {resp.get('message')}")
    data = resp.get("data") or []
    version_list = []
    # Sort by ID so that stable versions are listed first.
    data.sort(key=lambda info: info["id"])
    for info in data:
        # `configure` is a JSON string that may carry a friendly version name.
        configure = json.loads(info.get("configure") or "{}")
        version_list.append(
            {"version_id": info["version"], "version_name": configure.get("version_name") or info["version"]}
        )
    return version_list
def get_cluster_masters(access_token, project_id, cluster_id):
    """Fetch the master node records of a cluster from PaaS-CC.

    Raises APIError when the call fails or when no masters are recorded.
    """
    resp = paas_cc.get_master_node_list(access_token, project_id, cluster_id)
    if resp.get("code") != ErrorCode.NoError:
        raise error_codes.APIError(_("获取集群master ip失败,{}").format(resp.get("message")))
    results = resp.get("data", {}).get("results") or []
    if not results:
        raise error_codes.APIError(_("获取集群master ip为空"))
    return results
def get_cluster_nodes(access_token, project_id, cluster_id):
    """Fetch node info (inner_ip/status) directly from the cluster.

    NOTE: nodes are read from the cluster itself, not PaaS-CC, to avoid
    stale/inconsistent data; degrades to an empty list on any query error.
    """
    ctx_cluster = CtxCluster.create(
        id=cluster_id,
        project_id=project_id,
        token=access_token,
    )
    try:
        cluster_nodes = Node(ctx_cluster).list(is_format=False)
    except Exception as e:
        # Best-effort: log the failure and return an empty node list.
        logger.error("查询集群内节点数据异常, %s", e)
        return []
    if cluster_nodes:
        return [{"inner_ip": node.inner_ip, "status": node.node_status} for node in cluster_nodes.items]
    return []
def get_cluster_snapshot(access_token, project_id, cluster_id):
    """Return the stored snapshot dict of a cluster ({} when absent).

    Raises ``error_codes.APIError`` when the paas_cc call fails.
    """
    resp = paas_cc.get_cluster_snapshot(access_token, project_id, cluster_id)
    code = resp.get("code")
    if code != ErrorCode.NoError:
        raise error_codes.APIError(_("获取集群快照失败,{}").format(resp.get("message")))
    return resp.get("data") or {}
def update_cluster_status(access_token, project_id, cluster_id, status):
    """Persist *status* for the cluster and return the updated record ({} fallback).

    Raises ``error_codes.APIError`` when the paas_cc call fails.
    """
    resp = paas_cc.update_cluster(access_token, project_id, cluster_id, {"status": status})
    if resp.get("code") != ErrorCode.NoError:
        raise error_codes.APIError(_("更新集群状态失败,{}").format(resp.get("message")))
    return resp.get("data") or {}
@parse_response_data(default_data={})
def get_cluster(access_token, project_id, cluster_id):
    """Fetch a single cluster record from paas_cc.

    The ``parse_response_data`` decorator presumably unwraps the response
    payload and substitutes ``{}`` on failure (per ``default_data={}``) —
    confirm against the decorator's implementation.
    """
    return paas_cc.get_cluster(access_token, project_id, cluster_id)
@region.cache_on_arguments(expiration_time=3600 * 24 * 7)
def METHOD_NAME(access_token, project_id, cluster_id):
    """Return the cluster's ``type`` field.

    Cached for 7 days: the cluster type cannot be changed after creation,
    so a long-lived cache is safe. (Original TODO asked whether caching is
    needed to cut API latency — the decorator above answers it.)

    NOTE(review): when ``get_cluster`` falls back to its empty default on
    API failure, ``cluster["type"]`` raises ``KeyError`` — and a failure
    result could even be cached. Confirm callers expect this.
    """
    cluster = get_cluster(access_token, project_id, cluster_id)
    return cluster["type"]
@parse_response_data()
def delete_cluster(access_token, project_id, cluster_id):
    """Delete a cluster record in paas_cc; the decorator unwraps the response payload."""
    return paas_cc.delete_cluster(access_token, project_id, cluster_id)
def get_cc_zk_config(access_token, project_id, cluster_id):
    """Return the first ZooKeeper config record from paas_cc.

    Raises ``error_codes.APIError`` when the call fails or returns no data.
    """
    resp = paas_cc.get_zk_config(access_token, project_id, cluster_id)
    if resp.get("code") != ErrorCode.NoError:
        raise error_codes.APIError(_("通过cc获取zk信息出错,{}").format(resp.get("message")))
    records = resp.get("data")
    if not records:
        raise error_codes.APIError(_("通过cc获取zk信息为空"))
    return records[0]
def get_cc_repo_domain(access_token, project_id, cluster_id):
    """Return the raw jfrog (artifact repo) domain response from paas_cc, unparsed."""
    return paas_cc.get_jfrog_domain(access_token, project_id, cluster_id)
@parse_response_data()
def update_cc_nodes_status(access_token, project_id, cluster_id, nodes):
    """Update the recorded status of *nodes* in paas_cc; the decorator unwraps the response payload."""
    return paas_cc.update_node_list(access_token, project_id, cluster_id, data=nodes)
def append_shared_clusters(clusters: List) -> List:
    """Append shared clusters to *clusters* (mutated in place) and return it.

    Shared clusters whose ``cluster_id`` is already present are skipped.
    (Also fixes a stray quote typo in the original docstring.)
    """
    shared_clusters = cm.get_shared_clusters()
    if not shared_clusters:
        return clusters
    # Only membership is tested, so a set of ids suffices — the original
    # built a full id -> cluster dict whose values were never read.
    existing_ids = {cluster["cluster_id"] for cluster in clusters}
    for cluster in shared_clusters:
        if cluster["cluster_id"] not in existing_ids:
            clusters.append(cluster)
    return clusters
def get_cluster_type(cluster_id: str) -> ClusterType:
    """Classify a cluster id: SHARED when it appears in the shared-cluster list, else SINGLE."""
    is_shared = any(
        cluster_id == cluster['cluster_id'] for cluster in cm.get_shared_clusters()
    )
    return ClusterType.SHARED if is_shared else ClusterType.SINGLE
def is_proj_ns_in_shared_cluster(ctx_cluster: CtxCluster, namespace: Optional[str], project_code: str) -> bool:
    """Check that *namespace* exists in the shared cluster and is annotated as owned by *project_code*.

    :param ctx_cluster: cluster context
    :param namespace: namespace name (falsy values short-circuit to False)
    :param project_code: project english name
    :return: True / False
    """
    if not namespace:
        return False
    ns = Namespace(ctx_cluster).get(name=namespace, is_format=False)
    # bool(...) so the declared return type actually holds: the bare `and`
    # expression could leak a falsy non-bool (e.g. None when the namespace
    # is missing) to callers.
    return bool(ns and getitems(ns.metadata, ['annotations', PROJ_CODE_ANNO_KEY]) == project_code)
def get_shared_cluster_proj_namespaces(ctx_cluster: CtxCluster, project_code: str) -> List[str]:
    """Return the namespace names owned by *project_code* inside a shared cluster.

    :param ctx_cluster: cluster context
    :param project_code: project english name
    :return: list of namespace names
    """
    ns_resp = Namespace(ctx_cluster).list(
        is_format=False, cluster_type=ClusterType.SHARED, project_code=project_code
    )
    return [getitems(ns, 'metadata.name') for ns in ns_resp['items']]
5,796 | itervaluerefs | import sys
from _typeshed import SupportsKeysAndGetItem
from _weakref import (
CallableProxyType as CallableProxyType,
ProxyType as ProxyType,
ReferenceType as ReferenceType,
getweakrefcount as getweakrefcount,
getweakrefs as getweakrefs,
proxy as proxy,
ref as ref,
)
from _weakrefset import WeakSet as WeakSet
from collections.abc import Callable, Iterable, Iterator, Mapping, MutableMapping
from typing import Any, Generic, TypeVar, overload
from typing_extensions import ParamSpec, Self
# Public re-export surface of the weakref module stub.
__all__ = [
    "ref",
    "proxy",
    "getweakrefcount",
    "getweakrefs",
    "WeakKeyDictionary",
    "ReferenceType",
    "ProxyType",
    "CallableProxyType",
    "ProxyTypes",
    "WeakValueDictionary",
    "WeakSet",
    "WeakMethod",
    "finalize",
]
# Generic type variables used throughout the stubs below.
_T = TypeVar("_T")
_T1 = TypeVar("_T1")
_T2 = TypeVar("_T2")
_KT = TypeVar("_KT")  # key type
_VT = TypeVar("_VT")  # value type
_CallableT = TypeVar("_CallableT", bound=Callable[..., Any])
_P = ParamSpec("_P")
# Runtime tuple of the proxy classes, suitable for isinstance() checks.
ProxyTypes: tuple[type[Any], ...]
class WeakMethod(ref[_CallableT], Generic[_CallableT]):
    """Weak reference to a bound method (which a plain ``ref`` cannot hold alive-correctly)."""
    def __new__(cls, meth: _CallableT, callback: Callable[[Self], object] | None = None) -> Self: ...
    def __call__(self) -> _CallableT | None: ...  # None once the referent has been collected
    def __eq__(self, other: object) -> bool: ...
    def __ne__(self, other: object) -> bool: ...
    def __hash__(self) -> int: ...
class WeakValueDictionary(MutableMapping[_KT, _VT]):
    """Mapping whose values are held by weak reference; entries vanish when a value is collected."""
    @overload
    def __init__(self) -> None: ...
    @overload
    def __init__(self: WeakValueDictionary[_KT, _VT], __other: Mapping[_KT, _VT] | Iterable[tuple[_KT, _VT]]) -> None: ...
    @overload
    def __init__(
        self: WeakValueDictionary[str, _VT], __other: Mapping[str, _VT] | Iterable[tuple[str, _VT]] = (), **kwargs: _VT
    ) -> None: ...
    def __len__(self) -> int: ...
    def __getitem__(self, key: _KT) -> _VT: ...
    def __setitem__(self, key: _KT, value: _VT) -> None: ...
    def __delitem__(self, key: _KT) -> None: ...
    def __contains__(self, key: object) -> bool: ...
    def __iter__(self) -> Iterator[_KT]: ...
    def copy(self) -> WeakValueDictionary[_KT, _VT]: ...
    __copy__ = copy
    def __deepcopy__(self, memo: Any) -> Self: ...
    # These are incompatible with Mapping
    def keys(self) -> Iterator[_KT]: ... # type: ignore[override]
    def values(self) -> Iterator[_VT]: ... # type: ignore[override]
    def items(self) -> Iterator[tuple[_KT, _VT]]: ... # type: ignore[override]
    # Iterate weak references to the values; each KeyedRef also carries its key.
    def METHOD_NAME(self) -> Iterator[KeyedRef[_KT, _VT]]: ...
    def valuerefs(self) -> list[KeyedRef[_KT, _VT]]: ...
    def setdefault(self, key: _KT, default: _VT) -> _VT: ... # type: ignore[override]
    @overload
    def pop(self, key: _KT) -> _VT: ...
    @overload
    def pop(self, key: _KT, default: _VT) -> _VT: ...
    @overload
    def pop(self, key: _KT, default: _T) -> _VT | _T: ...
    if sys.version_info >= (3, 9):
        def __or__(self, other: Mapping[_T1, _T2]) -> WeakValueDictionary[_KT | _T1, _VT | _T2]: ...
        def __ror__(self, other: Mapping[_T1, _T2]) -> WeakValueDictionary[_KT | _T1, _VT | _T2]: ...
        # WeakValueDictionary.__ior__ should be kept roughly in line with MutableMapping.update()
        @overload  # type: ignore[misc]
        def __ior__(self, other: SupportsKeysAndGetItem[_KT, _VT]) -> Self: ...
        @overload
        def __ior__(self, other: Iterable[tuple[_KT, _VT]]) -> Self: ...
class KeyedRef(ref[_T], Generic[_KT, _T]):
    """Weak reference that remembers the mapping key it was stored under."""
    key: _KT
    # This __new__ method uses a non-standard name for the "cls" parameter
    def __new__(type, ob: _T, callback: Callable[[_T], Any], key: _KT) -> Self: ...
    def __init__(self, ob: _T, callback: Callable[[_T], Any], key: _KT) -> None: ...
class WeakKeyDictionary(MutableMapping[_KT, _VT]):
    """Mapping whose keys are held by weak reference; entries vanish when a key is collected."""
    @overload
    def __init__(self, dict: None = None) -> None: ...
    @overload
    def __init__(self, dict: Mapping[_KT, _VT] | Iterable[tuple[_KT, _VT]]) -> None: ...
    def __len__(self) -> int: ...
    def __getitem__(self, key: _KT) -> _VT: ...
    def __setitem__(self, key: _KT, value: _VT) -> None: ...
    def __delitem__(self, key: _KT) -> None: ...
    def __contains__(self, key: object) -> bool: ...
    def __iter__(self) -> Iterator[_KT]: ...
    def copy(self) -> WeakKeyDictionary[_KT, _VT]: ...
    __copy__ = copy
    def __deepcopy__(self, memo: Any) -> Self: ...
    # These are incompatible with Mapping
    def keys(self) -> Iterator[_KT]: ... # type: ignore[override]
    def values(self) -> Iterator[_VT]: ... # type: ignore[override]
    def items(self) -> Iterator[tuple[_KT, _VT]]: ... # type: ignore[override]
    # Strong list of weak references to the live keys.
    def keyrefs(self) -> list[ref[_KT]]: ...
    # Keep WeakKeyDictionary.setdefault in line with MutableMapping.setdefault, modulo positional-only differences
    @overload
    def setdefault(self: WeakKeyDictionary[_KT, _VT | None], key: _KT, default: None = None) -> _VT: ...
    @overload
    def setdefault(self, key: _KT, default: _VT) -> _VT: ...
    @overload
    def pop(self, key: _KT) -> _VT: ...
    @overload
    def pop(self, key: _KT, default: _VT) -> _VT: ...
    @overload
    def pop(self, key: _KT, default: _T) -> _VT | _T: ...
    if sys.version_info >= (3, 9):
        def __or__(self, other: Mapping[_T1, _T2]) -> WeakKeyDictionary[_KT | _T1, _VT | _T2]: ...
        def __ror__(self, other: Mapping[_T1, _T2]) -> WeakKeyDictionary[_KT | _T1, _VT | _T2]: ...
        # WeakKeyDictionary.__ior__ should be kept roughly in line with MutableMapping.update()
        @overload  # type: ignore[misc]
        def __ior__(self, other: SupportsKeysAndGetItem[_KT, _VT]) -> Self: ...
        @overload
        def __ior__(self, other: Iterable[tuple[_KT, _VT]]) -> Self: ...
class finalize: # TODO: This is a good candidate to be a `Generic[_P, _T]` class
    """Schedules ``__func(*args, **kwargs)`` to run when *__obj* is garbage collected."""
    def __init__(self, __obj: object, __func: Callable[_P, Any], *args: _P.args, **kwargs: _P.kwargs) -> None: ...
    def __call__(self, _: Any = None) -> Any | None: ...
    # detach() cancels the finalizer and returns (obj, func, args, kwargs); peek() returns it without cancelling.
    def detach(self) -> tuple[Any, Any, tuple[Any, ...], dict[str, Any]] | None: ...
    def peek(self) -> tuple[Any, Any, tuple[Any, ...], dict[str, Any]] | None: ...
    @property
    def alive(self) -> bool: ...
    atexit: bool  # whether the finalizer is invoked at interpreter exit
5,797 | recvmsg into | import socket
import warnings
class TransportSocket:
    """A socket-like wrapper for exposing real transport sockets.

    These objects can be safely returned by APIs like
    `transport.get_extra_info('socket')`.  All potentially disruptive
    operations (like "socket.close()") are banned.
    """

    __slots__ = ('_sock',)

    def __init__(self, sock: socket.socket):
        self._sock = sock

    def _na(self, what):
        # Deprecation shim: warn but still allow the operation (for now).
        warnings.warn(
            f"Using {what} on sockets returned from get_extra_info('socket') "
            f"will be prohibited in asyncio 3.9. Please report your use case "
            f"to bugs.python.org.",
            DeprecationWarning, source=self)

    # Read-only pass-throughs: these are safe to expose without a warning.
    @property
    def family(self):
        return self._sock.family

    @property
    def type(self):
        return self._sock.type

    @property
    def proto(self):
        return self._sock.proto

    def __repr__(self):
        s = (
            f"<asyncio.TransportSocket fd={self.fileno()}, "
            f"family={self.family!s}, type={self.type!s}, "
            f"proto={self.proto}"
        )
        # Addresses are best-effort: a closed/unconnected socket raises.
        if self.fileno() != -1:
            try:
                laddr = self.getsockname()
                if laddr:
                    s = f"{s}, laddr={laddr}"
            except socket.error:
                pass
            try:
                raddr = self.getpeername()
                if raddr:
                    s = f"{s}, raddr={raddr}"
            except socket.error:
                pass
        return f"{s}>"

    def __getstate__(self):
        raise TypeError("Cannot serialize asyncio.TransportSocket object")

    def fileno(self):
        return self._sock.fileno()

    def dup(self):
        return self._sock.dup()

    def get_inheritable(self):
        return self._sock.get_inheritable()

    def shutdown(self, how):
        # asyncio doesn't currently provide a high-level transport API
        # to shutdown the connection.
        self._sock.shutdown(how)

    def getsockopt(self, *args, **kwargs):
        return self._sock.getsockopt(*args, **kwargs)

    def setsockopt(self, *args, **kwargs):
        self._sock.setsockopt(*args, **kwargs)

    def getpeername(self):
        return self._sock.getpeername()

    def getsockname(self):
        return self._sock.getsockname()

    def getsockbyname(self):
        # NOTE(review): socket.socket defines no getsockbyname() method, so
        # this delegation will raise AttributeError if ever called — confirm
        # and consider removing (likely a typo for getsockname()).
        return self._sock.getsockbyname()

    # Everything below is "disruptive": each call emits the deprecation
    # warning via _na() and then still delegates to the real socket.
    def accept(self):
        self._na('accept() method')
        return self._sock.accept()

    def connect(self, *args, **kwargs):
        self._na('connect() method')
        return self._sock.connect(*args, **kwargs)

    def connect_ex(self, *args, **kwargs):
        self._na('connect_ex() method')
        return self._sock.connect_ex(*args, **kwargs)

    def bind(self, *args, **kwargs):
        self._na('bind() method')
        return self._sock.bind(*args, **kwargs)

    def ioctl(self, *args, **kwargs):
        self._na('ioctl() method')
        return self._sock.ioctl(*args, **kwargs)

    def listen(self, *args, **kwargs):
        self._na('listen() method')
        return self._sock.listen(*args, **kwargs)

    def makefile(self):
        self._na('makefile() method')
        return self._sock.makefile()

    def sendfile(self, *args, **kwargs):
        self._na('sendfile() method')
        return self._sock.sendfile(*args, **kwargs)

    def close(self):
        self._na('close() method')
        return self._sock.close()

    def detach(self):
        self._na('detach() method')
        return self._sock.detach()

    def sendmsg_afalg(self, *args, **kwargs):
        self._na('sendmsg_afalg() method')
        return self._sock.sendmsg_afalg(*args, **kwargs)

    def sendmsg(self, *args, **kwargs):
        self._na('sendmsg() method')
        return self._sock.sendmsg(*args, **kwargs)

    def sendto(self, *args, **kwargs):
        self._na('sendto() method')
        return self._sock.sendto(*args, **kwargs)

    def send(self, *args, **kwargs):
        self._na('send() method')
        return self._sock.send(*args, **kwargs)

    def sendall(self, *args, **kwargs):
        self._na('sendall() method')
        return self._sock.sendall(*args, **kwargs)

    def set_inheritable(self, *args, **kwargs):
        self._na('set_inheritable() method')
        return self._sock.set_inheritable(*args, **kwargs)

    def share(self, process_id):
        self._na('share() method')
        return self._sock.share(process_id)

    def recv_into(self, *args, **kwargs):
        self._na('recv_into() method')
        return self._sock.recv_into(*args, **kwargs)

    def recvfrom_into(self, *args, **kwargs):
        self._na('recvfrom_into() method')
        return self._sock.recvfrom_into(*args, **kwargs)

    def METHOD_NAME(self, *args, **kwargs):
        self._na('recvmsg_into() method')
        return self._sock.METHOD_NAME(*args, **kwargs)

    def recvmsg(self, *args, **kwargs):
        self._na('recvmsg() method')
        return self._sock.recvmsg(*args, **kwargs)

    def recvfrom(self, *args, **kwargs):
        self._na('recvfrom() method')
        return self._sock.recvfrom(*args, **kwargs)

    def recv(self, *args, **kwargs):
        self._na('recv() method')
        return self._sock.recv(*args, **kwargs)

    def settimeout(self, value):
        # Transport sockets are managed by the event loop and must stay
        # non-blocking; only a zero timeout is tolerated (as a no-op).
        if value == 0:
            return
        raise ValueError(
            'settimeout(): only 0 timeout is allowed on transport sockets')

    def gettimeout(self):
        return 0

    def setblocking(self, flag):
        # Same rationale as settimeout(): never allow blocking mode.
        if not flag:
            return
        raise ValueError(
            'setblocking(): transport sockets cannot be blocking')

    def __enter__(self):
        self._na('context manager protocol')
        return self._sock.__enter__()

    def __exit__(self, *err):
        self._na('context manager protocol')
        return self._sock.__exit__(*err)
5,798 | tensor buffer has identifier | # SPDX-License-Identifier: Apache-2.0
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: tflite
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class Tensor(object):
    """Generated FlatBuffers read accessor for the tflite ``Tensor`` table.

    Auto-generated code — every method decodes a vtable slot by byte offset;
    do not edit the offset arithmetic by hand.
    """
    __slots__ = ['_tab']

    @classmethod
    def GetRootAs(cls, buf, offset=0):
        # Resolve the root table offset stored at the start of the buffer.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = Tensor()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def GetRootAsTensor(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)

    @classmethod
    def METHOD_NAME(cls, buf, offset, size_prefixed=False):
        # True when *buf* carries the TFLite file identifier b"TFL3" at *offset*.
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # Tensor
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # Tensor: shape vector (vtable slot 4); element *j* as int32.
    def Shape(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
        return 0

    # Tensor
    def ShapeAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
        return 0

    # Tensor
    def ShapeLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # Tensor
    def ShapeIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        return o == 0

    # Tensor: element type enum (int8, slot 6).
    def Type(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
        return 0

    # Tensor: index into the model's buffer table (uint32, slot 8).
    def Buffer(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
        return 0

    # Tensor
    def Name(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return self._tab.String(o + self._tab.Pos)
        return None

    # Tensor: nested QuantizationParameters table (slot 12), or None.
    def Quantization(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            x = self._tab.Indirect(o + self._tab.Pos)
            from tf2onnx.tflite.QuantizationParameters import QuantizationParameters
            obj = QuantizationParameters()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None

    # Tensor
    def IsVariable(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

    # Tensor: nested SparsityParameters table (slot 16), or None.
    def Sparsity(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
        if o != 0:
            x = self._tab.Indirect(o + self._tab.Pos)
            from tf2onnx.tflite.SparsityParameters import SparsityParameters
            obj = SparsityParameters()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None

    # Tensor: shape_signature vector (slot 18); like Shape but may hold -1 for dynamic dims.
    def ShapeSignature(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
        return 0

    # Tensor
    def ShapeSignatureAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
        return 0

    # Tensor
    def ShapeSignatureLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # Tensor
    def ShapeSignatureIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
        return o == 0
# Builder helpers for the Tensor table (generated API; 8 vtable slots, 0-7).
# Fix: the final TensorEnd wrapper was truncated — `return End(builder` was
# missing its closing parenthesis, a syntax error.
def Start(builder): builder.StartObject(8)
def TensorStart(builder):
    """This method is deprecated. Please switch to Start."""
    return Start(builder)
def AddShape(builder, shape): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(shape), 0)
def TensorAddShape(builder, shape):
    """This method is deprecated. Please switch to AddShape."""
    return AddShape(builder, shape)
def StartShapeVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def TensorStartShapeVector(builder, numElems):
    """This method is deprecated. Please switch to Start."""
    return StartShapeVector(builder, numElems)
def AddType(builder, type): builder.PrependInt8Slot(1, type, 0)
def TensorAddType(builder, type):
    """This method is deprecated. Please switch to AddType."""
    return AddType(builder, type)
def AddBuffer(builder, buffer): builder.PrependUint32Slot(2, buffer, 0)
def TensorAddBuffer(builder, buffer):
    """This method is deprecated. Please switch to AddBuffer."""
    return AddBuffer(builder, buffer)
def AddName(builder, name): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
def TensorAddName(builder, name):
    """This method is deprecated. Please switch to AddName."""
    return AddName(builder, name)
def AddQuantization(builder, quantization): builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(quantization), 0)
def TensorAddQuantization(builder, quantization):
    """This method is deprecated. Please switch to AddQuantization."""
    return AddQuantization(builder, quantization)
def AddIsVariable(builder, isVariable): builder.PrependBoolSlot(5, isVariable, 0)
def TensorAddIsVariable(builder, isVariable):
    """This method is deprecated. Please switch to AddIsVariable."""
    return AddIsVariable(builder, isVariable)
def AddSparsity(builder, sparsity): builder.PrependUOffsetTRelativeSlot(6, flatbuffers.number_types.UOffsetTFlags.py_type(sparsity), 0)
def TensorAddSparsity(builder, sparsity):
    """This method is deprecated. Please switch to AddSparsity."""
    return AddSparsity(builder, sparsity)
def AddShapeSignature(builder, shapeSignature): builder.PrependUOffsetTRelativeSlot(7, flatbuffers.number_types.UOffsetTFlags.py_type(shapeSignature), 0)
def TensorAddShapeSignature(builder, shapeSignature):
    """This method is deprecated. Please switch to AddShapeSignature."""
    return AddShapeSignature(builder, shapeSignature)
def StartShapeSignatureVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def TensorStartShapeSignatureVector(builder, numElems):
    """This method is deprecated. Please switch to Start."""
    return StartShapeSignatureVector(builder, numElems)
def End(builder): return builder.EndObject()
def TensorEnd(builder):
    """This method is deprecated. Please switch to End."""
    return End(builder)
5,799 | main | #!/usr/bin/env python3
"""Meant to be run from inside python-test-runner container,
where this track repo is mounted at /python
"""
import argparse
from functools import wraps
from itertools import zip_longest
import json
from pathlib import Path
import shutil
import subprocess
import sys
import tempfile
from typing import List
from data import Config, ExerciseConfig, ExerciseInfo, ExerciseStatus
# Allow high-performance tests to be skipped
ALLOW_SKIP = ['alphametics', 'largest-series-product']
# Expected location of the exercism test-runner checkout inside its container.
TEST_RUNNER_DIR = Path('/opt/test-runner')
# name -> runner function registry, populated by the @runner decorator below.
RUNNERS = {}
def runner(name):
    """Decorator factory: register the decorated runner function in RUNNERS under *name*."""
    def register(func):
        RUNNERS[name] = func

        @wraps(func)
        def wrapper(exercise: ExerciseInfo, workdir: Path, quiet: bool = False):
            return func(exercise, workdir, quiet=quiet)

        return wrapper
    return register
def copy_file(src: Path, dst: Path, strip_skips=False):
    """Copy *src* to *dst*; with ``strip_skips`` drop ``@unittest.skip`` decorator lines."""
    if not strip_skips:
        shutil.copy2(src, dst)
        return
    kept = [
        line
        for line in src.read_text().splitlines(keepends=True)
        if not line.lstrip().startswith('@unittest.skip')
    ]
    dst.write_text(''.join(kept))
def copy_solution_files(exercise: ExerciseInfo, workdir: Path, exercise_config: ExerciseConfig = None):
    """Copy the exemplar solution (and editor helper files) into *workdir*.

    When a solution stub and an exemplar are paired, the exemplar's content is
    written to the stub's path, so the tests run against a passing solution.
    """
    if exercise_config is not None:
        solution_files = exercise_config.files.solution
        exemplar_files = exercise_config.files.exemplar
        helper_files = exercise_config.files.editor
    else:
        solution_files = []
        exemplar_files = []
        helper_files = []
    if helper_files:
        helper_files = [exercise.path / h for h in helper_files]
        for helper_file in helper_files:
            dst = workdir / helper_file.relative_to(exercise.path)
            copy_file(helper_file, dst)
    # Fall back to the conventional stub/exemplar names when the config lists none.
    if not solution_files:
        solution_files.append(exercise.solution_stub.name)
    solution_files = [exercise.path / s for s in solution_files]
    if not exemplar_files:
        exemplar_files.append(exercise.exemplar_file.relative_to(exercise.path))
    exemplar_files = [exercise.path / e for e in exemplar_files]
    # Pair stubs with exemplars positionally; unmatched entries are copied
    # to the workdir under their own file names.
    for solution_file, exemplar_file in zip_longest(solution_files, exemplar_files):
        if solution_file is None:
            copy_file(exemplar_file, workdir / exemplar_file.name)
        elif exemplar_file is None:
            copy_file(solution_file, workdir / solution_file.name)
        else:
            # Exemplar content lands at the solution file's relative path.
            dst = workdir / solution_file.relative_to(exercise.path)
            copy_file(exemplar_file, dst)
def copy_test_files(exercise: ExerciseInfo, workdir: Path, exercise_config = None):
    """Copy the exercise's test files (and editor helpers) into *workdir*.

    ``@unittest.skip`` decorator lines are stripped unless the slug is
    listed in ALLOW_SKIP (high-performance exercises keep their skips).
    """
    if exercise_config is not None:
        test_files = exercise_config.files.test
        helper_files = exercise_config.files.editor
    else:
        test_files = []
        helper_files = []
    if helper_files:
        for helper_file_name in helper_files:
            helper_file = exercise.path / helper_file_name
            helper_file_out = workdir / helper_file_name
            copy_file(helper_file, helper_file_out, strip_skips=(exercise.slug not in ALLOW_SKIP))
    # Fall back to the conventional test file name when the config lists none.
    if not test_files:
        test_files.append(exercise.test_file.name)
    for test_file_name in test_files:
        test_file = exercise.path / test_file_name
        test_file_out = workdir / test_file_name
        copy_file(test_file, test_file_out, strip_skips=(exercise.slug not in ALLOW_SKIP))
def copy_exercise_files(exercise: ExerciseInfo, workdir: Path):
    """Stage everything an exercise needs (config, solution, tests) into *workdir*."""
    config = None
    if exercise.config_file.is_file():
        meta_dir = workdir / '.meta'
        meta_dir.mkdir(exist_ok=True)
        copy_file(exercise.config_file, meta_dir / exercise.config_file.name)
        config = exercise.load_config()
    copy_solution_files(exercise, workdir, config)
    copy_test_files(exercise, workdir, config)
@runner('pytest')
def run_with_pytest(_exercise, workdir, quiet: bool = False) -> int:
    """Run pytest inside *workdir* and return its exit code (0 == all passed)."""
    run_kwargs = {'cwd': str(workdir)}
    if quiet:
        run_kwargs.update(stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    proc = subprocess.run([sys.executable, '-m', 'pytest'], **run_kwargs)
    return proc.returncode
@runner('test-runner')
def run_with_test_runner(exercise, workdir, quiet: bool = False) -> int:
    """Run the official exercism test-runner against *workdir*.

    Uses the local checkout at TEST_RUNNER_DIR when present, otherwise falls
    back to docker-compose. Returns 0 on pass, 1 on failure or when no
    results.json was produced.
    """
    kwargs = {}
    if quiet:
        kwargs['stdout'] = subprocess.DEVNULL
        kwargs['stderr'] = subprocess.DEVNULL
    if TEST_RUNNER_DIR.is_dir():
        kwargs['cwd'] = str(TEST_RUNNER_DIR)
        args = ['./bin/run.sh', exercise.slug, workdir, workdir]
    else:
        # Mount the workdir into the container as /<slug> and point the
        # runner's input and output paths at it.
        args = [
            'docker-compose',
            'run',
            '-w', str(TEST_RUNNER_DIR),
            '--entrypoint', './bin/run.sh',
            '-v', f'{workdir}:/{exercise.slug}',
            'test-runner',
            exercise.slug,
            f'/{exercise.slug}',
            f'/{exercise.slug}',
        ]
    subprocess.run(args, **kwargs)
    # The runner reports via results.json; absence counts as failure.
    results_file = workdir / 'results.json'
    if results_file.is_file():
        with results_file.open() as f:
            results = json.load(f)
        if results['status'] == 'pass':
            return 0
    return 1
def check_assignment(exercise: ExerciseInfo, runner: str = 'pytest', quiet: bool = False) -> int:
    """Stage *exercise* in a temporary directory and execute the chosen runner.

    Returns the runner's exit status (0 == success); the staging directory
    is removed when the context manager exits.
    """
    with tempfile.TemporaryDirectory(exercise.slug) as tmp:
        staging = Path(tmp)
        copy_exercise_files(exercise, staging)
        return RUNNERS[runner](exercise, staging, quiet=quiet)
def get_cli() -> argparse.ArgumentParser:
    """Build the command-line parser; abort when no runner has been registered."""
    parser = argparse.ArgumentParser()
    runner_names = list(RUNNERS)
    if not runner_names:
        print('No runners registered!')
        raise SystemExit(1)
    parser.add_argument('-q', '--quiet', action='store_true')
    parser.add_argument('--deprecated', action='store_true', help='include deprecated exercises', dest='include_deprecated')
    parser.add_argument('--wip', action='store_true', help='include WIP exercises', dest='include_wip')
    # First registered runner is the default.
    parser.add_argument('-r', '--runner', choices=runner_names, default=runner_names[0])
    parser.add_argument('exercises', nargs='*')
    return parser
def METHOD_NAME():
    """Entry point: resolve the exercise list, run each one, report failures.

    Raises SystemExit(1) on unknown slugs or any failing exercise.
    """
    opts = get_cli().parse_args()
    config = Config.load()
    status_filter = {ExerciseStatus.Active, ExerciseStatus.Beta}
    if opts.include_deprecated:
        status_filter.add(ExerciseStatus.Deprecated)
    if opts.include_wip:
        status_filter.add(ExerciseStatus.WIP)
    exercises = config.exercises.all(status_filter)
    if opts.exercises:
        # Restrict to the explicitly requested slugs.
        exercises = [e for e in exercises if e.slug in opts.exercises]
        known_slugs = {e.slug for e in exercises}
        not_found = [slug for slug in opts.exercises if slug not in known_slugs]
        if not_found:
            # FIX: the old loop re-tested `slug not in exercises`, comparing a
            # str against ExerciseInfo objects — always true, so it was a
            # misleading no-op guard.
            for slug in not_found:
                print(f"unknown or disabled exercise '{slug}'")
            raise SystemExit(1)
    # FIX: str.capitalize() lower-cased the interpreter path; print it as-is.
    print(f'TestEnvironment: {sys.executable}')
    print(f'Runner: {opts.runner}\n\n')
    failures = []
    for exercise in exercises:
        print('# ', exercise.slug)
        if not exercise.test_file:
            print('FAIL: File with test cases not found')
            failures.append('{} (FileNotFound)'.format(exercise.slug))
        elif check_assignment(exercise, runner=opts.runner, quiet=opts.quiet):
            failures.append('{} (TestFailed)'.format(exercise.slug))
        print('')
    if failures:
        print('FAILURES: ', ', '.join(failures))
        raise SystemExit(1)
    print('SUCCESS!')
if __name__ == "__main__":
METHOD_NAME() |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.